diff --git a/MVS/HelloEarth/BattleFireDirect.cpp b/MVS/HelloEarth/BattleFireDirect.cpp new file mode 100644 index 00000000..17475632 --- /dev/null +++ b/MVS/HelloEarth/BattleFireDirect.cpp @@ -0,0 +1,511 @@ +#include "BattleFireDirect.h" +ID3D12Device* gD3D12Device = nullptr; +ID3D12CommandQueue* gCommandQueue = nullptr; +IDXGISwapChain3* gSwapChain = nullptr; +ID3D12Resource* gDSRT = nullptr, * gColorRTs[2]; +int gCurrentRTIndex = 0; +ID3D12DescriptorHeap* gSwapChainRTVHeap = nullptr; +ID3D12DescriptorHeap* gSwapChainDSVHeap = nullptr; +UINT gRTVDescriptorSize = 0; +UINT gDSVDescriptorSize = 0; +ID3D12CommandAllocator* gCommandAllocator = nullptr; +ID3D12GraphicsCommandList* gCommandList = nullptr; +ID3D12Fence* gFence = nullptr; +HANDLE gFenceEvent = nullptr; +UINT64 gFenceValue = 0; + +ID3D12RootSignature* InitRootSignature() { + //1110001110101111111111111111111111 + D3D12_ROOT_PARAMETER rootParameters[4]; + rootParameters[1].ParameterType = D3D12_ROOT_PARAMETER_TYPE_32BIT_CONSTANTS; + rootParameters[1].ShaderVisibility = D3D12_SHADER_VISIBILITY_VERTEX; + rootParameters[1].Constants.RegisterSpace = 0; + rootParameters[1].Constants.ShaderRegister = 0; + rootParameters[1].Constants.Num32BitValues = 4; + + rootParameters[0].ParameterType = D3D12_ROOT_PARAMETER_TYPE_CBV; + rootParameters[0].ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL; + rootParameters[0].Descriptor.RegisterSpace = 0; + rootParameters[0].Descriptor.ShaderRegister = 1;//cbv + + D3D12_DESCRIPTOR_RANGE descriptorRange[1]; + descriptorRange[0].RangeType = D3D12_DESCRIPTOR_RANGE_TYPE_SRV; + descriptorRange[0].RegisterSpace = 0; + descriptorRange[0].BaseShaderRegister = 0;//t0 + descriptorRange[0].NumDescriptors = 1; + descriptorRange[0].OffsetInDescriptorsFromTableStart = D3D12_DESCRIPTOR_RANGE_OFFSET_APPEND; + + rootParameters[2].ParameterType = D3D12_ROOT_PARAMETER_TYPE_DESCRIPTOR_TABLE; + rootParameters[2].ShaderVisibility = D3D12_SHADER_VISIBILITY_PIXEL; + 
rootParameters[2].DescriptorTable.pDescriptorRanges = descriptorRange; + rootParameters[2].DescriptorTable.NumDescriptorRanges = _countof(descriptorRange);//cbv + + rootParameters[3].ParameterType = D3D12_ROOT_PARAMETER_TYPE_SRV; + rootParameters[3].ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL; + rootParameters[3].Descriptor.RegisterSpace = 1; + rootParameters[3].Descriptor.ShaderRegister = 0;//srv + + D3D12_STATIC_SAMPLER_DESC samplerDesc[1]; + memset(samplerDesc, 0,sizeof(D3D12_STATIC_SAMPLER_DESC)*_countof(samplerDesc)); + samplerDesc[0].Filter = D3D12_FILTER_MIN_MAG_MIP_LINEAR; + samplerDesc[0].AddressU = D3D12_TEXTURE_ADDRESS_MODE_CLAMP; + samplerDesc[0].AddressV = D3D12_TEXTURE_ADDRESS_MODE_CLAMP; + samplerDesc[0].AddressW = D3D12_TEXTURE_ADDRESS_MODE_CLAMP; + samplerDesc[0].BorderColor = D3D12_STATIC_BORDER_COLOR_OPAQUE_BLACK; + samplerDesc[0].MaxLOD = D3D12_FLOAT32_MAX; + samplerDesc[0].RegisterSpace = 0; + samplerDesc[0].ShaderRegister = 0;//s0 + samplerDesc[0].ShaderVisibility = D3D12_SHADER_VISIBILITY_PIXEL; + + D3D12_ROOT_SIGNATURE_DESC rootSignatureDesc = {}; + rootSignatureDesc.NumParameters = _countof(rootParameters); + rootSignatureDesc.pParameters = rootParameters; + rootSignatureDesc.NumStaticSamplers = _countof(samplerDesc); + rootSignatureDesc.pStaticSamplers = samplerDesc; + rootSignatureDesc.Flags = D3D12_ROOT_SIGNATURE_FLAG_ALLOW_INPUT_ASSEMBLER_INPUT_LAYOUT; + + //64 DWORD -> float 128 WORD -> 16bit + ID3DBlob* signature; + HRESULT hResult = D3D12SerializeRootSignature(&rootSignatureDesc, D3D_ROOT_SIGNATURE_VERSION_1, &signature, nullptr); + ID3D12RootSignature* d3d12RootSignature; + gD3D12Device->CreateRootSignature( + 0, signature->GetBufferPointer(), signature->GetBufferSize(), + IID_PPV_ARGS(&d3d12RootSignature)); + + return d3d12RootSignature; +} +void CreateShaderFromFile( + LPCTSTR inShaderFilePath, + const char* inMainFunctionName, + const char* inTarget,//"vs_5_0","ps_5_0","vs_4_0" + D3D12_SHADER_BYTECODE* inShader) { + ID3DBlob* 
shaderBuffer = nullptr; + ID3DBlob* errorBuffer = nullptr; + HRESULT hResult = D3DCompileFromFile(inShaderFilePath, nullptr, nullptr, + inMainFunctionName, inTarget, D3DCOMPILE_DEBUG | D3DCOMPILE_SKIP_OPTIMIZATION, + 0, &shaderBuffer, &errorBuffer); + if (FAILED(hResult)) { + char szLog[1024] = {0}; + strcpy(szLog, (char*)errorBuffer->GetBufferPointer()); + printf("CreateShaderFromFile error : [%s][%s]:[%s]\n", inMainFunctionName, inTarget, szLog); + errorBuffer->Release(); + return; + } + inShader->pShaderBytecode = shaderBuffer->GetBufferPointer(); + inShader->BytecodeLength = shaderBuffer->GetBufferSize(); +} +ID3D12Resource* CreateConstantBufferObject(int inDataLen) { + D3D12_HEAP_PROPERTIES d3dHeapProperties = {}; + d3dHeapProperties.Type = D3D12_HEAP_TYPE_UPLOAD;//cpu,gpu + D3D12_RESOURCE_DESC d3d12ResourceDesc = {}; + d3d12ResourceDesc.Dimension = D3D12_RESOURCE_DIMENSION_BUFFER; + d3d12ResourceDesc.Alignment = 0; + d3d12ResourceDesc.Width = inDataLen; + d3d12ResourceDesc.Height = 1; + d3d12ResourceDesc.DepthOrArraySize = 1; + d3d12ResourceDesc.MipLevels = 1; + d3d12ResourceDesc.Format = DXGI_FORMAT_UNKNOWN; + d3d12ResourceDesc.SampleDesc.Count = 1; + d3d12ResourceDesc.SampleDesc.Quality = 0; + d3d12ResourceDesc.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR; + d3d12ResourceDesc.Flags = D3D12_RESOURCE_FLAG_NONE; + + ID3D12Resource* bufferObject = nullptr; + gD3D12Device->CreateCommittedResource( + &d3dHeapProperties, + D3D12_HEAP_FLAG_NONE, + &d3d12ResourceDesc, + D3D12_RESOURCE_STATE_GENERIC_READ, + nullptr, + IID_PPV_ARGS(&bufferObject) + ); + return bufferObject; +} +void UpdateConstantBuffer(ID3D12Resource* inCB, void* inData, int inDataLen) { + D3D12_RANGE d3d12Range = { 0 }; + unsigned char* pBuffer = nullptr; + inCB->Map(0, &d3d12Range, (void**)&pBuffer); + memcpy(pBuffer, inData, inDataLen); + inCB->Unmap(0, nullptr); +} + +ID3D12PipelineState* CreatePSO(ID3D12RootSignature* inID3D12RootSignature, + D3D12_SHADER_BYTECODE inVertexShader, 
D3D12_SHADER_BYTECODE inPixelShader, + D3D12_SHADER_BYTECODE inGSShader) { + D3D12_INPUT_ELEMENT_DESC vertexDataElementDesc[] = { + {"POSITION",0,DXGI_FORMAT_R32G32B32A32_FLOAT,0,0,D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA,0}, + {"TEXCOORD",0,DXGI_FORMAT_R32G32B32A32_FLOAT,0,sizeof(float) * 4,D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA,0}, + {"NORMAL",0,DXGI_FORMAT_R32G32B32A32_FLOAT,0,sizeof(float) * 8,D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA,0}, + {"TANGENT",0,DXGI_FORMAT_R32G32B32A32_FLOAT,0,sizeof(float) * 12,D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA,0} + }; + D3D12_INPUT_LAYOUT_DESC vertexDataLayoutDesc = {}; + vertexDataLayoutDesc.NumElements = 4; + vertexDataLayoutDesc.pInputElementDescs = vertexDataElementDesc; + + D3D12_GRAPHICS_PIPELINE_STATE_DESC psoDesc = {}; + psoDesc.pRootSignature = inID3D12RootSignature; + psoDesc.VS = inVertexShader; + psoDesc.GS = inGSShader; + psoDesc.PS = inPixelShader; + psoDesc.RTVFormats[0] = DXGI_FORMAT_R8G8B8A8_UNORM; + psoDesc.DSVFormat = DXGI_FORMAT_D24_UNORM_S8_UINT; + psoDesc.SampleDesc.Count = 1; + psoDesc.SampleDesc.Quality = 0; + psoDesc.SampleMask = 0xffffffff; + psoDesc.InputLayout = vertexDataLayoutDesc; + psoDesc.PrimitiveTopologyType = D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE; + + psoDesc.RasterizerState.FillMode = D3D12_FILL_MODE_SOLID; + psoDesc.RasterizerState.CullMode = D3D12_CULL_MODE_BACK; + psoDesc.RasterizerState.DepthClipEnable = TRUE; + + psoDesc.DepthStencilState.DepthEnable = true; + psoDesc.DepthStencilState.DepthWriteMask = D3D12_DEPTH_WRITE_MASK_ALL; + psoDesc.DepthStencilState.DepthFunc = D3D12_COMPARISON_FUNC_LESS_EQUAL; + + psoDesc.BlendState = { 0 }; + D3D12_RENDER_TARGET_BLEND_DESC rtBlendDesc = { + FALSE,FALSE, + D3D12_BLEND_SRC_ALPHA,D3D12_BLEND_INV_SRC_ALPHA,D3D12_BLEND_OP_ADD, + D3D12_BLEND_SRC_ALPHA,D3D12_BLEND_INV_SRC_ALPHA,D3D12_BLEND_OP_ADD, + D3D12_LOGIC_OP_NOOP, + D3D12_COLOR_WRITE_ENABLE_ALL, + }; + for (int i = 0; i < D3D12_SIMULTANEOUS_RENDER_TARGET_COUNT; ++i) + 
psoDesc.BlendState.RenderTarget[i] = rtBlendDesc; + psoDesc.NumRenderTargets = 1; + ID3D12PipelineState* d3d12PSO = nullptr; + + HRESULT hResult = gD3D12Device->CreateGraphicsPipelineState(&psoDesc, IID_PPV_ARGS(&d3d12PSO)); + if (FAILED(hResult)) { + return nullptr; + } + return d3d12PSO; +} + +bool InitD3D12(HWND inHWND, int inWidth, int inHeight) { + HRESULT hResult; + UINT dxgiFactoryFlags = 0; +#ifdef _DEBUG + { + ID3D12Debug* debugController = nullptr; + if (SUCCEEDED(D3D12GetDebugInterface(IID_PPV_ARGS(&debugController)))) { + debugController->EnableDebugLayer(); + dxgiFactoryFlags |= DXGI_CREATE_FACTORY_DEBUG; + } + } +#endif + IDXGIFactory4* dxgiFactory; + hResult = CreateDXGIFactory2(dxgiFactoryFlags, IID_PPV_ARGS(&dxgiFactory)); + if (FAILED(hResult)) { + return false; + } + IDXGIAdapter1* adapter; + int adapterIndex = 0; + bool adapterFound = false; + while (dxgiFactory->EnumAdapters1(adapterIndex, &adapter) != DXGI_ERROR_NOT_FOUND) { + DXGI_ADAPTER_DESC1 desc; + adapter->GetDesc1(&desc); + if (desc.Flags & DXGI_ADAPTER_FLAG_SOFTWARE) { + continue; + } + hResult = D3D12CreateDevice(adapter, D3D_FEATURE_LEVEL_11_0, __uuidof(ID3D12Device), nullptr); + if (SUCCEEDED(hResult)) { + adapterFound = true; + break; + } + adapterIndex++; + } + if (false == adapterFound) { + return false; + } + hResult = D3D12CreateDevice(adapter, D3D_FEATURE_LEVEL_11_0, IID_PPV_ARGS(&gD3D12Device)); + if (FAILED(hResult)) { + return false; + } + D3D12_COMMAND_QUEUE_DESC d3d12CommandQueueDesc = {}; + hResult = gD3D12Device->CreateCommandQueue(&d3d12CommandQueueDesc, IID_PPV_ARGS(&gCommandQueue)); + if (FAILED(hResult)) { + return false; + } + DXGI_SWAP_CHAIN_DESC swapChainDesc = {}; + swapChainDesc.BufferCount = 2; + swapChainDesc.BufferDesc = {}; + swapChainDesc.BufferDesc.Width = inWidth; + swapChainDesc.BufferDesc.Height = inHeight; + swapChainDesc.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM; + swapChainDesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT; + 
swapChainDesc.OutputWindow = inHWND; + swapChainDesc.SampleDesc.Count = 1; + swapChainDesc.Windowed = true; + swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_DISCARD; + + IDXGISwapChain* swapChain = nullptr; + dxgiFactory->CreateSwapChain(gCommandQueue, &swapChainDesc, &swapChain); + gSwapChain = static_cast(swapChain); + + D3D12_HEAP_PROPERTIES d3dHeapProperties = {}; + d3dHeapProperties.Type = D3D12_HEAP_TYPE_DEFAULT; + + D3D12_RESOURCE_DESC d3d12ResourceDesc = {}; + d3d12ResourceDesc.Dimension = D3D12_RESOURCE_DIMENSION_TEXTURE2D; + d3d12ResourceDesc.Alignment = 0; + d3d12ResourceDesc.Width = inWidth; + d3d12ResourceDesc.Height = inHeight; + d3d12ResourceDesc.DepthOrArraySize = 1; + d3d12ResourceDesc.MipLevels = 1; + d3d12ResourceDesc.Format = DXGI_FORMAT_D24_UNORM_S8_UINT; + d3d12ResourceDesc.SampleDesc.Count = 1; + d3d12ResourceDesc.SampleDesc.Quality = 0; + d3d12ResourceDesc.Layout = D3D12_TEXTURE_LAYOUT_UNKNOWN; + d3d12ResourceDesc.Flags = D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL; + + D3D12_CLEAR_VALUE dsClearValue = {}; + dsClearValue.Format = DXGI_FORMAT_D24_UNORM_S8_UINT; + dsClearValue.DepthStencil.Depth = 1.0f; + dsClearValue.DepthStencil.Stencil = 0; + + gD3D12Device->CreateCommittedResource(&d3dHeapProperties, + D3D12_HEAP_FLAG_NONE, + &d3d12ResourceDesc, + D3D12_RESOURCE_STATE_DEPTH_WRITE, + &dsClearValue, + IID_PPV_ARGS(&gDSRT) + ); + //RTV,DSV,alloc + D3D12_DESCRIPTOR_HEAP_DESC d3dDescriptorHeapDescRTV = {}; + d3dDescriptorHeapDescRTV.NumDescriptors = 2; + d3dDescriptorHeapDescRTV.Type = D3D12_DESCRIPTOR_HEAP_TYPE_RTV; + gD3D12Device->CreateDescriptorHeap(&d3dDescriptorHeapDescRTV, IID_PPV_ARGS(&gSwapChainRTVHeap)); + gRTVDescriptorSize = gD3D12Device->GetDescriptorHandleIncrementSize(D3D12_DESCRIPTOR_HEAP_TYPE_RTV); + + D3D12_DESCRIPTOR_HEAP_DESC d3dDescriptorHeapDescDSV = {}; + d3dDescriptorHeapDescDSV.NumDescriptors = 1; + d3dDescriptorHeapDescDSV.Type = D3D12_DESCRIPTOR_HEAP_TYPE_DSV; + 
gD3D12Device->CreateDescriptorHeap(&d3dDescriptorHeapDescDSV, IID_PPV_ARGS(&gSwapChainDSVHeap)); + gDSVDescriptorSize = gD3D12Device->GetDescriptorHandleIncrementSize(D3D12_DESCRIPTOR_HEAP_TYPE_DSV); + + D3D12_CPU_DESCRIPTOR_HANDLE rtvHeapStart = gSwapChainRTVHeap->GetCPUDescriptorHandleForHeapStart(); + for (int i = 0; i < 2; i++) { + gSwapChain->GetBuffer(i, IID_PPV_ARGS(&gColorRTs[i])); + D3D12_CPU_DESCRIPTOR_HANDLE rtvPointer; + rtvPointer.ptr = rtvHeapStart.ptr + i * gRTVDescriptorSize; + gD3D12Device->CreateRenderTargetView(gColorRTs[i], nullptr, rtvPointer); + } + D3D12_DEPTH_STENCIL_VIEW_DESC d3dDSViewDesc = {}; + d3dDSViewDesc.Format = DXGI_FORMAT_D24_UNORM_S8_UINT; + d3dDSViewDesc.ViewDimension = D3D12_DSV_DIMENSION_TEXTURE2D; + + gD3D12Device->CreateDepthStencilView(gDSRT, &d3dDSViewDesc, gSwapChainDSVHeap->GetCPUDescriptorHandleForHeapStart()); + + gD3D12Device->CreateCommandAllocator(D3D12_COMMAND_LIST_TYPE_DIRECT, IID_PPV_ARGS(&gCommandAllocator)); + gD3D12Device->CreateCommandList(0, D3D12_COMMAND_LIST_TYPE_DIRECT, gCommandAllocator, nullptr, IID_PPV_ARGS(&gCommandList)); + + gD3D12Device->CreateFence(0, D3D12_FENCE_FLAG_NONE, IID_PPV_ARGS(&gFence)); + gFenceEvent = CreateEvent(nullptr, FALSE, FALSE, nullptr); + + return true; +} +ID3D12CommandAllocator* GetCommandAllocator() { + return gCommandAllocator; +} +ID3D12GraphicsCommandList* GetCommandList() { + return gCommandList; +} +void WaitForCompletionOfCommandList() { + if (gFence->GetCompletedValue() < gFenceValue) { + gFence->SetEventOnCompletion(gFenceValue, gFenceEvent); + WaitForSingleObject(gFenceEvent, INFINITE); + } +} +void EndCommandList() { + gCommandList->Close();// + ID3D12CommandList* ppCommandLists[] = { gCommandList }; + gCommandQueue->ExecuteCommandLists(1, ppCommandLists); + //CommandList + gFenceValue += 1; + gCommandQueue->Signal(gFence, gFenceValue);// +} +void BeginRenderToSwapChain(ID3D12GraphicsCommandList* inCommandList) { + gCurrentRTIndex = 
gSwapChain->GetCurrentBackBufferIndex(); + D3D12_RESOURCE_BARRIER barrier = InitResourceBarrier(gColorRTs[gCurrentRTIndex], D3D12_RESOURCE_STATE_PRESENT, D3D12_RESOURCE_STATE_RENDER_TARGET); + inCommandList->ResourceBarrier(1, &barrier); + D3D12_CPU_DESCRIPTOR_HANDLE colorRT, dsv; + dsv.ptr = gSwapChainDSVHeap->GetCPUDescriptorHandleForHeapStart().ptr; + colorRT.ptr = gSwapChainRTVHeap->GetCPUDescriptorHandleForHeapStart().ptr + gCurrentRTIndex * gRTVDescriptorSize; + inCommandList->OMSetRenderTargets(1, &colorRT, FALSE, &dsv); + D3D12_VIEWPORT viewport = { 0.0f,0.0f,1280.0f,720.0f }; + D3D12_RECT scissorRect = { 0,0,1280,720 }; + inCommandList->RSSetViewports(1, &viewport); + inCommandList->RSSetScissorRects(1, &scissorRect); + const float clearColor[] = { 0.0f,0.0f,0.0f,1.0f }; + inCommandList->ClearRenderTargetView(colorRT, clearColor, 0, nullptr); + inCommandList->ClearDepthStencilView(dsv, D3D12_CLEAR_FLAG_DEPTH | D3D12_CLEAR_FLAG_STENCIL, 1.0f, 0, 0, nullptr); +} +void EndRenderToSwapChain(ID3D12GraphicsCommandList* inCommandList) { + D3D12_RESOURCE_BARRIER barrier = InitResourceBarrier(gColorRTs[gCurrentRTIndex], D3D12_RESOURCE_STATE_RENDER_TARGET, D3D12_RESOURCE_STATE_PRESENT); + inCommandList->ResourceBarrier(1, &barrier); +} +void SwapD3D12Buffers() { + gSwapChain->Present(0, 0); +} +ID3D12Resource* CreateBufferObject(ID3D12GraphicsCommandList* inCommandList, + void* inData, int inDataLen, D3D12_RESOURCE_STATES inFinalResourceState) { + D3D12_HEAP_PROPERTIES d3dHeapProperties = {}; + d3dHeapProperties.Type = D3D12_HEAP_TYPE_DEFAULT;//gpu + D3D12_RESOURCE_DESC d3d12ResourceDesc = {}; + d3d12ResourceDesc.Dimension = D3D12_RESOURCE_DIMENSION_BUFFER; + d3d12ResourceDesc.Alignment = 0; + d3d12ResourceDesc.Width = inDataLen; + d3d12ResourceDesc.Height = 1; + d3d12ResourceDesc.DepthOrArraySize = 1; + d3d12ResourceDesc.MipLevels = 1; + d3d12ResourceDesc.Format = DXGI_FORMAT_UNKNOWN; + d3d12ResourceDesc.SampleDesc.Count = 1; + d3d12ResourceDesc.SampleDesc.Quality 
= 0; + d3d12ResourceDesc.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR; + d3d12ResourceDesc.Flags = D3D12_RESOURCE_FLAG_NONE; + + ID3D12Resource* bufferObject = nullptr; + gD3D12Device->CreateCommittedResource( + &d3dHeapProperties, + D3D12_HEAP_FLAG_NONE, + &d3d12ResourceDesc, + D3D12_RESOURCE_STATE_COPY_DEST, + nullptr, + IID_PPV_ARGS(&bufferObject) + ); + d3d12ResourceDesc = bufferObject->GetDesc(); + UINT64 memorySizeUsed = 0; + UINT64 rowSizeInBytes = 0; + UINT rowUsed = 0; + D3D12_PLACED_SUBRESOURCE_FOOTPRINT subresourceFootprint; + gD3D12Device->GetCopyableFootprints(&d3d12ResourceDesc, 0, 1, 0, + &subresourceFootprint, &rowUsed, &rowSizeInBytes, &memorySizeUsed); + // 3 x 4 x 4 = 48bytes,32bytes,24bytes + 24bytes + ID3D12Resource* tempBufferObject = nullptr; + d3dHeapProperties = {}; + d3dHeapProperties.Type = D3D12_HEAP_TYPE_UPLOAD;//cpu,gpu + gD3D12Device->CreateCommittedResource( + &d3dHeapProperties, + D3D12_HEAP_FLAG_NONE, + &d3d12ResourceDesc, + D3D12_RESOURCE_STATE_GENERIC_READ, + nullptr, + IID_PPV_ARGS(&tempBufferObject) + ); + + BYTE* pData; + tempBufferObject->Map(0, nullptr, reinterpret_cast(&pData)); + BYTE* pDstTempBuffer = reinterpret_cast(pData + subresourceFootprint.Offset); + const BYTE* pSrcData = reinterpret_cast(inData); + for (UINT i = 0; i < rowUsed; i++) { + memcpy(pDstTempBuffer + subresourceFootprint.Footprint.RowPitch * i, pSrcData + rowSizeInBytes * i, rowSizeInBytes); + } + tempBufferObject->Unmap(0, nullptr); + inCommandList->CopyBufferRegion(bufferObject, 0, tempBufferObject, 0, subresourceFootprint.Footprint.Width); + D3D12_RESOURCE_BARRIER barrier = InitResourceBarrier(bufferObject, D3D12_RESOURCE_STATE_COPY_DEST, inFinalResourceState); + inCommandList->ResourceBarrier(1, &barrier); + return bufferObject; +} +D3D12_RESOURCE_BARRIER InitResourceBarrier( + ID3D12Resource* inResource, D3D12_RESOURCE_STATES inPrevState, + D3D12_RESOURCE_STATES inNextState) { + D3D12_RESOURCE_BARRIER d3d12ResourceBarrier; + 
memset(&d3d12ResourceBarrier, 0, sizeof(d3d12ResourceBarrier)); + d3d12ResourceBarrier.Type = D3D12_RESOURCE_BARRIER_TYPE_TRANSITION; + d3d12ResourceBarrier.Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE; + d3d12ResourceBarrier.Transition.pResource = inResource; + d3d12ResourceBarrier.Transition.StateBefore = inPrevState; + d3d12ResourceBarrier.Transition.StateAfter = inNextState; + d3d12ResourceBarrier.Transition.Subresource = D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES; + return d3d12ResourceBarrier; +} + +ID3D12Resource* CreateTexture2D(ID3D12GraphicsCommandList* inCommandList, + const void* inPixelData, int inDataSizeInBytes, int inWidth, int inHeight, + DXGI_FORMAT inFormat) { + D3D12_HEAP_PROPERTIES d3dHeapProperties = {}; + d3dHeapProperties.Type = D3D12_HEAP_TYPE_DEFAULT; + + D3D12_RESOURCE_DESC d3d12ResourceDesc = {}; + d3d12ResourceDesc.Dimension = D3D12_RESOURCE_DIMENSION_TEXTURE2D; + d3d12ResourceDesc.Alignment = 0; + d3d12ResourceDesc.Width = inWidth; + d3d12ResourceDesc.Height = inHeight; + d3d12ResourceDesc.DepthOrArraySize = 1; + d3d12ResourceDesc.MipLevels = 1; + d3d12ResourceDesc.Format = inFormat; + d3d12ResourceDesc.SampleDesc.Count = 1; + d3d12ResourceDesc.SampleDesc.Quality = 0; + d3d12ResourceDesc.Layout = D3D12_TEXTURE_LAYOUT_UNKNOWN; + d3d12ResourceDesc.Flags = D3D12_RESOURCE_FLAG_NONE; + + ID3D12Resource* texture = nullptr; + gD3D12Device->CreateCommittedResource(&d3dHeapProperties, + D3D12_HEAP_FLAG_NONE, + &d3d12ResourceDesc, + D3D12_RESOURCE_STATE_COPY_DEST, + nullptr, + IID_PPV_ARGS(&texture) + ); + d3d12ResourceDesc = texture->GetDesc(); + UINT64 memorySizeUsed = 0; + UINT64 rowSizeInBytes = 0; + UINT rowUsed = 0; + D3D12_PLACED_SUBRESOURCE_FOOTPRINT subresourceFootprint; + gD3D12Device->GetCopyableFootprints(&d3d12ResourceDesc, 0, 1, 0, + &subresourceFootprint, &rowUsed, &rowSizeInBytes, &memorySizeUsed); + // 3 x 4 x 4 = 48bytes,32bytes,24bytes + 24bytes + ID3D12Resource* tempBufferObject = nullptr; + D3D12_HEAP_PROPERTIES 
d3dTempHeapProperties = {}; + d3dTempHeapProperties.Type = D3D12_HEAP_TYPE_UPLOAD;//cpu,gpu + + D3D12_RESOURCE_DESC d3d12TempResourceDesc = {}; + d3d12TempResourceDesc.Dimension = D3D12_RESOURCE_DIMENSION_BUFFER; + d3d12TempResourceDesc.Alignment = 0; + d3d12TempResourceDesc.Width = memorySizeUsed; + d3d12TempResourceDesc.Height = 1; + d3d12TempResourceDesc.DepthOrArraySize = 1; + d3d12TempResourceDesc.MipLevels = 1; + d3d12TempResourceDesc.Format = DXGI_FORMAT_UNKNOWN; + d3d12TempResourceDesc.SampleDesc.Count = 1; + d3d12TempResourceDesc.SampleDesc.Quality = 0; + d3d12TempResourceDesc.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR; + d3d12TempResourceDesc.Flags = D3D12_RESOURCE_FLAG_NONE; + + gD3D12Device->CreateCommittedResource( + &d3dTempHeapProperties, + D3D12_HEAP_FLAG_NONE, + &d3d12TempResourceDesc, + D3D12_RESOURCE_STATE_GENERIC_READ, + nullptr, + IID_PPV_ARGS(&tempBufferObject) + ); + BYTE* pData; + tempBufferObject->Map(0, nullptr, reinterpret_cast(&pData)); + BYTE* pDstTempBuffer = reinterpret_cast(pData + subresourceFootprint.Offset); + const BYTE* pSrcData = reinterpret_cast(inPixelData); + for (UINT i = 0; i < rowUsed; i++) { + memcpy(pDstTempBuffer + subresourceFootprint.Footprint.RowPitch * i, pSrcData + rowSizeInBytes * i, rowSizeInBytes); + } + tempBufferObject->Unmap(0, nullptr); + D3D12_TEXTURE_COPY_LOCATION dst = {}; + dst.pResource = texture; + dst.Type = D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX; + dst.SubresourceIndex = 0; + + D3D12_TEXTURE_COPY_LOCATION src = {}; + src.pResource = tempBufferObject; + src.Type = D3D12_TEXTURE_COPY_TYPE_PLACED_FOOTPRINT; + src.PlacedFootprint = subresourceFootprint; + inCommandList->CopyTextureRegion(&dst, 0, 0, 0, &src, nullptr); + + D3D12_RESOURCE_BARRIER barrier = InitResourceBarrier(texture, + D3D12_RESOURCE_STATE_COPY_DEST,D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE); + inCommandList->ResourceBarrier(1, &barrier); + return texture; +} +ID3D12Device* GetD3DDevice() { + return gD3D12Device; +} \ No newline at end 
of file diff --git a/MVS/HelloEarth/BattleFireDirect.h b/MVS/HelloEarth/BattleFireDirect.h new file mode 100644 index 00000000..53d7f9e7 --- /dev/null +++ b/MVS/HelloEarth/BattleFireDirect.h @@ -0,0 +1,35 @@ +#pragma once +#include +#include +#include +#include +#include + +D3D12_RESOURCE_BARRIER InitResourceBarrier( + ID3D12Resource* inResource, D3D12_RESOURCE_STATES inPrevState, + D3D12_RESOURCE_STATES inNextState); +ID3D12RootSignature* InitRootSignature(); +void CreateShaderFromFile( + LPCTSTR inShaderFilePath, + const char* inMainFunctionName, + const char* inTarget,//"vs_5_0","ps_5_0","vs_4_0" + D3D12_SHADER_BYTECODE* inShader); +ID3D12Resource* CreateConstantBufferObject(int inDataLen); +void UpdateConstantBuffer(ID3D12Resource* inCB, void* inData, int inDataLen); +ID3D12Resource* CreateBufferObject(ID3D12GraphicsCommandList* inCommandList, + void* inData, int inDataLen, D3D12_RESOURCE_STATES inFinalResourceState); +ID3D12PipelineState* CreatePSO(ID3D12RootSignature* inID3D12RootSignature, + D3D12_SHADER_BYTECODE inVertexShader, D3D12_SHADER_BYTECODE inPixelShader, + D3D12_SHADER_BYTECODE inGSShader); +bool InitD3D12(HWND inHWND, int inWidth, int inHeight); +ID3D12GraphicsCommandList* GetCommandList(); +ID3D12CommandAllocator* GetCommandAllocator(); +void WaitForCompletionOfCommandList(); +void EndCommandList(); +void BeginRenderToSwapChain(ID3D12GraphicsCommandList* inCommandList); +void EndRenderToSwapChain(ID3D12GraphicsCommandList* inCommandList); +void SwapD3D12Buffers(); +ID3D12Resource* CreateTexture2D(ID3D12GraphicsCommandList* inCommandList, + const void*inPixelData,int inDataSizeInBytes,int inWidth,int inHeight, + DXGI_FORMAT inFormat); +ID3D12Device* GetD3DDevice(); diff --git a/MVS/HelloEarth/README.md b/MVS/HelloEarth/README.md new file mode 100644 index 00000000..2474e43b --- /dev/null +++ b/MVS/HelloEarth/README.md @@ -0,0 +1,114 @@ +# HelloEarth + +基础 DirectX 12 渲染示例项目,展示如何搭建基本的渲染管线。 + +## 简介 + +HelloEarth 是 XCEngine 
项目的入门级示例,通过渲染一个带纹理的球体,帮助理解 DirectX 12 的核心概念和渲染流程。 + +## 技术栈 + +- **渲染 API**: DirectX 12 +- **语言**: C++17 +- **构建系统**: CMake +- **依赖库**: DirectX 12 SDK, stb_image + +## 项目结构 + +``` +HelloEarth/ +├── main.cpp # 程序入口 +├── BattleFireDirect.cpp/h # DirectX 12 核心渲染实现 +├── StaticMeshComponent.cpp/h # 静态网格组件 +├── Utils.cpp/h # 工具函数 +├── stbi/ # 图像加载库 +│ ├── stb_image.h +│ └── stb_image.cpp +└── Res/ # 资源文件 + ├── Shader/ + │ ├── gs.hlsl # 几何着色器 + │ └── ndctriangle.hlsl # 三角形着色器 + ├── Model/ + │ └── Sphere.lhsm # 球体模型 + └── Image/ + └── earth_d.jpg # 地球纹理 +``` + +## 构建方法 + +### 前置要求 + +- Windows 10/11 +- Visual Studio 2019 或更高版本 +- CMake 3.15+ + +### 构建步骤 + +```bash +# 创建并进入构建目录 +mkdir build && cd build + +# 配置项目 +cmake .. + +# 编译 +cmake --build . --config Release +``` + +### 运行 + +编译完成后运行 `HelloEarth.exe`(如果生成了可执行文件) + +## 功能特性 + +### 渲染管线 +- DirectX 12 渲染环境初始化 +- 命令队列和命令列表管理 +- 资源转换和同步 + +### 着色器 +- 顶点着色器(VS) +- 几何着色器(GS) +- 像素着色器(PS) + +### 资源管理 +- 静态网格加载和渲染 +- 纹理加载(支持 JPG/PNG) +- 常量缓冲区管理 +- 着色器资源视图(SRV) + +### 数学运算 +- 投影矩阵(Perspective Projection) +- 视图矩阵(View Matrix) +- 模型矩阵(Model Matrix) +- 法线矩阵计算 + +## 核心概念 + +### 渲染流程 +1. 初始化 D3D12 设备和命令队列 +2. 创建命令分配器和命令列表 +3. 加载着色器(编译 HLSL) +4. 创建根签名和 PSO 管道状态 +5. 加载网格模型和纹理 +6. 创建常量缓冲区并更新数据 +7. 
渲染循环: + - 重置命令分配器和列表 + - 设置渲染目标 + - 设置根签名和 PSO + - 设置描述符堆 + - 绑定常量缓冲和纹理 + - 提交绘制命令 + - 呈现交换链 + +### 关键接口 +- `InitD3D12` - 初始化 DirectX 12 +- `CreateShaderFromFile` - 从文件加载着色器 +- `CreatePSO` - 创建管道状态对象 +- `StaticMeshComponent::Render` - 渲染网格 + +## 资源说明 + +- 模型文件格式:`.lhsm`(自定义格式) +- 纹理支持:PNG、JPG +- 着色器:HLSL(Shader Model 5.1) diff --git a/MVS/HelloEarth/Res/Image/earth_d.jpg b/MVS/HelloEarth/Res/Image/earth_d.jpg new file mode 100644 index 00000000..663081b5 Binary files /dev/null and b/MVS/HelloEarth/Res/Image/earth_d.jpg differ diff --git a/MVS/HelloEarth/Res/Image/head.png b/MVS/HelloEarth/Res/Image/head.png new file mode 100644 index 00000000..055c373f Binary files /dev/null and b/MVS/HelloEarth/Res/Image/head.png differ diff --git a/MVS/HelloEarth/Res/Model/Sphere.lhsm b/MVS/HelloEarth/Res/Model/Sphere.lhsm new file mode 100644 index 00000000..b9e0c40d Binary files /dev/null and b/MVS/HelloEarth/Res/Model/Sphere.lhsm differ diff --git a/MVS/HelloEarth/Res/Shader/gs.hlsl b/MVS/HelloEarth/Res/Shader/gs.hlsl new file mode 100644 index 00000000..66a3f189 --- /dev/null +++ b/MVS/HelloEarth/Res/Shader/gs.hlsl @@ -0,0 +1,99 @@ +struct VertexData{ + float4 position:POSITION; + float4 texcoord:TEXCOORD0; + float4 normal:NORMAL; + float4 tangent:TANGENT; +}; + +struct VSOut{ + float4 position:SV_POSITION; + float4 normal:NORMAL; + float4 texcoord:TEXCOORD0; +}; + +static const float PI=3.141592; +cbuffer globalConstants:register(b0){ + float4 misc; +}; + +Texture2D T_DiffuseTexture:register(t0); +SamplerState samplerState:register(s0); + +struct MaterialData{ + float r; +}; +StructuredBuffer materialData:register(t0,space1); +cbuffer DefaultVertexCB:register(b1){ + float4x4 ProjectionMatrix; + float4x4 ViewMatrix; + float4x4 ModelMatrix; + float4x4 IT_ModelMatrix; + float4x4 ReservedMemory[1020]; +}; + +VSOut MainVS(VertexData inVertexData){ + VSOut vo; + vo.normal=mul(IT_ModelMatrix,inVertexData.normal); + float4 positionWS=mul(ModelMatrix,inVertexData.position); + float4 
positionVS=mul(ViewMatrix,positionWS); + vo.position=mul(ProjectionMatrix,positionVS); + //vo.position=float4(positionWS.xyz+vo.normal.xyz*sin(misc.x)*0.2f,1.0f); + vo.texcoord=inVertexData.texcoord; + return vo; +} + +[maxvertexcount(4)] +void MainGS(triangle VSOut inPoint[3],uint inPrimitiveID:SV_PrimitiveID, + inout TriangleStream outTriangleStream){ + outTriangleStream.Append(inPoint[0]); + outTriangleStream.Append(inPoint[1]); + outTriangleStream.Append(inPoint[2]); + /*VSOut vo; + float3 positionWS=inPoint[0].position.xyz; + float3 N=normalize(inPoint[0].normal.xyz); + vo.normal=float4(N,0.0f); + float3 helperVec=abs(N.y)>0.999?float3(0.0f,0.0f,1.0f):float3(0.0f,1.0f,0.0f); + float3 tangent=normalize(cross(N,helperVec));//u + float3 bitangent=normalize(cross(tangent,N));//v + float scale=materialData[inPrimitiveID].r; + + + float3 p0WS=positionWS-(bitangent*0.5f-tangent*0.5f)*scale;//left bottom + float4 p0VS=mul(ViewMatrix,float4(p0WS,1.0f)); + vo.position=mul(ProjectionMatrix,p0VS); + vo.texcoord=float4(0.0f,1.0f,0.0f,0.0f); + outTriangleStream.Append(vo); + + float3 p1WS=positionWS-(bitangent*0.5f+tangent*0.5f)*scale;//right bottom + float4 p1VS=mul(ViewMatrix,float4(p1WS,1.0f)); + vo.position=mul(ProjectionMatrix,p1VS); + vo.texcoord=float4(1.0f,1.0f,0.0f,0.0f); + outTriangleStream.Append(vo); + + float3 p2WS=positionWS+(bitangent*0.5f+tangent*0.5f)*scale;//left top + float4 p2VS=mul(ViewMatrix,float4(p2WS,1.0f)); + vo.position=mul(ProjectionMatrix,p2VS); + vo.texcoord=float4(0.0f,0.0f,0.0f,0.0f); + outTriangleStream.Append(vo); + + float3 p3WS=positionWS+(bitangent*0.5f-tangent*0.5f)*scale;//right top + float4 p3VS=mul(ViewMatrix,float4(p3WS,1.0f)); + vo.position=mul(ProjectionMatrix,p3VS); + vo.texcoord=float4(1.0f,0.0f,0.0f,0.0f); + outTriangleStream.Append(vo);*/ + +} + +float4 MainPS(VSOut inPSInput):SV_TARGET{ + float3 N=normalize(inPSInput.normal.xyz); + float3 bottomColor=float3(0.1f,0.4f,0.6f); + float3 topColor=float3(0.7f,0.7f,0.7f); + float 
theta=asin(N.y);//-PI/2 ~ PI/2 + theta/=PI;//-0.5~0.5 + theta+=0.5f;//0.0~1.0 + float ambientColorIntensity=1.0; + float3 ambientColor=lerp(bottomColor,topColor,theta)*ambientColorIntensity; + float4 diffuseColor=T_DiffuseTexture.Sample(samplerState,inPSInput.texcoord.xy); + float3 surfaceColor=diffuseColor.rgb; + return float4(surfaceColor,1.0f); +} \ No newline at end of file diff --git a/MVS/HelloEarth/Res/Shader/ndctriangle.hlsl b/MVS/HelloEarth/Res/Shader/ndctriangle.hlsl new file mode 100644 index 00000000..edee8bf9 --- /dev/null +++ b/MVS/HelloEarth/Res/Shader/ndctriangle.hlsl @@ -0,0 +1,65 @@ +struct VertexData{ + float4 position:POSITION; + float4 texcoord:TEXCOORD0; + float4 normal:NORMAL; + float4 tangent:TANGENT; +}; + +struct VSOut{ + float4 position:SV_POSITION; + float4 normal:NORMAL; + float4 texcoord:TEXCOORD0; + float4 positionWS:TEXCOORD1; +}; + +static const float PI=3.141592; +cbuffer globalConstants:register(b0){ + float4 misc; +}; + +cbuffer DefaultVertexCB:register(b1){ + float4x4 ProjectionMatrix; + float4x4 ViewMatrix; + float4x4 ModelMatrix; + float4x4 IT_ModelMatrix; + float4x4 ReservedMemory[1020]; +}; + +VSOut MainVS(VertexData inVertexData){ + VSOut vo; + vo.normal=mul(IT_ModelMatrix,inVertexData.normal); + float3 positionMS=inVertexData.position.xyz+vo.normal*sin(misc.x); + float4 positionWS=mul(ModelMatrix,float4(positionMS,1.0)); + float4 positionVS=mul(ViewMatrix,positionWS); + vo.position=mul(ProjectionMatrix,positionVS); + vo.positionWS=positionWS; + vo.texcoord=inVertexData.texcoord; + return vo; +} + +float4 MainPS(VSOut inPSInput):SV_TARGET{ + float3 N=normalize(inPSInput.normal.xyz); + float3 bottomColor=float3(0.1f,0.4f,0.6f); + float3 topColor=float3(0.7f,0.7f,0.7f); + float theta=asin(N.y);//-PI/2 ~ PI/2 + theta/=PI;//-0.5~0.5 + theta+=0.5f;//0.0~1.0 + float ambientColorIntensity=0.2; + float3 ambientColor=lerp(bottomColor,topColor,theta)*ambientColorIntensity; + float3 L=normalize(float3(1.0f,1.0f,-1.0f)); + + float 
diffuseIntensity=max(0.0f,dot(N,L)); + float3 diffuseLightColor=float3(0.1f,0.4f,0.6f); + float3 diffuseColor=diffuseLightColor*diffuseIntensity; + + float3 specularColor=float3(0.0f,0.0f,0.0f); + if(diffuseIntensity>0.0f){ + float3 cameraPositionWS=float3(0.0f,0.0f,0.0f); + float3 V=normalize(cameraPositionWS.xyz-inPSInput.positionWS.xyz); + float3 R=normalize(reflect(-L,N)); + float specularIntensity=pow(max(0.0f,dot(V,R)),128.0f); + specularColor=float3(1.0f,1.0f,1.0f)*specularIntensity; + } + float3 surfaceColor=ambientColor+diffuseColor+specularColor; + return float4(surfaceColor,1.0f); +} \ No newline at end of file diff --git a/MVS/HelloEarth/StaticMeshComponent.cpp b/MVS/HelloEarth/StaticMeshComponent.cpp new file mode 100644 index 00000000..7f7aec70 --- /dev/null +++ b/MVS/HelloEarth/StaticMeshComponent.cpp @@ -0,0 +1,90 @@ +#include "StaticMeshComponent.h" +#include "BattleFireDirect.h" +#include + +void StaticMeshComponent::SetVertexCount(int inVertexCount) { + mVertexCount = inVertexCount; + mVertexData = new StaticMeshComponentVertexData[inVertexCount]; + memset(mVertexData, 0, sizeof(StaticMeshComponentVertexData)*inVertexCount); +} +void StaticMeshComponent::SetVertexPosition(int inIndex, float inX, float inY, float inZ, float inW /* = 1.0f */) { + mVertexData[inIndex].mPosition[0] = inX; + mVertexData[inIndex].mPosition[1] = inY; + mVertexData[inIndex].mPosition[2] = inZ; + mVertexData[inIndex].mPosition[3] = inW; +} +void StaticMeshComponent::SetVertexTexcoord(int inIndex, float inX, float inY, float inZ, float inW /* = 1.0f */) { + mVertexData[inIndex].mTexcoord[0] = inX; + mVertexData[inIndex].mTexcoord[1] = inY; + mVertexData[inIndex].mTexcoord[2] = inZ; + mVertexData[inIndex].mTexcoord[3] = inW; +} +void StaticMeshComponent::SetVertexNormal(int inIndex, float inX, float inY, float inZ, float inW /* = 1.0f */) { + mVertexData[inIndex].mNormal[0] = inX; + mVertexData[inIndex].mNormal[1] = inY; + mVertexData[inIndex].mNormal[2] = inZ; + 
mVertexData[inIndex].mNormal[3] = inW;
+}
+void StaticMeshComponent::SetVertexTangent(int inIndex, float inX, float inY, float inZ, float inW /* = 1.0f */) {
+	mVertexData[inIndex].mTangent[0] = inX;
+	mVertexData[inIndex].mTangent[1] = inY;
+	mVertexData[inIndex].mTangent[2] = inZ;
+	mVertexData[inIndex].mTangent[3] = inW;
+}
+// Loads a .lhsm mesh: [int vertexCount][vertexCount vertices], followed by zero
+// or more submeshes stored as [int nameLen][name bytes][int indexCount][uint32 indices].
+// GPU vertex/index buffers are created through the recording command list.
+void StaticMeshComponent::InitFromFile(ID3D12GraphicsCommandList* inCommandList, const char* inFilePath) {
+	FILE* pFile = nullptr;
+	errno_t err = fopen_s(&pFile, inFilePath, "rb");
+	if (err != 0 || pFile == nullptr) {
+		return;//leave the component empty if the file cannot be opened
+	}
+	int temp = 0;
+	// BUGFIX: validate every fread so a truncated/corrupt file cannot leave
+	// mVertexCount as garbage or overflow the name buffer below.
+	if (fread(&temp, 4, 1, pFile) != 1 || temp <= 0) {
+		fclose(pFile);
+		return;
+	}
+	mVertexCount = temp;
+	mVertexData = new StaticMeshComponentVertexData[mVertexCount];
+	fread(mVertexData, 1, sizeof(StaticMeshComponentVertexData) * mVertexCount, pFile);
+	mVBO = CreateBufferObject(inCommandList, mVertexData,
+		sizeof(StaticMeshComponentVertexData) * mVertexCount,
+		D3D12_RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER);
+	mVBOView.BufferLocation = mVBO->GetGPUVirtualAddress();
+	mVBOView.SizeInBytes = sizeof(StaticMeshComponentVertexData) * mVertexCount;
+	mVBOView.StrideInBytes = sizeof(StaticMeshComponentVertexData);
+
+	while (fread(&temp, 4, 1, pFile) == 1) {
+		char name[256] = { 0 };
+		// BUGFIX: the original fread(name, 1, temp, ...) trusted the name length
+		// read from the file; clamp it so it cannot write past the 256-byte buffer.
+		if (temp <= 0 || temp >= int(sizeof(name))) {
+			break;
+		}
+		if (fread(name, 1, temp, pFile) != size_t(temp)) {
+			break;
+		}
+		if (fread(&temp, 4, 1, pFile) != 1 || temp <= 0) {
+			break;
+		}
+		SubMesh* submesh = new SubMesh;
+		submesh->mIndexCount = temp;
+		unsigned int* indexes = new unsigned int[temp];
+		fread(indexes, 1, sizeof(unsigned int) * temp, pFile);
+		submesh->mIBO = CreateBufferObject(inCommandList, indexes,
+			sizeof(unsigned int) * temp,
+			D3D12_RESOURCE_STATE_INDEX_BUFFER);
+
+		submesh->mIBView.BufferLocation = submesh->mIBO->GetGPUVirtualAddress();
+		submesh->mIBView.SizeInBytes = sizeof(unsigned int) * temp;
+		submesh->mIBView.Format = DXGI_FORMAT_R32_UINT;
+		// NOTE(review): the paste stripped the template arguments from the original
+		// std::pair<...>; make_pair restores the intended insert — confirm.
+		mSubMeshes.insert(std::make_pair(std::string(name), submesh));
+		delete[] indexes;//CreateBufferObject has copied the data (original deleted here too)
+	}
+	fclose(pFile);
+}
+// Issues the draw calls for this mesh on an already-configured command list.
+void StaticMeshComponent::Render(ID3D12GraphicsCommandList* inCommandList) {
+	D3D12_VERTEX_BUFFER_VIEW vbos[] = {
+		mVBOView
+ }; + inCommandList->IASetVertexBuffers(0, 1, vbos); + if (mSubMeshes.empty()) { + inCommandList->DrawInstanced(mVertexCount, 1, 0, 0); + } + else { + for (auto iter = mSubMeshes.begin(); + iter != mSubMeshes.end(); iter++) { + inCommandList->IASetIndexBuffer(&iter->second->mIBView); + inCommandList->DrawIndexedInstanced(iter->second->mIndexCount, 1, 0, 0, 0); + } + } +} \ No newline at end of file diff --git a/MVS/HelloEarth/StaticMeshComponent.h b/MVS/HelloEarth/StaticMeshComponent.h new file mode 100644 index 00000000..b00bfbb7 --- /dev/null +++ b/MVS/HelloEarth/StaticMeshComponent.h @@ -0,0 +1,31 @@ +#pragma once +#include +#include +#include +struct StaticMeshComponentVertexData { + float mPosition[4]; + float mTexcoord[4]; + float mNormal[4]; + float mTangent[4]; +}; +struct SubMesh { + ID3D12Resource* mIBO; + D3D12_INDEX_BUFFER_VIEW mIBView; + int mIndexCount; +}; +class StaticMeshComponent{ +public: + ID3D12Resource* mVBO; + D3D12_VERTEX_BUFFER_VIEW mVBOView; + StaticMeshComponentVertexData* mVertexData; + int mVertexCount; + std::unordered_map mSubMeshes; + void SetVertexCount(int inVertexCount); + void SetVertexPosition(int inIndex, float inX, float inY, float inZ, float inW = 1.0f); + void SetVertexTexcoord(int inIndex, float inX, float inY, float inZ, float inW = 1.0f); + void SetVertexNormal(int inIndex, float inX, float inY, float inZ, float inW = 1.0f); + void SetVertexTangent(int inIndex, float inX, float inY, float inZ, float inW = 1.0f); + void InitFromFile(ID3D12GraphicsCommandList*inCommandList,const char* inFilePath); + void Render(ID3D12GraphicsCommandList* inCommandList); +}; + diff --git a/MVS/HelloEarth/Utils.cpp b/MVS/HelloEarth/Utils.cpp new file mode 100644 index 00000000..940108b8 --- /dev/null +++ b/MVS/HelloEarth/Utils.cpp @@ -0,0 +1,10 @@ +#include "Utils.h" +#include +#include + +float srandom() { + float number = float(rand())/float(RAND_MAX);//0.0~1.0f + number *= 2.0f;//0.0~2.0 + number -= 1.0f;//-1.0f~1.0f; + return number; +} 
\ No newline at end of file diff --git a/MVS/HelloEarth/Utils.h b/MVS/HelloEarth/Utils.h new file mode 100644 index 00000000..ae1f8765 --- /dev/null +++ b/MVS/HelloEarth/Utils.h @@ -0,0 +1,3 @@ +#pragma once +float srandom();//-1.0f~1.0f + diff --git a/MVS/HelloEarth/main.cpp b/MVS/HelloEarth/main.cpp new file mode 100644 index 00000000..35750f70 --- /dev/null +++ b/MVS/HelloEarth/main.cpp @@ -0,0 +1,188 @@ +#include +#include "BattleFireDirect.h" +#include "StaticMeshComponent.h" +#include "stbi/stb_image.h" +#include "Utils.h" + +#pragma comment(lib,"d3d12.lib") +#pragma comment(lib,"dxgi.lib") +#pragma comment(lib,"d3dcompiler.lib") +#pragma comment(lib,"winmm.lib") + +LPCTSTR gWindowClassName = L"BattleFire"; + +LRESULT CALLBACK WindowProc(HWND inHWND, UINT inMSG, WPARAM inWParam, LPARAM inLParam) { + switch (inMSG) { + case WM_CLOSE: + PostQuitMessage(0);//enqueue WM_QUIT + break; + } + return DefWindowProc(inHWND, inMSG, inWParam, inLParam); +} +int WINAPI WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPSTR lpCmdLine, int inShowCmd) { + //register + WNDCLASSEX wndClassEx; + wndClassEx.cbSize = sizeof(WNDCLASSEX); + wndClassEx.style = CS_HREDRAW | CS_VREDRAW; + wndClassEx.cbClsExtra = NULL;//class + wndClassEx.cbWndExtra = NULL;//instance + wndClassEx.hInstance = hInstance; + wndClassEx.hIcon = LoadIcon(NULL, IDI_APPLICATION); + wndClassEx.hIconSm = LoadIcon(NULL, IDI_APPLICATION); + wndClassEx.hCursor = LoadCursor(NULL, IDC_ARROW); + wndClassEx.hbrBackground = NULL; + wndClassEx.lpszMenuName = NULL; + wndClassEx.lpszClassName = gWindowClassName; + wndClassEx.lpfnWndProc = WindowProc; + if (!RegisterClassEx(&wndClassEx)) { + MessageBox(NULL, L"Register Class Failed!", L"Error", MB_OK | MB_ICONERROR); + return -1; + } + //create + int viewportWidth = 1280; + int viewportHeight = 720; + RECT rect; + rect.left = 0; + rect.top = 0; + rect.right = viewportWidth; + rect.bottom = viewportHeight; + AdjustWindowRect(&rect, WS_OVERLAPPEDWINDOW, FALSE); + int 
windowWidth = rect.right - rect.left; + int windowHeight = rect.bottom - rect.top; + HWND hwnd = CreateWindowEx(NULL, + gWindowClassName, + L"My Render Window", + WS_OVERLAPPEDWINDOW, + CW_USEDEFAULT, CW_USEDEFAULT, + windowWidth, windowHeight, + NULL, + NULL, + hInstance, + NULL); + if (!hwnd) { + MessageBox(NULL, L"Create Window Failed!", L"Error", MB_OK | MB_ICONERROR); + return -1; + } + //show + InitD3D12(hwnd, 1280, 720); + ID3D12GraphicsCommandList* commandList = GetCommandList(); + ID3D12CommandAllocator* commandAllocator = GetCommandAllocator(); + StaticMeshComponent staticMeshComponent; + staticMeshComponent.InitFromFile(commandList, "Res/Model/Sphere.lhsm"); + + ID3D12RootSignature* rootSignature = InitRootSignature(); + D3D12_SHADER_BYTECODE vs,gs,ps; + CreateShaderFromFile(L"Res/Shader/gs.hlsl", "MainVS", "vs_5_1", &vs); + CreateShaderFromFile(L"Res/Shader/gs.hlsl", "MainGS", "gs_5_1", &gs); + CreateShaderFromFile(L"Res/Shader/gs.hlsl", "MainPS", "ps_5_1", &ps); + ID3D12PipelineState*pso=CreatePSO(rootSignature, vs, ps, gs); + + ID3D12Resource* cb = CreateConstantBufferObject(65536);//1024x64(4x4) + DirectX::XMMATRIX projectionMatrix=DirectX::XMMatrixPerspectiveFovLH( + (45.0f*3.141592f)/180.0f,1280.0f/720.0f,0.1f,1000.0f); + DirectX::XMMATRIX viewMatrix = DirectX::XMMatrixIdentity(); + DirectX::XMMATRIX modelMatrix = DirectX::XMMatrixTranslation(0.0f,0.0f,5.0f); + //modelMatrix *= DirectX::XMMatrixRotationZ(90.0f*3.141592f/180.0f); + DirectX::XMFLOAT4X4 tempMatrix; + float matrices[64]; + + DirectX::XMStoreFloat4x4(&tempMatrix, projectionMatrix); + memcpy(matrices, &tempMatrix, sizeof(float) * 16); + DirectX::XMStoreFloat4x4(&tempMatrix, viewMatrix); + memcpy(matrices+16, &tempMatrix, sizeof(float) * 16); + DirectX::XMStoreFloat4x4(&tempMatrix, modelMatrix); + memcpy(matrices + 32, &tempMatrix, sizeof(float) * 16);; + DirectX::XMVECTOR determinant; + DirectX::XMMATRIX inverseModelMatrix = DirectX::XMMatrixInverse(&determinant, modelMatrix); + if 
(DirectX::XMVectorGetX(determinant) != 0.0f) { + DirectX::XMMATRIX normalMatrix = DirectX::XMMatrixTranspose(inverseModelMatrix); + DirectX::XMStoreFloat4x4(&tempMatrix, modelMatrix); + memcpy(matrices + 48, &tempMatrix, sizeof(float) * 16);; + } + UpdateConstantBuffer(cb, matrices, sizeof(float) * 64); + + + ID3D12Resource* sb = CreateConstantBufferObject(65536);//1024x64(4x4) + struct MaterialData { + float r; + }; + MaterialData* materialDatas = new MaterialData[3000]; + for (int i=0;i<3000;i++){ + materialDatas[i].r = srandom() * 0.1f + 0.1f;//0.0~1.0 + } + UpdateConstantBuffer(sb, materialDatas, sizeof(MaterialData) * 3000); + + int imageWidth, imageHeight,imageChannel; + //stbi_set_flip_vertically_on_load(true); + stbi_uc* pixels = stbi_load("Res/Image/earth_d.jpg", &imageWidth, &imageHeight, &imageChannel, 4); + ID3D12Resource* texture = CreateTexture2D(commandList, pixels, + imageWidth * imageHeight * imageChannel, imageWidth, imageHeight,DXGI_FORMAT_R8G8B8A8_UNORM); + delete[]pixels; + ID3D12Device* d3dDevice = GetD3DDevice(); + + ID3D12DescriptorHeap* srvHeap = nullptr; + D3D12_DESCRIPTOR_HEAP_DESC d3dDescriptorHeapDescSRV = {}; + d3dDescriptorHeapDescSRV.NumDescriptors = 3; + d3dDescriptorHeapDescSRV.Type = D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV; + d3dDescriptorHeapDescSRV.Flags = D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE; + d3dDevice->CreateDescriptorHeap(&d3dDescriptorHeapDescSRV, IID_PPV_ARGS(&srvHeap)); + + ID3D12DescriptorHeap* descriptorHeaps[] = {srvHeap}; + + D3D12_SHADER_RESOURCE_VIEW_DESC srvDesc = {}; + srvDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM; + srvDesc.Shader4ComponentMapping = D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING; + srvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE2D; + srvDesc.Texture2D.MipLevels = 1; + + D3D12_CPU_DESCRIPTOR_HANDLE srvHeapPtr = srvHeap->GetCPUDescriptorHandleForHeapStart(); + d3dDevice->CreateShaderResourceView(texture, &srvDesc, srvHeapPtr); + srvHeapPtr.ptr += 
d3dDevice->GetDescriptorHandleIncrementSize(D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
+
+	// Submit the upload commands recorded above and block until the GPU has
+	// consumed them before the resources are used for rendering.
+	EndCommandList();
+	WaitForCompletionOfCommandList();
+
+	ShowWindow(hwnd, inShowCmd);
+	UpdateWindow(hwnd);
+	// Bound as root constants at slot 1 (b0 "misc" in the shaders);
+	// color[0] is overwritten each frame with elapsed seconds.
+	float color[] = {0.5f,0.5f,0.5f,1.0f};
+	MSG msg;
+	DWORD last_time = timeGetTime();
+	DWORD appStartTime = last_time;
+	while (true){
+		ZeroMemory(&msg, sizeof(MSG));
+		if (PeekMessage(&msg,NULL,0,0,PM_REMOVE)) {
+			if (msg.message == WM_QUIT) {
+				break;
+			}
+			TranslateMessage(&msg);
+			DispatchMessage(&msg);
+		} else {
+			//rendering
+			// Wait for the previous frame to finish before resetting the single
+			// shared allocator/list pair below.
+			WaitForCompletionOfCommandList();
+			DWORD current_time = timeGetTime();//ms
+			DWORD frameTime = current_time - last_time;
+			DWORD timeSinceAppStartInMS = current_time - appStartTime;
+			last_time = current_time;
+			float frameTimeInSecond = float(frameTime) / 1000.0f;//second
+			float timeSinceAppStartInSecond = float(timeSinceAppStartInMS) / 1000.0f;
+			color[0] = timeSinceAppStartInSecond;
+			commandAllocator->Reset();
+			commandList->Reset(commandAllocator, nullptr);
+			BeginRenderToSwapChain(commandList);
+			//draw
+			commandList->SetPipelineState(pso);
+			commandList->SetGraphicsRootSignature(rootSignature);
+			commandList->SetDescriptorHeaps(_countof(descriptorHeaps),descriptorHeaps);
+			// Root bindings match InitRootSignature: 0=CBV (b1, matrices),
+			// 1=32-bit constants (b0), 2=SRV table (t0, texture),
+			// 3=root SRV (t0 space1, material buffer).
+			commandList->SetGraphicsRootConstantBufferView(0, cb->GetGPUVirtualAddress());
+			commandList->SetGraphicsRoot32BitConstants(1, 4, color, 0);
+			commandList->SetGraphicsRootDescriptorTable(2, srvHeap->GetGPUDescriptorHandleForHeapStart());
+			commandList->SetGraphicsRootShaderResourceView(3, sb->GetGPUVirtualAddress());
+			commandList->IASetPrimitiveTopology(D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
+			staticMeshComponent.Render(commandList);
+			EndRenderToSwapChain(commandList);
+			EndCommandList();
+			SwapD3D12Buffers();
+		}
+	}
+	return 0;
+}
\ No newline at end of file
diff --git a/MVS/HelloEarth/stbi/stb_image.cpp b/MVS/HelloEarth/stbi/stb_image.cpp
new file mode 100644
index 00000000..badb3ef4
--- /dev/null
+++ b/MVS/HelloEarth/stbi/stb_image.cpp
@@ -0,0 +1,2 @@
+#define STB_IMAGE_IMPLEMENTATION +#include "stb_image.h" \ No newline at end of file diff --git a/MVS/HelloEarth/stbi/stb_image.h b/MVS/HelloEarth/stbi/stb_image.h new file mode 100644 index 00000000..bf44a3ad --- /dev/null +++ b/MVS/HelloEarth/stbi/stb_image.h @@ -0,0 +1,7194 @@ +/* stb_image - v2.14 - public domain image loader - http://nothings.org/stb_image.h +no warranty implied; use at your own risk + +Do this: +#define STB_IMAGE_IMPLEMENTATION +before you include this file in *one* C or C++ file to create the implementation. + +// i.e. it should look like this: +#include ... +#include ... +#include ... +#define STB_IMAGE_IMPLEMENTATION +#include "stb_image.h" + +You can #define STBI_ASSERT(x) before the #include to avoid using assert.h. +And #define STBI_MALLOC, STBI_REALLOC, and STBI_FREE to avoid using malloc,realloc,free + + +QUICK NOTES: +Primarily of interest to game developers and other people who can +avoid problematic images and only need the trivial interface + +JPEG baseline & progressive (12 bpc/arithmetic not supported, same as stock IJG lib) +PNG 1/2/4/8-bit-per-channel (16 bpc not supported) + +TGA (not sure what subset, if a subset) +BMP non-1bpp, non-RLE +PSD (composited view only, no extra channels, 8/16 bit-per-channel) + +GIF (*comp always reports as 4-channel) +HDR (radiance rgbE format) +PIC (Softimage PIC) +PNM (PPM and PGM binary only) + +Animated GIF still needs a proper API, but here's one way to do it: +http://gist.github.com/urraka/685d9a6340b26b830d49 + +- decode from memory or through FILE (define STBI_NO_STDIO to remove code) +- decode from arbitrary I/O callbacks +- SIMD acceleration on x86/x64 (SSE2) and ARM (NEON) + +Full documentation under "DOCUMENTATION" below. + + +Revision 2.00 release notes: + +- Progressive JPEG is now supported. + +- PPM and PGM binary formats are now supported, thanks to Ken Miller. 
+ +- x86 platforms now make use of SSE2 SIMD instructions for +JPEG decoding, and ARM platforms can use NEON SIMD if requested. +This work was done by Fabian "ryg" Giesen. SSE2 is used by +default, but NEON must be enabled explicitly; see docs. + +With other JPEG optimizations included in this version, we see +2x speedup on a JPEG on an x86 machine, and a 1.5x speedup +on a JPEG on an ARM machine, relative to previous versions of this +library. The same results will not obtain for all JPGs and for all +x86/ARM machines. (Note that progressive JPEGs are significantly +slower to decode than regular JPEGs.) This doesn't mean that this +is the fastest JPEG decoder in the land; rather, it brings it +closer to parity with standard libraries. If you want the fastest +decode, look elsewhere. (See "Philosophy" section of docs below.) + +See final bullet items below for more info on SIMD. + +- Added STBI_MALLOC, STBI_REALLOC, and STBI_FREE macros for replacing +the memory allocator. Unlike other STBI libraries, these macros don't +support a context parameter, so if you need to pass a context into +the allocator, you'll have to store it in a global or a thread-local +variable. + +- Split existing STBI_NO_HDR flag into two flags, STBI_NO_HDR and +STBI_NO_LINEAR. +STBI_NO_HDR: suppress implementation of .hdr reader format +STBI_NO_LINEAR: suppress high-dynamic-range light-linear float API + +- You can suppress implementation of any of the decoders to reduce +your code footprint by #defining one or more of the following +symbols before creating the implementation. 
+ +STBI_NO_JPEG +STBI_NO_PNG +STBI_NO_BMP +STBI_NO_PSD +STBI_NO_TGA +STBI_NO_GIF +STBI_NO_HDR +STBI_NO_PIC +STBI_NO_PNM (.ppm and .pgm) + +- You can request *only* certain decoders and suppress all other ones +(this will be more forward-compatible, as addition of new decoders +doesn't require you to disable them explicitly): + +STBI_ONLY_JPEG +STBI_ONLY_PNG +STBI_ONLY_BMP +STBI_ONLY_PSD +STBI_ONLY_TGA +STBI_ONLY_GIF +STBI_ONLY_HDR +STBI_ONLY_PIC +STBI_ONLY_PNM (.ppm and .pgm) + +Note that you can define multiples of these, and you will get all +of them ("only x" and "only y" is interpreted to mean "only x&y"). + +- If you use STBI_NO_PNG (or _ONLY_ without PNG), and you still +want the zlib decoder to be available, #define STBI_SUPPORT_ZLIB + +- Compilation of all SIMD code can be suppressed with +#define STBI_NO_SIMD +It should not be necessary to disable SIMD unless you have issues +compiling (e.g. using an x86 compiler which doesn't support SSE +intrinsics or that doesn't support the method used to detect +SSE2 support at run-time), and even those can be reported as +bugs so I can refine the built-in compile-time checking to be +smarter. + +- The old STBI_SIMD system which allowed installing a user-defined +IDCT etc. has been removed. If you need this, don't upgrade. My +assumption is that almost nobody was doing this, and those who +were will find the built-in SIMD more satisfactory anyway. + +- RGB values computed for JPEG images are slightly different from +previous versions of stb_image. (This is due to using less +integer precision in SIMD.) The C code has been adjusted so +that the same RGB values will be computed regardless of whether +SIMD support is available, so your app should always produce +consistent results. But these results are slightly different from +previous versions. 
(Specifically, about 3% of available YCbCr values +will compute different RGB results from pre-1.49 versions by +-1; +most of the deviating values are one smaller in the G channel.) + +- If you must produce consistent results with previous versions of +stb_image, #define STBI_JPEG_OLD and you will get the same results +you used to; however, you will not get the SIMD speedups for +the YCbCr-to-RGB conversion step (although you should still see +significant JPEG speedup from the other changes). + +Please note that STBI_JPEG_OLD is a temporary feature; it will be +removed in future versions of the library. It is only intended for +near-term back-compatibility use. + + +Latest revision history: +2.13 (2016-12-04) experimental 16-bit API, only for PNG so far; fixes +2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes +2.11 (2016-04-02) 16-bit PNGS; enable SSE2 in non-gcc x64 +RGB-format JPEG; remove white matting in PSD; +allocate large structures on the stack; +correct channel count for PNG & BMP +2.10 (2016-01-22) avoid warning introduced in 2.09 +2.09 (2016-01-16) 16-bit TGA; comments in PNM files; STBI_REALLOC_SIZED +2.08 (2015-09-13) fix to 2.07 cleanup, reading RGB PSD as RGBA +2.07 (2015-09-13) partial animated GIF support +limited 16-bit PSD support +minor bugs, code cleanup, and compiler warnings + +See end of file for full revision history. 
+ + +============================ Contributors ========================= + +Image formats Extensions, features +Sean Barrett (jpeg, png, bmp) Jetro Lauha (stbi_info) +Nicolas Schulz (hdr, psd) Martin "SpartanJ" Golini (stbi_info) +Jonathan Dummer (tga) James "moose2000" Brown (iPhone PNG) +Jean-Marc Lienher (gif) Ben "Disch" Wenger (io callbacks) +Tom Seddon (pic) Omar Cornut (1/2/4-bit PNG) +Thatcher Ulrich (psd) Nicolas Guillemot (vertical flip) +Ken Miller (pgm, ppm) Richard Mitton (16-bit PSD) +github:urraka (animated gif) Junggon Kim (PNM comments) +Daniel Gibson (16-bit TGA) +socks-the-fox (16-bit TGA) +Optimizations & bugfixes +Fabian "ryg" Giesen +Arseny Kapoulkine + +Bug & warning fixes +Marc LeBlanc David Woo Guillaume George Martins Mozeiko +Christpher Lloyd Martin Golini Jerry Jansson Joseph Thomson +Dave Moore Roy Eltham Hayaki Saito Phil Jordan +Won Chun Luke Graham Johan Duparc Nathan Reed +the Horde3D community Thomas Ruf Ronny Chevalier Nick Verigakis +Janez Zemva John Bartholomew Michal Cichon github:svdijk +Jonathan Blow Ken Hamada Tero Hanninen Baldur Karlsson +Laurent Gomila Cort Stratton Sergio Gonzalez github:romigrou +Aruelien Pocheville Thibault Reuille Cass Everitt Matthew Gregan +Ryamond Barbiero Paul Du Bois Engin Manap github:snagar +Michaelangel007@github Oriol Ferrer Mesia Dale Weiler github:Zelex +Philipp Wiesemann Josh Tobin github:rlyeh github:grim210@github +Blazej Dariusz Roszkowski github:sammyhw + + +LICENSE + +This software is dual-licensed to the public domain and under the following +license: you are granted a perpetual, irrevocable license to copy, modify, +publish, and distribute this file as you see fit. 
+ +*/ + +#ifndef STBI_INCLUDE_STB_IMAGE_H +#define STBI_INCLUDE_STB_IMAGE_H + +// DOCUMENTATION +// +// Limitations: +// - no 16-bit-per-channel PNG +// - no 12-bit-per-channel JPEG +// - no JPEGs with arithmetic coding +// - no 1-bit BMP +// - GIF always returns *comp=4 +// +// Basic usage (see HDR discussion below for HDR usage): +// int x,y,n; +// unsigned char *data = stbi_load(filename, &x, &y, &n, 0); +// // ... process data if not NULL ... +// // ... x = width, y = height, n = # 8-bit components per pixel ... +// // ... replace '0' with '1'..'4' to force that many components per pixel +// // ... but 'n' will always be the number that it would have been if you said 0 +// stbi_image_free(data) +// +// Standard parameters: +// int *x -- outputs image width in pixels +// int *y -- outputs image height in pixels +// int *channels_in_file -- outputs # of image components in image file +// int desired_channels -- if non-zero, # of image components requested in result +// +// The return value from an image loader is an 'unsigned char *' which points +// to the pixel data, or NULL on an allocation failure or if the image is +// corrupt or invalid. The pixel data consists of *y scanlines of *x pixels, +// with each pixel consisting of N interleaved 8-bit components; the first +// pixel pointed to is top-left-most in the image. There is no padding between +// image scanlines or between pixels, regardless of format. The number of +// components N is 'req_comp' if req_comp is non-zero, or *comp otherwise. +// If req_comp is non-zero, *comp has the number of components that _would_ +// have been output otherwise. E.g. if you set req_comp to 4, you will always +// get RGBA output, but you can check *comp to see if it's trivially opaque +// because e.g. there were only 3 channels in the source image. 
+// +// An output image with N components has the following components interleaved +// in this order in each pixel: +// +// N=#comp components +// 1 grey +// 2 grey, alpha +// 3 red, green, blue +// 4 red, green, blue, alpha +// +// If image loading fails for any reason, the return value will be NULL, +// and *x, *y, *comp will be unchanged. The function stbi_failure_reason() +// can be queried for an extremely brief, end-user unfriendly explanation +// of why the load failed. Define STBI_NO_FAILURE_STRINGS to avoid +// compiling these strings at all, and STBI_FAILURE_USERMSG to get slightly +// more user-friendly ones. +// +// Paletted PNG, BMP, GIF, and PIC images are automatically depalettized. +// +// =========================================================================== +// +// Philosophy +// +// stb libraries are designed with the following priorities: +// +// 1. easy to use +// 2. easy to maintain +// 3. good performance +// +// Sometimes I let "good performance" creep up in priority over "easy to maintain", +// and for best performance I may provide less-easy-to-use APIs that give higher +// performance, in addition to the easy to use ones. Nevertheless, it's important +// to keep in mind that from the standpoint of you, a client of this library, +// all you care about is #1 and #3, and stb libraries do not emphasize #3 above all. +// +// Some secondary priorities arise directly from the first two, some of which +// make more explicit reasons why performance can't be emphasized. +// +// - Portable ("ease of use") +// - Small footprint ("easy to maintain") +// - No dependencies ("ease of use") +// +// =========================================================================== +// +// I/O callbacks +// +// I/O callbacks allow you to read from arbitrary sources, like packaged +// files or some other source. Data read from callbacks are processed +// through a small internal buffer (currently 128 bytes) to try to reduce +// overhead. 
+// +// The three functions you must define are "read" (reads some bytes of data), +// "skip" (skips some bytes of data), "eof" (reports if the stream is at the end). +// +// =========================================================================== +// +// SIMD support +// +// The JPEG decoder will try to automatically use SIMD kernels on x86 when +// supported by the compiler. For ARM Neon support, you must explicitly +// request it. +// +// (The old do-it-yourself SIMD API is no longer supported in the current +// code.) +// +// On x86, SSE2 will automatically be used when available based on a run-time +// test; if not, the generic C versions are used as a fall-back. On ARM targets, +// the typical path is to have separate builds for NEON and non-NEON devices +// (at least this is true for iOS and Android). Therefore, the NEON support is +// toggled by a build flag: define STBI_NEON to get NEON loops. +// +// The output of the JPEG decoder is slightly different from versions where +// SIMD support was introduced (that is, for versions before 1.49). The +// difference is only +-1 in the 8-bit RGB channels, and only on a small +// fraction of pixels. You can force the pre-1.49 behavior by defining +// STBI_JPEG_OLD, but this will disable some of the SIMD decoding path +// and hence cost some performance. +// +// If for some reason you do not want to use any of SIMD code, or if +// you have issues compiling it, you can disable it entirely by +// defining STBI_NO_SIMD. +// +// =========================================================================== +// +// HDR image support (disable by defining STBI_NO_HDR) +// +// stb_image now supports loading HDR images in general, and currently +// the Radiance .HDR file format, although the support is provided +// generically. 
You can still load any file through the existing interface; +// if you attempt to load an HDR file, it will be automatically remapped to +// LDR, assuming gamma 2.2 and an arbitrary scale factor defaulting to 1; +// both of these constants can be reconfigured through this interface: +// +// stbi_hdr_to_ldr_gamma(2.2f); +// stbi_hdr_to_ldr_scale(1.0f); +// +// (note, do not use _inverse_ constants; stbi_image will invert them +// appropriately). +// +// Additionally, there is a new, parallel interface for loading files as +// (linear) floats to preserve the full dynamic range: +// +// float *data = stbi_loadf(filename, &x, &y, &n, 0); +// +// If you load LDR images through this interface, those images will +// be promoted to floating point values, run through the inverse of +// constants corresponding to the above: +// +// stbi_ldr_to_hdr_scale(1.0f); +// stbi_ldr_to_hdr_gamma(2.2f); +// +// Finally, given a filename (or an open file or memory block--see header +// file for details) containing image data, you can query for the "most +// appropriate" interface to use (that is, whether the image is HDR or +// not), using: +// +// stbi_is_hdr(char *filename); +// +// =========================================================================== +// +// iPhone PNG support: +// +// By default we convert iphone-formatted PNGs back to RGB, even though +// they are internally encoded differently. You can disable this conversion +// by by calling stbi_convert_iphone_png_to_rgb(0), in which case +// you will always just get the native iphone "format" through (which +// is BGR stored in RGB). +// +// Call stbi_set_unpremultiply_on_load(1) as well to force a divide per +// pixel to remove any premultiplied alpha *only* if the image file explicitly +// says there's premultiplied data (currently only happens in iPhone images, +// and only if iPhone convert-to-rgb processing is on). 
+// + + +#ifndef STBI_NO_STDIO +#include +#endif // STBI_NO_STDIO + +#define STBI_VERSION 1 + +enum +{ + STBI_default = 0, // only used for req_comp + + STBI_grey = 1, + STBI_grey_alpha = 2, + STBI_rgb = 3, + STBI_rgb_alpha = 4 +}; + +typedef unsigned char stbi_uc; +typedef unsigned short stbi_us; + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef STB_IMAGE_STATIC +#define STBIDEF static +#else +#define STBIDEF extern +#endif + + ////////////////////////////////////////////////////////////////////////////// + // + // PRIMARY API - works on images of any type + // + + // + // load image by filename, open file, or memory buffer + // + + typedef struct + { + int(*read) (void *user, char *data, int size); // fill 'data' with 'size' bytes. return number of bytes actually read + void(*skip) (void *user, int n); // skip the next 'n' bytes, or 'unget' the last -n bytes if negative + int(*eof) (void *user); // returns nonzero if we are at end of file/data + } stbi_io_callbacks; + + //////////////////////////////////// + // + // 8-bits-per-channel interface + // + + STBIDEF stbi_uc *stbi_load(char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF stbi_uc *stbi_load_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels); + +#ifndef STBI_NO_STDIO + STBIDEF stbi_uc *stbi_load_from_file(FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); + // for stbi_load_from_file, file pointer is left pointing immediately after image +#endif + + //////////////////////////////////// + // + // 16-bits-per-channel interface + // + + STBIDEF stbi_us *stbi_load_16(char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); +#ifndef STBI_NO_STDIO + STBIDEF stbi_us *stbi_load_from_file_16(FILE *f, int *x, int *y, int 
*channels_in_file, int desired_channels); +#endif + // @TODO the other variants + + //////////////////////////////////// + // + // float-per-channel interface + // +#ifndef STBI_NO_LINEAR + STBIDEF float *stbi_loadf(char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF float *stbi_loadf_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF float *stbi_loadf_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels); + +#ifndef STBI_NO_STDIO + STBIDEF float *stbi_loadf_from_file(FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); +#endif +#endif + +#ifndef STBI_NO_HDR + STBIDEF void stbi_hdr_to_ldr_gamma(float gamma); + STBIDEF void stbi_hdr_to_ldr_scale(float scale); +#endif // STBI_NO_HDR + +#ifndef STBI_NO_LINEAR + STBIDEF void stbi_ldr_to_hdr_gamma(float gamma); + STBIDEF void stbi_ldr_to_hdr_scale(float scale); +#endif // STBI_NO_LINEAR + + // stbi_is_hdr is always defined, but always returns false if STBI_NO_HDR + STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user); + STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len); +#ifndef STBI_NO_STDIO + STBIDEF int stbi_is_hdr(char const *filename); + STBIDEF int stbi_is_hdr_from_file(FILE *f); +#endif // STBI_NO_STDIO + + + // get a VERY brief reason for failure + // NOT THREADSAFE + STBIDEF const char *stbi_failure_reason(void); + + // free the loaded image -- this is just free() + STBIDEF void stbi_image_free(void *retval_from_stbi_load); + + // get image dimensions & components without fully decoding + STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp); + STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp); + +#ifndef STBI_NO_STDIO + STBIDEF int stbi_info(char const *filename, int *x, int *y, int 
*comp); + STBIDEF int stbi_info_from_file(FILE *f, int *x, int *y, int *comp); + +#endif + + + + // for image formats that explicitly notate that they have premultiplied alpha, + // we just return the colors as stored in the file. set this flag to force + // unpremultiplication. results are undefined if the unpremultiply overflow. + STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply); + + // indicate whether we should process iphone images back to canonical format, + // or just pass them through "as-is" + STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert); + + // flip the image vertically, so the first pixel in the output array is the bottom left + STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip); + + // ZLIB client - used by PNG, available for other purposes + + STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen); + STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header); + STBIDEF char *stbi_zlib_decode_malloc(const char *buffer, int len, int *outlen); + STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, const char *ibuffer, int ilen); + + STBIDEF char *stbi_zlib_decode_noheader_malloc(const char *buffer, int len, int *outlen); + STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen); + + +#ifdef __cplusplus +} +#endif + +// +// +//// end header file ///////////////////////////////////////////////////// +#endif // STBI_INCLUDE_STB_IMAGE_H + +#ifdef STB_IMAGE_IMPLEMENTATION + +#if defined(STBI_ONLY_JPEG) || defined(STBI_ONLY_PNG) || defined(STBI_ONLY_BMP) \ + || defined(STBI_ONLY_TGA) || defined(STBI_ONLY_GIF) || defined(STBI_ONLY_PSD) \ + || defined(STBI_ONLY_HDR) || defined(STBI_ONLY_PIC) || defined(STBI_ONLY_PNM) \ + || defined(STBI_ONLY_ZLIB) +#ifndef STBI_ONLY_JPEG +#define STBI_NO_JPEG 
+#endif +#ifndef STBI_ONLY_PNG +#define STBI_NO_PNG +#endif +#ifndef STBI_ONLY_BMP +#define STBI_NO_BMP +#endif +#ifndef STBI_ONLY_PSD +#define STBI_NO_PSD +#endif +#ifndef STBI_ONLY_TGA +#define STBI_NO_TGA +#endif +#ifndef STBI_ONLY_GIF +#define STBI_NO_GIF +#endif +#ifndef STBI_ONLY_HDR +#define STBI_NO_HDR +#endif +#ifndef STBI_ONLY_PIC +#define STBI_NO_PIC +#endif +#ifndef STBI_ONLY_PNM +#define STBI_NO_PNM +#endif +#endif + +#if defined(STBI_NO_PNG) && !defined(STBI_SUPPORT_ZLIB) && !defined(STBI_NO_ZLIB) +#define STBI_NO_ZLIB +#endif + + +#include +#include // ptrdiff_t on osx +#include +#include +#include + +#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) +#include // ldexp +#endif + +#ifndef STBI_NO_STDIO +#include +#endif + +#ifndef STBI_ASSERT +#include +#define STBI_ASSERT(x) assert(x) +#endif + + +#ifndef _MSC_VER +#ifdef __cplusplus +#define stbi_inline inline +#else +#define stbi_inline +#endif +#else +#define stbi_inline __forceinline +#endif + + +#ifdef _MSC_VER +typedef unsigned short stbi__uint16; +typedef signed short stbi__int16; +typedef unsigned int stbi__uint32; +typedef signed int stbi__int32; +#else +#include +typedef uint16_t stbi__uint16; +typedef int16_t stbi__int16; +typedef uint32_t stbi__uint32; +typedef int32_t stbi__int32; +#endif + +// should produce compiler error if size is wrong +typedef unsigned char validate_uint32[sizeof(stbi__uint32) == 4 ? 
1 : -1]; + +#ifdef _MSC_VER +#define STBI_NOTUSED(v) (void)(v) +#else +#define STBI_NOTUSED(v) (void)sizeof(v) +#endif + +#ifdef _MSC_VER +#define STBI_HAS_LROTL +#endif + +#ifdef STBI_HAS_LROTL +#define stbi_lrot(x,y) _lrotl(x,y) +#else +#define stbi_lrot(x,y) (((x) << (y)) | ((x) >> (32 - (y)))) +#endif + +#if defined(STBI_MALLOC) && defined(STBI_FREE) && (defined(STBI_REALLOC) || defined(STBI_REALLOC_SIZED)) +// ok +#elif !defined(STBI_MALLOC) && !defined(STBI_FREE) && !defined(STBI_REALLOC) && !defined(STBI_REALLOC_SIZED) +// ok +#else +#error "Must define all or none of STBI_MALLOC, STBI_FREE, and STBI_REALLOC (or STBI_REALLOC_SIZED)." +#endif + +#ifndef STBI_MALLOC +#define STBI_MALLOC(sz) malloc(sz) +#define STBI_REALLOC(p,newsz) realloc(p,newsz) +#define STBI_FREE(p) free(p) +#endif + +#ifndef STBI_REALLOC_SIZED +#define STBI_REALLOC_SIZED(p,oldsz,newsz) STBI_REALLOC(p,newsz) +#endif + +// x86/x64 detection +#if defined(__x86_64__) || defined(_M_X64) +#define STBI__X64_TARGET +#elif defined(__i386) || defined(_M_IX86) +#define STBI__X86_TARGET +#endif + +#if defined(__GNUC__) && (defined(STBI__X86_TARGET) || defined(STBI__X64_TARGET)) && !defined(__SSE2__) && !defined(STBI_NO_SIMD) +// NOTE: not clear do we actually need this for the 64-bit path? +// gcc doesn't support sse2 intrinsics unless you compile with -msse2, +// (but compiling with -msse2 allows the compiler to use SSE2 everywhere; +// this is just broken and gcc are jerks for not fixing it properly +// http://www.virtualdub.org/blog/pivot/entry.php?id=363 ) +#define STBI_NO_SIMD +#endif + +#if defined(__MINGW32__) && defined(STBI__X86_TARGET) && !defined(STBI_MINGW_ENABLE_SSE2) && !defined(STBI_NO_SIMD) +// Note that __MINGW32__ doesn't actually mean 32-bit, so we have to avoid STBI__X64_TARGET +// +// 32-bit MinGW wants ESP to be 16-byte aligned, but this is not in the +// Windows ABI and VC++ as well as Windows DLLs don't maintain that invariant. 
+// As a result, enabling SSE2 on 32-bit MinGW is dangerous when not +// simultaneously enabling "-mstackrealign". +// +// See https://github.com/nothings/stb/issues/81 for more information. +// +// So default to no SSE2 on 32-bit MinGW. If you've read this far and added +// -mstackrealign to your build settings, feel free to #define STBI_MINGW_ENABLE_SSE2. +#define STBI_NO_SIMD +#endif + +#if !defined(STBI_NO_SIMD) && (defined(STBI__X86_TARGET) || defined(STBI__X64_TARGET)) +#define STBI_SSE2 +#include + +#ifdef _MSC_VER + +#if _MSC_VER >= 1400 // not VC6 +#include // __cpuid +static int stbi__cpuid3(void) +{ + int info[4]; + __cpuid(info, 1); + return info[3]; +} +#else +static int stbi__cpuid3(void) +{ + int res; + __asm { + mov eax, 1 + cpuid + mov res, edx + } + return res; +} +#endif + +#define STBI_SIMD_ALIGN(type, name) __declspec(align(16)) type name + +static int stbi__sse2_available() +{ + int info3 = stbi__cpuid3(); + return ((info3 >> 26) & 1) != 0; +} +#else // assume GCC-style if not VC++ +#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16))) + +static int stbi__sse2_available() +{ +#if defined(__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__) >= 408 // GCC 4.8 or later + // GCC 4.8+ has a nice way to do this + return __builtin_cpu_supports("sse2"); +#else + // portable way to do this, preferably without using GCC inline ASM? + // just bail for now. 
+ return 0; +#endif +} +#endif +#endif + +// ARM NEON +#if defined(STBI_NO_SIMD) && defined(STBI_NEON) +#undef STBI_NEON +#endif + +#ifdef STBI_NEON +#include +// assume GCC or Clang on ARM targets +#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16))) +#endif + +#ifndef STBI_SIMD_ALIGN +#define STBI_SIMD_ALIGN(type, name) type name +#endif + +/////////////////////////////////////////////// +// +// stbi__context struct and start_xxx functions + +// stbi__context structure is our basic context used by all images, so it +// contains all the IO context, plus some basic image information +typedef struct +{ + stbi__uint32 img_x, img_y; + int img_n, img_out_n; + + stbi_io_callbacks io; + void *io_user_data; + + int read_from_callbacks; + int buflen; + stbi_uc buffer_start[128]; + + stbi_uc *img_buffer, *img_buffer_end; + stbi_uc *img_buffer_original, *img_buffer_original_end; +} stbi__context; + + +static void stbi__refill_buffer(stbi__context *s); + +// initialize a memory-decode context +static void stbi__start_mem(stbi__context *s, stbi_uc const *buffer, int len) +{ + s->io.read = NULL; + s->read_from_callbacks = 0; + s->img_buffer = s->img_buffer_original = (stbi_uc *)buffer; + s->img_buffer_end = s->img_buffer_original_end = (stbi_uc *)buffer + len; +} + +// initialize a callback-based context +static void stbi__start_callbacks(stbi__context *s, stbi_io_callbacks *c, void *user) +{ + s->io = *c; + s->io_user_data = user; + s->buflen = sizeof(s->buffer_start); + s->read_from_callbacks = 1; + s->img_buffer_original = s->buffer_start; + stbi__refill_buffer(s); + s->img_buffer_original_end = s->img_buffer_end; +} + +#ifndef STBI_NO_STDIO + +static int stbi__stdio_read(void *user, char *data, int size) +{ + return (int)fread(data, 1, size, (FILE*)user); +} + +static void stbi__stdio_skip(void *user, int n) +{ + fseek((FILE*)user, n, SEEK_CUR); +} + +static int stbi__stdio_eof(void *user) +{ + return feof((FILE*)user); +} + +static stbi_io_callbacks 
stbi__stdio_callbacks = +{ + stbi__stdio_read, + stbi__stdio_skip, + stbi__stdio_eof, +}; + +static void stbi__start_file(stbi__context *s, FILE *f) +{ + stbi__start_callbacks(s, &stbi__stdio_callbacks, (void *)f); +} + +//static void stop_file(stbi__context *s) { } + +#endif // !STBI_NO_STDIO + +static void stbi__rewind(stbi__context *s) +{ + // conceptually rewind SHOULD rewind to the beginning of the stream, + // but we just rewind to the beginning of the initial buffer, because + // we only use it after doing 'test', which only ever looks at at most 92 bytes + s->img_buffer = s->img_buffer_original; + s->img_buffer_end = s->img_buffer_original_end; +} + +enum +{ + STBI_ORDER_RGB, + STBI_ORDER_BGR +}; + +typedef struct +{ + int bits_per_channel; + int num_channels; + int channel_order; +} stbi__result_info; + +#ifndef STBI_NO_JPEG +static int stbi__jpeg_test(stbi__context *s); +static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PNG +static int stbi__png_test(stbi__context *s); +static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_BMP +static int stbi__bmp_test(stbi__context *s); +static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_TGA +static int stbi__tga_test(stbi__context *s); +static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PSD +static int stbi__psd_test(stbi__context *s); +static void *stbi__psd_load(stbi__context *s, int *x, 
int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc); +static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_HDR +static int stbi__hdr_test(stbi__context *s); +static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PIC +static int stbi__pic_test(stbi__context *s); +static void *stbi__pic_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_GIF +static int stbi__gif_test(stbi__context *s); +static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PNM +static int stbi__pnm_test(stbi__context *s); +static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +// this is not threadsafe +static const char *stbi__g_failure_reason; + +STBIDEF const char *stbi_failure_reason(void) +{ + return stbi__g_failure_reason; +} + +static int stbi__err(const char *str) +{ + stbi__g_failure_reason = str; + return 0; +} + +static void *stbi__malloc(size_t size) +{ + return STBI_MALLOC(size); +} + +// stb_image uses ints pervasively, including for offset calculations. +// therefore the largest decoded image size we can support with the +// current code, even on 64-bit targets, is INT_MAX. this is not a +// significant limitation for the intended use case. +// +// we do, however, need to make sure our size calculations don't +// overflow. 
hence a few helper functions for size calculations that
// multiply integers together, making sure that they're non-negative
// and no overflow occurs.

// return 1 if the sum is valid, 0 on overflow.
// negative terms are considered invalid.
static int stbi__addsizes_valid(int a, int b)
{
   if (b < 0) return 0;
   // now 0 <= b <= INT_MAX, hence also
   // 0 <= INT_MAX - b <= INT_MAX.
   // And "a + b <= INT_MAX" (which might overflow) is the
   // same as a <= INT_MAX - b (no overflow)
   return a <= INT_MAX - b;
}

// returns 1 if the product is valid, 0 on overflow.
// negative factors are considered invalid.
static int stbi__mul2sizes_valid(int a, int b)
{
   if (a < 0 || b < 0) return 0;
   if (b == 0) return 1; // mul-by-0 is always safe
   // portable way to check for no overflows in a*b
   return a <= INT_MAX / b;
}

// returns 1 if "a*b + add" has no negative terms/factors and doesn't overflow
static int stbi__mad2sizes_valid(int a, int b, int add)
{
   return stbi__mul2sizes_valid(a, b) && stbi__addsizes_valid(a*b, add);
}

// returns 1 if "a*b*c + add" has no negative terms/factors and doesn't overflow
// (checks each partial product left-to-right, so no intermediate overflows either)
static int stbi__mad3sizes_valid(int a, int b, int c, int add)
{
   return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) &&
      stbi__addsizes_valid(a*b*c, add);
}

// returns 1 if "a*b*c*d + add" has no negative terms/factors and doesn't overflow
static int stbi__mad4sizes_valid(int a, int b, int c, int d, int add)
{
   return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) &&
      stbi__mul2sizes_valid(a*b*c, d) && stbi__addsizes_valid(a*b*c*d, add);
}

// mallocs with size overflow checking; return NULL (without setting the
// failure reason) when the requested size cannot be represented in an int
static void *stbi__malloc_mad2(int a, int b, int add)
{
   if (!stbi__mad2sizes_valid(a, b, add)) return NULL;
   return stbi__malloc(a*b + add);
}

static void *stbi__malloc_mad3(int a, int b, int c, int add)
{
   if (!stbi__mad3sizes_valid(a, b, c, add)) return NULL;
   return stbi__malloc(a*b*c + add);
}

static void
*stbi__malloc_mad4(int a, int b, int c, int d, int add)
{
   if (!stbi__mad4sizes_valid(a, b, c, d, add)) return NULL;
   return stbi__malloc(a*b*c*d + add);
}

// stbi__err - error
// stbi__errpf - error returning pointer to float
// stbi__errpuc - error returning pointer to unsigned char
//
// NOTE: these macros deliberately rewrite calls to the stbi__err() function
// above, selecting the short reason (x), the user-friendly message (y), or
// no string at all depending on the failure-string configuration macros.
#ifdef STBI_NO_FAILURE_STRINGS
#define stbi__err(x,y)  0
#elif defined(STBI_FAILURE_USERMSG)
#define stbi__err(x,y)  stbi__err(y)
#else
#define stbi__err(x,y)  stbi__err(x)
#endif

// record the failure reason, then evaluate to NULL of the right pointer type
#define stbi__errpf(x,y)   ((float *)(size_t) (stbi__err(x,y)?NULL:NULL))
#define stbi__errpuc(x,y)  ((unsigned char *)(size_t) (stbi__err(x,y)?NULL:NULL))

// free a buffer returned by any stbi_load* entry point
STBIDEF void stbi_image_free(void *retval_from_stbi_load)
{
   STBI_FREE(retval_from_stbi_load);
}

#ifndef STBI_NO_LINEAR
static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp);
#endif

#ifndef STBI_NO_HDR
static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp);
#endif

// global flag set by stbi_set_flip_vertically_on_load(); not threadsafe
static int stbi__vertically_flip_on_load = 0;

STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip)
{
   stbi__vertically_flip_on_load = flag_true_if_should_flip;
}

// Central format dispatcher: probes the stream with each enabled decoder's
// cheap test function and hands off to the matching loader. 'bpc' (8 or 16)
// is forwarded only to decoders that can produce both depths (PSD here).
static void *stbi__load_main(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc)
{
   memset(ri, 0, sizeof(*ri)); // make sure it's initialized if we add new fields
   ri->bits_per_channel = 8; // default is 8 so most paths don't have to be changed
   ri->channel_order = STBI_ORDER_RGB; // all current input & output are this, but this is here so we can add BGR order
   ri->num_channels = 0;

#ifndef STBI_NO_JPEG
   if (stbi__jpeg_test(s)) return stbi__jpeg_load(s, x, y, comp, req_comp, ri);
#endif
#ifndef STBI_NO_PNG
   if (stbi__png_test(s)) return stbi__png_load(s, x, y, comp, req_comp, ri);
#endif
#ifndef STBI_NO_BMP
   if (stbi__bmp_test(s)) return stbi__bmp_load(s, x, y, comp, req_comp, ri);
#endif
#ifndef STBI_NO_GIF
   if (stbi__gif_test(s)) return stbi__gif_load(s, x, y, comp, req_comp, ri);
+#endif +#ifndef STBI_NO_PSD + if (stbi__psd_test(s)) return stbi__psd_load(s, x, y, comp, req_comp, ri, bpc); +#endif +#ifndef STBI_NO_PIC + if (stbi__pic_test(s)) return stbi__pic_load(s, x, y, comp, req_comp, ri); +#endif +#ifndef STBI_NO_PNM + if (stbi__pnm_test(s)) return stbi__pnm_load(s, x, y, comp, req_comp, ri); +#endif + +#ifndef STBI_NO_HDR + if (stbi__hdr_test(s)) { + float *hdr = stbi__hdr_load(s, x, y, comp, req_comp, ri); + return stbi__hdr_to_ldr(hdr, *x, *y, req_comp ? req_comp : *comp); + } +#endif + +#ifndef STBI_NO_TGA + // test tga last because it's a crappy test! + if (stbi__tga_test(s)) + return stbi__tga_load(s, x, y, comp, req_comp, ri); +#endif + + return stbi__errpuc("unknown image type", "Image not of any known type, or corrupt"); +} + +static stbi_uc *stbi__convert_16_to_8(stbi__uint16 *orig, int w, int h, int channels) +{ + int i; + int img_len = w * h * channels; + stbi_uc *reduced; + + reduced = (stbi_uc *)stbi__malloc(img_len); + if (reduced == NULL) return stbi__errpuc("outofmem", "Out of memory"); + + for (i = 0; i < img_len; ++i) + reduced[i] = (stbi_uc)((orig[i] >> 8) & 0xFF); // top half of each byte is sufficient approx of 16->8 bit scaling + + STBI_FREE(orig); + return reduced; +} + +static stbi__uint16 *stbi__convert_8_to_16(stbi_uc *orig, int w, int h, int channels) +{ + int i; + int img_len = w * h * channels; + stbi__uint16 *enlarged; + + enlarged = (stbi__uint16 *)stbi__malloc(img_len * 2); + if (enlarged == NULL) return (stbi__uint16 *)stbi__errpuc("outofmem", "Out of memory"); + + for (i = 0; i < img_len; ++i) + enlarged[i] = (stbi__uint16)((orig[i] << 8) + orig[i]); // replicate to high and low byte, maps 0->0, 255->0xffff + + STBI_FREE(orig); + return enlarged; +} + +static unsigned char *stbi__load_and_postprocess_8bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi__result_info ri; + void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 8); + + if (result == NULL) + return NULL; + + if 
(ri.bits_per_channel != 8) { + STBI_ASSERT(ri.bits_per_channel == 16); + result = stbi__convert_16_to_8((stbi__uint16 *)result, *x, *y, req_comp == 0 ? *comp : req_comp); + ri.bits_per_channel = 8; + } + + // @TODO: move stbi__convert_format to here + + if (stbi__vertically_flip_on_load) { + int w = *x, h = *y; + int channels = req_comp ? req_comp : *comp; + int row, col, z; + stbi_uc *image = (stbi_uc *)result; + + // @OPTIMIZE: use a bigger temp buffer and memcpy multiple pixels at once + for (row = 0; row < (h >> 1); row++) { + for (col = 0; col < w; col++) { + for (z = 0; z < channels; z++) { + stbi_uc temp = image[(row * w + col) * channels + z]; + image[(row * w + col) * channels + z] = image[((h - row - 1) * w + col) * channels + z]; + image[((h - row - 1) * w + col) * channels + z] = temp; + } + } + } + } + + return (unsigned char *)result; +} + +static stbi__uint16 *stbi__load_and_postprocess_16bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi__result_info ri; + void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 16); + + if (result == NULL) + return NULL; + + if (ri.bits_per_channel != 16) { + STBI_ASSERT(ri.bits_per_channel == 8); + result = stbi__convert_8_to_16((stbi_uc *)result, *x, *y, req_comp == 0 ? *comp : req_comp); + ri.bits_per_channel = 16; + } + + // @TODO: move stbi__convert_format16 to here + // @TODO: special case RGB-to-Y (and RGBA-to-YA) for 8-bit-to-16-bit case to keep more precision + + if (stbi__vertically_flip_on_load) { + int w = *x, h = *y; + int channels = req_comp ? 
req_comp : *comp; + int row, col, z; + stbi__uint16 *image = (stbi__uint16 *)result; + + // @OPTIMIZE: use a bigger temp buffer and memcpy multiple pixels at once + for (row = 0; row < (h >> 1); row++) { + for (col = 0; col < w; col++) { + for (z = 0; z < channels; z++) { + stbi__uint16 temp = image[(row * w + col) * channels + z]; + image[(row * w + col) * channels + z] = image[((h - row - 1) * w + col) * channels + z]; + image[((h - row - 1) * w + col) * channels + z] = temp; + } + } + } + } + + return (stbi__uint16 *)result; +} + +#ifndef STBI_NO_HDR +static void stbi__float_postprocess(float *result, int *x, int *y, int *comp, int req_comp) +{ + if (stbi__vertically_flip_on_load && result != NULL) { + int w = *x, h = *y; + int depth = req_comp ? req_comp : *comp; + int row, col, z; + float temp; + + // @OPTIMIZE: use a bigger temp buffer and memcpy multiple pixels at once + for (row = 0; row < (h >> 1); row++) { + for (col = 0; col < w; col++) { + for (z = 0; z < depth; z++) { + temp = result[(row * w + col) * depth + z]; + result[(row * w + col) * depth + z] = result[((h - row - 1) * w + col) * depth + z]; + result[((h - row - 1) * w + col) * depth + z] = temp; + } + } + } + } +} +#endif + +#ifndef STBI_NO_STDIO + +static FILE *stbi__fopen(char const *filename, char const *mode) +{ + FILE *f; +#if defined(_MSC_VER) && _MSC_VER >= 1400 + if (0 != fopen_s(&f, filename, mode)) + f = 0; +#else + f = fopen(filename, mode); +#endif + return f; +} + + +STBIDEF stbi_uc *stbi_load(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + unsigned char *result; + if (!f) return stbi__errpuc("can't fopen", "Unable to open file"); + result = stbi_load_from_file(f, x, y, comp, req_comp); + fclose(f); + return result; +} + +STBIDEF stbi_uc *stbi_load_from_file(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + unsigned char *result; + stbi__context s; + stbi__start_file(&s, f); + result = 
stbi__load_and_postprocess_8bit(&s, x, y, comp, req_comp); + if (result) { + // need to 'unget' all the characters in the IO buffer + fseek(f, -(int)(s.img_buffer_end - s.img_buffer), SEEK_CUR); + } + return result; +} + +STBIDEF stbi__uint16 *stbi_load_from_file_16(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + stbi__uint16 *result; + stbi__context s; + stbi__start_file(&s, f); + result = stbi__load_and_postprocess_16bit(&s, x, y, comp, req_comp); + if (result) { + // need to 'unget' all the characters in the IO buffer + fseek(f, -(int)(s.img_buffer_end - s.img_buffer), SEEK_CUR); + } + return result; +} + +STBIDEF stbi_us *stbi_load_16(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + stbi__uint16 *result; + if (!f) return (stbi_us *)stbi__errpuc("can't fopen", "Unable to open file"); + result = stbi_load_from_file_16(f, x, y, comp, req_comp); + fclose(f); + return result; +} + + +#endif //!STBI_NO_STDIO + +STBIDEF stbi_uc *stbi_load_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_mem(&s, buffer, len); + return stbi__load_and_postprocess_8bit(&s, x, y, comp, req_comp); +} + +STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *)clbk, user); + return stbi__load_and_postprocess_8bit(&s, x, y, comp, req_comp); +} + +#ifndef STBI_NO_LINEAR +static float *stbi__loadf_main(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + unsigned char *data; +#ifndef STBI_NO_HDR + if (stbi__hdr_test(s)) { + stbi__result_info ri; + float *hdr_data = stbi__hdr_load(s, x, y, comp, req_comp, &ri); + if (hdr_data) + stbi__float_postprocess(hdr_data, x, y, comp, req_comp); + return hdr_data; + } +#endif + data = stbi__load_and_postprocess_8bit(s, x, y, comp, req_comp); + if (data) + return 
stbi__ldr_to_hdr(data, *x, *y, req_comp ? req_comp : *comp); + return stbi__errpf("unknown image type", "Image not of any known type, or corrupt"); +} + +STBIDEF float *stbi_loadf_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_mem(&s, buffer, len); + return stbi__loadf_main(&s, x, y, comp, req_comp); +} + +STBIDEF float *stbi_loadf_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *)clbk, user); + return stbi__loadf_main(&s, x, y, comp, req_comp); +} + +#ifndef STBI_NO_STDIO +STBIDEF float *stbi_loadf(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + float *result; + FILE *f = stbi__fopen(filename, "rb"); + if (!f) return stbi__errpf("can't fopen", "Unable to open file"); + result = stbi_loadf_from_file(f, x, y, comp, req_comp); + fclose(f); + return result; +} + +STBIDEF float *stbi_loadf_from_file(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_file(&s, f); + return stbi__loadf_main(&s, x, y, comp, req_comp); +} +#endif // !STBI_NO_STDIO + +#endif // !STBI_NO_LINEAR + +// these is-hdr-or-not is defined independent of whether STBI_NO_LINEAR is +// defined, for API simplicity; if STBI_NO_LINEAR is defined, it always +// reports false! 
+ +STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len) +{ +#ifndef STBI_NO_HDR + stbi__context s; + stbi__start_mem(&s, buffer, len); + return stbi__hdr_test(&s); +#else + STBI_NOTUSED(buffer); + STBI_NOTUSED(len); + return 0; +#endif +} + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_is_hdr(char const *filename) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result = 0; + if (f) { + result = stbi_is_hdr_from_file(f); + fclose(f); + } + return result; +} + +STBIDEF int stbi_is_hdr_from_file(FILE *f) +{ +#ifndef STBI_NO_HDR + stbi__context s; + stbi__start_file(&s, f); + return stbi__hdr_test(&s); +#else + STBI_NOTUSED(f); + return 0; +#endif +} +#endif // !STBI_NO_STDIO + +STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user) +{ +#ifndef STBI_NO_HDR + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *)clbk, user); + return stbi__hdr_test(&s); +#else + STBI_NOTUSED(clbk); + STBI_NOTUSED(user); + return 0; +#endif +} + +#ifndef STBI_NO_LINEAR +static float stbi__l2h_gamma = 2.2f, stbi__l2h_scale = 1.0f; + +STBIDEF void stbi_ldr_to_hdr_gamma(float gamma) { stbi__l2h_gamma = gamma; } +STBIDEF void stbi_ldr_to_hdr_scale(float scale) { stbi__l2h_scale = scale; } +#endif + +static float stbi__h2l_gamma_i = 1.0f / 2.2f, stbi__h2l_scale_i = 1.0f; + +STBIDEF void stbi_hdr_to_ldr_gamma(float gamma) { stbi__h2l_gamma_i = 1 / gamma; } +STBIDEF void stbi_hdr_to_ldr_scale(float scale) { stbi__h2l_scale_i = 1 / scale; } + + +////////////////////////////////////////////////////////////////////////////// +// +// Common code used by all image loaders +// + +enum +{ + STBI__SCAN_load = 0, + STBI__SCAN_type, + STBI__SCAN_header +}; + +static void stbi__refill_buffer(stbi__context *s) +{ + int n = (s->io.read)(s->io_user_data, (char*)s->buffer_start, s->buflen); + if (n == 0) { + // at end of file, treat same as if from memory, but need to handle case + // where s->img_buffer isn't pointing to safe memory, e.g. 
0-byte file
      s->read_from_callbacks = 0;
      s->img_buffer = s->buffer_start;
      s->img_buffer_end = s->buffer_start + 1;
      *s->img_buffer = 0;
   }
   else {
      s->img_buffer = s->buffer_start;
      s->img_buffer_end = s->buffer_start + n;
   }
}

// Return the next byte of input, refilling the callback buffer when it runs
// dry; once the stream is exhausted every further call returns 0 (decoders
// rely on that sentinel rather than checking eof at each read).
stbi_inline static stbi_uc stbi__get8(stbi__context *s)
{
   if (s->img_buffer < s->img_buffer_end)
      return *s->img_buffer++;
   if (s->read_from_callbacks) {
      stbi__refill_buffer(s);
      return *s->img_buffer++;
   }
   return 0;
}

// Nonzero when no more input bytes can be delivered.
stbi_inline static int stbi__at_eof(stbi__context *s)
{
   if (s->io.read) {
      if (!(s->io.eof)(s->io_user_data)) return 0;
      // if feof() is true, check if buffer = end
      // special case: we've only got the special 0 character at the end
      if (s->read_from_callbacks == 0) return 1;
   }

   return s->img_buffer >= s->img_buffer_end;
}

// Skip n input bytes. A negative n (possible with corrupt headers) clamps
// to the end of the current buffer instead of seeking backwards; skips that
// overrun the buffer are forwarded to the io.skip callback.
static void stbi__skip(stbi__context *s, int n)
{
   if (n < 0) {
      s->img_buffer = s->img_buffer_end;
      return;
   }
   if (s->io.read) {
      int blen = (int)(s->img_buffer_end - s->img_buffer);
      if (blen < n) {
         s->img_buffer = s->img_buffer_end;
         (s->io.skip)(s->io_user_data, n - blen);
         return;
      }
   }
   s->img_buffer += n;
}

// Read exactly n bytes into 'buffer'. Returns 1 on success, 0 if the stream
// ended early (in which case 'buffer' may contain a partial read).
static int stbi__getn(stbi__context *s, stbi_uc *buffer, int n)
{
   if (s->io.read) {
      int blen = (int)(s->img_buffer_end - s->img_buffer);
      if (blen < n) {
         int res, count;

         // drain what's buffered, then satisfy the remainder directly
         // from the read callback
         memcpy(buffer, s->img_buffer, blen);

         count = (s->io.read)(s->io_user_data, (char*)buffer + blen, n - blen);
         res = (count == (n - blen));
         s->img_buffer = s->img_buffer_end;
         return res;
      }
   }

   if (s->img_buffer + n <= s->img_buffer_end) {
      memcpy(buffer, s->img_buffer, n);
      s->img_buffer += n;
      return 1;
   }
   else
      return 0;
}

// Big-endian 16-bit read.
static int stbi__get16be(stbi__context *s)
{
   int z = stbi__get8(s);
   return (z << 8) + stbi__get8(s);
}

// Big-endian 32-bit read.
static stbi__uint32 stbi__get32be(stbi__context *s)
{
   stbi__uint32 z = stbi__get16be(s);
   return (z << 16) + stbi__get16be(s);
}

#if defined(STBI_NO_BMP) && defined(STBI_NO_TGA) &&
defined(STBI_NO_GIF) +// nothing +#else +static int stbi__get16le(stbi__context *s) +{ + int z = stbi__get8(s); + return z + (stbi__get8(s) << 8); +} +#endif + +#ifndef STBI_NO_BMP +static stbi__uint32 stbi__get32le(stbi__context *s) +{ + stbi__uint32 z = stbi__get16le(s); + return z + (stbi__get16le(s) << 16); +} +#endif + +#define STBI__BYTECAST(x) ((stbi_uc) ((x) & 255)) // truncate int to byte without warnings + + +////////////////////////////////////////////////////////////////////////////// +// +// generic converter from built-in img_n to req_comp +// individual types do this automatically as much as possible (e.g. jpeg +// does all cases internally since it needs to colorspace convert anyway, +// and it never has alpha, so very few cases ). png can automatically +// interleave an alpha=255 channel, but falls back to this for other cases +// +// assume data buffer is malloced, so malloc a new one and free that one +// only failure mode is malloc failing + +static stbi_uc stbi__compute_y(int r, int g, int b) +{ + return (stbi_uc)(((r * 77) + (g * 150) + (29 * b)) >> 8); +} + +static unsigned char *stbi__convert_format(unsigned char *data, int img_n, int req_comp, unsigned int x, unsigned int y) +{ + int i, j; + unsigned char *good; + + if (req_comp == img_n) return data; + STBI_ASSERT(req_comp >= 1 && req_comp <= 4); + + good = (unsigned char *)stbi__malloc_mad3(req_comp, x, y, 0); + if (good == NULL) { + STBI_FREE(data); + return stbi__errpuc("outofmem", "Out of memory"); + } + + for (j = 0; j < (int)y; ++j) { + unsigned char *src = data + j * x * img_n; + unsigned char *dest = good + j * x * req_comp; + +#define STBI__COMBO(a,b) ((a)*8+(b)) +#define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b) + // convert source image with img_n components to one with req_comp components; + // avoid switch per pixel, so use switch per scanline and massive macros + switch (STBI__COMBO(img_n, req_comp)) { + STBI__CASE(1, 2) { dest[0] = 
src[0], dest[1] = 255; } break; + STBI__CASE(1, 3) { dest[0] = dest[1] = dest[2] = src[0]; } break; + STBI__CASE(1, 4) { dest[0] = dest[1] = dest[2] = src[0], dest[3] = 255; } break; + STBI__CASE(2, 1) { dest[0] = src[0]; } break; + STBI__CASE(2, 3) { dest[0] = dest[1] = dest[2] = src[0]; } break; + STBI__CASE(2, 4) { dest[0] = dest[1] = dest[2] = src[0], dest[3] = src[1]; } break; + STBI__CASE(3, 4) { dest[0] = src[0], dest[1] = src[1], dest[2] = src[2], dest[3] = 255; } break; + STBI__CASE(3, 1) { dest[0] = stbi__compute_y(src[0], src[1], src[2]); } break; + STBI__CASE(3, 2) { dest[0] = stbi__compute_y(src[0], src[1], src[2]), dest[1] = 255; } break; + STBI__CASE(4, 1) { dest[0] = stbi__compute_y(src[0], src[1], src[2]); } break; + STBI__CASE(4, 2) { dest[0] = stbi__compute_y(src[0], src[1], src[2]), dest[1] = src[3]; } break; + STBI__CASE(4, 3) { dest[0] = src[0], dest[1] = src[1], dest[2] = src[2]; } break; + default: STBI_ASSERT(0); + } +#undef STBI__CASE + } + + STBI_FREE(data); + return good; +} + +static stbi__uint16 stbi__compute_y_16(int r, int g, int b) +{ + return (stbi__uint16)(((r * 77) + (g * 150) + (29 * b)) >> 8); +} + +static stbi__uint16 *stbi__convert_format16(stbi__uint16 *data, int img_n, int req_comp, unsigned int x, unsigned int y) +{ + int i, j; + stbi__uint16 *good; + + if (req_comp == img_n) return data; + STBI_ASSERT(req_comp >= 1 && req_comp <= 4); + + good = (stbi__uint16 *)stbi__malloc(req_comp * x * y * 2); + if (good == NULL) { + STBI_FREE(data); + return (stbi__uint16 *)stbi__errpuc("outofmem", "Out of memory"); + } + + for (j = 0; j < (int)y; ++j) { + stbi__uint16 *src = data + j * x * img_n; + stbi__uint16 *dest = good + j * x * req_comp; + +#define STBI__COMBO(a,b) ((a)*8+(b)) +#define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b) + // convert source image with img_n components to one with req_comp components; + // avoid switch per pixel, so use switch per scanline and massive macros + 
switch (STBI__COMBO(img_n, req_comp)) { + STBI__CASE(1, 2) { dest[0] = src[0], dest[1] = 0xffff; } break; + STBI__CASE(1, 3) { dest[0] = dest[1] = dest[2] = src[0]; } break; + STBI__CASE(1, 4) { dest[0] = dest[1] = dest[2] = src[0], dest[3] = 0xffff; } break; + STBI__CASE(2, 1) { dest[0] = src[0]; } break; + STBI__CASE(2, 3) { dest[0] = dest[1] = dest[2] = src[0]; } break; + STBI__CASE(2, 4) { dest[0] = dest[1] = dest[2] = src[0], dest[3] = src[1]; } break; + STBI__CASE(3, 4) { dest[0] = src[0], dest[1] = src[1], dest[2] = src[2], dest[3] = 0xffff; } break; + STBI__CASE(3, 1) { dest[0] = stbi__compute_y_16(src[0], src[1], src[2]); } break; + STBI__CASE(3, 2) { dest[0] = stbi__compute_y_16(src[0], src[1], src[2]), dest[1] = 0xffff; } break; + STBI__CASE(4, 1) { dest[0] = stbi__compute_y_16(src[0], src[1], src[2]); } break; + STBI__CASE(4, 2) { dest[0] = stbi__compute_y_16(src[0], src[1], src[2]), dest[1] = src[3]; } break; + STBI__CASE(4, 3) { dest[0] = src[0], dest[1] = src[1], dest[2] = src[2]; } break; + default: STBI_ASSERT(0); + } +#undef STBI__CASE + } + + STBI_FREE(data); + return good; +} + +#ifndef STBI_NO_LINEAR +static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp) +{ + int i, k, n; + float *output; + if (!data) return NULL; + output = (float *)stbi__malloc_mad4(x, y, comp, sizeof(float), 0); + if (output == NULL) { STBI_FREE(data); return stbi__errpf("outofmem", "Out of memory"); } + // compute number of non-alpha components + if (comp & 1) n = comp; else n = comp - 1; + for (i = 0; i < x*y; ++i) { + for (k = 0; k < n; ++k) { + output[i*comp + k] = (float)(pow(data[i*comp + k] / 255.0f, stbi__l2h_gamma) * stbi__l2h_scale); + } + if (k < comp) output[i*comp + k] = data[i*comp + k] / 255.0f; + } + STBI_FREE(data); + return output; +} +#endif + +#ifndef STBI_NO_HDR +#define stbi__float2int(x) ((int) (x)) +static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp) +{ + int i, k, n; + stbi_uc *output; + if (!data) return NULL; + 
output = (stbi_uc *)stbi__malloc_mad3(x, y, comp, 0);
   if (output == NULL) { STBI_FREE(data); return stbi__errpuc("outofmem", "Out of memory"); }
   // compute number of non-alpha components
   if (comp & 1) n = comp; else n = comp - 1;
   for (i = 0; i < x*y; ++i) {
      for (k = 0; k < n; ++k) {
         float z = (float)pow(data[i*comp + k] * stbi__h2l_scale_i, stbi__h2l_gamma_i) * 255 + 0.5f;
         if (z < 0) z = 0;
         if (z > 255) z = 255;
         output[i*comp + k] = (stbi_uc)stbi__float2int(z);
      }
      if (k < comp) {
         // alpha: linear, no gamma
         float z = data[i*comp + k] * 255 + 0.5f;
         if (z < 0) z = 0;
         if (z > 255) z = 255;
         output[i*comp + k] = (stbi_uc)stbi__float2int(z);
      }
   }
   STBI_FREE(data);
   return output;
}
#endif

//////////////////////////////////////////////////////////////////////////////
//
//  "baseline" JPEG/JFIF decoder
//
//    simple implementation
//      - doesn't support delayed output of y-dimension
//      - simple interface (only one output format: 8-bit interleaved RGB)
//      - doesn't try to recover corrupt jpegs
//      - doesn't allow partial loading, loading multiple at once
//      - still fast on x86 (copying globals into locals doesn't help x86)
//      - allocates lots of intermediate memory (full size of all components)
//        - non-interleaved case requires this anyway
//      - allows good upsampling (see next)
//    high-quality
//      - upsampled channels are bilinearly interpolated, even across blocks
//      - quality integer IDCT derived from IJG's 'slow'
//    performance
//      - fast huffman; reasonable integer IDCT
//      - some SIMD kernels for common paths on targets with SSE2/NEON
//      - uses a lot of intermediate memory, could cache poorly

#ifndef STBI_NO_JPEG

// huffman decoding acceleration
#define FAST_BITS   9  // larger handles more cases; smaller stomps less cache

// One Huffman table (DC or AC), plus the FAST_BITS-wide acceleration table.
typedef struct
{
   stbi_uc  fast[1 << FAST_BITS];   // short-code lookup; 255 = not accelerated
   // weirdly, repacking this into AoS is a 10% speed loss, instead of a win
   stbi__uint16 code[256];
   stbi_uc  values[256];
   stbi_uc  size[257];
   unsigned int maxcode[18];
   int    delta[17];   // old 'firstsymbol' - old 'firstcode'
} stbi__huffman;

// Full decoder state for one JPEG image.
typedef struct
{
   stbi__context *s;
   stbi__huffman huff_dc[4];
   stbi__huffman huff_ac[4];
   stbi_uc dequant[4][64];
   stbi__int16 fast_ac[4][1 << FAST_BITS];

   // sizes for components, interleaved MCUs
   int img_h_max, img_v_max;
   int img_mcu_x, img_mcu_y;
   int img_mcu_w, img_mcu_h;

   // definition of jpeg image component
   struct
   {
      int id;
      int h, v;
      int tq;
      int hd, ha;
      int dc_pred;

      int x, y, w2, h2;
      stbi_uc *data;
      void *raw_data, *raw_coeff;
      stbi_uc *linebuf;
      short *coeff;            // progressive only
      int coeff_w, coeff_h;    // number of 8x8 coefficient blocks
   } img_comp[4];

   stbi__uint32 code_buffer;   // jpeg entropy-coded buffer
   int          code_bits;     // number of valid bits
   unsigned char marker;       // marker seen while filling entropy buffer
   int          nomore;        // flag if we saw a marker so must stop

   int progressive;
   int spec_start;
   int spec_end;
   int succ_high;
   int succ_low;
   int eob_run;
   int rgb;

   int scan_n, order[4];
   int restart_interval, todo;

   // kernels (scalar or SIMD, selected at setup)
   void(*idct_block_kernel)(stbi_uc *out, int out_stride, short data[64]);
   void(*YCbCr_to_RGB_kernel)(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step);
   stbi_uc *(*resample_row_hv_2_kernel)(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs);
} stbi__jpeg;

// Build code/size/maxcode/delta tables for one Huffman table from the
// 16 per-length symbol counts in a DHT segment (JPEG spec algorithm).
static int stbi__build_huffman(stbi__huffman *h, int *count)
{
   int i, j, k = 0, code;
   // build size list for each symbol (from JPEG spec)
   for (i = 0; i < 16; ++i)
      for (j = 0; j < count[i]; ++j)
         h->size[k++] = (stbi_uc)(i + 1);
   h->size[k] = 0;

   // compute actual symbols (from jpeg spec)
   code = 0;
   k = 0;
   for (j = 1; j <= 16; ++j) {
      // compute delta to add to code to compute symbol id
      h->delta[j] = k - code;
      if (h->size[k] == j) {
         while (h->size[k] == j)
            h->code[k++] = (stbi__uint16)(code++);
         if (code - 1 >= (1 << j)) return
stbi__err("bad code lengths", "Corrupt JPEG");
      }
      // compute largest code + 1 for this size, preshifted as needed later
      h->maxcode[j] = code << (16 - j);
      code <<= 1;
   }
   h->maxcode[j] = 0xffffffff;

   // build non-spec acceleration table; 255 is flag for not-accelerated
   memset(h->fast, 255, 1 << FAST_BITS);
   for (i = 0; i < k; ++i) {
      int s = h->size[i];
      if (s <= FAST_BITS) {
         int c = h->code[i] << (FAST_BITS - s);
         int m = 1 << (FAST_BITS - s);
         for (j = 0; j < m; ++j) {
            h->fast[c + j] = (stbi_uc)i;
         }
      }
   }
   return 1;
}

// build a table that decodes both magnitude and value of small ACs in
// one go.
static void stbi__build_fast_ac(stbi__int16 *fast_ac, stbi__huffman *h)
{
   int i;
   for (i = 0; i < (1 << FAST_BITS); ++i) {
      stbi_uc fast = h->fast[i];
      fast_ac[i] = 0;
      if (fast < 255) {
         int rs = h->values[fast];
         int run = (rs >> 4) & 15;
         int magbits = rs & 15;
         int len = h->size[fast];

         if (magbits && len + magbits <= FAST_BITS) {
            // magnitude code followed by receive_extend code
            int k = ((i << len) & ((1 << FAST_BITS) - 1)) >> (FAST_BITS - magbits);
            int m = 1 << (magbits - 1);
            if (k < m) k += (-1 << magbits) + 1;
            // if the result is small enough, we can fit it in fast_ac table
            if (k >= -128 && k <= 127)
               fast_ac[i] = (stbi__int16)((k << 8) + (run << 4) + (len + magbits));
         }
      }
   }
}

// Refill the entropy bit buffer until at least 25 bits are valid (or a
// marker/end is seen). In JPEG entropy data an 0xFF byte followed by 0x00
// is a stuffed literal 0xFF; 0xFF followed by anything else is a marker,
// which is latched into j->marker and stops further reads.
static void stbi__grow_buffer_unsafe(stbi__jpeg *j)
{
   do {
      int b = j->nomore ? 0 : stbi__get8(j->s);
      if (b == 0xff) {
         int c = stbi__get8(j->s);
         if (c != 0) {
            j->marker = (unsigned char)c;
            j->nomore = 1;
            return;
         }
      }
      j->code_buffer |= b << (24 - j->code_bits);
      j->code_bits += 8;
   } while (j->code_bits <= 24);
}

// (1 << n) - 1
static stbi__uint32 stbi__bmask[17] = { 0,1,3,7,15,31,63,127,255,511,1023,2047,4095,8191,16383,32767,65535 };

// decode a jpeg huffman value from the bitstream
stbi_inline static int stbi__jpeg_huff_decode(stbi__jpeg *j, stbi__huffman *h)
{
   unsigned int temp;
   int c, k;

   if (j->code_bits < 16) stbi__grow_buffer_unsafe(j);

   // look at the top FAST_BITS and determine what symbol ID it is,
   // if the code is <= FAST_BITS
   c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS) - 1);
   k = h->fast[c];
   if (k < 255) {
      int s = h->size[k];
      if (s > j->code_bits)
         return -1;
      j->code_buffer <<= s;
      j->code_bits -= s;
      return h->values[k];
   }

   // naive test is to shift the code_buffer down so k bits are
   // valid, then test against maxcode. To speed this up, we've
   // preshifted maxcode left so that it has (16-k) 0s at the
   // end; in other words, regardless of the number of bits, it
   // wants to be compared against something shifted to have 16;
   // that way we don't need to shift inside the loop.
   temp = j->code_buffer >> 16;
   for (k = FAST_BITS + 1; ; ++k)
      if (temp < h->maxcode[k])
         break;
   if (k == 17) {
      // error!
code not found + j->code_bits -= 16; + return -1; + } + + if (k > j->code_bits) + return -1; + + // convert the huffman code to the symbol id + c = ((j->code_buffer >> (32 - k)) & stbi__bmask[k]) + h->delta[k]; + STBI_ASSERT((((j->code_buffer) >> (32 - h->size[c])) & stbi__bmask[h->size[c]]) == h->code[c]); + + // convert the id to a symbol + j->code_bits -= k; + j->code_buffer <<= k; + return h->values[c]; +} + +// bias[n] = (-1<code_bits < n) stbi__grow_buffer_unsafe(j); + + sgn = (stbi__int32)j->code_buffer >> 31; // sign bit is always in MSB + k = stbi_lrot(j->code_buffer, n); + STBI_ASSERT(n >= 0 && n < (int)(sizeof(stbi__bmask) / sizeof(*stbi__bmask))); + j->code_buffer = k & ~stbi__bmask[n]; + k &= stbi__bmask[n]; + j->code_bits -= n; + return k + (stbi__jbias[n] & ~sgn); +} + +// get some unsigned bits +stbi_inline static int stbi__jpeg_get_bits(stbi__jpeg *j, int n) +{ + unsigned int k; + if (j->code_bits < n) stbi__grow_buffer_unsafe(j); + k = stbi_lrot(j->code_buffer, n); + j->code_buffer = k & ~stbi__bmask[n]; + k &= stbi__bmask[n]; + j->code_bits -= n; + return k; +} + +stbi_inline static int stbi__jpeg_get_bit(stbi__jpeg *j) +{ + unsigned int k; + if (j->code_bits < 1) stbi__grow_buffer_unsafe(j); + k = j->code_buffer; + j->code_buffer <<= 1; + --j->code_bits; + return k & 0x80000000; +} + +// given a value that's at position X in the zigzag stream, +// where does it appear in the 8x8 matrix coded as row-major? 
static stbi_uc stbi__jpeg_dezigzag[64 + 15] =
{
    0,  1,  8, 16,  9,  2,  3, 10,
   17, 24, 32, 25, 18, 11,  4,  5,
   12, 19, 26, 33, 40, 48, 41, 34,
   27, 20, 13,  6,  7, 14, 21, 28,
   35, 42, 49, 56, 57, 50, 43, 36,
   29, 22, 15, 23, 30, 37, 44, 51,
   58, 59, 52, 45, 38, 31, 39, 46,
   53, 60, 61, 54, 47, 55, 62, 63,
   // let corrupt input sample past end
   63, 63, 63, 63, 63, 63, 63, 63,
   63, 63, 63, 63, 63, 63, 63
};

// decode one 64-entry block--
static int stbi__jpeg_decode_block(stbi__jpeg *j, short data[64], stbi__huffman *hdc, stbi__huffman *hac, stbi__int16 *fac, int b, stbi_uc *dequant)
{
   int diff, dc, k;
   int t;

   if (j->code_bits < 16) stbi__grow_buffer_unsafe(j);
   t = stbi__jpeg_huff_decode(j, hdc);
   if (t < 0) return stbi__err("bad huffman code", "Corrupt JPEG");

   // 0 all the ac values now so we can do it 32-bits at a time
   memset(data, 0, 64 * sizeof(data[0]));

   // DC is coded as a difference from the previous block's DC of the
   // same component (b indexes img_comp)
   diff = t ? stbi__extend_receive(j, t) : 0;
   dc = j->img_comp[b].dc_pred + diff;
   j->img_comp[b].dc_pred = dc;
   data[0] = (short)(dc * dequant[0]);

   // decode AC components, see JPEG spec
   k = 1;
   do {
      unsigned int zig;
      int c, r, s;
      if (j->code_bits < 16) stbi__grow_buffer_unsafe(j);
      c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS) - 1);
      r = fac[c];
      if (r) { // fast-AC path
         k += (r >> 4) & 15; // run
         s = r & 15; // combined length
         j->code_buffer <<= s;
         j->code_bits -= s;
         // decode into unzigzag'd location
         zig = stbi__jpeg_dezigzag[k++];
         data[zig] = (short)((r >> 8) * dequant[zig]);
      }
      else {
         int rs = stbi__jpeg_huff_decode(j, hac);
         if (rs < 0) return stbi__err("bad huffman code", "Corrupt JPEG");
         s = rs & 15;
         r = rs >> 4;
         if (s == 0) {
            if (rs != 0xf0) break; // end block
            k += 16;
         }
         else {
            k += r;
            // decode into unzigzag'd location
            zig = stbi__jpeg_dezigzag[k++];
            data[zig] = (short)(stbi__extend_receive(j, s) * dequant[zig]);
         }
      }
   } while (k < 64);
   return 1;
}

// Progressive JPEG: decode the DC coefficient of one block for either the
// first DC scan (succ_high == 0) or a DC refinement scan.
static int stbi__jpeg_decode_block_prog_dc(stbi__jpeg *j, short data[64], stbi__huffman *hdc, int b)
{
   int diff, dc;
   int t;
   if (j->spec_end != 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG");

   if (j->code_bits < 16) stbi__grow_buffer_unsafe(j);

   if (j->succ_high == 0) {
      // first scan for DC coefficient, must be first
      memset(data, 0, 64 * sizeof(data[0])); // 0 all the ac values now
      t = stbi__jpeg_huff_decode(j, hdc);
      diff = t ? stbi__extend_receive(j, t) : 0;

      dc = j->img_comp[b].dc_pred + diff;
      j->img_comp[b].dc_pred = dc;
      data[0] = (short)(dc << j->succ_low);
   }
   else {
      // refinement scan for DC coefficient
      if (stbi__jpeg_get_bit(j))
         data[0] += (short)(1 << j->succ_low);
   }
   return 1;
}

// @OPTIMIZE: store non-zigzagged during the decode passes,
// and only de-zigzag when dequantizing
static int stbi__jpeg_decode_block_prog_ac(stbi__jpeg *j, short data[64], stbi__huffman *hac, stbi__int16 *fac)
{
   int k;
   if (j->spec_start == 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG");

   if (j->succ_high == 0) {
      // first scan for these AC coefficients
      int shift = j->succ_low;

      if (j->eob_run) {
         --j->eob_run;
         return 1;
      }

      k = j->spec_start;
      do {
         unsigned int zig;
         int c, r, s;
         if (j->code_bits < 16) stbi__grow_buffer_unsafe(j);
         c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS) - 1);
         r = fac[c];
         if (r) { // fast-AC path
            k += (r >> 4) & 15; // run
            s = r & 15; // combined length
            j->code_buffer <<= s;
            j->code_bits -= s;
            zig = stbi__jpeg_dezigzag[k++];
            data[zig] = (short)((r >> 8) << shift);
         }
         else {
            int rs = stbi__jpeg_huff_decode(j, hac);
            if (rs < 0) return stbi__err("bad huffman code", "Corrupt JPEG");
            s = rs & 15;
            r = rs >> 4;
            if (s == 0) {
               if (r < 15) {
                  // EOBn: run of (1<<r)+extra end-of-band blocks
                  j->eob_run = (1 << r);
                  if (r)
                     j->eob_run += stbi__jpeg_get_bits(j, r);
                  --j->eob_run;
                  break;
               }
               k += 16;
            }
            else {
               k += r;
               zig = stbi__jpeg_dezigzag[k++];
               data[zig] = (short)(stbi__extend_receive(j, s) << shift);
            }
         }
      } while (k <= j->spec_end);
   }
   else {
      // refinement scan for these AC coefficients

      short bit = (short)(1 << j->succ_low);

      if (j->eob_run) {
         // inside an end-of-band run: only refine already-nonzero coeffs
         --j->eob_run;
         for (k = j->spec_start; k <= j->spec_end; ++k) {
            short *p = &data[stbi__jpeg_dezigzag[k]];
            if (*p != 0)
               if (stbi__jpeg_get_bit(j))
                  if ((*p & bit) == 0) {
                     if (*p > 0)
                        *p += bit;
                     else
                        *p -= bit;
                  }
         }
      }
      else {
         k = j->spec_start;
         do {
            int r, s;
            int rs = stbi__jpeg_huff_decode(j, hac); // @OPTIMIZE see if we can use the fast path here, advance-by-r is so slow, eh
            if (rs < 0) return stbi__err("bad huffman code", "Corrupt JPEG");
            s = rs & 15;
            r = rs >> 4;
            if (s == 0) {
               if (r < 15) {
                  j->eob_run = (1 << r) - 1;
                  if (r)
                     j->eob_run += stbi__jpeg_get_bits(j, r);
                  r = 64; // force end of block
               }
               else {
                  // r=15 s=0 should write 16 0s, so we just do
                  // a run of 15 0s and then write s (which is 0),
                  // so we don't have to do anything special here
               }
            }
            else {
               if (s != 1) return stbi__err("bad huffman code", "Corrupt JPEG");
               // sign bit
               if (stbi__jpeg_get_bit(j))
                  s = bit;
               else
                  s = -bit;
            }

            // advance by r
            while (k <= j->spec_end) {
               short *p = &data[stbi__jpeg_dezigzag[k++]];
               if (*p != 0) {
                  // nonzero coefficient: correction bit
                  if (stbi__jpeg_get_bit(j))
                     if ((*p & bit) == 0) {
                        if (*p > 0)
                           *p += bit;
                        else
                           *p -= bit;
                     }
               }
               else {
                  if (r == 0) {
                     *p = (short)s;
                     break;
                  }
                  --r;
               }
            }
         } while (k <= j->spec_end);
      }
   }
   return 1;
}

// take a -128..127 value and stbi__clamp it and convert to 0..255
stbi_inline static stbi_uc stbi__clamp(int x)
{
   // trick to use a single test to catch both cases
   if ((unsigned int)x > 255) {
      if (x < 0) return 0;
      if (x > 255) return 255;
   }
   return (stbi_uc)x;
}

#define stbi__f2f(x)  ((int) (((x) * 4096 + 0.5)))
#define stbi__fsh(x)  ((x) << 12)

// derived from jidctint -- DCT_ISLOW
#define STBI__IDCT_1D(s0,s1,s2,s3,s4,s5,s6,s7) \
   int t0,t1,t2,t3,p1,p2,p3,p4,p5,x0,x1,x2,x3; \
   p2 = s2;                                    \
   p3 = s6;                                    \
   p1 = (p2+p3) * stbi__f2f(0.5411961f);       \
   t2 = p1 + p3*stbi__f2f(-1.847759065f);      \
   t3 = p1 + p2*stbi__f2f( 0.765366865f);      \
   p2 = s0;                                    \
   p3 = s4;                                    \
   t0 = stbi__fsh(p2+p3);                      \
   t1 = stbi__fsh(p2-p3);                      \
   x0 = t0+t3;                                 \
   x3 = t0-t3;                                 \
   x1 = t1+t2;                                 \
   x2 = t1-t2;                                 \
   t0 = s7;                                    \
   t1 = s5;                                    \
   t2 = s3;                                    \
   t3 = s1;                                    \
   p3 = t0+t2;                                 \
   p4 = t1+t3;                                 \
   p1 = t0+t3;                                 \
   p2 = t1+t2;                                 \
   p5 = (p3+p4)*stbi__f2f( 1.175875602f);      \
   t0 = t0*stbi__f2f( 0.298631336f);           \
   t1 = t1*stbi__f2f( 2.053119869f);           \
   t2 = t2*stbi__f2f( 3.072711026f);           \
   t3 = t3*stbi__f2f( 1.501321110f);           \
   p1 = p5 + p1*stbi__f2f(-0.899976223f);      \
   p2 = p5 + p2*stbi__f2f(-2.562915447f);      \
   p3 = p3*stbi__f2f(-1.961570560f);           \
   p4 = p4*stbi__f2f(-0.390180644f);           \
   t3 += p1+p4;                                \
   t2 += p2+p3;                                \
   t1 += p2+p4;                                \
   t0 += p1+p3;

// Scalar 8x8 inverse DCT (column pass then row pass), writing unsigned
// 8-bit samples into out with the given row stride.
static void stbi__idct_block(stbi_uc *out, int out_stride, short data[64])
{
   int i, val[64], *v = val;
   stbi_uc *o;
   short *d = data;

   // columns
   for (i = 0; i < 8; ++i, ++d, ++v) {
      // if all zeroes, shortcut -- this avoids dequantizing 0s and IDCTing
      if (d[8] == 0 && d[16] == 0 && d[24] == 0 && d[32] == 0
         && d[40] == 0 && d[48] == 0 && d[56] == 0) {
         //    no shortcut                 0 seconds
         //    (1|2|3|4|5|6|7)==0          0 seconds
         //    all separate               -0.047 seconds
         //    1 && 2|3 && 4|5 && 6|7:    -0.047 seconds
         int dcterm = d[0] << 2;
         v[0] = v[8] = v[16] = v[24] = v[32] = v[40] = v[48] = v[56] = dcterm;
      }
      else {
         STBI__IDCT_1D(d[0], d[8], d[16], d[24], d[32], d[40], d[48], d[56])
         // constants scaled things up by 1<<12; let's bring them back
         // down, but keep 2 extra bits of precision
         x0 += 512; x1 += 512; x2 += 512; x3 += 512;
         v[0] = (x0 + t3) >> 10;
         v[56] = (x0 - t3) >> 10;
         v[8] = (x1 + t2) >> 10;
         v[48] = (x1 - t2) >> 10;
         v[16] = (x2 + t1) >> 10;
         v[40] = (x2 - t1) >> 10;
         v[24] = (x3 + t0) >> 10;
         v[32] = (x3 - t0) >> 10;
      }
   }

   for (i = 0, v = val, o = out; i < 8; ++i, v += 8, o += out_stride) {
      // no fast case since the first
// 1D IDCT spread components out
      STBI__IDCT_1D(v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7])
      // constants scaled things up by 1<<12, plus we had 1<<2 from first
      // loop, plus horizontal and vertical each scale by sqrt(8) so together
      // we've got an extra 1<<3, so 1<<17 total we need to remove.
      // so we want to round that, which means adding 0.5 * 1<<17,
      // aka 65536. Also, we'll end up with -128 to 127 that we want
      // to encode as 0..255 by adding 128, so we'll add that before the shift
      x0 += 65536 + (128 << 17);
      x1 += 65536 + (128 << 17);
      x2 += 65536 + (128 << 17);
      x3 += 65536 + (128 << 17);
      // tried computing the shifts into temps, or'ing the temps to see
      // if any were out of range, but that was slower
      o[0] = stbi__clamp((x0 + t3) >> 17);
      o[7] = stbi__clamp((x0 - t3) >> 17);
      o[1] = stbi__clamp((x1 + t2) >> 17);
      o[6] = stbi__clamp((x1 - t2) >> 17);
      o[2] = stbi__clamp((x2 + t1) >> 17);
      o[5] = stbi__clamp((x2 - t1) >> 17);
      o[3] = stbi__clamp((x3 + t0) >> 17);
      o[4] = stbi__clamp((x3 - t0) >> 17);
   }
}

#ifdef STBI_SSE2
// sse2 integer IDCT. not the fastest possible implementation but it
// produces bit-identical results to the generic C version so it's
// fully "transparent".
static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64])
{
   // This is constructed to match our regular (generic) integer IDCT exactly.
   __m128i row0, row1, row2, row3, row4, row5, row6, row7;
   __m128i tmp;

   // dot product constant: even elems=x, odd elems=y
#define dct_const(x,y)  _mm_setr_epi16((x),(y),(x),(y),(x),(y),(x),(y))

   // out(0) = c0[even]*x + c0[odd]*y   (c0, x, y 16-bit, out 32-bit)
   // out(1) = c1[even]*x + c1[odd]*y
#define dct_rot(out0,out1, x,y,c0,c1) \
      __m128i c0##lo = _mm_unpacklo_epi16((x),(y)); \
      __m128i c0##hi = _mm_unpackhi_epi16((x),(y)); \
      __m128i out0##_l = _mm_madd_epi16(c0##lo, c0); \
      __m128i out0##_h = _mm_madd_epi16(c0##hi, c0); \
      __m128i out1##_l = _mm_madd_epi16(c0##lo, c1); \
      __m128i out1##_h = _mm_madd_epi16(c0##hi, c1)

   // out = in << 12  (in 16-bit, out 32-bit)
#define dct_widen(out, in) \
      __m128i out##_l = _mm_srai_epi32(_mm_unpacklo_epi16(_mm_setzero_si128(), (in)), 4); \
      __m128i out##_h = _mm_srai_epi32(_mm_unpackhi_epi16(_mm_setzero_si128(), (in)), 4)

   // wide add
#define dct_wadd(out, a, b) \
      __m128i out##_l = _mm_add_epi32(a##_l, b##_l); \
      __m128i out##_h = _mm_add_epi32(a##_h, b##_h)

   // wide sub
#define dct_wsub(out, a, b) \
      __m128i out##_l = _mm_sub_epi32(a##_l, b##_l); \
      __m128i out##_h = _mm_sub_epi32(a##_h, b##_h)

   // butterfly a/b, add bias, then shift by "s" and pack
#define dct_bfly32o(out0, out1, a,b,bias,s) \
      { \
         __m128i abiased_l = _mm_add_epi32(a##_l, bias); \
         __m128i abiased_h = _mm_add_epi32(a##_h, bias); \
         dct_wadd(sum, abiased, b); \
         dct_wsub(dif, abiased, b); \
         out0 = _mm_packs_epi32(_mm_srai_epi32(sum_l, s), _mm_srai_epi32(sum_h, s)); \
         out1 = _mm_packs_epi32(_mm_srai_epi32(dif_l, s), _mm_srai_epi32(dif_h, s)); \
      }

   // 8-bit interleave step (for transposes)
#define dct_interleave8(a, b) \
      tmp = a; \
      a = _mm_unpacklo_epi8(a, b); \
      b = _mm_unpackhi_epi8(tmp, b)

   // 16-bit interleave step (for transposes)
#define dct_interleave16(a, b) \
      tmp = a; \
      a = _mm_unpacklo_epi16(a, b); \
      b = _mm_unpackhi_epi16(tmp, b)

#define dct_pass(bias,shift) \
      { \
         /* even part */ \
         dct_rot(t2e,t3e, row2,row6, rot0_0,rot0_1); \
         __m128i sum04 = _mm_add_epi16(row0, row4); \
         __m128i dif04 = _mm_sub_epi16(row0, row4); \
         dct_widen(t0e, sum04); \
         dct_widen(t1e, dif04); \
         dct_wadd(x0, t0e, t3e); \
         dct_wsub(x3, t0e, t3e); \
         dct_wadd(x1, t1e, t2e); \
         dct_wsub(x2, t1e, t2e); \
         /* odd part */ \
         dct_rot(y0o,y2o, row7,row3, rot2_0,rot2_1); \
         dct_rot(y1o,y3o, row5,row1, rot3_0,rot3_1); \
         __m128i sum17 = _mm_add_epi16(row1, row7); \
         __m128i sum35 = _mm_add_epi16(row3, row5); \
         dct_rot(y4o,y5o, sum17,sum35, rot1_0,rot1_1); \
         dct_wadd(x4, y0o, y4o); \
         dct_wadd(x5, y1o, y5o); \
         dct_wadd(x6, y2o, y5o); \
         dct_wadd(x7, y3o, y4o); \
         dct_bfly32o(row0,row7, x0,x7,bias,shift); \
         dct_bfly32o(row1,row6, x1,x6,bias,shift); \
         dct_bfly32o(row2,row5, x2,x5,bias,shift); \
         dct_bfly32o(row3,row4, x3,x4,bias,shift); \
      }

   __m128i rot0_0 = dct_const(stbi__f2f(0.5411961f), stbi__f2f(0.5411961f) + stbi__f2f(-1.847759065f));
   __m128i rot0_1 = dct_const(stbi__f2f(0.5411961f) + stbi__f2f(0.765366865f), stbi__f2f(0.5411961f));
   __m128i rot1_0 = dct_const(stbi__f2f(1.175875602f) + stbi__f2f(-0.899976223f), stbi__f2f(1.175875602f));
   __m128i rot1_1 = dct_const(stbi__f2f(1.175875602f), stbi__f2f(1.175875602f) + stbi__f2f(-2.562915447f));
   __m128i rot2_0 = dct_const(stbi__f2f(-1.961570560f) + stbi__f2f(0.298631336f), stbi__f2f(-1.961570560f));
   __m128i rot2_1 = dct_const(stbi__f2f(-1.961570560f), stbi__f2f(-1.961570560f) + stbi__f2f(3.072711026f));
   __m128i rot3_0 = dct_const(stbi__f2f(-0.390180644f) + stbi__f2f(2.053119869f), stbi__f2f(-0.390180644f));
   __m128i rot3_1 = dct_const(stbi__f2f(-0.390180644f), stbi__f2f(-0.390180644f) + stbi__f2f(1.501321110f));

   // rounding biases in column/row passes, see stbi__idct_block for explanation.
   __m128i bias_0 = _mm_set1_epi32(512);
   __m128i bias_1 = _mm_set1_epi32(65536 + (128 << 17));

   // load
   row0 = _mm_load_si128((const __m128i *) (data + 0 * 8));
   row1 = _mm_load_si128((const __m128i *) (data + 1 * 8));
   row2 = _mm_load_si128((const __m128i *) (data + 2 * 8));
   row3 = _mm_load_si128((const __m128i *) (data + 3 * 8));
   row4 = _mm_load_si128((const __m128i *) (data + 4 * 8));
   row5 = _mm_load_si128((const __m128i *) (data + 5 * 8));
   row6 = _mm_load_si128((const __m128i *) (data + 6 * 8));
   row7 = _mm_load_si128((const __m128i *) (data + 7 * 8));

   // column pass
   dct_pass(bias_0, 10);

   {
      // 16bit 8x8 transpose pass 1
      dct_interleave16(row0, row4);
      dct_interleave16(row1, row5);
      dct_interleave16(row2, row6);
      dct_interleave16(row3, row7);

      // transpose pass 2
      dct_interleave16(row0, row2);
      dct_interleave16(row1, row3);
      dct_interleave16(row4, row6);
      dct_interleave16(row5, row7);

      // transpose pass 3
      dct_interleave16(row0, row1);
      dct_interleave16(row2, row3);
      dct_interleave16(row4, row5);
      dct_interleave16(row6, row7);
   }

   // row pass
   dct_pass(bias_1, 17);

   {
      // pack
      __m128i p0 = _mm_packus_epi16(row0, row1); // a0a1a2a3...a7b0b1b2b3...b7
      __m128i p1 = _mm_packus_epi16(row2, row3);
      __m128i p2 = _mm_packus_epi16(row4, row5);
      __m128i p3 = _mm_packus_epi16(row6, row7);

      // 8bit 8x8 transpose pass 1
      dct_interleave8(p0, p2); // a0e0a1e1...
      dct_interleave8(p1, p3); // c0g0c1g1...

      // transpose pass 2
      dct_interleave8(p0, p1); // a0c0e0g0...
      dct_interleave8(p2, p3); // b0d0f0h0...

      // transpose pass 3
      dct_interleave8(p0, p2); // a0b0c0d0...
      dct_interleave8(p1, p3); // a4b4c4d4...
      // store
      _mm_storel_epi64((__m128i *) out, p0); out += out_stride;
      _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p0, 0x4e)); out += out_stride;
      _mm_storel_epi64((__m128i *) out, p2); out += out_stride;
      _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p2, 0x4e)); out += out_stride;
      _mm_storel_epi64((__m128i *) out, p1); out += out_stride;
      _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p1, 0x4e)); out += out_stride;
      _mm_storel_epi64((__m128i *) out, p3); out += out_stride;
      _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p3, 0x4e));
   }

#undef dct_const
#undef dct_rot
#undef dct_widen
#undef dct_wadd
#undef dct_wsub
#undef dct_bfly32o
#undef dct_interleave8
#undef dct_interleave16
#undef dct_pass
}

#endif // STBI_SSE2

#ifdef STBI_NEON

// NEON integer IDCT. should produce bit-identical
// results to the generic C version.
static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64])
{
   int16x8_t row0, row1, row2, row3, row4, row5, row6, row7;

   int16x4_t rot0_0 = vdup_n_s16(stbi__f2f(0.5411961f));
   int16x4_t rot0_1 = vdup_n_s16(stbi__f2f(-1.847759065f));
   int16x4_t rot0_2 = vdup_n_s16(stbi__f2f(0.765366865f));
   int16x4_t rot1_0 = vdup_n_s16(stbi__f2f(1.175875602f));
   int16x4_t rot1_1 = vdup_n_s16(stbi__f2f(-0.899976223f));
   int16x4_t rot1_2 = vdup_n_s16(stbi__f2f(-2.562915447f));
   int16x4_t rot2_0 = vdup_n_s16(stbi__f2f(-1.961570560f));
   int16x4_t rot2_1 = vdup_n_s16(stbi__f2f(-0.390180644f));
   int16x4_t rot3_0 = vdup_n_s16(stbi__f2f(0.298631336f));
   int16x4_t rot3_1 = vdup_n_s16(stbi__f2f(2.053119869f));
   int16x4_t rot3_2 = vdup_n_s16(stbi__f2f(3.072711026f));
   int16x4_t rot3_3 = vdup_n_s16(stbi__f2f(1.501321110f));

#define dct_long_mul(out, inq, coeff) \
   int32x4_t out##_l = vmull_s16(vget_low_s16(inq), coeff); \
   int32x4_t out##_h = vmull_s16(vget_high_s16(inq), coeff)

#define dct_long_mac(out, acc, inq, coeff) \
   int32x4_t out##_l = vmlal_s16(acc##_l, vget_low_s16(inq), coeff); \
   int32x4_t out##_h = vmlal_s16(acc##_h, vget_high_s16(inq), coeff)

#define dct_widen(out, inq) \
   int32x4_t out##_l = vshll_n_s16(vget_low_s16(inq), 12); \
   int32x4_t out##_h = vshll_n_s16(vget_high_s16(inq), 12)

   // wide add
#define dct_wadd(out, a, b) \
   int32x4_t out##_l = vaddq_s32(a##_l, b##_l); \
   int32x4_t out##_h = vaddq_s32(a##_h, b##_h)

   // wide sub
#define dct_wsub(out, a, b) \
   int32x4_t out##_l = vsubq_s32(a##_l, b##_l); \
   int32x4_t out##_h = vsubq_s32(a##_h, b##_h)

   // butterfly a/b, then shift using "shiftop" by "s" and pack
#define dct_bfly32o(out0,out1, a,b,shiftop,s) \
   { \
      dct_wadd(sum, a, b); \
      dct_wsub(dif, a, b); \
      out0 = vcombine_s16(shiftop(sum_l, s), shiftop(sum_h, s)); \
      out1 = vcombine_s16(shiftop(dif_l, s), shiftop(dif_h, s)); \
   }

#define dct_pass(shiftop, shift) \
   { \
      /* even part */ \
      int16x8_t sum26 = vaddq_s16(row2, row6); \
      dct_long_mul(p1e, sum26, rot0_0); \
      dct_long_mac(t2e, p1e, row6, rot0_1); \
      dct_long_mac(t3e, p1e, row2, rot0_2); \
      int16x8_t sum04 = vaddq_s16(row0, row4); \
      int16x8_t dif04 = vsubq_s16(row0, row4); \
      dct_widen(t0e, sum04); \
      dct_widen(t1e, dif04); \
      dct_wadd(x0, t0e, t3e); \
      dct_wsub(x3, t0e, t3e); \
      dct_wadd(x1, t1e, t2e); \
      dct_wsub(x2, t1e, t2e); \
      /* odd part */ \
      int16x8_t sum15 = vaddq_s16(row1, row5); \
      int16x8_t sum17 = vaddq_s16(row1, row7); \
      int16x8_t sum35 = vaddq_s16(row3, row5); \
      int16x8_t sum37 = vaddq_s16(row3, row7); \
      int16x8_t sumodd = vaddq_s16(sum17, sum35); \
      dct_long_mul(p5o, sumodd, rot1_0); \
      dct_long_mac(p1o, p5o, sum17, rot1_1); \
      dct_long_mac(p2o, p5o, sum35, rot1_2); \
      dct_long_mul(p3o, sum37, rot2_0); \
      dct_long_mul(p4o, sum15, rot2_1); \
      dct_wadd(sump13o, p1o, p3o); \
      dct_wadd(sump24o, p2o, p4o); \
      dct_wadd(sump23o, p2o, p3o); \
      dct_wadd(sump14o, p1o, p4o); \
      dct_long_mac(x4, sump13o, row7, rot3_0); \
      dct_long_mac(x5, sump24o, row5, rot3_1); \
      dct_long_mac(x6, sump23o, row3, rot3_2); \
      dct_long_mac(x7, sump14o, row1, rot3_3); \
      dct_bfly32o(row0,row7, x0,x7,shiftop,shift); \
      dct_bfly32o(row1,row6, x1,x6,shiftop,shift); \
      dct_bfly32o(row2,row5, x2,x5,shiftop,shift); \
      dct_bfly32o(row3,row4, x3,x4,shiftop,shift); \
   }

   // load
   row0 = vld1q_s16(data + 0 * 8);
   row1 = vld1q_s16(data + 1 * 8);
   row2 = vld1q_s16(data + 2 * 8);
   row3 = vld1q_s16(data + 3 * 8);
   row4 = vld1q_s16(data + 4 * 8);
   row5 = vld1q_s16(data + 5 * 8);
   row6 = vld1q_s16(data + 6 * 8);
   row7 = vld1q_s16(data + 7 * 8);

   // add DC bias
   row0 = vaddq_s16(row0, vsetq_lane_s16(1024, vdupq_n_s16(0), 0));

   // column pass
   dct_pass(vrshrn_n_s32, 10);

   // 16bit 8x8 transpose
   {
      // these three map to a single VTRN.16, VTRN.32, and VSWP, respectively.
      // whether compilers actually get this is another story, sadly.
#define dct_trn16(x, y) { int16x8x2_t t = vtrnq_s16(x, y); x = t.val[0]; y = t.val[1]; }
#define dct_trn32(x, y) { int32x4x2_t t = vtrnq_s32(vreinterpretq_s32_s16(x), vreinterpretq_s32_s16(y)); x = vreinterpretq_s16_s32(t.val[0]); y = vreinterpretq_s16_s32(t.val[1]); }
#define dct_trn64(x, y) { int16x8_t x0 = x; int16x8_t y0 = y; x = vcombine_s16(vget_low_s16(x0), vget_low_s16(y0)); y = vcombine_s16(vget_high_s16(x0), vget_high_s16(y0)); }

      // pass 1
      dct_trn16(row0, row1); // a0b0a2b2a4b4a6b6
      dct_trn16(row2, row3);
      dct_trn16(row4, row5);
      dct_trn16(row6, row7);

      // pass 2
      dct_trn32(row0, row2); // a0b0c0d0a4b4c4d4
      dct_trn32(row1, row3);
      dct_trn32(row4, row6);
      dct_trn32(row5, row7);

      // pass 3
      dct_trn64(row0, row4); // a0b0c0d0e0f0g0h0
      dct_trn64(row1, row5);
      dct_trn64(row2, row6);
      dct_trn64(row3, row7);

#undef dct_trn16
#undef dct_trn32
#undef dct_trn64
   }

   // row pass
   // vrshrn_n_s32 only supports shifts up to 16, we need
   // 17. so do a non-rounding shift of 16 first then follow
   // up with a rounding shift by 1.
   dct_pass(vshrn_n_s32, 16);

   {
      // pack and round
      uint8x8_t p0 = vqrshrun_n_s16(row0, 1);
      uint8x8_t p1 = vqrshrun_n_s16(row1, 1);
      uint8x8_t p2 = vqrshrun_n_s16(row2, 1);
      uint8x8_t p3 = vqrshrun_n_s16(row3, 1);
      uint8x8_t p4 = vqrshrun_n_s16(row4, 1);
      uint8x8_t p5 = vqrshrun_n_s16(row5, 1);
      uint8x8_t p6 = vqrshrun_n_s16(row6, 1);
      uint8x8_t p7 = vqrshrun_n_s16(row7, 1);

      // again, these can translate into one instruction, but often don't.
#define dct_trn8_8(x, y) { uint8x8x2_t t = vtrn_u8(x, y); x = t.val[0]; y = t.val[1]; }
#define dct_trn8_16(x, y) { uint16x4x2_t t = vtrn_u16(vreinterpret_u16_u8(x), vreinterpret_u16_u8(y)); x = vreinterpret_u8_u16(t.val[0]); y = vreinterpret_u8_u16(t.val[1]); }
#define dct_trn8_32(x, y) { uint32x2x2_t t = vtrn_u32(vreinterpret_u32_u8(x), vreinterpret_u32_u8(y)); x = vreinterpret_u8_u32(t.val[0]); y = vreinterpret_u8_u32(t.val[1]); }

      // sadly can't use interleaved stores here since we only write
      // 8 bytes to each scan line!
      // 8x8 8-bit transpose pass 1
      dct_trn8_8(p0, p1);
      dct_trn8_8(p2, p3);
      dct_trn8_8(p4, p5);
      dct_trn8_8(p6, p7);

      // pass 2
      dct_trn8_16(p0, p2);
      dct_trn8_16(p1, p3);
      dct_trn8_16(p4, p6);
      dct_trn8_16(p5, p7);

      // pass 3
      dct_trn8_32(p0, p4);
      dct_trn8_32(p1, p5);
      dct_trn8_32(p2, p6);
      dct_trn8_32(p3, p7);

      // store
      vst1_u8(out, p0); out += out_stride;
      vst1_u8(out, p1); out += out_stride;
      vst1_u8(out, p2); out += out_stride;
      vst1_u8(out, p3); out += out_stride;
      vst1_u8(out, p4); out += out_stride;
      vst1_u8(out, p5); out += out_stride;
      vst1_u8(out, p6); out += out_stride;
      vst1_u8(out, p7);

#undef dct_trn8_8
#undef dct_trn8_16
#undef dct_trn8_32
   }

#undef dct_long_mul
#undef dct_long_mac
#undef dct_widen
#undef dct_wadd
#undef dct_wsub
#undef dct_bfly32o
#undef dct_pass
}

#endif // STBI_NEON

#define STBI__MARKER_none  0xff
// if there's a pending marker from the entropy stream, return that
// otherwise, fetch from the stream and get a marker. if there's no
// marker, return 0xff, which is never a valid marker value
static stbi_uc stbi__get_marker(stbi__jpeg *j)
{
   stbi_uc x;
   if (j->marker != STBI__MARKER_none) { x = j->marker; j->marker = STBI__MARKER_none; return x; }
   x = stbi__get8(j->s);
   if (x != 0xff) return STBI__MARKER_none;
   while (x == 0xff)
      x = stbi__get8(j->s); // consume repeated 0xff fill bytes
   return x;
}

// in each scan, we'll have scan_n components, and the order
// of the components is specified by order[]
#define STBI__RESTART(x)     ((x) >= 0xd0 && (x) <= 0xd7)

// after a restart interval, stbi__jpeg_reset the entropy decoder and
// the dc prediction
static void stbi__jpeg_reset(stbi__jpeg *j)
{
   j->code_bits = 0;
   j->code_buffer = 0;
   j->nomore = 0;
   j->img_comp[0].dc_pred = j->img_comp[1].dc_pred = j->img_comp[2].dc_pred = 0;
   j->marker = STBI__MARKER_none;
   j->todo = j->restart_interval ? j->restart_interval : 0x7fffffff;
   j->eob_run = 0;
   // no more than 1<<31 MCUs if no restart_interal? that's plenty safe,
   // since we don't even allow 1<<30 pixels
}

// Decode all entropy-coded data for one scan, dispatching on
// baseline-vs-progressive and interleaved-vs-single-component layouts.
static int stbi__parse_entropy_coded_data(stbi__jpeg *z)
{
   stbi__jpeg_reset(z);
   if (!z->progressive) {
      if (z->scan_n == 1) {
         int i, j;
         STBI_SIMD_ALIGN(short, data[64]);
         int n = z->order[0];
         // non-interleaved data, we just need to process one block at a time,
         // in trivial scanline order
         // number of blocks to do just depends on how many actual "pixels" this
         // component has, independent of interleaved MCU blocking and such
         int w = (z->img_comp[n].x + 7) >> 3;
         int h = (z->img_comp[n].y + 7) >> 3;
         for (j = 0; j < h; ++j) {
            for (i = 0; i < w; ++i) {
               int ha = z->img_comp[n].ha;
               if (!stbi__jpeg_decode_block(z, data, z->huff_dc + z->img_comp[n].hd, z->huff_ac + ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0;
               z->idct_block_kernel(z->img_comp[n].data + z->img_comp[n].w2*j * 8 + i * 8, z->img_comp[n].w2, data);
               // every data block is an MCU, so countdown the restart interval
               if (--z->todo <= 0) {
                  if (z->code_bits < 24) stbi__grow_buffer_unsafe(z);
                  // if it's NOT a restart, then just bail, so we get corrupt data
                  // rather than no data
                  if (!STBI__RESTART(z->marker)) return 1;
                  stbi__jpeg_reset(z);
               }
            }
         }
         return 1;
      }
      else { // interleaved
         int i, j, k, x, y;
         STBI_SIMD_ALIGN(short, data[64]);
         for (j = 0; j < z->img_mcu_y; ++j) {
            for (i = 0; i < z->img_mcu_x; ++i) {
               // scan an interleaved mcu... process scan_n components in order
               for (k = 0; k < z->scan_n; ++k) {
                  int n = z->order[k];
                  // scan out an mcu's worth of this component; that's just determined
                  // by the basic H and V specified for the component
                  for (y = 0; y < z->img_comp[n].v; ++y) {
                     for (x = 0; x < z->img_comp[n].h; ++x) {
                        int x2 = (i*z->img_comp[n].h + x) * 8;
                        int y2 = (j*z->img_comp[n].v + y) * 8;
                        int ha = z->img_comp[n].ha;
                        if (!stbi__jpeg_decode_block(z, data, z->huff_dc + z->img_comp[n].hd, z->huff_ac + ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0;
                        z->idct_block_kernel(z->img_comp[n].data + z->img_comp[n].w2*y2 + x2, z->img_comp[n].w2, data);
                     }
                  }
               }
               // after all interleaved components, that's an interleaved MCU,
               // so now count down the restart interval
               if (--z->todo <= 0) {
                  if (z->code_bits < 24) stbi__grow_buffer_unsafe(z);
                  if (!STBI__RESTART(z->marker)) return 1;
                  stbi__jpeg_reset(z);
               }
            }
         }
         return 1;
      }
   }
   else {
      // progressive: store dequantized coefficients, IDCT happens later
      if (z->scan_n == 1) {
         int i, j;
         int n = z->order[0];
         // non-interleaved data, we just need to process one block at a time,
         // in trivial scanline order
         // number of blocks to do just depends on how many actual "pixels" this
         // component has, independent of interleaved MCU blocking and such
         int w = (z->img_comp[n].x + 7) >> 3;
         int h = (z->img_comp[n].y + 7) >> 3;
         for (j = 0; j < h; ++j) {
            for (i = 0; i < w; ++i) {
               short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w);
               if (z->spec_start == 0) {
                  if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n))
                     return 0;
               }
               else {
                  int ha = z->img_comp[n].ha;
                  if (!stbi__jpeg_decode_block_prog_ac(z, data, &z->huff_ac[ha], z->fast_ac[ha]))
                     return 0;
               }
               // every data block is an MCU, so countdown the restart interval
               if (--z->todo <= 0) {
                  if (z->code_bits < 24) stbi__grow_buffer_unsafe(z);
                  if (!STBI__RESTART(z->marker)) return 1;
                  stbi__jpeg_reset(z);
               }
            }
         }
         return 1;
      }
      else
{ // interleaved + int i, j, k, x, y; + for (j = 0; j < z->img_mcu_y; ++j) { + for (i = 0; i < z->img_mcu_x; ++i) { + // scan an interleaved mcu... process scan_n components in order + for (k = 0; k < z->scan_n; ++k) { + int n = z->order[k]; + // scan out an mcu's worth of this component; that's just determined + // by the basic H and V specified for the component + for (y = 0; y < z->img_comp[n].v; ++y) { + for (x = 0; x < z->img_comp[n].h; ++x) { + int x2 = (i*z->img_comp[n].h + x); + int y2 = (j*z->img_comp[n].v + y); + short *data = z->img_comp[n].coeff + 64 * (x2 + y2 * z->img_comp[n].coeff_w); + if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) + return 0; + } + } + } + // after all interleaved components, that's an interleaved MCU, + // so now count down the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + } +} + +static void stbi__jpeg_dequantize(short *data, stbi_uc *dequant) +{ + int i; + for (i = 0; i < 64; ++i) + data[i] *= dequant[i]; +} + +static void stbi__jpeg_finish(stbi__jpeg *z) +{ + if (z->progressive) { + // dequantize and idct the data + int i, j, n; + for (n = 0; n < z->s->img_n; ++n) { + int w = (z->img_comp[n].x + 7) >> 3; + int h = (z->img_comp[n].y + 7) >> 3; + for (j = 0; j < h; ++j) { + for (i = 0; i < w; ++i) { + short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); + stbi__jpeg_dequantize(data, z->dequant[z->img_comp[n].tq]); + z->idct_block_kernel(z->img_comp[n].data + z->img_comp[n].w2*j * 8 + i * 8, z->img_comp[n].w2, data); + } + } + } + } +} + +static int stbi__process_marker(stbi__jpeg *z, int m) +{ + int L; + switch (m) { + case STBI__MARKER_none: // no marker found + return stbi__err("expected marker", "Corrupt JPEG"); + + case 0xDD: // DRI - specify restart interval + if (stbi__get16be(z->s) != 4) return stbi__err("bad DRI len", 
"Corrupt JPEG"); + z->restart_interval = stbi__get16be(z->s); + return 1; + + case 0xDB: // DQT - define quantization table + L = stbi__get16be(z->s) - 2; + while (L > 0) { + int q = stbi__get8(z->s); + int p = q >> 4; + int t = q & 15, i; + if (p != 0) return stbi__err("bad DQT type", "Corrupt JPEG"); + if (t > 3) return stbi__err("bad DQT table", "Corrupt JPEG"); + for (i = 0; i < 64; ++i) + z->dequant[t][stbi__jpeg_dezigzag[i]] = stbi__get8(z->s); + L -= 65; + } + return L == 0; + + case 0xC4: // DHT - define huffman table + L = stbi__get16be(z->s) - 2; + while (L > 0) { + stbi_uc *v; + int sizes[16], i, n = 0; + int q = stbi__get8(z->s); + int tc = q >> 4; + int th = q & 15; + if (tc > 1 || th > 3) return stbi__err("bad DHT header", "Corrupt JPEG"); + for (i = 0; i < 16; ++i) { + sizes[i] = stbi__get8(z->s); + n += sizes[i]; + } + L -= 17; + if (tc == 0) { + if (!stbi__build_huffman(z->huff_dc + th, sizes)) return 0; + v = z->huff_dc[th].values; + } + else { + if (!stbi__build_huffman(z->huff_ac + th, sizes)) return 0; + v = z->huff_ac[th].values; + } + for (i = 0; i < n; ++i) + v[i] = stbi__get8(z->s); + if (tc != 0) + stbi__build_fast_ac(z->fast_ac[th], z->huff_ac + th); + L -= n; + } + return L == 0; + } + // check for comment block or APP blocks + if ((m >= 0xE0 && m <= 0xEF) || m == 0xFE) { + stbi__skip(z->s, stbi__get16be(z->s) - 2); + return 1; + } + return 0; +} + +// after we see SOS +static int stbi__process_scan_header(stbi__jpeg *z) +{ + int i; + int Ls = stbi__get16be(z->s); + z->scan_n = stbi__get8(z->s); + if (z->scan_n < 1 || z->scan_n > 4 || z->scan_n > (int)z->s->img_n) return stbi__err("bad SOS component count", "Corrupt JPEG"); + if (Ls != 6 + 2 * z->scan_n) return stbi__err("bad SOS len", "Corrupt JPEG"); + for (i = 0; i < z->scan_n; ++i) { + int id = stbi__get8(z->s), which; + int q = stbi__get8(z->s); + for (which = 0; which < z->s->img_n; ++which) + if (z->img_comp[which].id == id) + break; + if (which == z->s->img_n) return 0; // no 
match + z->img_comp[which].hd = q >> 4; if (z->img_comp[which].hd > 3) return stbi__err("bad DC huff", "Corrupt JPEG"); + z->img_comp[which].ha = q & 15; if (z->img_comp[which].ha > 3) return stbi__err("bad AC huff", "Corrupt JPEG"); + z->order[i] = which; + } + + { + int aa; + z->spec_start = stbi__get8(z->s); + z->spec_end = stbi__get8(z->s); // should be 63, but might be 0 + aa = stbi__get8(z->s); + z->succ_high = (aa >> 4); + z->succ_low = (aa & 15); + if (z->progressive) { + if (z->spec_start > 63 || z->spec_end > 63 || z->spec_start > z->spec_end || z->succ_high > 13 || z->succ_low > 13) + return stbi__err("bad SOS", "Corrupt JPEG"); + } + else { + if (z->spec_start != 0) return stbi__err("bad SOS", "Corrupt JPEG"); + if (z->succ_high != 0 || z->succ_low != 0) return stbi__err("bad SOS", "Corrupt JPEG"); + z->spec_end = 63; + } + } + + return 1; +} + +static int stbi__free_jpeg_components(stbi__jpeg *z, int ncomp, int why) +{ + int i; + for (i = 0; i < ncomp; ++i) { + if (z->img_comp[i].raw_data) { + STBI_FREE(z->img_comp[i].raw_data); + z->img_comp[i].raw_data = NULL; + z->img_comp[i].data = NULL; + } + if (z->img_comp[i].raw_coeff) { + STBI_FREE(z->img_comp[i].raw_coeff); + z->img_comp[i].raw_coeff = 0; + z->img_comp[i].coeff = 0; + } + if (z->img_comp[i].linebuf) { + STBI_FREE(z->img_comp[i].linebuf); + z->img_comp[i].linebuf = NULL; + } + } + return why; +} + +static int stbi__process_frame_header(stbi__jpeg *z, int scan) +{ + stbi__context *s = z->s; + int Lf, p, i, q, h_max = 1, v_max = 1, c; + Lf = stbi__get16be(s); if (Lf < 11) return stbi__err("bad SOF len", "Corrupt JPEG"); // JPEG + p = stbi__get8(s); if (p != 8) return stbi__err("only 8-bit", "JPEG format not supported: 8-bit only"); // JPEG baseline + s->img_y = stbi__get16be(s); if (s->img_y == 0) return stbi__err("no header height", "JPEG format not supported: delayed height"); // Legal, but we don't handle it--but neither does IJG + s->img_x = stbi__get16be(s); if (s->img_x == 0) return 
stbi__err("0 width", "Corrupt JPEG"); // JPEG requires + c = stbi__get8(s); + if (c != 3 && c != 1) return stbi__err("bad component count", "Corrupt JPEG"); // JFIF requires + s->img_n = c; + for (i = 0; i < c; ++i) { + z->img_comp[i].data = NULL; + z->img_comp[i].linebuf = NULL; + } + + if (Lf != 8 + 3 * s->img_n) return stbi__err("bad SOF len", "Corrupt JPEG"); + + z->rgb = 0; + for (i = 0; i < s->img_n; ++i) { + static unsigned char rgb[3] = { 'R', 'G', 'B' }; + z->img_comp[i].id = stbi__get8(s); + if (z->img_comp[i].id != i + 1) // JFIF requires + if (z->img_comp[i].id != i) { // some version of jpegtran outputs non-JFIF-compliant files! + // somethings output this (see http://fileformats.archiveteam.org/wiki/JPEG#Color_format) + if (z->img_comp[i].id != rgb[i]) + return stbi__err("bad component ID", "Corrupt JPEG"); + ++z->rgb; + } + q = stbi__get8(s); + z->img_comp[i].h = (q >> 4); if (!z->img_comp[i].h || z->img_comp[i].h > 4) return stbi__err("bad H", "Corrupt JPEG"); + z->img_comp[i].v = q & 15; if (!z->img_comp[i].v || z->img_comp[i].v > 4) return stbi__err("bad V", "Corrupt JPEG"); + z->img_comp[i].tq = stbi__get8(s); if (z->img_comp[i].tq > 3) return stbi__err("bad TQ", "Corrupt JPEG"); + } + + if (scan != STBI__SCAN_load) return 1; + + if (!stbi__mad3sizes_valid(s->img_x, s->img_y, s->img_n, 0)) return stbi__err("too large", "Image too large to decode"); + + for (i = 0; i < s->img_n; ++i) { + if (z->img_comp[i].h > h_max) h_max = z->img_comp[i].h; + if (z->img_comp[i].v > v_max) v_max = z->img_comp[i].v; + } + + // compute interleaved mcu info + z->img_h_max = h_max; + z->img_v_max = v_max; + z->img_mcu_w = h_max * 8; + z->img_mcu_h = v_max * 8; + // these sizes can't be more than 17 bits + z->img_mcu_x = (s->img_x + z->img_mcu_w - 1) / z->img_mcu_w; + z->img_mcu_y = (s->img_y + z->img_mcu_h - 1) / z->img_mcu_h; + + for (i = 0; i < s->img_n; ++i) { + // number of effective pixels (e.g. 
for non-interleaved MCU) + z->img_comp[i].x = (s->img_x * z->img_comp[i].h + h_max - 1) / h_max; + z->img_comp[i].y = (s->img_y * z->img_comp[i].v + v_max - 1) / v_max; + // to simplify generation, we'll allocate enough memory to decode + // the bogus oversized data from using interleaved MCUs and their + // big blocks (e.g. a 16x16 iMCU on an image of width 33); we won't + // discard the extra data until colorspace conversion + // + // img_mcu_x, img_mcu_y: <=17 bits; comp[i].h and .v are <=4 (checked earlier) + // so these muls can't overflow with 32-bit ints (which we require) + z->img_comp[i].w2 = z->img_mcu_x * z->img_comp[i].h * 8; + z->img_comp[i].h2 = z->img_mcu_y * z->img_comp[i].v * 8; + z->img_comp[i].coeff = 0; + z->img_comp[i].raw_coeff = 0; + z->img_comp[i].linebuf = NULL; + z->img_comp[i].raw_data = stbi__malloc_mad2(z->img_comp[i].w2, z->img_comp[i].h2, 15); + if (z->img_comp[i].raw_data == NULL) + return stbi__free_jpeg_components(z, i + 1, stbi__err("outofmem", "Out of memory")); + // align blocks for idct using mmx/sse + z->img_comp[i].data = (stbi_uc*)(((size_t)z->img_comp[i].raw_data + 15) & ~15); + if (z->progressive) { + // w2, h2 are multiples of 8 (see above) + z->img_comp[i].coeff_w = z->img_comp[i].w2 / 8; + z->img_comp[i].coeff_h = z->img_comp[i].h2 / 8; + z->img_comp[i].raw_coeff = stbi__malloc_mad3(z->img_comp[i].w2, z->img_comp[i].h2, sizeof(short), 15); + if (z->img_comp[i].raw_coeff == NULL) + return stbi__free_jpeg_components(z, i + 1, stbi__err("outofmem", "Out of memory")); + z->img_comp[i].coeff = (short*)(((size_t)z->img_comp[i].raw_coeff + 15) & ~15); + } + } + + return 1; +} + +// use comparisons since in some cases we handle more than one case (e.g. 
SOF) +#define stbi__DNL(x) ((x) == 0xdc) +#define stbi__SOI(x) ((x) == 0xd8) +#define stbi__EOI(x) ((x) == 0xd9) +#define stbi__SOF(x) ((x) == 0xc0 || (x) == 0xc1 || (x) == 0xc2) +#define stbi__SOS(x) ((x) == 0xda) + +#define stbi__SOF_progressive(x) ((x) == 0xc2) + +static int stbi__decode_jpeg_header(stbi__jpeg *z, int scan) +{ + int m; + z->marker = STBI__MARKER_none; // initialize cached marker to empty + m = stbi__get_marker(z); + if (!stbi__SOI(m)) return stbi__err("no SOI", "Corrupt JPEG"); + if (scan == STBI__SCAN_type) return 1; + m = stbi__get_marker(z); + while (!stbi__SOF(m)) { + if (!stbi__process_marker(z, m)) return 0; + m = stbi__get_marker(z); + while (m == STBI__MARKER_none) { + // some files have extra padding after their blocks, so ok, we'll scan + if (stbi__at_eof(z->s)) return stbi__err("no SOF", "Corrupt JPEG"); + m = stbi__get_marker(z); + } + } + z->progressive = stbi__SOF_progressive(m); + if (!stbi__process_frame_header(z, scan)) return 0; + return 1; +} + +// decode image to YCbCr format +static int stbi__decode_jpeg_image(stbi__jpeg *j) +{ + int m; + for (m = 0; m < 4; m++) { + j->img_comp[m].raw_data = NULL; + j->img_comp[m].raw_coeff = NULL; + } + j->restart_interval = 0; + if (!stbi__decode_jpeg_header(j, STBI__SCAN_load)) return 0; + m = stbi__get_marker(j); + while (!stbi__EOI(m)) { + if (stbi__SOS(m)) { + if (!stbi__process_scan_header(j)) return 0; + if (!stbi__parse_entropy_coded_data(j)) return 0; + if (j->marker == STBI__MARKER_none) { + // handle 0s at the end of image data from IP Kamera 9060 + while (!stbi__at_eof(j->s)) { + int x = stbi__get8(j->s); + if (x == 255) { + j->marker = stbi__get8(j->s); + break; + } + else if (x != 0) { + return stbi__err("junk before marker", "Corrupt JPEG"); + } + } + // if we reach eof without hitting a marker, stbi__get_marker() below will fail and we'll eventually return 0 + } + } + else { + if (!stbi__process_marker(j, m)) return 0; + } + m = stbi__get_marker(j); + } + if (j->progressive) 
+ stbi__jpeg_finish(j); + return 1; +} + +// static jfif-centered resampling (across block boundaries) + +typedef stbi_uc *(*resample_row_func)(stbi_uc *out, stbi_uc *in0, stbi_uc *in1, + int w, int hs); + +#define stbi__div4(x) ((stbi_uc) ((x) >> 2)) + +static stbi_uc *resample_row_1(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + STBI_NOTUSED(out); + STBI_NOTUSED(in_far); + STBI_NOTUSED(w); + STBI_NOTUSED(hs); + return in_near; +} + +static stbi_uc* stbi__resample_row_v_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate two samples vertically for every one in input + int i; + STBI_NOTUSED(hs); + for (i = 0; i < w; ++i) + out[i] = stbi__div4(3 * in_near[i] + in_far[i] + 2); + return out; +} + +static stbi_uc* stbi__resample_row_h_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate two samples horizontally for every one in input + int i; + stbi_uc *input = in_near; + + if (w == 1) { + // if only one sample, can't do any interpolation + out[0] = out[1] = input[0]; + return out; + } + + out[0] = input[0]; + out[1] = stbi__div4(input[0] * 3 + input[1] + 2); + for (i = 1; i < w - 1; ++i) { + int n = 3 * input[i] + 2; + out[i * 2 + 0] = stbi__div4(n + input[i - 1]); + out[i * 2 + 1] = stbi__div4(n + input[i + 1]); + } + out[i * 2 + 0] = stbi__div4(input[w - 2] * 3 + input[w - 1] + 2); + out[i * 2 + 1] = input[w - 1]; + + STBI_NOTUSED(in_far); + STBI_NOTUSED(hs); + + return out; +} + +#define stbi__div16(x) ((stbi_uc) ((x) >> 4)) + +static stbi_uc *stbi__resample_row_hv_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate 2x2 samples for every one in input + int i, t0, t1; + if (w == 1) { + out[0] = out[1] = stbi__div4(3 * in_near[0] + in_far[0] + 2); + return out; + } + + t1 = 3 * in_near[0] + in_far[0]; + out[0] = stbi__div4(t1 + 2); + for (i = 1; i < w; ++i) { + t0 = t1; + t1 = 3 * in_near[i] + in_far[i]; + out[i * 2 - 1] = 
stbi__div16(3 * t0 + t1 + 8); + out[i * 2] = stbi__div16(3 * t1 + t0 + 8); + } + out[w * 2 - 1] = stbi__div4(t1 + 2); + + STBI_NOTUSED(hs); + + return out; +} + +#if defined(STBI_SSE2) || defined(STBI_NEON) +static stbi_uc *stbi__resample_row_hv_2_simd(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate 2x2 samples for every one in input + int i = 0, t0, t1; + + if (w == 1) { + out[0] = out[1] = stbi__div4(3 * in_near[0] + in_far[0] + 2); + return out; + } + + t1 = 3 * in_near[0] + in_far[0]; + // process groups of 8 pixels for as long as we can. + // note we can't handle the last pixel in a row in this loop + // because we need to handle the filter boundary conditions. + for (; i < ((w - 1) & ~7); i += 8) { +#if defined(STBI_SSE2) + // load and perform the vertical filtering pass + // this uses 3*x + y = 4*x + (y - x) + __m128i zero = _mm_setzero_si128(); + __m128i farb = _mm_loadl_epi64((__m128i *) (in_far + i)); + __m128i nearb = _mm_loadl_epi64((__m128i *) (in_near + i)); + __m128i farw = _mm_unpacklo_epi8(farb, zero); + __m128i nearw = _mm_unpacklo_epi8(nearb, zero); + __m128i diff = _mm_sub_epi16(farw, nearw); + __m128i nears = _mm_slli_epi16(nearw, 2); + __m128i curr = _mm_add_epi16(nears, diff); // current row + + // horizontal filter works the same based on shifted vers of current + // row. "prev" is current row shifted right by 1 pixel; we need to + // insert the previous pixel value (from t1). + // "next" is current row shifted left by 1 pixel, with first pixel + // of next block of 8 pixels added in. + __m128i prv0 = _mm_slli_si128(curr, 2); + __m128i nxt0 = _mm_srli_si128(curr, 2); + __m128i prev = _mm_insert_epi16(prv0, t1, 0); + __m128i next = _mm_insert_epi16(nxt0, 3 * in_near[i + 8] + in_far[i + 8], 7); + + // horizontal filter, polyphase implementation since it's convenient: + // even pixels = 3*cur + prev = cur*4 + (prev - cur) + // odd pixels = 3*cur + next = cur*4 + (next - cur) + // note the shared term. 
+ __m128i bias = _mm_set1_epi16(8); + __m128i curs = _mm_slli_epi16(curr, 2); + __m128i prvd = _mm_sub_epi16(prev, curr); + __m128i nxtd = _mm_sub_epi16(next, curr); + __m128i curb = _mm_add_epi16(curs, bias); + __m128i even = _mm_add_epi16(prvd, curb); + __m128i odd = _mm_add_epi16(nxtd, curb); + + // interleave even and odd pixels, then undo scaling. + __m128i int0 = _mm_unpacklo_epi16(even, odd); + __m128i int1 = _mm_unpackhi_epi16(even, odd); + __m128i de0 = _mm_srli_epi16(int0, 4); + __m128i de1 = _mm_srli_epi16(int1, 4); + + // pack and write output + __m128i outv = _mm_packus_epi16(de0, de1); + _mm_storeu_si128((__m128i *) (out + i * 2), outv); +#elif defined(STBI_NEON) + // load and perform the vertical filtering pass + // this uses 3*x + y = 4*x + (y - x) + uint8x8_t farb = vld1_u8(in_far + i); + uint8x8_t nearb = vld1_u8(in_near + i); + int16x8_t diff = vreinterpretq_s16_u16(vsubl_u8(farb, nearb)); + int16x8_t nears = vreinterpretq_s16_u16(vshll_n_u8(nearb, 2)); + int16x8_t curr = vaddq_s16(nears, diff); // current row + + // horizontal filter works the same based on shifted vers of current + // row. "prev" is current row shifted right by 1 pixel; we need to + // insert the previous pixel value (from t1). + // "next" is current row shifted left by 1 pixel, with first pixel + // of next block of 8 pixels added in. + int16x8_t prv0 = vextq_s16(curr, curr, 7); + int16x8_t nxt0 = vextq_s16(curr, curr, 1); + int16x8_t prev = vsetq_lane_s16(t1, prv0, 0); + int16x8_t next = vsetq_lane_s16(3 * in_near[i + 8] + in_far[i + 8], nxt0, 7); + + // horizontal filter, polyphase implementation since it's convenient: + // even pixels = 3*cur + prev = cur*4 + (prev - cur) + // odd pixels = 3*cur + next = cur*4 + (next - cur) + // note the shared term. 
+ int16x8_t curs = vshlq_n_s16(curr, 2); + int16x8_t prvd = vsubq_s16(prev, curr); + int16x8_t nxtd = vsubq_s16(next, curr); + int16x8_t even = vaddq_s16(curs, prvd); + int16x8_t odd = vaddq_s16(curs, nxtd); + + // undo scaling and round, then store with even/odd phases interleaved + uint8x8x2_t o; + o.val[0] = vqrshrun_n_s16(even, 4); + o.val[1] = vqrshrun_n_s16(odd, 4); + vst2_u8(out + i * 2, o); +#endif + + // "previous" value for next iter + t1 = 3 * in_near[i + 7] + in_far[i + 7]; + } + + t0 = t1; + t1 = 3 * in_near[i] + in_far[i]; + out[i * 2] = stbi__div16(3 * t1 + t0 + 8); + + for (++i; i < w; ++i) { + t0 = t1; + t1 = 3 * in_near[i] + in_far[i]; + out[i * 2 - 1] = stbi__div16(3 * t0 + t1 + 8); + out[i * 2] = stbi__div16(3 * t1 + t0 + 8); + } + out[w * 2 - 1] = stbi__div4(t1 + 2); + + STBI_NOTUSED(hs); + + return out; +} +#endif + +static stbi_uc *stbi__resample_row_generic(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // resample with nearest-neighbor + int i, j; + STBI_NOTUSED(in_far); + for (i = 0; i < w; ++i) + for (j = 0; j < hs; ++j) + out[i*hs + j] = in_near[i]; + return out; +} + +#ifdef STBI_JPEG_OLD +// this is the same YCbCr-to-RGB calculation that stb_image has used +// historically before the algorithm changes in 1.49 +#define float2fixed(x) ((int) ((x) * 65536 + 0.5)) +static void stbi__YCbCr_to_RGB_row(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step) +{ + int i; + for (i = 0; i < count; ++i) { + int y_fixed = (y[i] << 16) + 32768; // rounding + int r, g, b; + int cr = pcr[i] - 128; + int cb = pcb[i] - 128; + r = y_fixed + cr*float2fixed(1.40200f); + g = y_fixed - cr*float2fixed(0.71414f) - cb*float2fixed(0.34414f); + b = y_fixed + cb*float2fixed(1.77200f); + r >>= 16; + g >>= 16; + b >>= 16; + if ((unsigned)r > 255) { if (r < 0) r = 0; else r = 255; } + if ((unsigned)g > 255) { if (g < 0) g = 0; else g = 255; } + if ((unsigned)b > 255) { if (b < 0) b = 0; else b = 255; } + 
out[0] = (stbi_uc)r; + out[1] = (stbi_uc)g; + out[2] = (stbi_uc)b; + out[3] = 255; + out += step; + } +} +#else +// this is a reduced-precision calculation of YCbCr-to-RGB introduced +// to make sure the code produces the same results in both SIMD and scalar +#define float2fixed(x) (((int) ((x) * 4096.0f + 0.5f)) << 8) +static void stbi__YCbCr_to_RGB_row(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step) +{ + int i; + for (i = 0; i < count; ++i) { + int y_fixed = (y[i] << 20) + (1 << 19); // rounding + int r, g, b; + int cr = pcr[i] - 128; + int cb = pcb[i] - 128; + r = y_fixed + cr* float2fixed(1.40200f); + g = y_fixed + (cr*-float2fixed(0.71414f)) + ((cb*-float2fixed(0.34414f)) & 0xffff0000); + b = y_fixed + cb* float2fixed(1.77200f); + r >>= 20; + g >>= 20; + b >>= 20; + if ((unsigned)r > 255) { if (r < 0) r = 0; else r = 255; } + if ((unsigned)g > 255) { if (g < 0) g = 0; else g = 255; } + if ((unsigned)b > 255) { if (b < 0) b = 0; else b = 255; } + out[0] = (stbi_uc)r; + out[1] = (stbi_uc)g; + out[2] = (stbi_uc)b; + out[3] = 255; + out += step; + } +} +#endif + +#if defined(STBI_SSE2) || defined(STBI_NEON) +static void stbi__YCbCr_to_RGB_simd(stbi_uc *out, stbi_uc const *y, stbi_uc const *pcb, stbi_uc const *pcr, int count, int step) +{ + int i = 0; + +#ifdef STBI_SSE2 + // step == 3 is pretty ugly on the final interleave, and i'm not convinced + // it's useful in practice (you wouldn't use it for textures, for example). + // so just accelerate step == 4 case. + if (step == 4) { + // this is a fairly straightforward implementation and not super-optimized. 
+ __m128i signflip = _mm_set1_epi8(-0x80); + __m128i cr_const0 = _mm_set1_epi16((short)(1.40200f*4096.0f + 0.5f)); + __m128i cr_const1 = _mm_set1_epi16(-(short)(0.71414f*4096.0f + 0.5f)); + __m128i cb_const0 = _mm_set1_epi16(-(short)(0.34414f*4096.0f + 0.5f)); + __m128i cb_const1 = _mm_set1_epi16((short)(1.77200f*4096.0f + 0.5f)); + __m128i y_bias = _mm_set1_epi8((char)(unsigned char)128); + __m128i xw = _mm_set1_epi16(255); // alpha channel + + for (; i + 7 < count; i += 8) { + // load + __m128i y_bytes = _mm_loadl_epi64((__m128i *) (y + i)); + __m128i cr_bytes = _mm_loadl_epi64((__m128i *) (pcr + i)); + __m128i cb_bytes = _mm_loadl_epi64((__m128i *) (pcb + i)); + __m128i cr_biased = _mm_xor_si128(cr_bytes, signflip); // -128 + __m128i cb_biased = _mm_xor_si128(cb_bytes, signflip); // -128 + + // unpack to short (and left-shift cr, cb by 8) + __m128i yw = _mm_unpacklo_epi8(y_bias, y_bytes); + __m128i crw = _mm_unpacklo_epi8(_mm_setzero_si128(), cr_biased); + __m128i cbw = _mm_unpacklo_epi8(_mm_setzero_si128(), cb_biased); + + // color transform + __m128i yws = _mm_srli_epi16(yw, 4); + __m128i cr0 = _mm_mulhi_epi16(cr_const0, crw); + __m128i cb0 = _mm_mulhi_epi16(cb_const0, cbw); + __m128i cb1 = _mm_mulhi_epi16(cbw, cb_const1); + __m128i cr1 = _mm_mulhi_epi16(crw, cr_const1); + __m128i rws = _mm_add_epi16(cr0, yws); + __m128i gwt = _mm_add_epi16(cb0, yws); + __m128i bws = _mm_add_epi16(yws, cb1); + __m128i gws = _mm_add_epi16(gwt, cr1); + + // descale + __m128i rw = _mm_srai_epi16(rws, 4); + __m128i bw = _mm_srai_epi16(bws, 4); + __m128i gw = _mm_srai_epi16(gws, 4); + + // back to byte, set up for transpose + __m128i brb = _mm_packus_epi16(rw, bw); + __m128i gxb = _mm_packus_epi16(gw, xw); + + // transpose to interleave channels + __m128i t0 = _mm_unpacklo_epi8(brb, gxb); + __m128i t1 = _mm_unpackhi_epi8(brb, gxb); + __m128i o0 = _mm_unpacklo_epi16(t0, t1); + __m128i o1 = _mm_unpackhi_epi16(t0, t1); + + // store + _mm_storeu_si128((__m128i *) (out + 0), o0); + 
_mm_storeu_si128((__m128i *) (out + 16), o1); + out += 32; + } + } +#endif + +#ifdef STBI_NEON + // in this version, step=3 support would be easy to add. but is there demand? + if (step == 4) { + // this is a fairly straightforward implementation and not super-optimized. + uint8x8_t signflip = vdup_n_u8(0x80); + int16x8_t cr_const0 = vdupq_n_s16((short)(1.40200f*4096.0f + 0.5f)); + int16x8_t cr_const1 = vdupq_n_s16(-(short)(0.71414f*4096.0f + 0.5f)); + int16x8_t cb_const0 = vdupq_n_s16(-(short)(0.34414f*4096.0f + 0.5f)); + int16x8_t cb_const1 = vdupq_n_s16((short)(1.77200f*4096.0f + 0.5f)); + + for (; i + 7 < count; i += 8) { + // load + uint8x8_t y_bytes = vld1_u8(y + i); + uint8x8_t cr_bytes = vld1_u8(pcr + i); + uint8x8_t cb_bytes = vld1_u8(pcb + i); + int8x8_t cr_biased = vreinterpret_s8_u8(vsub_u8(cr_bytes, signflip)); + int8x8_t cb_biased = vreinterpret_s8_u8(vsub_u8(cb_bytes, signflip)); + + // expand to s16 + int16x8_t yws = vreinterpretq_s16_u16(vshll_n_u8(y_bytes, 4)); + int16x8_t crw = vshll_n_s8(cr_biased, 7); + int16x8_t cbw = vshll_n_s8(cb_biased, 7); + + // color transform + int16x8_t cr0 = vqdmulhq_s16(crw, cr_const0); + int16x8_t cb0 = vqdmulhq_s16(cbw, cb_const0); + int16x8_t cr1 = vqdmulhq_s16(crw, cr_const1); + int16x8_t cb1 = vqdmulhq_s16(cbw, cb_const1); + int16x8_t rws = vaddq_s16(yws, cr0); + int16x8_t gws = vaddq_s16(vaddq_s16(yws, cb0), cr1); + int16x8_t bws = vaddq_s16(yws, cb1); + + // undo scaling, round, convert to byte + uint8x8x4_t o; + o.val[0] = vqrshrun_n_s16(rws, 4); + o.val[1] = vqrshrun_n_s16(gws, 4); + o.val[2] = vqrshrun_n_s16(bws, 4); + o.val[3] = vdup_n_u8(255); + + // store, interleaving r/g/b/a + vst4_u8(out, o); + out += 8 * 4; + } + } +#endif + + for (; i < count; ++i) { + int y_fixed = (y[i] << 20) + (1 << 19); // rounding + int r, g, b; + int cr = pcr[i] - 128; + int cb = pcb[i] - 128; + r = y_fixed + cr* float2fixed(1.40200f); + g = y_fixed + cr*-float2fixed(0.71414f) + ((cb*-float2fixed(0.34414f)) & 0xffff0000); + b 
= y_fixed + cb* float2fixed(1.77200f); + r >>= 20; + g >>= 20; + b >>= 20; + if ((unsigned)r > 255) { if (r < 0) r = 0; else r = 255; } + if ((unsigned)g > 255) { if (g < 0) g = 0; else g = 255; } + if ((unsigned)b > 255) { if (b < 0) b = 0; else b = 255; } + out[0] = (stbi_uc)r; + out[1] = (stbi_uc)g; + out[2] = (stbi_uc)b; + out[3] = 255; + out += step; + } +} +#endif + +// set up the kernels +static void stbi__setup_jpeg(stbi__jpeg *j) +{ + j->idct_block_kernel = stbi__idct_block; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_row; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2; + +#ifdef STBI_SSE2 + if (stbi__sse2_available()) { + j->idct_block_kernel = stbi__idct_simd; +#ifndef STBI_JPEG_OLD + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; +#endif + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; + } +#endif + +#ifdef STBI_NEON + j->idct_block_kernel = stbi__idct_simd; +#ifndef STBI_JPEG_OLD + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; +#endif + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; +#endif +} + +// clean up the temporary component buffers +static void stbi__cleanup_jpeg(stbi__jpeg *j) +{ + stbi__free_jpeg_components(j, j->s->img_n, 0); +} + +typedef struct +{ + resample_row_func resample; + stbi_uc *line0, *line1; + int hs, vs; // expansion factor in each axis + int w_lores; // horizontal pixels pre-expansion + int ystep; // how far through vertical expansion we are + int ypos; // which pre-expansion row we're on +} stbi__resample; + +static stbi_uc *load_jpeg_image(stbi__jpeg *z, int *out_x, int *out_y, int *comp, int req_comp) +{ + int n, decode_n; + z->s->img_n = 0; // make stbi__cleanup_jpeg safe + + // validate req_comp + if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error"); + + // load a jpeg image from whichever source, but leave in YCbCr format + if (!stbi__decode_jpeg_image(z)) { stbi__cleanup_jpeg(z); return NULL; } + + // determine actual number of components to 
generate + n = req_comp ? req_comp : z->s->img_n; + + if (z->s->img_n == 3 && n < 3) + decode_n = 1; + else + decode_n = z->s->img_n; + + // resample and color-convert + { + int k; + unsigned int i, j; + stbi_uc *output; + stbi_uc *coutput[4]; + + stbi__resample res_comp[4]; + + for (k = 0; k < decode_n; ++k) { + stbi__resample *r = &res_comp[k]; + + // allocate line buffer big enough for upsampling off the edges + // with upsample factor of 4 + z->img_comp[k].linebuf = (stbi_uc *)stbi__malloc(z->s->img_x + 3); + if (!z->img_comp[k].linebuf) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); } + + r->hs = z->img_h_max / z->img_comp[k].h; + r->vs = z->img_v_max / z->img_comp[k].v; + r->ystep = r->vs >> 1; + r->w_lores = (z->s->img_x + r->hs - 1) / r->hs; + r->ypos = 0; + r->line0 = r->line1 = z->img_comp[k].data; + + if (r->hs == 1 && r->vs == 1) r->resample = resample_row_1; + else if (r->hs == 1 && r->vs == 2) r->resample = stbi__resample_row_v_2; + else if (r->hs == 2 && r->vs == 1) r->resample = stbi__resample_row_h_2; + else if (r->hs == 2 && r->vs == 2) r->resample = z->resample_row_hv_2_kernel; + else r->resample = stbi__resample_row_generic; + } + + // can't error after this so, this is safe + output = (stbi_uc *)stbi__malloc_mad3(n, z->s->img_x, z->s->img_y, 1); + if (!output) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); } + + // now go ahead and resample + for (j = 0; j < z->s->img_y; ++j) { + stbi_uc *out = output + n * z->s->img_x * j; + for (k = 0; k < decode_n; ++k) { + stbi__resample *r = &res_comp[k]; + int y_bot = r->ystep >= (r->vs >> 1); + coutput[k] = r->resample(z->img_comp[k].linebuf, + y_bot ? r->line1 : r->line0, + y_bot ? 
r->line0 : r->line1, + r->w_lores, r->hs); + if (++r->ystep >= r->vs) { + r->ystep = 0; + r->line0 = r->line1; + if (++r->ypos < z->img_comp[k].y) + r->line1 += z->img_comp[k].w2; + } + } + if (n >= 3) { + stbi_uc *y = coutput[0]; + if (z->s->img_n == 3) { + if (z->rgb == 3) { + for (i = 0; i < z->s->img_x; ++i) { + out[0] = y[i]; + out[1] = coutput[1][i]; + out[2] = coutput[2][i]; + out[3] = 255; + out += n; + } + } + else { + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + } + } + else + for (i = 0; i < z->s->img_x; ++i) { + out[0] = out[1] = out[2] = y[i]; + out[3] = 255; // not used if n==3 + out += n; + } + } + else { + stbi_uc *y = coutput[0]; + if (n == 1) + for (i = 0; i < z->s->img_x; ++i) out[i] = y[i]; + else + for (i = 0; i < z->s->img_x; ++i) *out++ = y[i], *out++ = 255; + } + } + stbi__cleanup_jpeg(z); + *out_x = z->s->img_x; + *out_y = z->s->img_y; + if (comp) *comp = z->s->img_n; // report original components, not output + return output; + } +} + +static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + unsigned char* result; + stbi__jpeg* j = (stbi__jpeg*)stbi__malloc(sizeof(stbi__jpeg)); + j->s = s; + stbi__setup_jpeg(j); + result = load_jpeg_image(j, x, y, comp, req_comp); + STBI_FREE(j); + return result; +} + +static int stbi__jpeg_test(stbi__context *s) +{ + int r; + stbi__jpeg j; + j.s = s; + stbi__setup_jpeg(&j); + r = stbi__decode_jpeg_header(&j, STBI__SCAN_type); + stbi__rewind(s); + return r; +} + +static int stbi__jpeg_info_raw(stbi__jpeg *j, int *x, int *y, int *comp) +{ + if (!stbi__decode_jpeg_header(j, STBI__SCAN_header)) { + stbi__rewind(j->s); + return 0; + } + if (x) *x = j->s->img_x; + if (y) *y = j->s->img_y; + if (comp) *comp = j->s->img_n; + return 1; +} + +static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp) +{ + int result; + stbi__jpeg* j = (stbi__jpeg*)(stbi__malloc(sizeof(stbi__jpeg))); + j->s = s; + result = 
stbi__jpeg_info_raw(j, x, y, comp); + STBI_FREE(j); + return result; +} +#endif + +// public domain zlib decode v0.2 Sean Barrett 2006-11-18 +// simple implementation +// - all input must be provided in an upfront buffer +// - all output is written to a single output buffer (can malloc/realloc) +// performance +// - fast huffman + +#ifndef STBI_NO_ZLIB + +// fast-way is faster to check than jpeg huffman, but slow way is slower +#define STBI__ZFAST_BITS 9 // accelerate all cases in default tables +#define STBI__ZFAST_MASK ((1 << STBI__ZFAST_BITS) - 1) + +// zlib-style huffman encoding +// (jpegs packs from left, zlib from right, so can't share code) +typedef struct +{ + stbi__uint16 fast[1 << STBI__ZFAST_BITS]; + stbi__uint16 firstcode[16]; + int maxcode[17]; + stbi__uint16 firstsymbol[16]; + stbi_uc size[288]; + stbi__uint16 value[288]; +} stbi__zhuffman; + +stbi_inline static int stbi__bitreverse16(int n) +{ + n = ((n & 0xAAAA) >> 1) | ((n & 0x5555) << 1); + n = ((n & 0xCCCC) >> 2) | ((n & 0x3333) << 2); + n = ((n & 0xF0F0) >> 4) | ((n & 0x0F0F) << 4); + n = ((n & 0xFF00) >> 8) | ((n & 0x00FF) << 8); + return n; +} + +stbi_inline static int stbi__bit_reverse(int v, int bits) +{ + STBI_ASSERT(bits <= 16); + // to bit reverse n bits, reverse 16 and shift + // e.g. 
11 bits, bit reverse and shift away 5 + return stbi__bitreverse16(v) >> (16 - bits); +} + +static int stbi__zbuild_huffman(stbi__zhuffman *z, stbi_uc *sizelist, int num) +{ + int i, k = 0; + int code, next_code[16], sizes[17]; + + // DEFLATE spec for generating codes + memset(sizes, 0, sizeof(sizes)); + memset(z->fast, 0, sizeof(z->fast)); + for (i = 0; i < num; ++i) + ++sizes[sizelist[i]]; + sizes[0] = 0; + for (i = 1; i < 16; ++i) + if (sizes[i] >(1 << i)) + return stbi__err("bad sizes", "Corrupt PNG"); + code = 0; + for (i = 1; i < 16; ++i) { + next_code[i] = code; + z->firstcode[i] = (stbi__uint16)code; + z->firstsymbol[i] = (stbi__uint16)k; + code = (code + sizes[i]); + if (sizes[i]) + if (code - 1 >= (1 << i)) return stbi__err("bad codelengths", "Corrupt PNG"); + z->maxcode[i] = code << (16 - i); // preshift for inner loop + code <<= 1; + k += sizes[i]; + } + z->maxcode[16] = 0x10000; // sentinel + for (i = 0; i < num; ++i) { + int s = sizelist[i]; + if (s) { + int c = next_code[s] - z->firstcode[s] + z->firstsymbol[s]; + stbi__uint16 fastv = (stbi__uint16)((s << 9) | i); + z->size[c] = (stbi_uc)s; + z->value[c] = (stbi__uint16)i; + if (s <= STBI__ZFAST_BITS) { + int j = stbi__bit_reverse(next_code[s], s); + while (j < (1 << STBI__ZFAST_BITS)) { + z->fast[j] = fastv; + j += (1 << s); + } + } + ++next_code[s]; + } + } + return 1; +} + +// zlib-from-memory implementation for PNG reading +// because PNG allows splitting the zlib stream arbitrarily, +// and it's annoying structurally to have PNG call ZLIB call PNG, +// we require PNG read all the IDATs and combine them into a single +// memory buffer + +typedef struct +{ + stbi_uc *zbuffer, *zbuffer_end; + int num_bits; + stbi__uint32 code_buffer; + + char *zout; + char *zout_start; + char *zout_end; + int z_expandable; + + stbi__zhuffman z_length, z_distance; +} stbi__zbuf; + +stbi_inline static stbi_uc stbi__zget8(stbi__zbuf *z) +{ + if (z->zbuffer >= z->zbuffer_end) return 0; + return *z->zbuffer++; +} + 
+static void stbi__fill_bits(stbi__zbuf *z) +{ + do { + STBI_ASSERT(z->code_buffer < (1U << z->num_bits)); + z->code_buffer |= (unsigned int)stbi__zget8(z) << z->num_bits; + z->num_bits += 8; + } while (z->num_bits <= 24); +} + +stbi_inline static unsigned int stbi__zreceive(stbi__zbuf *z, int n) +{ + unsigned int k; + if (z->num_bits < n) stbi__fill_bits(z); + k = z->code_buffer & ((1 << n) - 1); + z->code_buffer >>= n; + z->num_bits -= n; + return k; +} + +static int stbi__zhuffman_decode_slowpath(stbi__zbuf *a, stbi__zhuffman *z) +{ + int b, s, k; + // not resolved by fast table, so compute it the slow way + // use jpeg approach, which requires MSbits at top + k = stbi__bit_reverse(a->code_buffer, 16); + for (s = STBI__ZFAST_BITS + 1; ; ++s) + if (k < z->maxcode[s]) + break; + if (s == 16) return -1; // invalid code! + // code size is s, so: + b = (k >> (16 - s)) - z->firstcode[s] + z->firstsymbol[s]; + STBI_ASSERT(z->size[b] == s); + a->code_buffer >>= s; + a->num_bits -= s; + return z->value[b]; +} + +stbi_inline static int stbi__zhuffman_decode(stbi__zbuf *a, stbi__zhuffman *z) +{ + int b, s; + if (a->num_bits < 16) stbi__fill_bits(a); + b = z->fast[a->code_buffer & STBI__ZFAST_MASK]; + if (b) { + s = b >> 9; + a->code_buffer >>= s; + a->num_bits -= s; + return b & 511; + } + return stbi__zhuffman_decode_slowpath(a, z); +} + +static int stbi__zexpand(stbi__zbuf *z, char *zout, int n) // need to make room for n bytes +{ + char *q; + int cur, limit, old_limit; + z->zout = zout; + if (!z->z_expandable) return stbi__err("output buffer limit", "Corrupt PNG"); + cur = (int)(z->zout - z->zout_start); + limit = old_limit = (int)(z->zout_end - z->zout_start); + while (cur + n > limit) + limit *= 2; + q = (char *)STBI_REALLOC_SIZED(z->zout_start, old_limit, limit); + STBI_NOTUSED(old_limit); + if (q == NULL) return stbi__err("outofmem", "Out of memory"); + z->zout_start = q; + z->zout = q + cur; + z->zout_end = q + limit; + return 1; +} + +static int 
stbi__zlength_base[31] = { + 3,4,5,6,7,8,9,10,11,13, + 15,17,19,23,27,31,35,43,51,59, + 67,83,99,115,131,163,195,227,258,0,0 }; + +static int stbi__zlength_extra[31] = +{ 0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0 }; + +static int stbi__zdist_base[32] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193, +257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0 }; + +static int stbi__zdist_extra[32] = +{ 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13 }; + +static int stbi__parse_huffman_block(stbi__zbuf *a) +{ + char *zout = a->zout; + for (;;) { + int z = stbi__zhuffman_decode(a, &a->z_length); + if (z < 256) { + if (z < 0) return stbi__err("bad huffman code", "Corrupt PNG"); // error in huffman codes + if (zout >= a->zout_end) { + if (!stbi__zexpand(a, zout, 1)) return 0; + zout = a->zout; + } + *zout++ = (char)z; + } + else { + stbi_uc *p; + int len, dist; + if (z == 256) { + a->zout = zout; + return 1; + } + z -= 257; + len = stbi__zlength_base[z]; + if (stbi__zlength_extra[z]) len += stbi__zreceive(a, stbi__zlength_extra[z]); + z = stbi__zhuffman_decode(a, &a->z_distance); + if (z < 0) return stbi__err("bad huffman code", "Corrupt PNG"); + dist = stbi__zdist_base[z]; + if (stbi__zdist_extra[z]) dist += stbi__zreceive(a, stbi__zdist_extra[z]); + if (zout - a->zout_start < dist) return stbi__err("bad dist", "Corrupt PNG"); + if (zout + len > a->zout_end) { + if (!stbi__zexpand(a, zout, len)) return 0; + zout = a->zout; + } + p = (stbi_uc *)(zout - dist); + if (dist == 1) { // run of one byte; common in images. 
+ stbi_uc v = *p; + if (len) { do *zout++ = v; while (--len); } + } + else { + if (len) { do *zout++ = *p++; while (--len); } + } + } + } +} + +static int stbi__compute_huffman_codes(stbi__zbuf *a) +{ + static stbi_uc length_dezigzag[19] = { 16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15 }; + stbi__zhuffman z_codelength; + stbi_uc lencodes[286 + 32 + 137];//padding for maximum single op + stbi_uc codelength_sizes[19]; + int i, n; + + int hlit = stbi__zreceive(a, 5) + 257; + int hdist = stbi__zreceive(a, 5) + 1; + int hclen = stbi__zreceive(a, 4) + 4; + int ntot = hlit + hdist; + + memset(codelength_sizes, 0, sizeof(codelength_sizes)); + for (i = 0; i < hclen; ++i) { + int s = stbi__zreceive(a, 3); + codelength_sizes[length_dezigzag[i]] = (stbi_uc)s; + } + if (!stbi__zbuild_huffman(&z_codelength, codelength_sizes, 19)) return 0; + + n = 0; + while (n < ntot) { + int c = stbi__zhuffman_decode(a, &z_codelength); + if (c < 0 || c >= 19) return stbi__err("bad codelengths", "Corrupt PNG"); + if (c < 16) + lencodes[n++] = (stbi_uc)c; + else { + stbi_uc fill = 0; + if (c == 16) { + c = stbi__zreceive(a, 2) + 3; + if (n == 0) return stbi__err("bad codelengths", "Corrupt PNG"); + fill = lencodes[n - 1]; + } + else if (c == 17) + c = stbi__zreceive(a, 3) + 3; + else { + STBI_ASSERT(c == 18); + c = stbi__zreceive(a, 7) + 11; + } + if (ntot - n < c) return stbi__err("bad codelengths", "Corrupt PNG"); + memset(lencodes + n, fill, c); + n += c; + } + } + if (n != ntot) return stbi__err("bad codelengths", "Corrupt PNG"); + if (!stbi__zbuild_huffman(&a->z_length, lencodes, hlit)) return 0; + if (!stbi__zbuild_huffman(&a->z_distance, lencodes + hlit, hdist)) return 0; + return 1; +} + +static int stbi__parse_uncompressed_block(stbi__zbuf *a) +{ + stbi_uc header[4]; + int len, nlen, k; + if (a->num_bits & 7) + stbi__zreceive(a, a->num_bits & 7); // discard + // drain the bit-packed data into header + k = 0; + while (a->num_bits > 0) { + header[k++] = (stbi_uc)(a->code_buffer & 255); 
// suppress MSVC run-time check + a->code_buffer >>= 8; + a->num_bits -= 8; + } + STBI_ASSERT(a->num_bits == 0); + // now fill header the normal way + while (k < 4) + header[k++] = stbi__zget8(a); + len = header[1] * 256 + header[0]; + nlen = header[3] * 256 + header[2]; + if (nlen != (len ^ 0xffff)) return stbi__err("zlib corrupt", "Corrupt PNG"); + if (a->zbuffer + len > a->zbuffer_end) return stbi__err("read past buffer", "Corrupt PNG"); + if (a->zout + len > a->zout_end) + if (!stbi__zexpand(a, a->zout, len)) return 0; + memcpy(a->zout, a->zbuffer, len); + a->zbuffer += len; + a->zout += len; + return 1; +} + +static int stbi__parse_zlib_header(stbi__zbuf *a) +{ + int cmf = stbi__zget8(a); + int cm = cmf & 15; + /* int cinfo = cmf >> 4; */ + int flg = stbi__zget8(a); + if ((cmf * 256 + flg) % 31 != 0) return stbi__err("bad zlib header", "Corrupt PNG"); // zlib spec + if (flg & 32) return stbi__err("no preset dict", "Corrupt PNG"); // preset dictionary not allowed in png + if (cm != 8) return stbi__err("bad compression", "Corrupt PNG"); // DEFLATE required for png + // window = 1 << (8 + cinfo)... 
but who cares, we fully buffer output + return 1; +} + +// @TODO: should statically initialize these for optimal thread safety +static stbi_uc stbi__zdefault_length[288], stbi__zdefault_distance[32]; +static void stbi__init_zdefaults(void) +{ + int i; // use <= to match clearly with spec + for (i = 0; i <= 143; ++i) stbi__zdefault_length[i] = 8; + for (; i <= 255; ++i) stbi__zdefault_length[i] = 9; + for (; i <= 279; ++i) stbi__zdefault_length[i] = 7; + for (; i <= 287; ++i) stbi__zdefault_length[i] = 8; + + for (i = 0; i <= 31; ++i) stbi__zdefault_distance[i] = 5; +} + +static int stbi__parse_zlib(stbi__zbuf *a, int parse_header) +{ + int final, type; + if (parse_header) + if (!stbi__parse_zlib_header(a)) return 0; + a->num_bits = 0; + a->code_buffer = 0; + do { + final = stbi__zreceive(a, 1); + type = stbi__zreceive(a, 2); + if (type == 0) { + if (!stbi__parse_uncompressed_block(a)) return 0; + } + else if (type == 3) { + return 0; + } + else { + if (type == 1) { + // use fixed code lengths + if (!stbi__zdefault_distance[31]) stbi__init_zdefaults(); + if (!stbi__zbuild_huffman(&a->z_length, stbi__zdefault_length, 288)) return 0; + if (!stbi__zbuild_huffman(&a->z_distance, stbi__zdefault_distance, 32)) return 0; + } + else { + if (!stbi__compute_huffman_codes(a)) return 0; + } + if (!stbi__parse_huffman_block(a)) return 0; + } + } while (!final); + return 1; +} + +static int stbi__do_zlib(stbi__zbuf *a, char *obuf, int olen, int exp, int parse_header) +{ + a->zout_start = obuf; + a->zout = obuf; + a->zout_end = obuf + olen; + a->z_expandable = exp; + + return stbi__parse_zlib(a, parse_header); +} + +STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen) +{ + stbi__zbuf a; + char *p = (char *)stbi__malloc(initial_size); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *)buffer; + a.zbuffer_end = (stbi_uc *)buffer + len; + if (stbi__do_zlib(&a, p, initial_size, 1, 1)) { + if (outlen) *outlen = (int)(a.zout - 
a.zout_start); + return a.zout_start; + } + else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF char *stbi_zlib_decode_malloc(char const *buffer, int len, int *outlen) +{ + return stbi_zlib_decode_malloc_guesssize(buffer, len, 16384, outlen); +} + +STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header) +{ + stbi__zbuf a; + char *p = (char *)stbi__malloc(initial_size); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *)buffer; + a.zbuffer_end = (stbi_uc *)buffer + len; + if (stbi__do_zlib(&a, p, initial_size, 1, parse_header)) { + if (outlen) *outlen = (int)(a.zout - a.zout_start); + return a.zout_start; + } + else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, char const *ibuffer, int ilen) +{ + stbi__zbuf a; + a.zbuffer = (stbi_uc *)ibuffer; + a.zbuffer_end = (stbi_uc *)ibuffer + ilen; + if (stbi__do_zlib(&a, obuffer, olen, 0, 1)) + return (int)(a.zout - a.zout_start); + else + return -1; +} + +STBIDEF char *stbi_zlib_decode_noheader_malloc(char const *buffer, int len, int *outlen) +{ + stbi__zbuf a; + char *p = (char *)stbi__malloc(16384); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *)buffer; + a.zbuffer_end = (stbi_uc *)buffer + len; + if (stbi__do_zlib(&a, p, 16384, 1, 0)) { + if (outlen) *outlen = (int)(a.zout - a.zout_start); + return a.zout_start; + } + else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen) +{ + stbi__zbuf a; + a.zbuffer = (stbi_uc *)ibuffer; + a.zbuffer_end = (stbi_uc *)ibuffer + ilen; + if (stbi__do_zlib(&a, obuffer, olen, 0, 0)) + return (int)(a.zout - a.zout_start); + else + return -1; +} +#endif + +// public domain "baseline" PNG decoder v0.10 Sean Barrett 2006-11-18 +// simple implementation +// - only 8-bit samples +// - no CRC checking +// - allocates lots 
of intermediate memory +// - avoids problem of streaming data between subsystems +// - avoids explicit window management +// performance +// - uses stb_zlib, a PD zlib implementation with fast huffman decoding + +#ifndef STBI_NO_PNG +typedef struct +{ + stbi__uint32 length; + stbi__uint32 type; +} stbi__pngchunk; + +static stbi__pngchunk stbi__get_chunk_header(stbi__context *s) +{ + stbi__pngchunk c; + c.length = stbi__get32be(s); + c.type = stbi__get32be(s); + return c; +} + +static int stbi__check_png_header(stbi__context *s) +{ + static stbi_uc png_sig[8] = { 137,80,78,71,13,10,26,10 }; + int i; + for (i = 0; i < 8; ++i) + if (stbi__get8(s) != png_sig[i]) return stbi__err("bad png sig", "Not a PNG"); + return 1; +} + +typedef struct +{ + stbi__context *s; + stbi_uc *idata, *expanded, *out; + int depth; +} stbi__png; + + +enum { + STBI__F_none = 0, + STBI__F_sub = 1, + STBI__F_up = 2, + STBI__F_avg = 3, + STBI__F_paeth = 4, + // synthetic filters used for first scanline to avoid needing a dummy row of 0s + STBI__F_avg_first, + STBI__F_paeth_first +}; + +static stbi_uc first_row_filter[5] = +{ + STBI__F_none, + STBI__F_sub, + STBI__F_none, + STBI__F_avg_first, + STBI__F_paeth_first +}; + +static int stbi__paeth(int a, int b, int c) +{ + int p = a + b - c; + int pa = abs(p - a); + int pb = abs(p - b); + int pc = abs(p - c); + if (pa <= pb && pa <= pc) return a; + if (pb <= pc) return b; + return c; +} + +static stbi_uc stbi__depth_scale_table[9] = { 0, 0xff, 0x55, 0, 0x11, 0,0,0, 0x01 }; + +// create the png data from post-deflated data +static int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 raw_len, int out_n, stbi__uint32 x, stbi__uint32 y, int depth, int color) +{ + int bytes = (depth == 16 ? 
2 : 1); + stbi__context *s = a->s; + stbi__uint32 i, j, stride = x*out_n*bytes; + stbi__uint32 img_len, img_width_bytes; + int k; + int img_n = s->img_n; // copy it into a local for later + + int output_bytes = out_n*bytes; + int filter_bytes = img_n*bytes; + int width = x; + + STBI_ASSERT(out_n == s->img_n || out_n == s->img_n + 1); + a->out = (stbi_uc *)stbi__malloc_mad3(x, y, output_bytes, 0); // extra bytes to write off the end into + if (!a->out) return stbi__err("outofmem", "Out of memory"); + + img_width_bytes = (((img_n * x * depth) + 7) >> 3); + img_len = (img_width_bytes + 1) * y; + if (s->img_x == x && s->img_y == y) { + if (raw_len != img_len) return stbi__err("not enough pixels", "Corrupt PNG"); + } + else { // interlaced: + if (raw_len < img_len) return stbi__err("not enough pixels", "Corrupt PNG"); + } + + for (j = 0; j < y; ++j) { + stbi_uc *cur = a->out + stride*j; + stbi_uc *prior = cur - stride; + int filter = *raw++; + + if (filter > 4) + return stbi__err("invalid filter", "Corrupt PNG"); + + if (depth < 8) { + STBI_ASSERT(img_width_bytes <= x); + cur += x*out_n - img_width_bytes; // store output to the rightmost img_len bytes, so we can decode in place + filter_bytes = 1; + width = img_width_bytes; + } + + // if first row, use special filter that doesn't sample previous row + if (j == 0) filter = first_row_filter[filter]; + + // handle first byte explicitly + for (k = 0; k < filter_bytes; ++k) { + switch (filter) { + case STBI__F_none: cur[k] = raw[k]; break; + case STBI__F_sub: cur[k] = raw[k]; break; + case STBI__F_up: cur[k] = STBI__BYTECAST(raw[k] + prior[k]); break; + case STBI__F_avg: cur[k] = STBI__BYTECAST(raw[k] + (prior[k] >> 1)); break; + case STBI__F_paeth: cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(0, prior[k], 0)); break; + case STBI__F_avg_first: cur[k] = raw[k]; break; + case STBI__F_paeth_first: cur[k] = raw[k]; break; + } + } + + if (depth == 8) { + if (img_n != out_n) + cur[img_n] = 255; // first pixel + raw += img_n; + cur 
+= out_n; + prior += out_n; + } + else if (depth == 16) { + if (img_n != out_n) { + cur[filter_bytes] = 255; // first pixel top byte + cur[filter_bytes + 1] = 255; // first pixel bottom byte + } + raw += filter_bytes; + cur += output_bytes; + prior += output_bytes; + } + else { + raw += 1; + cur += 1; + prior += 1; + } + + // this is a little gross, so that we don't switch per-pixel or per-component + if (depth < 8 || img_n == out_n) { + int nk = (width - 1)*filter_bytes; +#define STBI__CASE(f) \ + case f: \ + for (k=0; k < nk; ++k) + switch (filter) { + // "none" filter turns into a memcpy here; make that explicit. + case STBI__F_none: memcpy(cur, raw, nk); break; + STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k - filter_bytes]); } break; + STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break; + STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k - filter_bytes]) >> 1)); } break; + STBI__CASE(STBI__F_paeth) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k - filter_bytes], prior[k], prior[k - filter_bytes])); } break; + STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k - filter_bytes] >> 1)); } break; + STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k - filter_bytes], 0, 0)); } break; + } +#undef STBI__CASE + raw += nk; + } + else { + STBI_ASSERT(img_n + 1 == out_n); +#define STBI__CASE(f) \ + case f: \ + for (i=x-1; i >= 1; --i, cur[filter_bytes]=255,raw+=filter_bytes,cur+=output_bytes,prior+=output_bytes) \ + for (k=0; k < filter_bytes; ++k) + switch (filter) { + STBI__CASE(STBI__F_none) { cur[k] = raw[k]; } break; + STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k - output_bytes]); } break; + STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break; + STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k - output_bytes]) >> 1)); } break; + STBI__CASE(STBI__F_paeth) { cur[k] = 
STBI__BYTECAST(raw[k] + stbi__paeth(cur[k - output_bytes], prior[k], prior[k - output_bytes])); } break; + STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k - output_bytes] >> 1)); } break; + STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k - output_bytes], 0, 0)); } break; + } +#undef STBI__CASE + + // the loop above sets the high byte of the pixels' alpha, but for + // 16 bit png files we also need the low byte set. we'll do that here. + if (depth == 16) { + cur = a->out + stride*j; // start at the beginning of the row again + for (i = 0; i < x; ++i, cur += output_bytes) { + cur[filter_bytes + 1] = 255; + } + } + } + } + + // we make a separate pass to expand bits to pixels; for performance, + // this could run two scanlines behind the above code, so it won't + // intefere with filtering but will still be in the cache. + if (depth < 8) { + for (j = 0; j < y; ++j) { + stbi_uc *cur = a->out + stride*j; + stbi_uc *in = a->out + stride*j + x*out_n - img_width_bytes; + // unpack 1/2/4-bit into a 8-bit buffer. allows us to keep the common 8-bit path optimal at minimal cost for 1/2/4-bit + // png guarante byte alignment, if width is not multiple of 8/4/2 we'll decode dummy trailing data that will be skipped in the later loop + stbi_uc scale = (color == 0) ? stbi__depth_scale_table[depth] : 1; // scale grayscale values to 0..255 range + + // note that the final byte might overshoot and write more data than desired. + // we can allocate enough data that this never writes out of memory, but it + // could also overwrite the next scanline. can it overwrite non-empty data + // on the next scanline? yes, consider 1-pixel-wide scanlines with 1-bit-per-pixel. 
+ // so we need to explicitly clamp the final ones + + if (depth == 4) { + for (k = x*img_n; k >= 2; k -= 2, ++in) { + *cur++ = scale * ((*in >> 4)); + *cur++ = scale * ((*in) & 0x0f); + } + if (k > 0) *cur++ = scale * ((*in >> 4)); + } + else if (depth == 2) { + for (k = x*img_n; k >= 4; k -= 4, ++in) { + *cur++ = scale * ((*in >> 6)); + *cur++ = scale * ((*in >> 4) & 0x03); + *cur++ = scale * ((*in >> 2) & 0x03); + *cur++ = scale * ((*in) & 0x03); + } + if (k > 0) *cur++ = scale * ((*in >> 6)); + if (k > 1) *cur++ = scale * ((*in >> 4) & 0x03); + if (k > 2) *cur++ = scale * ((*in >> 2) & 0x03); + } + else if (depth == 1) { + for (k = x*img_n; k >= 8; k -= 8, ++in) { + *cur++ = scale * ((*in >> 7)); + *cur++ = scale * ((*in >> 6) & 0x01); + *cur++ = scale * ((*in >> 5) & 0x01); + *cur++ = scale * ((*in >> 4) & 0x01); + *cur++ = scale * ((*in >> 3) & 0x01); + *cur++ = scale * ((*in >> 2) & 0x01); + *cur++ = scale * ((*in >> 1) & 0x01); + *cur++ = scale * ((*in) & 0x01); + } + if (k > 0) *cur++ = scale * ((*in >> 7)); + if (k > 1) *cur++ = scale * ((*in >> 6) & 0x01); + if (k > 2) *cur++ = scale * ((*in >> 5) & 0x01); + if (k > 3) *cur++ = scale * ((*in >> 4) & 0x01); + if (k > 4) *cur++ = scale * ((*in >> 3) & 0x01); + if (k > 5) *cur++ = scale * ((*in >> 2) & 0x01); + if (k > 6) *cur++ = scale * ((*in >> 1) & 0x01); + } + if (img_n != out_n) { + int q; + // insert alpha = 255 + cur = a->out + stride*j; + if (img_n == 1) { + for (q = x - 1; q >= 0; --q) { + cur[q * 2 + 1] = 255; + cur[q * 2 + 0] = cur[q]; + } + } + else { + STBI_ASSERT(img_n == 3); + for (q = x - 1; q >= 0; --q) { + cur[q * 4 + 3] = 255; + cur[q * 4 + 2] = cur[q * 3 + 2]; + cur[q * 4 + 1] = cur[q * 3 + 1]; + cur[q * 4 + 0] = cur[q * 3 + 0]; + } + } + } + } + } + else if (depth == 16) { + // force the image data from big-endian to platform-native. 
+ // this is done in a separate pass due to the decoding relying + // on the data being untouched, but could probably be done + // per-line during decode if care is taken. + stbi_uc *cur = a->out; + stbi__uint16 *cur16 = (stbi__uint16*)cur; + + for (i = 0; i < x*y*out_n; ++i, cur16++, cur += 2) { + *cur16 = (cur[0] << 8) | cur[1]; + } + } + + return 1; +} + +static int stbi__create_png_image(stbi__png *a, stbi_uc *image_data, stbi__uint32 image_data_len, int out_n, int depth, int color, int interlaced) +{ + int bytes = (depth == 16 ? 2 : 1); + int out_bytes = out_n * bytes; + stbi_uc *final; + int p; + if (!interlaced) + return stbi__create_png_image_raw(a, image_data, image_data_len, out_n, a->s->img_x, a->s->img_y, depth, color); + + // de-interlacing + final = (stbi_uc *)stbi__malloc_mad3(a->s->img_x, a->s->img_y, out_bytes, 0); + for (p = 0; p < 7; ++p) { + int xorig[] = { 0,4,0,2,0,1,0 }; + int yorig[] = { 0,0,4,0,2,0,1 }; + int xspc[] = { 8,8,4,4,2,2,1 }; + int yspc[] = { 8,8,8,4,4,2,2 }; + int i, j, x, y; + // pass1_x[4] = 0, pass1_x[5] = 1, pass1_x[12] = 1 + x = (a->s->img_x - xorig[p] + xspc[p] - 1) / xspc[p]; + y = (a->s->img_y - yorig[p] + yspc[p] - 1) / yspc[p]; + if (x && y) { + stbi__uint32 img_len = ((((a->s->img_n * x * depth) + 7) >> 3) + 1) * y; + if (!stbi__create_png_image_raw(a, image_data, image_data_len, out_n, x, y, depth, color)) { + STBI_FREE(final); + return 0; + } + for (j = 0; j < y; ++j) { + for (i = 0; i < x; ++i) { + int out_y = j*yspc[p] + yorig[p]; + int out_x = i*xspc[p] + xorig[p]; + memcpy(final + out_y*a->s->img_x*out_bytes + out_x*out_bytes, + a->out + (j*x + i)*out_bytes, out_bytes); + } + } + STBI_FREE(a->out); + image_data += img_len; + image_data_len -= img_len; + } + } + a->out = final; + + return 1; +} + +static int stbi__compute_transparency(stbi__png *z, stbi_uc tc[3], int out_n) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi_uc *p = z->out; + + // compute color-based 
transparency, assuming we've + // already got 255 as the alpha value in the output + STBI_ASSERT(out_n == 2 || out_n == 4); + + if (out_n == 2) { + for (i = 0; i < pixel_count; ++i) { + p[1] = (p[0] == tc[0] ? 0 : 255); + p += 2; + } + } + else { + for (i = 0; i < pixel_count; ++i) { + if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) + p[3] = 0; + p += 4; + } + } + return 1; +} + +static int stbi__compute_transparency16(stbi__png *z, stbi__uint16 tc[3], int out_n) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi__uint16 *p = (stbi__uint16*)z->out; + + // compute color-based transparency, assuming we've + // already got 65535 as the alpha value in the output + STBI_ASSERT(out_n == 2 || out_n == 4); + + if (out_n == 2) { + for (i = 0; i < pixel_count; ++i) { + p[1] = (p[0] == tc[0] ? 0 : 65535); + p += 2; + } + } + else { + for (i = 0; i < pixel_count; ++i) { + if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) + p[3] = 0; + p += 4; + } + } + return 1; +} + +static int stbi__expand_png_palette(stbi__png *a, stbi_uc *palette, int len, int pal_img_n) +{ + stbi__uint32 i, pixel_count = a->s->img_x * a->s->img_y; + stbi_uc *p, *temp_out, *orig = a->out; + + p = (stbi_uc *)stbi__malloc_mad2(pixel_count, pal_img_n, 0); + if (p == NULL) return stbi__err("outofmem", "Out of memory"); + + // between here and free(out) below, exitting would leak + temp_out = p; + + if (pal_img_n == 3) { + for (i = 0; i < pixel_count; ++i) { + int n = orig[i] * 4; + p[0] = palette[n]; + p[1] = palette[n + 1]; + p[2] = palette[n + 2]; + p += 3; + } + } + else { + for (i = 0; i < pixel_count; ++i) { + int n = orig[i] * 4; + p[0] = palette[n]; + p[1] = palette[n + 1]; + p[2] = palette[n + 2]; + p[3] = palette[n + 3]; + p += 4; + } + } + STBI_FREE(a->out); + a->out = temp_out; + + STBI_NOTUSED(len); + + return 1; +} + +static int stbi__unpremultiply_on_load = 0; +static int stbi__de_iphone_flag = 0; + +STBIDEF void stbi_set_unpremultiply_on_load(int 
flag_true_if_should_unpremultiply) +{ + stbi__unpremultiply_on_load = flag_true_if_should_unpremultiply; +} + +STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert) +{ + stbi__de_iphone_flag = flag_true_if_should_convert; +} + +static void stbi__de_iphone(stbi__png *z) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi_uc *p = z->out; + + if (s->img_out_n == 3) { // convert bgr to rgb + for (i = 0; i < pixel_count; ++i) { + stbi_uc t = p[0]; + p[0] = p[2]; + p[2] = t; + p += 3; + } + } + else { + STBI_ASSERT(s->img_out_n == 4); + if (stbi__unpremultiply_on_load) { + // convert bgr to rgb and unpremultiply + for (i = 0; i < pixel_count; ++i) { + stbi_uc a = p[3]; + stbi_uc t = p[0]; + if (a) { + p[0] = p[2] * 255 / a; + p[1] = p[1] * 255 / a; + p[2] = t * 255 / a; + } + else { + p[0] = p[2]; + p[2] = t; + } + p += 4; + } + } + else { + // convert bgr to rgb + for (i = 0; i < pixel_count; ++i) { + stbi_uc t = p[0]; + p[0] = p[2]; + p[2] = t; + p += 4; + } + } + } +} + +#define STBI__PNG_TYPE(a,b,c,d) (((a) << 24) + ((b) << 16) + ((c) << 8) + (d)) + +static int stbi__parse_png_file(stbi__png *z, int scan, int req_comp) +{ + stbi_uc palette[1024], pal_img_n = 0; + stbi_uc has_trans = 0, tc[3]; + stbi__uint16 tc16[3]; + stbi__uint32 ioff = 0, idata_limit = 0, i, pal_len = 0; + int first = 1, k, interlace = 0, color = 0, is_iphone = 0; + stbi__context *s = z->s; + + z->expanded = NULL; + z->idata = NULL; + z->out = NULL; + + if (!stbi__check_png_header(s)) return 0; + + if (scan == STBI__SCAN_type) return 1; + + for (;;) { + stbi__pngchunk c = stbi__get_chunk_header(s); + switch (c.type) { + case STBI__PNG_TYPE('C', 'g', 'B', 'I'): + is_iphone = 1; + stbi__skip(s, c.length); + break; + case STBI__PNG_TYPE('I', 'H', 'D', 'R'): { + int comp, filter; + if (!first) return stbi__err("multiple IHDR", "Corrupt PNG"); + first = 0; + if (c.length != 13) return stbi__err("bad IHDR len", "Corrupt PNG"); + s->img_x = 
stbi__get32be(s); if (s->img_x > (1 << 24)) return stbi__err("too large", "Very large image (corrupt?)"); + s->img_y = stbi__get32be(s); if (s->img_y > (1 << 24)) return stbi__err("too large", "Very large image (corrupt?)"); + z->depth = stbi__get8(s); if (z->depth != 1 && z->depth != 2 && z->depth != 4 && z->depth != 8 && z->depth != 16) return stbi__err("1/2/4/8/16-bit only", "PNG not supported: 1/2/4/8/16-bit only"); + color = stbi__get8(s); if (color > 6) return stbi__err("bad ctype", "Corrupt PNG"); + if (color == 3 && z->depth == 16) return stbi__err("bad ctype", "Corrupt PNG"); + if (color == 3) pal_img_n = 3; else if (color & 1) return stbi__err("bad ctype", "Corrupt PNG"); + comp = stbi__get8(s); if (comp) return stbi__err("bad comp method", "Corrupt PNG"); + filter = stbi__get8(s); if (filter) return stbi__err("bad filter method", "Corrupt PNG"); + interlace = stbi__get8(s); if (interlace>1) return stbi__err("bad interlace method", "Corrupt PNG"); + if (!s->img_x || !s->img_y) return stbi__err("0-pixel image", "Corrupt PNG"); + if (!pal_img_n) { + s->img_n = (color & 2 ? 3 : 1) + (color & 4 ? 1 : 0); + if ((1 << 30) / s->img_x / s->img_n < s->img_y) return stbi__err("too large", "Image too large to decode"); + if (scan == STBI__SCAN_header) return 1; + } + else { + // if paletted, then pal_n is our final components, and + // img_n is # components to decompress/filter. 
+ s->img_n = 1; + if ((1 << 30) / s->img_x / 4 < s->img_y) return stbi__err("too large", "Corrupt PNG"); + // if SCAN_header, have to scan to see if we have a tRNS + } + break; + } + + case STBI__PNG_TYPE('P', 'L', 'T', 'E'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (c.length > 256 * 3) return stbi__err("invalid PLTE", "Corrupt PNG"); + pal_len = c.length / 3; + if (pal_len * 3 != c.length) return stbi__err("invalid PLTE", "Corrupt PNG"); + for (i = 0; i < pal_len; ++i) { + palette[i * 4 + 0] = stbi__get8(s); + palette[i * 4 + 1] = stbi__get8(s); + palette[i * 4 + 2] = stbi__get8(s); + palette[i * 4 + 3] = 255; + } + break; + } + + case STBI__PNG_TYPE('t', 'R', 'N', 'S'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (z->idata) return stbi__err("tRNS after IDAT", "Corrupt PNG"); + if (pal_img_n) { + if (scan == STBI__SCAN_header) { s->img_n = 4; return 1; } + if (pal_len == 0) return stbi__err("tRNS before PLTE", "Corrupt PNG"); + if (c.length > pal_len) return stbi__err("bad tRNS len", "Corrupt PNG"); + pal_img_n = 4; + for (i = 0; i < c.length; ++i) + palette[i * 4 + 3] = stbi__get8(s); + } + else { + if (!(s->img_n & 1)) return stbi__err("tRNS with alpha", "Corrupt PNG"); + if (c.length != (stbi__uint32)s->img_n * 2) return stbi__err("bad tRNS len", "Corrupt PNG"); + has_trans = 1; + if (z->depth == 16) { + for (k = 0; k < s->img_n; ++k) tc16[k] = (stbi__uint16)stbi__get16be(s); // copy the values as-is + } + else { + for (k = 0; k < s->img_n; ++k) tc[k] = (stbi_uc)(stbi__get16be(s) & 255) * stbi__depth_scale_table[z->depth]; // non 8-bit images will be larger + } + } + break; + } + + case STBI__PNG_TYPE('I', 'D', 'A', 'T'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (pal_img_n && !pal_len) return stbi__err("no PLTE", "Corrupt PNG"); + if (scan == STBI__SCAN_header) { s->img_n = pal_img_n; return 1; } + if ((int)(ioff + c.length) < (int)ioff) return 0; + if (ioff + c.length > 
idata_limit) { + stbi__uint32 idata_limit_old = idata_limit; + stbi_uc *p; + if (idata_limit == 0) idata_limit = c.length > 4096 ? c.length : 4096; + while (ioff + c.length > idata_limit) + idata_limit *= 2; + STBI_NOTUSED(idata_limit_old); + p = (stbi_uc *)STBI_REALLOC_SIZED(z->idata, idata_limit_old, idata_limit); if (p == NULL) return stbi__err("outofmem", "Out of memory"); + z->idata = p; + } + if (!stbi__getn(s, z->idata + ioff, c.length)) return stbi__err("outofdata", "Corrupt PNG"); + ioff += c.length; + break; + } + + case STBI__PNG_TYPE('I', 'E', 'N', 'D'): { + stbi__uint32 raw_len, bpl; + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (scan != STBI__SCAN_load) return 1; + if (z->idata == NULL) return stbi__err("no IDAT", "Corrupt PNG"); + // initial guess for decoded data size to avoid unnecessary reallocs + bpl = (s->img_x * z->depth + 7) / 8; // bytes per line, per component + raw_len = bpl * s->img_y * s->img_n /* pixels */ + s->img_y /* filter mode per row */; + z->expanded = (stbi_uc *)stbi_zlib_decode_malloc_guesssize_headerflag((char *)z->idata, ioff, raw_len, (int *)&raw_len, !is_iphone); + if (z->expanded == NULL) return 0; // zlib should set error + STBI_FREE(z->idata); z->idata = NULL; + if ((req_comp == s->img_n + 1 && req_comp != 3 && !pal_img_n) || has_trans) + s->img_out_n = s->img_n + 1; + else + s->img_out_n = s->img_n; + if (!stbi__create_png_image(z, z->expanded, raw_len, s->img_out_n, z->depth, color, interlace)) return 0; + if (has_trans) { + if (z->depth == 16) { + if (!stbi__compute_transparency16(z, tc16, s->img_out_n)) return 0; + } + else { + if (!stbi__compute_transparency(z, tc, s->img_out_n)) return 0; + } + } + if (is_iphone && stbi__de_iphone_flag && s->img_out_n > 2) + stbi__de_iphone(z); + if (pal_img_n) { + // pal_img_n == 3 or 4 + s->img_n = pal_img_n; // record the actual colors we had + s->img_out_n = pal_img_n; + if (req_comp >= 3) s->img_out_n = req_comp; + if (!stbi__expand_png_palette(z, 
palette, pal_len, s->img_out_n)) + return 0; + } + STBI_FREE(z->expanded); z->expanded = NULL; + return 1; + } + + default: + // if critical, fail + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if ((c.type & (1 << 29)) == 0) { +#ifndef STBI_NO_FAILURE_STRINGS + // not threadsafe + static char invalid_chunk[] = "XXXX PNG chunk not known"; + invalid_chunk[0] = STBI__BYTECAST(c.type >> 24); + invalid_chunk[1] = STBI__BYTECAST(c.type >> 16); + invalid_chunk[2] = STBI__BYTECAST(c.type >> 8); + invalid_chunk[3] = STBI__BYTECAST(c.type >> 0); +#endif + return stbi__err(invalid_chunk, "PNG not supported: unknown PNG chunk type"); + } + stbi__skip(s, c.length); + break; + } + // end of PNG chunk, read and skip CRC + stbi__get32be(s); + } +} + +static void *stbi__do_png(stbi__png *p, int *x, int *y, int *n, int req_comp, stbi__result_info *ri) +{ + void *result = NULL; + if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error"); + if (stbi__parse_png_file(p, STBI__SCAN_load, req_comp)) { + if (p->depth < 8) + ri->bits_per_channel = 8; + else + ri->bits_per_channel = p->depth; + result = p->out; + p->out = NULL; + if (req_comp && req_comp != p->s->img_out_n) { + if (ri->bits_per_channel == 8) + result = stbi__convert_format((unsigned char *)result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y); + else + result = stbi__convert_format16((stbi__uint16 *)result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y); + p->s->img_out_n = req_comp; + if (result == NULL) return result; + } + *x = p->s->img_x; + *y = p->s->img_y; + if (n) *n = p->s->img_n; + } + STBI_FREE(p->out); p->out = NULL; + STBI_FREE(p->expanded); p->expanded = NULL; + STBI_FREE(p->idata); p->idata = NULL; + + return result; +} + +static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi__png p; + p.s = s; + return stbi__do_png(&p, x, y, comp, req_comp, ri); +} + +static int 
stbi__png_test(stbi__context *s) +{ + int r; + r = stbi__check_png_header(s); + stbi__rewind(s); + return r; +} + +static int stbi__png_info_raw(stbi__png *p, int *x, int *y, int *comp) +{ + if (!stbi__parse_png_file(p, STBI__SCAN_header, 0)) { + stbi__rewind(p->s); + return 0; + } + if (x) *x = p->s->img_x; + if (y) *y = p->s->img_y; + if (comp) *comp = p->s->img_n; + return 1; +} + +static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp) +{ + stbi__png p; + p.s = s; + return stbi__png_info_raw(&p, x, y, comp); +} +#endif + +// Microsoft/Windows BMP image + +#ifndef STBI_NO_BMP +static int stbi__bmp_test_raw(stbi__context *s) +{ + int r; + int sz; + if (stbi__get8(s) != 'B') return 0; + if (stbi__get8(s) != 'M') return 0; + stbi__get32le(s); // discard filesize + stbi__get16le(s); // discard reserved + stbi__get16le(s); // discard reserved + stbi__get32le(s); // discard data offset + sz = stbi__get32le(s); + r = (sz == 12 || sz == 40 || sz == 56 || sz == 108 || sz == 124); + return r; +} + +static int stbi__bmp_test(stbi__context *s) +{ + int r = stbi__bmp_test_raw(s); + stbi__rewind(s); + return r; +} + + +// returns 0..31 for the highest set bit +static int stbi__high_bit(unsigned int z) +{ + int n = 0; + if (z == 0) return -1; + if (z >= 0x10000) n += 16, z >>= 16; + if (z >= 0x00100) n += 8, z >>= 8; + if (z >= 0x00010) n += 4, z >>= 4; + if (z >= 0x00004) n += 2, z >>= 2; + if (z >= 0x00002) n += 1, z >>= 1; + return n; +} + +static int stbi__bitcount(unsigned int a) +{ + a = (a & 0x55555555) + ((a >> 1) & 0x55555555); // max 2 + a = (a & 0x33333333) + ((a >> 2) & 0x33333333); // max 4 + a = (a + (a >> 4)) & 0x0f0f0f0f; // max 8 per 4, now 8 bits + a = (a + (a >> 8)); // max 16 per 8 bits + a = (a + (a >> 16)); // max 32 per 8 bits + return a & 0xff; +} + +static int stbi__shiftsigned(int v, int shift, int bits) +{ + int result; + int z = 0; + + if (shift < 0) v <<= -shift; + else v >>= shift; + result = v; + + z = bits; + while (z < 8) { + 
result += v >> z; + z += bits; + } + return result; +} + +typedef struct +{ + int bpp, offset, hsz; + unsigned int mr, mg, mb, ma, all_a; +} stbi__bmp_data; + +static void *stbi__bmp_parse_header(stbi__context *s, stbi__bmp_data *info) +{ + int hsz; + if (stbi__get8(s) != 'B' || stbi__get8(s) != 'M') return stbi__errpuc("not BMP", "Corrupt BMP"); + stbi__get32le(s); // discard filesize + stbi__get16le(s); // discard reserved + stbi__get16le(s); // discard reserved + info->offset = stbi__get32le(s); + info->hsz = hsz = stbi__get32le(s); + info->mr = info->mg = info->mb = info->ma = 0; + + if (hsz != 12 && hsz != 40 && hsz != 56 && hsz != 108 && hsz != 124) return stbi__errpuc("unknown BMP", "BMP type not supported: unknown"); + if (hsz == 12) { + s->img_x = stbi__get16le(s); + s->img_y = stbi__get16le(s); + } + else { + s->img_x = stbi__get32le(s); + s->img_y = stbi__get32le(s); + } + if (stbi__get16le(s) != 1) return stbi__errpuc("bad BMP", "bad BMP"); + info->bpp = stbi__get16le(s); + if (info->bpp == 1) return stbi__errpuc("monochrome", "BMP type not supported: 1-bit"); + if (hsz != 12) { + int compress = stbi__get32le(s); + if (compress == 1 || compress == 2) return stbi__errpuc("BMP RLE", "BMP type not supported: RLE"); + stbi__get32le(s); // discard sizeof + stbi__get32le(s); // discard hres + stbi__get32le(s); // discard vres + stbi__get32le(s); // discard colorsused + stbi__get32le(s); // discard max important + if (hsz == 40 || hsz == 56) { + if (hsz == 56) { + stbi__get32le(s); + stbi__get32le(s); + stbi__get32le(s); + stbi__get32le(s); + } + if (info->bpp == 16 || info->bpp == 32) { + if (compress == 0) { + if (info->bpp == 32) { + info->mr = 0xffu << 16; + info->mg = 0xffu << 8; + info->mb = 0xffu << 0; + info->ma = 0xffu << 24; + info->all_a = 0; // if all_a is 0 at end, then we loaded alpha channel but it was all 0 + } + else { + info->mr = 31u << 10; + info->mg = 31u << 5; + info->mb = 31u << 0; + } + } + else if (compress == 3) { + info->mr = 
stbi__get32le(s); + info->mg = stbi__get32le(s); + info->mb = stbi__get32le(s); + // not documented, but generated by photoshop and handled by mspaint + if (info->mr == info->mg && info->mg == info->mb) { + // ?!?!? + return stbi__errpuc("bad BMP", "bad BMP"); + } + } + else + return stbi__errpuc("bad BMP", "bad BMP"); + } + } + else { + int i; + if (hsz != 108 && hsz != 124) + return stbi__errpuc("bad BMP", "bad BMP"); + info->mr = stbi__get32le(s); + info->mg = stbi__get32le(s); + info->mb = stbi__get32le(s); + info->ma = stbi__get32le(s); + stbi__get32le(s); // discard color space + for (i = 0; i < 12; ++i) + stbi__get32le(s); // discard color space parameters + if (hsz == 124) { + stbi__get32le(s); // discard rendering intent + stbi__get32le(s); // discard offset of profile data + stbi__get32le(s); // discard size of profile data + stbi__get32le(s); // discard reserved + } + } + } + return (void *)1; +} + + +static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *out; + unsigned int mr = 0, mg = 0, mb = 0, ma = 0, all_a; + stbi_uc pal[256][4]; + int psize = 0, i, j, width; + int flip_vertically, pad, target; + stbi__bmp_data info; + STBI_NOTUSED(ri); + + info.all_a = 255; + if (stbi__bmp_parse_header(s, &info) == NULL) + return NULL; // error code already set + + flip_vertically = ((int)s->img_y) > 0; + s->img_y = abs((int)s->img_y); + + mr = info.mr; + mg = info.mg; + mb = info.mb; + ma = info.ma; + all_a = info.all_a; + + if (info.hsz == 12) { + if (info.bpp < 24) + psize = (info.offset - 14 - 24) / 3; + } + else { + if (info.bpp < 16) + psize = (info.offset - 14 - info.hsz) >> 2; + } + + s->img_n = ma ? 
4 : 3; + if (req_comp && req_comp >= 3) // we can directly decode 3 or 4 + target = req_comp; + else + target = s->img_n; // if they want monochrome, we'll post-convert + + // sanity-check size + if (!stbi__mad3sizes_valid(target, s->img_x, s->img_y, 0)) + return stbi__errpuc("too large", "Corrupt BMP"); + + out = (stbi_uc *)stbi__malloc_mad3(target, s->img_x, s->img_y, 0); + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + if (info.bpp < 16) { + int z = 0; + if (psize == 0 || psize > 256) { STBI_FREE(out); return stbi__errpuc("invalid", "Corrupt BMP"); } + for (i = 0; i < psize; ++i) { + pal[i][2] = stbi__get8(s); + pal[i][1] = stbi__get8(s); + pal[i][0] = stbi__get8(s); + if (info.hsz != 12) stbi__get8(s); + pal[i][3] = 255; + } + stbi__skip(s, info.offset - 14 - info.hsz - psize * (info.hsz == 12 ? 3 : 4)); + if (info.bpp == 4) width = (s->img_x + 1) >> 1; + else if (info.bpp == 8) width = s->img_x; + else { STBI_FREE(out); return stbi__errpuc("bad bpp", "Corrupt BMP"); } + pad = (-width) & 3; + for (j = 0; j < (int)s->img_y; ++j) { + for (i = 0; i < (int)s->img_x; i += 2) { + int v = stbi__get8(s), v2 = 0; + if (info.bpp == 4) { + v2 = v & 15; + v >>= 4; + } + out[z++] = pal[v][0]; + out[z++] = pal[v][1]; + out[z++] = pal[v][2]; + if (target == 4) out[z++] = 255; + if (i + 1 == (int)s->img_x) break; + v = (info.bpp == 8) ? 
stbi__get8(s) : v2; + out[z++] = pal[v][0]; + out[z++] = pal[v][1]; + out[z++] = pal[v][2]; + if (target == 4) out[z++] = 255; + } + stbi__skip(s, pad); + } + } + else { + int rshift = 0, gshift = 0, bshift = 0, ashift = 0, rcount = 0, gcount = 0, bcount = 0, acount = 0; + int z = 0; + int easy = 0; + stbi__skip(s, info.offset - 14 - info.hsz); + if (info.bpp == 24) width = 3 * s->img_x; + else if (info.bpp == 16) width = 2 * s->img_x; + else /* bpp = 32 and pad = 0 */ width = 0; + pad = (-width) & 3; + if (info.bpp == 24) { + easy = 1; + } + else if (info.bpp == 32) { + if (mb == 0xff && mg == 0xff00 && mr == 0x00ff0000 && ma == 0xff000000) + easy = 2; + } + if (!easy) { + if (!mr || !mg || !mb) { STBI_FREE(out); return stbi__errpuc("bad masks", "Corrupt BMP"); } + // right shift amt to put high bit in position #7 + rshift = stbi__high_bit(mr) - 7; rcount = stbi__bitcount(mr); + gshift = stbi__high_bit(mg) - 7; gcount = stbi__bitcount(mg); + bshift = stbi__high_bit(mb) - 7; bcount = stbi__bitcount(mb); + ashift = stbi__high_bit(ma) - 7; acount = stbi__bitcount(ma); + } + for (j = 0; j < (int)s->img_y; ++j) { + if (easy) { + for (i = 0; i < (int)s->img_x; ++i) { + unsigned char a; + out[z + 2] = stbi__get8(s); + out[z + 1] = stbi__get8(s); + out[z + 0] = stbi__get8(s); + z += 3; + a = (easy == 2 ? stbi__get8(s) : 255); + all_a |= a; + if (target == 4) out[z++] = a; + } + } + else { + int bpp = info.bpp; + for (i = 0; i < (int)s->img_x; ++i) { + stbi__uint32 v = (bpp == 16 ? (stbi__uint32)stbi__get16le(s) : stbi__get32le(s)); + int a; + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mr, rshift, rcount)); + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mg, gshift, gcount)); + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mb, bshift, bcount)); + a = (ma ? 
stbi__shiftsigned(v & ma, ashift, acount) : 255); + all_a |= a; + if (target == 4) out[z++] = STBI__BYTECAST(a); + } + } + stbi__skip(s, pad); + } + } + + // if alpha channel is all 0s, replace with all 255s + if (target == 4 && all_a == 0) + for (i = 4 * s->img_x*s->img_y - 1; i >= 0; i -= 4) + out[i] = 255; + + if (flip_vertically) { + stbi_uc t; + for (j = 0; j < (int)s->img_y >> 1; ++j) { + stbi_uc *p1 = out + j *s->img_x*target; + stbi_uc *p2 = out + (s->img_y - 1 - j)*s->img_x*target; + for (i = 0; i < (int)s->img_x*target; ++i) { + t = p1[i], p1[i] = p2[i], p2[i] = t; + } + } + } + + if (req_comp && req_comp != target) { + out = stbi__convert_format(out, target, req_comp, s->img_x, s->img_y); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + + *x = s->img_x; + *y = s->img_y; + if (comp) *comp = s->img_n; + return out; +} +#endif + +// Targa Truevision - TGA +// by Jonathan Dummer +#ifndef STBI_NO_TGA +// returns STBI_rgb or whatever, 0 on error +static int stbi__tga_get_comp(int bits_per_pixel, int is_grey, int* is_rgb16) +{ + // only RGB or RGBA (incl. 
16bit) or grey allowed + if (is_rgb16) *is_rgb16 = 0; + switch (bits_per_pixel) { + case 8: return STBI_grey; + case 16: if (is_grey) return STBI_grey_alpha; + // else: fall-through + case 15: if (is_rgb16) *is_rgb16 = 1; + return STBI_rgb; + case 24: // fall-through + case 32: return bits_per_pixel / 8; + default: return 0; + } +} + +static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp) +{ + int tga_w, tga_h, tga_comp, tga_image_type, tga_bits_per_pixel, tga_colormap_bpp; + int sz, tga_colormap_type; + stbi__get8(s); // discard Offset + tga_colormap_type = stbi__get8(s); // colormap type + if (tga_colormap_type > 1) { + stbi__rewind(s); + return 0; // only RGB or indexed allowed + } + tga_image_type = stbi__get8(s); // image type + if (tga_colormap_type == 1) { // colormapped (paletted) image + if (tga_image_type != 1 && tga_image_type != 9) { + stbi__rewind(s); + return 0; + } + stbi__skip(s, 4); // skip index of first colormap entry and number of entries + sz = stbi__get8(s); // check bits per palette color entry + if ((sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32)) { + stbi__rewind(s); + return 0; + } + stbi__skip(s, 4); // skip image x and y origin + tga_colormap_bpp = sz; + } + else { // "normal" image w/o colormap - only RGB or grey allowed, +/- RLE + if ((tga_image_type != 2) && (tga_image_type != 3) && (tga_image_type != 10) && (tga_image_type != 11)) { + stbi__rewind(s); + return 0; // only RGB or grey allowed, +/- RLE + } + stbi__skip(s, 9); // skip colormap specification and image x/y origin + tga_colormap_bpp = 0; + } + tga_w = stbi__get16le(s); + if (tga_w < 1) { + stbi__rewind(s); + return 0; // test width + } + tga_h = stbi__get16le(s); + if (tga_h < 1) { + stbi__rewind(s); + return 0; // test height + } + tga_bits_per_pixel = stbi__get8(s); // bits per pixel + stbi__get8(s); // ignore alpha bits + if (tga_colormap_bpp != 0) { + if ((tga_bits_per_pixel != 8) && (tga_bits_per_pixel != 16)) { + // when using a 
colormap, tga_bits_per_pixel is the size of the indexes + // I don't think anything but 8 or 16bit indexes makes sense + stbi__rewind(s); + return 0; + } + tga_comp = stbi__tga_get_comp(tga_colormap_bpp, 0, NULL); + } + else { + tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3) || (tga_image_type == 11), NULL); + } + if (!tga_comp) { + stbi__rewind(s); + return 0; + } + if (x) *x = tga_w; + if (y) *y = tga_h; + if (comp) *comp = tga_comp; + return 1; // seems to have passed everything +} + +static int stbi__tga_test(stbi__context *s) +{ + int res = 0; + int sz, tga_color_type; + stbi__get8(s); // discard Offset + tga_color_type = stbi__get8(s); // color type + if (tga_color_type > 1) goto errorEnd; // only RGB or indexed allowed + sz = stbi__get8(s); // image type + if (tga_color_type == 1) { // colormapped (paletted) image + if (sz != 1 && sz != 9) goto errorEnd; // colortype 1 demands image type 1 or 9 + stbi__skip(s, 4); // skip index of first colormap entry and number of entries + sz = stbi__get8(s); // check bits per palette color entry + if ((sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32)) goto errorEnd; + stbi__skip(s, 4); // skip image x and y origin + } + else { // "normal" image w/o colormap + if ((sz != 2) && (sz != 3) && (sz != 10) && (sz != 11)) goto errorEnd; // only RGB or grey allowed, +/- RLE + stbi__skip(s, 9); // skip colormap specification and image x/y origin + } + if (stbi__get16le(s) < 1) goto errorEnd; // test width + if (stbi__get16le(s) < 1) goto errorEnd; // test height + sz = stbi__get8(s); // bits per pixel + if ((tga_color_type == 1) && (sz != 8) && (sz != 16)) goto errorEnd; // for colormapped images, bpp is size of an index + if ((sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32)) goto errorEnd; + + res = 1; // if we got this far, everything's good and we can return 1 instead of 0 + +errorEnd: + stbi__rewind(s); + return res; +} + +// read 16bit value and convert to 24bit RGB 
+static void stbi__tga_read_rgb16(stbi__context *s, stbi_uc* out) +{ + stbi__uint16 px = (stbi__uint16)stbi__get16le(s); + stbi__uint16 fiveBitMask = 31; + // we have 3 channels with 5bits each + int r = (px >> 10) & fiveBitMask; + int g = (px >> 5) & fiveBitMask; + int b = px & fiveBitMask; + // Note that this saves the data in RGB(A) order, so it doesn't need to be swapped later + out[0] = (stbi_uc)((r * 255) / 31); + out[1] = (stbi_uc)((g * 255) / 31); + out[2] = (stbi_uc)((b * 255) / 31); + + // some people claim that the most significant bit might be used for alpha + // (possibly if an alpha-bit is set in the "image descriptor byte") + // but that only made 16bit test images completely translucent.. + // so let's treat all 15 and 16bit TGAs as RGB with no alpha. +} + +static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + // read in the TGA header stuff + int tga_offset = stbi__get8(s); + int tga_indexed = stbi__get8(s); + int tga_image_type = stbi__get8(s); + int tga_is_RLE = 0; + int tga_palette_start = stbi__get16le(s); + int tga_palette_len = stbi__get16le(s); + int tga_palette_bits = stbi__get8(s); + int tga_x_origin = stbi__get16le(s); + int tga_y_origin = stbi__get16le(s); + int tga_width = stbi__get16le(s); + int tga_height = stbi__get16le(s); + int tga_bits_per_pixel = stbi__get8(s); + int tga_comp, tga_rgb16 = 0; + int tga_inverted = stbi__get8(s); + // int tga_alpha_bits = tga_inverted & 15; // the 4 lowest bits - unused (useless?) 
+ // image data + unsigned char *tga_data; + unsigned char *tga_palette = NULL; + int i, j; + unsigned char raw_data[4] = { 0 }; + int RLE_count = 0; + int RLE_repeating = 0; + int read_next_pixel = 1; + STBI_NOTUSED(ri); + + // do a tiny bit of precessing + if (tga_image_type >= 8) + { + tga_image_type -= 8; + tga_is_RLE = 1; + } + tga_inverted = 1 - ((tga_inverted >> 5) & 1); + + // If I'm paletted, then I'll use the number of bits from the palette + if (tga_indexed) tga_comp = stbi__tga_get_comp(tga_palette_bits, 0, &tga_rgb16); + else tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3), &tga_rgb16); + + if (!tga_comp) // shouldn't really happen, stbi__tga_test() should have ensured basic consistency + return stbi__errpuc("bad format", "Can't find out TGA pixelformat"); + + // tga info + *x = tga_width; + *y = tga_height; + if (comp) *comp = tga_comp; + + if (!stbi__mad3sizes_valid(tga_width, tga_height, tga_comp, 0)) + return stbi__errpuc("too large", "Corrupt TGA"); + + tga_data = (unsigned char*)stbi__malloc_mad3(tga_width, tga_height, tga_comp, 0); + if (!tga_data) return stbi__errpuc("outofmem", "Out of memory"); + + // skip to the data's starting position (offset usually = 0) + stbi__skip(s, tga_offset); + + if (!tga_indexed && !tga_is_RLE && !tga_rgb16) { + for (i = 0; i < tga_height; ++i) { + int row = tga_inverted ? tga_height - i - 1 : i; + stbi_uc *tga_row = tga_data + row*tga_width*tga_comp; + stbi__getn(s, tga_row, tga_width * tga_comp); + } + } + else { + // do I need to load a palette? + if (tga_indexed) + { + // any data to skip? 
(offset usually = 0) + stbi__skip(s, tga_palette_start); + // load the palette + tga_palette = (unsigned char*)stbi__malloc_mad2(tga_palette_len, tga_comp, 0); + if (!tga_palette) { + STBI_FREE(tga_data); + return stbi__errpuc("outofmem", "Out of memory"); + } + if (tga_rgb16) { + stbi_uc *pal_entry = tga_palette; + STBI_ASSERT(tga_comp == STBI_rgb); + for (i = 0; i < tga_palette_len; ++i) { + stbi__tga_read_rgb16(s, pal_entry); + pal_entry += tga_comp; + } + } + else if (!stbi__getn(s, tga_palette, tga_palette_len * tga_comp)) { + STBI_FREE(tga_data); + STBI_FREE(tga_palette); + return stbi__errpuc("bad palette", "Corrupt TGA"); + } + } + // load the data + for (i = 0; i < tga_width * tga_height; ++i) + { + // if I'm in RLE mode, do I need to get a RLE stbi__pngchunk? + if (tga_is_RLE) + { + if (RLE_count == 0) + { + // yep, get the next byte as a RLE command + int RLE_cmd = stbi__get8(s); + RLE_count = 1 + (RLE_cmd & 127); + RLE_repeating = RLE_cmd >> 7; + read_next_pixel = 1; + } + else if (!RLE_repeating) + { + read_next_pixel = 1; + } + } + else + { + read_next_pixel = 1; + } + // OK, if I need to read a pixel, do it now + if (read_next_pixel) + { + // load however much data we did have + if (tga_indexed) + { + // read in index, then perform the lookup + int pal_idx = (tga_bits_per_pixel == 8) ? 
stbi__get8(s) : stbi__get16le(s); + if (pal_idx >= tga_palette_len) { + // invalid index + pal_idx = 0; + } + pal_idx *= tga_comp; + for (j = 0; j < tga_comp; ++j) { + raw_data[j] = tga_palette[pal_idx + j]; + } + } + else if (tga_rgb16) { + STBI_ASSERT(tga_comp == STBI_rgb); + stbi__tga_read_rgb16(s, raw_data); + } + else { + // read in the data raw + for (j = 0; j < tga_comp; ++j) { + raw_data[j] = stbi__get8(s); + } + } + // clear the reading flag for the next pixel + read_next_pixel = 0; + } // end of reading a pixel + + // copy data + for (j = 0; j < tga_comp; ++j) + tga_data[i*tga_comp + j] = raw_data[j]; + + // in case we're in RLE mode, keep counting down + --RLE_count; + } + // do I need to invert the image? + if (tga_inverted) + { + for (j = 0; j * 2 < tga_height; ++j) + { + int index1 = j * tga_width * tga_comp; + int index2 = (tga_height - 1 - j) * tga_width * tga_comp; + for (i = tga_width * tga_comp; i > 0; --i) + { + unsigned char temp = tga_data[index1]; + tga_data[index1] = tga_data[index2]; + tga_data[index2] = temp; + ++index1; + ++index2; + } + } + } + // clear my palette, if I had one + if (tga_palette != NULL) + { + STBI_FREE(tga_palette); + } + } + + // swap RGB - if the source data was RGB16, it already is in the right order + if (tga_comp >= 3 && !tga_rgb16) + { + unsigned char* tga_pixel = tga_data; + for (i = 0; i < tga_width * tga_height; ++i) + { + unsigned char temp = tga_pixel[0]; + tga_pixel[0] = tga_pixel[2]; + tga_pixel[2] = temp; + tga_pixel += tga_comp; + } + } + + // convert to target component count + if (req_comp && req_comp != tga_comp) + tga_data = stbi__convert_format(tga_data, tga_comp, req_comp, tga_width, tga_height); + + // the things I do to get rid of an error message, and yet keep + // Microsoft's C compilers happy... 
[8^( + tga_palette_start = tga_palette_len = tga_palette_bits = + tga_x_origin = tga_y_origin = 0; + // OK, done + return tga_data; +} +#endif + +// ************************************************************************************************* +// Photoshop PSD loader -- PD by Thatcher Ulrich, integration by Nicolas Schulz, tweaked by STB + +#ifndef STBI_NO_PSD +static int stbi__psd_test(stbi__context *s) +{ + int r = (stbi__get32be(s) == 0x38425053); + stbi__rewind(s); + return r; +} + +static int stbi__psd_decode_rle(stbi__context *s, stbi_uc *p, int pixelCount) +{ + int count, nleft, len; + + count = 0; + while ((nleft = pixelCount - count) > 0) { + len = stbi__get8(s); + if (len == 128) { + // No-op. + } + else if (len < 128) { + // Copy next len+1 bytes literally. + len++; + if (len > nleft) return 0; // corrupt data + count += len; + while (len) { + *p = stbi__get8(s); + p += 4; + len--; + } + } + else if (len > 128) { + stbi_uc val; + // Next -len+1 bytes in the dest are replicated from next source byte. + // (Interpret len as a negative 8-bit int.) + len = 257 - len; + if (len > nleft) return 0; // corrupt data + val = stbi__get8(s); + count += len; + while (len) { + *p = val; + p += 4; + len--; + } + } + } + + return 1; +} + +static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc) +{ + int pixelCount; + int channelCount, compression; + int channel, i; + int bitdepth; + int w, h; + stbi_uc *out; + STBI_NOTUSED(ri); + + // Check identifier + if (stbi__get32be(s) != 0x38425053) // "8BPS" + return stbi__errpuc("not PSD", "Corrupt PSD image"); + + // Check file type version. + if (stbi__get16be(s) != 1) + return stbi__errpuc("wrong version", "Unsupported version of PSD image"); + + // Skip 6 reserved bytes. + stbi__skip(s, 6); + + // Read the number of channels (R, G, B, A, etc). 
+ channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) + return stbi__errpuc("wrong channel count", "Unsupported number of channels in PSD image"); + + // Read the rows and columns of the image. + h = stbi__get32be(s); + w = stbi__get32be(s); + + // Make sure the depth is 8 bits. + bitdepth = stbi__get16be(s); + if (bitdepth != 8 && bitdepth != 16) + return stbi__errpuc("unsupported bit depth", "PSD bit depth is not 8 or 16 bit"); + + // Make sure the color mode is RGB. + // Valid options are: + // 0: Bitmap + // 1: Grayscale + // 2: Indexed color + // 3: RGB color + // 4: CMYK color + // 7: Multichannel + // 8: Duotone + // 9: Lab color + if (stbi__get16be(s) != 3) + return stbi__errpuc("wrong color format", "PSD is not in RGB color format"); + + // Skip the Mode Data. (It's the palette for indexed color; other info for other modes.) + stbi__skip(s, stbi__get32be(s)); + + // Skip the image resources. (resolution, pen tool paths, etc) + stbi__skip(s, stbi__get32be(s)); + + // Skip the reserved data. + stbi__skip(s, stbi__get32be(s)); + + // Find out if the data is compressed. + // Known values: + // 0: no compression + // 1: RLE compressed + compression = stbi__get16be(s); + if (compression > 1) + return stbi__errpuc("bad compression", "PSD has an unknown compression format"); + + // Check size + if (!stbi__mad3sizes_valid(4, w, h, 0)) + return stbi__errpuc("too large", "Corrupt PSD"); + + // Create the destination image. + + if (!compression && bitdepth == 16 && bpc == 16) { + out = (stbi_uc *)stbi__malloc_mad3(8, w, h, 0); + ri->bits_per_channel = 16; + } + else + out = (stbi_uc *)stbi__malloc(4 * w*h); + + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + pixelCount = w*h; + + // Initialize the data to zero. + //memset( out, 0, pixelCount * 4 ); + + // Finally, the image data. 
+ if (compression) { + // RLE as used by .PSD and .TIFF + // Loop until you get the number of unpacked bytes you are expecting: + // Read the next source byte into n. + // If n is between 0 and 127 inclusive, copy the next n+1 bytes literally. + // Else if n is between -127 and -1 inclusive, copy the next byte -n+1 times. + // Else if n is 128, noop. + // Endloop + + // The RLE-compressed data is preceeded by a 2-byte data count for each row in the data, + // which we're going to just skip. + stbi__skip(s, h * channelCount * 2); + + // Read the RLE data by channel. + for (channel = 0; channel < 4; channel++) { + stbi_uc *p; + + p = out + channel; + if (channel >= channelCount) { + // Fill this channel with default data. + for (i = 0; i < pixelCount; i++, p += 4) + *p = (channel == 3 ? 255 : 0); + } + else { + // Read the RLE data. + if (!stbi__psd_decode_rle(s, p, pixelCount)) { + STBI_FREE(out); + return stbi__errpuc("corrupt", "bad RLE data"); + } + } + } + + } + else { + // We're at the raw image data. It's each channel in order (Red, Green, Blue, Alpha, ...) + // where each channel consists of an 8-bit (or 16-bit) value for each pixel in the image. + + // Read the data by channel. + for (channel = 0; channel < 4; channel++) { + if (channel >= channelCount) { + // Fill this channel with default data. + if (bitdepth == 16 && bpc == 16) { + stbi__uint16 *q = ((stbi__uint16 *)out) + channel; + stbi__uint16 val = channel == 3 ? 65535 : 0; + for (i = 0; i < pixelCount; i++, q += 4) + *q = val; + } + else { + stbi_uc *p = out + channel; + stbi_uc val = channel == 3 ? 
255 : 0; + for (i = 0; i < pixelCount; i++, p += 4) + *p = val; + } + } + else { + if (ri->bits_per_channel == 16) { // output bpc + stbi__uint16 *q = ((stbi__uint16 *)out) + channel; + for (i = 0; i < pixelCount; i++, q += 4) + *q = (stbi__uint16)stbi__get16be(s); + } + else { + stbi_uc *p = out + channel; + if (bitdepth == 16) { // input bpc + for (i = 0; i < pixelCount; i++, p += 4) + *p = (stbi_uc)(stbi__get16be(s) >> 8); + } + else { + for (i = 0; i < pixelCount; i++, p += 4) + *p = stbi__get8(s); + } + } + } + } + } + + // remove weird white matte from PSD + if (channelCount >= 4) { + if (ri->bits_per_channel == 16) { + for (i = 0; i < w*h; ++i) { + stbi__uint16 *pixel = (stbi__uint16 *)out + 4 * i; + if (pixel[3] != 0 && pixel[3] != 65535) { + float a = pixel[3] / 65535.0f; + float ra = 1.0f / a; + float inv_a = 65535.0f * (1 - ra); + pixel[0] = (stbi__uint16)(pixel[0] * ra + inv_a); + pixel[1] = (stbi__uint16)(pixel[1] * ra + inv_a); + pixel[2] = (stbi__uint16)(pixel[2] * ra + inv_a); + } + } + } + else { + for (i = 0; i < w*h; ++i) { + unsigned char *pixel = out + 4 * i; + if (pixel[3] != 0 && pixel[3] != 255) { + float a = pixel[3] / 255.0f; + float ra = 1.0f / a; + float inv_a = 255.0f * (1 - ra); + pixel[0] = (unsigned char)(pixel[0] * ra + inv_a); + pixel[1] = (unsigned char)(pixel[1] * ra + inv_a); + pixel[2] = (unsigned char)(pixel[2] * ra + inv_a); + } + } + } + } + + // convert to desired output format + if (req_comp && req_comp != 4) { + if (ri->bits_per_channel == 16) + out = (stbi_uc *)stbi__convert_format16((stbi__uint16 *)out, 4, req_comp, w, h); + else + out = stbi__convert_format(out, 4, req_comp, w, h); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + + if (comp) *comp = 4; + *y = h; + *x = w; + + return out; +} +#endif + +// ************************************************************************************************* +// Softimage PIC loader +// by Tom Seddon +// +// See 
http://softimage.wiki.softimage.com/index.php/INFO:_PIC_file_format +// See http://ozviz.wasp.uwa.edu.au/~pbourke/dataformats/softimagepic/ + +#ifndef STBI_NO_PIC +static int stbi__pic_is4(stbi__context *s, const char *str) +{ + int i; + for (i = 0; i<4; ++i) + if (stbi__get8(s) != (stbi_uc)str[i]) + return 0; + + return 1; +} + +static int stbi__pic_test_core(stbi__context *s) +{ + int i; + + if (!stbi__pic_is4(s, "\x53\x80\xF6\x34")) + return 0; + + for (i = 0; i<84; ++i) + stbi__get8(s); + + if (!stbi__pic_is4(s, "PICT")) + return 0; + + return 1; +} + +typedef struct +{ + stbi_uc size, type, channel; +} stbi__pic_packet; + +static stbi_uc *stbi__readval(stbi__context *s, int channel, stbi_uc *dest) +{ + int mask = 0x80, i; + + for (i = 0; i<4; ++i, mask >>= 1) { + if (channel & mask) { + if (stbi__at_eof(s)) return stbi__errpuc("bad file", "PIC file too short"); + dest[i] = stbi__get8(s); + } + } + + return dest; +} + +static void stbi__copyval(int channel, stbi_uc *dest, const stbi_uc *src) +{ + int mask = 0x80, i; + + for (i = 0; i<4; ++i, mask >>= 1) + if (channel&mask) + dest[i] = src[i]; +} + +static stbi_uc *stbi__pic_load_core(stbi__context *s, int width, int height, int *comp, stbi_uc *result) +{ + int act_comp = 0, num_packets = 0, y, chained; + stbi__pic_packet packets[10]; + + // this will (should...) cater for even some bizarre stuff like having data + // for the same channel in multiple packets. 
+ do { + stbi__pic_packet *packet; + + if (num_packets == sizeof(packets) / sizeof(packets[0])) + return stbi__errpuc("bad format", "too many packets"); + + packet = &packets[num_packets++]; + + chained = stbi__get8(s); + packet->size = stbi__get8(s); + packet->type = stbi__get8(s); + packet->channel = stbi__get8(s); + + act_comp |= packet->channel; + + if (stbi__at_eof(s)) return stbi__errpuc("bad file", "file too short (reading packets)"); + if (packet->size != 8) return stbi__errpuc("bad format", "packet isn't 8bpp"); + } while (chained); + + *comp = (act_comp & 0x10 ? 4 : 3); // has alpha channel? + + for (y = 0; ytype) { + default: + return stbi__errpuc("bad format", "packet has bad compression type"); + + case 0: {//uncompressed + int x; + + for (x = 0; xchannel, dest)) + return 0; + break; + } + + case 1://Pure RLE + { + int left = width, i; + + while (left>0) { + stbi_uc count, value[4]; + + count = stbi__get8(s); + if (stbi__at_eof(s)) return stbi__errpuc("bad file", "file too short (pure read count)"); + + if (count > left) + count = (stbi_uc)left; + + if (!stbi__readval(s, packet->channel, value)) return 0; + + for (i = 0; ichannel, dest, value); + left -= count; + } + } + break; + + case 2: {//Mixed RLE + int left = width; + while (left>0) { + int count = stbi__get8(s), i; + if (stbi__at_eof(s)) return stbi__errpuc("bad file", "file too short (mixed read count)"); + + if (count >= 128) { // Repeated + stbi_uc value[4]; + + if (count == 128) + count = stbi__get16be(s); + else + count -= 127; + if (count > left) + return stbi__errpuc("bad file", "scanline overrun"); + + if (!stbi__readval(s, packet->channel, value)) + return 0; + + for (i = 0; ichannel, dest, value); + } + else { // Raw + ++count; + if (count>left) return stbi__errpuc("bad file", "scanline overrun"); + + for (i = 0; ichannel, dest)) + return 0; + } + left -= count; + } + break; + } + } + } + } + + return result; +} + +static void *stbi__pic_load(stbi__context *s, int *px, int *py, int 
*comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *result; + int i, x, y; + STBI_NOTUSED(ri); + + for (i = 0; i<92; ++i) + stbi__get8(s); + + x = stbi__get16be(s); + y = stbi__get16be(s); + if (stbi__at_eof(s)) return stbi__errpuc("bad file", "file too short (pic header)"); + if (!stbi__mad3sizes_valid(x, y, 4, 0)) return stbi__errpuc("too large", "PIC image too large to decode"); + + stbi__get32be(s); //skip `ratio' + stbi__get16be(s); //skip `fields' + stbi__get16be(s); //skip `pad' + + // intermediate buffer is RGBA + result = (stbi_uc *)stbi__malloc_mad3(x, y, 4, 0); + memset(result, 0xff, x*y * 4); + + if (!stbi__pic_load_core(s, x, y, comp, result)) { + STBI_FREE(result); + result = 0; + } + *px = x; + *py = y; + if (req_comp == 0) req_comp = *comp; + result = stbi__convert_format(result, 4, req_comp, x, y); + + return result; +} + +static int stbi__pic_test(stbi__context *s) +{ + int r = stbi__pic_test_core(s); + stbi__rewind(s); + return r; +} +#endif + +// ************************************************************************************************* +// GIF loader -- public domain by Jean-Marc Lienher -- simplified/shrunk by stb + +#ifndef STBI_NO_GIF +typedef struct +{ + stbi__int16 prefix; + stbi_uc first; + stbi_uc suffix; +} stbi__gif_lzw; + +typedef struct +{ + int w, h; + stbi_uc *out, *old_out; // output buffer (always 4 components) + int flags, bgindex, ratio, transparent, eflags, delay; + stbi_uc pal[256][4]; + stbi_uc lpal[256][4]; + stbi__gif_lzw codes[4096]; + stbi_uc *color_table; + int parse, step; + int lflags; + int start_x, start_y; + int max_x, max_y; + int cur_x, cur_y; + int line_size; +} stbi__gif; + +static int stbi__gif_test_raw(stbi__context *s) +{ + int sz; + if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') return 0; + sz = stbi__get8(s); + if (sz != '9' && sz != '7') return 0; + if (stbi__get8(s) != 'a') return 0; + return 1; +} + +static int stbi__gif_test(stbi__context 
*s) +{ + int r = stbi__gif_test_raw(s); + stbi__rewind(s); + return r; +} + +static void stbi__gif_parse_colortable(stbi__context *s, stbi_uc pal[256][4], int num_entries, int transp) +{ + int i; + for (i = 0; i < num_entries; ++i) { + pal[i][2] = stbi__get8(s); + pal[i][1] = stbi__get8(s); + pal[i][0] = stbi__get8(s); + pal[i][3] = transp == i ? 0 : 255; + } +} + +static int stbi__gif_header(stbi__context *s, stbi__gif *g, int *comp, int is_info) +{ + stbi_uc version; + if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') + return stbi__err("not GIF", "Corrupt GIF"); + + version = stbi__get8(s); + if (version != '7' && version != '9') return stbi__err("not GIF", "Corrupt GIF"); + if (stbi__get8(s) != 'a') return stbi__err("not GIF", "Corrupt GIF"); + + stbi__g_failure_reason = ""; + g->w = stbi__get16le(s); + g->h = stbi__get16le(s); + g->flags = stbi__get8(s); + g->bgindex = stbi__get8(s); + g->ratio = stbi__get8(s); + g->transparent = -1; + + if (comp != 0) *comp = 4; // can't actually tell whether it's 3 or 4 until we parse the comments + + if (is_info) return 1; + + if (g->flags & 0x80) + stbi__gif_parse_colortable(s, g->pal, 2 << (g->flags & 7), -1); + + return 1; +} + +static int stbi__gif_info_raw(stbi__context *s, int *x, int *y, int *comp) +{ + stbi__gif* g = (stbi__gif*)stbi__malloc(sizeof(stbi__gif)); + if (!stbi__gif_header(s, g, comp, 1)) { + STBI_FREE(g); + stbi__rewind(s); + return 0; + } + if (x) *x = g->w; + if (y) *y = g->h; + STBI_FREE(g); + return 1; +} + +static void stbi__out_gif_code(stbi__gif *g, stbi__uint16 code) +{ + stbi_uc *p, *c; + + // recurse to decode the prefixes, since the linked-list is backwards, + // and working backwards through an interleaved image would be nasty + if (g->codes[code].prefix >= 0) + stbi__out_gif_code(g, g->codes[code].prefix); + + if (g->cur_y >= g->max_y) return; + + p = &g->out[g->cur_x + g->cur_y]; + c = &g->color_table[g->codes[code].suffix * 4]; + + if (c[3] 
>= 128) { + p[0] = c[2]; + p[1] = c[1]; + p[2] = c[0]; + p[3] = c[3]; + } + g->cur_x += 4; + + if (g->cur_x >= g->max_x) { + g->cur_x = g->start_x; + g->cur_y += g->step; + + while (g->cur_y >= g->max_y && g->parse > 0) { + g->step = (1 << g->parse) * g->line_size; + g->cur_y = g->start_y + (g->step >> 1); + --g->parse; + } + } +} + +static stbi_uc *stbi__process_gif_raster(stbi__context *s, stbi__gif *g) +{ + stbi_uc lzw_cs; + stbi__int32 len, init_code; + stbi__uint32 first; + stbi__int32 codesize, codemask, avail, oldcode, bits, valid_bits, clear; + stbi__gif_lzw *p; + + lzw_cs = stbi__get8(s); + if (lzw_cs > 12) return NULL; + clear = 1 << lzw_cs; + first = 1; + codesize = lzw_cs + 1; + codemask = (1 << codesize) - 1; + bits = 0; + valid_bits = 0; + for (init_code = 0; init_code < clear; init_code++) { + g->codes[init_code].prefix = -1; + g->codes[init_code].first = (stbi_uc)init_code; + g->codes[init_code].suffix = (stbi_uc)init_code; + } + + // support no starting clear code + avail = clear + 2; + oldcode = -1; + + len = 0; + for (;;) { + if (valid_bits < codesize) { + if (len == 0) { + len = stbi__get8(s); // start new block + if (len == 0) + return g->out; + } + --len; + bits |= (stbi__int32)stbi__get8(s) << valid_bits; + valid_bits += 8; + } + else { + stbi__int32 code = bits & codemask; + bits >>= codesize; + valid_bits -= codesize; + // @OPTIMIZE: is there some way we can accelerate the non-clear path? 
+ if (code == clear) { // clear code + codesize = lzw_cs + 1; + codemask = (1 << codesize) - 1; + avail = clear + 2; + oldcode = -1; + first = 0; + } + else if (code == clear + 1) { // end of stream code + stbi__skip(s, len); + while ((len = stbi__get8(s)) > 0) + stbi__skip(s, len); + return g->out; + } + else if (code <= avail) { + if (first) return stbi__errpuc("no clear code", "Corrupt GIF"); + + if (oldcode >= 0) { + p = &g->codes[avail++]; + if (avail > 4096) return stbi__errpuc("too many codes", "Corrupt GIF"); + p->prefix = (stbi__int16)oldcode; + p->first = g->codes[oldcode].first; + p->suffix = (code == avail) ? p->first : g->codes[code].first; + } + else if (code == avail) + return stbi__errpuc("illegal code in raster", "Corrupt GIF"); + + stbi__out_gif_code(g, (stbi__uint16)code); + + if ((avail & codemask) == 0 && avail <= 0x0FFF) { + codesize++; + codemask = (1 << codesize) - 1; + } + + oldcode = code; + } + else { + return stbi__errpuc("illegal code in raster", "Corrupt GIF"); + } + } + } +} + +static void stbi__fill_gif_background(stbi__gif *g, int x0, int y0, int x1, int y1) +{ + int x, y; + stbi_uc *c = g->pal[g->bgindex]; + for (y = y0; y < y1; y += 4 * g->w) { + for (x = x0; x < x1; x += 4) { + stbi_uc *p = &g->out[y + x]; + p[0] = c[2]; + p[1] = c[1]; + p[2] = c[0]; + p[3] = 0; + } + } +} + +// this function is designed to support animated gifs, although stb_image doesn't support it +static stbi_uc *stbi__gif_load_next(stbi__context *s, stbi__gif *g, int *comp, int req_comp) +{ + int i; + stbi_uc *prev_out = 0; + + if (g->out == 0 && !stbi__gif_header(s, g, comp, 0)) + return 0; // stbi__g_failure_reason set by stbi__gif_header + + if (!stbi__mad3sizes_valid(g->w, g->h, 4, 0)) + return stbi__errpuc("too large", "GIF too large"); + + prev_out = g->out; + g->out = (stbi_uc *)stbi__malloc_mad3(4, g->w, g->h, 0); + if (g->out == 0) return stbi__errpuc("outofmem", "Out of memory"); + + switch ((g->eflags & 0x1C) >> 2) { + case 0: // unspecified (also 
always used on 1st frame) + stbi__fill_gif_background(g, 0, 0, 4 * g->w, 4 * g->w * g->h); + break; + case 1: // do not dispose + if (prev_out) memcpy(g->out, prev_out, 4 * g->w * g->h); + g->old_out = prev_out; + break; + case 2: // dispose to background + if (prev_out) memcpy(g->out, prev_out, 4 * g->w * g->h); + stbi__fill_gif_background(g, g->start_x, g->start_y, g->max_x, g->max_y); + break; + case 3: // dispose to previous + if (g->old_out) { + for (i = g->start_y; i < g->max_y; i += 4 * g->w) + memcpy(&g->out[i + g->start_x], &g->old_out[i + g->start_x], g->max_x - g->start_x); + } + break; + } + + for (;;) { + switch (stbi__get8(s)) { + case 0x2C: /* Image Descriptor */ + { + int prev_trans = -1; + stbi__int32 x, y, w, h; + stbi_uc *o; + + x = stbi__get16le(s); + y = stbi__get16le(s); + w = stbi__get16le(s); + h = stbi__get16le(s); + if (((x + w) > (g->w)) || ((y + h) > (g->h))) + return stbi__errpuc("bad Image Descriptor", "Corrupt GIF"); + + g->line_size = g->w * 4; + g->start_x = x * 4; + g->start_y = y * g->line_size; + g->max_x = g->start_x + w * 4; + g->max_y = g->start_y + h * g->line_size; + g->cur_x = g->start_x; + g->cur_y = g->start_y; + + g->lflags = stbi__get8(s); + + if (g->lflags & 0x40) { + g->step = 8 * g->line_size; // first interlaced spacing + g->parse = 3; + } + else { + g->step = g->line_size; + g->parse = 0; + } + + if (g->lflags & 0x80) { + stbi__gif_parse_colortable(s, g->lpal, 2 << (g->lflags & 7), g->eflags & 0x01 ? 
g->transparent : -1); + g->color_table = (stbi_uc *)g->lpal; + } + else if (g->flags & 0x80) { + if (g->transparent >= 0 && (g->eflags & 0x01)) { + prev_trans = g->pal[g->transparent][3]; + g->pal[g->transparent][3] = 0; + } + g->color_table = (stbi_uc *)g->pal; + } + else + return stbi__errpuc("missing color table", "Corrupt GIF"); + + o = stbi__process_gif_raster(s, g); + if (o == NULL) return NULL; + + if (prev_trans != -1) + g->pal[g->transparent][3] = (stbi_uc)prev_trans; + + return o; + } + + case 0x21: // Comment Extension. + { + int len; + if (stbi__get8(s) == 0xF9) { // Graphic Control Extension. + len = stbi__get8(s); + if (len == 4) { + g->eflags = stbi__get8(s); + g->delay = stbi__get16le(s); + g->transparent = stbi__get8(s); + } + else { + stbi__skip(s, len); + break; + } + } + while ((len = stbi__get8(s)) != 0) + stbi__skip(s, len); + break; + } + + case 0x3B: // gif stream termination code + return (stbi_uc *)s; // using '1' causes warning on some compilers + + default: + return stbi__errpuc("unknown code", "Corrupt GIF"); + } + } + + STBI_NOTUSED(req_comp); +} + +static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *u = 0; + stbi__gif* g = (stbi__gif*)stbi__malloc(sizeof(stbi__gif)); + memset(g, 0, sizeof(*g)); + STBI_NOTUSED(ri); + + u = stbi__gif_load_next(s, g, comp, req_comp); + if (u == (stbi_uc *)s) u = 0; // end of animated gif marker + if (u) { + *x = g->w; + *y = g->h; + if (req_comp && req_comp != 4) + u = stbi__convert_format(u, 4, req_comp, g->w, g->h); + } + else if (g->out) + STBI_FREE(g->out); + STBI_FREE(g); + return u; +} + +static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp) +{ + return stbi__gif_info_raw(s, x, y, comp); +} +#endif + +// ************************************************************************************************* +// Radiance RGBE HDR loader +// originally by Nicolas Schulz +#ifndef STBI_NO_HDR +static int 
stbi__hdr_test_core(stbi__context *s, const char *signature) +{ + int i; + for (i = 0; signature[i]; ++i) + if (stbi__get8(s) != signature[i]) + return 0; + stbi__rewind(s); + return 1; +} + +static int stbi__hdr_test(stbi__context* s) +{ + int r = stbi__hdr_test_core(s, "#?RADIANCE\n"); + stbi__rewind(s); + if (!r) { + r = stbi__hdr_test_core(s, "#?RGBE\n"); + stbi__rewind(s); + } + return r; +} + +#define STBI__HDR_BUFLEN 1024 +static char *stbi__hdr_gettoken(stbi__context *z, char *buffer) +{ + int len = 0; + char c = '\0'; + + c = (char)stbi__get8(z); + + while (!stbi__at_eof(z) && c != '\n') { + buffer[len++] = c; + if (len == STBI__HDR_BUFLEN - 1) { + // flush to end of line + while (!stbi__at_eof(z) && stbi__get8(z) != '\n') + ; + break; + } + c = (char)stbi__get8(z); + } + + buffer[len] = 0; + return buffer; +} + +static void stbi__hdr_convert(float *output, stbi_uc *input, int req_comp) +{ + if (input[3] != 0) { + float f1; + // Exponent + f1 = (float)ldexp(1.0f, input[3] - (int)(128 + 8)); + if (req_comp <= 2) + output[0] = (input[0] + input[1] + input[2]) * f1 / 3; + else { + output[0] = input[0] * f1; + output[1] = input[1] * f1; + output[2] = input[2] * f1; + } + if (req_comp == 2) output[1] = 1; + if (req_comp == 4) output[3] = 1; + } + else { + switch (req_comp) { + case 4: output[3] = 1; /* fallthrough */ + case 3: output[0] = output[1] = output[2] = 0; + break; + case 2: output[1] = 1; /* fallthrough */ + case 1: output[0] = 0; + break; + } + } +} + +static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + char buffer[STBI__HDR_BUFLEN]; + char *token; + int valid = 0; + int width, height; + stbi_uc *scanline; + float *hdr_data; + int len; + unsigned char count, value; + int i, j, k, c1, c2, z; + const char *headerToken; + STBI_NOTUSED(ri); + + // Check identifier + headerToken = stbi__hdr_gettoken(s, buffer); + if (strcmp(headerToken, "#?RADIANCE") != 0 && strcmp(headerToken, "#?RGBE") != 
0) + return stbi__errpf("not HDR", "Corrupt HDR image"); + + // Parse header + for (;;) { + token = stbi__hdr_gettoken(s, buffer); + if (token[0] == 0) break; + if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1; + } + + if (!valid) return stbi__errpf("unsupported format", "Unsupported HDR format"); + + // Parse width and height + // can't use sscanf() if we're not using stdio! + token = stbi__hdr_gettoken(s, buffer); + if (strncmp(token, "-Y ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format"); + token += 3; + height = (int)strtol(token, &token, 10); + while (*token == ' ') ++token; + if (strncmp(token, "+X ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format"); + token += 3; + width = (int)strtol(token, NULL, 10); + + *x = width; + *y = height; + + if (comp) *comp = 3; + if (req_comp == 0) req_comp = 3; + + if (!stbi__mad4sizes_valid(width, height, req_comp, sizeof(float), 0)) + return stbi__errpf("too large", "HDR image is too large"); + + // Read data + hdr_data = (float *)stbi__malloc_mad4(width, height, req_comp, sizeof(float), 0); + if (!hdr_data) + return stbi__errpf("outofmem", "Out of memory"); + + // Load image data + // image data is stored as some number of sca + if (width < 8 || width >= 32768) { + // Read flat data + for (j = 0; j < height; ++j) { + for (i = 0; i < width; ++i) { + stbi_uc rgbe[4]; + main_decode_loop: + stbi__getn(s, rgbe, 4); + stbi__hdr_convert(hdr_data + j * width * req_comp + i * req_comp, rgbe, req_comp); + } + } + } + else { + // Read RLE-encoded data + scanline = NULL; + + for (j = 0; j < height; ++j) { + c1 = stbi__get8(s); + c2 = stbi__get8(s); + len = stbi__get8(s); + if (c1 != 2 || c2 != 2 || (len & 0x80)) { + // not run-length encoded, so we have to actually use THIS data as a decoded + // pixel (note this can't be a valid pixel--one of RGB must be >= 128) + stbi_uc rgbe[4]; + rgbe[0] = (stbi_uc)c1; + rgbe[1] = (stbi_uc)c2; + rgbe[2] = (stbi_uc)len; + rgbe[3] = 
(stbi_uc)stbi__get8(s); + stbi__hdr_convert(hdr_data, rgbe, req_comp); + i = 1; + j = 0; + STBI_FREE(scanline); + goto main_decode_loop; // yes, this makes no sense + } + len <<= 8; + len |= stbi__get8(s); + if (len != width) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("invalid decoded scanline length", "corrupt HDR"); } + if (scanline == NULL) { + scanline = (stbi_uc *)stbi__malloc_mad2(width, 4, 0); + if (!scanline) { + STBI_FREE(hdr_data); + return stbi__errpf("outofmem", "Out of memory"); + } + } + + for (k = 0; k < 4; ++k) { + int nleft; + i = 0; + while ((nleft = width - i) > 0) { + count = stbi__get8(s); + if (count > 128) { + // Run + value = stbi__get8(s); + count -= 128; + if (count > nleft) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } + for (z = 0; z < count; ++z) + scanline[i++ * 4 + k] = value; + } + else { + // Dump + if (count > nleft) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } + for (z = 0; z < count; ++z) + scanline[i++ * 4 + k] = stbi__get8(s); + } + } + } + for (i = 0; i < width; ++i) + stbi__hdr_convert(hdr_data + (j*width + i)*req_comp, scanline + i * 4, req_comp); + } + if (scanline) + STBI_FREE(scanline); + } + + return hdr_data; +} + +static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp) +{ + char buffer[STBI__HDR_BUFLEN]; + char *token; + int valid = 0; + + if (stbi__hdr_test(s) == 0) { + stbi__rewind(s); + return 0; + } + + for (;;) { + token = stbi__hdr_gettoken(s, buffer); + if (token[0] == 0) break; + if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1; + } + + if (!valid) { + stbi__rewind(s); + return 0; + } + token = stbi__hdr_gettoken(s, buffer); + if (strncmp(token, "-Y ", 3)) { + stbi__rewind(s); + return 0; + } + token += 3; + *y = (int)strtol(token, &token, 10); + while (*token == ' ') ++token; + if (strncmp(token, "+X ", 3)) { + stbi__rewind(s); + return 0; + } + token += 3; 
+ *x = (int)strtol(token, NULL, 10); + *comp = 3; + return 1; +} +#endif // STBI_NO_HDR + +#ifndef STBI_NO_BMP +static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp) +{ + void *p; + stbi__bmp_data info; + + info.all_a = 255; + p = stbi__bmp_parse_header(s, &info); + stbi__rewind(s); + if (p == NULL) + return 0; + *x = s->img_x; + *y = s->img_y; + *comp = info.ma ? 4 : 3; + return 1; +} +#endif + +#ifndef STBI_NO_PSD +static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp) +{ + int channelCount; + if (stbi__get32be(s) != 0x38425053) { + stbi__rewind(s); + return 0; + } + if (stbi__get16be(s) != 1) { + stbi__rewind(s); + return 0; + } + stbi__skip(s, 6); + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) { + stbi__rewind(s); + return 0; + } + *y = stbi__get32be(s); + *x = stbi__get32be(s); + if (stbi__get16be(s) != 8) { + stbi__rewind(s); + return 0; + } + if (stbi__get16be(s) != 3) { + stbi__rewind(s); + return 0; + } + *comp = 4; + return 1; +} +#endif + +#ifndef STBI_NO_PIC +static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp) +{ + int act_comp = 0, num_packets = 0, chained; + stbi__pic_packet packets[10]; + + if (!stbi__pic_is4(s, "\x53\x80\xF6\x34")) { + stbi__rewind(s); + return 0; + } + + stbi__skip(s, 88); + + *x = stbi__get16be(s); + *y = stbi__get16be(s); + if (stbi__at_eof(s)) { + stbi__rewind(s); + return 0; + } + if ((*x) != 0 && (1 << 28) / (*x) < (*y)) { + stbi__rewind(s); + return 0; + } + + stbi__skip(s, 8); + + do { + stbi__pic_packet *packet; + + if (num_packets == sizeof(packets) / sizeof(packets[0])) + return 0; + + packet = &packets[num_packets++]; + chained = stbi__get8(s); + packet->size = stbi__get8(s); + packet->type = stbi__get8(s); + packet->channel = stbi__get8(s); + act_comp |= packet->channel; + + if (stbi__at_eof(s)) { + stbi__rewind(s); + return 0; + } + if (packet->size != 8) { + stbi__rewind(s); + return 0; + } + } while (chained); + + *comp = (act_comp & 
0x10 ? 4 : 3); + + return 1; +} +#endif + +// ************************************************************************************************* +// Portable Gray Map and Portable Pixel Map loader +// by Ken Miller +// +// PGM: http://netpbm.sourceforge.net/doc/pgm.html +// PPM: http://netpbm.sourceforge.net/doc/ppm.html +// +// Known limitations: +// Does not support comments in the header section +// Does not support ASCII image data (formats P2 and P3) +// Does not support 16-bit-per-channel + +#ifndef STBI_NO_PNM + +static int stbi__pnm_test(stbi__context *s) +{ + char p, t; + p = (char)stbi__get8(s); + t = (char)stbi__get8(s); + if (p != 'P' || (t != '5' && t != '6')) { + stbi__rewind(s); + return 0; + } + return 1; +} + +static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *out; + STBI_NOTUSED(ri); + + if (!stbi__pnm_info(s, (int *)&s->img_x, (int *)&s->img_y, (int *)&s->img_n)) + return 0; + + *x = s->img_x; + *y = s->img_y; + *comp = s->img_n; + + if (!stbi__mad3sizes_valid(s->img_n, s->img_x, s->img_y, 0)) + return stbi__errpuc("too large", "PNM too large"); + + out = (stbi_uc *)stbi__malloc_mad3(s->img_n, s->img_x, s->img_y, 0); + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + stbi__getn(s, out, s->img_n * s->img_x * s->img_y); + + if (req_comp && req_comp != s->img_n) { + out = stbi__convert_format(out, s->img_n, req_comp, s->img_x, s->img_y); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + return out; +} + +static int stbi__pnm_isspace(char c) +{ + return c == ' ' || c == '\t' || c == '\n' || c == '\v' || c == '\f' || c == '\r'; +} + +static void stbi__pnm_skip_whitespace(stbi__context *s, char *c) +{ + for (;;) { + while (!stbi__at_eof(s) && stbi__pnm_isspace(*c)) + *c = (char)stbi__get8(s); + + if (stbi__at_eof(s) || *c != '#') + break; + + while (!stbi__at_eof(s) && *c != '\n' && *c != '\r') + *c = (char)stbi__get8(s); + } +} + 
+static int stbi__pnm_isdigit(char c) +{ + return c >= '0' && c <= '9'; +} + +static int stbi__pnm_getinteger(stbi__context *s, char *c) +{ + int value = 0; + + while (!stbi__at_eof(s) && stbi__pnm_isdigit(*c)) { + value = value * 10 + (*c - '0'); + *c = (char)stbi__get8(s); + } + + return value; +} + +static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp) +{ + int maxv; + char c, p, t; + + stbi__rewind(s); + + // Get identifier + p = (char)stbi__get8(s); + t = (char)stbi__get8(s); + if (p != 'P' || (t != '5' && t != '6')) { + stbi__rewind(s); + return 0; + } + + *comp = (t == '6') ? 3 : 1; // '5' is 1-component .pgm; '6' is 3-component .ppm + + c = (char)stbi__get8(s); + stbi__pnm_skip_whitespace(s, &c); + + *x = stbi__pnm_getinteger(s, &c); // read width + stbi__pnm_skip_whitespace(s, &c); + + *y = stbi__pnm_getinteger(s, &c); // read height + stbi__pnm_skip_whitespace(s, &c); + + maxv = stbi__pnm_getinteger(s, &c); // read max value + + if (maxv > 255) + return stbi__err("max value > 255", "PPM image not 8-bit"); + else + return 1; +} +#endif + +static int stbi__info_main(stbi__context *s, int *x, int *y, int *comp) +{ +#ifndef STBI_NO_JPEG + if (stbi__jpeg_info(s, x, y, comp)) return 1; +#endif + +#ifndef STBI_NO_PNG + if (stbi__png_info(s, x, y, comp)) return 1; +#endif + +#ifndef STBI_NO_GIF + if (stbi__gif_info(s, x, y, comp)) return 1; +#endif + +#ifndef STBI_NO_BMP + if (stbi__bmp_info(s, x, y, comp)) return 1; +#endif + +#ifndef STBI_NO_PSD + if (stbi__psd_info(s, x, y, comp)) return 1; +#endif + +#ifndef STBI_NO_PIC + if (stbi__pic_info(s, x, y, comp)) return 1; +#endif + +#ifndef STBI_NO_PNM + if (stbi__pnm_info(s, x, y, comp)) return 1; +#endif + +#ifndef STBI_NO_HDR + if (stbi__hdr_info(s, x, y, comp)) return 1; +#endif + + // test tga last because it's a crappy test! 
+#ifndef STBI_NO_TGA + if (stbi__tga_info(s, x, y, comp)) + return 1; +#endif + return stbi__err("unknown image type", "Image not of any known type, or corrupt"); +} + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_info(char const *filename, int *x, int *y, int *comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result; + if (!f) return stbi__err("can't fopen", "Unable to open file"); + result = stbi_info_from_file(f, x, y, comp); + fclose(f); + return result; +} + +STBIDEF int stbi_info_from_file(FILE *f, int *x, int *y, int *comp) +{ + int r; + stbi__context s; + long pos = ftell(f); + stbi__start_file(&s, f); + r = stbi__info_main(&s, x, y, comp); + fseek(f, pos, SEEK_SET); + return r; +} +#endif // !STBI_NO_STDIO + +STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp) +{ + stbi__context s; + stbi__start_mem(&s, buffer, len); + return stbi__info_main(&s, x, y, comp); +} + +STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *c, void *user, int *x, int *y, int *comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *)c, user); + return stbi__info_main(&s, x, y, comp); +} + +#endif // STB_IMAGE_IMPLEMENTATION + +/* +revision history: +2.13 (2016-11-29) add 16-bit API, only supported for PNG right now +2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes +2.11 (2016-04-02) allocate large structures on the stack +remove white matting for transparent PSD +fix reported channel count for PNG & BMP +re-enable SSE2 in non-gcc 64-bit +support RGB-formatted JPEG +read 16-bit PNGs (only as 8-bit) +2.10 (2016-01-22) avoid warning introduced in 2.09 by STBI_REALLOC_SIZED +2.09 (2016-01-16) allow comments in PNM files +16-bit-per-pixel TGA (not bit-per-component) +info() for TGA could break due to .hdr handling +info() for BMP to shares code instead of sloppy parse +can use STBI_REALLOC_SIZED if allocator doesn't support realloc +code cleanup +2.08 (2015-09-13) fix to 2.07 cleanup, reading RGB PSD as 
RGBA +2.07 (2015-09-13) fix compiler warnings +partial animated GIF support +limited 16-bpc PSD support +#ifdef unused functions +bug with < 92 byte PIC,PNM,HDR,TGA +2.06 (2015-04-19) fix bug where PSD returns wrong '*comp' value +2.05 (2015-04-19) fix bug in progressive JPEG handling, fix warning +2.04 (2015-04-15) try to re-enable SIMD on MinGW 64-bit +2.03 (2015-04-12) extra corruption checking (mmozeiko) +stbi_set_flip_vertically_on_load (nguillemot) +fix NEON support; fix mingw support +2.02 (2015-01-19) fix incorrect assert, fix warning +2.01 (2015-01-17) fix various warnings; suppress SIMD on gcc 32-bit without -msse2 +2.00b (2014-12-25) fix STBI_MALLOC in progressive JPEG +2.00 (2014-12-25) optimize JPG, including x86 SSE2 & NEON SIMD (ryg) +progressive JPEG (stb) +PGM/PPM support (Ken Miller) +STBI_MALLOC,STBI_REALLOC,STBI_FREE +GIF bugfix -- seemingly never worked +STBI_NO_*, STBI_ONLY_* +1.48 (2014-12-14) fix incorrectly-named assert() +1.47 (2014-12-14) 1/2/4-bit PNG support, both direct and paletted (Omar Cornut & stb) +optimize PNG (ryg) +fix bug in interlaced PNG with user-specified channel count (stb) +1.46 (2014-08-26) +fix broken tRNS chunk (colorkey-style transparency) in non-paletted PNG +1.45 (2014-08-16) +fix MSVC-ARM internal compiler error by wrapping malloc +1.44 (2014-08-07) +various warning fixes from Ronny Chevalier +1.43 (2014-07-15) +fix MSVC-only compiler problem in code changed in 1.42 +1.42 (2014-07-09) +don't define _CRT_SECURE_NO_WARNINGS (affects user code) +fixes to stbi__cleanup_jpeg path +added STBI_ASSERT to avoid requiring assert.h +1.41 (2014-06-25) +fix search&replace from 1.36 that messed up comments/error messages +1.40 (2014-06-22) +fix gcc struct-initialization warning +1.39 (2014-06-15) +fix to TGA optimization when req_comp != number of components in TGA; +fix to GIF loading because BMP wasn't rewinding (whoops, no GIFs in my test suite) +add support for BMP version 5 (more ignored fields) +1.38 (2014-06-06) 
+suppress MSVC warnings on integer casts truncating values +fix accidental rename of 'skip' field of I/O +1.37 (2014-06-04) +remove duplicate typedef +1.36 (2014-06-03) +convert to header file single-file library +if de-iphone isn't set, load iphone images color-swapped instead of returning NULL +1.35 (2014-05-27) +various warnings +fix broken STBI_SIMD path +fix bug where stbi_load_from_file no longer left file pointer in correct place +fix broken non-easy path for 32-bit BMP (possibly never used) +TGA optimization by Arseny Kapoulkine +1.34 (unknown) +use STBI_NOTUSED in stbi__resample_row_generic(), fix one more leak in tga failure case +1.33 (2011-07-14) +make stbi_is_hdr work in STBI_NO_HDR (as specified), minor compiler-friendly improvements +1.32 (2011-07-13) +support for "info" function for all supported filetypes (SpartanJ) +1.31 (2011-06-20) +a few more leak fixes, bug in PNG handling (SpartanJ) +1.30 (2011-06-11) +added ability to load files via callbacks to accomidate custom input streams (Ben Wenger) +removed deprecated format-specific test/load functions +removed support for installable file formats (stbi_loader) -- would have been broken for IO callbacks anyway +error cases in bmp and tga give messages and don't leak (Raymond Barbiero, grisha) +fix inefficiency in decoding 32-bit BMP (David Woo) +1.29 (2010-08-16) +various warning fixes from Aurelien Pocheville +1.28 (2010-08-01) +fix bug in GIF palette transparency (SpartanJ) +1.27 (2010-08-01) +cast-to-stbi_uc to fix warnings +1.26 (2010-07-24) +fix bug in file buffering for PNG reported by SpartanJ +1.25 (2010-07-17) +refix trans_data warning (Won Chun) +1.24 (2010-07-12) +perf improvements reading from files on platforms with lock-heavy fgetc() +minor perf improvements for jpeg +deprecated type-specific functions so we'll get feedback if they're needed +attempt to fix trans_data warning (Won Chun) +1.23 fixed bug in iPhone support +1.22 (2010-07-10) +removed image *writing* support +stbi_info 
support from Jetro Lauha +GIF support from Jean-Marc Lienher +iPhone PNG-extensions from James Brown +warning-fixes from Nicolas Schulz and Janez Zemva (i.stbi__err. Janez (U+017D)emva) +1.21 fix use of 'stbi_uc' in header (reported by jon blow) +1.20 added support for Softimage PIC, by Tom Seddon +1.19 bug in interlaced PNG corruption check (found by ryg) +1.18 (2008-08-02) +fix a threading bug (local mutable static) +1.17 support interlaced PNG +1.16 major bugfix - stbi__convert_format converted one too many pixels +1.15 initialize some fields for thread safety +1.14 fix threadsafe conversion bug +header-file-only version (#define STBI_HEADER_FILE_ONLY before including) +1.13 threadsafe +1.12 const qualifiers in the API +1.11 Support installable IDCT, colorspace conversion routines +1.10 Fixes for 64-bit (don't use "unsigned long") +optimized upsampling by Fabian "ryg" Giesen +1.09 Fix format-conversion for PSD code (bad global variables!) +1.08 Thatcher Ulrich's PSD code integrated by Nicolas Schulz +1.07 attempt to fix C++ warning/errors again +1.06 attempt to fix C++ warning/errors again +1.05 fix TGA loading to return correct *comp and use good luminance calc +1.04 default float alpha is 1, not 255; use 'void *' for stbi_image_free +1.03 bugfixes to STBI_NO_STDIO, STBI_NO_HDR +1.02 support for (subset of) HDR files, float interface for preferred access to them +1.01 fix bug: possible bug in handling right-side up bmps... 
not sure +fix bug: the stbi__bmp_load() and stbi__tga_load() functions didn't work at all +1.00 interface to zlib that skips zlib header +0.99 correct handling of alpha in palette +0.98 TGA loader by lonesock; dynamically add loaders (untested) +0.97 jpeg errors on too large a file; also catch another malloc failure +0.96 fix detection of invalid v value - particleman@mollyrocket forum +0.95 during header scan, seek to markers in case of padding +0.94 STBI_NO_STDIO to disable stdio usage; rename all #defines the same +0.93 handle jpegtran output; verbose errors +0.92 read 4,8,16,24,32-bit BMP files of several formats +0.91 output 24-bit Windows 3.0 BMP files +0.90 fix a few more warnings; bump version number to approach 1.0 +0.61 bugfixes due to Marc LeBlanc, Christopher Lloyd +0.60 fix compiling as c++ +0.59 fix warnings: merge Dave Moore's -Wall fixes +0.58 fix bug: zlib uncompressed mode len/nlen was wrong endian +0.57 fix bug: jpg last huffman symbol before marker was >9 bits but less than 16 available +0.56 fix bug: zlib uncompressed mode len vs. 
nlen +0.55 fix bug: restart_interval not initialized to 0 +0.54 allow NULL for 'int *comp' +0.53 fix bug in png 3->4; speedup png decoding +0.52 png handles req_comp=3,4 directly; minor cleanup; jpeg comments +0.51 obey req_comp requests, 1-component jpegs return as 1-component, +on 'test' only check type, not whether we support this variant +0.50 (2006-11-19) +first released version +*/ \ No newline at end of file diff --git a/MVS/VolumeRenderer/BattleFireDirect.cpp b/MVS/VolumeRenderer/BattleFireDirect.cpp new file mode 100644 index 00000000..d5916de9 --- /dev/null +++ b/MVS/VolumeRenderer/BattleFireDirect.cpp @@ -0,0 +1,673 @@ +#include "BattleFireDirect.h" +ID3D12Device* gD3D12Device = nullptr; +ID3D12CommandQueue* gCommandQueue = nullptr; +IDXGISwapChain3* gSwapChain = nullptr; +ID3D12Resource* gDSRT = nullptr, * gColorRTs[2]; +int gCurrentRTIndex = 0; +ID3D12DescriptorHeap* gSwapChainRTVHeap = nullptr; +ID3D12DescriptorHeap* gSwapChainDSVHeap = nullptr; +UINT gRTVDescriptorSize = 0; +UINT gDSVDescriptorSize = 0; +ID3D12CommandAllocator* gCommandAllocator = nullptr; +ID3D12GraphicsCommandList* gCommandList = nullptr; +ID3D12Fence* gFence = nullptr; +HANDLE gFenceEvent = nullptr; +UINT64 gFenceValue = 0; + +ID3D12RootSignature* InitRootSignature() { + //1110001110101111111111111111111111 + D3D12_ROOT_PARAMETER rootParameters[4]; + rootParameters[1].ParameterType = D3D12_ROOT_PARAMETER_TYPE_32BIT_CONSTANTS; + rootParameters[1].ShaderVisibility = D3D12_SHADER_VISIBILITY_VERTEX; + rootParameters[1].Constants.RegisterSpace = 0; + rootParameters[1].Constants.ShaderRegister = 0; + rootParameters[1].Constants.Num32BitValues = 4; + + rootParameters[0].ParameterType = D3D12_ROOT_PARAMETER_TYPE_CBV; + rootParameters[0].ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL; + rootParameters[0].Descriptor.RegisterSpace = 0; + rootParameters[0].Descriptor.ShaderRegister = 1;//cbv + + D3D12_DESCRIPTOR_RANGE descriptorRange[1]; + descriptorRange[0].RangeType = 
D3D12_DESCRIPTOR_RANGE_TYPE_SRV; + descriptorRange[0].RegisterSpace = 0; + descriptorRange[0].BaseShaderRegister = 0;//t0 + descriptorRange[0].NumDescriptors = 1; + descriptorRange[0].OffsetInDescriptorsFromTableStart = D3D12_DESCRIPTOR_RANGE_OFFSET_APPEND; + + rootParameters[2].ParameterType = D3D12_ROOT_PARAMETER_TYPE_DESCRIPTOR_TABLE; + rootParameters[2].ShaderVisibility = D3D12_SHADER_VISIBILITY_PIXEL; + rootParameters[2].DescriptorTable.pDescriptorRanges = descriptorRange; + rootParameters[2].DescriptorTable.NumDescriptorRanges = _countof(descriptorRange);//cbv + + rootParameters[3].ParameterType = D3D12_ROOT_PARAMETER_TYPE_SRV; + rootParameters[3].ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL; + rootParameters[3].Descriptor.RegisterSpace = 1; + rootParameters[3].Descriptor.ShaderRegister = 0;//srv + + D3D12_STATIC_SAMPLER_DESC samplerDesc[1]; + memset(samplerDesc, 0,sizeof(D3D12_STATIC_SAMPLER_DESC)*_countof(samplerDesc)); + samplerDesc[0].Filter = D3D12_FILTER_MIN_MAG_MIP_LINEAR; + samplerDesc[0].AddressU = D3D12_TEXTURE_ADDRESS_MODE_CLAMP; + samplerDesc[0].AddressV = D3D12_TEXTURE_ADDRESS_MODE_CLAMP; + samplerDesc[0].AddressW = D3D12_TEXTURE_ADDRESS_MODE_CLAMP; + samplerDesc[0].BorderColor = D3D12_STATIC_BORDER_COLOR_OPAQUE_BLACK; + samplerDesc[0].MaxLOD = D3D12_FLOAT32_MAX; + samplerDesc[0].RegisterSpace = 0; + samplerDesc[0].ShaderRegister = 0;//s0 + samplerDesc[0].ShaderVisibility = D3D12_SHADER_VISIBILITY_PIXEL; + + D3D12_ROOT_SIGNATURE_DESC rootSignatureDesc = {}; + rootSignatureDesc.NumParameters = _countof(rootParameters); + rootSignatureDesc.pParameters = rootParameters; + rootSignatureDesc.NumStaticSamplers = _countof(samplerDesc); + rootSignatureDesc.pStaticSamplers = samplerDesc; + rootSignatureDesc.Flags = D3D12_ROOT_SIGNATURE_FLAG_ALLOW_INPUT_ASSEMBLER_INPUT_LAYOUT; + + //64 DWORD -> float 128 WORD -> 16bit + ID3DBlob* signature; + HRESULT hResult = D3D12SerializeRootSignature(&rootSignatureDesc, D3D_ROOT_SIGNATURE_VERSION_1, &signature, 
nullptr); + ID3D12RootSignature* d3d12RootSignature; + gD3D12Device->CreateRootSignature( + 0, signature->GetBufferPointer(), signature->GetBufferSize(), + IID_PPV_ARGS(&d3d12RootSignature)); + + return d3d12RootSignature; +} + +ID3D12RootSignature* InitVolumeRootSignature() { + D3D12_ROOT_PARAMETER rootParameters[2]; + + rootParameters[0].ParameterType = D3D12_ROOT_PARAMETER_TYPE_CBV; + rootParameters[0].ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL; + rootParameters[0].Descriptor.RegisterSpace = 0; + rootParameters[0].Descriptor.ShaderRegister = 1; + + rootParameters[1].ParameterType = D3D12_ROOT_PARAMETER_TYPE_SRV; + rootParameters[1].ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL; + rootParameters[1].Descriptor.RegisterSpace = 0; + rootParameters[1].Descriptor.ShaderRegister = 1; + + D3D12_ROOT_SIGNATURE_DESC rootSignatureDesc = {}; + rootSignatureDesc.NumParameters = _countof(rootParameters); + rootSignatureDesc.pParameters = rootParameters; + rootSignatureDesc.NumStaticSamplers = 0; + rootSignatureDesc.pStaticSamplers = nullptr; + rootSignatureDesc.Flags = D3D12_ROOT_SIGNATURE_FLAG_ALLOW_INPUT_ASSEMBLER_INPUT_LAYOUT; + + ID3DBlob* signature; + HRESULT hResult = D3D12SerializeRootSignature(&rootSignatureDesc, D3D_ROOT_SIGNATURE_VERSION_1, &signature, nullptr); + ID3D12RootSignature* d3d12RootSignature; + gD3D12Device->CreateRootSignature( + 0, signature->GetBufferPointer(), signature->GetBufferSize(), + IID_PPV_ARGS(&d3d12RootSignature)); + + return d3d12RootSignature; +} + +void CreateShaderFromFile( + LPCTSTR inShaderFilePath, + const char* inMainFunctionName, + const char* inTarget,//"vs_5_0","ps_5_0","vs_4_0" + D3D12_SHADER_BYTECODE* inShader) { + ID3DBlob* shaderBuffer = nullptr; + ID3DBlob* errorBuffer = nullptr; + HRESULT hResult = D3DCompileFromFile(inShaderFilePath, nullptr, D3D_COMPILE_STANDARD_FILE_INCLUDE, + inMainFunctionName, inTarget, D3DCOMPILE_DEBUG | D3DCOMPILE_SKIP_OPTIMIZATION, + 0, &shaderBuffer, &errorBuffer); + if (FAILED(hResult)) { + 
char szLog[1024] = {0}; + if (errorBuffer) { + strcpy(szLog, (char*)errorBuffer->GetBufferPointer()); + } else { + strcpy(szLog, "Unknown error"); + } + printf("CreateShaderFromFile error : [%s][%s]:[%s]\n", inMainFunctionName, inTarget, szLog); + errorBuffer->Release(); + return; + } + inShader->pShaderBytecode = shaderBuffer->GetBufferPointer(); + inShader->BytecodeLength = shaderBuffer->GetBufferSize(); +} +ID3D12Resource* CreateConstantBufferObject(int inDataLen) { + D3D12_HEAP_PROPERTIES d3dHeapProperties = {}; + d3dHeapProperties.Type = D3D12_HEAP_TYPE_UPLOAD;//cpu,gpu + D3D12_RESOURCE_DESC d3d12ResourceDesc = {}; + d3d12ResourceDesc.Dimension = D3D12_RESOURCE_DIMENSION_BUFFER; + d3d12ResourceDesc.Alignment = 0; + d3d12ResourceDesc.Width = inDataLen; + d3d12ResourceDesc.Height = 1; + d3d12ResourceDesc.DepthOrArraySize = 1; + d3d12ResourceDesc.MipLevels = 1; + d3d12ResourceDesc.Format = DXGI_FORMAT_UNKNOWN; + d3d12ResourceDesc.SampleDesc.Count = 1; + d3d12ResourceDesc.SampleDesc.Quality = 0; + d3d12ResourceDesc.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR; + d3d12ResourceDesc.Flags = D3D12_RESOURCE_FLAG_NONE; + + ID3D12Resource* bufferObject = nullptr; + gD3D12Device->CreateCommittedResource( + &d3dHeapProperties, + D3D12_HEAP_FLAG_NONE, + &d3d12ResourceDesc, + D3D12_RESOURCE_STATE_GENERIC_READ, + nullptr, + IID_PPV_ARGS(&bufferObject) + ); + return bufferObject; +} +void UpdateConstantBuffer(ID3D12Resource* inCB, void* inData, int inDataLen) { + D3D12_RANGE d3d12Range = { 0 }; + unsigned char* pBuffer = nullptr; + inCB->Map(0, &d3d12Range, (void**)&pBuffer); + memcpy(pBuffer, inData, inDataLen); + inCB->Unmap(0, nullptr); +} + +ID3D12PipelineState* CreatePSO(ID3D12RootSignature* inID3D12RootSignature, + D3D12_SHADER_BYTECODE inVertexShader, D3D12_SHADER_BYTECODE inPixelShader, + D3D12_SHADER_BYTECODE inGSShader) { + D3D12_INPUT_ELEMENT_DESC vertexDataElementDesc[] = { + {"POSITION",0,DXGI_FORMAT_R32G32B32A32_FLOAT,0,0,D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA,0}, 
+ {"TEXCOORD",0,DXGI_FORMAT_R32G32B32A32_FLOAT,0,sizeof(float) * 4,D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA,0}, + {"NORMAL",0,DXGI_FORMAT_R32G32B32A32_FLOAT,0,sizeof(float) * 8,D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA,0}, + {"TANGENT",0,DXGI_FORMAT_R32G32B32A32_FLOAT,0,sizeof(float) * 12,D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA,0} + }; + D3D12_INPUT_LAYOUT_DESC vertexDataLayoutDesc = {}; + vertexDataLayoutDesc.NumElements = 4; + vertexDataLayoutDesc.pInputElementDescs = vertexDataElementDesc; + + D3D12_GRAPHICS_PIPELINE_STATE_DESC psoDesc = {}; + psoDesc.pRootSignature = inID3D12RootSignature; + psoDesc.VS = inVertexShader; + psoDesc.GS = inGSShader; + psoDesc.PS = inPixelShader; + psoDesc.RTVFormats[0] = DXGI_FORMAT_R8G8B8A8_UNORM; + psoDesc.DSVFormat = DXGI_FORMAT_D24_UNORM_S8_UINT; + psoDesc.SampleDesc.Count = 1; + psoDesc.SampleDesc.Quality = 0; + psoDesc.SampleMask = 0xffffffff; + psoDesc.InputLayout = vertexDataLayoutDesc; + psoDesc.PrimitiveTopologyType = D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE; + + psoDesc.RasterizerState.FillMode = D3D12_FILL_MODE_SOLID; + psoDesc.RasterizerState.CullMode = D3D12_CULL_MODE_BACK; + psoDesc.RasterizerState.DepthClipEnable = TRUE; + + psoDesc.DepthStencilState.DepthEnable = true; + psoDesc.DepthStencilState.DepthWriteMask = D3D12_DEPTH_WRITE_MASK_ALL; + psoDesc.DepthStencilState.DepthFunc = D3D12_COMPARISON_FUNC_LESS_EQUAL; + + psoDesc.BlendState = { 0 }; + D3D12_RENDER_TARGET_BLEND_DESC rtBlendDesc = { + FALSE,FALSE, + D3D12_BLEND_SRC_ALPHA,D3D12_BLEND_INV_SRC_ALPHA,D3D12_BLEND_OP_ADD, + D3D12_BLEND_SRC_ALPHA,D3D12_BLEND_INV_SRC_ALPHA,D3D12_BLEND_OP_ADD, + D3D12_LOGIC_OP_NOOP, + D3D12_COLOR_WRITE_ENABLE_ALL, + }; + for (int i = 0; i < D3D12_SIMULTANEOUS_RENDER_TARGET_COUNT; ++i) + psoDesc.BlendState.RenderTarget[i] = rtBlendDesc; + psoDesc.NumRenderTargets = 1; + ID3D12PipelineState* d3d12PSO = nullptr; + + HRESULT hResult = gD3D12Device->CreateGraphicsPipelineState(&psoDesc, IID_PPV_ARGS(&d3d12PSO)); + if 
(FAILED(hResult)) { + return nullptr; + } + return d3d12PSO; +} + +ID3D12PipelineState* CreateVolumePSO(ID3D12RootSignature* inID3D12RootSignature, + D3D12_SHADER_BYTECODE inVertexShader, D3D12_SHADER_BYTECODE inPixelShader) { + D3D12_INPUT_ELEMENT_DESC vertexDataElementDesc[] = { + {"POSITION",0,DXGI_FORMAT_R32G32B32_FLOAT,0,0,D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA,0} + }; + D3D12_INPUT_LAYOUT_DESC vertexDataLayoutDesc = {}; + vertexDataLayoutDesc.NumElements = 1; + vertexDataLayoutDesc.pInputElementDescs = vertexDataElementDesc; + + D3D12_GRAPHICS_PIPELINE_STATE_DESC psoDesc = {}; + psoDesc.pRootSignature = inID3D12RootSignature; + psoDesc.VS = inVertexShader; + psoDesc.PS = inPixelShader; + psoDesc.RTVFormats[0] = DXGI_FORMAT_R8G8B8A8_UNORM; + psoDesc.DSVFormat = DXGI_FORMAT_D24_UNORM_S8_UINT; + psoDesc.SampleDesc.Count = 1; + psoDesc.SampleDesc.Quality = 0; + psoDesc.SampleMask = 0xffffffff; + psoDesc.InputLayout = vertexDataLayoutDesc; + psoDesc.PrimitiveTopologyType = D3D12_PRIMITIVE_TOPOLOGY_TYPE_LINE; + + psoDesc.RasterizerState.FillMode = D3D12_FILL_MODE_WIREFRAME; + psoDesc.RasterizerState.CullMode = D3D12_CULL_MODE_NONE; + psoDesc.RasterizerState.FrontCounterClockwise = FALSE; + psoDesc.RasterizerState.DepthBias = D3D12_DEFAULT_DEPTH_BIAS; + psoDesc.RasterizerState.DepthBiasClamp = D3D12_DEFAULT_DEPTH_BIAS_CLAMP; + psoDesc.RasterizerState.SlopeScaledDepthBias = D3D12_DEFAULT_SLOPE_SCALED_DEPTH_BIAS; + psoDesc.RasterizerState.DepthClipEnable = TRUE; + psoDesc.RasterizerState.MultisampleEnable = FALSE; + psoDesc.RasterizerState.AntialiasedLineEnable = FALSE; + psoDesc.RasterizerState.ForcedSampleCount = 0; + psoDesc.RasterizerState.ConservativeRaster = D3D12_CONSERVATIVE_RASTERIZATION_MODE_OFF; + + psoDesc.DepthStencilState.DepthEnable = TRUE; + psoDesc.DepthStencilState.DepthWriteMask = D3D12_DEPTH_WRITE_MASK_ALL; + psoDesc.DepthStencilState.DepthFunc = D3D12_COMPARISON_FUNC_LESS; + psoDesc.DepthStencilState.StencilEnable = FALSE; + + 
psoDesc.BlendState.AlphaToCoverageEnable = FALSE; + psoDesc.BlendState.IndependentBlendEnable = FALSE; + for (int i = 0; i < D3D12_SIMULTANEOUS_RENDER_TARGET_COUNT; ++i) { + psoDesc.BlendState.RenderTarget[i].BlendEnable = FALSE; + psoDesc.BlendState.RenderTarget[i].LogicOpEnable = FALSE; + psoDesc.BlendState.RenderTarget[i].SrcBlend = D3D12_BLEND_ONE; + psoDesc.BlendState.RenderTarget[i].DestBlend = D3D12_BLEND_ZERO; + psoDesc.BlendState.RenderTarget[i].BlendOp = D3D12_BLEND_OP_ADD; + psoDesc.BlendState.RenderTarget[i].SrcBlendAlpha = D3D12_BLEND_ONE; + psoDesc.BlendState.RenderTarget[i].DestBlendAlpha = D3D12_BLEND_ZERO; + psoDesc.BlendState.RenderTarget[i].BlendOpAlpha = D3D12_BLEND_OP_ADD; + psoDesc.BlendState.RenderTarget[i].LogicOp = D3D12_LOGIC_OP_NOOP; + psoDesc.BlendState.RenderTarget[i].RenderTargetWriteMask = D3D12_COLOR_WRITE_ENABLE_ALL; + } + psoDesc.NumRenderTargets = 1; + ID3D12PipelineState* d3d12PSO = nullptr; + + HRESULT hResult = gD3D12Device->CreateGraphicsPipelineState(&psoDesc, IID_PPV_ARGS(&d3d12PSO)); + if (FAILED(hResult)) { + printf("CreateVolumePSO failed: 0x%08X\n", hResult); + return nullptr; + } + return d3d12PSO; +} + +ID3D12PipelineState* CreateQuadPSO(ID3D12RootSignature* inID3D12RootSignature, + D3D12_SHADER_BYTECODE inVertexShader, D3D12_SHADER_BYTECODE inPixelShader) { + D3D12_INPUT_ELEMENT_DESC vertexDataElementDesc[] = { + {"POSITION",0,DXGI_FORMAT_R32G32_FLOAT,0,0,D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA,0}, + {"TEXCOORD",0,DXGI_FORMAT_R32G32_FLOAT,0,8,D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA,0} + }; + D3D12_INPUT_LAYOUT_DESC vertexDataLayoutDesc = {}; + vertexDataLayoutDesc.NumElements = 2; + vertexDataLayoutDesc.pInputElementDescs = vertexDataElementDesc; + + D3D12_GRAPHICS_PIPELINE_STATE_DESC psoDesc = {}; + psoDesc.pRootSignature = inID3D12RootSignature; + psoDesc.VS = inVertexShader; + psoDesc.PS = inPixelShader; + psoDesc.RTVFormats[0] = DXGI_FORMAT_R8G8B8A8_UNORM; + psoDesc.DSVFormat = DXGI_FORMAT_D24_UNORM_S8_UINT; 
+ psoDesc.SampleDesc.Count = 1; + psoDesc.SampleDesc.Quality = 0; + psoDesc.SampleMask = 0xffffffff; + psoDesc.InputLayout = vertexDataLayoutDesc; + psoDesc.PrimitiveTopologyType = D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE; + + psoDesc.RasterizerState.FillMode = D3D12_FILL_MODE_SOLID; + psoDesc.RasterizerState.CullMode = D3D12_CULL_MODE_NONE; + psoDesc.RasterizerState.FrontCounterClockwise = FALSE; + psoDesc.RasterizerState.DepthBias = D3D12_DEFAULT_DEPTH_BIAS; + psoDesc.RasterizerState.DepthBiasClamp = D3D12_DEFAULT_DEPTH_BIAS_CLAMP; + psoDesc.RasterizerState.SlopeScaledDepthBias = D3D12_DEFAULT_SLOPE_SCALED_DEPTH_BIAS; + psoDesc.RasterizerState.DepthClipEnable = TRUE; + psoDesc.RasterizerState.MultisampleEnable = FALSE; + psoDesc.RasterizerState.AntialiasedLineEnable = FALSE; + psoDesc.RasterizerState.ForcedSampleCount = 0; + psoDesc.RasterizerState.ConservativeRaster = D3D12_CONSERVATIVE_RASTERIZATION_MODE_OFF; + + psoDesc.DepthStencilState.DepthEnable = TRUE; + psoDesc.DepthStencilState.DepthWriteMask = D3D12_DEPTH_WRITE_MASK_ZERO; + psoDesc.DepthStencilState.DepthFunc = D3D12_COMPARISON_FUNC_LESS; + psoDesc.DepthStencilState.StencilEnable = FALSE; + + psoDesc.BlendState.AlphaToCoverageEnable = FALSE; + psoDesc.BlendState.IndependentBlendEnable = FALSE; + for (int i = 0; i < D3D12_SIMULTANEOUS_RENDER_TARGET_COUNT; ++i) { + psoDesc.BlendState.RenderTarget[i].BlendEnable = TRUE; + psoDesc.BlendState.RenderTarget[i].LogicOpEnable = FALSE; + psoDesc.BlendState.RenderTarget[i].SrcBlend = D3D12_BLEND_SRC_ALPHA; + psoDesc.BlendState.RenderTarget[i].DestBlend = D3D12_BLEND_INV_SRC_ALPHA; + psoDesc.BlendState.RenderTarget[i].BlendOp = D3D12_BLEND_OP_ADD; + psoDesc.BlendState.RenderTarget[i].SrcBlendAlpha = D3D12_BLEND_ONE; + psoDesc.BlendState.RenderTarget[i].DestBlendAlpha = D3D12_BLEND_INV_SRC_ALPHA; + psoDesc.BlendState.RenderTarget[i].BlendOpAlpha = D3D12_BLEND_OP_ADD; + psoDesc.BlendState.RenderTarget[i].LogicOp = D3D12_LOGIC_OP_NOOP; + 
psoDesc.BlendState.RenderTarget[i].RenderTargetWriteMask = D3D12_COLOR_WRITE_ENABLE_ALL; + } + psoDesc.NumRenderTargets = 1; + ID3D12PipelineState* d3d12PSO = nullptr; + + HRESULT hResult = gD3D12Device->CreateGraphicsPipelineState(&psoDesc, IID_PPV_ARGS(&d3d12PSO)); + if (FAILED(hResult)) { + printf("CreateQuadPSO failed: 0x%08X\n", hResult); + return nullptr; + } + return d3d12PSO; +} + +bool InitD3D12(HWND inHWND, int inWidth, int inHeight) { + HRESULT hResult; + UINT dxgiFactoryFlags = 0; +#ifdef _DEBUG + { + ID3D12Debug* debugController = nullptr; + if (SUCCEEDED(D3D12GetDebugInterface(IID_PPV_ARGS(&debugController)))) { + debugController->EnableDebugLayer(); + dxgiFactoryFlags |= DXGI_CREATE_FACTORY_DEBUG; + } + } +#endif + IDXGIFactory4* dxgiFactory; + hResult = CreateDXGIFactory2(dxgiFactoryFlags, IID_PPV_ARGS(&dxgiFactory)); + if (FAILED(hResult)) { + return false; + } + IDXGIAdapter1* adapter; + int adapterIndex = 0; + bool adapterFound = false; + while (dxgiFactory->EnumAdapters1(adapterIndex, &adapter) != DXGI_ERROR_NOT_FOUND) { + DXGI_ADAPTER_DESC1 desc; + adapter->GetDesc1(&desc); + if (desc.Flags & DXGI_ADAPTER_FLAG_SOFTWARE) { + continue; + } + hResult = D3D12CreateDevice(adapter, D3D_FEATURE_LEVEL_11_0, __uuidof(ID3D12Device), nullptr); + if (SUCCEEDED(hResult)) { + adapterFound = true; + break; + } + adapterIndex++; + } + if (false == adapterFound) { + return false; + } + hResult = D3D12CreateDevice(adapter, D3D_FEATURE_LEVEL_11_0, IID_PPV_ARGS(&gD3D12Device)); + if (FAILED(hResult)) { + return false; + } + D3D12_COMMAND_QUEUE_DESC d3d12CommandQueueDesc = {}; + hResult = gD3D12Device->CreateCommandQueue(&d3d12CommandQueueDesc, IID_PPV_ARGS(&gCommandQueue)); + if (FAILED(hResult)) { + return false; + } + DXGI_SWAP_CHAIN_DESC swapChainDesc = {}; + swapChainDesc.BufferCount = 2; + swapChainDesc.BufferDesc = {}; + swapChainDesc.BufferDesc.Width = inWidth; + swapChainDesc.BufferDesc.Height = inHeight; + swapChainDesc.BufferDesc.Format = 
DXGI_FORMAT_R8G8B8A8_UNORM; + swapChainDesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT; + swapChainDesc.OutputWindow = inHWND; + swapChainDesc.SampleDesc.Count = 1; + swapChainDesc.Windowed = true; + swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_DISCARD; + + IDXGISwapChain* swapChain = nullptr; + dxgiFactory->CreateSwapChain(gCommandQueue, &swapChainDesc, &swapChain); + gSwapChain = static_cast(swapChain); + + D3D12_HEAP_PROPERTIES d3dHeapProperties = {}; + d3dHeapProperties.Type = D3D12_HEAP_TYPE_DEFAULT; + + D3D12_RESOURCE_DESC d3d12ResourceDesc = {}; + d3d12ResourceDesc.Dimension = D3D12_RESOURCE_DIMENSION_TEXTURE2D; + d3d12ResourceDesc.Alignment = 0; + d3d12ResourceDesc.Width = inWidth; + d3d12ResourceDesc.Height = inHeight; + d3d12ResourceDesc.DepthOrArraySize = 1; + d3d12ResourceDesc.MipLevels = 1; + d3d12ResourceDesc.Format = DXGI_FORMAT_D24_UNORM_S8_UINT; + d3d12ResourceDesc.SampleDesc.Count = 1; + d3d12ResourceDesc.SampleDesc.Quality = 0; + d3d12ResourceDesc.Layout = D3D12_TEXTURE_LAYOUT_UNKNOWN; + d3d12ResourceDesc.Flags = D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL; + + D3D12_CLEAR_VALUE dsClearValue = {}; + dsClearValue.Format = DXGI_FORMAT_D24_UNORM_S8_UINT; + dsClearValue.DepthStencil.Depth = 1.0f; + dsClearValue.DepthStencil.Stencil = 0; + + gD3D12Device->CreateCommittedResource(&d3dHeapProperties, + D3D12_HEAP_FLAG_NONE, + &d3d12ResourceDesc, + D3D12_RESOURCE_STATE_DEPTH_WRITE, + &dsClearValue, + IID_PPV_ARGS(&gDSRT) + ); + //RTV,DSV,alloc + D3D12_DESCRIPTOR_HEAP_DESC d3dDescriptorHeapDescRTV = {}; + d3dDescriptorHeapDescRTV.NumDescriptors = 2; + d3dDescriptorHeapDescRTV.Type = D3D12_DESCRIPTOR_HEAP_TYPE_RTV; + gD3D12Device->CreateDescriptorHeap(&d3dDescriptorHeapDescRTV, IID_PPV_ARGS(&gSwapChainRTVHeap)); + gRTVDescriptorSize = gD3D12Device->GetDescriptorHandleIncrementSize(D3D12_DESCRIPTOR_HEAP_TYPE_RTV); + + D3D12_DESCRIPTOR_HEAP_DESC d3dDescriptorHeapDescDSV = {}; + d3dDescriptorHeapDescDSV.NumDescriptors = 1; + d3dDescriptorHeapDescDSV.Type = 
D3D12_DESCRIPTOR_HEAP_TYPE_DSV; + gD3D12Device->CreateDescriptorHeap(&d3dDescriptorHeapDescDSV, IID_PPV_ARGS(&gSwapChainDSVHeap)); + gDSVDescriptorSize = gD3D12Device->GetDescriptorHandleIncrementSize(D3D12_DESCRIPTOR_HEAP_TYPE_DSV); + + D3D12_CPU_DESCRIPTOR_HANDLE rtvHeapStart = gSwapChainRTVHeap->GetCPUDescriptorHandleForHeapStart(); + for (int i = 0; i < 2; i++) { + gSwapChain->GetBuffer(i, IID_PPV_ARGS(&gColorRTs[i])); + D3D12_CPU_DESCRIPTOR_HANDLE rtvPointer; + rtvPointer.ptr = rtvHeapStart.ptr + i * gRTVDescriptorSize; + gD3D12Device->CreateRenderTargetView(gColorRTs[i], nullptr, rtvPointer); + } + D3D12_DEPTH_STENCIL_VIEW_DESC d3dDSViewDesc = {}; + d3dDSViewDesc.Format = DXGI_FORMAT_D24_UNORM_S8_UINT; + d3dDSViewDesc.ViewDimension = D3D12_DSV_DIMENSION_TEXTURE2D; + + gD3D12Device->CreateDepthStencilView(gDSRT, &d3dDSViewDesc, gSwapChainDSVHeap->GetCPUDescriptorHandleForHeapStart()); + + gD3D12Device->CreateCommandAllocator(D3D12_COMMAND_LIST_TYPE_DIRECT, IID_PPV_ARGS(&gCommandAllocator)); + gD3D12Device->CreateCommandList(0, D3D12_COMMAND_LIST_TYPE_DIRECT, gCommandAllocator, nullptr, IID_PPV_ARGS(&gCommandList)); + + gD3D12Device->CreateFence(0, D3D12_FENCE_FLAG_NONE, IID_PPV_ARGS(&gFence)); + gFenceEvent = CreateEvent(nullptr, FALSE, FALSE, nullptr); + + return true; +} +ID3D12CommandAllocator* GetCommandAllocator() { + return gCommandAllocator; +} +ID3D12GraphicsCommandList* GetCommandList() { + return gCommandList; +} +void WaitForCompletionOfCommandList() { + if (gFence->GetCompletedValue() < gFenceValue) { + gFence->SetEventOnCompletion(gFenceValue, gFenceEvent); + WaitForSingleObject(gFenceEvent, INFINITE); + } +} +void EndCommandList() { + gCommandList->Close();// + ID3D12CommandList* ppCommandLists[] = { gCommandList }; + gCommandQueue->ExecuteCommandLists(1, ppCommandLists); + //CommandList + gFenceValue += 1; + gCommandQueue->Signal(gFence, gFenceValue);// +} +void BeginRenderToSwapChain(ID3D12GraphicsCommandList* inCommandList) { + gCurrentRTIndex 
= gSwapChain->GetCurrentBackBufferIndex(); + D3D12_RESOURCE_BARRIER barrier = InitResourceBarrier(gColorRTs[gCurrentRTIndex], D3D12_RESOURCE_STATE_PRESENT, D3D12_RESOURCE_STATE_RENDER_TARGET); + inCommandList->ResourceBarrier(1, &barrier); + D3D12_CPU_DESCRIPTOR_HANDLE colorRT, dsv; + dsv.ptr = gSwapChainDSVHeap->GetCPUDescriptorHandleForHeapStart().ptr; + colorRT.ptr = gSwapChainRTVHeap->GetCPUDescriptorHandleForHeapStart().ptr + gCurrentRTIndex * gRTVDescriptorSize; + inCommandList->OMSetRenderTargets(1, &colorRT, FALSE, &dsv); + D3D12_VIEWPORT viewport = { 0.0f,0.0f,1280.0f,720.0f }; + D3D12_RECT scissorRect = { 0,0,1280,720 }; + inCommandList->RSSetViewports(1, &viewport); + inCommandList->RSSetScissorRects(1, &scissorRect); + const float clearColor[] = { 0.0f,0.0f,0.0f,1.0f }; + inCommandList->ClearRenderTargetView(colorRT, clearColor, 0, nullptr); + inCommandList->ClearDepthStencilView(dsv, D3D12_CLEAR_FLAG_DEPTH | D3D12_CLEAR_FLAG_STENCIL, 1.0f, 0, 0, nullptr); +} +void EndRenderToSwapChain(ID3D12GraphicsCommandList* inCommandList) { + D3D12_RESOURCE_BARRIER barrier = InitResourceBarrier(gColorRTs[gCurrentRTIndex], D3D12_RESOURCE_STATE_RENDER_TARGET, D3D12_RESOURCE_STATE_PRESENT); + inCommandList->ResourceBarrier(1, &barrier); +} +void SwapD3D12Buffers() { + gSwapChain->Present(0, 0); +} +ID3D12Resource* CreateBufferObject(ID3D12GraphicsCommandList* inCommandList, + void* inData, int inDataLen, D3D12_RESOURCE_STATES inFinalResourceState) { + D3D12_HEAP_PROPERTIES d3dHeapProperties = {}; + d3dHeapProperties.Type = D3D12_HEAP_TYPE_DEFAULT;//gpu + D3D12_RESOURCE_DESC d3d12ResourceDesc = {}; + d3d12ResourceDesc.Dimension = D3D12_RESOURCE_DIMENSION_BUFFER; + d3d12ResourceDesc.Alignment = 0; + d3d12ResourceDesc.Width = inDataLen; + d3d12ResourceDesc.Height = 1; + d3d12ResourceDesc.DepthOrArraySize = 1; + d3d12ResourceDesc.MipLevels = 1; + d3d12ResourceDesc.Format = DXGI_FORMAT_UNKNOWN; + d3d12ResourceDesc.SampleDesc.Count = 1; + 
d3d12ResourceDesc.SampleDesc.Quality = 0; + d3d12ResourceDesc.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR; + d3d12ResourceDesc.Flags = D3D12_RESOURCE_FLAG_NONE; + + ID3D12Resource* bufferObject = nullptr; + gD3D12Device->CreateCommittedResource( + &d3dHeapProperties, + D3D12_HEAP_FLAG_NONE, + &d3d12ResourceDesc, + D3D12_RESOURCE_STATE_COPY_DEST, + nullptr, + IID_PPV_ARGS(&bufferObject) + ); + d3d12ResourceDesc = bufferObject->GetDesc(); + UINT64 memorySizeUsed = 0; + UINT64 rowSizeInBytes = 0; + UINT rowUsed = 0; + D3D12_PLACED_SUBRESOURCE_FOOTPRINT subresourceFootprint; + gD3D12Device->GetCopyableFootprints(&d3d12ResourceDesc, 0, 1, 0, + &subresourceFootprint, &rowUsed, &rowSizeInBytes, &memorySizeUsed); + // 3 x 4 x 4 = 48bytes,32bytes,24bytes + 24bytes + ID3D12Resource* tempBufferObject = nullptr; + d3dHeapProperties = {}; + d3dHeapProperties.Type = D3D12_HEAP_TYPE_UPLOAD;//cpu,gpu + gD3D12Device->CreateCommittedResource( + &d3dHeapProperties, + D3D12_HEAP_FLAG_NONE, + &d3d12ResourceDesc, + D3D12_RESOURCE_STATE_GENERIC_READ, + nullptr, + IID_PPV_ARGS(&tempBufferObject) + ); + + BYTE* pData; + tempBufferObject->Map(0, nullptr, reinterpret_cast(&pData)); + BYTE* pDstTempBuffer = reinterpret_cast(pData + subresourceFootprint.Offset); + const BYTE* pSrcData = reinterpret_cast(inData); + for (UINT i = 0; i < rowUsed; i++) { + memcpy(pDstTempBuffer + subresourceFootprint.Footprint.RowPitch * i, pSrcData + rowSizeInBytes * i, rowSizeInBytes); + } + tempBufferObject->Unmap(0, nullptr); + inCommandList->CopyBufferRegion(bufferObject, 0, tempBufferObject, 0, subresourceFootprint.Footprint.Width); + D3D12_RESOURCE_BARRIER barrier = InitResourceBarrier(bufferObject, D3D12_RESOURCE_STATE_COPY_DEST, inFinalResourceState); + inCommandList->ResourceBarrier(1, &barrier); + return bufferObject; +} +D3D12_RESOURCE_BARRIER InitResourceBarrier( + ID3D12Resource* inResource, D3D12_RESOURCE_STATES inPrevState, + D3D12_RESOURCE_STATES inNextState) { + D3D12_RESOURCE_BARRIER 
d3d12ResourceBarrier; + memset(&d3d12ResourceBarrier, 0, sizeof(d3d12ResourceBarrier)); + d3d12ResourceBarrier.Type = D3D12_RESOURCE_BARRIER_TYPE_TRANSITION; + d3d12ResourceBarrier.Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE; + d3d12ResourceBarrier.Transition.pResource = inResource; + d3d12ResourceBarrier.Transition.StateBefore = inPrevState; + d3d12ResourceBarrier.Transition.StateAfter = inNextState; + d3d12ResourceBarrier.Transition.Subresource = D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES; + return d3d12ResourceBarrier; +} + +ID3D12Resource* CreateTexture2D(ID3D12GraphicsCommandList* inCommandList, + const void* inPixelData, int inDataSizeInBytes, int inWidth, int inHeight, + DXGI_FORMAT inFormat) { + D3D12_HEAP_PROPERTIES d3dHeapProperties = {}; + d3dHeapProperties.Type = D3D12_HEAP_TYPE_DEFAULT; + + D3D12_RESOURCE_DESC d3d12ResourceDesc = {}; + d3d12ResourceDesc.Dimension = D3D12_RESOURCE_DIMENSION_TEXTURE2D; + d3d12ResourceDesc.Alignment = 0; + d3d12ResourceDesc.Width = inWidth; + d3d12ResourceDesc.Height = inHeight; + d3d12ResourceDesc.DepthOrArraySize = 1; + d3d12ResourceDesc.MipLevels = 1; + d3d12ResourceDesc.Format = inFormat; + d3d12ResourceDesc.SampleDesc.Count = 1; + d3d12ResourceDesc.SampleDesc.Quality = 0; + d3d12ResourceDesc.Layout = D3D12_TEXTURE_LAYOUT_UNKNOWN; + d3d12ResourceDesc.Flags = D3D12_RESOURCE_FLAG_NONE; + + ID3D12Resource* texture = nullptr; + gD3D12Device->CreateCommittedResource(&d3dHeapProperties, + D3D12_HEAP_FLAG_NONE, + &d3d12ResourceDesc, + D3D12_RESOURCE_STATE_COPY_DEST, + nullptr, + IID_PPV_ARGS(&texture) + ); + d3d12ResourceDesc = texture->GetDesc(); + UINT64 memorySizeUsed = 0; + UINT64 rowSizeInBytes = 0; + UINT rowUsed = 0; + D3D12_PLACED_SUBRESOURCE_FOOTPRINT subresourceFootprint; + gD3D12Device->GetCopyableFootprints(&d3d12ResourceDesc, 0, 1, 0, + &subresourceFootprint, &rowUsed, &rowSizeInBytes, &memorySizeUsed); + // 3 x 4 x 4 = 48bytes,32bytes,24bytes + 24bytes + ID3D12Resource* tempBufferObject = nullptr; + 
D3D12_HEAP_PROPERTIES d3dTempHeapProperties = {}; + d3dTempHeapProperties.Type = D3D12_HEAP_TYPE_UPLOAD;//cpu,gpu + + D3D12_RESOURCE_DESC d3d12TempResourceDesc = {}; + d3d12TempResourceDesc.Dimension = D3D12_RESOURCE_DIMENSION_BUFFER; + d3d12TempResourceDesc.Alignment = 0; + d3d12TempResourceDesc.Width = memorySizeUsed; + d3d12TempResourceDesc.Height = 1; + d3d12TempResourceDesc.DepthOrArraySize = 1; + d3d12TempResourceDesc.MipLevels = 1; + d3d12TempResourceDesc.Format = DXGI_FORMAT_UNKNOWN; + d3d12TempResourceDesc.SampleDesc.Count = 1; + d3d12TempResourceDesc.SampleDesc.Quality = 0; + d3d12TempResourceDesc.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR; + d3d12TempResourceDesc.Flags = D3D12_RESOURCE_FLAG_NONE; + + gD3D12Device->CreateCommittedResource( + &d3dTempHeapProperties, + D3D12_HEAP_FLAG_NONE, + &d3d12TempResourceDesc, + D3D12_RESOURCE_STATE_GENERIC_READ, + nullptr, + IID_PPV_ARGS(&tempBufferObject) + ); + BYTE* pData; + tempBufferObject->Map(0, nullptr, reinterpret_cast(&pData)); + BYTE* pDstTempBuffer = reinterpret_cast(pData + subresourceFootprint.Offset); + const BYTE* pSrcData = reinterpret_cast(inPixelData); + for (UINT i = 0; i < rowUsed; i++) { + memcpy(pDstTempBuffer + subresourceFootprint.Footprint.RowPitch * i, pSrcData + rowSizeInBytes * i, rowSizeInBytes); + } + tempBufferObject->Unmap(0, nullptr); + D3D12_TEXTURE_COPY_LOCATION dst = {}; + dst.pResource = texture; + dst.Type = D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX; + dst.SubresourceIndex = 0; + + D3D12_TEXTURE_COPY_LOCATION src = {}; + src.pResource = tempBufferObject; + src.Type = D3D12_TEXTURE_COPY_TYPE_PLACED_FOOTPRINT; + src.PlacedFootprint = subresourceFootprint; + inCommandList->CopyTextureRegion(&dst, 0, 0, 0, &src, nullptr); + + D3D12_RESOURCE_BARRIER barrier = InitResourceBarrier(texture, + D3D12_RESOURCE_STATE_COPY_DEST,D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE); + inCommandList->ResourceBarrier(1, &barrier); + return texture; +} +ID3D12Device* GetD3DDevice() { + return gD3D12Device; 
+} \ No newline at end of file diff --git a/MVS/VolumeRenderer/BattleFireDirect.h b/MVS/VolumeRenderer/BattleFireDirect.h new file mode 100644 index 00000000..c2bda888 --- /dev/null +++ b/MVS/VolumeRenderer/BattleFireDirect.h @@ -0,0 +1,40 @@ +#pragma once +#include +#include +#include +#include +#include + +D3D12_RESOURCE_BARRIER InitResourceBarrier( + ID3D12Resource* inResource, D3D12_RESOURCE_STATES inPrevState, + D3D12_RESOURCE_STATES inNextState); +ID3D12RootSignature* InitRootSignature(); +ID3D12RootSignature* InitVolumeRootSignature(); +void CreateShaderFromFile( + LPCTSTR inShaderFilePath, + const char* inMainFunctionName, + const char* inTarget,//"vs_5_0","ps_5_0","vs_4_0" + D3D12_SHADER_BYTECODE* inShader); +ID3D12Resource* CreateConstantBufferObject(int inDataLen); +void UpdateConstantBuffer(ID3D12Resource* inCB, void* inData, int inDataLen); +ID3D12Resource* CreateBufferObject(ID3D12GraphicsCommandList* inCommandList, + void* inData, int inDataLen, D3D12_RESOURCE_STATES inFinalResourceState); +ID3D12PipelineState* CreatePSO(ID3D12RootSignature* inID3D12RootSignature, + D3D12_SHADER_BYTECODE inVertexShader, D3D12_SHADER_BYTECODE inPixelShader, + D3D12_SHADER_BYTECODE inGSShader); +ID3D12PipelineState* CreateVolumePSO(ID3D12RootSignature* inID3D12RootSignature, + D3D12_SHADER_BYTECODE inVertexShader, D3D12_SHADER_BYTECODE inPixelShader); +ID3D12PipelineState* CreateQuadPSO(ID3D12RootSignature* inID3D12RootSignature, + D3D12_SHADER_BYTECODE inVertexShader, D3D12_SHADER_BYTECODE inPixelShader); +bool InitD3D12(HWND inHWND, int inWidth, int inHeight); +ID3D12GraphicsCommandList* GetCommandList(); +ID3D12CommandAllocator* GetCommandAllocator(); +void WaitForCompletionOfCommandList(); +void EndCommandList(); +void BeginRenderToSwapChain(ID3D12GraphicsCommandList* inCommandList); +void EndRenderToSwapChain(ID3D12GraphicsCommandList* inCommandList); +void SwapD3D12Buffers(); +ID3D12Resource* CreateTexture2D(ID3D12GraphicsCommandList* inCommandList, + const 
void*inPixelData,int inDataSizeInBytes,int inWidth,int inHeight, + DXGI_FORMAT inFormat); +ID3D12Device* GetD3DDevice(); diff --git a/MVS/VolumeRenderer/CMakeLists.txt b/MVS/VolumeRenderer/CMakeLists.txt new file mode 100644 index 00000000..c11d5a6c --- /dev/null +++ b/MVS/VolumeRenderer/CMakeLists.txt @@ -0,0 +1,56 @@ +cmake_minimum_required(VERSION 3.15) +project(XCVolumeRenderer VERSION 1.0 LANGUAGES CXX) + +set(CMAKE_CXX_STANDARD 17) +set(CMAKE_CXX_STANDARD_REQUIRED ON) +set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON) + +set(SOURCES + main.cpp + BattleFireDirect.cpp + StaticMeshComponent.cpp + Utils.cpp + stbi/stb_image.cpp + NanoVDBLoader.cpp +) + +set(HEADERS + BattleFireDirect.h + StaticMeshComponent.h + Utils.h + stbi/stb_image.h +) + +add_executable(${PROJECT_NAME} WIN32 ${SOURCES} ${HEADERS}) + +target_compile_definitions(${PROJECT_NAME} PRIVATE UNICODE _UNICODE) +target_compile_options(${PROJECT_NAME} PRIVATE /utf-8 /MT) + +target_include_directories(${PROJECT_NAME} PRIVATE + ${CMAKE_SOURCE_DIR} +) + +target_link_libraries(${PROJECT_NAME} PRIVATE + d3d12.lib + dxgi.lib + d3dcompiler.lib + winmm.lib + kernel32.lib + user32.lib +) + +if(CMAKE_BUILD_TYPE STREQUAL "Debug") + target_compile_definitions(${PROJECT_NAME} PRIVATE _DEBUG) +else() + target_compile_definitions(${PROJECT_NAME} PRIVATE NDEBUG) +endif() + +set_target_properties(${PROJECT_NAME} PROPERTIES + RUNTIME_OUTPUT_DIRECTORY "${CMAKE_SOURCE_DIR}" +) + +add_custom_command(TARGET ${PROJECT_NAME} POST_BUILD + COMMAND ${CMAKE_COMMAND} -E copy_directory + ${CMAKE_SOURCE_DIR}/Res + $/Res +) diff --git a/MVS/VolumeRenderer/NanoVDBLoader.cpp b/MVS/VolumeRenderer/NanoVDBLoader.cpp new file mode 100644 index 00000000..c0ca095e --- /dev/null +++ b/MVS/VolumeRenderer/NanoVDBLoader.cpp @@ -0,0 +1,134 @@ +#define NOMINMAX +#include "NanoVDBLoader.h" +#include +#include "BattleFireDirect.h" +#include +#include +#include +#include +#include + +bool LoadNanoVDB(const char* filePath, NanoVDBData& outData, 
ID3D12GraphicsCommandList* cmdList, ID3D12CommandAllocator* cmdAlloc) { + try { + if (cmdAlloc && cmdList) { + cmdAlloc->Reset(); + cmdList->Reset(cmdAlloc, nullptr); + } + + nanovdb::GridHandle gridHandle = nanovdb::io::readGrid(filePath); + + const uint64_t byteSize = gridHandle.buffer().bufferSize(); + const uint64_t elementCount = byteSize / sizeof(uint32_t); + + void* bufferData = malloc(byteSize); + if (!bufferData) { + std::cerr << "Failed to allocate memory for NanoVDB" << std::endl; + return false; + } + + memcpy(bufferData, gridHandle.buffer().data(), byteSize); + + double* bboxData = (double*)((char*)bufferData + 560); + for (int i = 0; i < 6; i++) { + outData.worldBBox[i] = bboxData[i]; + } + + double* voxelSizeData = (double*)((char*)bufferData + 608); + printf("[NanoVDB] Voxel size: [%.6f, %.6f, %.6f]\n", voxelSizeData[0], voxelSizeData[1], voxelSizeData[2]); + + outData.cpuData = bufferData; + outData.byteSize = byteSize; + outData.elementCount = elementCount; + + ID3D12Device* device = GetD3DDevice(); + D3D12_HEAP_PROPERTIES heapProps = {}; + heapProps.Type = D3D12_HEAP_TYPE_DEFAULT; + heapProps.CPUPageProperty = D3D12_CPU_PAGE_PROPERTY_UNKNOWN; + heapProps.MemoryPoolPreference = D3D12_MEMORY_POOL_UNKNOWN; + + D3D12_RESOURCE_DESC resourceDesc = {}; + resourceDesc.Dimension = D3D12_RESOURCE_DIMENSION_BUFFER; + resourceDesc.Width = byteSize; + resourceDesc.Height = 1; + resourceDesc.DepthOrArraySize = 1; + resourceDesc.MipLevels = 1; + resourceDesc.Format = DXGI_FORMAT_UNKNOWN; + resourceDesc.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR; + resourceDesc.SampleDesc.Count = 1; + + HRESULT hr = device->CreateCommittedResource( + &heapProps, + D3D12_HEAP_FLAG_NONE, + &resourceDesc, + D3D12_RESOURCE_STATE_COPY_DEST, + nullptr, + IID_PPV_ARGS(&outData.gpuBuffer) + ); + + if (FAILED(hr)) { + std::cerr << "Failed to create GPU buffer for NanoVDB" << std::endl; + free(bufferData); + return false; + } + + D3D12_HEAP_PROPERTIES uploadHeapProps = {}; + 
uploadHeapProps.Type = D3D12_HEAP_TYPE_UPLOAD; + + ID3D12Resource* uploadBuffer = nullptr; + hr = device->CreateCommittedResource( + &uploadHeapProps, + D3D12_HEAP_FLAG_NONE, + &resourceDesc, + D3D12_RESOURCE_STATE_GENERIC_READ, + nullptr, + IID_PPV_ARGS(&uploadBuffer) + ); + + if (SUCCEEDED(hr)) { + void* mappedData = nullptr; + D3D12_RANGE readRange = {0, 0}; + uploadBuffer->Map(0, &readRange, &mappedData); + memcpy(mappedData, bufferData, byteSize); + uploadBuffer->Unmap(0, nullptr); + + cmdList->CopyBufferRegion(outData.gpuBuffer, 0, uploadBuffer, 0, byteSize); + + D3D12_RESOURCE_BARRIER barrier = {}; + barrier.Type = D3D12_RESOURCE_BARRIER_TYPE_TRANSITION; + barrier.Transition.pResource = outData.gpuBuffer; + barrier.Transition.StateBefore = D3D12_RESOURCE_STATE_COPY_DEST; + barrier.Transition.StateAfter = D3D12_RESOURCE_STATE_GENERIC_READ; + cmdList->ResourceBarrier(1, &barrier); + + EndCommandList(); + WaitForCompletionOfCommandList(); + uploadBuffer->Release(); + GetCommandAllocator()->Reset(); + cmdList->Reset(GetCommandAllocator(), nullptr); + } + + std::cout << "[NanoVDB] Loaded: " << byteSize << " bytes, " << elementCount << " elements" << std::endl; + std::cout.flush(); + return true; + } + catch (const std::exception& e) { + std::cerr << "[NanoVDB] Error: " << e.what() << std::endl; + std::cerr.flush(); + return false; + } +} + +void FreeNanoVDB(NanoVDBData& data) { + if (data.gpuBuffer) { + data.gpuBuffer->Release(); + data.gpuBuffer = nullptr; + } + if (data.cpuData) { + free(data.cpuData); + data.cpuData = nullptr; + } + data.byteSize = 0; + data.elementCount = 0; +} + + diff --git a/MVS/VolumeRenderer/NanoVDBLoader.h b/MVS/VolumeRenderer/NanoVDBLoader.h new file mode 100644 index 00000000..ce275911 --- /dev/null +++ b/MVS/VolumeRenderer/NanoVDBLoader.h @@ -0,0 +1,16 @@ +#pragma once +#include +#include +#include + +struct NanoVDBData { + ID3D12Resource* gpuBuffer; + void* cpuData; + uint64_t byteSize; + uint64_t elementCount; + double 
worldBBox[6]; +}; + +bool LoadNanoVDB(const char* filePath, NanoVDBData& outData, ID3D12GraphicsCommandList* cmdList, ID3D12CommandAllocator* cmdAlloc = nullptr); +void FreeNanoVDB(NanoVDBData& data); + diff --git a/MVS/VolumeRenderer/README.md b/MVS/VolumeRenderer/README.md new file mode 100644 index 00000000..2153790a --- /dev/null +++ b/MVS/VolumeRenderer/README.md @@ -0,0 +1,87 @@ +# XCVolumeRenderer + +基于 DirectX 12 的体积渲染器,使用 NanoVDB 格式实现云、烟雾等体积数据的实时渲染。 + +## 技术栈 + +- **渲染API**: DirectX 12 +- **语言**: C++17 +- **构建系统**: CMake +- **依赖库**: DirectX 12 SDK, stb_image, NanoVDB + +## 项目结构 + +``` +XCVolumeRenderer/ +├── main.cpp # 主程序入口 +├── BattleFireDirect.cpp/h # DirectX 12 核心渲染实现 +├── NanoVDBLoader.cpp/h # NanoVDB 体积数据加载器 +├── StaticMeshComponent.cpp/h # 静态网格组件 +├── Utils.cpp/h # 工具函数 +├── stbi/ # 图像加载库 +├── Res/ +│ ├── Shader/ # HLSL 着色器 +│ │ ├── volume.hlsl # 体积渲染着色器 +│ │ ├── gs.hlsl # 几何着色器 +│ │ └── PNanoVDB.hlsl # NanoVDB GPU 解析 +│ ├── Model/ # 模型文件 +│ ├── Image/ # 纹理图片 +│ └── NanoVDB/ # NanoVDB 体积数据 +├── CMakeLists.txt # CMake 构建配置 +└── build/ # 构建目录 +``` + +## 构建方法 + +### 前置要求 + +- Windows 10/11 +- Visual Studio 2019 或更高版本 +- CMake 3.15+ + +### 构建步骤 + +```bash +# 创建并进入构建目录 +mkdir build && cd build + +# 配置项目 +cmake .. + +# 编译 +cmake --build . 
--config Release +``` + +### 运行 + +编译完成后,运行 `XCVolumeRenderer.exe` 或使用 `run.bat` + +## 功能特性 + +### 核心渲染 +- DirectX 12 渲染管线 +- 几何着色器(GS)支持 +- 纹理映射 +- 常量缓冲区(CBV/SRV) + +### 体积渲染 +- NanoVDB 格式支持 +- 光线步进(Ray Marching) +- HDDA 空间跳跃加速 +- 体积阴影(Volumetric Shadow) +- 指数步长抖动采样 +- Gamma 校正 + +### 参数配置 +- DensityScale - 密度缩放 +- StepSize - 步进大小 +- MaxSteps - 最大步数 +- LightDir - 光照方向 +- LightSamples - 阴影采样数 + +## 资源说明 + +- 着色器文件位于 `Res/Shader/` 目录 +- 模型文件为 `.lhsm` 格式 +- 纹理支持 PNG、JPG 格式 +- 体积数据支持 NanoVDB `.nvdb` 格式 \ No newline at end of file diff --git a/MVS/VolumeRenderer/Res/Image/earth_d.jpg b/MVS/VolumeRenderer/Res/Image/earth_d.jpg new file mode 100644 index 00000000..663081b5 Binary files /dev/null and b/MVS/VolumeRenderer/Res/Image/earth_d.jpg differ diff --git a/MVS/VolumeRenderer/Res/Image/head.png b/MVS/VolumeRenderer/Res/Image/head.png new file mode 100644 index 00000000..055c373f Binary files /dev/null and b/MVS/VolumeRenderer/Res/Image/head.png differ diff --git a/MVS/VolumeRenderer/Res/Model/Sphere.lhsm b/MVS/VolumeRenderer/Res/Model/Sphere.lhsm new file mode 100644 index 00000000..b9e0c40d Binary files /dev/null and b/MVS/VolumeRenderer/Res/Model/Sphere.lhsm differ diff --git a/MVS/VolumeRenderer/Res/NanoVDB/bunny.nvdb b/MVS/VolumeRenderer/Res/NanoVDB/bunny.nvdb new file mode 100644 index 00000000..482cabfa Binary files /dev/null and b/MVS/VolumeRenderer/Res/NanoVDB/bunny.nvdb differ diff --git a/MVS/VolumeRenderer/Res/NanoVDB/cloud.nvdb b/MVS/VolumeRenderer/Res/NanoVDB/cloud.nvdb new file mode 100644 index 00000000..5fe9e983 Binary files /dev/null and b/MVS/VolumeRenderer/Res/NanoVDB/cloud.nvdb differ diff --git a/MVS/VolumeRenderer/Res/Shader/PNanoVDB.hlsl b/MVS/VolumeRenderer/Res/Shader/PNanoVDB.hlsl new file mode 100644 index 00000000..67c93b08 --- /dev/null +++ b/MVS/VolumeRenderer/Res/Shader/PNanoVDB.hlsl @@ -0,0 +1,3454 @@ + +// Copyright Contributors to the OpenVDB Project +// SPDX-License-Identifier: MPL-2.0 + +/*! 
+ \file PNanoVDB.h + + \author Andrew Reidmeyer + + \brief This file is a portable (e.g. pointer-less) C99/GLSL/HLSL port + of NanoVDB.h, which is compatible with most graphics APIs. +*/ + +#ifndef NANOVDB_PNANOVDB_H_HAS_BEEN_INCLUDED +#define NANOVDB_PNANOVDB_H_HAS_BEEN_INCLUDED + +// ------------------------------------------------ Configuration ----------------------------------------------------------- + +// platforms +//#define PNANOVDB_C +//#define PNANOVDB_HLSL +//#define PNANOVDB_GLSL + +// addressing mode +// PNANOVDB_ADDRESS_32 +// PNANOVDB_ADDRESS_64 +#if defined(PNANOVDB_C) +#ifndef PNANOVDB_ADDRESS_32 +#define PNANOVDB_ADDRESS_64 +#endif +#elif defined(PNANOVDB_HLSL) +#ifndef PNANOVDB_ADDRESS_64 +#define PNANOVDB_ADDRESS_32 +#endif +#elif defined(PNANOVDB_GLSL) +#ifndef PNANOVDB_ADDRESS_64 +#define PNANOVDB_ADDRESS_32 +#endif +#endif + +// bounds checking +//#define PNANOVDB_BUF_BOUNDS_CHECK + +// enable HDDA by default on HLSL/GLSL, make explicit on C +#if defined(PNANOVDB_C) +//#define PNANOVDB_HDDA +#ifdef PNANOVDB_HDDA +#ifndef PNANOVDB_CMATH +#define PNANOVDB_CMATH +#endif +#endif +#elif defined(PNANOVDB_HLSL) +#define PNANOVDB_HDDA +#elif defined(PNANOVDB_GLSL) +#define PNANOVDB_HDDA +#endif + +#ifdef PNANOVDB_CMATH +#ifndef __CUDACC_RTC__ +#include +#endif +#endif + +// ------------------------------------------------ Buffer ----------------------------------------------------------- + +#if defined(PNANOVDB_BUF_CUSTOM) +// NOP +#elif defined(PNANOVDB_C) +#define PNANOVDB_BUF_C +#elif defined(PNANOVDB_HLSL) +#define PNANOVDB_BUF_HLSL +#elif defined(PNANOVDB_GLSL) +#define PNANOVDB_BUF_GLSL +#endif + +#if defined(PNANOVDB_BUF_C) +#ifndef __CUDACC_RTC__ +#include +#endif +#if defined(__CUDACC__) +#define PNANOVDB_BUF_FORCE_INLINE static __host__ __device__ __forceinline__ +#elif defined(_WIN32) +#define PNANOVDB_BUF_FORCE_INLINE static inline __forceinline +#else +#define PNANOVDB_BUF_FORCE_INLINE static inline __attribute__((always_inline)) 
+#endif +typedef struct pnanovdb_buf_t +{ + uint32_t* data; +#ifdef PNANOVDB_BUF_BOUNDS_CHECK + uint64_t size_in_words; +#endif +}pnanovdb_buf_t; +PNANOVDB_BUF_FORCE_INLINE pnanovdb_buf_t pnanovdb_make_buf(uint32_t* data, uint64_t size_in_words) +{ + pnanovdb_buf_t ret; + ret.data = data; +#ifdef PNANOVDB_BUF_BOUNDS_CHECK + ret.size_in_words = size_in_words; +#endif + return ret; +} +#if defined(PNANOVDB_ADDRESS_32) +PNANOVDB_BUF_FORCE_INLINE uint32_t pnanovdb_buf_read_uint32(pnanovdb_buf_t buf, uint32_t byte_offset) +{ + uint32_t wordaddress = (byte_offset >> 2u); +#ifdef PNANOVDB_BUF_BOUNDS_CHECK + return wordaddress < buf.size_in_words ? buf.data[wordaddress] : 0u; +#else + return buf.data[wordaddress]; +#endif +} +PNANOVDB_BUF_FORCE_INLINE uint64_t pnanovdb_buf_read_uint64(pnanovdb_buf_t buf, uint32_t byte_offset) +{ + uint64_t* data64 = (uint64_t*)buf.data; + uint32_t wordaddress64 = (byte_offset >> 3u); +#ifdef PNANOVDB_BUF_BOUNDS_CHECK + uint64_t size_in_words64 = buf.size_in_words >> 1u; + return wordaddress64 < size_in_words64 ? 
data64[wordaddress64] : 0llu; +#else + return data64[wordaddress64]; +#endif +} +PNANOVDB_BUF_FORCE_INLINE void pnanovdb_buf_write_uint32(pnanovdb_buf_t buf, uint32_t byte_offset, uint32_t value) +{ + uint32_t wordaddress = (byte_offset >> 2u); +#ifdef PNANOVDB_BUF_BOUNDS_CHECK + if (wordaddress < buf.size_in_words) + { + buf.data[wordaddress] = value; +} +#else + buf.data[wordaddress] = value; +#endif +} +PNANOVDB_BUF_FORCE_INLINE void pnanovdb_buf_write_uint64(pnanovdb_buf_t buf, uint32_t byte_offset, uint64_t value) +{ + uint64_t* data64 = (uint64_t*)buf.data; + uint32_t wordaddress64 = (byte_offset >> 3u); +#ifdef PNANOVDB_BUF_BOUNDS_CHECK + uint64_t size_in_words64 = buf.size_in_words >> 1u; + if (wordaddress64 < size_in_words64) + { + data64[wordaddress64] = value; + } +#else + data64[wordaddress64] = value; +#endif +} +#elif defined(PNANOVDB_ADDRESS_64) +PNANOVDB_BUF_FORCE_INLINE uint32_t pnanovdb_buf_read_uint32(pnanovdb_buf_t buf, uint64_t byte_offset) +{ + uint64_t wordaddress = (byte_offset >> 2u); +#ifdef PNANOVDB_BUF_BOUNDS_CHECK + return wordaddress < buf.size_in_words ? buf.data[wordaddress] : 0u; +#else + return buf.data[wordaddress]; +#endif +} +PNANOVDB_BUF_FORCE_INLINE uint64_t pnanovdb_buf_read_uint64(pnanovdb_buf_t buf, uint64_t byte_offset) +{ + uint64_t* data64 = (uint64_t*)buf.data; + uint64_t wordaddress64 = (byte_offset >> 3u); +#ifdef PNANOVDB_BUF_BOUNDS_CHECK + uint64_t size_in_words64 = buf.size_in_words >> 1u; + return wordaddress64 < size_in_words64 ? 
data64[wordaddress64] : 0llu; +#else + return data64[wordaddress64]; +#endif +} +PNANOVDB_BUF_FORCE_INLINE void pnanovdb_buf_write_uint32(pnanovdb_buf_t buf, uint64_t byte_offset, uint32_t value) +{ + uint64_t wordaddress = (byte_offset >> 2u); +#ifdef PNANOVDB_BUF_BOUNDS_CHECK + if (wordaddress < buf.size_in_words) + { + buf.data[wordaddress] = value; + } +#else + buf.data[wordaddress] = value; +#endif +} +PNANOVDB_BUF_FORCE_INLINE void pnanovdb_buf_write_uint64(pnanovdb_buf_t buf, uint64_t byte_offset, uint64_t value) +{ + uint64_t* data64 = (uint64_t*)buf.data; + uint64_t wordaddress64 = (byte_offset >> 3u); +#ifdef PNANOVDB_BUF_BOUNDS_CHECK + uint64_t size_in_words64 = buf.size_in_words >> 1u; + if (wordaddress64 < size_in_words64) + { + data64[wordaddress64] = value; + } +#else + data64[wordaddress64] = value; +#endif +} +#endif +typedef uint32_t pnanovdb_grid_type_t; +#define PNANOVDB_GRID_TYPE_GET(grid_typeIn, nameIn) pnanovdb_grid_type_constants[grid_typeIn].nameIn +#elif defined(PNANOVDB_BUF_HLSL) +#if defined(PNANOVDB_ADDRESS_32) +#define pnanovdb_buf_t StructuredBuffer +uint pnanovdb_buf_read_uint32(pnanovdb_buf_t buf, uint byte_offset) +{ + return buf[(byte_offset >> 2u)]; +} +uint2 pnanovdb_buf_read_uint64(pnanovdb_buf_t buf, uint byte_offset) +{ + uint2 ret; + ret.x = pnanovdb_buf_read_uint32(buf, byte_offset + 0u); + ret.y = pnanovdb_buf_read_uint32(buf, byte_offset + 4u); + return ret; +} +void pnanovdb_buf_write_uint32(pnanovdb_buf_t buf, uint byte_offset, uint value) +{ + // NOP, by default no write in HLSL +} +void pnanovdb_buf_write_uint64(pnanovdb_buf_t buf, uint byte_offset, uint2 value) +{ + // NOP, by default no write in HLSL +} +#elif defined(PNANOVDB_ADDRESS_64) +#define pnanovdb_buf_t StructuredBuffer +uint pnanovdb_buf_read_uint32(pnanovdb_buf_t buf, uint64_t byte_offset) +{ + return buf[uint(byte_offset >> 2u)]; +} +uint64_t pnanovdb_buf_read_uint64(pnanovdb_buf_t buf, uint64_t byte_offset) +{ + uint64_t ret; + ret = 
pnanovdb_buf_read_uint32(buf, byte_offset + 0u); + ret = ret + (uint64_t(pnanovdb_buf_read_uint32(buf, byte_offset + 4u)) << 32u); + return ret; +} +void pnanovdb_buf_write_uint32(pnanovdb_buf_t buf, uint64_t byte_offset, uint value) +{ + // NOP, by default no write in HLSL +} +void pnanovdb_buf_write_uint64(pnanovdb_buf_t buf, uint64_t byte_offset, uint64_t value) +{ + // NOP, by default no write in HLSL +} +#endif +#define pnanovdb_grid_type_t uint +#define PNANOVDB_GRID_TYPE_GET(grid_typeIn, nameIn) pnanovdb_grid_type_constants[grid_typeIn].nameIn +#elif defined(PNANOVDB_BUF_GLSL) +struct pnanovdb_buf_t +{ + uint unused; // to satisfy min struct size? +}; +uint pnanovdb_buf_read_uint32(pnanovdb_buf_t buf, uint byte_offset) +{ + return pnanovdb_buf_data[(byte_offset >> 2u)]; +} +uvec2 pnanovdb_buf_read_uint64(pnanovdb_buf_t buf, uint byte_offset) +{ + uvec2 ret; + ret.x = pnanovdb_buf_read_uint32(buf, byte_offset + 0u); + ret.y = pnanovdb_buf_read_uint32(buf, byte_offset + 4u); + return ret; +} +void pnanovdb_buf_write_uint32(pnanovdb_buf_t buf, uint byte_offset, uint value) +{ + // NOP, by default no write in HLSL +} +void pnanovdb_buf_write_uint64(pnanovdb_buf_t buf, uint byte_offset, uvec2 value) +{ + // NOP, by default no write in HLSL +} +#define pnanovdb_grid_type_t uint +#define PNANOVDB_GRID_TYPE_GET(grid_typeIn, nameIn) pnanovdb_grid_type_constants[grid_typeIn].nameIn +#endif + +// ------------------------------------------------ Basic Types ----------------------------------------------------------- + +// force inline +#if defined(PNANOVDB_C) +#if defined(__CUDACC__) +#define PNANOVDB_FORCE_INLINE static __host__ __device__ __forceinline__ +#elif defined(_WIN32) +#define PNANOVDB_FORCE_INLINE static inline __forceinline +#else +#define PNANOVDB_FORCE_INLINE static inline __attribute__((always_inline)) +#endif +#elif defined(PNANOVDB_HLSL) +#define PNANOVDB_FORCE_INLINE +#elif defined(PNANOVDB_GLSL) +#define PNANOVDB_FORCE_INLINE +#endif + +// struct 
typedef, static const, inout +#if defined(PNANOVDB_C) +#define PNANOVDB_STRUCT_TYPEDEF(X) typedef struct X X; +#define PNANOVDB_STATIC_CONST static const +#define PNANOVDB_INOUT(X) X* +#define PNANOVDB_IN(X) const X* +#define PNANOVDB_DEREF(X) (*X) +#define PNANOVDB_REF(X) &X +#elif defined(PNANOVDB_HLSL) +#define PNANOVDB_STRUCT_TYPEDEF(X) +#define PNANOVDB_STATIC_CONST static const +#define PNANOVDB_INOUT(X) inout X +#define PNANOVDB_IN(X) X +#define PNANOVDB_DEREF(X) X +#define PNANOVDB_REF(X) X +#elif defined(PNANOVDB_GLSL) +#define PNANOVDB_STRUCT_TYPEDEF(X) +#define PNANOVDB_STATIC_CONST const +#define PNANOVDB_INOUT(X) inout X +#define PNANOVDB_IN(X) X +#define PNANOVDB_DEREF(X) X +#define PNANOVDB_REF(X) X +#endif + +// basic types, type conversion +#if defined(PNANOVDB_C) +#define PNANOVDB_NATIVE_64 +#ifndef __CUDACC_RTC__ +#include +#endif +#if !defined(PNANOVDB_MEMCPY_CUSTOM) +#ifndef __CUDACC_RTC__ +#include +#endif +#define pnanovdb_memcpy memcpy +#endif +typedef uint32_t pnanovdb_uint32_t; +typedef int32_t pnanovdb_int32_t; +typedef int32_t pnanovdb_bool_t; +#define PNANOVDB_FALSE 0 +#define PNANOVDB_TRUE 1 +typedef uint64_t pnanovdb_uint64_t; +typedef int64_t pnanovdb_int64_t; +typedef struct pnanovdb_coord_t +{ + pnanovdb_int32_t x, y, z; +}pnanovdb_coord_t; +typedef struct pnanovdb_vec3_t +{ + float x, y, z; +}pnanovdb_vec3_t; +PNANOVDB_FORCE_INLINE pnanovdb_int32_t pnanovdb_uint32_as_int32(pnanovdb_uint32_t v) { return (pnanovdb_int32_t)v; } +PNANOVDB_FORCE_INLINE pnanovdb_int64_t pnanovdb_uint64_as_int64(pnanovdb_uint64_t v) { return (pnanovdb_int64_t)v; } +PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_int64_as_uint64(pnanovdb_int64_t v) { return (pnanovdb_uint64_t)v; } +PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_int32_as_uint32(pnanovdb_int32_t v) { return (pnanovdb_uint32_t)v; } +PNANOVDB_FORCE_INLINE float pnanovdb_uint32_as_float(pnanovdb_uint32_t v) { float vf; pnanovdb_memcpy(&vf, &v, sizeof(vf)); return vf; } 
+PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_float_as_uint32(float v) { return *((pnanovdb_uint32_t*)(&v)); } +PNANOVDB_FORCE_INLINE double pnanovdb_uint64_as_double(pnanovdb_uint64_t v) { double vf; pnanovdb_memcpy(&vf, &v, sizeof(vf)); return vf; } +PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_double_as_uint64(double v) { return *((pnanovdb_uint64_t*)(&v)); } +PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_uint64_low(pnanovdb_uint64_t v) { return (pnanovdb_uint32_t)v; } +PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_uint64_high(pnanovdb_uint64_t v) { return (pnanovdb_uint32_t)(v >> 32u); } +PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_uint32_as_uint64(pnanovdb_uint32_t x, pnanovdb_uint32_t y) { return ((pnanovdb_uint64_t)x) | (((pnanovdb_uint64_t)y) << 32u); } +PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_uint32_as_uint64_low(pnanovdb_uint32_t x) { return ((pnanovdb_uint64_t)x); } +PNANOVDB_FORCE_INLINE pnanovdb_int32_t pnanovdb_uint64_is_equal(pnanovdb_uint64_t a, pnanovdb_uint64_t b) { return a == b; } +PNANOVDB_FORCE_INLINE pnanovdb_int32_t pnanovdb_int64_is_zero(pnanovdb_int64_t a) { return a == 0; } +#ifdef PNANOVDB_CMATH +PNANOVDB_FORCE_INLINE float pnanovdb_floor(float v) { return floorf(v); } +#endif +PNANOVDB_FORCE_INLINE pnanovdb_int32_t pnanovdb_float_to_int32(float v) { return (pnanovdb_int32_t)v; } +PNANOVDB_FORCE_INLINE float pnanovdb_int32_to_float(pnanovdb_int32_t v) { return (float)v; } +PNANOVDB_FORCE_INLINE float pnanovdb_uint32_to_float(pnanovdb_uint32_t v) { return (float)v; } +PNANOVDB_FORCE_INLINE float pnanovdb_min(float a, float b) { return a < b ? a : b; } +PNANOVDB_FORCE_INLINE float pnanovdb_max(float a, float b) { return a > b ? 
a : b; } +#elif defined(PNANOVDB_HLSL) +typedef uint pnanovdb_uint32_t; +typedef int pnanovdb_int32_t; +typedef bool pnanovdb_bool_t; +#define PNANOVDB_FALSE false +#define PNANOVDB_TRUE true +typedef int3 pnanovdb_coord_t; +typedef float3 pnanovdb_vec3_t; +pnanovdb_int32_t pnanovdb_uint32_as_int32(pnanovdb_uint32_t v) { return int(v); } +pnanovdb_uint32_t pnanovdb_int32_as_uint32(pnanovdb_int32_t v) { return uint(v); } +float pnanovdb_uint32_as_float(pnanovdb_uint32_t v) { return asfloat(v); } +pnanovdb_uint32_t pnanovdb_float_as_uint32(float v) { return asuint(v); } +float pnanovdb_floor(float v) { return floor(v); } +pnanovdb_int32_t pnanovdb_float_to_int32(float v) { return int(v); } +float pnanovdb_int32_to_float(pnanovdb_int32_t v) { return float(v); } +float pnanovdb_uint32_to_float(pnanovdb_uint32_t v) { return float(v); } +float pnanovdb_min(float a, float b) { return min(a, b); } +float pnanovdb_max(float a, float b) { return max(a, b); } +#if defined(PNANOVDB_ADDRESS_32) +typedef uint2 pnanovdb_uint64_t; +typedef int2 pnanovdb_int64_t; +pnanovdb_int64_t pnanovdb_uint64_as_int64(pnanovdb_uint64_t v) { return int2(v); } +pnanovdb_uint64_t pnanovdb_int64_as_uint64(pnanovdb_int64_t v) { return uint2(v); } +double pnanovdb_uint64_as_double(pnanovdb_uint64_t v) { return asdouble(v.x, v.y); } +pnanovdb_uint64_t pnanovdb_double_as_uint64(double v) { uint2 ret; asuint(v, ret.x, ret.y); return ret; } +pnanovdb_uint32_t pnanovdb_uint64_low(pnanovdb_uint64_t v) { return v.x; } +pnanovdb_uint32_t pnanovdb_uint64_high(pnanovdb_uint64_t v) { return v.y; } +pnanovdb_uint64_t pnanovdb_uint32_as_uint64(pnanovdb_uint32_t x, pnanovdb_uint32_t y) { return uint2(x, y); } +pnanovdb_uint64_t pnanovdb_uint32_as_uint64_low(pnanovdb_uint32_t x) { return uint2(x, 0); } +bool pnanovdb_uint64_is_equal(pnanovdb_uint64_t a, pnanovdb_uint64_t b) { return (a.x == b.x) && (a.y == b.y); } +bool pnanovdb_int64_is_zero(pnanovdb_int64_t a) { return a.x == 0 && a.y == 0; } +#else +typedef 
uint64_t pnanovdb_uint64_t; +typedef int64_t pnanovdb_int64_t; +pnanovdb_int64_t pnanovdb_uint64_as_int64(pnanovdb_uint64_t v) { return int64_t(v); } +pnanovdb_uint64_t pnanovdb_int64_as_uint64(pnanovdb_int64_t v) { return uint64_t(v); } +double pnanovdb_uint64_as_double(pnanovdb_uint64_t v) { return asdouble(uint(v), uint(v >> 32u)); } +pnanovdb_uint64_t pnanovdb_double_as_uint64(double v) { uint2 ret; asuint(v, ret.x, ret.y); return uint64_t(ret.x) + (uint64_t(ret.y) << 32u); } +pnanovdb_uint32_t pnanovdb_uint64_low(pnanovdb_uint64_t v) { return uint(v); } +pnanovdb_uint32_t pnanovdb_uint64_high(pnanovdb_uint64_t v) { return uint(v >> 32u); } +pnanovdb_uint64_t pnanovdb_uint32_as_uint64(pnanovdb_uint32_t x, pnanovdb_uint32_t y) { return uint64_t(x) + (uint64_t(y) << 32u); } +pnanovdb_uint64_t pnanovdb_uint32_as_uint64_low(pnanovdb_uint32_t x) { return uint64_t(x); } +bool pnanovdb_uint64_is_equal(pnanovdb_uint64_t a, pnanovdb_uint64_t b) { return a == b; } +bool pnanovdb_int64_is_zero(pnanovdb_int64_t a) { return a == 0; } +#endif +#elif defined(PNANOVDB_GLSL) +#define pnanovdb_uint32_t uint +#define pnanovdb_int32_t int +#define pnanovdb_bool_t bool +#define PNANOVDB_FALSE false +#define PNANOVDB_TRUE true +#define pnanovdb_uint64_t uvec2 +#define pnanovdb_int64_t ivec2 +#define pnanovdb_coord_t ivec3 +#define pnanovdb_vec3_t vec3 +pnanovdb_int32_t pnanovdb_uint32_as_int32(pnanovdb_uint32_t v) { return int(v); } +pnanovdb_int64_t pnanovdb_uint64_as_int64(pnanovdb_uint64_t v) { return ivec2(v); } +pnanovdb_uint64_t pnanovdb_int64_as_uint64(pnanovdb_int64_t v) { return uvec2(v); } +pnanovdb_uint32_t pnanovdb_int32_as_uint32(pnanovdb_int32_t v) { return uint(v); } +float pnanovdb_uint32_as_float(pnanovdb_uint32_t v) { return uintBitsToFloat(v); } +pnanovdb_uint32_t pnanovdb_float_as_uint32(float v) { return floatBitsToUint(v); } +double pnanovdb_uint64_as_double(pnanovdb_uint64_t v) { return packDouble2x32(uvec2(v.x, v.y)); } +pnanovdb_uint64_t 
pnanovdb_double_as_uint64(double v) { return unpackDouble2x32(v); } +pnanovdb_uint32_t pnanovdb_uint64_low(pnanovdb_uint64_t v) { return v.x; } +pnanovdb_uint32_t pnanovdb_uint64_high(pnanovdb_uint64_t v) { return v.y; } +pnanovdb_uint64_t pnanovdb_uint32_as_uint64(pnanovdb_uint32_t x, pnanovdb_uint32_t y) { return uvec2(x, y); } +pnanovdb_uint64_t pnanovdb_uint32_as_uint64_low(pnanovdb_uint32_t x) { return uvec2(x, 0); } +bool pnanovdb_uint64_is_equal(pnanovdb_uint64_t a, pnanovdb_uint64_t b) { return (a.x == b.x) && (a.y == b.y); } +bool pnanovdb_int64_is_zero(pnanovdb_int64_t a) { return a.x == 0 && a.y == 0; } +float pnanovdb_floor(float v) { return floor(v); } +pnanovdb_int32_t pnanovdb_float_to_int32(float v) { return int(v); } +float pnanovdb_int32_to_float(pnanovdb_int32_t v) { return float(v); } +float pnanovdb_uint32_to_float(pnanovdb_uint32_t v) { return float(v); } +float pnanovdb_min(float a, float b) { return min(a, b); } +float pnanovdb_max(float a, float b) { return max(a, b); } +#endif + +// ------------------------------------------------ Coord/Vec3 Utilties ----------------------------------------------------------- + +#if defined(PNANOVDB_C) +PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_vec3_uniform(float a) +{ + pnanovdb_vec3_t v; + v.x = a; + v.y = a; + v.z = a; + return v; +} +PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_vec3_add(const pnanovdb_vec3_t a, const pnanovdb_vec3_t b) +{ + pnanovdb_vec3_t v; + v.x = a.x + b.x; + v.y = a.y + b.y; + v.z = a.z + b.z; + return v; +} +PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_vec3_sub(const pnanovdb_vec3_t a, const pnanovdb_vec3_t b) +{ + pnanovdb_vec3_t v; + v.x = a.x - b.x; + v.y = a.y - b.y; + v.z = a.z - b.z; + return v; +} +PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_vec3_mul(const pnanovdb_vec3_t a, const pnanovdb_vec3_t b) +{ + pnanovdb_vec3_t v; + v.x = a.x * b.x; + v.y = a.y * b.y; + v.z = a.z * b.z; + return v; +} +PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_vec3_div(const 
pnanovdb_vec3_t a, const pnanovdb_vec3_t b) +{ + pnanovdb_vec3_t v; + v.x = a.x / b.x; + v.y = a.y / b.y; + v.z = a.z / b.z; + return v; +} +PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_vec3_min(const pnanovdb_vec3_t a, const pnanovdb_vec3_t b) +{ + pnanovdb_vec3_t v; + v.x = a.x < b.x ? a.x : b.x; + v.y = a.y < b.y ? a.y : b.y; + v.z = a.z < b.z ? a.z : b.z; + return v; +} +PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_vec3_max(const pnanovdb_vec3_t a, const pnanovdb_vec3_t b) +{ + pnanovdb_vec3_t v; + v.x = a.x > b.x ? a.x : b.x; + v.y = a.y > b.y ? a.y : b.y; + v.z = a.z > b.z ? a.z : b.z; + return v; +} +PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_coord_to_vec3(const pnanovdb_coord_t coord) +{ + pnanovdb_vec3_t v; + v.x = pnanovdb_int32_to_float(coord.x); + v.y = pnanovdb_int32_to_float(coord.y); + v.z = pnanovdb_int32_to_float(coord.z); + return v; +} +PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_coord_uniform(const pnanovdb_int32_t a) +{ + pnanovdb_coord_t v; + v.x = a; + v.y = a; + v.z = a; + return v; +} +PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_coord_add(pnanovdb_coord_t a, pnanovdb_coord_t b) +{ + pnanovdb_coord_t v; + v.x = a.x + b.x; + v.y = a.y + b.y; + v.z = a.z + b.z; + return v; +} +#elif defined(PNANOVDB_HLSL) +pnanovdb_vec3_t pnanovdb_vec3_uniform(float a) { return float3(a, a, a); } +pnanovdb_vec3_t pnanovdb_vec3_add(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return a + b; } +pnanovdb_vec3_t pnanovdb_vec3_sub(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return a - b; } +pnanovdb_vec3_t pnanovdb_vec3_mul(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return a * b; } +pnanovdb_vec3_t pnanovdb_vec3_div(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return a / b; } +pnanovdb_vec3_t pnanovdb_vec3_min(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return min(a, b); } +pnanovdb_vec3_t pnanovdb_vec3_max(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return max(a, b); } +pnanovdb_vec3_t pnanovdb_coord_to_vec3(pnanovdb_coord_t coord) { return float3(coord); } 
// HLSL branch (continued): coordinate helpers map directly onto int3 intrinsics.
pnanovdb_coord_t pnanovdb_coord_uniform(pnanovdb_int32_t a) { return int3(a, a, a); }
pnanovdb_coord_t pnanovdb_coord_add(pnanovdb_coord_t a, pnanovdb_coord_t b) { return a + b; }
#elif defined(PNANOVDB_GLSL)
// GLSL branch: the same vec3/coord helper set, expressed with native vec3/ivec3 operators.
pnanovdb_vec3_t pnanovdb_vec3_uniform(float a) { return vec3(a, a, a); }
pnanovdb_vec3_t pnanovdb_vec3_add(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return a + b; }
pnanovdb_vec3_t pnanovdb_vec3_sub(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return a - b; }
pnanovdb_vec3_t pnanovdb_vec3_mul(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return a * b; }
pnanovdb_vec3_t pnanovdb_vec3_div(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return a / b; }
pnanovdb_vec3_t pnanovdb_vec3_min(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return min(a, b); }
pnanovdb_vec3_t pnanovdb_vec3_max(pnanovdb_vec3_t a, pnanovdb_vec3_t b) { return max(a, b); }
pnanovdb_vec3_t pnanovdb_coord_to_vec3(const pnanovdb_coord_t coord) { return vec3(coord); }
pnanovdb_coord_t pnanovdb_coord_uniform(pnanovdb_int32_t a) { return ivec3(a, a, a); }
pnanovdb_coord_t pnanovdb_coord_add(pnanovdb_coord_t a, pnanovdb_coord_t b) { return a + b; }
#endif

// ------------------------------------------------ Uint64 Utils -----------------------------------------------------------

// Population count of a 32-bit word. Uses a hardware intrinsic where available
// (MSVC __popcnt, GCC/Clang __builtin_popcount, HLSL countbits, GLSL bitCount);
// otherwise falls back to the classic SWAR parallel bit-count sequence.
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_uint32_countbits(pnanovdb_uint32_t value)
{
#if defined(PNANOVDB_C)
#if defined(_MSC_VER) && (_MSC_VER >= 1928) && defined(PNANOVDB_USE_INTRINSICS)
    return __popcnt(value);
#elif (defined(__GNUC__) || defined(__clang__)) && defined(PNANOVDB_USE_INTRINSICS)
    return __builtin_popcount(value);
#else
    // SWAR popcount: sum bits in 2-, 4-, then 8-bit groups, then gather the
    // four byte-sums into the top byte with a multiply and shift down.
    value = value - ((value >> 1) & 0x55555555);
    value = (value & 0x33333333) + ((value >> 2) & 0x33333333);
    value = (value + (value >> 4)) & 0x0F0F0F0F;
    return (value * 0x01010101) >> 24;
#endif
#elif defined(PNANOVDB_HLSL)
    return countbits(value);
#elif defined(PNANOVDB_GLSL)
    return bitCount(value);
#endif
}

// Population count of a 64-bit value: sum of the popcounts of its two 32-bit halves.
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_uint64_countbits(pnanovdb_uint64_t value)
{
    return pnanovdb_uint32_countbits(pnanovdb_uint64_low(value)) + pnanovdb_uint32_countbits(pnanovdb_uint64_high(value));
}

#if defined(PNANOVDB_ADDRESS_32)
// 32-bit address mode: pnanovdb_uint64_t is emulated as a (low, high) pair of
// 32-bit words, so each 64-bit operation is spelled out on the two halves.

// a + b with a manual carry from the low word into the high word.
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_uint64_offset(pnanovdb_uint64_t a, pnanovdb_uint32_t b)
{
    pnanovdb_uint32_t low = pnanovdb_uint64_low(a);
    pnanovdb_uint32_t high = pnanovdb_uint64_high(a);
    low += b;
    if (low < b)
    {
        // unsigned wrap-around means the low-word addition carried
        high += 1u;
    }
    return pnanovdb_uint32_as_uint64(low, high);
}

// a - 1 with a manual borrow: high is decremented only when low wraps from 0.
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_uint64_dec(pnanovdb_uint64_t a)
{
    pnanovdb_uint32_t low = pnanovdb_uint64_low(a);
    pnanovdb_uint32_t high = pnanovdb_uint64_high(a);
    if (low == 0u)
    {
        high -= 1u;
    }
    low -= 1u;
    return pnanovdb_uint32_as_uint64(low, high);
}

// Logical shift right of the emulated 64-bit value, truncated to 32 bits.
// The (b > 0) guard avoids the undefined 32-bit shift by 32 when b == 0.
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_uint64_to_uint32_lsr(pnanovdb_uint64_t a, pnanovdb_uint32_t b)
{
    pnanovdb_uint32_t low = pnanovdb_uint64_low(a);
    pnanovdb_uint32_t high = pnanovdb_uint64_high(a);
    return (b >= 32u) ?
        (high >> (b - 32)) :
        ((low >> b) | ((b > 0) ? (high << (32u - b)) : 0u));
}

// 1 << bit_idx: the bit lands in the low word for bit_idx < 32, else in the high word.
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_uint64_bit_mask(pnanovdb_uint32_t bit_idx)
{
    pnanovdb_uint32_t mask_low = bit_idx < 32u ? 1u << bit_idx : 0u;
    pnanovdb_uint32_t mask_high = bit_idx >= 32u ? 1u << (bit_idx - 32u) : 0u;
    return pnanovdb_uint32_as_uint64(mask_low, mask_high);
}

// Bitwise AND, applied per half.
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_uint64_and(pnanovdb_uint64_t a, pnanovdb_uint64_t b)
{
    return pnanovdb_uint32_as_uint64(
        pnanovdb_uint64_low(a) & pnanovdb_uint64_low(b),
        pnanovdb_uint64_high(a) & pnanovdb_uint64_high(b)
    );
}

// True when any bit of the emulated 64-bit value is set.
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_uint64_any_bit(pnanovdb_uint64_t a)
{
    return pnanovdb_uint64_low(a) != 0u || pnanovdb_uint64_high(a) != 0u;
}

#else
// 64-bit address mode: native 64-bit arithmetic makes the same operations trivial.
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_uint64_offset(pnanovdb_uint64_t a, pnanovdb_uint32_t b)
{
    return a + b;
}

PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_uint64_dec(pnanovdb_uint64_t a)
{
    return a - 1u;
}

PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_uint64_to_uint32_lsr(pnanovdb_uint64_t a, pnanovdb_uint32_t b)
{
    return pnanovdb_uint64_low(a >> b);
}

PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_uint64_bit_mask(pnanovdb_uint32_t bit_idx)
{
    return 1llu << bit_idx;
}

PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_uint64_and(pnanovdb_uint64_t a, pnanovdb_uint64_t b)
{
    return a & b;
}

PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_uint64_any_bit(pnanovdb_uint64_t a)
{
    return a != 0llu;
}
#endif

// ------------------------------------------------ Address Type -----------------------------------------------------------

#if defined(PNANOVDB_ADDRESS_32)
// A byte address into the NanoVDB buffer, 32-bit flavor.
struct pnanovdb_address_t
{
    pnanovdb_uint32_t byte_offset; // offset in bytes; passed straight to the pnanovdb_buf_* backend
};
PNANOVDB_STRUCT_TYPEDEF(pnanovdb_address_t)

// address + byte_offset (unsigned 32-bit wrap semantics).
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_offset(pnanovdb_address_t address, pnanovdb_uint32_t byte_offset)
{
    pnanovdb_address_t ret = address;
    ret.byte_offset += byte_offset;
    return ret;
}
// address - byte_offset.
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_offset_neg(pnanovdb_address_t address, pnanovdb_uint32_t byte_offset)
{
    pnanovdb_address_t ret = address;
    ret.byte_offset -= byte_offset;
    return ret;
}
// Remaining 32-bit address helpers: all arithmetic stays in the 32-bit byte_offset.
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_offset_product(pnanovdb_address_t address, pnanovdb_uint32_t byte_offset, pnanovdb_uint32_t multiplier)
{
    pnanovdb_address_t ret = address;
    ret.byte_offset += byte_offset * multiplier;
    return ret;
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_offset64(pnanovdb_address_t address, pnanovdb_uint64_t byte_offset)
{
    pnanovdb_address_t ret = address;
    // lose high bits on 32-bit
    ret.byte_offset += pnanovdb_uint64_low(byte_offset);
    return ret;
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_offset64_product(pnanovdb_address_t address, pnanovdb_uint64_t byte_offset, pnanovdb_uint32_t multiplier)
{
    pnanovdb_address_t ret = address;
    // only the low 32 bits of the 64-bit offset participate in 32-bit mode
    ret.byte_offset += pnanovdb_uint64_low(byte_offset) * multiplier;
    return ret;
}
// Extract low address bits, e.g. the position within an aligned word.
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_address_mask(pnanovdb_address_t address, pnanovdb_uint32_t mask)
{
    return address.byte_offset & mask;
}
// Clear low address bits, i.e. round the address down to an alignment boundary.
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_mask_inv(pnanovdb_address_t address, pnanovdb_uint32_t mask)
{
    pnanovdb_address_t ret = address;
    ret.byte_offset &= (~mask);
    return ret;
}
// Byte offset zero doubles as the null address.
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_null()
{
    pnanovdb_address_t ret = { 0 };
    return ret;
}
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_address_is_null(pnanovdb_address_t address)
{
    return address.byte_offset == 0u;
}
// Half-open interval test: min inclusive, max exclusive.
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_address_in_interval(pnanovdb_address_t address, pnanovdb_address_t min_address, pnanovdb_address_t max_address)
{
    return address.byte_offset >= min_address.byte_offset && address.byte_offset < max_address.byte_offset;
}
#elif defined(PNANOVDB_ADDRESS_64)
// 64-bit address mode: byte_offset is a native 64-bit value.
struct pnanovdb_address_t
{
    pnanovdb_uint64_t byte_offset; // offset in bytes; passed straight to the pnanovdb_buf_* backend
};
PNANOVDB_STRUCT_TYPEDEF(pnanovdb_address_t)

PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_offset(pnanovdb_address_t address, pnanovdb_uint32_t byte_offset)
{
    pnanovdb_address_t ret = address;
    ret.byte_offset += byte_offset;
    return ret;
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_offset_neg(pnanovdb_address_t address, pnanovdb_uint32_t byte_offset)
{
    pnanovdb_address_t ret = address;
    ret.byte_offset -= byte_offset;
    return ret;
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_offset_product(pnanovdb_address_t address, pnanovdb_uint32_t byte_offset, pnanovdb_uint32_t multiplier)
{
    pnanovdb_address_t ret = address;
    // widen both factors to 64-bit before multiplying so the product cannot truncate
    ret.byte_offset += pnanovdb_uint32_as_uint64_low(byte_offset) * pnanovdb_uint32_as_uint64_low(multiplier);
    return ret;
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_offset64(pnanovdb_address_t address, pnanovdb_uint64_t byte_offset)
{
    pnanovdb_address_t ret = address;
    ret.byte_offset += byte_offset;
    return ret;
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_offset64_product(pnanovdb_address_t address, pnanovdb_uint64_t byte_offset, pnanovdb_uint32_t multiplier)
{
    pnanovdb_address_t ret = address;
    ret.byte_offset += byte_offset * pnanovdb_uint32_as_uint64_low(multiplier);
    return ret;
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_address_mask(pnanovdb_address_t address, pnanovdb_uint32_t mask)
{
    return pnanovdb_uint64_low(address.byte_offset) & mask;
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_mask_inv(pnanovdb_address_t address, pnanovdb_uint32_t mask)
{
    pnanovdb_address_t ret = address;
    ret.byte_offset &= (~pnanovdb_uint32_as_uint64_low(mask));
    return ret;
}
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_address_null()
{
    pnanovdb_address_t ret = { 0 };
    return ret;
}
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_address_is_null(pnanovdb_address_t address)
{
    return address.byte_offset == 0llu;
}
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_address_in_interval(pnanovdb_address_t address, pnanovdb_address_t min_address, pnanovdb_address_t max_address)
{
    return address.byte_offset >= min_address.byte_offset && address.byte_offset < max_address.byte_offset;
}
#endif

// ------------------------------------------------ High Level Buffer Read -----------------------------------------------------------

// Typed reads: thin wrappers that hand the byte offset to the buffer backend and
// reinterpret the raw bits as the requested type.
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_read_uint32(pnanovdb_buf_t buf, pnanovdb_address_t address)
{
    return pnanovdb_buf_read_uint32(buf, address.byte_offset);
}
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_read_uint64(pnanovdb_buf_t buf, pnanovdb_address_t address)
{
    return pnanovdb_buf_read_uint64(buf, address.byte_offset);
}
PNANOVDB_FORCE_INLINE pnanovdb_int32_t pnanovdb_read_int32(pnanovdb_buf_t buf, pnanovdb_address_t address)
{
    return pnanovdb_uint32_as_int32(pnanovdb_read_uint32(buf, address));
}
PNANOVDB_FORCE_INLINE float pnanovdb_read_float(pnanovdb_buf_t buf, pnanovdb_address_t address)
{
    return pnanovdb_uint32_as_float(pnanovdb_read_uint32(buf, address));
}
PNANOVDB_FORCE_INLINE pnanovdb_int64_t pnanovdb_read_int64(pnanovdb_buf_t buf, pnanovdb_address_t address)
{
    return pnanovdb_uint64_as_int64(pnanovdb_read_uint64(buf, address));
}
PNANOVDB_FORCE_INLINE double pnanovdb_read_double(pnanovdb_buf_t buf, pnanovdb_address_t address)
{
    return pnanovdb_uint64_as_double(pnanovdb_read_uint64(buf, address));
}
// Three consecutive 32-bit fields at +0/+4/+8.
PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_read_coord(pnanovdb_buf_t buf, pnanovdb_address_t address)
{
    pnanovdb_coord_t ret;
    ret.x = pnanovdb_uint32_as_int32(pnanovdb_read_uint32(buf, pnanovdb_address_offset(address, 0u)));
    ret.y = pnanovdb_uint32_as_int32(pnanovdb_read_uint32(buf, pnanovdb_address_offset(address, 4u)));
    ret.z = pnanovdb_uint32_as_int32(pnanovdb_read_uint32(buf, pnanovdb_address_offset(address, 8u)));
    return ret;
}
PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_read_vec3(pnanovdb_buf_t buf, pnanovdb_address_t address)
{
    pnanovdb_vec3_t ret;
    ret.x = pnanovdb_read_float(buf, pnanovdb_address_offset(address, 0u));
    ret.y = pnanovdb_read_float(buf, pnanovdb_address_offset(address, 4u));
    ret.z = pnanovdb_read_float(buf, pnanovdb_address_offset(address, 8u));
    return ret;
}

// Sub-word read: fetch the enclosing 4-byte-aligned word, then shift the addressed
// 16-bit half down ((address & 2) selects a shift of 0 or 16 bits).
// NOTE(review): the upper 16 bits are NOT masked off here (unlike pnanovdb_read_uint8);
// confirm callers clear or tolerate them — compare against upstream PNanoVDB.h.
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_read_uint16(pnanovdb_buf_t buf, pnanovdb_address_t address)
{
    pnanovdb_uint32_t raw = pnanovdb_read_uint32(buf, pnanovdb_address_mask_inv(address, 3u));
    return (raw >> (pnanovdb_address_mask(address, 2) << 3));
}
// Sub-word read of one byte: (address & 3) selects a shift of 0/8/16/24 bits.
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_read_uint8(pnanovdb_buf_t buf, pnanovdb_address_t address)
{
    pnanovdb_uint32_t raw = pnanovdb_read_uint32(buf, pnanovdb_address_mask_inv(address, 3u));
    return (raw >> (pnanovdb_address_mask(address, 3) << 3)) & 255;
}
// Decode a packed uint16 triple into floats, mapping [0, 65535] to [-0.5, +0.5].
PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_read_vec3u16(pnanovdb_buf_t buf, pnanovdb_address_t address)
{
    pnanovdb_vec3_t ret;
    const float scale = 1.f / 65535.f;
    ret.x = scale * pnanovdb_uint32_to_float(pnanovdb_read_uint16(buf, pnanovdb_address_offset(address, 0u))) - 0.5f;
    ret.y = scale * pnanovdb_uint32_to_float(pnanovdb_read_uint16(buf, pnanovdb_address_offset(address, 2u))) - 0.5f;
    ret.z = scale * pnanovdb_uint32_to_float(pnanovdb_read_uint16(buf, pnanovdb_address_offset(address, 4u))) - 0.5f;
    return ret;
}
// Decode a packed uint8 triple into floats, mapping [0, 255] to [-0.5, +0.5].
PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_read_vec3u8(pnanovdb_buf_t buf, pnanovdb_address_t address)
{
    pnanovdb_vec3_t ret;
    const float scale = 1.f / 255.f;
    ret.x = scale * pnanovdb_uint32_to_float(pnanovdb_read_uint8(buf, pnanovdb_address_offset(address, 0u))) - 0.5f;
    ret.y = scale * pnanovdb_uint32_to_float(pnanovdb_read_uint8(buf, pnanovdb_address_offset(address, 1u))) - 0.5f;
    ret.z = scale * pnanovdb_uint32_to_float(pnanovdb_read_uint8(buf, pnanovdb_address_offset(address, 2u))) - 0.5f;
    return ret;
}

// Test a single bit: bit_offset is relative to the addressed byte, and the byte's
// position within its aligned 32-bit word contributes 8 bits per byte.
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_read_bit(pnanovdb_buf_t buf, pnanovdb_address_t address, pnanovdb_uint32_t bit_offset)
{
    pnanovdb_address_t word_address = pnanovdb_address_mask_inv(address, 3u);
    pnanovdb_uint32_t bit_index = (pnanovdb_address_mask(address, 3u) << 3u) + bit_offset;
    pnanovdb_uint32_t value_word = pnanovdb_buf_read_uint32(buf, word_address.byte_offset);
    return ((value_word >> bit_index) & 1) != 0u;
}

#if defined(PNANOVDB_C)
// C branch returns the raw 16-bit half-float pattern as a short (no conversion here);
// the shader branches below decode to float with their native intrinsics.
PNANOVDB_FORCE_INLINE short pnanovdb_read_half(pnanovdb_buf_t buf, pnanovdb_address_t address)
{
    pnanovdb_uint32_t raw = pnanovdb_read_uint32(buf, address);
    return (short)(raw >> (pnanovdb_address_mask(address, 2) << 3));
}
#elif defined(PNANOVDB_HLSL)
PNANOVDB_FORCE_INLINE float pnanovdb_read_half(pnanovdb_buf_t buf, pnanovdb_address_t address)
{
    pnanovdb_uint32_t raw = pnanovdb_read_uint32(buf, address);
    return f16tof32(raw >> (pnanovdb_address_mask(address, 2) << 3));
}
#elif defined(PNANOVDB_GLSL)
PNANOVDB_FORCE_INLINE float pnanovdb_read_half(pnanovdb_buf_t buf, pnanovdb_address_t address)
{
    pnanovdb_uint32_t raw = pnanovdb_read_uint32(buf, address);
    return unpackHalf2x16(raw >> (pnanovdb_address_mask(address, 2) << 3)).x;
}
#endif

// ------------------------------------------------ High Level Buffer Write -----------------------------------------------------------

// Typed writes mirror the typed reads above: reinterpret the value's bits and
// hand the byte offset to the buffer backend.
PNANOVDB_FORCE_INLINE void pnanovdb_write_uint32(pnanovdb_buf_t buf, pnanovdb_address_t address, pnanovdb_uint32_t value)
{
    pnanovdb_buf_write_uint32(buf, address.byte_offset, value);
}
PNANOVDB_FORCE_INLINE void pnanovdb_write_uint64(pnanovdb_buf_t buf, pnanovdb_address_t address, pnanovdb_uint64_t value)
{
    pnanovdb_buf_write_uint64(buf, address.byte_offset, value);
}
PNANOVDB_FORCE_INLINE void pnanovdb_write_int32(pnanovdb_buf_t buf, pnanovdb_address_t address, pnanovdb_int32_t value)
{
    pnanovdb_write_uint32(buf, address, pnanovdb_int32_as_uint32(value));
}
PNANOVDB_FORCE_INLINE void pnanovdb_write_int64(pnanovdb_buf_t buf, pnanovdb_address_t address, pnanovdb_int64_t value)
{
    pnanovdb_buf_write_uint64(buf, address.byte_offset, pnanovdb_int64_as_uint64(value));
}
PNANOVDB_FORCE_INLINE void pnanovdb_write_float(pnanovdb_buf_t buf, pnanovdb_address_t address, float value)
{
    pnanovdb_write_uint32(buf, address, pnanovdb_float_as_uint32(value));
}
PNANOVDB_FORCE_INLINE void pnanovdb_write_double(pnanovdb_buf_t buf, pnanovdb_address_t address, double value)
{
    pnanovdb_write_uint64(buf, address, pnanovdb_double_as_uint64(value));
}
// Three consecutive 32-bit fields at +0/+4/+8, matching pnanovdb_read_coord.
PNANOVDB_FORCE_INLINE void pnanovdb_write_coord(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_coord_t) value)
{
    pnanovdb_write_uint32(buf, pnanovdb_address_offset(address, 0u), pnanovdb_int32_as_uint32(PNANOVDB_DEREF(value).x));
    pnanovdb_write_uint32(buf, pnanovdb_address_offset(address, 4u), pnanovdb_int32_as_uint32(PNANOVDB_DEREF(value).y));
    pnanovdb_write_uint32(buf, pnanovdb_address_offset(address, 8u), pnanovdb_int32_as_uint32(PNANOVDB_DEREF(value).z));
}
PNANOVDB_FORCE_INLINE void pnanovdb_write_vec3(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_vec3_t) value)
{
    pnanovdb_write_float(buf, pnanovdb_address_offset(address, 0u), PNANOVDB_DEREF(value).x);
    pnanovdb_write_float(buf, pnanovdb_address_offset(address, 4u), PNANOVDB_DEREF(value).y);
    pnanovdb_write_float(buf, pnanovdb_address_offset(address, 8u), PNANOVDB_DEREF(value).z);
}

// ------------------------------------------------ Core Structures -----------------------------------------------------------

#define PNANOVDB_MAGIC_NUMBER 0x304244566f6e614eUL// "NanoVDB0" in hex - little endian (uint64_t)
#define PNANOVDB_MAGIC_GRID 0x314244566f6e614eUL// "NanoVDB1" in hex - little endian (uint64_t)
#define PNANOVDB_MAGIC_FILE 0x324244566f6e614eUL// "NanoVDB2" in hex - little endian (uint64_t)

#define PNANOVDB_MAJOR_VERSION_NUMBER 32// reflects changes to the ABI
#define PNANOVDB_MINOR_VERSION_NUMBER 6// reflects changes to the API but not ABI
#define PNANOVDB_PATCH_VERSION_NUMBER 0// reflects bug-fixes with no ABI or API changes

// Value/build types stored in a grid; these index the stride tables below.
#define PNANOVDB_GRID_TYPE_UNKNOWN 0
#define PNANOVDB_GRID_TYPE_FLOAT 1
#define PNANOVDB_GRID_TYPE_DOUBLE 2
#define PNANOVDB_GRID_TYPE_INT16 3
#define PNANOVDB_GRID_TYPE_INT32 4
#define PNANOVDB_GRID_TYPE_INT64 5
#define PNANOVDB_GRID_TYPE_VEC3F 6
#define PNANOVDB_GRID_TYPE_VEC3D 7
#define PNANOVDB_GRID_TYPE_MASK 8
#define PNANOVDB_GRID_TYPE_HALF 9
#define PNANOVDB_GRID_TYPE_UINT32 10
#define PNANOVDB_GRID_TYPE_BOOLEAN 11
#define PNANOVDB_GRID_TYPE_RGBA8 12
#define PNANOVDB_GRID_TYPE_FP4 13
#define PNANOVDB_GRID_TYPE_FP8 14
#define PNANOVDB_GRID_TYPE_FP16 15
#define PNANOVDB_GRID_TYPE_FPN 16
#define PNANOVDB_GRID_TYPE_VEC4F 17
#define PNANOVDB_GRID_TYPE_VEC4D 18
#define PNANOVDB_GRID_TYPE_INDEX 19
#define PNANOVDB_GRID_TYPE_ONINDEX 20
#define PNANOVDB_GRID_TYPE_INDEXMASK 21
#define PNANOVDB_GRID_TYPE_ONINDEXMASK 22
#define PNANOVDB_GRID_TYPE_POINTINDEX 23
#define PNANOVDB_GRID_TYPE_VEC3U8 24
#define PNANOVDB_GRID_TYPE_VEC3U16 25
#define PNANOVDB_GRID_TYPE_END 26

#define PNANOVDB_GRID_CLASS_UNKNOWN 0
#define PNANOVDB_GRID_CLASS_LEVEL_SET 1 // narrow band level set, e.g. SDF
#define PNANOVDB_GRID_CLASS_FOG_VOLUME 2 // fog volume, e.g. density
#define PNANOVDB_GRID_CLASS_STAGGERED 3 // staggered MAC grid, e.g. velocity
#define PNANOVDB_GRID_CLASS_POINT_INDEX 4 // point index grid
#define PNANOVDB_GRID_CLASS_POINT_DATA 5 // point data grid
#define PNANOVDB_GRID_CLASS_TOPOLOGY 6 // grid with active states only (no values)
#define PNANOVDB_GRID_CLASS_VOXEL_VOLUME 7 // volume of geometric cubes, e.g. minecraft
#define PNANOVDB_GRID_CLASS_INDEX_GRID 8 // grid whose values are offsets, e.g. into an external array
#define PNANOVDB_GRID_CLASS_TENSOR_GRID 9 // grid which can have extra metadata and features
#define PNANOVDB_GRID_CLASS_END 10

// Bit flags stored in pnanovdb_grid_t::flags.
#define PNANOVDB_GRID_FLAGS_HAS_LONG_GRID_NAME (1 << 0)
#define PNANOVDB_GRID_FLAGS_HAS_BBOX (1 << 1)
#define PNANOVDB_GRID_FLAGS_HAS_MIN_MAX (1 << 2)
#define PNANOVDB_GRID_FLAGS_HAS_AVERAGE (1 << 3)
#define PNANOVDB_GRID_FLAGS_HAS_STD_DEVIATION (1 << 4)
#define PNANOVDB_GRID_FLAGS_IS_BREADTH_FIRST (1 << 5)
#define PNANOVDB_GRID_FLAGS_END (1 << 6)

// Leaf-node layout variants; pnanovdb_grid_type_leaf_type below maps grid type -> leaf type.
#define PNANOVDB_LEAF_TYPE_DEFAULT 0
#define PNANOVDB_LEAF_TYPE_LITE 1
#define PNANOVDB_LEAF_TYPE_FP 2
#define PNANOVDB_LEAF_TYPE_INDEX 3
#define PNANOVDB_LEAF_TYPE_INDEXMASK 4
#define PNANOVDB_LEAF_TYPE_POINTINDEX 5

// Per-grid-type layout tables, indexed by PNANOVDB_GRID_TYPE_*. These encode the
// on-disk/in-memory ABI — entries must not be edited independently of the format.
// BuildType = Unknown, float, double, int16_t, int32_t, int64_t, Vec3f, Vec3d, Mask, ...
// bit count of values in leaf nodes, i.e. 8*sizeof(*nanovdb::LeafNode::mValues) or zero if no values are stored
PNANOVDB_STATIC_CONST pnanovdb_uint32_t pnanovdb_grid_type_value_strides_bits[PNANOVDB_GRID_TYPE_END] = { 0, 32, 64, 16, 32, 64, 96, 192, 0, 16, 32, 1, 32, 4, 8, 16, 0, 128, 256, 0, 0, 0, 0, 16, 24, 48 };
// bit count of the Tile union in InternalNodes, i.e. 8*sizeof(nanovdb::InternalData::Tile)
PNANOVDB_STATIC_CONST pnanovdb_uint32_t pnanovdb_grid_type_table_strides_bits[PNANOVDB_GRID_TYPE_END] = { 64, 64, 64, 64, 64, 64, 128, 192, 64, 64, 64, 64, 64, 64, 64, 64, 64, 128, 256, 64, 64, 64, 64, 64, 64, 64 };
// bit count of min/max values, i.e. 8*sizeof(nanovdb::LeafData::mMinimum) or zero if no min/max exists
PNANOVDB_STATIC_CONST pnanovdb_uint32_t pnanovdb_grid_type_minmax_strides_bits[PNANOVDB_GRID_TYPE_END] = { 0, 32, 64, 16, 32, 64, 96, 192, 8, 16, 32, 8, 32, 32, 32, 32, 32, 128, 256, 64, 64, 64, 64, 64, 24, 48 };
// bit alignment of the value type, controlled by the smallest native type, which is why it is always 0, 8, 16, 32, or 64, e.g. for Vec3f it is 32
PNANOVDB_STATIC_CONST pnanovdb_uint32_t pnanovdb_grid_type_minmax_aligns_bits[PNANOVDB_GRID_TYPE_END] = { 0, 32, 64, 16, 32, 64, 32, 64, 8, 16, 32, 8, 32, 32, 32, 32, 32, 32, 64, 64, 64, 64, 64, 64, 8, 16 };
// bit alignment of the stats (avg/std-dev) types, e.g. 8*sizeof(nanovdb::LeafData::mAverage)
PNANOVDB_STATIC_CONST pnanovdb_uint32_t pnanovdb_grid_type_stat_strides_bits[PNANOVDB_GRID_TYPE_END] = { 0, 32, 64, 32, 32, 64, 32, 64, 8, 32, 32, 8, 32, 32, 32, 32, 32, 32, 64, 64, 64, 64, 64, 64, 32, 32 };
// one of the 4 leaf types defined above, e.g. PNANOVDB_LEAF_TYPE_INDEX = 3
PNANOVDB_STATIC_CONST pnanovdb_uint32_t pnanovdb_grid_type_leaf_type[PNANOVDB_GRID_TYPE_END] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 2, 2, 2, 2, 0, 0, 3, 3, 4, 4, 5, 0, 0 };

// Index<->world transform, stored in both float and double precision
// (mirrors the nanovdb Map layout; PNANOVDB_MAP_SIZE = 264 bytes).
struct pnanovdb_map_t
{
    float matf[9];     // 3x3 index-to-world matrix, float
    float invmatf[9];  // inverse matrix, float
    float vecf[3];     // translation, float
    float taperf;      // taper (scalar), float
    double matd[9];    // same fields again in double precision
    double invmatd[9];
    double vecd[3];
    double taperd;
};
PNANOVDB_STRUCT_TYPEDEF(pnanovdb_map_t)
struct pnanovdb_map_handle_t { pnanovdb_address_t address; };
PNANOVDB_STRUCT_TYPEDEF(pnanovdb_map_handle_t)

#define PNANOVDB_MAP_SIZE 264

// Byte offsets of the fields above; must stay in sync with the struct layout.
#define PNANOVDB_MAP_OFF_MATF 0
#define PNANOVDB_MAP_OFF_INVMATF 36
#define PNANOVDB_MAP_OFF_VECF 72
#define PNANOVDB_MAP_OFF_TAPERF 84
#define PNANOVDB_MAP_OFF_MATD 88
#define PNANOVDB_MAP_OFF_INVMATD 160
#define PNANOVDB_MAP_OFF_VECD 232
#define PNANOVDB_MAP_OFF_TAPERD 256

// Element accessors: index selects the matrix/vector component.
PNANOVDB_FORCE_INLINE float pnanovdb_map_get_matf(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index) {
    return pnanovdb_read_float(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_MATF + 4u * index));
}
PNANOVDB_FORCE_INLINE float pnanovdb_map_get_invmatf(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index) {
    return pnanovdb_read_float(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_INVMATF + 4u * index));
}
PNANOVDB_FORCE_INLINE float pnanovdb_map_get_vecf(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index) {
    return pnanovdb_read_float(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_VECF + 4u * index));
}
// taper is a scalar: index is unused, kept only for signature uniformity.
PNANOVDB_FORCE_INLINE float pnanovdb_map_get_taperf(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index) {
    return pnanovdb_read_float(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_TAPERF));
}
PNANOVDB_FORCE_INLINE double pnanovdb_map_get_matd(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index) {
    return pnanovdb_read_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_MATD + 8u * index));
}
PNANOVDB_FORCE_INLINE double pnanovdb_map_get_invmatd(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index) {
    return pnanovdb_read_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_INVMATD + 8u * index));
}
PNANOVDB_FORCE_INLINE double pnanovdb_map_get_vecd(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index) {
    return pnanovdb_read_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_VECD + 8u * index));
}
// taper is a scalar: index is unused, kept only for signature uniformity.
PNANOVDB_FORCE_INLINE double pnanovdb_map_get_taperd(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index) {
    return pnanovdb_read_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_TAPERD));
}

// Matching setters, writing to the same fixed offsets.
PNANOVDB_FORCE_INLINE void pnanovdb_map_set_matf(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index, float matf) {
    pnanovdb_write_float(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_MATF + 4u * index), matf);
}
PNANOVDB_FORCE_INLINE void pnanovdb_map_set_invmatf(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index, float invmatf) {
    pnanovdb_write_float(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_INVMATF + 4u * index), invmatf);
}
PNANOVDB_FORCE_INLINE void pnanovdb_map_set_vecf(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index, float vecf) {
    pnanovdb_write_float(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_VECF + 4u * index), vecf);
}
PNANOVDB_FORCE_INLINE void pnanovdb_map_set_taperf(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index, float taperf) {
    pnanovdb_write_float(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_TAPERF), taperf);
}
PNANOVDB_FORCE_INLINE void pnanovdb_map_set_matd(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index, double matd) {
    pnanovdb_write_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_MATD + 8u * index), matd);
}
PNANOVDB_FORCE_INLINE void pnanovdb_map_set_invmatd(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index, double invmatd) {
    pnanovdb_write_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_INVMATD + 8u * index), invmatd);
}
PNANOVDB_FORCE_INLINE void pnanovdb_map_set_vecd(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index, double vecd) {
    pnanovdb_write_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_VECD + 8u * index), vecd);
}
PNANOVDB_FORCE_INLINE void pnanovdb_map_set_taperd(pnanovdb_buf_t buf, pnanovdb_map_handle_t p, pnanovdb_uint32_t index, double taperd) {
    pnanovdb_write_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_MAP_OFF_TAPERD), taperd);
}

// Grid header; the per-field comments record size and byte offset and must agree
// with PNANOVDB_GRID_SIZE and the PNANOVDB_GRID_OFF_* defines below.
struct pnanovdb_grid_t
{
    pnanovdb_uint64_t magic; // 8 bytes, 0
    pnanovdb_uint64_t checksum; // 8 bytes, 8
    pnanovdb_uint32_t version; // 4 bytes, 16
    pnanovdb_uint32_t flags; // 4 bytes, 20
    pnanovdb_uint32_t grid_index; // 4 bytes, 24
    pnanovdb_uint32_t grid_count; // 4 bytes, 28
    pnanovdb_uint64_t grid_size; // 8 bytes, 32
    pnanovdb_uint32_t grid_name[256 / 4]; // 256 bytes, 40
    pnanovdb_map_t map; // 264 bytes, 296
    double world_bbox[6]; // 48 bytes, 560
    double voxel_size[3]; // 24 bytes, 608
    pnanovdb_uint32_t grid_class; // 4 bytes, 632
    pnanovdb_uint32_t grid_type; // 4 bytes, 636
    pnanovdb_int64_t blind_metadata_offset; // 8 bytes, 640
    pnanovdb_uint32_t blind_metadata_count; // 4 bytes, 648
    pnanovdb_uint32_t pad[5]; // 20 bytes, 652
};
PNANOVDB_STRUCT_TYPEDEF(pnanovdb_grid_t)
struct pnanovdb_grid_handle_t { pnanovdb_address_t address; };
PNANOVDB_STRUCT_TYPEDEF(pnanovdb_grid_handle_t)

#define PNANOVDB_GRID_SIZE 672

#define PNANOVDB_GRID_OFF_MAGIC 0
#define PNANOVDB_GRID_OFF_CHECKSUM 8
#define PNANOVDB_GRID_OFF_VERSION 16
#define PNANOVDB_GRID_OFF_FLAGS 20
#define PNANOVDB_GRID_OFF_GRID_INDEX 24
#define PNANOVDB_GRID_OFF_GRID_COUNT 28
#define PNANOVDB_GRID_OFF_GRID_SIZE 32
#define PNANOVDB_GRID_OFF_GRID_NAME 40
#define PNANOVDB_GRID_OFF_MAP 296
#define PNANOVDB_GRID_OFF_WORLD_BBOX 560
#define PNANOVDB_GRID_OFF_VOXEL_SIZE 608
#define PNANOVDB_GRID_OFF_GRID_CLASS 632
#define PNANOVDB_GRID_OFF_GRID_TYPE 636
#define PNANOVDB_GRID_OFF_BLIND_METADATA_OFFSET 640
#define PNANOVDB_GRID_OFF_BLIND_METADATA_COUNT 648

// Field getters: each reads one header field at its fixed offset from the grid base.
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_grid_get_magic(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p) {
    return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_MAGIC));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_grid_get_checksum(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p) {
    return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_CHECKSUM));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_grid_get_version(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p) {
    return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_VERSION));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_grid_get_flags(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p) {
    return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_FLAGS));
}
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_grid_get_grid_index(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p) {
    return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_INDEX));
}
+PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_grid_get_grid_count(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p) { + return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_COUNT)); +} +PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_grid_get_grid_size(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p) { + return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_SIZE)); +} +PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_grid_get_grid_name(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t index) { + return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_NAME + 4u * index)); +} +PNANOVDB_FORCE_INLINE pnanovdb_map_handle_t pnanovdb_grid_get_map(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p) { + pnanovdb_map_handle_t ret; + ret.address = pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_MAP); + return ret; +} +PNANOVDB_FORCE_INLINE double pnanovdb_grid_get_world_bbox(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t index) { + return pnanovdb_read_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_WORLD_BBOX + 8u * index)); +} +PNANOVDB_FORCE_INLINE double pnanovdb_grid_get_voxel_size(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t index) { + return pnanovdb_read_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_VOXEL_SIZE + 8u * index)); +} +PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_grid_get_grid_class(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p) { + return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_CLASS)); +} +PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_grid_get_grid_type(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p) { + return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_TYPE)); +} +PNANOVDB_FORCE_INLINE pnanovdb_int64_t pnanovdb_grid_get_blind_metadata_offset(pnanovdb_buf_t buf, 
pnanovdb_grid_handle_t p) { + return pnanovdb_read_int64(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_BLIND_METADATA_OFFSET)); +} +PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_grid_get_blind_metadata_count(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p) { + return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_BLIND_METADATA_COUNT)); +} + +PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_magic(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint64_t magic) { + pnanovdb_write_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_MAGIC), magic); +} +PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_checksum(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint64_t checksum) { + pnanovdb_write_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_CHECKSUM), checksum); +} +PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_version(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t version) { + pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_VERSION), version); +} +PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_flags(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t flags) { + pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_FLAGS), flags); +} +PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_grid_index(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t grid_index) { + pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_INDEX), grid_index); +} +PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_grid_count(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t grid_count) { + pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_COUNT), grid_count); +} +PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_grid_size(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint64_t grid_size) { + pnanovdb_write_uint64(buf, 
pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_SIZE), grid_size); +} +PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_grid_name(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t index, pnanovdb_uint32_t grid_name) { + pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_NAME + 4u * index), grid_name); +} +PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_world_bbox(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t index, double world_bbox) { + pnanovdb_write_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_WORLD_BBOX + 8u * index), world_bbox); +} +PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_voxel_size(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t index, double voxel_size) { + pnanovdb_write_double(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_VOXEL_SIZE + 8u * index), voxel_size); +} +PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_grid_class(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t grid_class) { + pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_CLASS), grid_class); +} +PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_grid_type(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t grid_type) { + pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_GRID_TYPE), grid_type); +} +PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_blind_metadata_offset(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint64_t blind_metadata_offset) { + pnanovdb_write_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_BLIND_METADATA_OFFSET), blind_metadata_offset); +} +PNANOVDB_FORCE_INLINE void pnanovdb_grid_set_blind_metadata_count(pnanovdb_buf_t buf, pnanovdb_grid_handle_t p, pnanovdb_uint32_t metadata_count) { + pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRID_OFF_BLIND_METADATA_COUNT), metadata_count); +} + +PNANOVDB_FORCE_INLINE 
pnanovdb_uint32_t pnanovdb_make_version(pnanovdb_uint32_t major, pnanovdb_uint32_t minor, pnanovdb_uint32_t patch_num) +{ + return (major << 21u) | (minor << 10u) | patch_num; +} + +PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_version_get_major(pnanovdb_uint32_t version) +{ + return (version >> 21u) & ((1u << 11u) - 1u); +} +PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_version_get_minor(pnanovdb_uint32_t version) +{ + return (version >> 10u) & ((1u << 11u) - 1u); +} +PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_version_get_patch(pnanovdb_uint32_t version) +{ + return version & ((1u << 10u) - 1u); +} + +struct pnanovdb_gridblindmetadata_t +{ + pnanovdb_int64_t byte_offset; // 8 bytes, 0 + pnanovdb_uint64_t element_count; // 8 bytes, 8 + pnanovdb_uint32_t flags; // 4 bytes, 16 + pnanovdb_uint32_t semantic; // 4 bytes, 20 + pnanovdb_uint32_t data_class; // 4 bytes, 24 + pnanovdb_uint32_t data_type; // 4 bytes, 28 + pnanovdb_uint32_t name[256 / 4]; // 256 bytes, 32 +}; +PNANOVDB_STRUCT_TYPEDEF(pnanovdb_gridblindmetadata_t) +struct pnanovdb_gridblindmetadata_handle_t { pnanovdb_address_t address; }; +PNANOVDB_STRUCT_TYPEDEF(pnanovdb_gridblindmetadata_handle_t) + +#define PNANOVDB_GRIDBLINDMETADATA_SIZE 288 + +#define PNANOVDB_GRIDBLINDMETADATA_OFF_BYTE_OFFSET 0 +#define PNANOVDB_GRIDBLINDMETADATA_OFF_ELEMENT_COUNT 8 +#define PNANOVDB_GRIDBLINDMETADATA_OFF_FLAGS 16 +#define PNANOVDB_GRIDBLINDMETADATA_OFF_SEMANTIC 20 +#define PNANOVDB_GRIDBLINDMETADATA_OFF_DATA_CLASS 24 +#define PNANOVDB_GRIDBLINDMETADATA_OFF_DATA_TYPE 28 +#define PNANOVDB_GRIDBLINDMETADATA_OFF_NAME 32 + +PNANOVDB_FORCE_INLINE pnanovdb_int64_t pnanovdb_gridblindmetadata_get_byte_offset(pnanovdb_buf_t buf, pnanovdb_gridblindmetadata_handle_t p) { + return pnanovdb_read_int64(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRIDBLINDMETADATA_OFF_BYTE_OFFSET)); +} +PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_gridblindmetadata_get_element_count(pnanovdb_buf_t buf, 
pnanovdb_gridblindmetadata_handle_t p) { + return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRIDBLINDMETADATA_OFF_ELEMENT_COUNT)); +} +PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_gridblindmetadata_get_flags(pnanovdb_buf_t buf, pnanovdb_gridblindmetadata_handle_t p) { + return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRIDBLINDMETADATA_OFF_FLAGS)); +} +PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_gridblindmetadata_get_semantic(pnanovdb_buf_t buf, pnanovdb_gridblindmetadata_handle_t p) { + return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRIDBLINDMETADATA_OFF_SEMANTIC)); +} +PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_gridblindmetadata_get_data_class(pnanovdb_buf_t buf, pnanovdb_gridblindmetadata_handle_t p) { + return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRIDBLINDMETADATA_OFF_DATA_CLASS)); +} +PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_gridblindmetadata_get_data_type(pnanovdb_buf_t buf, pnanovdb_gridblindmetadata_handle_t p) { + return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRIDBLINDMETADATA_OFF_DATA_TYPE)); +} +PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_gridblindmetadata_get_name(pnanovdb_buf_t buf, pnanovdb_gridblindmetadata_handle_t p, pnanovdb_uint32_t index) { + return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_GRIDBLINDMETADATA_OFF_NAME + 4u * index)); +} + +struct pnanovdb_tree_t +{ + pnanovdb_uint64_t node_offset_leaf; + pnanovdb_uint64_t node_offset_lower; + pnanovdb_uint64_t node_offset_upper; + pnanovdb_uint64_t node_offset_root; + pnanovdb_uint32_t node_count_leaf; + pnanovdb_uint32_t node_count_lower; + pnanovdb_uint32_t node_count_upper; + pnanovdb_uint32_t tile_count_leaf; + pnanovdb_uint32_t tile_count_lower; + pnanovdb_uint32_t tile_count_upper; + pnanovdb_uint64_t voxel_count; +}; +PNANOVDB_STRUCT_TYPEDEF(pnanovdb_tree_t) +struct 
pnanovdb_tree_handle_t { pnanovdb_address_t address; }; +PNANOVDB_STRUCT_TYPEDEF(pnanovdb_tree_handle_t) + +#define PNANOVDB_TREE_SIZE 64 + +#define PNANOVDB_TREE_OFF_NODE_OFFSET_LEAF 0 +#define PNANOVDB_TREE_OFF_NODE_OFFSET_LOWER 8 +#define PNANOVDB_TREE_OFF_NODE_OFFSET_UPPER 16 +#define PNANOVDB_TREE_OFF_NODE_OFFSET_ROOT 24 +#define PNANOVDB_TREE_OFF_NODE_COUNT_LEAF 32 +#define PNANOVDB_TREE_OFF_NODE_COUNT_LOWER 36 +#define PNANOVDB_TREE_OFF_NODE_COUNT_UPPER 40 +#define PNANOVDB_TREE_OFF_TILE_COUNT_LEAF 44 +#define PNANOVDB_TREE_OFF_TILE_COUNT_LOWER 48 +#define PNANOVDB_TREE_OFF_TILE_COUNT_UPPER 52 +#define PNANOVDB_TREE_OFF_VOXEL_COUNT 56 + +PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_tree_get_node_offset_leaf(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p) { + return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_OFFSET_LEAF)); +} +PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_tree_get_node_offset_lower(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p) { + return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_OFFSET_LOWER)); +} +PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_tree_get_node_offset_upper(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p) { + return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_OFFSET_UPPER)); +} +PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_tree_get_node_offset_root(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p) { + return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_OFFSET_ROOT)); +} +PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_tree_get_node_count_leaf(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p) { + return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_COUNT_LEAF)); +} +PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_tree_get_node_count_lower(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p) { + return pnanovdb_read_uint32(buf, 
pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_COUNT_LOWER)); +} +PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_tree_get_node_count_upper(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p) { + return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_COUNT_UPPER)); +} +PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_tree_get_tile_count_leaf(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p) { + return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_TILE_COUNT_LEAF)); +} +PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_tree_get_tile_count_lower(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p) { + return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_TILE_COUNT_LOWER)); +} +PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_tree_get_tile_count_upper(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p) { + return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_TILE_COUNT_UPPER)); +} +PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_tree_get_voxel_count(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p) { + return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_VOXEL_COUNT)); +} + +PNANOVDB_FORCE_INLINE void pnanovdb_tree_set_node_offset_leaf(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p, pnanovdb_uint64_t node_offset_leaf) { + pnanovdb_write_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_OFFSET_LEAF), node_offset_leaf); +} +PNANOVDB_FORCE_INLINE void pnanovdb_tree_set_node_offset_lower(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p, pnanovdb_uint64_t node_offset_lower) { + pnanovdb_write_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_OFFSET_LOWER), node_offset_lower); +} +PNANOVDB_FORCE_INLINE void pnanovdb_tree_set_node_offset_upper(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p, pnanovdb_uint64_t node_offset_upper) { + pnanovdb_write_uint64(buf, pnanovdb_address_offset(p.address, 
PNANOVDB_TREE_OFF_NODE_OFFSET_UPPER), node_offset_upper); +} +PNANOVDB_FORCE_INLINE void pnanovdb_tree_set_node_offset_root(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p, pnanovdb_uint64_t node_offset_root) { + pnanovdb_write_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_OFFSET_ROOT), node_offset_root); +} +PNANOVDB_FORCE_INLINE void pnanovdb_tree_set_node_count_leaf(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p, pnanovdb_uint32_t node_count_leaf) { + pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_COUNT_LEAF), node_count_leaf); +} +PNANOVDB_FORCE_INLINE void pnanovdb_tree_set_node_count_lower(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p, pnanovdb_uint32_t node_count_lower) { + pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_COUNT_LOWER), node_count_lower); +} +PNANOVDB_FORCE_INLINE void pnanovdb_tree_set_node_count_upper(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p, pnanovdb_uint32_t node_count_upper) { + pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_NODE_COUNT_UPPER), node_count_upper); +} +PNANOVDB_FORCE_INLINE void pnanovdb_tree_set_tile_count_leaf(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p, pnanovdb_uint32_t tile_count_leaf) { + pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_TILE_COUNT_LEAF), tile_count_leaf); +} +PNANOVDB_FORCE_INLINE void pnanovdb_tree_set_tile_count_lower(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p, pnanovdb_uint32_t tile_count_lower) { + pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_TILE_COUNT_LOWER), tile_count_lower); +} +PNANOVDB_FORCE_INLINE void pnanovdb_tree_set_tile_count_upper(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p, pnanovdb_uint32_t tile_count_upper) { + pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_TILE_COUNT_UPPER), tile_count_upper); +} +PNANOVDB_FORCE_INLINE void 
pnanovdb_tree_set_voxel_count(pnanovdb_buf_t buf, pnanovdb_tree_handle_t p, pnanovdb_uint64_t voxel_count) { + pnanovdb_write_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_TREE_OFF_VOXEL_COUNT), voxel_count); +} + +struct pnanovdb_root_t +{ + pnanovdb_coord_t bbox_min; + pnanovdb_coord_t bbox_max; + pnanovdb_uint32_t table_size; + pnanovdb_uint32_t pad1; // background can start here + // background, min, max +}; +PNANOVDB_STRUCT_TYPEDEF(pnanovdb_root_t) +struct pnanovdb_root_handle_t { pnanovdb_address_t address; }; +PNANOVDB_STRUCT_TYPEDEF(pnanovdb_root_handle_t) + +#define PNANOVDB_ROOT_BASE_SIZE 28 + +#define PNANOVDB_ROOT_OFF_BBOX_MIN 0 +#define PNANOVDB_ROOT_OFF_BBOX_MAX 12 +#define PNANOVDB_ROOT_OFF_TABLE_SIZE 24 + +PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_root_get_bbox_min(pnanovdb_buf_t buf, pnanovdb_root_handle_t p) { + return pnanovdb_read_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_OFF_BBOX_MIN)); +} +PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_root_get_bbox_max(pnanovdb_buf_t buf, pnanovdb_root_handle_t p) { + return pnanovdb_read_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_OFF_BBOX_MAX)); +} +PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_root_get_tile_count(pnanovdb_buf_t buf, pnanovdb_root_handle_t p) { + return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_OFF_TABLE_SIZE)); +} + +PNANOVDB_FORCE_INLINE void pnanovdb_root_set_bbox_min(pnanovdb_buf_t buf, pnanovdb_root_handle_t p, PNANOVDB_IN(pnanovdb_coord_t) bbox_min) { + pnanovdb_write_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_OFF_BBOX_MIN), bbox_min); +} +PNANOVDB_FORCE_INLINE void pnanovdb_root_set_bbox_max(pnanovdb_buf_t buf, pnanovdb_root_handle_t p, PNANOVDB_IN(pnanovdb_coord_t) bbox_max) { + pnanovdb_write_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_OFF_BBOX_MAX), bbox_max); +} +PNANOVDB_FORCE_INLINE void pnanovdb_root_set_tile_count(pnanovdb_buf_t buf, pnanovdb_root_handle_t 
p, pnanovdb_uint32_t tile_count) { + pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_OFF_TABLE_SIZE), tile_count); +} + +struct pnanovdb_root_tile_t +{ + pnanovdb_uint64_t key; + pnanovdb_int64_t child; // signed byte offset from root to the child node, 0 means it is a constant tile, so use value + pnanovdb_uint32_t state; + pnanovdb_uint32_t pad1; // value can start here + // value +}; +PNANOVDB_STRUCT_TYPEDEF(pnanovdb_root_tile_t) +struct pnanovdb_root_tile_handle_t { pnanovdb_address_t address; }; +PNANOVDB_STRUCT_TYPEDEF(pnanovdb_root_tile_handle_t) + +#define PNANOVDB_ROOT_TILE_BASE_SIZE 20 + +#define PNANOVDB_ROOT_TILE_OFF_KEY 0 +#define PNANOVDB_ROOT_TILE_OFF_CHILD 8 +#define PNANOVDB_ROOT_TILE_OFF_STATE 16 + +PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_root_tile_get_key(pnanovdb_buf_t buf, pnanovdb_root_tile_handle_t p) { + return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_TILE_OFF_KEY)); +} +PNANOVDB_FORCE_INLINE pnanovdb_int64_t pnanovdb_root_tile_get_child(pnanovdb_buf_t buf, pnanovdb_root_tile_handle_t p) { + return pnanovdb_read_int64(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_TILE_OFF_CHILD)); +} +PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_root_tile_get_state(pnanovdb_buf_t buf, pnanovdb_root_tile_handle_t p) { + return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_TILE_OFF_STATE)); +} + +PNANOVDB_FORCE_INLINE void pnanovdb_root_tile_set_key(pnanovdb_buf_t buf, pnanovdb_root_tile_handle_t p, pnanovdb_uint64_t key) { + pnanovdb_write_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_TILE_OFF_KEY), key); +} +PNANOVDB_FORCE_INLINE void pnanovdb_root_tile_set_child(pnanovdb_buf_t buf, pnanovdb_root_tile_handle_t p, pnanovdb_int64_t child) { + pnanovdb_write_int64(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_TILE_OFF_CHILD), child); +} +PNANOVDB_FORCE_INLINE void pnanovdb_root_tile_set_state(pnanovdb_buf_t buf, 
pnanovdb_root_tile_handle_t p, pnanovdb_uint32_t state) { + pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_ROOT_TILE_OFF_STATE), state); +} + +struct pnanovdb_upper_t +{ + pnanovdb_coord_t bbox_min; + pnanovdb_coord_t bbox_max; + pnanovdb_uint64_t flags; + pnanovdb_uint32_t value_mask[1024]; + pnanovdb_uint32_t child_mask[1024]; + // min, max + // alignas(32) pnanovdb_uint32_t table[]; +}; +PNANOVDB_STRUCT_TYPEDEF(pnanovdb_upper_t) +struct pnanovdb_upper_handle_t { pnanovdb_address_t address; }; +PNANOVDB_STRUCT_TYPEDEF(pnanovdb_upper_handle_t) + +#define PNANOVDB_UPPER_TABLE_COUNT 32768 +#define PNANOVDB_UPPER_BASE_SIZE 8224 + +#define PNANOVDB_UPPER_OFF_BBOX_MIN 0 +#define PNANOVDB_UPPER_OFF_BBOX_MAX 12 +#define PNANOVDB_UPPER_OFF_FLAGS 24 +#define PNANOVDB_UPPER_OFF_VALUE_MASK 32 +#define PNANOVDB_UPPER_OFF_CHILD_MASK 4128 + +PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_upper_get_bbox_min(pnanovdb_buf_t buf, pnanovdb_upper_handle_t p) { + return pnanovdb_read_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_UPPER_OFF_BBOX_MIN)); +} +PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_upper_get_bbox_max(pnanovdb_buf_t buf, pnanovdb_upper_handle_t p) { + return pnanovdb_read_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_UPPER_OFF_BBOX_MAX)); +} +PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_upper_get_flags(pnanovdb_buf_t buf, pnanovdb_upper_handle_t p) { + return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_UPPER_OFF_FLAGS)); +} +PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_upper_get_value_mask(pnanovdb_buf_t buf, pnanovdb_upper_handle_t p, pnanovdb_uint32_t bit_index) { + pnanovdb_uint32_t value = pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_UPPER_OFF_VALUE_MASK + 4u * (bit_index >> 5u))); + return ((value >> (bit_index & 31u)) & 1) != 0u; +} +PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_upper_get_child_mask(pnanovdb_buf_t buf, pnanovdb_upper_handle_t p, 
pnanovdb_uint32_t bit_index) { + pnanovdb_uint32_t value = pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_UPPER_OFF_CHILD_MASK + 4u * (bit_index >> 5u))); + return ((value >> (bit_index & 31u)) & 1) != 0u; +} + +PNANOVDB_FORCE_INLINE void pnanovdb_upper_set_bbox_min(pnanovdb_buf_t buf, pnanovdb_upper_handle_t p, PNANOVDB_IN(pnanovdb_coord_t) bbox_min) { + pnanovdb_write_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_UPPER_OFF_BBOX_MIN), bbox_min); +} +PNANOVDB_FORCE_INLINE void pnanovdb_upper_set_bbox_max(pnanovdb_buf_t buf, pnanovdb_upper_handle_t p, PNANOVDB_IN(pnanovdb_coord_t) bbox_max) { + pnanovdb_write_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_UPPER_OFF_BBOX_MAX), bbox_max); +} +PNANOVDB_FORCE_INLINE void pnanovdb_upper_set_child_mask(pnanovdb_buf_t buf, pnanovdb_upper_handle_t p, pnanovdb_uint32_t bit_index, pnanovdb_bool_t value) { + pnanovdb_address_t addr = pnanovdb_address_offset(p.address, PNANOVDB_UPPER_OFF_CHILD_MASK + 4u * (bit_index >> 5u)); + pnanovdb_uint32_t valueMask = pnanovdb_read_uint32(buf, addr); + if (!value) { valueMask &= ~(1u << (bit_index & 31u)); } + if (value) valueMask |= (1u << (bit_index & 31u)); + pnanovdb_write_uint32(buf, addr, valueMask); +} + +struct pnanovdb_lower_t +{ + pnanovdb_coord_t bbox_min; + pnanovdb_coord_t bbox_max; + pnanovdb_uint64_t flags; + pnanovdb_uint32_t value_mask[128]; + pnanovdb_uint32_t child_mask[128]; + // min, max + // alignas(32) pnanovdb_uint32_t table[]; +}; +PNANOVDB_STRUCT_TYPEDEF(pnanovdb_lower_t) +struct pnanovdb_lower_handle_t { pnanovdb_address_t address; }; +PNANOVDB_STRUCT_TYPEDEF(pnanovdb_lower_handle_t) + +#define PNANOVDB_LOWER_TABLE_COUNT 4096 +#define PNANOVDB_LOWER_BASE_SIZE 1056 + +#define PNANOVDB_LOWER_OFF_BBOX_MIN 0 +#define PNANOVDB_LOWER_OFF_BBOX_MAX 12 +#define PNANOVDB_LOWER_OFF_FLAGS 24 +#define PNANOVDB_LOWER_OFF_VALUE_MASK 32 +#define PNANOVDB_LOWER_OFF_CHILD_MASK 544 + +PNANOVDB_FORCE_INLINE pnanovdb_coord_t 
pnanovdb_lower_get_bbox_min(pnanovdb_buf_t buf, pnanovdb_lower_handle_t p) { + return pnanovdb_read_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_LOWER_OFF_BBOX_MIN)); +} +PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_lower_get_bbox_max(pnanovdb_buf_t buf, pnanovdb_lower_handle_t p) { + return pnanovdb_read_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_LOWER_OFF_BBOX_MAX)); +} +PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_lower_get_flags(pnanovdb_buf_t buf, pnanovdb_lower_handle_t p) { + return pnanovdb_read_uint64(buf, pnanovdb_address_offset(p.address, PNANOVDB_LOWER_OFF_FLAGS)); +} +PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_lower_get_value_mask(pnanovdb_buf_t buf, pnanovdb_lower_handle_t p, pnanovdb_uint32_t bit_index) { + pnanovdb_uint32_t value = pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_LOWER_OFF_VALUE_MASK + 4u * (bit_index >> 5u))); + return ((value >> (bit_index & 31u)) & 1) != 0u; +} +PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_lower_get_child_mask(pnanovdb_buf_t buf, pnanovdb_lower_handle_t p, pnanovdb_uint32_t bit_index) { + pnanovdb_uint32_t value = pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_LOWER_OFF_CHILD_MASK + 4u * (bit_index >> 5u))); + return ((value >> (bit_index & 31u)) & 1) != 0u; +} + +PNANOVDB_FORCE_INLINE void pnanovdb_lower_set_bbox_min(pnanovdb_buf_t buf, pnanovdb_lower_handle_t p, PNANOVDB_IN(pnanovdb_coord_t) bbox_min) { + pnanovdb_write_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_LOWER_OFF_BBOX_MIN), bbox_min); +} +PNANOVDB_FORCE_INLINE void pnanovdb_lower_set_bbox_max(pnanovdb_buf_t buf, pnanovdb_lower_handle_t p, PNANOVDB_IN(pnanovdb_coord_t) bbox_max) { + pnanovdb_write_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_LOWER_OFF_BBOX_MAX), bbox_max); +} +PNANOVDB_FORCE_INLINE void pnanovdb_lower_set_child_mask(pnanovdb_buf_t buf, pnanovdb_lower_handle_t p, pnanovdb_uint32_t bit_index, pnanovdb_bool_t value) { + pnanovdb_address_t 
addr = pnanovdb_address_offset(p.address, PNANOVDB_LOWER_OFF_CHILD_MASK + 4u * (bit_index >> 5u)); + pnanovdb_uint32_t valueMask = pnanovdb_read_uint32(buf, addr); + if (!value) { valueMask &= ~(1u << (bit_index & 31u)); } + if (value) valueMask |= (1u << (bit_index & 31u)); + pnanovdb_write_uint32(buf, addr, valueMask); +} + +struct pnanovdb_leaf_t +{ + pnanovdb_coord_t bbox_min; + pnanovdb_uint32_t bbox_dif_and_flags; + pnanovdb_uint32_t value_mask[16]; + // min, max + // alignas(32) pnanovdb_uint32_t values[]; +}; +PNANOVDB_STRUCT_TYPEDEF(pnanovdb_leaf_t) +struct pnanovdb_leaf_handle_t { pnanovdb_address_t address; }; +PNANOVDB_STRUCT_TYPEDEF(pnanovdb_leaf_handle_t) + +#define PNANOVDB_LEAF_TABLE_COUNT 512 +#define PNANOVDB_LEAF_BASE_SIZE 80 + +#define PNANOVDB_LEAF_OFF_BBOX_MIN 0 +#define PNANOVDB_LEAF_OFF_BBOX_DIF_AND_FLAGS 12 +#define PNANOVDB_LEAF_OFF_VALUE_MASK 16 + +#define PNANOVDB_LEAF_TABLE_NEG_OFF_BBOX_DIF_AND_FLAGS 84 +#define PNANOVDB_LEAF_TABLE_NEG_OFF_MINIMUM 16 +#define PNANOVDB_LEAF_TABLE_NEG_OFF_QUANTUM 12 + +PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_leaf_get_bbox_min(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t p) { + return pnanovdb_read_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_LEAF_OFF_BBOX_MIN)); +} +PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_leaf_get_bbox_dif_and_flags(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t p) { + return pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_LEAF_OFF_BBOX_DIF_AND_FLAGS)); +} +PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_leaf_get_value_mask(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t p, pnanovdb_uint32_t bit_index) { + pnanovdb_uint32_t value = pnanovdb_read_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_LEAF_OFF_VALUE_MASK + 4u * (bit_index >> 5u))); + return ((value >> (bit_index & 31u)) & 1) != 0u; +} + +PNANOVDB_FORCE_INLINE void pnanovdb_leaf_set_bbox_min(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t p, PNANOVDB_IN(pnanovdb_coord_t) bbox_min) { + 
pnanovdb_write_coord(buf, pnanovdb_address_offset(p.address, PNANOVDB_LEAF_OFF_BBOX_MIN), bbox_min); +} +PNANOVDB_FORCE_INLINE void pnanovdb_leaf_set_bbox_dif_and_flags(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t p, pnanovdb_uint32_t bbox_dif_and_flags) { + pnanovdb_write_uint32(buf, pnanovdb_address_offset(p.address, PNANOVDB_LEAF_OFF_BBOX_DIF_AND_FLAGS), bbox_dif_and_flags); +} + +struct pnanovdb_grid_type_constants_t +{ + pnanovdb_uint32_t root_off_background; + pnanovdb_uint32_t root_off_min; + pnanovdb_uint32_t root_off_max; + pnanovdb_uint32_t root_off_ave; + pnanovdb_uint32_t root_off_stddev; + pnanovdb_uint32_t root_size; + pnanovdb_uint32_t value_stride_bits; + pnanovdb_uint32_t table_stride; + pnanovdb_uint32_t root_tile_off_value; + pnanovdb_uint32_t root_tile_size; + pnanovdb_uint32_t upper_off_min; + pnanovdb_uint32_t upper_off_max; + pnanovdb_uint32_t upper_off_ave; + pnanovdb_uint32_t upper_off_stddev; + pnanovdb_uint32_t upper_off_table; + pnanovdb_uint32_t upper_size; + pnanovdb_uint32_t lower_off_min; + pnanovdb_uint32_t lower_off_max; + pnanovdb_uint32_t lower_off_ave; + pnanovdb_uint32_t lower_off_stddev; + pnanovdb_uint32_t lower_off_table; + pnanovdb_uint32_t lower_size; + pnanovdb_uint32_t leaf_off_min; + pnanovdb_uint32_t leaf_off_max; + pnanovdb_uint32_t leaf_off_ave; + pnanovdb_uint32_t leaf_off_stddev; + pnanovdb_uint32_t leaf_off_table; + pnanovdb_uint32_t leaf_size; +}; +PNANOVDB_STRUCT_TYPEDEF(pnanovdb_grid_type_constants_t) + +// The following table with offsets will nedd to be updates as new GridTypes are added in NanoVDB.h +PNANOVDB_STATIC_CONST pnanovdb_grid_type_constants_t pnanovdb_grid_type_constants[PNANOVDB_GRID_TYPE_END] = +{ +{28, 28, 28, 28, 28, 32, 0, 8, 20, 32, 8224, 8224, 8224, 8224, 8224, 270368, 1056, 1056, 1056, 1056, 1056, 33824, 80, 80, 80, 80, 96, 96}, +{28, 32, 36, 40, 44, 64, 32, 8, 20, 32, 8224, 8228, 8232, 8236, 8256, 270400, 1056, 1060, 1064, 1068, 1088, 33856, 80, 84, 88, 92, 96, 2144}, +{32, 40, 48, 56, 64, 
96, 64, 8, 24, 32, 8224, 8232, 8240, 8248, 8256, 270400, 1056, 1064, 1072, 1080, 1088, 33856, 80, 88, 96, 104, 128, 4224}, +{28, 30, 32, 36, 40, 64, 16, 8, 20, 32, 8224, 8226, 8228, 8232, 8256, 270400, 1056, 1058, 1060, 1064, 1088, 33856, 80, 82, 84, 88, 96, 1120}, +{28, 32, 36, 40, 44, 64, 32, 8, 20, 32, 8224, 8228, 8232, 8236, 8256, 270400, 1056, 1060, 1064, 1068, 1088, 33856, 80, 84, 88, 92, 96, 2144}, +{32, 40, 48, 56, 64, 96, 64, 8, 24, 32, 8224, 8232, 8240, 8248, 8256, 270400, 1056, 1064, 1072, 1080, 1088, 33856, 80, 88, 96, 104, 128, 4224}, +{28, 40, 52, 64, 68, 96, 96, 16, 20, 32, 8224, 8236, 8248, 8252, 8256, 532544, 1056, 1068, 1080, 1084, 1088, 66624, 80, 92, 104, 108, 128, 6272}, +{32, 56, 80, 104, 112, 128, 192, 24, 24, 64, 8224, 8248, 8272, 8280, 8288, 794720, 1056, 1080, 1104, 1112, 1120, 99424, 80, 104, 128, 136, 160, 12448}, +{28, 29, 30, 31, 32, 64, 0, 8, 20, 32, 8224, 8225, 8226, 8227, 8256, 270400, 1056, 1057, 1058, 1059, 1088, 33856, 80, 80, 80, 80, 96, 96}, +{28, 30, 32, 36, 40, 64, 16, 8, 20, 32, 8224, 8226, 8228, 8232, 8256, 270400, 1056, 1058, 1060, 1064, 1088, 33856, 80, 82, 84, 88, 96, 1120}, +{28, 32, 36, 40, 44, 64, 32, 8, 20, 32, 8224, 8228, 8232, 8236, 8256, 270400, 1056, 1060, 1064, 1068, 1088, 33856, 80, 84, 88, 92, 96, 2144}, +{28, 29, 30, 31, 32, 64, 1, 8, 20, 32, 8224, 8225, 8226, 8227, 8256, 270400, 1056, 1057, 1058, 1059, 1088, 33856, 80, 80, 80, 80, 96, 160}, +{28, 32, 36, 40, 44, 64, 32, 8, 20, 32, 8224, 8228, 8232, 8236, 8256, 270400, 1056, 1060, 1064, 1068, 1088, 33856, 80, 84, 88, 92, 96, 2144}, +{28, 32, 36, 40, 44, 64, 0, 8, 20, 32, 8224, 8228, 8232, 8236, 8256, 270400, 1056, 1060, 1064, 1068, 1088, 33856, 88, 90, 92, 94, 96, 352}, +{28, 32, 36, 40, 44, 64, 0, 8, 20, 32, 8224, 8228, 8232, 8236, 8256, 270400, 1056, 1060, 1064, 1068, 1088, 33856, 88, 90, 92, 94, 96, 608}, +{28, 32, 36, 40, 44, 64, 0, 8, 20, 32, 8224, 8228, 8232, 8236, 8256, 270400, 1056, 1060, 1064, 1068, 1088, 33856, 88, 90, 92, 94, 96, 1120}, +{28, 32, 
36, 40, 44, 64, 0, 8, 20, 32, 8224, 8228, 8232, 8236, 8256, 270400, 1056, 1060, 1064, 1068, 1088, 33856, 88, 90, 92, 94, 96, 96}, +{28, 44, 60, 76, 80, 96, 128, 16, 20, 64, 8224, 8240, 8256, 8260, 8288, 532576, 1056, 1072, 1088, 1092, 1120, 66656, 80, 96, 112, 116, 128, 8320}, +{32, 64, 96, 128, 136, 160, 256, 32, 24, 64, 8224, 8256, 8288, 8296, 8320, 1056896, 1056, 1088, 1120, 1128, 1152, 132224, 80, 112, 144, 152, 160, 16544}, +{32, 40, 48, 56, 64, 96, 0, 8, 24, 32, 8224, 8232, 8240, 8248, 8256, 270400, 1056, 1064, 1072, 1080, 1088, 33856, 80, 80, 80, 80, 80, 96}, +{32, 40, 48, 56, 64, 96, 0, 8, 24, 32, 8224, 8232, 8240, 8248, 8256, 270400, 1056, 1064, 1072, 1080, 1088, 33856, 80, 80, 80, 80, 80, 96}, +{32, 40, 48, 56, 64, 96, 0, 8, 24, 32, 8224, 8232, 8240, 8248, 8256, 270400, 1056, 1064, 1072, 1080, 1088, 33856, 80, 80, 80, 80, 80, 160}, +{32, 40, 48, 56, 64, 96, 0, 8, 24, 32, 8224, 8232, 8240, 8248, 8256, 270400, 1056, 1064, 1072, 1080, 1088, 33856, 80, 80, 80, 80, 80, 160}, +{32, 40, 48, 56, 64, 96, 16, 8, 24, 32, 8224, 8232, 8240, 8248, 8256, 270400, 1056, 1064, 1072, 1080, 1088, 33856, 80, 88, 96, 96, 96, 1120}, +{28, 31, 34, 40, 44, 64, 24, 8, 20, 32, 8224, 8227, 8232, 8236, 8256, 270400, 1056, 1059, 1064, 1068, 1088, 33856, 80, 83, 88, 92, 96, 1632}, +{28, 34, 40, 48, 52, 64, 48, 8, 20, 32, 8224, 8230, 8236, 8240, 8256, 270400, 1056, 1062, 1068, 1072, 1088, 33856, 80, 86, 92, 96, 128, 3200}, +}; + +// ------------------------------------------------ Basic Lookup ----------------------------------------------------------- + +PNANOVDB_FORCE_INLINE pnanovdb_gridblindmetadata_handle_t pnanovdb_grid_get_gridblindmetadata(pnanovdb_buf_t buf, pnanovdb_grid_handle_t grid, pnanovdb_uint32_t index) +{ + pnanovdb_gridblindmetadata_handle_t meta = { grid.address }; + pnanovdb_uint64_t byte_offset = pnanovdb_grid_get_blind_metadata_offset(buf, grid); + meta.address = pnanovdb_address_offset64(meta.address, byte_offset); + meta.address = 
pnanovdb_address_offset_product(meta.address, PNANOVDB_GRIDBLINDMETADATA_SIZE, index); + return meta; +} + +PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_grid_get_gridblindmetadata_value_address(pnanovdb_buf_t buf, pnanovdb_grid_handle_t grid, pnanovdb_uint32_t index) +{ + pnanovdb_gridblindmetadata_handle_t meta = pnanovdb_grid_get_gridblindmetadata(buf, grid, index); + pnanovdb_int64_t byte_offset = pnanovdb_gridblindmetadata_get_byte_offset(buf, meta); + pnanovdb_address_t address = pnanovdb_address_offset64(meta.address, pnanovdb_int64_as_uint64(byte_offset)); + return address; +} + +PNANOVDB_FORCE_INLINE pnanovdb_tree_handle_t pnanovdb_grid_get_tree(pnanovdb_buf_t buf, pnanovdb_grid_handle_t grid) +{ + pnanovdb_tree_handle_t tree = { grid.address }; + tree.address = pnanovdb_address_offset(tree.address, PNANOVDB_GRID_SIZE); + return tree; +} + +PNANOVDB_FORCE_INLINE pnanovdb_root_handle_t pnanovdb_tree_get_root(pnanovdb_buf_t buf, pnanovdb_tree_handle_t tree) +{ + pnanovdb_root_handle_t root = { tree.address }; + pnanovdb_uint64_t byte_offset = pnanovdb_tree_get_node_offset_root(buf, tree); + root.address = pnanovdb_address_offset64(root.address, byte_offset); + return root; +} + +PNANOVDB_FORCE_INLINE pnanovdb_root_tile_handle_t pnanovdb_root_get_tile(pnanovdb_grid_type_t grid_type, pnanovdb_root_handle_t root, pnanovdb_uint32_t n) +{ + pnanovdb_root_tile_handle_t tile = { root.address }; + tile.address = pnanovdb_address_offset(tile.address, PNANOVDB_GRID_TYPE_GET(grid_type, root_size)); + tile.address = pnanovdb_address_offset_product(tile.address, PNANOVDB_GRID_TYPE_GET(grid_type, root_tile_size), n); + return tile; +} + +PNANOVDB_FORCE_INLINE pnanovdb_root_tile_handle_t pnanovdb_root_get_tile_zero(pnanovdb_grid_type_t grid_type, pnanovdb_root_handle_t root) +{ + pnanovdb_root_tile_handle_t tile = { root.address }; + tile.address = pnanovdb_address_offset(tile.address, PNANOVDB_GRID_TYPE_GET(grid_type, root_size)); + return tile; +} + 
+PNANOVDB_FORCE_INLINE pnanovdb_upper_handle_t pnanovdb_root_get_child(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root, pnanovdb_root_tile_handle_t tile) +{ + pnanovdb_upper_handle_t upper = { root.address }; + upper.address = pnanovdb_address_offset64(upper.address, pnanovdb_int64_as_uint64(pnanovdb_root_tile_get_child(buf, tile))); + return upper; +} + +PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_coord_to_key(PNANOVDB_IN(pnanovdb_coord_t) ijk) +{ +#if defined(PNANOVDB_NATIVE_64) + pnanovdb_uint64_t iu = pnanovdb_int32_as_uint32(PNANOVDB_DEREF(ijk).x) >> 12u; + pnanovdb_uint64_t ju = pnanovdb_int32_as_uint32(PNANOVDB_DEREF(ijk).y) >> 12u; + pnanovdb_uint64_t ku = pnanovdb_int32_as_uint32(PNANOVDB_DEREF(ijk).z) >> 12u; + return (ku) | (ju << 21u) | (iu << 42u); +#else + pnanovdb_uint32_t iu = pnanovdb_int32_as_uint32(PNANOVDB_DEREF(ijk).x) >> 12u; + pnanovdb_uint32_t ju = pnanovdb_int32_as_uint32(PNANOVDB_DEREF(ijk).y) >> 12u; + pnanovdb_uint32_t ku = pnanovdb_int32_as_uint32(PNANOVDB_DEREF(ijk).z) >> 12u; + pnanovdb_uint32_t key_x = ku | (ju << 21); + pnanovdb_uint32_t key_y = (iu << 10) | (ju >> 11); + return pnanovdb_uint32_as_uint64(key_x, key_y); +#endif +} + +PNANOVDB_FORCE_INLINE pnanovdb_root_tile_handle_t pnanovdb_root_find_tile(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root, PNANOVDB_IN(pnanovdb_coord_t) ijk) +{ + pnanovdb_uint32_t tile_count = pnanovdb_uint32_as_int32(pnanovdb_root_get_tile_count(buf, root)); + pnanovdb_root_tile_handle_t tile = pnanovdb_root_get_tile_zero(grid_type, root); + pnanovdb_uint64_t key = pnanovdb_coord_to_key(ijk); + for (pnanovdb_uint32_t i = 0u; i < tile_count; i++) + { + if (pnanovdb_uint64_is_equal(key, pnanovdb_root_tile_get_key(buf, tile))) + { + return tile; + } + tile.address = pnanovdb_address_offset(tile.address, PNANOVDB_GRID_TYPE_GET(grid_type, root_tile_size)); + } + pnanovdb_root_tile_handle_t null_handle = { pnanovdb_address_null() }; + 
return null_handle; +} + +// ----------------------------- Leaf Node --------------------------------------- + +PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_leaf_coord_to_offset(PNANOVDB_IN(pnanovdb_coord_t) ijk) +{ + return (((PNANOVDB_DEREF(ijk).x & 7) >> 0) << (2 * 3)) + + (((PNANOVDB_DEREF(ijk).y & 7) >> 0) << (3)) + + ((PNANOVDB_DEREF(ijk).z & 7) >> 0); +} + +PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_leaf_get_min_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_leaf_handle_t node) +{ + pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, leaf_off_min); + return pnanovdb_address_offset(node.address, byte_offset); +} + +PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_leaf_get_max_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_leaf_handle_t node) +{ + pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, leaf_off_max); + return pnanovdb_address_offset(node.address, byte_offset); +} + +PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_leaf_get_ave_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_leaf_handle_t node) +{ + pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, leaf_off_ave); + return pnanovdb_address_offset(node.address, byte_offset); +} + +PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_leaf_get_stddev_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_leaf_handle_t node) +{ + pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, leaf_off_stddev); + return pnanovdb_address_offset(node.address, byte_offset); +} + +PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_leaf_get_table_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_leaf_handle_t node, pnanovdb_uint32_t n) +{ + pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, leaf_off_table) + ((PNANOVDB_GRID_TYPE_GET(grid_type, value_stride_bits) * n) >> 3u); + return pnanovdb_address_offset(node.address, byte_offset); +} + 
// Byte address of the leaf table entry covering coordinate ijk.
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_leaf_get_value_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf, PNANOVDB_IN(pnanovdb_coord_t) ijk)
{
    pnanovdb_uint32_t n = pnanovdb_leaf_coord_to_offset(ijk);
    return pnanovdb_leaf_get_table_address(grid_type, buf, leaf, n);
}

// ----------------------------- Leaf FP Types Specialization ---------------------------------------

// Decodes one quantized value from an FPx leaf table and dequantizes it as
// minimum + quantum * raw. `address` points at the start of the packed table;
// minimum/quantum floats are stored at fixed negative offsets before it.
PNANOVDB_FORCE_INLINE float pnanovdb_leaf_fp_read_float(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_coord_t) ijk, pnanovdb_uint32_t value_log_bits)
{
    // value_log_bits                                                           // 2     3     4
    pnanovdb_uint32_t value_bits = 1u << value_log_bits;                        // 4     8     16
    pnanovdb_uint32_t value_mask = (1u << value_bits) - 1u;                     // 0xF   0xFF  0xFFFF
    pnanovdb_uint32_t values_per_word_bits = 5u - value_log_bits;               // 3     2     1
    pnanovdb_uint32_t values_per_word_mask = (1u << values_per_word_bits) - 1u; // 7     3     1

    pnanovdb_uint32_t n = pnanovdb_leaf_coord_to_offset(ijk);
    float minimum = pnanovdb_read_float(buf, pnanovdb_address_offset_neg(address, PNANOVDB_LEAF_TABLE_NEG_OFF_MINIMUM));
    float quantum = pnanovdb_read_float(buf, pnanovdb_address_offset_neg(address, PNANOVDB_LEAF_TABLE_NEG_OFF_QUANTUM));
    // fetch the 32-bit word holding value n, then shift/mask it out
    pnanovdb_uint32_t raw = pnanovdb_read_uint32(buf, pnanovdb_address_offset(address, ((n >> values_per_word_bits) << 2u)));
    pnanovdb_uint32_t value_compressed = (raw >> ((n & values_per_word_mask) << value_log_bits)) & value_mask;
    return pnanovdb_uint32_to_float(value_compressed) * quantum + minimum;
}

// 4-bit quantized read.
PNANOVDB_FORCE_INLINE float pnanovdb_leaf_fp4_read_float(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_coord_t) ijk)
{
    return pnanovdb_leaf_fp_read_float(buf, address, ijk, 2u);
}

// 8-bit quantized read.
PNANOVDB_FORCE_INLINE float pnanovdb_leaf_fp8_read_float(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_coord_t) ijk)
{
    return pnanovdb_leaf_fp_read_float(buf, address, ijk, 3u);
}

// 16-bit quantized read.
PNANOVDB_FORCE_INLINE float pnanovdb_leaf_fp16_read_float(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_coord_t) ijk)
{
    return pnanovdb_leaf_fp_read_float(buf, address, ijk, 4u);
}

// Variable-bit-width (FPN) read: the per-leaf bit width is encoded in the top
// bits of the leaf's flags byte.
PNANOVDB_FORCE_INLINE float pnanovdb_leaf_fpn_read_float(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_coord_t) ijk)
{
    pnanovdb_uint32_t bbox_dif_and_flags = pnanovdb_read_uint32(buf, pnanovdb_address_offset_neg(address, PNANOVDB_LEAF_TABLE_NEG_OFF_BBOX_DIF_AND_FLAGS));
    pnanovdb_uint32_t flags = bbox_dif_and_flags >> 24u; // flags occupy the top byte
    pnanovdb_uint32_t value_log_bits = flags >> 5; // b = 0, 1, 2, 3, 4 corresponding to 1, 2, 4, 8, 16 bits
    return pnanovdb_leaf_fp_read_float(buf, address, ijk, value_log_bits);
}

// ----------------------------- Leaf Index Specialization ---------------------------------------

// True when bit 28 of the leaf's bbox_dif_and_flags word is set (stats present).
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_leaf_index_has_stats(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf)
{
    return (pnanovdb_leaf_get_bbox_dif_and_flags(buf, leaf) & (1u << 28u)) != 0u;
}

// Index grids store a base index; the min/max/ave/dev stats follow the 512
// voxel slots, hence the fixed offsets 512..515 below.
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_index_get_min_index(pnanovdb_buf_t buf, pnanovdb_address_t min_address)
{
    return pnanovdb_uint64_offset(pnanovdb_read_uint64(buf, min_address), 512u);
}

PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_index_get_max_index(pnanovdb_buf_t buf, pnanovdb_address_t max_address)
{
    return pnanovdb_uint64_offset(pnanovdb_read_uint64(buf, max_address), 513u);
}

PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_index_get_ave_index(pnanovdb_buf_t buf, pnanovdb_address_t ave_address)
{
    return pnanovdb_uint64_offset(pnanovdb_read_uint64(buf, ave_address), 514u);
}

PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_index_get_dev_index(pnanovdb_buf_t buf, pnanovdb_address_t dev_address)
{
    return pnanovdb_uint64_offset(pnanovdb_read_uint64(buf, dev_address), 515u);
}

// Value index for ijk = stored 64-bit base offset + linear voxel offset n.
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_index_get_value_index(pnanovdb_buf_t buf, pnanovdb_address_t value_address, PNANOVDB_IN(pnanovdb_coord_t) ijk)
{
    pnanovdb_uint32_t n = pnanovdb_leaf_coord_to_offset(ijk);
    pnanovdb_uint64_t offset = pnanovdb_read_uint64(buf, value_address);
    return pnanovdb_uint64_offset(offset, n);
}

// ----------------------------- Leaf IndexMask Specialization ---------------------------------------
// IndexMask behaves like Index plus an auxiliary per-voxel bit mask; the index
// accessors below simply forward to the Index implementations.

PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_leaf_indexmask_has_stats(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf)
{
    return pnanovdb_leaf_index_has_stats(buf, leaf);
}
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_indexmask_get_min_index(pnanovdb_buf_t buf, pnanovdb_address_t min_address)
{
    return pnanovdb_leaf_index_get_min_index(buf, min_address);
}
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_indexmask_get_max_index(pnanovdb_buf_t buf, pnanovdb_address_t max_address)
{
    return pnanovdb_leaf_index_get_max_index(buf, max_address);
}
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_indexmask_get_ave_index(pnanovdb_buf_t buf, pnanovdb_address_t ave_address)
{
    return pnanovdb_leaf_index_get_ave_index(buf, ave_address);
}
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_indexmask_get_dev_index(pnanovdb_buf_t buf, pnanovdb_address_t dev_address)
{
    return pnanovdb_leaf_index_get_dev_index(buf, dev_address);
}
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_indexmask_get_value_index(pnanovdb_buf_t buf, pnanovdb_address_t value_address, PNANOVDB_IN(pnanovdb_coord_t) ijk)
{
    return pnanovdb_leaf_index_get_value_index(buf, value_address, ijk);
}
// Tests bit n of the auxiliary mask.
// NOTE(review): the mask region offset 96u is hard-coded here; presumably this
// equals PNANOVDB_LEAF_OFF_VALUE_MASK for this layout — confirm.
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_leaf_indexmask_get_mask_bit(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf, pnanovdb_uint32_t n)
{
    pnanovdb_uint32_t word_idx = n >> 5;
    pnanovdb_uint32_t bit_idx = n & 31;
    pnanovdb_uint32_t val_mask =
        pnanovdb_read_uint32(buf, pnanovdb_address_offset(leaf.address, 96u + 4u * word_idx));
    return (val_mask & (1u << bit_idx)) != 0u;
}
// Read-modify-write of a single bit in the auxiliary mask (same 96u layout
// note as above).
PNANOVDB_FORCE_INLINE void pnanovdb_leaf_indexmask_set_mask_bit(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf, pnanovdb_uint32_t n, pnanovdb_bool_t v)
{
    pnanovdb_uint32_t word_idx = n >> 5;
    pnanovdb_uint32_t bit_idx = n & 31;
    pnanovdb_uint32_t val_mask =
        pnanovdb_read_uint32(buf, pnanovdb_address_offset(leaf.address, 96u + 4u * word_idx));
    if (v)
    {
        val_mask = val_mask | (1u << bit_idx);
    }
    else
    {
        val_mask = val_mask & ~(1u << bit_idx);
    }
    pnanovdb_write_uint32(buf, pnanovdb_address_offset(leaf.address, 96u + 4u * word_idx), val_mask);
}

// ----------------------------- Leaf OnIndex Specialization ---------------------------------------

// Number of active (on) voxels in the leaf: popcount of the last value-mask
// word plus a 9-bit running count packed in bits 54..62 of the second table
// word (the prefix-sum word).
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_leaf_onindex_get_value_count(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf)
{
    pnanovdb_uint64_t val_mask = pnanovdb_read_uint64(buf, pnanovdb_address_offset(leaf.address, PNANOVDB_LEAF_OFF_VALUE_MASK + 8u * 7u));
    pnanovdb_uint64_t prefix_sum = pnanovdb_read_uint64(
        buf, pnanovdb_address_offset(leaf.address, PNANOVDB_GRID_TYPE_GET(PNANOVDB_GRID_TYPE_ONINDEX, leaf_off_table) + 8u));
    return pnanovdb_uint64_countbits(val_mask) + (pnanovdb_uint64_to_uint32_lsr(prefix_sum, 54u) & 511u);
}

// Index of the last active voxel's value: stored base offset + (count - 1).
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_onindex_get_last_offset(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf)
{
    return pnanovdb_uint64_offset(
        pnanovdb_read_uint64(buf, pnanovdb_address_offset(leaf.address, PNANOVDB_GRID_TYPE_GET(PNANOVDB_GRID_TYPE_ONINDEX, leaf_off_table))),
        pnanovdb_leaf_onindex_get_value_count(buf, leaf) - 1u);
}

// True when bit 28 of the leaf's bbox_dif_and_flags word is set (stats present).
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_leaf_onindex_has_stats(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf)
{
    return (pnanovdb_leaf_get_bbox_dif_and_flags(buf, leaf) & (1u << 28u)) != 0u;
}

// Stats for OnIndex leaves are appended after the last active value; min/max/
// ave/dev live at last_offset + 1..4 when stats are present, otherwise index 0.
// The leaf handle is recovered by subtracting the table offset from the given
// stat address.
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_onindex_get_min_index(pnanovdb_buf_t buf, pnanovdb_address_t min_address)
{
    pnanovdb_leaf_handle_t leaf = { pnanovdb_address_offset_neg(min_address, PNANOVDB_GRID_TYPE_GET(PNANOVDB_GRID_TYPE_ONINDEX, leaf_off_table)) };
    pnanovdb_uint64_t idx = pnanovdb_uint32_as_uint64_low(0u);
    if (pnanovdb_leaf_onindex_has_stats(buf, leaf))
    {
        idx = pnanovdb_uint64_offset(pnanovdb_leaf_onindex_get_last_offset(buf, leaf), 1u);
    }
    return idx;
}

PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_onindex_get_max_index(pnanovdb_buf_t buf, pnanovdb_address_t max_address)
{
    pnanovdb_leaf_handle_t leaf = { pnanovdb_address_offset_neg(max_address, PNANOVDB_GRID_TYPE_GET(PNANOVDB_GRID_TYPE_ONINDEX, leaf_off_table)) };
    pnanovdb_uint64_t idx = pnanovdb_uint32_as_uint64_low(0u);
    if (pnanovdb_leaf_onindex_has_stats(buf, leaf))
    {
        idx = pnanovdb_uint64_offset(pnanovdb_leaf_onindex_get_last_offset(buf, leaf), 2u);
    }
    return idx;
}

PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_onindex_get_ave_index(pnanovdb_buf_t buf, pnanovdb_address_t ave_address)
{
    pnanovdb_leaf_handle_t leaf = { pnanovdb_address_offset_neg(ave_address, PNANOVDB_GRID_TYPE_GET(PNANOVDB_GRID_TYPE_ONINDEX, leaf_off_table)) };
    pnanovdb_uint64_t idx = pnanovdb_uint32_as_uint64_low(0u);
    if (pnanovdb_leaf_onindex_has_stats(buf, leaf))
    {
        idx = pnanovdb_uint64_offset(pnanovdb_leaf_onindex_get_last_offset(buf, leaf), 3u);
    }
    return idx;
}

PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_onindex_get_dev_index(pnanovdb_buf_t buf, pnanovdb_address_t dev_address)
{
    pnanovdb_leaf_handle_t leaf = { pnanovdb_address_offset_neg(dev_address, PNANOVDB_GRID_TYPE_GET(PNANOVDB_GRID_TYPE_ONINDEX, leaf_off_table)) };
    pnanovdb_uint64_t idx = pnanovdb_uint32_as_uint64_low(0u);
    if (pnanovdb_leaf_onindex_has_stats(buf, leaf))
    {
        idx = pnanovdb_uint64_offset(pnanovdb_leaf_onindex_get_last_offset(buf, leaf), 4u);
    }
    return idx;
}

// Value index for an active voxel: base offset + rank of the voxel's bit within
// the value mask. The rank is popcount of mask bits below n in the current
// 64-bit word, plus a 9-bit per-word prefix count read from the prefix-sum
// word for word_idx > 0. Inactive voxels return index 0.
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_onindex_get_value_index(pnanovdb_buf_t buf, pnanovdb_address_t value_address, PNANOVDB_IN(pnanovdb_coord_t) ijk)
{
    pnanovdb_uint32_t n = pnanovdb_leaf_coord_to_offset(ijk);
    pnanovdb_leaf_handle_t leaf = { pnanovdb_address_offset_neg(value_address, PNANOVDB_GRID_TYPE_GET(PNANOVDB_GRID_TYPE_ONINDEX, leaf_off_table)) };

    pnanovdb_uint32_t word_idx = n >> 6u;
    pnanovdb_uint32_t bit_idx = n & 63u;
    pnanovdb_uint64_t val_mask = pnanovdb_read_uint64(buf, pnanovdb_address_offset(leaf.address, PNANOVDB_LEAF_OFF_VALUE_MASK + 8u * word_idx));
    pnanovdb_uint64_t mask = pnanovdb_uint64_bit_mask(bit_idx);
    pnanovdb_uint64_t value_index = pnanovdb_uint32_as_uint64_low(0u);
    if (pnanovdb_uint64_any_bit(pnanovdb_uint64_and(val_mask, mask)))
    {
        pnanovdb_uint32_t sum = 0u;
        // bits strictly below bit_idx in this word (mask - 1 covers them all)
        sum += pnanovdb_uint64_countbits(pnanovdb_uint64_and(val_mask, pnanovdb_uint64_dec(mask)));
        if (word_idx > 0u)
        {
            pnanovdb_uint64_t prefix_sum = pnanovdb_read_uint64(buf, pnanovdb_address_offset(value_address, 8u));
            sum += pnanovdb_uint64_to_uint32_lsr(prefix_sum, 9u * (word_idx - 1u)) & 511u;
        }
        pnanovdb_uint64_t offset = pnanovdb_read_uint64(buf, value_address);
        value_index = pnanovdb_uint64_offset(offset, sum);
    }
    return value_index;
}

// ----------------------------- Leaf OnIndexMask Specialization ---------------------------------------
// OnIndexMask forwards the OnIndex behavior and adds an auxiliary bit mask.

PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_leaf_onindexmask_get_value_count(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf)
{
    return pnanovdb_leaf_onindex_get_value_count(buf, leaf);
}
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_onindexmask_get_last_offset(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf)
{
    return pnanovdb_leaf_onindex_get_last_offset(buf, leaf);
}
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_leaf_onindexmask_has_stats(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf)
{
    return pnanovdb_leaf_onindex_has_stats(buf, leaf);
}
PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_onindexmask_get_min_index(pnanovdb_buf_t buf, pnanovdb_address_t min_address)
{
    return pnanovdb_leaf_onindex_get_min_index(buf, min_address);
}
+PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_onindexmask_get_max_index(pnanovdb_buf_t buf, pnanovdb_address_t max_address) +{ + return pnanovdb_leaf_onindex_get_max_index(buf, max_address); +} +PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_onindexmask_get_ave_index(pnanovdb_buf_t buf, pnanovdb_address_t ave_address) +{ + return pnanovdb_leaf_onindex_get_ave_index(buf, ave_address); +} +PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_onindexmask_get_dev_index(pnanovdb_buf_t buf, pnanovdb_address_t dev_address) +{ + return pnanovdb_leaf_onindex_get_dev_index(buf, dev_address); +} +PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_onindexmask_get_value_index(pnanovdb_buf_t buf, pnanovdb_address_t value_address, PNANOVDB_IN(pnanovdb_coord_t) ijk) +{ + return pnanovdb_leaf_onindex_get_value_index(buf, value_address, ijk); +} +PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_leaf_onindexmask_get_mask_bit(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf, pnanovdb_uint32_t n) +{ + pnanovdb_uint32_t word_idx = n >> 5; + pnanovdb_uint32_t bit_idx = n & 31; + pnanovdb_uint32_t val_mask = + pnanovdb_read_uint32(buf, pnanovdb_address_offset(leaf.address, 96u + 4u * word_idx)); + return (val_mask & (1u << bit_idx)) != 0u; +} +PNANOVDB_FORCE_INLINE void pnanovdb_leaf_onindexmask_set_mask_bit(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf, pnanovdb_uint32_t n, pnanovdb_bool_t v) +{ + pnanovdb_uint32_t word_idx = n >> 5; + pnanovdb_uint32_t bit_idx = n & 31; + pnanovdb_uint32_t val_mask = + pnanovdb_read_uint32(buf, pnanovdb_address_offset(leaf.address, 96u + 4u * word_idx)); + if (v) + { + val_mask = val_mask | (1u << bit_idx); + } + else + { + val_mask = val_mask & ~(1u << bit_idx); + } + pnanovdb_write_uint32(buf, pnanovdb_address_offset(leaf.address, 96u + 4u * word_idx), val_mask); +} + +// ----------------------------- Leaf PointIndex Specialization --------------------------------------- + +PNANOVDB_FORCE_INLINE pnanovdb_uint64_t 
pnanovdb_leaf_pointindex_get_offset(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf) +{ + return pnanovdb_read_uint64(buf, pnanovdb_leaf_get_min_address(PNANOVDB_GRID_TYPE_POINTINDEX, buf, leaf)); +} +PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_pointindex_get_point_count(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf) +{ + return pnanovdb_read_uint64(buf, pnanovdb_leaf_get_max_address(PNANOVDB_GRID_TYPE_POINTINDEX, buf, leaf)); +} +PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_pointindex_get_first(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf, pnanovdb_uint32_t i) +{ + return pnanovdb_uint64_offset(pnanovdb_leaf_pointindex_get_offset(buf, leaf), + (i == 0u ? 0u : pnanovdb_read_uint16(buf, pnanovdb_leaf_get_table_address(PNANOVDB_GRID_TYPE_POINTINDEX, buf, leaf, i - 1u)))); +} +PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_pointindex_get_last(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf, pnanovdb_uint32_t i) +{ + return pnanovdb_uint64_offset(pnanovdb_leaf_pointindex_get_offset(buf, leaf), + pnanovdb_read_uint16(buf, pnanovdb_leaf_get_table_address(PNANOVDB_GRID_TYPE_POINTINDEX, buf, leaf, i))); +} +PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_leaf_pointindex_get_value(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf, pnanovdb_uint32_t i) +{ + return pnanovdb_uint32_as_uint64_low(pnanovdb_read_uint16(buf, pnanovdb_leaf_get_table_address(PNANOVDB_GRID_TYPE_POINTINDEX, buf, leaf, i))); +} +PNANOVDB_FORCE_INLINE void pnanovdb_leaf_pointindex_set_value_only(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf, pnanovdb_uint32_t i, pnanovdb_uint32_t value) +{ + pnanovdb_address_t addr = pnanovdb_leaf_get_table_address(PNANOVDB_GRID_TYPE_POINTINDEX, buf, leaf, i); + pnanovdb_uint32_t raw32 = pnanovdb_read_uint32(buf, pnanovdb_address_mask_inv(addr, 3u)); + if ((i & 1) == 0u) + { + raw32 = (raw32 & 0xFFFF0000) | (value & 0x0000FFFF); + } + else + { + raw32 = (raw32 & 0x0000FFFF) | (value << 16u); + } + pnanovdb_write_uint32(buf, addr, raw32); 
+} +PNANOVDB_FORCE_INLINE void pnanovdb_leaf_pointindex_set_on(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf, pnanovdb_uint32_t i) +{ + pnanovdb_uint32_t word_idx = i >> 5; + pnanovdb_uint32_t bit_idx = i & 31; + pnanovdb_address_t addr = pnanovdb_address_offset(leaf.address, PNANOVDB_LEAF_OFF_VALUE_MASK + 4u * word_idx); + pnanovdb_uint32_t val_mask = pnanovdb_read_uint32(buf, addr); + val_mask = val_mask | (1u << bit_idx); + pnanovdb_write_uint32(buf, addr, val_mask); +} +PNANOVDB_FORCE_INLINE void pnanovdb_leaf_pointindex_set_value(pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf, pnanovdb_uint32_t i, pnanovdb_uint32_t value) +{ + pnanovdb_leaf_pointindex_set_on(buf, leaf, i); + pnanovdb_leaf_pointindex_set_value_only(buf, leaf, i, value); +} + +// ------------------------------------------------ Lower Node ----------------------------------------------------------- + +PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_lower_coord_to_offset(PNANOVDB_IN(pnanovdb_coord_t) ijk) +{ + return (((PNANOVDB_DEREF(ijk).x & 127) >> 3) << (2 * 4)) + + (((PNANOVDB_DEREF(ijk).y & 127) >> 3) << (4)) + + ((PNANOVDB_DEREF(ijk).z & 127) >> 3); +} + +PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_lower_get_min_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t node) +{ + pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, lower_off_min); + return pnanovdb_address_offset(node.address, byte_offset); +} + +PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_lower_get_max_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t node) +{ + pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, lower_off_max); + return pnanovdb_address_offset(node.address, byte_offset); +} + +PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_lower_get_ave_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t node) +{ + pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, 
lower_off_ave); + return pnanovdb_address_offset(node.address, byte_offset); +} + +PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_lower_get_stddev_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t node) +{ + pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, lower_off_stddev); + return pnanovdb_address_offset(node.address, byte_offset); +} + +PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_lower_get_table_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t node, pnanovdb_uint32_t n) +{ + pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, lower_off_table) + PNANOVDB_GRID_TYPE_GET(grid_type, table_stride) * n; + return pnanovdb_address_offset(node.address, byte_offset); +} + +PNANOVDB_FORCE_INLINE pnanovdb_int64_t pnanovdb_lower_get_table_child(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t node, pnanovdb_uint32_t n) +{ + pnanovdb_address_t table_address = pnanovdb_lower_get_table_address(grid_type, buf, node, n); + return pnanovdb_read_int64(buf, table_address); +} + +PNANOVDB_FORCE_INLINE pnanovdb_leaf_handle_t pnanovdb_lower_get_child(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t lower, pnanovdb_uint32_t n) +{ + pnanovdb_leaf_handle_t leaf = { lower.address }; + leaf.address = pnanovdb_address_offset64(leaf.address, pnanovdb_int64_as_uint64(pnanovdb_lower_get_table_child(grid_type, buf, lower, n))); + return leaf; +} + +PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_lower_get_value_address_and_level(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t lower, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_uint32_t) level) +{ + pnanovdb_uint32_t n = pnanovdb_lower_coord_to_offset(ijk); + pnanovdb_address_t value_address; + if (pnanovdb_lower_get_child_mask(buf, lower, n)) + { + pnanovdb_leaf_handle_t child = pnanovdb_lower_get_child(grid_type, buf, lower, n); + 
value_address = pnanovdb_leaf_get_value_address(grid_type, buf, child, ijk); + PNANOVDB_DEREF(level) = 0u; + } + else + { + value_address = pnanovdb_lower_get_table_address(grid_type, buf, lower, n); + PNANOVDB_DEREF(level) = 1u; + } + return value_address; +} + +PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_lower_get_value_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t lower, PNANOVDB_IN(pnanovdb_coord_t) ijk) +{ + pnanovdb_uint32_t level; + return pnanovdb_lower_get_value_address_and_level(grid_type, buf, lower, ijk, PNANOVDB_REF(level)); +} + +// ------------------------------------------------ Upper Node ----------------------------------------------------------- + +PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_upper_coord_to_offset(PNANOVDB_IN(pnanovdb_coord_t) ijk) +{ + return (((PNANOVDB_DEREF(ijk).x & 4095) >> 7) << (2 * 5)) + + (((PNANOVDB_DEREF(ijk).y & 4095) >> 7) << (5)) + + ((PNANOVDB_DEREF(ijk).z & 4095) >> 7); +} + +PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_upper_get_min_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t node) +{ + pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, upper_off_min); + return pnanovdb_address_offset(node.address, byte_offset); +} + +PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_upper_get_max_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t node) +{ + pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, upper_off_max); + return pnanovdb_address_offset(node.address, byte_offset); +} + +PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_upper_get_ave_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t node) +{ + pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, upper_off_ave); + return pnanovdb_address_offset(node.address, byte_offset); +} + +PNANOVDB_FORCE_INLINE pnanovdb_address_t 
pnanovdb_upper_get_stddev_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t node) +{ + pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, upper_off_stddev); + return pnanovdb_address_offset(node.address, byte_offset); +} + +PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_upper_get_table_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t node, pnanovdb_uint32_t n) +{ + pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, upper_off_table) + PNANOVDB_GRID_TYPE_GET(grid_type, table_stride) * n; + return pnanovdb_address_offset(node.address, byte_offset); +} + +PNANOVDB_FORCE_INLINE pnanovdb_int64_t pnanovdb_upper_get_table_child(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t node, pnanovdb_uint32_t n) +{ + pnanovdb_address_t bufAddress = pnanovdb_upper_get_table_address(grid_type, buf, node, n); + return pnanovdb_read_int64(buf, bufAddress); +} + +PNANOVDB_FORCE_INLINE pnanovdb_lower_handle_t pnanovdb_upper_get_child(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t upper, pnanovdb_uint32_t n) +{ + pnanovdb_lower_handle_t lower = { upper.address }; + lower.address = pnanovdb_address_offset64(lower.address, pnanovdb_int64_as_uint64(pnanovdb_upper_get_table_child(grid_type, buf, upper, n))); + return lower; +} + +PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_upper_get_value_address_and_level(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t upper, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_uint32_t) level) +{ + pnanovdb_uint32_t n = pnanovdb_upper_coord_to_offset(ijk); + pnanovdb_address_t value_address; + if (pnanovdb_upper_get_child_mask(buf, upper, n)) + { + pnanovdb_lower_handle_t child = pnanovdb_upper_get_child(grid_type, buf, upper, n); + value_address = pnanovdb_lower_get_value_address_and_level(grid_type, buf, child, ijk, level); + } + else + { + value_address = 
pnanovdb_upper_get_table_address(grid_type, buf, upper, n); + PNANOVDB_DEREF(level) = 2u; + } + return value_address; +} + +PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_upper_get_value_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t upper, PNANOVDB_IN(pnanovdb_coord_t) ijk) +{ + pnanovdb_uint32_t level; + return pnanovdb_upper_get_value_address_and_level(grid_type, buf, upper, ijk, PNANOVDB_REF(level)); +} + +PNANOVDB_FORCE_INLINE void pnanovdb_upper_set_table_child(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t node, pnanovdb_uint32_t n, pnanovdb_int64_t child) +{ + pnanovdb_address_t bufAddress = pnanovdb_upper_get_table_address(grid_type, buf, node, n); + pnanovdb_write_int64(buf, bufAddress, child); +} + +// ------------------------------------------------ Root ----------------------------------------------------------- + +PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_root_get_min_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root) +{ + pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, root_off_min); + return pnanovdb_address_offset(root.address, byte_offset); +} + +PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_root_get_max_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root) +{ + pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, root_off_max); + return pnanovdb_address_offset(root.address, byte_offset); +} + +PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_root_get_ave_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root) +{ + pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, root_off_ave); + return pnanovdb_address_offset(root.address, byte_offset); +} + +PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_root_get_stddev_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root) +{ + 
pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, root_off_stddev); + return pnanovdb_address_offset(root.address, byte_offset); +} + +PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_root_tile_get_value_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_tile_handle_t root_tile) +{ + pnanovdb_uint32_t byte_offset = PNANOVDB_GRID_TYPE_GET(grid_type, root_tile_off_value); + return pnanovdb_address_offset(root_tile.address, byte_offset); +} + +PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_root_get_value_address_and_level(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_uint32_t) level) +{ + pnanovdb_root_tile_handle_t tile = pnanovdb_root_find_tile(grid_type, buf, root, ijk); + pnanovdb_address_t ret; + if (pnanovdb_address_is_null(tile.address)) + { + ret = pnanovdb_address_offset(root.address, PNANOVDB_GRID_TYPE_GET(grid_type, root_off_background)); + PNANOVDB_DEREF(level) = 4u; + } + else if (pnanovdb_int64_is_zero(pnanovdb_root_tile_get_child(buf, tile))) + { + ret = pnanovdb_address_offset(tile.address, PNANOVDB_GRID_TYPE_GET(grid_type, root_tile_off_value)); + PNANOVDB_DEREF(level) = 3u; + } + else + { + pnanovdb_upper_handle_t child = pnanovdb_root_get_child(grid_type, buf, root, tile); + ret = pnanovdb_upper_get_value_address_and_level(grid_type, buf, child, ijk, level); + } + return ret; +} + +PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_root_get_value_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root, PNANOVDB_IN(pnanovdb_coord_t) ijk) +{ + pnanovdb_uint32_t level; + return pnanovdb_root_get_value_address_and_level(grid_type, buf, root, ijk, PNANOVDB_REF(level)); +} + +PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_root_get_value_address_bit(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root, PNANOVDB_IN(pnanovdb_coord_t) ijk, 
PNANOVDB_INOUT(pnanovdb_uint32_t) bit_index) +{ + pnanovdb_uint32_t level; + pnanovdb_address_t address = pnanovdb_root_get_value_address_and_level(grid_type, buf, root, ijk, PNANOVDB_REF(level)); + PNANOVDB_DEREF(bit_index) = level == 0u ? pnanovdb_int32_as_uint32(PNANOVDB_DEREF(ijk).x & 7) : 0u; + return address; +} + +PNANOVDB_FORCE_INLINE float pnanovdb_root_fp4_read_float(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_coord_t) ijk, pnanovdb_uint32_t level) +{ + float ret; + if (level == 0) + { + ret = pnanovdb_leaf_fp4_read_float(buf, address, ijk); + } + else + { + ret = pnanovdb_read_float(buf, address); + } + return ret; +} + +PNANOVDB_FORCE_INLINE float pnanovdb_root_fp8_read_float(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_coord_t) ijk, pnanovdb_uint32_t level) +{ + float ret; + if (level == 0) + { + ret = pnanovdb_leaf_fp8_read_float(buf, address, ijk); + } + else + { + ret = pnanovdb_read_float(buf, address); + } + return ret; +} + +PNANOVDB_FORCE_INLINE float pnanovdb_root_fp16_read_float(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_coord_t) ijk, pnanovdb_uint32_t level) +{ + float ret; + if (level == 0) + { + ret = pnanovdb_leaf_fp16_read_float(buf, address, ijk); + } + else + { + ret = pnanovdb_read_float(buf, address); + } + return ret; +} + +PNANOVDB_FORCE_INLINE float pnanovdb_root_fpn_read_float(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_coord_t) ijk, pnanovdb_uint32_t level) +{ + float ret; + if (level == 0) + { + ret = pnanovdb_leaf_fpn_read_float(buf, address, ijk); + } + else + { + ret = pnanovdb_read_float(buf, address); + } + return ret; +} + +PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_root_index_get_value_index(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_coord_t) ijk, pnanovdb_uint32_t level) +{ + pnanovdb_uint64_t ret; + if (level == 0) + { + ret = pnanovdb_leaf_index_get_value_index(buf, address, ijk); + } + 
else + { + ret = pnanovdb_read_uint64(buf, address); + } + return ret; +} + +PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_root_onindex_get_value_index(pnanovdb_buf_t buf, pnanovdb_address_t address, PNANOVDB_IN(pnanovdb_coord_t) ijk, pnanovdb_uint32_t level) +{ + pnanovdb_uint64_t ret; + if (level == 0) + { + ret = pnanovdb_leaf_onindex_get_value_index(buf, address, ijk); + } + else + { + ret = pnanovdb_read_uint64(buf, address); + } + return ret; +} + +PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_root_pointindex_get_point_range( + pnanovdb_buf_t buf, + pnanovdb_address_t value_address, + PNANOVDB_IN(pnanovdb_coord_t) ijk, + pnanovdb_uint32_t level, + PNANOVDB_INOUT(pnanovdb_uint64_t)range_begin, + PNANOVDB_INOUT(pnanovdb_uint64_t)range_end +) +{ + pnanovdb_uint32_t local_range_begin = 0u; + pnanovdb_uint32_t local_range_end = 0u; + pnanovdb_uint64_t offset = pnanovdb_uint32_as_uint64_low(0u); + if (level == 0) + { + pnanovdb_uint32_t n = pnanovdb_leaf_coord_to_offset(ijk); + // recover leaf address + pnanovdb_leaf_handle_t leaf = { pnanovdb_address_offset_neg(value_address, PNANOVDB_GRID_TYPE_GET(PNANOVDB_GRID_TYPE_POINTINDEX, leaf_off_table) + 2u * n) }; + if (n > 0u) + { + local_range_begin = pnanovdb_read_uint16(buf, pnanovdb_address_offset_neg(value_address, 2u)); + } + local_range_end = pnanovdb_read_uint16(buf, value_address); + offset = pnanovdb_leaf_pointindex_get_offset(buf, leaf); + } + PNANOVDB_DEREF(range_begin) = pnanovdb_uint64_offset(offset, local_range_begin); + PNANOVDB_DEREF(range_end) = pnanovdb_uint64_offset(offset, local_range_end); + return pnanovdb_uint32_as_uint64_low(local_range_end - local_range_begin); +} + +PNANOVDB_FORCE_INLINE pnanovdb_uint64_t pnanovdb_root_pointindex_get_point_address_range( + pnanovdb_buf_t buf, + pnanovdb_grid_type_t value_type, + pnanovdb_address_t value_address, + pnanovdb_address_t blindmetadata_value_address, + PNANOVDB_IN(pnanovdb_coord_t) ijk, + pnanovdb_uint32_t level, + 
PNANOVDB_INOUT(pnanovdb_address_t)address_begin, + PNANOVDB_INOUT(pnanovdb_address_t)address_end +) +{ + pnanovdb_uint64_t range_begin; + pnanovdb_uint64_t range_end; + pnanovdb_uint64_t range_size = pnanovdb_root_pointindex_get_point_range(buf, value_address, ijk, level, PNANOVDB_REF(range_begin), PNANOVDB_REF(range_end)); + + pnanovdb_uint32_t stride = 12u; // vec3f + if (value_type == PNANOVDB_GRID_TYPE_VEC3U8) + { + stride = 3u; + } + else if (value_type == PNANOVDB_GRID_TYPE_VEC3U16) + { + stride = 6u; + } + PNANOVDB_DEREF(address_begin) = pnanovdb_address_offset64_product(blindmetadata_value_address, range_begin, stride); + PNANOVDB_DEREF(address_end) = pnanovdb_address_offset64_product(blindmetadata_value_address, range_end, stride); + return range_size; +} + +// ------------------------------------------------ ReadAccessor ----------------------------------------------------------- + +struct pnanovdb_readaccessor_t +{ + pnanovdb_coord_t key; + pnanovdb_leaf_handle_t leaf; + pnanovdb_lower_handle_t lower; + pnanovdb_upper_handle_t upper; + pnanovdb_root_handle_t root; +}; +PNANOVDB_STRUCT_TYPEDEF(pnanovdb_readaccessor_t) + +PNANOVDB_FORCE_INLINE void pnanovdb_readaccessor_init(PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, pnanovdb_root_handle_t root) +{ + PNANOVDB_DEREF(acc).key.x = 0x7FFFFFFF; + PNANOVDB_DEREF(acc).key.y = 0x7FFFFFFF; + PNANOVDB_DEREF(acc).key.z = 0x7FFFFFFF; + PNANOVDB_DEREF(acc).leaf.address = pnanovdb_address_null(); + PNANOVDB_DEREF(acc).lower.address = pnanovdb_address_null(); + PNANOVDB_DEREF(acc).upper.address = pnanovdb_address_null(); + PNANOVDB_DEREF(acc).root = root; +} + +PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_readaccessor_iscached0(PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, int dirty) +{ + if (pnanovdb_address_is_null(PNANOVDB_DEREF(acc).leaf.address)) { return PNANOVDB_FALSE; } + if ((dirty & ~((1u << 3) - 1u)) != 0) + { + PNANOVDB_DEREF(acc).leaf.address = pnanovdb_address_null(); + return PNANOVDB_FALSE; + } + 
    return PNANOVDB_TRUE;
}
// True if the cached lower node can serve the query (lower nodes span
// 2^7 voxels per axis, hence the 7-bit mask).
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_readaccessor_iscached1(PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, int dirty)
{
    if (pnanovdb_address_is_null(PNANOVDB_DEREF(acc).lower.address)) { return PNANOVDB_FALSE; }
    if ((dirty & ~((1u << 7) - 1u)) != 0)
    {
        PNANOVDB_DEREF(acc).lower.address = pnanovdb_address_null();
        return PNANOVDB_FALSE;
    }
    return PNANOVDB_TRUE;
}
// True if the cached upper node can serve the query (upper nodes span
// 2^12 voxels per axis, hence the 12-bit mask).
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_readaccessor_iscached2(PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, int dirty)
{
    if (pnanovdb_address_is_null(PNANOVDB_DEREF(acc).upper.address)) { return PNANOVDB_FALSE; }
    if ((dirty & ~((1u << 12) - 1u)) != 0)
    {
        PNANOVDB_DEREF(acc).upper.address = pnanovdb_address_null();
        return PNANOVDB_FALSE;
    }
    return PNANOVDB_TRUE;
}
// Per-axis xor of the query ijk against the cached key, or-ed together; the
// position of the highest set bit tells which cache levels are stale.
PNANOVDB_FORCE_INLINE int pnanovdb_readaccessor_computedirty(PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, PNANOVDB_IN(pnanovdb_coord_t) ijk)
{
    return (PNANOVDB_DEREF(ijk).x ^ PNANOVDB_DEREF(acc).key.x) | (PNANOVDB_DEREF(ijk).y ^ PNANOVDB_DEREF(acc).key.y) | (PNANOVDB_DEREF(ijk).z ^ PNANOVDB_DEREF(acc).key.z);
}

// Leaf-level value address lookup; acc is unused here (leaves cache nothing
// deeper) but kept for signature symmetry with the other levels.
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_leaf_get_value_address_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc)
{
    pnanovdb_uint32_t n = pnanovdb_leaf_coord_to_offset(ijk);
    return pnanovdb_leaf_get_table_address(grid_type, buf, leaf, n);
}

// Resolves the value address for ijk starting from a lower node, descending
// into a leaf child when one exists (and caching it on acc), otherwise
// returning the node's own tile slot. level reports where the value lives
// (0 = leaf, 1 = lower tile).
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_lower_get_value_address_and_level_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t lower, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, PNANOVDB_INOUT(pnanovdb_uint32_t) level)
{
    pnanovdb_uint32_t n = pnanovdb_lower_coord_to_offset(ijk);
    pnanovdb_address_t value_address;
    if (pnanovdb_lower_get_child_mask(buf, lower, n))
    {
        pnanovdb_leaf_handle_t child =
            pnanovdb_lower_get_child(grid_type, buf, lower, n);
        // cache the child leaf and remember the coordinate it was cached for
        PNANOVDB_DEREF(acc).leaf = child;
        PNANOVDB_DEREF(acc).key = PNANOVDB_DEREF(ijk);
        value_address = pnanovdb_leaf_get_value_address_and_cache(grid_type, buf, child, ijk, acc);
        PNANOVDB_DEREF(level) = 0u;
    }
    else
    {
        // no child: value is the lower node's own tile slot
        value_address = pnanovdb_lower_get_table_address(grid_type, buf, lower, n);
        PNANOVDB_DEREF(level) = 1u;
    }
    return value_address;
}

// Convenience wrapper that discards the level output.
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_lower_get_value_address_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t lower, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc)
{
    pnanovdb_uint32_t level;
    return pnanovdb_lower_get_value_address_and_level_and_cache(grid_type, buf, lower, ijk, acc, PNANOVDB_REF(level));
}

// Writes a child entry (as int64) into table slot n of a lower node.
PNANOVDB_FORCE_INLINE void pnanovdb_lower_set_table_child(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t node, pnanovdb_uint32_t n, pnanovdb_int64_t child)
{
    pnanovdb_address_t table_address = pnanovdb_lower_get_table_address(grid_type, buf, node, n);
    pnanovdb_write_int64(buf, table_address, child);
}

// Resolves the value address for ijk starting from an upper node, descending
// into a lower child when one exists (caching it on acc), otherwise returning
// the upper node's own tile slot with level = 2.
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_upper_get_value_address_and_level_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t upper, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, PNANOVDB_INOUT(pnanovdb_uint32_t) level)
{
    pnanovdb_uint32_t n = pnanovdb_upper_coord_to_offset(ijk);
    pnanovdb_address_t value_address;
    if (pnanovdb_upper_get_child_mask(buf, upper, n))
    {
        pnanovdb_lower_handle_t child = pnanovdb_upper_get_child(grid_type, buf, upper, n);
        PNANOVDB_DEREF(acc).lower = child;
        PNANOVDB_DEREF(acc).key = PNANOVDB_DEREF(ijk);
        value_address = pnanovdb_lower_get_value_address_and_level_and_cache(grid_type, buf, child, ijk, acc, level);
    }
    else
    {
        value_address = pnanovdb_upper_get_table_address(grid_type, buf, upper, n);
        PNANOVDB_DEREF(level) = 2u;
    }
    return value_address;
}

// Convenience wrapper that discards the level output.
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_upper_get_value_address_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t upper, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc)
{
    pnanovdb_uint32_t level;
    return pnanovdb_upper_get_value_address_and_level_and_cache(grid_type, buf, upper, ijk, acc, PNANOVDB_REF(level));
}

// Full top-down value-address lookup from the root. Outcomes:
//  - no tile for ijk      -> root background value, level 4
//  - tile without a child -> tile's stored value, level 3
//  - tile with a child    -> descend (caching the upper node on acc)
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_root_get_value_address_and_level_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, PNANOVDB_INOUT(pnanovdb_uint32_t) level)
{
    pnanovdb_root_tile_handle_t tile = pnanovdb_root_find_tile(grid_type, buf, root, ijk);
    pnanovdb_address_t ret;
    if (pnanovdb_address_is_null(tile.address))
    {
        ret = pnanovdb_address_offset(root.address, PNANOVDB_GRID_TYPE_GET(grid_type, root_off_background));
        PNANOVDB_DEREF(level) = 4u;
    }
    else if (pnanovdb_int64_is_zero(pnanovdb_root_tile_get_child(buf, tile)))
    {
        ret = pnanovdb_address_offset(tile.address, PNANOVDB_GRID_TYPE_GET(grid_type, root_tile_off_value));
        PNANOVDB_DEREF(level) = 3u;
    }
    else
    {
        pnanovdb_upper_handle_t child = pnanovdb_root_get_child(grid_type, buf, root, tile);
        PNANOVDB_DEREF(acc).upper = child;
        PNANOVDB_DEREF(acc).key = PNANOVDB_DEREF(ijk);
        ret = pnanovdb_upper_get_value_address_and_level_and_cache(grid_type, buf, child, ijk, acc, level);
    }
    return ret;
}

// Convenience wrapper that discards the level output.
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_root_get_value_address_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc)
{
    pnanovdb_uint32_t level;
    return pnanovdb_root_get_value_address_and_level_and_cache(grid_type, buf, root, ijk, acc, PNANOVDB_REF(level));
}


// Cached value-address lookup: tries the most local valid cache level first
// (leaf, then lower, then upper) and only falls back to a full root traversal
// when all caches are stale for ijk.
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_readaccessor_get_value_address_and_level(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_uint32_t) level)
{
    int dirty = pnanovdb_readaccessor_computedirty(acc, ijk);

    pnanovdb_address_t value_address;
    if (pnanovdb_readaccessor_iscached0(acc, dirty))
    {
        value_address = pnanovdb_leaf_get_value_address_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).leaf, ijk, acc);
        PNANOVDB_DEREF(level) = 0u;
    }
    else if (pnanovdb_readaccessor_iscached1(acc, dirty))
    {
        value_address = pnanovdb_lower_get_value_address_and_level_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).lower, ijk, acc, level);
    }
    else if (pnanovdb_readaccessor_iscached2(acc, dirty))
    {
        value_address = pnanovdb_upper_get_value_address_and_level_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).upper, ijk, acc, level);
    }
    else
    {
        value_address = pnanovdb_root_get_value_address_and_level_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).root, ijk, acc, level);
    }
    return value_address;
}

// Convenience wrapper that discards the level output.
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_readaccessor_get_value_address(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, PNANOVDB_IN(pnanovdb_coord_t) ijk)
{
    pnanovdb_uint32_t level;
    return pnanovdb_readaccessor_get_value_address_and_level(grid_type, buf, acc, ijk, PNANOVDB_REF(level));
}

// As above, but also reports the bit index within the addressed byte for
// bit-packed value types; only leaf-level values (level == 0) are packed
// (continues below).
PNANOVDB_FORCE_INLINE pnanovdb_address_t pnanovdb_readaccessor_get_value_address_bit(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_uint32_t) bit_index)
{
    pnanovdb_uint32_t level;
    pnanovdb_address_t address = pnanovdb_readaccessor_get_value_address_and_level(grid_type, buf, acc, ijk, PNANOVDB_REF(level));
    PNANOVDB_DEREF(bit_index) = level == 0u ?
        pnanovdb_int32_as_uint32(PNANOVDB_DEREF(ijk).x & 7) : 0u;
    return address;
}

// ------------------------------------------------ ReadAccessor GetDim -----------------------------------------------------------

// Dim of a voxel resolved inside a leaf is always a single voxel.
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_leaf_get_dim_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc)
{
    return 1u;
}

// Dim lookup from a lower node: descend into a leaf child when present
// (caching it), otherwise return the leaf span (2^3) covered by the tile.
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_lower_get_dim_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t lower, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc)
{
    pnanovdb_uint32_t n = pnanovdb_lower_coord_to_offset(ijk);
    pnanovdb_uint32_t ret;
    if (pnanovdb_lower_get_child_mask(buf, lower, n))
    {
        pnanovdb_leaf_handle_t child = pnanovdb_lower_get_child(grid_type, buf, lower, n);
        PNANOVDB_DEREF(acc).leaf = child;
        PNANOVDB_DEREF(acc).key = PNANOVDB_DEREF(ijk);
        ret = pnanovdb_leaf_get_dim_and_cache(grid_type, buf, child, ijk, acc);
    }
    else
    {
        ret = (1u << (3u)); // node 0 dim
    }
    return ret;
}

// Dim lookup from an upper node: descend into a lower child when present
// (caching it), otherwise return the lower span (2^7) covered by the tile.
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_upper_get_dim_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t upper, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc)
{
    pnanovdb_uint32_t n = pnanovdb_upper_coord_to_offset(ijk);
    pnanovdb_uint32_t ret;
    if (pnanovdb_upper_get_child_mask(buf, upper, n))
    {
        pnanovdb_lower_handle_t child = pnanovdb_upper_get_child(grid_type, buf, upper, n);
        PNANOVDB_DEREF(acc).lower = child;
        PNANOVDB_DEREF(acc).key = PNANOVDB_DEREF(ijk);
        ret = pnanovdb_lower_get_dim_and_cache(grid_type, buf, child, ijk, acc);
    }
    else
    {
        ret = (1u << (4u + 3u)); // node 1 dim
    }
    return ret;
}

// Dim lookup from the root (signature continues below).
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_root_get_dim_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t
    buf, pnanovdb_root_handle_t root, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc)
{
    pnanovdb_root_tile_handle_t tile = pnanovdb_root_find_tile(grid_type, buf, root, ijk);
    pnanovdb_uint32_t ret;
    if (pnanovdb_address_is_null(tile.address))
    {
        ret = 1u << (5u + 4u + 3u); // background, node 2 dim
    }
    else if (pnanovdb_int64_is_zero(pnanovdb_root_tile_get_child(buf, tile)))
    {
        ret = 1u << (5u + 4u + 3u); // tile value, node 2 dim
    }
    else
    {
        pnanovdb_upper_handle_t child = pnanovdb_root_get_child(grid_type, buf, root, tile);
        PNANOVDB_DEREF(acc).upper = child;
        PNANOVDB_DEREF(acc).key = PNANOVDB_DEREF(ijk);
        ret = pnanovdb_upper_get_dim_and_cache(grid_type, buf, child, ijk, acc);
    }
    return ret;
}

// Cached dim query: same cache-level dispatch as the value-address lookup.
// Returns the edge length (in voxels) of the node/tile containing ijk.
PNANOVDB_FORCE_INLINE pnanovdb_uint32_t pnanovdb_readaccessor_get_dim(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, PNANOVDB_IN(pnanovdb_coord_t) ijk)
{
    int dirty = pnanovdb_readaccessor_computedirty(acc, ijk);

    pnanovdb_uint32_t dim;
    if (pnanovdb_readaccessor_iscached0(acc, dirty))
    {
        dim = pnanovdb_leaf_get_dim_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).leaf, ijk, acc);
    }
    else if (pnanovdb_readaccessor_iscached1(acc, dirty))
    {
        dim = pnanovdb_lower_get_dim_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).lower, ijk, acc);
    }
    else if (pnanovdb_readaccessor_iscached2(acc, dirty))
    {
        dim = pnanovdb_upper_get_dim_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).upper, ijk, acc);
    }
    else
    {
        dim = pnanovdb_root_get_dim_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).root, ijk, acc);
    }
    return dim;
}

// ------------------------------------------------ ReadAccessor IsActive -----------------------------------------------------------

// Leaf-level active query: reads the voxel's bit from the leaf value mask
// (signature continues below).
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_leaf_is_active_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_leaf_handle_t leaf, PNANOVDB_IN(pnanovdb_coord_t) ijk,
    PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc)
{
    pnanovdb_uint32_t n = pnanovdb_leaf_coord_to_offset(ijk);
    return pnanovdb_leaf_get_value_mask(buf, leaf, n);
}

// Active query from a lower node: descend into a leaf child when present
// (caching it), otherwise read the node's own value mask for the tile.
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_lower_is_active_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_lower_handle_t lower, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc)
{
    pnanovdb_uint32_t n = pnanovdb_lower_coord_to_offset(ijk);
    pnanovdb_bool_t is_active;
    if (pnanovdb_lower_get_child_mask(buf, lower, n))
    {
        pnanovdb_leaf_handle_t child = pnanovdb_lower_get_child(grid_type, buf, lower, n);
        PNANOVDB_DEREF(acc).leaf = child;
        PNANOVDB_DEREF(acc).key = PNANOVDB_DEREF(ijk);
        is_active = pnanovdb_leaf_is_active_and_cache(grid_type, buf, child, ijk, acc);
    }
    else
    {
        is_active = pnanovdb_lower_get_value_mask(buf, lower, n);
    }
    return is_active;
}

// Active query from an upper node: descend into a lower child when present
// (caching it), otherwise read the node's own value mask for the tile.
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_upper_is_active_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_upper_handle_t upper, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc)
{
    pnanovdb_uint32_t n = pnanovdb_upper_coord_to_offset(ijk);
    pnanovdb_bool_t is_active;
    if (pnanovdb_upper_get_child_mask(buf, upper, n))
    {
        pnanovdb_lower_handle_t child = pnanovdb_upper_get_child(grid_type, buf, upper, n);
        PNANOVDB_DEREF(acc).lower = child;
        PNANOVDB_DEREF(acc).key = PNANOVDB_DEREF(ijk);
        is_active = pnanovdb_lower_is_active_and_cache(grid_type, buf, child, ijk, acc);
    }
    else
    {
        is_active = pnanovdb_upper_get_value_mask(buf, upper, n);
    }
    return is_active;
}

// Active query from the root: background is inactive, a childless tile's
// state flag decides, otherwise descend (continues below).
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_root_is_active_and_cache(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, pnanovdb_root_handle_t root, PNANOVDB_IN(pnanovdb_coord_t) ijk, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc)
{
    pnanovdb_root_tile_handle_t tile = pnanovdb_root_find_tile(grid_type, buf, root, ijk);
    pnanovdb_bool_t
        is_active;
    if (pnanovdb_address_is_null(tile.address))
    {
        is_active = PNANOVDB_FALSE; // background
    }
    else if (pnanovdb_int64_is_zero(pnanovdb_root_tile_get_child(buf, tile)))
    {
        pnanovdb_uint32_t state = pnanovdb_root_tile_get_state(buf, tile);
        is_active = state != 0u; // tile value
    }
    else
    {
        pnanovdb_upper_handle_t child = pnanovdb_root_get_child(grid_type, buf, root, tile);
        PNANOVDB_DEREF(acc).upper = child;
        PNANOVDB_DEREF(acc).key = PNANOVDB_DEREF(ijk);
        is_active = pnanovdb_upper_is_active_and_cache(grid_type, buf, child, ijk, acc);
    }
    return is_active;
}

// Cached active-state query: same cache-level dispatch as the value-address
// lookup.
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_readaccessor_is_active(pnanovdb_grid_type_t grid_type, pnanovdb_buf_t buf, PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, PNANOVDB_IN(pnanovdb_coord_t) ijk)
{
    int dirty = pnanovdb_readaccessor_computedirty(acc, ijk);

    pnanovdb_bool_t is_active;
    if (pnanovdb_readaccessor_iscached0(acc, dirty))
    {
        is_active = pnanovdb_leaf_is_active_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).leaf, ijk, acc);
    }
    else if (pnanovdb_readaccessor_iscached1(acc, dirty))
    {
        is_active = pnanovdb_lower_is_active_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).lower, ijk, acc);
    }
    else if (pnanovdb_readaccessor_iscached2(acc, dirty))
    {
        is_active = pnanovdb_upper_is_active_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).upper, ijk, acc);
    }
    else
    {
        is_active = pnanovdb_root_is_active_and_cache(grid_type, buf, PNANOVDB_DEREF(acc).root, ijk, acc);
    }
    return is_active;
}

// ------------------------------------------------ Map Transforms -----------------------------------------------------------

// Applies the map's full affine transform: dst = M * src + v, where M is the
// 3x3 matrix (row-major matf) and v the translation vector (vecf).
PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_map_apply(pnanovdb_buf_t buf, pnanovdb_map_handle_t map, PNANOVDB_IN(pnanovdb_vec3_t) src)
{
    pnanovdb_vec3_t dst;
    float sx = PNANOVDB_DEREF(src).x;
    float sy = PNANOVDB_DEREF(src).y;
    float sz = PNANOVDB_DEREF(src).z;
    dst.x = sx * pnanovdb_map_get_matf(buf, map, 0) + sy *
        pnanovdb_map_get_matf(buf, map, 1) + sz * pnanovdb_map_get_matf(buf, map, 2) + pnanovdb_map_get_vecf(buf, map, 0);
    dst.y = sx * pnanovdb_map_get_matf(buf, map, 3) + sy * pnanovdb_map_get_matf(buf, map, 4) + sz * pnanovdb_map_get_matf(buf, map, 5) + pnanovdb_map_get_vecf(buf, map, 1);
    dst.z = sx * pnanovdb_map_get_matf(buf, map, 6) + sy * pnanovdb_map_get_matf(buf, map, 7) + sz * pnanovdb_map_get_matf(buf, map, 8) + pnanovdb_map_get_vecf(buf, map, 2);
    return dst;
}

// Applies the inverse affine transform: dst = M^-1 * (src - v), using the
// map's stored inverse matrix (invmatf).
PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_map_apply_inverse(pnanovdb_buf_t buf, pnanovdb_map_handle_t map, PNANOVDB_IN(pnanovdb_vec3_t) src)
{
    pnanovdb_vec3_t dst;
    float sx = PNANOVDB_DEREF(src).x - pnanovdb_map_get_vecf(buf, map, 0);
    float sy = PNANOVDB_DEREF(src).y - pnanovdb_map_get_vecf(buf, map, 1);
    float sz = PNANOVDB_DEREF(src).z - pnanovdb_map_get_vecf(buf, map, 2);
    dst.x = sx * pnanovdb_map_get_invmatf(buf, map, 0) + sy * pnanovdb_map_get_invmatf(buf, map, 1) + sz * pnanovdb_map_get_invmatf(buf, map, 2);
    dst.y = sx * pnanovdb_map_get_invmatf(buf, map, 3) + sy * pnanovdb_map_get_invmatf(buf, map, 4) + sz * pnanovdb_map_get_invmatf(buf, map, 5);
    dst.z = sx * pnanovdb_map_get_invmatf(buf, map, 6) + sy * pnanovdb_map_get_invmatf(buf, map, 7) + sz * pnanovdb_map_get_invmatf(buf, map, 8);
    return dst;
}

// Applies only the linear part (no translation): dst = M * src. Suitable for
// direction vectors (continues below).
PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_map_apply_jacobi(pnanovdb_buf_t buf, pnanovdb_map_handle_t map, PNANOVDB_IN(pnanovdb_vec3_t) src)
{
    pnanovdb_vec3_t dst;
    float sx = PNANOVDB_DEREF(src).x;
    float sy = PNANOVDB_DEREF(src).y;
    float sz = PNANOVDB_DEREF(src).z;
    dst.x = sx * pnanovdb_map_get_matf(buf, map, 0) + sy * pnanovdb_map_get_matf(buf, map, 1) + sz * pnanovdb_map_get_matf(buf, map, 2);
    dst.y = sx * pnanovdb_map_get_matf(buf, map, 3) + sy * pnanovdb_map_get_matf(buf, map, 4) + sz * pnanovdb_map_get_matf(buf, map, 5);
    dst.z = sx * pnanovdb_map_get_matf(buf, map, 6) + sy * pnanovdb_map_get_matf(buf, map, 7) + sz * pnanovdb_map_get_matf(buf,
        map, 8);
    return dst;
}

// Applies only the inverse linear part (no translation): dst = M^-1 * src.
// Suitable for direction vectors.
PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_map_apply_inverse_jacobi(pnanovdb_buf_t buf, pnanovdb_map_handle_t map, PNANOVDB_IN(pnanovdb_vec3_t) src)
{
    pnanovdb_vec3_t dst;
    float sx = PNANOVDB_DEREF(src).x;
    float sy = PNANOVDB_DEREF(src).y;
    float sz = PNANOVDB_DEREF(src).z;
    dst.x = sx * pnanovdb_map_get_invmatf(buf, map, 0) + sy * pnanovdb_map_get_invmatf(buf, map, 1) + sz * pnanovdb_map_get_invmatf(buf, map, 2);
    dst.y = sx * pnanovdb_map_get_invmatf(buf, map, 3) + sy * pnanovdb_map_get_invmatf(buf, map, 4) + sz * pnanovdb_map_get_invmatf(buf, map, 5);
    dst.z = sx * pnanovdb_map_get_invmatf(buf, map, 6) + sy * pnanovdb_map_get_invmatf(buf, map, 7) + sz * pnanovdb_map_get_invmatf(buf, map, 8);
    return dst;
}

// World-space point -> index-space point, via the grid's map.
PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_grid_world_to_indexf(pnanovdb_buf_t buf, pnanovdb_grid_handle_t grid, PNANOVDB_IN(pnanovdb_vec3_t) src)
{
    pnanovdb_map_handle_t map = pnanovdb_grid_get_map(buf, grid);
    return pnanovdb_map_apply_inverse(buf, map, src);
}

// Index-space point -> world-space point, via the grid's map.
PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_grid_index_to_worldf(pnanovdb_buf_t buf, pnanovdb_grid_handle_t grid, PNANOVDB_IN(pnanovdb_vec3_t) src)
{
    pnanovdb_map_handle_t map = pnanovdb_grid_get_map(buf, grid);
    return pnanovdb_map_apply(buf, map, src);
}

// World-space direction -> index-space direction (linear part only).
PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_grid_world_to_index_dirf(pnanovdb_buf_t buf, pnanovdb_grid_handle_t grid, PNANOVDB_IN(pnanovdb_vec3_t) src)
{
    pnanovdb_map_handle_t map = pnanovdb_grid_get_map(buf, grid);
    return pnanovdb_map_apply_inverse_jacobi(buf, map, src);
}

// Index-space direction -> world-space direction (linear part only).
PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_grid_index_to_world_dirf(pnanovdb_buf_t buf, pnanovdb_grid_handle_t grid, PNANOVDB_IN(pnanovdb_vec3_t) src)
{
    pnanovdb_map_handle_t map = pnanovdb_grid_get_map(buf, grid);
    return pnanovdb_map_apply_jacobi(buf, map, src);
}

// ------------------------------------------------ DitherLUT -----------------------------------------------------------
// This table was generated with
/**************

static constexpr inline uint32
SYSwang_inthash(uint32 key)
{
    // From http://www.concentric.net/~Ttwang/tech/inthash.htm
    key += ~(key << 16);
    key ^= (key >> 5);
    key += (key << 3);
    key ^= (key >> 13);
    key += ~(key << 9);
    key ^= (key >> 17);
    return key;
}

static void
ut_initDitherR(float *pattern, float offset,
    int x, int y, int z, int res, int goalres)
{
    // These offsets are designed to maximize the difference between
    // dither values in nearby voxels within a given 2x2x2 cell, without
    // producing axis-aligned artifacts. They are organized in row-major
    // order.
    static const float theDitherOffset[] = {0,4,6,2,5,1,3,7};
    static const float theScale = 0.125F;
    int key = (((z << res) + y) << res) + x;

    if (res == goalres)
    {
        pattern[key] = offset;
        return;
    }

    // Randomly flip (on each axis) the dithering patterns used by the
    // subcells. This key is xor'd with the subcell index below before
    // looking up in the dither offset list.
    key = SYSwang_inthash(key) & 7;

    x <<= 1;
    y <<= 1;
    z <<= 1;

    offset *= theScale;
    for (int i = 0; i < 8; i++)
        ut_initDitherR(pattern, offset+theDitherOffset[i ^ key]*theScale,
            x+(i&1), y+((i&2)>>1), z+((i&4)>>2), res+1, goalres);
}

// This is a compact algorithm that accomplishes essentially the same thing
// as ut_initDither() above. We should eventually switch to use this and
// clean up the dead code.
static fpreal32 *
ut_initDitherRecursive(int goalres)
{
    const int nfloat = 1 << (goalres*3);
    float *pattern = new float[nfloat];
    ut_initDitherR(pattern, 1.0F, 0, 0, 0, 0, goalres);

    // This has built an even spacing from 1/nfloat to 1.0.
    // However, our dither pattern should be 1/(nfloat+1) to nfloat/(nfloat+1),
    // so we do a correction here. Note that the earlier calculations are
    // done with powers of 2 so are exact, so it does make sense to delay
    // the renormalization to this pass.
+ float correctionterm = nfloat / (nfloat+1.0F); + for (int i = 0; i < nfloat; i++) + pattern[i] *= correctionterm; + return pattern; +} + + theDitherMatrix = ut_initDitherRecursive(3); + + for (int i = 0; i < 512/8; i ++) + { + for (int j = 0; j < 8; j ++) + std::cout << theDitherMatrix[i*8+j] << "f, "; + std::cout << std::endl; + } + + **************/ + +PNANOVDB_STATIC_CONST float pnanovdb_dither_lut[512] = +{ + 0.14425f, 0.643275f, 0.830409f, 0.331384f, 0.105263f, 0.604289f, 0.167641f, 0.666667f, + 0.892788f, 0.393762f, 0.0818713f, 0.580897f, 0.853801f, 0.354776f, 0.916179f, 0.417154f, + 0.612086f, 0.11306f, 0.79922f, 0.300195f, 0.510721f, 0.0116959f, 0.947368f, 0.448343f, + 0.362573f, 0.861598f, 0.0506823f, 0.549708f, 0.261209f, 0.760234f, 0.19883f, 0.697856f, + 0.140351f, 0.639376f, 0.576998f, 0.0779727f, 0.522417f, 0.0233918f, 0.460039f, 0.959064f, + 0.888889f, 0.389864f, 0.327485f, 0.826511f, 0.272904f, 0.77193f, 0.709552f, 0.210526f, + 0.483431f, 0.982456f, 0.296296f, 0.795322f, 0.116959f, 0.615984f, 0.0545809f, 0.553606f, + 0.732943f, 0.233918f, 0.545809f, 0.0467836f, 0.865497f, 0.366472f, 0.803119f, 0.304094f, + 0.518519f, 0.0194932f, 0.45614f, 0.955166f, 0.729045f, 0.230019f, 0.54191f, 0.042885f, + 0.269006f, 0.768031f, 0.705653f, 0.206628f, 0.479532f, 0.978558f, 0.292398f, 0.791423f, + 0.237817f, 0.736842f, 0.424951f, 0.923977f, 0.136452f, 0.635478f, 0.323587f, 0.822612f, + 0.986355f, 0.487329f, 0.674464f, 0.175439f, 0.88499f, 0.385965f, 0.573099f, 0.0740741f, + 0.51462f, 0.0155945f, 0.202729f, 0.701754f, 0.148148f, 0.647174f, 0.834308f, 0.335283f, + 0.265107f, 0.764133f, 0.951267f, 0.452242f, 0.896686f, 0.397661f, 0.08577f, 0.584795f, + 0.8577f, 0.358674f, 0.920078f, 0.421053f, 0.740741f, 0.241715f, 0.678363f, 0.179337f, + 0.109162f, 0.608187f, 0.17154f, 0.670565f, 0.491228f, 0.990253f, 0.42885f, 0.927875f, + 0.0662768f, 0.565302f, 0.62768f, 0.128655f, 0.183236f, 0.682261f, 0.744639f, 0.245614f, + 0.814815f, 0.315789f, 0.378168f, 0.877193f, 0.931774f, 
0.432749f, 0.495127f, 0.994152f, + 0.0350877f, 0.534113f, 0.97076f, 0.471735f, 0.214425f, 0.71345f, 0.526316f, 0.0272904f, + 0.783626f, 0.2846f, 0.222222f, 0.721248f, 0.962963f, 0.463938f, 0.276803f, 0.775828f, + 0.966862f, 0.467836f, 0.405458f, 0.904483f, 0.0701754f, 0.569201f, 0.881092f, 0.382066f, + 0.218324f, 0.717349f, 0.654971f, 0.155945f, 0.818713f, 0.319688f, 0.132554f, 0.631579f, + 0.0623782f, 0.561404f, 0.748538f, 0.249513f, 0.912281f, 0.413255f, 0.974659f, 0.475634f, + 0.810916f, 0.311891f, 0.499025f, 0.998051f, 0.163743f, 0.662768f, 0.226121f, 0.725146f, + 0.690058f, 0.191033f, 0.00389864f, 0.502924f, 0.557505f, 0.0584795f, 0.120858f, 0.619883f, + 0.440546f, 0.939571f, 0.752437f, 0.253411f, 0.307992f, 0.807018f, 0.869396f, 0.37037f, + 0.658869f, 0.159844f, 0.346979f, 0.846004f, 0.588694f, 0.0896686f, 0.152047f, 0.651072f, + 0.409357f, 0.908382f, 0.596491f, 0.0974659f, 0.339181f, 0.838207f, 0.900585f, 0.401559f, + 0.34308f, 0.842105f, 0.779727f, 0.280702f, 0.693957f, 0.194932f, 0.25731f, 0.756335f, + 0.592593f, 0.0935673f, 0.0311891f, 0.530214f, 0.444444f, 0.94347f, 0.506823f, 0.00779727f, + 0.68616f, 0.187135f, 0.124756f, 0.623782f, 0.288499f, 0.787524f, 0.350877f, 0.849903f, + 0.436647f, 0.935673f, 0.873294f, 0.374269f, 0.538012f, 0.0389864f, 0.60039f, 0.101365f, + 0.57115f, 0.0721248f, 0.758285f, 0.259259f, 0.719298f, 0.220273f, 0.532164f, 0.0331384f, + 0.321637f, 0.820663f, 0.00974659f, 0.508772f, 0.469786f, 0.968811f, 0.282651f, 0.781676f, + 0.539961f, 0.0409357f, 0.727096f, 0.22807f, 0.500975f, 0.00194932f, 0.563353f, 0.0643275f, + 0.290448f, 0.789474f, 0.477583f, 0.976608f, 0.251462f, 0.750487f, 0.31384f, 0.812865f, + 0.94152f, 0.442495f, 0.879142f, 0.380117f, 0.37232f, 0.871345f, 0.309942f, 0.808967f, + 0.192982f, 0.692008f, 0.130604f, 0.62963f, 0.621832f, 0.122807f, 0.559454f, 0.0604289f, + 0.660819f, 0.161793f, 0.723197f, 0.224172f, 0.403509f, 0.902534f, 0.840156f, 0.341131f, + 0.411306f, 0.910331f, 0.473684f, 0.97271f, 0.653021f, 0.153996f, 
0.0916179f, 0.590643f, + 0.196881f, 0.695906f, 0.384016f, 0.883041f, 0.0955166f, 0.594542f, 0.157895f, 0.65692f, + 0.945419f, 0.446394f, 0.633528f, 0.134503f, 0.844055f, 0.345029f, 0.906433f, 0.407407f, + 0.165692f, 0.664717f, 0.103314f, 0.602339f, 0.126706f, 0.625731f, 0.189084f, 0.688109f, + 0.91423f, 0.415205f, 0.851852f, 0.352827f, 0.875244f, 0.376218f, 0.937622f, 0.438596f, + 0.317739f, 0.816764f, 0.255361f, 0.754386f, 0.996101f, 0.497076f, 0.933723f, 0.434698f, + 0.567251f, 0.0682261f, 0.504873f, 0.00584795f, 0.247563f, 0.746589f, 0.185185f, 0.684211f, + 0.037037f, 0.536062f, 0.0994152f, 0.598441f, 0.777778f, 0.278752f, 0.465887f, 0.964912f, + 0.785575f, 0.28655f, 0.847953f, 0.348928f, 0.0292398f, 0.528265f, 0.7154f, 0.216374f, + 0.39961f, 0.898636f, 0.961014f, 0.461988f, 0.0487329f, 0.547758f, 0.111111f, 0.610136f, + 0.649123f, 0.150097f, 0.212476f, 0.711501f, 0.797271f, 0.298246f, 0.859649f, 0.360624f, + 0.118908f, 0.617934f, 0.0565302f, 0.555556f, 0.329435f, 0.82846f, 0.516569f, 0.0175439f, + 0.867446f, 0.368421f, 0.805068f, 0.306043f, 0.578947f, 0.079922f, 0.267057f, 0.766082f, + 0.270955f, 0.76998f, 0.707602f, 0.208577f, 0.668616f, 0.169591f, 0.606238f, 0.107212f, + 0.520468f, 0.0214425f, 0.45809f, 0.957115f, 0.419103f, 0.918129f, 0.356725f, 0.855751f, + 0.988304f, 0.489279f, 0.426901f, 0.925926f, 0.450292f, 0.949318f, 0.512671f, 0.0136452f, + 0.239766f, 0.738791f, 0.676413f, 0.177388f, 0.699805f, 0.20078f, 0.263158f, 0.762183f, + 0.773879f, 0.274854f, 0.337232f, 0.836257f, 0.672515f, 0.173489f, 0.734893f, 0.235867f, + 0.0253411f, 0.524366f, 0.586745f, 0.0877193f, 0.423002f, 0.922027f, 0.48538f, 0.984405f, + 0.74269f, 0.243665f, 0.680312f, 0.181287f, 0.953216f, 0.454191f, 0.1423f, 0.641326f, + 0.493177f, 0.992203f, 0.430799f, 0.929825f, 0.204678f, 0.703704f, 0.890838f, 0.391813f, + 0.894737f, 0.395712f, 0.0838207f, 0.582846f, 0.0448343f, 0.54386f, 0.231969f, 0.730994f, + 0.146199f, 0.645224f, 0.832359f, 0.333333f, 0.793372f, 0.294347f, 0.980507f, 
    0.481481f,
    0.364522f, 0.863548f, 0.80117f, 0.302144f, 0.824561f, 0.325536f, 0.138402f, 0.637427f,
    0.614035f, 0.11501f, 0.0526316f, 0.551657f, 0.0760234f, 0.575049f, 0.88694f, 0.387914f,
};

// Returns the dither offset for a linear index (wrapped to the 512-entry LUT),
// or the neutral value 0.5 when dithering is disabled.
PNANOVDB_FORCE_INLINE float pnanovdb_dither_lookup(pnanovdb_bool_t enabled, int offset)
{
    return enabled ? pnanovdb_dither_lut[offset & 511] : 0.5f;
}

// ------------------------------------------------ HDDA -----------------------------------------------------------

#ifdef PNANOVDB_HDDA

// Comment out to disable this explicit round-off check
#define PNANOVDB_ENFORCE_FORWARD_STEPPING

#define PNANOVDB_HDDA_FLOAT_MAX 1e38f

// Hierarchical DDA state: marches a ray through dim-aligned cells.
struct pnanovdb_hdda_t
{
    pnanovdb_int32_t dim;    // current cell edge length in voxels
    float tmin;              // current ray parameter
    float tmax;              // ray parameter where marching stops
    pnanovdb_coord_t voxel;  // dim-aligned origin of the current cell
    pnanovdb_coord_t step;   // per-axis step direction: -1, 0, or +1
    pnanovdb_vec3_t delta;   // per-axis t advance per voxel (always >= 0)
    pnanovdb_vec3_t next;    // per-axis t of the next cell-boundary crossing
};
PNANOVDB_STRUCT_TYPEDEF(pnanovdb_hdda_t)

// Floors a continuous index-space position to integer voxel coordinates.
PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_hdda_pos_to_ijk(PNANOVDB_IN(pnanovdb_vec3_t) pos)
{
    pnanovdb_coord_t voxel;
    voxel.x = pnanovdb_float_to_int32(pnanovdb_floor(PNANOVDB_DEREF(pos).x));
    voxel.y = pnanovdb_float_to_int32(pnanovdb_floor(PNANOVDB_DEREF(pos).y));
    voxel.z = pnanovdb_float_to_int32(pnanovdb_floor(PNANOVDB_DEREF(pos).z));
    return voxel;
}

// As pos_to_ijk, but additionally masks the coordinates down to the origin of
// the enclosing dim-aligned cell (dim is a power of two).
PNANOVDB_FORCE_INLINE pnanovdb_coord_t pnanovdb_hdda_pos_to_voxel(PNANOVDB_IN(pnanovdb_vec3_t) pos, int dim)
{
    pnanovdb_coord_t voxel;
    voxel.x = pnanovdb_float_to_int32(pnanovdb_floor(PNANOVDB_DEREF(pos).x)) & (~(dim - 1));
    voxel.y = pnanovdb_float_to_int32(pnanovdb_floor(PNANOVDB_DEREF(pos).y)) & (~(dim - 1));
    voxel.z = pnanovdb_float_to_int32(pnanovdb_floor(PNANOVDB_DEREF(pos).z)) & (~(dim - 1));
    return voxel;
}

// Returns the ray position at parameter tmin: origin + tmin * direction.
PNANOVDB_FORCE_INLINE pnanovdb_vec3_t pnanovdb_hdda_ray_start(PNANOVDB_IN(pnanovdb_vec3_t) origin, float tmin, PNANOVDB_IN(pnanovdb_vec3_t) direction)
{
    pnanovdb_vec3_t pos = pnanovdb_vec3_add(
        pnanovdb_vec3_mul(PNANOVDB_DEREF(direction), pnanovdb_vec3_uniform(tmin)),
        PNANOVDB_DEREF(origin)
    );
    return pos;
}

// Initializes the DDA for a ray over [tmin, tmax] at cell size dim: computes
// the starting cell and, per axis, the step direction, the t advance per
// voxel (delta), and the t of the first boundary crossing (next). Axes with
// zero direction never advance (next pinned to PNANOVDB_HDDA_FLOAT_MAX).
PNANOVDB_FORCE_INLINE void pnanovdb_hdda_init(PNANOVDB_INOUT(pnanovdb_hdda_t) hdda, PNANOVDB_IN(pnanovdb_vec3_t) origin, float tmin, PNANOVDB_IN(pnanovdb_vec3_t) direction, float tmax, int dim)
{
    PNANOVDB_DEREF(hdda).dim = dim;
    PNANOVDB_DEREF(hdda).tmin = tmin;
    PNANOVDB_DEREF(hdda).tmax = tmax;

    pnanovdb_vec3_t pos = pnanovdb_hdda_ray_start(origin, tmin, direction);
    pnanovdb_vec3_t dir_inv = pnanovdb_vec3_div(pnanovdb_vec3_uniform(1.f), PNANOVDB_DEREF(direction));

    PNANOVDB_DEREF(hdda).voxel = pnanovdb_hdda_pos_to_voxel(PNANOVDB_REF(pos), dim);

    // x
    if (PNANOVDB_DEREF(direction).x == 0.f)
    {
        PNANOVDB_DEREF(hdda).next.x = PNANOVDB_HDDA_FLOAT_MAX;
        PNANOVDB_DEREF(hdda).step.x = 0;
        PNANOVDB_DEREF(hdda).delta.x = 0.f;
    }
    else if (dir_inv.x > 0.f)
    {
        // moving +x: next crossing is the cell's far (voxel.x + dim) face
        PNANOVDB_DEREF(hdda).step.x = 1;
        PNANOVDB_DEREF(hdda).next.x = PNANOVDB_DEREF(hdda).tmin + (PNANOVDB_DEREF(hdda).voxel.x + dim - pos.x) * dir_inv.x;
        PNANOVDB_DEREF(hdda).delta.x = dir_inv.x;
    }
    else
    {
        // moving -x: next crossing is the cell's near (voxel.x) face
        PNANOVDB_DEREF(hdda).step.x = -1;
        PNANOVDB_DEREF(hdda).next.x = PNANOVDB_DEREF(hdda).tmin + (PNANOVDB_DEREF(hdda).voxel.x - pos.x) * dir_inv.x;
        PNANOVDB_DEREF(hdda).delta.x = -dir_inv.x;
    }

    // y
    if (PNANOVDB_DEREF(direction).y == 0.f)
    {
        PNANOVDB_DEREF(hdda).next.y = PNANOVDB_HDDA_FLOAT_MAX;
        PNANOVDB_DEREF(hdda).step.y = 0;
        PNANOVDB_DEREF(hdda).delta.y = 0.f;
    }
    else if (dir_inv.y > 0.f)
    {
        PNANOVDB_DEREF(hdda).step.y = 1;
        PNANOVDB_DEREF(hdda).next.y = PNANOVDB_DEREF(hdda).tmin + (PNANOVDB_DEREF(hdda).voxel.y + dim - pos.y) * dir_inv.y;
        PNANOVDB_DEREF(hdda).delta.y = dir_inv.y;
    }
    else
    {
        PNANOVDB_DEREF(hdda).step.y = -1;
        PNANOVDB_DEREF(hdda).next.y = PNANOVDB_DEREF(hdda).tmin + (PNANOVDB_DEREF(hdda).voxel.y - pos.y) * dir_inv.y;
        PNANOVDB_DEREF(hdda).delta.y = -dir_inv.y;
    }

    // z
    if (PNANOVDB_DEREF(direction).z == 0.f)
    {
        PNANOVDB_DEREF(hdda).next.z = PNANOVDB_HDDA_FLOAT_MAX;
        PNANOVDB_DEREF(hdda).step.z = 0;
        PNANOVDB_DEREF(hdda).delta.z = 0.f;
    }
    else if (dir_inv.z > 0.f)
    {
        PNANOVDB_DEREF(hdda).step.z = 1;
        PNANOVDB_DEREF(hdda).next.z = PNANOVDB_DEREF(hdda).tmin + (PNANOVDB_DEREF(hdda).voxel.z + dim - pos.z) * dir_inv.z;
        PNANOVDB_DEREF(hdda).delta.z = dir_inv.z;
    }
    else
    {
        PNANOVDB_DEREF(hdda).step.z = -1;
        PNANOVDB_DEREF(hdda).next.z = PNANOVDB_DEREF(hdda).tmin + (PNANOVDB_DEREF(hdda).voxel.z - pos.z) * dir_inv.z;
        PNANOVDB_DEREF(hdda).delta.z = -dir_inv.z;
    }
}

// Re-synchronizes the DDA after the traversal level (cell size) changes.
// Returns PNANOVDB_FALSE without touching state when dim is unchanged;
// otherwise recomputes the current cell and the per-axis next crossings at
// the new dim (step/delta signs are unchanged).
PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_hdda_update(PNANOVDB_INOUT(pnanovdb_hdda_t) hdda, PNANOVDB_IN(pnanovdb_vec3_t) origin, PNANOVDB_IN(pnanovdb_vec3_t) direction, int dim)
{
    if (PNANOVDB_DEREF(hdda).dim == dim)
    {
        return PNANOVDB_FALSE;
    }
    PNANOVDB_DEREF(hdda).dim = dim;

    pnanovdb_vec3_t pos = pnanovdb_vec3_add(
        pnanovdb_vec3_mul(PNANOVDB_DEREF(direction), pnanovdb_vec3_uniform(PNANOVDB_DEREF(hdda).tmin)),
        PNANOVDB_DEREF(origin)
    );
    pnanovdb_vec3_t dir_inv = pnanovdb_vec3_div(pnanovdb_vec3_uniform(1.f), PNANOVDB_DEREF(direction));

    PNANOVDB_DEREF(hdda).voxel = pnanovdb_hdda_pos_to_voxel(PNANOVDB_REF(pos), dim);

    if (PNANOVDB_DEREF(hdda).step.x != 0)
    {
        PNANOVDB_DEREF(hdda).next.x = PNANOVDB_DEREF(hdda).tmin + (PNANOVDB_DEREF(hdda).voxel.x - pos.x) * dir_inv.x;
        if (PNANOVDB_DEREF(hdda).step.x > 0)
        {
            // positive direction crosses at the far face of the new cell
            PNANOVDB_DEREF(hdda).next.x += dim * dir_inv.x;
        }
    }
    if (PNANOVDB_DEREF(hdda).step.y != 0)
    {
        PNANOVDB_DEREF(hdda).next.y = PNANOVDB_DEREF(hdda).tmin + (PNANOVDB_DEREF(hdda).voxel.y - pos.y) * dir_inv.y;
        if (PNANOVDB_DEREF(hdda).step.y > 0)
        {
            PNANOVDB_DEREF(hdda).next.y += dim * dir_inv.y;
        }
    }
    if (PNANOVDB_DEREF(hdda).step.z != 0)
    {
        PNANOVDB_DEREF(hdda).next.z = PNANOVDB_DEREF(hdda).tmin + (PNANOVDB_DEREF(hdda).voxel.z - pos.z) * dir_inv.z;
        if (PNANOVDB_DEREF(hdda).step.z > 0)
        {
            PNANOVDB_DEREF(hdda).next.z += dim * dir_inv.z;
        }
    }

    return PNANOVDB_TRUE;
}

PNANOVDB_FORCE_INLINE pnanovdb_bool_t
pnanovdb_hdda_step(PNANOVDB_INOUT(pnanovdb_hdda_t) hdda) +{ + pnanovdb_bool_t ret; + if (PNANOVDB_DEREF(hdda).next.x < PNANOVDB_DEREF(hdda).next.y && PNANOVDB_DEREF(hdda).next.x < PNANOVDB_DEREF(hdda).next.z) + { +#ifdef PNANOVDB_ENFORCE_FORWARD_STEPPING + if (PNANOVDB_DEREF(hdda).next.x <= PNANOVDB_DEREF(hdda).tmin) + { + PNANOVDB_DEREF(hdda).next.x += PNANOVDB_DEREF(hdda).tmin - 0.999999f * PNANOVDB_DEREF(hdda).next.x + 1.0e-6f; + } +#endif + PNANOVDB_DEREF(hdda).tmin = PNANOVDB_DEREF(hdda).next.x; + PNANOVDB_DEREF(hdda).next.x += PNANOVDB_DEREF(hdda).dim * PNANOVDB_DEREF(hdda).delta.x; + PNANOVDB_DEREF(hdda).voxel.x += PNANOVDB_DEREF(hdda).dim * PNANOVDB_DEREF(hdda).step.x; + ret = PNANOVDB_DEREF(hdda).tmin <= PNANOVDB_DEREF(hdda).tmax; + } + else if (PNANOVDB_DEREF(hdda).next.y < PNANOVDB_DEREF(hdda).next.z) + { +#ifdef PNANOVDB_ENFORCE_FORWARD_STEPPING + if (PNANOVDB_DEREF(hdda).next.y <= PNANOVDB_DEREF(hdda).tmin) + { + PNANOVDB_DEREF(hdda).next.y += PNANOVDB_DEREF(hdda).tmin - 0.999999f * PNANOVDB_DEREF(hdda).next.y + 1.0e-6f; + } +#endif + PNANOVDB_DEREF(hdda).tmin = PNANOVDB_DEREF(hdda).next.y; + PNANOVDB_DEREF(hdda).next.y += PNANOVDB_DEREF(hdda).dim * PNANOVDB_DEREF(hdda).delta.y; + PNANOVDB_DEREF(hdda).voxel.y += PNANOVDB_DEREF(hdda).dim * PNANOVDB_DEREF(hdda).step.y; + ret = PNANOVDB_DEREF(hdda).tmin <= PNANOVDB_DEREF(hdda).tmax; + } + else + { +#ifdef PNANOVDB_ENFORCE_FORWARD_STEPPING + if (PNANOVDB_DEREF(hdda).next.z <= PNANOVDB_DEREF(hdda).tmin) + { + PNANOVDB_DEREF(hdda).next.z += PNANOVDB_DEREF(hdda).tmin - 0.999999f * PNANOVDB_DEREF(hdda).next.z + 1.0e-6f; + } +#endif + PNANOVDB_DEREF(hdda).tmin = PNANOVDB_DEREF(hdda).next.z; + PNANOVDB_DEREF(hdda).next.z += PNANOVDB_DEREF(hdda).dim * PNANOVDB_DEREF(hdda).delta.z; + PNANOVDB_DEREF(hdda).voxel.z += PNANOVDB_DEREF(hdda).dim * PNANOVDB_DEREF(hdda).step.z; + ret = PNANOVDB_DEREF(hdda).tmin <= PNANOVDB_DEREF(hdda).tmax; + } + return ret; +} + +PNANOVDB_FORCE_INLINE pnanovdb_bool_t 
pnanovdb_hdda_ray_clip( + PNANOVDB_IN(pnanovdb_vec3_t) bbox_min, + PNANOVDB_IN(pnanovdb_vec3_t) bbox_max, + PNANOVDB_IN(pnanovdb_vec3_t) origin, PNANOVDB_INOUT(float) tmin, + PNANOVDB_IN(pnanovdb_vec3_t) direction, PNANOVDB_INOUT(float) tmax +) +{ + pnanovdb_vec3_t dir_inv = pnanovdb_vec3_div(pnanovdb_vec3_uniform(1.f), PNANOVDB_DEREF(direction)); + pnanovdb_vec3_t t0 = pnanovdb_vec3_mul(pnanovdb_vec3_sub(PNANOVDB_DEREF(bbox_min), PNANOVDB_DEREF(origin)), dir_inv); + pnanovdb_vec3_t t1 = pnanovdb_vec3_mul(pnanovdb_vec3_sub(PNANOVDB_DEREF(bbox_max), PNANOVDB_DEREF(origin)), dir_inv); + pnanovdb_vec3_t tmin3 = pnanovdb_vec3_min(t0, t1); + pnanovdb_vec3_t tmax3 = pnanovdb_vec3_max(t0, t1); + float tnear = pnanovdb_max(tmin3.x, pnanovdb_max(tmin3.y, tmin3.z)); + float tfar = pnanovdb_min(tmax3.x, pnanovdb_min(tmax3.y, tmax3.z)); + pnanovdb_bool_t hit = tnear <= tfar; + PNANOVDB_DEREF(tmin) = pnanovdb_max(PNANOVDB_DEREF(tmin), tnear); + PNANOVDB_DEREF(tmax) = pnanovdb_min(PNANOVDB_DEREF(tmax), tfar); + return hit; +} + +PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_hdda_zero_crossing( + pnanovdb_grid_type_t grid_type, + pnanovdb_buf_t buf, + PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, + PNANOVDB_IN(pnanovdb_vec3_t) origin, float tmin, + PNANOVDB_IN(pnanovdb_vec3_t) direction, float tmax, + PNANOVDB_INOUT(float) thit, + PNANOVDB_INOUT(float) v +) +{ + pnanovdb_coord_t bbox_min = pnanovdb_root_get_bbox_min(buf, PNANOVDB_DEREF(acc).root); + pnanovdb_coord_t bbox_max = pnanovdb_root_get_bbox_max(buf, PNANOVDB_DEREF(acc).root); + pnanovdb_vec3_t bbox_minf = pnanovdb_coord_to_vec3(bbox_min); + pnanovdb_vec3_t bbox_maxf = pnanovdb_coord_to_vec3(pnanovdb_coord_add(bbox_max, pnanovdb_coord_uniform(1))); + + pnanovdb_bool_t hit = pnanovdb_hdda_ray_clip(PNANOVDB_REF(bbox_minf), PNANOVDB_REF(bbox_maxf), origin, PNANOVDB_REF(tmin), direction, PNANOVDB_REF(tmax)); + if (!hit || tmax > 1.0e20f) + { + return PNANOVDB_FALSE; + } + + pnanovdb_vec3_t pos = 
pnanovdb_hdda_ray_start(origin, tmin, direction); + pnanovdb_coord_t ijk = pnanovdb_hdda_pos_to_ijk(PNANOVDB_REF(pos)); + + pnanovdb_address_t address = pnanovdb_readaccessor_get_value_address(PNANOVDB_GRID_TYPE_FLOAT, buf, acc, PNANOVDB_REF(ijk)); + float v0 = pnanovdb_read_float(buf, address); + + pnanovdb_int32_t dim = pnanovdb_uint32_as_int32(pnanovdb_readaccessor_get_dim(PNANOVDB_GRID_TYPE_FLOAT, buf, acc, PNANOVDB_REF(ijk))); + pnanovdb_hdda_t hdda; + pnanovdb_hdda_init(PNANOVDB_REF(hdda), origin, tmin, direction, tmax, dim); + while (pnanovdb_hdda_step(PNANOVDB_REF(hdda))) + { + pnanovdb_vec3_t pos_start = pnanovdb_hdda_ray_start(origin, hdda.tmin + 1.0001f, direction); + ijk = pnanovdb_hdda_pos_to_ijk(PNANOVDB_REF(pos_start)); + dim = pnanovdb_uint32_as_int32(pnanovdb_readaccessor_get_dim(PNANOVDB_GRID_TYPE_FLOAT, buf, acc, PNANOVDB_REF(ijk))); + pnanovdb_hdda_update(PNANOVDB_REF(hdda), origin, direction, dim); + if (hdda.dim > 1 || !pnanovdb_readaccessor_is_active(grid_type, buf, acc, PNANOVDB_REF(ijk))) + { + continue; + } + while (pnanovdb_hdda_step(PNANOVDB_REF(hdda)) && pnanovdb_readaccessor_is_active(grid_type, buf, acc, PNANOVDB_REF(hdda.voxel))) + { + ijk = hdda.voxel; + pnanovdb_address_t address = pnanovdb_readaccessor_get_value_address(PNANOVDB_GRID_TYPE_FLOAT, buf, acc, PNANOVDB_REF(ijk)); + PNANOVDB_DEREF(v) = pnanovdb_read_float(buf, address); + if (PNANOVDB_DEREF(v) * v0 < 0.f) + { + PNANOVDB_DEREF(thit) = hdda.tmin; + return PNANOVDB_TRUE; + } + } + } + return PNANOVDB_FALSE; +} + +// Copied from Sebastian Gaida https://github.com/AcademySoftwareFoundation/openvdb/pull/1600/commits/d4065e52e5a7baaafc5ae2ad3e5f78f5036c5d5c +PNANOVDB_FORCE_INLINE pnanovdb_bool_t pnanovdb_hdda_tree_marcher( + pnanovdb_grid_type_t grid_type, + pnanovdb_buf_t buf, + PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, + PNANOVDB_IN(pnanovdb_vec3_t) origin, float tmin, + PNANOVDB_IN(pnanovdb_vec3_t) direction, float tmax, + PNANOVDB_INOUT(float) thit, + 
PNANOVDB_INOUT(float) valueAtHit +) +{ + pnanovdb_coord_t bbox_min = pnanovdb_root_get_bbox_min(buf, PNANOVDB_DEREF(acc).root); + pnanovdb_coord_t bbox_max = pnanovdb_root_get_bbox_max(buf, PNANOVDB_DEREF(acc).root); + pnanovdb_vec3_t bbox_minf = pnanovdb_coord_to_vec3(bbox_min); + pnanovdb_vec3_t bbox_maxf = pnanovdb_coord_to_vec3(pnanovdb_coord_add(bbox_max, pnanovdb_coord_uniform(1))); + + pnanovdb_bool_t hit = pnanovdb_hdda_ray_clip(PNANOVDB_REF(bbox_minf), PNANOVDB_REF(bbox_maxf), origin, PNANOVDB_REF(tmin), direction, PNANOVDB_REF(tmax)); + // Early out if ray does not hit volume + if (!hit || tmax > 1.0e20f) + { + return PNANOVDB_FALSE; + } + + pnanovdb_vec3_t pos = pnanovdb_hdda_ray_start(origin, tmin, direction); + pnanovdb_coord_t ijk = pnanovdb_hdda_pos_to_ijk(PNANOVDB_REF(pos)); + + pnanovdb_address_t address = pnanovdb_readaccessor_get_value_address(PNANOVDB_GRID_TYPE_FLOAT, buf, acc, PNANOVDB_REF(ijk)); + + pnanovdb_int32_t dim = pnanovdb_uint32_as_int32(pnanovdb_readaccessor_get_dim(PNANOVDB_GRID_TYPE_FLOAT, buf, acc, PNANOVDB_REF(ijk))); + pnanovdb_hdda_t hdda; + pnanovdb_hdda_init(PNANOVDB_REF(hdda), origin, tmin, direction, tmax, dim); + while (pnanovdb_hdda_step(PNANOVDB_REF(hdda))) + { + pnanovdb_vec3_t pos_start = pnanovdb_hdda_ray_start(origin, hdda.tmin + 1.0001f, direction); + ijk = pnanovdb_hdda_pos_to_ijk(PNANOVDB_REF(pos_start)); + dim = pnanovdb_uint32_as_int32(pnanovdb_readaccessor_get_dim(PNANOVDB_GRID_TYPE_FLOAT, buf, acc, PNANOVDB_REF(ijk))); + pnanovdb_hdda_update(PNANOVDB_REF(hdda), origin, direction, dim); + // Skip over tile + if (hdda.dim > 1 || !pnanovdb_readaccessor_is_active(grid_type, buf, acc, PNANOVDB_REF(ijk))) + { + continue; + } + // Only check active values + while (pnanovdb_hdda_step(PNANOVDB_REF(hdda)) && pnanovdb_readaccessor_is_active(grid_type, buf, acc, PNANOVDB_REF(hdda.voxel))) + { + ijk = hdda.voxel; + pnanovdb_address_t address = pnanovdb_readaccessor_get_value_address(PNANOVDB_GRID_TYPE_FLOAT, buf, acc, 
PNANOVDB_REF(ijk)); + PNANOVDB_DEREF(valueAtHit) = pnanovdb_read_float(buf, address); + + if (PNANOVDB_DEREF(valueAtHit) > 0.f) + { + PNANOVDB_DEREF(thit) = hdda.tmin; + return PNANOVDB_TRUE; + } + } + } + return PNANOVDB_FALSE; +} + +PNANOVDB_FORCE_INLINE float pnanovdb_get_value_coord( + pnanovdb_buf_t buf, + PNANOVDB_INOUT(pnanovdb_readaccessor_t) acc, + PNANOVDB_IN(pnanovdb_vec3_t) pos +) +{ + pnanovdb_coord_t ijk = pnanovdb_hdda_pos_to_ijk(PNANOVDB_REF(pos)); + pnanovdb_address_t address = pnanovdb_readaccessor_get_value_address(PNANOVDB_GRID_TYPE_FLOAT, buf, acc, PNANOVDB_REF(ijk)); + return pnanovdb_read_float(buf, address); +} + +#endif + +#endif // end of NANOVDB_PNANOVDB_H_HAS_BEEN_INCLUDED diff --git a/MVS/VolumeRenderer/Res/Shader/gs.hlsl b/MVS/VolumeRenderer/Res/Shader/gs.hlsl new file mode 100644 index 00000000..66a3f189 --- /dev/null +++ b/MVS/VolumeRenderer/Res/Shader/gs.hlsl @@ -0,0 +1,99 @@ +struct VertexData{ + float4 position:POSITION; + float4 texcoord:TEXCOORD0; + float4 normal:NORMAL; + float4 tangent:TANGENT; +}; + +struct VSOut{ + float4 position:SV_POSITION; + float4 normal:NORMAL; + float4 texcoord:TEXCOORD0; +}; + +static const float PI=3.141592; +cbuffer globalConstants:register(b0){ + float4 misc; +}; + +Texture2D T_DiffuseTexture:register(t0); +SamplerState samplerState:register(s0); + +struct MaterialData{ + float r; +}; +StructuredBuffer<MaterialData> materialData:register(t0,space1); +cbuffer DefaultVertexCB:register(b1){ + float4x4 ProjectionMatrix; + float4x4 ViewMatrix; + float4x4 ModelMatrix; + float4x4 IT_ModelMatrix; + float4x4 ReservedMemory[1020]; +}; + +VSOut MainVS(VertexData inVertexData){ + VSOut vo; + vo.normal=mul(IT_ModelMatrix,inVertexData.normal); + float4 positionWS=mul(ModelMatrix,inVertexData.position); + float4 positionVS=mul(ViewMatrix,positionWS); + vo.position=mul(ProjectionMatrix,positionVS); + //vo.position=float4(positionWS.xyz+vo.normal.xyz*sin(misc.x)*0.2f,1.0f); + vo.texcoord=inVertexData.texcoord; + return vo; +} + 
+[maxvertexcount(4)] +void MainGS(triangle VSOut inPoint[3],uint inPrimitiveID:SV_PrimitiveID, + inout TriangleStream<VSOut> outTriangleStream){ + outTriangleStream.Append(inPoint[0]); + outTriangleStream.Append(inPoint[1]); + outTriangleStream.Append(inPoint[2]); + /*VSOut vo; + float3 positionWS=inPoint[0].position.xyz; + float3 N=normalize(inPoint[0].normal.xyz); + vo.normal=float4(N,0.0f); + float3 helperVec=abs(N.y)>0.999?float3(0.0f,0.0f,1.0f):float3(0.0f,1.0f,0.0f); + float3 tangent=normalize(cross(N,helperVec));//u + float3 bitangent=normalize(cross(tangent,N));//v + float scale=materialData[inPrimitiveID].r; + + + float3 p0WS=positionWS-(bitangent*0.5f-tangent*0.5f)*scale;//left bottom + float4 p0VS=mul(ViewMatrix,float4(p0WS,1.0f)); + vo.position=mul(ProjectionMatrix,p0VS); + vo.texcoord=float4(0.0f,1.0f,0.0f,0.0f); + outTriangleStream.Append(vo); + + float3 p1WS=positionWS-(bitangent*0.5f+tangent*0.5f)*scale;//right bottom + float4 p1VS=mul(ViewMatrix,float4(p1WS,1.0f)); + vo.position=mul(ProjectionMatrix,p1VS); + vo.texcoord=float4(1.0f,1.0f,0.0f,0.0f); + outTriangleStream.Append(vo); + + float3 p2WS=positionWS+(bitangent*0.5f+tangent*0.5f)*scale;//left top + float4 p2VS=mul(ViewMatrix,float4(p2WS,1.0f)); + vo.position=mul(ProjectionMatrix,p2VS); + vo.texcoord=float4(0.0f,0.0f,0.0f,0.0f); + outTriangleStream.Append(vo); + + float3 p3WS=positionWS+(bitangent*0.5f-tangent*0.5f)*scale;//right top + float4 p3VS=mul(ViewMatrix,float4(p3WS,1.0f)); + vo.position=mul(ProjectionMatrix,p3VS); + vo.texcoord=float4(1.0f,0.0f,0.0f,0.0f); + outTriangleStream.Append(vo);*/ + +} + +float4 MainPS(VSOut inPSInput):SV_TARGET{ + float3 N=normalize(inPSInput.normal.xyz); + float3 bottomColor=float3(0.1f,0.4f,0.6f); + float3 topColor=float3(0.7f,0.7f,0.7f); + float theta=asin(N.y);//-PI/2 ~ PI/2 + theta/=PI;//-0.5~0.5 + theta+=0.5f;//0.0~1.0 + float ambientColorIntensity=1.0; + float3 ambientColor=lerp(bottomColor,topColor,theta)*ambientColorIntensity; + float4 
diffuseColor=T_DiffuseTexture.Sample(samplerState,inPSInput.texcoord.xy); + float3 surfaceColor=diffuseColor.rgb; + return float4(surfaceColor,1.0f); +} \ No newline at end of file diff --git a/MVS/VolumeRenderer/Res/Shader/ndctriangle.hlsl b/MVS/VolumeRenderer/Res/Shader/ndctriangle.hlsl new file mode 100644 index 00000000..edee8bf9 --- /dev/null +++ b/MVS/VolumeRenderer/Res/Shader/ndctriangle.hlsl @@ -0,0 +1,65 @@ +struct VertexData{ + float4 position:POSITION; + float4 texcoord:TEXCOORD0; + float4 normal:NORMAL; + float4 tangent:TANGENT; +}; + +struct VSOut{ + float4 position:SV_POSITION; + float4 normal:NORMAL; + float4 texcoord:TEXCOORD0; + float4 positionWS:TEXCOORD1; +}; + +static const float PI=3.141592; +cbuffer globalConstants:register(b0){ + float4 misc; +}; + +cbuffer DefaultVertexCB:register(b1){ + float4x4 ProjectionMatrix; + float4x4 ViewMatrix; + float4x4 ModelMatrix; + float4x4 IT_ModelMatrix; + float4x4 ReservedMemory[1020]; +}; + +VSOut MainVS(VertexData inVertexData){ + VSOut vo; + vo.normal=mul(IT_ModelMatrix,inVertexData.normal); + float3 positionMS=inVertexData.position.xyz+vo.normal*sin(misc.x); + float4 positionWS=mul(ModelMatrix,float4(positionMS,1.0)); + float4 positionVS=mul(ViewMatrix,positionWS); + vo.position=mul(ProjectionMatrix,positionVS); + vo.positionWS=positionWS; + vo.texcoord=inVertexData.texcoord; + return vo; +} + +float4 MainPS(VSOut inPSInput):SV_TARGET{ + float3 N=normalize(inPSInput.normal.xyz); + float3 bottomColor=float3(0.1f,0.4f,0.6f); + float3 topColor=float3(0.7f,0.7f,0.7f); + float theta=asin(N.y);//-PI/2 ~ PI/2 + theta/=PI;//-0.5~0.5 + theta+=0.5f;//0.0~1.0 + float ambientColorIntensity=0.2; + float3 ambientColor=lerp(bottomColor,topColor,theta)*ambientColorIntensity; + float3 L=normalize(float3(1.0f,1.0f,-1.0f)); + + float diffuseIntensity=max(0.0f,dot(N,L)); + float3 diffuseLightColor=float3(0.1f,0.4f,0.6f); + float3 diffuseColor=diffuseLightColor*diffuseIntensity; + + float3 
specularColor=float3(0.0f,0.0f,0.0f); + if(diffuseIntensity>0.0f){ + float3 cameraPositionWS=float3(0.0f,0.0f,0.0f); + float3 V=normalize(cameraPositionWS.xyz-inPSInput.positionWS.xyz); + float3 R=normalize(reflect(-L,N)); + float specularIntensity=pow(max(0.0f,dot(V,R)),128.0f); + specularColor=float3(1.0f,1.0f,1.0f)*specularIntensity; + } + float3 surfaceColor=ambientColor+diffuseColor+specularColor; + return float4(surfaceColor,1.0f); +} \ No newline at end of file diff --git a/MVS/VolumeRenderer/Res/Shader/volume.hlsl b/MVS/VolumeRenderer/Res/Shader/volume.hlsl new file mode 100644 index 00000000..9f324565 --- /dev/null +++ b/MVS/VolumeRenderer/Res/Shader/volume.hlsl @@ -0,0 +1,250 @@ +#define PNANOVDB_HLSL +#define PNANOVDB_ADDRESS_32 +#include "PNanoVDB.hlsl" + +cbuffer CB0 : register(b1) +{ + float4x4 _InverseViewProjection; // 64 bytes + float4 _CameraPos_Density; // xyz = CameraPos, w = DensityScale + float4 _BBoxMin_Step; // xyz = BBoxMin, w = StepSize + float4 _BBoxMax_MaxSteps; // xyz = BBoxMax, w = MaxSteps + float4 _Rotation_Pad_LightSamples; // x = RotationY, yzw = pad, but we'll use differently + float4 _LightDir_Samples; // xyz = LightDir, w = LightSamples +}; + +StructuredBuffer<uint> buf : register(t1); + +struct VSInput +{ + float2 position : POSITION; + float2 texcoord : TEXCOORD0; +}; + +struct PSInput +{ + float4 position : SV_POSITION; + float2 texcoord : TEXCOORD0; + float3 worldPos : TEXCOORD1; +}; + +struct NanoVolume +{ + pnanovdb_grid_handle_t grid; + pnanovdb_grid_type_t grid_type; + pnanovdb_readaccessor_t acc; +}; + +void initVolume(inout NanoVolume volume) +{ + pnanovdb_grid_handle_t grid; + grid.address.byte_offset = 0; + + pnanovdb_grid_type_t grid_type = pnanovdb_buf_read_uint32(buf, PNANOVDB_GRID_OFF_GRID_TYPE); + pnanovdb_tree_handle_t tree = pnanovdb_grid_get_tree(buf, grid); + pnanovdb_root_handle_t root = pnanovdb_tree_get_root(buf, tree); + pnanovdb_readaccessor_t acc; + pnanovdb_readaccessor_init(acc, root); + + volume.grid = 
grid; + volume.grid_type = grid_type; + volume.acc = acc; +} + +float get_value_coord(inout pnanovdb_readaccessor_t acc, float3 pos) +{ + pnanovdb_vec3_t p = pos; + pnanovdb_coord_t ijk = pnanovdb_hdda_pos_to_ijk(p); + pnanovdb_address_t address = pnanovdb_readaccessor_get_value_address(PNANOVDB_GRID_TYPE_FLOAT, buf, acc, ijk); + return pnanovdb_read_float(buf, address); +} + +uint get_dim_coord(inout pnanovdb_readaccessor_t acc, float3 pos) +{ + pnanovdb_vec3_t p = pos; + pnanovdb_coord_t ijk = pnanovdb_hdda_pos_to_ijk(p); + return pnanovdb_readaccessor_get_dim(PNANOVDB_GRID_TYPE_FLOAT, buf, acc, ijk); +} + +bool get_hdda_hit(inout pnanovdb_readaccessor_t acc, inout float tmin, float3 origin, float3 direction, float tmax, out float valueAtHit) +{ + pnanovdb_vec3_t p_origin = origin; + pnanovdb_vec3_t p_direction = direction; + float thit; + bool hit = pnanovdb_hdda_tree_marcher( + PNANOVDB_GRID_TYPE_FLOAT, + buf, + acc, + p_origin, tmin, + p_direction, tmax, + thit, + valueAtHit + ); + tmin = thit; + return hit; +} + +float phase_function() +{ + return 1.0; +} + +uint rand_xorshift(uint seed) +{ + seed ^= (seed << 13); + seed ^= (seed >> 17); + seed ^= (seed << 5); + return seed; +} + +float random_float(float3 pos) +{ + uint seed = asuint(pos.x + pos.y + pos.z); + float res = float(rand_xorshift(seed)) * (1.0 / 4294967296.0); + res = float(rand_xorshift(asuint(res))) * (1.0 / 4294967296.0); + return res; +} + +float volumetric_shadow(float3 pos, float densityScale, inout pnanovdb_readaccessor_t acc) +{ + float lightSamples = _LightDir_Samples.w; + if (lightSamples < 1) { return 0.0; } + + float3 light_dir = _LightDir_Samples.xyz; + + float shadow = 1.0; + float sigmaS = 0.0; + float sigmaE = 0.0; + float step_size = 1.0; + float jitter = 0; + + int steps = 10; + for (int step = 0; step < steps; step++) + { + float3 sample_pos = pos + (jitter + step_size) * light_dir; + + sigmaS = get_value_coord(acc, sample_pos) * densityScale; + sigmaE = max(0.000001, sigmaS); + 
sigmaE *= 0.3; + shadow *= exp(-sigmaE * step_size); + + step_size *= 2.0; + } + + return shadow; +} + +PSInput MainVS(VSInput input) +{ + PSInput output; + output.position = float4(input.position, 0.0, 1.0); + output.texcoord = input.texcoord; + + float4 worldPosH = mul(_InverseViewProjection, float4(input.position, 0.5, 1.0)); + output.worldPos = worldPosH.xyz / worldPosH.w; + + return output; +} + +bool intersectBox(float3 origin, float3 dir, float3 boxMin, float3 boxMax, out float tmin, out float tmax) +{ + float3 invDir = 1.0 / dir; + float3 t1 = (boxMin - origin) * invDir; + float3 t2 = (boxMax - origin) * invDir; + + tmin = max(max(min(t1.x, t2.x), min(t1.y, t2.y)), min(t1.z, t2.z)); + tmax = min(min(max(t1.x, t2.x), max(t1.y, t2.y)), max(t1.z, t2.z)); + + return tmax >= tmin && tmax > 0; +} + +float4 MainPS(PSInput input) : SV_TARGET +{ + float3 rayDir = normalize(input.worldPos - _CameraPos_Density.xyz); + + float tmin = 0.01; + float tmax = 5000.0; + + NanoVolume volume; + initVolume(volume); + + float3 color = float3(0, 0, 0); + float transmittance = 1.0; + float acc_density = 0.0; + float3 ambient_light = 0.005; + + float _DensityScale = _CameraPos_Density.w; + float _StepSize = _BBoxMin_Step.w; + float _MaxSteps = _BBoxMax_MaxSteps.w; + float _RotationY = _Rotation_Pad_LightSamples.x; + float _LightSamples = _LightDir_Samples.w; + + float cosR = cos(_RotationY); + float sinR = sin(_RotationY); + float3x3 invRotY = float3x3( + cosR, 0, sinR, + 0, 1, 0, + -sinR, 0, cosR + ); + + float3 localCameraPos = mul(invRotY, _CameraPos_Density.xyz); + float3 localRayDir = mul(invRotY, rayDir); + + float not_used; + bool hit = get_hdda_hit(volume.acc, tmin, localCameraPos, localRayDir, tmax, not_used); + if (!hit) { return float4(0, 0, 0, 0); } + + float skip = 0; + + for (int i = 0; i < (int)_MaxSteps; i++) { + if (tmin >= tmax) break; + + float3 localPos = localCameraPos + localRayDir * tmin; + + uint dim = get_dim_coord(volume.acc, localPos); + if (dim > 1) { + 
float skip_step = 15.0; + tmin += skip_step; + skip = skip_step; + continue; + } + + float density = get_value_coord(volume.acc, localPos) * _DensityScale; + + if (density < 0.01) { + float skip_step = 5.0; + tmin += skip_step; + skip = skip_step; + continue; + } + + if (skip > 0) { + tmin -= skip * 0.8; + localPos = localCameraPos + localRayDir * tmin; + skip = 0; + } + + float sigmaS = density; + float sigmaE = max(0.000001, sigmaS); + acc_density += sigmaS; + + float shadow = volumetric_shadow(localPos, _DensityScale, volume.acc); + float3 S = sigmaS * phase_function() * shadow * float3(1, 1, 1); + float3 Sint = (S - S * exp(-sigmaE * _StepSize)) / sigmaE; + color += transmittance * Sint; + transmittance *= exp(-sigmaE * _StepSize); + + if (acc_density > 1.0) break; + + if (transmittance < 0.05) + { + transmittance = 0; + break; + } + + tmin += _StepSize; + } + + float3 final_color = (color + ambient_light) * acc_density; + final_color = pow(final_color, 1.0 / 2.2); + return float4(final_color, acc_density); +} \ No newline at end of file diff --git a/MVS/VolumeRenderer/Res/Shader/wireframe.hlsl b/MVS/VolumeRenderer/Res/Shader/wireframe.hlsl new file mode 100644 index 00000000..2b5a55f6 --- /dev/null +++ b/MVS/VolumeRenderer/Res/Shader/wireframe.hlsl @@ -0,0 +1,26 @@ +cbuffer CB0 : register(b1) +{ + float4x4 _ViewProjection; +}; + +struct VSInput +{ + float3 position : POSITION; +}; + +struct PSInput +{ + float4 position : SV_POSITION; +}; + +PSInput MainVS(VSInput input) +{ + PSInput output; + output.position = mul(_ViewProjection, float4(input.position, 1.0)); + return output; +} + +float4 MainPS(PSInput input) : SV_TARGET +{ + return float4(0.0, 1.0, 0.0, 1.0); +} \ No newline at end of file diff --git a/MVS/VolumeRenderer/StaticMeshComponent.cpp b/MVS/VolumeRenderer/StaticMeshComponent.cpp new file mode 100644 index 00000000..7f7aec70 --- /dev/null +++ b/MVS/VolumeRenderer/StaticMeshComponent.cpp @@ -0,0 +1,90 @@ +#include "StaticMeshComponent.h" +#include 
"BattleFireDirect.h" +#include + +void StaticMeshComponent::SetVertexCount(int inVertexCount) { + mVertexCount = inVertexCount; + mVertexData = new StaticMeshComponentVertexData[inVertexCount]; + memset(mVertexData, 0, sizeof(StaticMeshComponentVertexData)*inVertexCount); +} +void StaticMeshComponent::SetVertexPosition(int inIndex, float inX, float inY, float inZ, float inW /* = 1.0f */) { + mVertexData[inIndex].mPosition[0] = inX; + mVertexData[inIndex].mPosition[1] = inY; + mVertexData[inIndex].mPosition[2] = inZ; + mVertexData[inIndex].mPosition[3] = inW; +} +void StaticMeshComponent::SetVertexTexcoord(int inIndex, float inX, float inY, float inZ, float inW /* = 1.0f */) { + mVertexData[inIndex].mTexcoord[0] = inX; + mVertexData[inIndex].mTexcoord[1] = inY; + mVertexData[inIndex].mTexcoord[2] = inZ; + mVertexData[inIndex].mTexcoord[3] = inW; +} +void StaticMeshComponent::SetVertexNormal(int inIndex, float inX, float inY, float inZ, float inW /* = 1.0f */) { + mVertexData[inIndex].mNormal[0] = inX; + mVertexData[inIndex].mNormal[1] = inY; + mVertexData[inIndex].mNormal[2] = inZ; + mVertexData[inIndex].mNormal[3] = inW; +} +void StaticMeshComponent::SetVertexTangent(int inIndex, float inX, float inY, float inZ, float inW /* = 1.0f */) { + mVertexData[inIndex].mTangent[0] = inX; + mVertexData[inIndex].mTangent[1] = inY; + mVertexData[inIndex].mTangent[2] = inZ; + mVertexData[inIndex].mTangent[3] = inW; +} +void StaticMeshComponent::InitFromFile(ID3D12GraphicsCommandList* inCommandList, const char* inFilePath) { + FILE* pFile = nullptr; + errno_t err = fopen_s(&pFile, inFilePath, "rb"); + if (err == 0) { + int temp = 0; + fread(&temp, 4, 1, pFile); + mVertexCount = temp; + mVertexData = new StaticMeshComponentVertexData[mVertexCount]; + fread(mVertexData, 1, sizeof(StaticMeshComponentVertexData) * mVertexCount, pFile); + mVBO=CreateBufferObject(inCommandList,mVertexData, + sizeof(StaticMeshComponentVertexData) * mVertexCount, + 
D3D12_RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER); + mVBOView.BufferLocation = mVBO->GetGPUVirtualAddress(); + mVBOView.SizeInBytes = sizeof(StaticMeshComponentVertexData) * mVertexCount; + mVBOView.StrideInBytes = sizeof(StaticMeshComponentVertexData); + + while (!feof(pFile)) { + fread(&temp, 4, 1, pFile); + if (feof(pFile)) { + break; + } + char name[256] = {0}; + fread(name, 1, temp, pFile); + fread(&temp, 4, 1, pFile); + SubMesh* submesh = new SubMesh; + submesh->mIndexCount = temp; + unsigned int *indexes = new unsigned int[temp]; + fread(indexes, 1, sizeof(unsigned int) * temp, pFile); + submesh->mIBO = CreateBufferObject(inCommandList, indexes, + sizeof(unsigned int) * temp, + D3D12_RESOURCE_STATE_INDEX_BUFFER); + + submesh->mIBView.BufferLocation = submesh->mIBO->GetGPUVirtualAddress(); + submesh->mIBView.SizeInBytes = sizeof(unsigned int) * temp; + submesh->mIBView.Format = DXGI_FORMAT_R32_UINT; + mSubMeshes.insert(std::pair<std::string, SubMesh*>(name,submesh)); + delete[]indexes; + } + fclose(pFile); + } +} +void StaticMeshComponent::Render(ID3D12GraphicsCommandList* inCommandList) { + D3D12_VERTEX_BUFFER_VIEW vbos[] = { + mVBOView + }; + inCommandList->IASetVertexBuffers(0, 1, vbos); + if (mSubMeshes.empty()) { + inCommandList->DrawInstanced(mVertexCount, 1, 0, 0); + } + else { + for (auto iter = mSubMeshes.begin(); + iter != mSubMeshes.end(); iter++) { + inCommandList->IASetIndexBuffer(&iter->second->mIBView); + inCommandList->DrawIndexedInstanced(iter->second->mIndexCount, 1, 0, 0, 0); + } + } +} \ No newline at end of file diff --git a/MVS/VolumeRenderer/StaticMeshComponent.h b/MVS/VolumeRenderer/StaticMeshComponent.h new file mode 100644 index 00000000..b00bfbb7 --- /dev/null +++ b/MVS/VolumeRenderer/StaticMeshComponent.h @@ -0,0 +1,31 @@ +#pragma once +#include <d3d12.h> +#include <unordered_map> +#include <string> +struct StaticMeshComponentVertexData { + float mPosition[4]; + float mTexcoord[4]; + float mNormal[4]; + float mTangent[4]; +}; +struct SubMesh { + ID3D12Resource* mIBO; + 
D3D12_INDEX_BUFFER_VIEW mIBView; + int mIndexCount; +}; +class StaticMeshComponent{ +public: + ID3D12Resource* mVBO; + D3D12_VERTEX_BUFFER_VIEW mVBOView; + StaticMeshComponentVertexData* mVertexData; + int mVertexCount; + std::unordered_map<std::string, SubMesh*> mSubMeshes; + void SetVertexCount(int inVertexCount); + void SetVertexPosition(int inIndex, float inX, float inY, float inZ, float inW = 1.0f); + void SetVertexTexcoord(int inIndex, float inX, float inY, float inZ, float inW = 1.0f); + void SetVertexNormal(int inIndex, float inX, float inY, float inZ, float inW = 1.0f); + void SetVertexTangent(int inIndex, float inX, float inY, float inZ, float inW = 1.0f); + void InitFromFile(ID3D12GraphicsCommandList*inCommandList,const char* inFilePath); + void Render(ID3D12GraphicsCommandList* inCommandList); +}; + diff --git a/MVS/VolumeRenderer/Utils.cpp b/MVS/VolumeRenderer/Utils.cpp new file mode 100644 index 00000000..940108b8 --- /dev/null +++ b/MVS/VolumeRenderer/Utils.cpp @@ -0,0 +1,10 @@ +#include "Utils.h" +#include <stdlib.h> +#include <time.h> + +float srandom() { + float number = float(rand())/float(RAND_MAX);//0.0~1.0f + number *= 2.0f;//0.0~2.0 + number -= 1.0f;//-1.0f~1.0f; + return number; +} \ No newline at end of file diff --git a/MVS/VolumeRenderer/Utils.h b/MVS/VolumeRenderer/Utils.h new file mode 100644 index 00000000..ae1f8765 --- /dev/null +++ b/MVS/VolumeRenderer/Utils.h @@ -0,0 +1,3 @@ +#pragma once +float srandom();//-1.0f~1.0f + diff --git a/MVS/VolumeRenderer/counter.py b/MVS/VolumeRenderer/counter.py new file mode 100644 index 00000000..3711cea5 --- /dev/null +++ b/MVS/VolumeRenderer/counter.py @@ -0,0 +1,163 @@ +#!/usr/bin/env python3 +import os +import sys +import argparse +from pathlib import Path + +if sys.platform == "win32": + sys.stdout.reconfigure(encoding="utf-8") + sys.stderr.reconfigure(encoding="utf-8") + + +def count_lines(file_path: Path) -> int: + try: + with open(file_path, "r", encoding="utf-8", errors="ignore") as f: + return sum(1 for _ in f) + except Exception: 
+ return 0 + + +def should_include(file_path: Path, extensions: list[str]) -> bool: + if not extensions: + return True + return file_path.suffix in extensions + + +def walk_dir(root_path: Path, extensions: list[str], exclude_dirs: set[str]): + results = [] + total_lines = 0 + + for root, dirs, files in os.walk(root_path): + dirs[:] = [d for d in dirs if d not in exclude_dirs] + + rel_root = Path(root).relative_to(root_path) + indent = len(rel_root.parts) if str(rel_root) != "." else 0 + prefix = " " * indent + + if indent > 0: + print(f"{prefix}└── {rel_root.name}/") + + for i, file in enumerate(files): + file_path = Path(root) / file + + if not should_include(file_path, extensions): + continue + + lines = count_lines(file_path) + total_lines += lines + results.append((file_path, lines, indent + 1)) + + is_last = (i == len(files) - 1) and not any( + should_include(Path(root) / f, extensions) for f in dirs + ) + connector = "└──" if is_last else "├──" + + print(f"{' ' * (indent + 1)}{connector} {file} ({lines} lines)") + + return total_lines + + +def main_all(): + directories = { + ".": [".cpp", ".h", ".hlsl"], + "docs": [".md"], + } + + total_all = 0 + results = [] + + for directory, extensions in directories.items(): + root_path = Path(directory) + if not root_path.exists(): + continue + + exclude_dirs = { + ".git", + "build", + "Release", + "bin", + "__pycache__", + ".ruff_cache", + "stbi", + } + + print(f"\n{'=' * 60}") + print(f"项目文件统计: {root_path}") + print(f"后缀过滤: {extensions}") + print(f"{'=' * 60}\n") + + total = walk_dir(root_path, extensions, exclude_dirs) + results.append((directory, total)) + total_all += total + + # 单独统计 stbi + stbi_path = Path("stbi") + if stbi_path.exists(): + exclude_dirs = {".git", "__pycache__"} + print(f"\n{'=' * 60}") + print(f"项目文件统计: stbi (第三方库)") + print(f"后缀过滤: ['.cpp', '.h']") + print(f"{'=' * 60}\n") + + total = walk_dir(stbi_path, [".cpp", ".h"], exclude_dirs) + results.append(("stbi", total)) + total_all += total + + 
print(f"\n{'=' * 60}") + print("汇总统计") + print(f"{'=' * 60}") + for name, lines in results: + print(f"{name:15} {lines:>10,} 行") + print(f"{'=' * 60}") + print(f"{'总计':15} {total_all:>10,} 行") + print(f"{'=' * 60}\n") + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description="统计项目文件行数") + parser.add_argument( + "-e", "--extension", action="append", help="指定后缀名,如: .py .ts .js" + ) + parser.add_argument("-d", "--directory", default=".", help="指定子文件夹路径") + parser.add_argument( + "-x", "--exclude", action="append", default=[], help="排除的文件夹" + ) + parser.add_argument( + "--all", + action="store_true", + help="统计所有源码目录 (./docs/stbi)", + ) + + args = parser.parse_args() + + if args.all: + main_all() + else: + root_path = Path(args.directory).resolve() + extensions = args.extension + exclude_dirs = { + ".git", + "build", + "Release", + "bin", + "__pycache__", + } + exclude_dirs.update(args.exclude) + + if not root_path.exists(): + print(f"错误: 目录 {root_path} 不存在") + sys.exit(1) + + print(f"\n{'=' * 60}") + print(f"项目文件统计: {root_path}") + if extensions: + print(f"后缀过滤: {extensions}") + print(f"{'=' * 60}\n") + + total = walk_dir(root_path, extensions, exclude_dirs) + + print(f"\n{'=' * 60}") + print(f"总行数: {total}") + print(f"{'=' * 60}\n") diff --git a/MVS/VolumeRenderer/imgui.ini b/MVS/VolumeRenderer/imgui.ini new file mode 100644 index 00000000..bf18ac4f --- /dev/null +++ b/MVS/VolumeRenderer/imgui.ini @@ -0,0 +1,24 @@ +[Window][Scene View] +Pos=0,20 +Size=880,480 + +[Window][Hierarchy] +Pos=0,20 +Size=200,480 + +[Window][Inspector] +Pos=1080,20 +Size=200,480 + +[Window][Project] +Pos=0,500 +Size=640,150 + +[Window][Console] +Pos=640,500 +Size=640,150 + +[Window][Debug##Default] +Pos=60,60 +Size=400,400 + diff --git a/MVS/VolumeRenderer/main.cpp b/MVS/VolumeRenderer/main.cpp new file mode 100644 index 00000000..d7b0e66e --- /dev/null +++ b/MVS/VolumeRenderer/main.cpp @@ -0,0 +1,308 @@ +#include +#include +#include +#include 
+#include "BattleFireDirect.h" +#include "StaticMeshComponent.h" +#include "stbi/stb_image.h" +#include "Utils.h" +#include "NanoVDBLoader.h" + +#pragma comment(lib,"d3d12.lib") +#pragma comment(lib,"dxgi.lib") +#pragma comment(lib,"d3dcompiler.lib") +#pragma comment(lib,"winmm.lib") + +LPCWSTR gWindowClassName = L"BattleFire"; + +LRESULT CALLBACK WindowProc(HWND inHWND, UINT inMSG, WPARAM inWParam, LPARAM inLParam) { + switch (inMSG) { + case WM_CLOSE: + PostQuitMessage(0); + break; + } + return DefWindowProc(inHWND, inMSG, inWParam, inLParam); +} +int WINAPI wWinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPWSTR lpCmdLine, int inShowCmd) { + AttachConsole(ATTACH_PARENT_PROCESS); + freopen("CONOUT$", "w", stdout); + freopen("CONOUT$", "w", stderr); + printf("XCVolumeRenderer started\n"); + printf("Initializing D3D12...\n"); + //register + WNDCLASSEX wndClassEx; + wndClassEx.cbSize = sizeof(WNDCLASSEX); + wndClassEx.style = CS_HREDRAW | CS_VREDRAW; + wndClassEx.cbClsExtra = NULL;//class + wndClassEx.cbWndExtra = NULL;//instance + wndClassEx.hInstance = hInstance; + wndClassEx.hIcon = LoadIcon(NULL, IDI_APPLICATION); + wndClassEx.hIconSm = LoadIcon(NULL, IDI_APPLICATION); + wndClassEx.hCursor = LoadCursor(NULL, IDC_ARROW); + wndClassEx.hbrBackground = NULL; + wndClassEx.lpszMenuName = NULL; + wndClassEx.lpszClassName = gWindowClassName; + wndClassEx.lpfnWndProc = WindowProc; + if (!RegisterClassEx(&wndClassEx)) { + MessageBox(NULL, L"Register Class Failed!", L"Error", MB_OK | MB_ICONERROR); + return -1; + } + //create + int viewportWidth = 1280; + int viewportHeight = 720; + RECT rect; + rect.left = 0; + rect.top = 0; + rect.right = viewportWidth; + rect.bottom = viewportHeight; + AdjustWindowRect(&rect, WS_OVERLAPPEDWINDOW, FALSE); + int windowWidth = rect.right - rect.left; + int windowHeight = rect.bottom - rect.top; + HWND hwnd = CreateWindowEx(NULL, + gWindowClassName, + L"My Render Window", + WS_OVERLAPPEDWINDOW, + CW_USEDEFAULT, CW_USEDEFAULT, + 
		windowWidth, windowHeight,
		NULL,
		NULL,
		hInstance,
		NULL);
	if (!hwnd) {
		MessageBox(NULL, L"Create Window Failed!", L"Error", MB_OK | MB_ICONERROR);
		return -1;
	}
	// Bring up the D3D12 device/swap chain.
	// NOTE(review): hard-codes 1280x720 instead of reusing
	// viewportWidth/viewportHeight declared above -- keep them in sync.
	InitD3D12(hwnd, 1280, 720);
	printf("D3D12 initialized\n");
	ID3D12GraphicsCommandList* commandList = GetCommandList();
	ID3D12CommandAllocator* commandAllocator = GetCommandAllocator();

	// Load the NanoVDB volume; upload work is recorded on the command list.
	NanoVDBData nanoVDBData = {};
	LoadNanoVDB("Res/NanoVDB/cloud.nvdb", nanoVDBData, commandList, commandAllocator);
	printf("NanoVDB loaded: %llu bytes\n", (unsigned long long)nanoVDBData.byteSize);
	printf("NanoVDB BBox: [%.2f, %.2f, %.2f] - [%.2f, %.2f, %.2f]\n",
		nanoVDBData.worldBBox[0], nanoVDBData.worldBBox[1], nanoVDBData.worldBBox[2],
		nanoVDBData.worldBBox[3], nanoVDBData.worldBBox[4], nanoVDBData.worldBBox[5]);

	// Derived bbox center/size are currently used for logging only.
	float bboxMin[3] = { (float)nanoVDBData.worldBBox[0], (float)nanoVDBData.worldBBox[1], (float)nanoVDBData.worldBBox[2] };
	float bboxMax[3] = { (float)nanoVDBData.worldBBox[3], (float)nanoVDBData.worldBBox[4], (float)nanoVDBData.worldBBox[5] };
	float bboxCenter[3] = { (bboxMin[0] + bboxMax[0]) / 2, (bboxMin[1] + bboxMax[1]) / 2, (bboxMin[2] + bboxMax[2]) / 2 };
	float bboxSize[3] = { bboxMax[0] - bboxMin[0], bboxMax[1] - bboxMin[1], bboxMax[2] - bboxMin[2] };
	printf("BBox center: [%.2f, %.2f, %.2f], size: [%.2f, %.2f, %.2f]\n",
		bboxCenter[0], bboxCenter[1], bboxCenter[2],
		bboxSize[0], bboxSize[1], bboxSize[2]);

	// Full-screen quad (NDC xy + uv per vertex) for the volume ray-march pass.
	float quadVertices[] = {
		-1.0f, -1.0f, 0.0f, 0.0f,
		1.0f, -1.0f, 1.0f, 0.0f,
		1.0f, 1.0f, 1.0f, 1.0f,
		-1.0f, 1.0f, 0.0f, 1.0f,
	};
	uint32_t quadIndices[] = { 0, 1, 2, 0, 2, 3 };
	ID3D12Resource* quadVBO = CreateBufferObject(commandList, quadVertices, sizeof(quadVertices), D3D12_RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER);
	ID3D12Resource* quadIBO = CreateBufferObject(commandList, quadIndices, sizeof(quadIndices), D3D12_RESOURCE_STATE_INDEX_BUFFER);
	printf("Quad mesh created\n");

	StaticMeshComponent staticMeshComponent;
	staticMeshComponent.InitFromFile(commandList, "Res/Model/Sphere.lhsm");
	printf("Mesh loaded\n");

	// Root signature + VS/GS/PS pipeline for the opaque mesh pass.
	ID3D12RootSignature* rootSignature = InitRootSignature();
	printf("Root signature created\n");
	D3D12_SHADER_BYTECODE vs,gs,ps;
	CreateShaderFromFile(L"Res/Shader/gs.hlsl", "MainVS", "vs_5_1", &vs);
	CreateShaderFromFile(L"Res/Shader/gs.hlsl", "MainGS", "gs_5_1", &gs);
	CreateShaderFromFile(L"Res/Shader/gs.hlsl", "MainPS", "ps_5_1", &ps);
	ID3D12PipelineState*pso=CreatePSO(rootSignature, vs, ps, gs);
	printf("PSO created\n");

	// Separate root signature + VS/PS pipeline for the volume ray-march quad.
	ID3D12RootSignature* volumeRootSignature = InitVolumeRootSignature();
	printf("Volume root signature created\n");

	D3D12_SHADER_BYTECODE volumeVS, volumePS;
	memset(&volumeVS, 0, sizeof(volumeVS));
	memset(&volumePS, 0, sizeof(volumePS));
	CreateShaderFromFile(L"Res/Shader/volume.hlsl", "MainVS", "vs_5_1", &volumeVS);
	CreateShaderFromFile(L"Res/Shader/volume.hlsl", "MainPS", "ps_5_1", &volumePS);
	printf("Volume VS: ptr=%p, size=%zu\n", volumeVS.pShaderBytecode, volumeVS.BytecodeLength);
	printf("Volume PS: ptr=%p, size=%zu\n", volumePS.pShaderBytecode, volumePS.BytecodeLength);

	ID3D12PipelineState* quadPSO = CreateQuadPSO(volumeRootSignature, volumeVS, volumePS);
	if (!quadPSO) {
		// Volume pass is skipped later when this is null; the app still runs.
		printf("Quad PSO creation failed!\n");
	} else {
		printf("Quad PSO created: %p\n", quadPSO);
	}

	ID3D12Resource* volumeCB = CreateConstantBufferObject(65536);

	ID3D12Resource* cb = CreateConstantBufferObject(65536);//1024x64(4x4)
	// 45-degree vertical FOV, 16:9 aspect, near/far = 0.1/1000.
	DirectX::XMMATRIX projectionMatrix=DirectX::XMMatrixPerspectiveFovLH(
		(45.0f*3.141592f)/180.0f,1280.0f/720.0f,0.1f,1000.0f);
	// Camera placed far back on -Z, looking toward the volume's mid height.
	DirectX::XMMATRIX viewMatrix = DirectX::XMMatrixLookAtLH(
		DirectX::XMVectorSet(-10.0f, 300.0f, -1200.0f, 1.0f),
		DirectX::XMVectorSet(-10.0f, 73.0f, 0.0f, 1.0f),
		DirectX::XMVectorSet(0.0f, 1.0f, 0.0f, 1.0f));
	DirectX::XMMATRIX modelMatrix = DirectX::XMMatrixTranslation(0.0f,0.0f,5.0f);
	//modelMatrix *= DirectX::XMMatrixRotationZ(90.0f*3.141592f/180.0f);
DirectX::XMFLOAT4X4 tempMatrix; + float matrices[64]; + + DirectX::XMStoreFloat4x4(&tempMatrix, projectionMatrix); + memcpy(matrices, &tempMatrix, sizeof(float) * 16); + DirectX::XMStoreFloat4x4(&tempMatrix, viewMatrix); + memcpy(matrices+16, &tempMatrix, sizeof(float) * 16); + DirectX::XMStoreFloat4x4(&tempMatrix, modelMatrix); + memcpy(matrices + 32, &tempMatrix, sizeof(float) * 16);; + DirectX::XMVECTOR determinant; + DirectX::XMMATRIX inverseModelMatrix = DirectX::XMMatrixInverse(&determinant, modelMatrix); + if (DirectX::XMVectorGetX(determinant) != 0.0f) { + DirectX::XMMATRIX normalMatrix = DirectX::XMMatrixTranspose(inverseModelMatrix); + DirectX::XMStoreFloat4x4(&tempMatrix, modelMatrix); + memcpy(matrices + 48, &tempMatrix, sizeof(float) * 16);; + } + UpdateConstantBuffer(cb, matrices, sizeof(float) * 64); + + + ID3D12Resource* sb = CreateConstantBufferObject(65536);//1024x64(4x4) + struct MaterialData { + float r; + }; + MaterialData* materialDatas = new MaterialData[3000]; + for (int i=0;i<3000;i++){ + materialDatas[i].r = srandom() * 0.1f + 0.1f;//0.0~1.0 + } + UpdateConstantBuffer(sb, materialDatas, sizeof(MaterialData) * 3000); + + int imageWidth, imageHeight,imageChannel; + stbi_uc* pixels = stbi_load("Res/Image/earth_d.jpg", &imageWidth, &imageHeight, &imageChannel, 4); + printf("Texture loaded: %dx%d\n", imageWidth, imageHeight); + ID3D12Resource* texture = CreateTexture2D(commandList, pixels, + imageWidth * imageHeight * imageChannel, imageWidth, imageHeight,DXGI_FORMAT_R8G8B8A8_UNORM); + delete[]pixels; + ID3D12Device* d3dDevice = GetD3DDevice(); + + ID3D12DescriptorHeap* srvHeap = nullptr; + D3D12_DESCRIPTOR_HEAP_DESC d3dDescriptorHeapDescSRV = {}; + d3dDescriptorHeapDescSRV.NumDescriptors = 3; + d3dDescriptorHeapDescSRV.Type = D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV; + d3dDescriptorHeapDescSRV.Flags = D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE; + d3dDevice->CreateDescriptorHeap(&d3dDescriptorHeapDescSRV, IID_PPV_ARGS(&srvHeap)); + + 
	ID3D12DescriptorHeap* descriptorHeaps[] = {srvHeap};

	// SRV for the earth texture in heap slot 0 (slots 1-2 remain unwritten).
	D3D12_SHADER_RESOURCE_VIEW_DESC srvDesc = {};
	srvDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
	srvDesc.Shader4ComponentMapping = D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING;
	srvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE2D;
	srvDesc.Texture2D.MipLevels = 1;

	D3D12_CPU_DESCRIPTOR_HANDLE srvHeapPtr = srvHeap->GetCPUDescriptorHandleForHeapStart();
	d3dDevice->CreateShaderResourceView(texture, &srvDesc, srvHeapPtr);
	// Advance to the next heap slot; nothing else is created there yet.
	srvHeapPtr.ptr += d3dDevice->GetDescriptorHandleIncrementSize(D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);

	// Submit all upload work recorded so far and block until the GPU is done.
	EndCommandList();
	WaitForCompletionOfCommandList();
	printf("All resources uploaded to GPU\n");

	ShowWindow(hwnd, inShowCmd);
	UpdateWindow(hwnd);
	printf("Window shown, entering render loop\n");
	// Fed to root-constant slot 1; .x is overwritten each frame with elapsed
	// seconds (grows unbounded -- presumably the shader wraps it; verify).
	float color[] = {0.5f,0.5f,0.5f,1.0f};
	MSG msg;
	DWORD last_time = timeGetTime();// winmm millisecond clock
	DWORD appStartTime = last_time;
	int frameCount = 0;
	while (true){
		ZeroMemory(&msg, sizeof(MSG));
		if (PeekMessage(&msg,NULL,0,0,PM_REMOVE)) {
			if (msg.message == WM_QUIT) {
				break;
			}
			TranslateMessage(&msg);
			DispatchMessage(&msg);
		} else {
			// Rendering branch: wait for the previous frame before recycling
			// the single command allocator.
			WaitForCompletionOfCommandList();
			DWORD current_time = timeGetTime();//ms
			DWORD frameTime = current_time - last_time;
			DWORD timeSinceAppStartInMS = current_time - appStartTime;
			last_time = current_time;
			float frameTimeInSecond = float(frameTime) / 1000.0f;//second
			float timeSinceAppStartInSecond = float(timeSinceAppStartInMS) / 1000.0f;
			color[0] = timeSinceAppStartInSecond;
			commandAllocator->Reset();
			commandList->Reset(commandAllocator, nullptr);
			BeginRenderToSwapChain(commandList);
			// Draw the opaque mesh pass first.
			commandList->SetPipelineState(pso);
			commandList->SetGraphicsRootSignature(rootSignature);
			commandList->SetDescriptorHeaps(_countof(descriptorHeaps),descriptorHeaps);
			commandList->SetGraphicsRootConstantBufferView(0, cb->GetGPUVirtualAddress());
			commandList->SetGraphicsRoot32BitConstants(1, 4, color, 0);
			commandList->SetGraphicsRootDescriptorTable(2, srvHeap->GetGPUDescriptorHandleForHeapStart());
			commandList->SetGraphicsRootShaderResourceView(3, sb->GetGPUVirtualAddress());
			commandList->IASetPrimitiveTopology(D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
			// One-shot debug dump on the second frame only.
			if (frameCount == 1) {
				printf("Drawing mesh with %d vertices, VBO=%p, subMeshes=%zu\n",
					staticMeshComponent.mVertexCount, staticMeshComponent.mVBO, staticMeshComponent.mSubMeshes.size());
			}
			staticMeshComponent.Render(commandList);

			// Volume pass constants: the inverse view-projection lets the
			// pixel shader reconstruct world-space rays from screen UVs.
			DirectX::XMMATRIX viewProj = viewMatrix * projectionMatrix;
			DirectX::XMMATRIX invViewProj = DirectX::XMMatrixInverse(nullptr, viewProj);
			DirectX::XMFLOAT4X4 invViewProjMat;
			DirectX::XMStoreFloat4x4(&invViewProjMat, invViewProj);

			// Constant-buffer layout (36 floats / 9 float4s):
			// [0..15] InvViewProj, [16..18] CameraPos (duplicates the eye
			// passed to XMMatrixLookAtLH -- keep in sync), [19] DensityScale,
			// [20..22] BBoxMin, [23] StepSize, [24..26] BBoxMax, [27] MaxSteps,
			// [28] RotationY, [29..31] padding, [32..34] LightDir,
			// [35] LightSamples.
			float volumeCBData[36];
			memcpy(volumeCBData, &invViewProjMat, sizeof(float) * 16);
			volumeCBData[16] = -10.0f;
			volumeCBData[17] = 300.0f;
			volumeCBData[18] = -1200.0f;
			volumeCBData[19] = 0.2f; // DensityScale
			volumeCBData[20] = (float)nanoVDBData.worldBBox[0];
			volumeCBData[21] = (float)nanoVDBData.worldBBox[1];
			volumeCBData[22] = (float)nanoVDBData.worldBBox[2];
			volumeCBData[23] = 1.0f; // StepSize
			volumeCBData[24] = (float)nanoVDBData.worldBBox[3];
			volumeCBData[25] = (float)nanoVDBData.worldBBox[4];
			volumeCBData[26] = (float)nanoVDBData.worldBBox[5];
			volumeCBData[27] = 2000.0f; // MaxSteps as float
			volumeCBData[28] = timeSinceAppStartInSecond * 0.3f; // RotationY
			volumeCBData[29] = 0.0f; // Pad0
			volumeCBData[30] = 0.0f; // Pad1
			volumeCBData[31] = 0.0f; // Pad2
			volumeCBData[32] = 0.5f; // LightDir X
			volumeCBData[33] = 0.8f; // LightDir Y
			volumeCBData[34] = 0.3f; // LightDir Z
			volumeCBData[35] = 8.0f; // LightSamples as float
			UpdateConstantBuffer(volumeCB, volumeCBData, sizeof(float) * 36);
			if (frameCount == 1) {
				printf("Volume BBox: [%.2f, %.2f, %.2f] - [%.2f, %.2f, %.2f]\n",
					volumeCBData[20], volumeCBData[21], volumeCBData[22],
					volumeCBData[24], volumeCBData[25], volumeCBData[26]);
			}

			if
(quadPSO) { + commandList->SetPipelineState(quadPSO); + commandList->SetGraphicsRootSignature(volumeRootSignature); + commandList->SetGraphicsRootConstantBufferView(0, volumeCB->GetGPUVirtualAddress()); + commandList->SetGraphicsRootShaderResourceView(1, nanoVDBData.gpuBuffer->GetGPUVirtualAddress()); + commandList->IASetPrimitiveTopology(D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST); + commandList->IASetVertexBuffers(0, 1, &D3D12_VERTEX_BUFFER_VIEW{ quadVBO->GetGPUVirtualAddress(), sizeof(quadVertices), sizeof(float) * 4 }); + commandList->IASetIndexBuffer(&D3D12_INDEX_BUFFER_VIEW{ quadIBO->GetGPUVirtualAddress(), sizeof(quadIndices), DXGI_FORMAT_R32_UINT }); + commandList->DrawIndexedInstanced(6, 1, 0, 0, 0); + } + + EndRenderToSwapChain(commandList); + EndCommandList(); + SwapD3D12Buffers(); + frameCount++; + if (frameCount <= 3) { + printf("Frame %d rendered\n", frameCount); + } + } + } + return 0; +} \ No newline at end of file diff --git a/MVS/VolumeRenderer/run.bat b/MVS/VolumeRenderer/run.bat new file mode 100644 index 00000000..071dc800 --- /dev/null +++ b/MVS/VolumeRenderer/run.bat @@ -0,0 +1,4 @@ +@echo off +cd /d "%~dp0" +Release\XCVolumeRenderer.exe +pause \ No newline at end of file diff --git a/MVS/VolumeRenderer/stbi/stb_image.cpp b/MVS/VolumeRenderer/stbi/stb_image.cpp new file mode 100644 index 00000000..badb3ef4 --- /dev/null +++ b/MVS/VolumeRenderer/stbi/stb_image.cpp @@ -0,0 +1,2 @@ +#define STB_IMAGE_IMPLEMENTATION +#include "stb_image.h" \ No newline at end of file diff --git a/MVS/VolumeRenderer/stbi/stb_image.h b/MVS/VolumeRenderer/stbi/stb_image.h new file mode 100644 index 00000000..bf44a3ad --- /dev/null +++ b/MVS/VolumeRenderer/stbi/stb_image.h @@ -0,0 +1,7194 @@ +/* stb_image - v2.14 - public domain image loader - http://nothings.org/stb_image.h +no warranty implied; use at your own risk + +Do this: +#define STB_IMAGE_IMPLEMENTATION +before you include this file in *one* C or C++ file to create the implementation. + +// i.e. 
it should look like this: +#include ... +#include ... +#include ... +#define STB_IMAGE_IMPLEMENTATION +#include "stb_image.h" + +You can #define STBI_ASSERT(x) before the #include to avoid using assert.h. +And #define STBI_MALLOC, STBI_REALLOC, and STBI_FREE to avoid using malloc,realloc,free + + +QUICK NOTES: +Primarily of interest to game developers and other people who can +avoid problematic images and only need the trivial interface + +JPEG baseline & progressive (12 bpc/arithmetic not supported, same as stock IJG lib) +PNG 1/2/4/8-bit-per-channel (16 bpc not supported) + +TGA (not sure what subset, if a subset) +BMP non-1bpp, non-RLE +PSD (composited view only, no extra channels, 8/16 bit-per-channel) + +GIF (*comp always reports as 4-channel) +HDR (radiance rgbE format) +PIC (Softimage PIC) +PNM (PPM and PGM binary only) + +Animated GIF still needs a proper API, but here's one way to do it: +http://gist.github.com/urraka/685d9a6340b26b830d49 + +- decode from memory or through FILE (define STBI_NO_STDIO to remove code) +- decode from arbitrary I/O callbacks +- SIMD acceleration on x86/x64 (SSE2) and ARM (NEON) + +Full documentation under "DOCUMENTATION" below. + + +Revision 2.00 release notes: + +- Progressive JPEG is now supported. + +- PPM and PGM binary formats are now supported, thanks to Ken Miller. + +- x86 platforms now make use of SSE2 SIMD instructions for +JPEG decoding, and ARM platforms can use NEON SIMD if requested. +This work was done by Fabian "ryg" Giesen. SSE2 is used by +default, but NEON must be enabled explicitly; see docs. + +With other JPEG optimizations included in this version, we see +2x speedup on a JPEG on an x86 machine, and a 1.5x speedup +on a JPEG on an ARM machine, relative to previous versions of this +library. The same results will not obtain for all JPGs and for all +x86/ARM machines. (Note that progressive JPEGs are significantly +slower to decode than regular JPEGs.) 
This doesn't mean that this +is the fastest JPEG decoder in the land; rather, it brings it +closer to parity with standard libraries. If you want the fastest +decode, look elsewhere. (See "Philosophy" section of docs below.) + +See final bullet items below for more info on SIMD. + +- Added STBI_MALLOC, STBI_REALLOC, and STBI_FREE macros for replacing +the memory allocator. Unlike other STBI libraries, these macros don't +support a context parameter, so if you need to pass a context into +the allocator, you'll have to store it in a global or a thread-local +variable. + +- Split existing STBI_NO_HDR flag into two flags, STBI_NO_HDR and +STBI_NO_LINEAR. +STBI_NO_HDR: suppress implementation of .hdr reader format +STBI_NO_LINEAR: suppress high-dynamic-range light-linear float API + +- You can suppress implementation of any of the decoders to reduce +your code footprint by #defining one or more of the following +symbols before creating the implementation. + +STBI_NO_JPEG +STBI_NO_PNG +STBI_NO_BMP +STBI_NO_PSD +STBI_NO_TGA +STBI_NO_GIF +STBI_NO_HDR +STBI_NO_PIC +STBI_NO_PNM (.ppm and .pgm) + +- You can request *only* certain decoders and suppress all other ones +(this will be more forward-compatible, as addition of new decoders +doesn't require you to disable them explicitly): + +STBI_ONLY_JPEG +STBI_ONLY_PNG +STBI_ONLY_BMP +STBI_ONLY_PSD +STBI_ONLY_TGA +STBI_ONLY_GIF +STBI_ONLY_HDR +STBI_ONLY_PIC +STBI_ONLY_PNM (.ppm and .pgm) + +Note that you can define multiples of these, and you will get all +of them ("only x" and "only y" is interpreted to mean "only x&y"). + +- If you use STBI_NO_PNG (or _ONLY_ without PNG), and you still +want the zlib decoder to be available, #define STBI_SUPPORT_ZLIB + +- Compilation of all SIMD code can be suppressed with +#define STBI_NO_SIMD +It should not be necessary to disable SIMD unless you have issues +compiling (e.g. 
using an x86 compiler which doesn't support SSE +intrinsics or that doesn't support the method used to detect +SSE2 support at run-time), and even those can be reported as +bugs so I can refine the built-in compile-time checking to be +smarter. + +- The old STBI_SIMD system which allowed installing a user-defined +IDCT etc. has been removed. If you need this, don't upgrade. My +assumption is that almost nobody was doing this, and those who +were will find the built-in SIMD more satisfactory anyway. + +- RGB values computed for JPEG images are slightly different from +previous versions of stb_image. (This is due to using less +integer precision in SIMD.) The C code has been adjusted so +that the same RGB values will be computed regardless of whether +SIMD support is available, so your app should always produce +consistent results. But these results are slightly different from +previous versions. (Specifically, about 3% of available YCbCr values +will compute different RGB results from pre-1.49 versions by +-1; +most of the deviating values are one smaller in the G channel.) + +- If you must produce consistent results with previous versions of +stb_image, #define STBI_JPEG_OLD and you will get the same results +you used to; however, you will not get the SIMD speedups for +the YCbCr-to-RGB conversion step (although you should still see +significant JPEG speedup from the other changes). + +Please note that STBI_JPEG_OLD is a temporary feature; it will be +removed in future versions of the library. It is only intended for +near-term back-compatibility use. 
+ + +Latest revision history: +2.13 (2016-12-04) experimental 16-bit API, only for PNG so far; fixes +2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes +2.11 (2016-04-02) 16-bit PNGS; enable SSE2 in non-gcc x64 +RGB-format JPEG; remove white matting in PSD; +allocate large structures on the stack; +correct channel count for PNG & BMP +2.10 (2016-01-22) avoid warning introduced in 2.09 +2.09 (2016-01-16) 16-bit TGA; comments in PNM files; STBI_REALLOC_SIZED +2.08 (2015-09-13) fix to 2.07 cleanup, reading RGB PSD as RGBA +2.07 (2015-09-13) partial animated GIF support +limited 16-bit PSD support +minor bugs, code cleanup, and compiler warnings + +See end of file for full revision history. + + +============================ Contributors ========================= + +Image formats Extensions, features +Sean Barrett (jpeg, png, bmp) Jetro Lauha (stbi_info) +Nicolas Schulz (hdr, psd) Martin "SpartanJ" Golini (stbi_info) +Jonathan Dummer (tga) James "moose2000" Brown (iPhone PNG) +Jean-Marc Lienher (gif) Ben "Disch" Wenger (io callbacks) +Tom Seddon (pic) Omar Cornut (1/2/4-bit PNG) +Thatcher Ulrich (psd) Nicolas Guillemot (vertical flip) +Ken Miller (pgm, ppm) Richard Mitton (16-bit PSD) +github:urraka (animated gif) Junggon Kim (PNM comments) +Daniel Gibson (16-bit TGA) +socks-the-fox (16-bit TGA) +Optimizations & bugfixes +Fabian "ryg" Giesen +Arseny Kapoulkine + +Bug & warning fixes +Marc LeBlanc David Woo Guillaume George Martins Mozeiko +Christpher Lloyd Martin Golini Jerry Jansson Joseph Thomson +Dave Moore Roy Eltham Hayaki Saito Phil Jordan +Won Chun Luke Graham Johan Duparc Nathan Reed +the Horde3D community Thomas Ruf Ronny Chevalier Nick Verigakis +Janez Zemva John Bartholomew Michal Cichon github:svdijk +Jonathan Blow Ken Hamada Tero Hanninen Baldur Karlsson +Laurent Gomila Cort Stratton Sergio Gonzalez github:romigrou +Aruelien Pocheville Thibault Reuille Cass Everitt Matthew Gregan +Ryamond Barbiero Paul Du Bois Engin Manap github:snagar 
+Michaelangel007@github Oriol Ferrer Mesia Dale Weiler github:Zelex +Philipp Wiesemann Josh Tobin github:rlyeh github:grim210@github +Blazej Dariusz Roszkowski github:sammyhw + + +LICENSE + +This software is dual-licensed to the public domain and under the following +license: you are granted a perpetual, irrevocable license to copy, modify, +publish, and distribute this file as you see fit. + +*/ + +#ifndef STBI_INCLUDE_STB_IMAGE_H +#define STBI_INCLUDE_STB_IMAGE_H + +// DOCUMENTATION +// +// Limitations: +// - no 16-bit-per-channel PNG +// - no 12-bit-per-channel JPEG +// - no JPEGs with arithmetic coding +// - no 1-bit BMP +// - GIF always returns *comp=4 +// +// Basic usage (see HDR discussion below for HDR usage): +// int x,y,n; +// unsigned char *data = stbi_load(filename, &x, &y, &n, 0); +// // ... process data if not NULL ... +// // ... x = width, y = height, n = # 8-bit components per pixel ... +// // ... replace '0' with '1'..'4' to force that many components per pixel +// // ... but 'n' will always be the number that it would have been if you said 0 +// stbi_image_free(data) +// +// Standard parameters: +// int *x -- outputs image width in pixels +// int *y -- outputs image height in pixels +// int *channels_in_file -- outputs # of image components in image file +// int desired_channels -- if non-zero, # of image components requested in result +// +// The return value from an image loader is an 'unsigned char *' which points +// to the pixel data, or NULL on an allocation failure or if the image is +// corrupt or invalid. The pixel data consists of *y scanlines of *x pixels, +// with each pixel consisting of N interleaved 8-bit components; the first +// pixel pointed to is top-left-most in the image. There is no padding between +// image scanlines or between pixels, regardless of format. The number of +// components N is 'req_comp' if req_comp is non-zero, or *comp otherwise. 
+// If req_comp is non-zero, *comp has the number of components that _would_ +// have been output otherwise. E.g. if you set req_comp to 4, you will always +// get RGBA output, but you can check *comp to see if it's trivially opaque +// because e.g. there were only 3 channels in the source image. +// +// An output image with N components has the following components interleaved +// in this order in each pixel: +// +// N=#comp components +// 1 grey +// 2 grey, alpha +// 3 red, green, blue +// 4 red, green, blue, alpha +// +// If image loading fails for any reason, the return value will be NULL, +// and *x, *y, *comp will be unchanged. The function stbi_failure_reason() +// can be queried for an extremely brief, end-user unfriendly explanation +// of why the load failed. Define STBI_NO_FAILURE_STRINGS to avoid +// compiling these strings at all, and STBI_FAILURE_USERMSG to get slightly +// more user-friendly ones. +// +// Paletted PNG, BMP, GIF, and PIC images are automatically depalettized. +// +// =========================================================================== +// +// Philosophy +// +// stb libraries are designed with the following priorities: +// +// 1. easy to use +// 2. easy to maintain +// 3. good performance +// +// Sometimes I let "good performance" creep up in priority over "easy to maintain", +// and for best performance I may provide less-easy-to-use APIs that give higher +// performance, in addition to the easy to use ones. Nevertheless, it's important +// to keep in mind that from the standpoint of you, a client of this library, +// all you care about is #1 and #3, and stb libraries do not emphasize #3 above all. +// +// Some secondary priorities arise directly from the first two, some of which +// make more explicit reasons why performance can't be emphasized. 
+// +// - Portable ("ease of use") +// - Small footprint ("easy to maintain") +// - No dependencies ("ease of use") +// +// =========================================================================== +// +// I/O callbacks +// +// I/O callbacks allow you to read from arbitrary sources, like packaged +// files or some other source. Data read from callbacks are processed +// through a small internal buffer (currently 128 bytes) to try to reduce +// overhead. +// +// The three functions you must define are "read" (reads some bytes of data), +// "skip" (skips some bytes of data), "eof" (reports if the stream is at the end). +// +// =========================================================================== +// +// SIMD support +// +// The JPEG decoder will try to automatically use SIMD kernels on x86 when +// supported by the compiler. For ARM Neon support, you must explicitly +// request it. +// +// (The old do-it-yourself SIMD API is no longer supported in the current +// code.) +// +// On x86, SSE2 will automatically be used when available based on a run-time +// test; if not, the generic C versions are used as a fall-back. On ARM targets, +// the typical path is to have separate builds for NEON and non-NEON devices +// (at least this is true for iOS and Android). Therefore, the NEON support is +// toggled by a build flag: define STBI_NEON to get NEON loops. +// +// The output of the JPEG decoder is slightly different from versions where +// SIMD support was introduced (that is, for versions before 1.49). The +// difference is only +-1 in the 8-bit RGB channels, and only on a small +// fraction of pixels. You can force the pre-1.49 behavior by defining +// STBI_JPEG_OLD, but this will disable some of the SIMD decoding path +// and hence cost some performance. +// +// If for some reason you do not want to use any of SIMD code, or if +// you have issues compiling it, you can disable it entirely by +// defining STBI_NO_SIMD. 
+// +// =========================================================================== +// +// HDR image support (disable by defining STBI_NO_HDR) +// +// stb_image now supports loading HDR images in general, and currently +// the Radiance .HDR file format, although the support is provided +// generically. You can still load any file through the existing interface; +// if you attempt to load an HDR file, it will be automatically remapped to +// LDR, assuming gamma 2.2 and an arbitrary scale factor defaulting to 1; +// both of these constants can be reconfigured through this interface: +// +// stbi_hdr_to_ldr_gamma(2.2f); +// stbi_hdr_to_ldr_scale(1.0f); +// +// (note, do not use _inverse_ constants; stbi_image will invert them +// appropriately). +// +// Additionally, there is a new, parallel interface for loading files as +// (linear) floats to preserve the full dynamic range: +// +// float *data = stbi_loadf(filename, &x, &y, &n, 0); +// +// If you load LDR images through this interface, those images will +// be promoted to floating point values, run through the inverse of +// constants corresponding to the above: +// +// stbi_ldr_to_hdr_scale(1.0f); +// stbi_ldr_to_hdr_gamma(2.2f); +// +// Finally, given a filename (or an open file or memory block--see header +// file for details) containing image data, you can query for the "most +// appropriate" interface to use (that is, whether the image is HDR or +// not), using: +// +// stbi_is_hdr(char *filename); +// +// =========================================================================== +// +// iPhone PNG support: +// +// By default we convert iphone-formatted PNGs back to RGB, even though +// they are internally encoded differently. You can disable this conversion +// by by calling stbi_convert_iphone_png_to_rgb(0), in which case +// you will always just get the native iphone "format" through (which +// is BGR stored in RGB). 
+// +// Call stbi_set_unpremultiply_on_load(1) as well to force a divide per +// pixel to remove any premultiplied alpha *only* if the image file explicitly +// says there's premultiplied data (currently only happens in iPhone images, +// and only if iPhone convert-to-rgb processing is on). +// + + +#ifndef STBI_NO_STDIO +#include +#endif // STBI_NO_STDIO + +#define STBI_VERSION 1 + +enum +{ + STBI_default = 0, // only used for req_comp + + STBI_grey = 1, + STBI_grey_alpha = 2, + STBI_rgb = 3, + STBI_rgb_alpha = 4 +}; + +typedef unsigned char stbi_uc; +typedef unsigned short stbi_us; + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef STB_IMAGE_STATIC +#define STBIDEF static +#else +#define STBIDEF extern +#endif + + ////////////////////////////////////////////////////////////////////////////// + // + // PRIMARY API - works on images of any type + // + + // + // load image by filename, open file, or memory buffer + // + + typedef struct + { + int(*read) (void *user, char *data, int size); // fill 'data' with 'size' bytes. 
return number of bytes actually read + void(*skip) (void *user, int n); // skip the next 'n' bytes, or 'unget' the last -n bytes if negative + int(*eof) (void *user); // returns nonzero if we are at end of file/data + } stbi_io_callbacks; + + //////////////////////////////////// + // + // 8-bits-per-channel interface + // + + STBIDEF stbi_uc *stbi_load(char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF stbi_uc *stbi_load_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels); + +#ifndef STBI_NO_STDIO + STBIDEF stbi_uc *stbi_load_from_file(FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); + // for stbi_load_from_file, file pointer is left pointing immediately after image +#endif + + //////////////////////////////////// + // + // 16-bits-per-channel interface + // + + STBIDEF stbi_us *stbi_load_16(char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); +#ifndef STBI_NO_STDIO + STBIDEF stbi_us *stbi_load_from_file_16(FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); +#endif + // @TODO the other variants + + //////////////////////////////////// + // + // float-per-channel interface + // +#ifndef STBI_NO_LINEAR + STBIDEF float *stbi_loadf(char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF float *stbi_loadf_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels); + STBIDEF float *stbi_loadf_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels); + +#ifndef STBI_NO_STDIO + STBIDEF float *stbi_loadf_from_file(FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); +#endif +#endif + +#ifndef 
STBI_NO_HDR + STBIDEF void stbi_hdr_to_ldr_gamma(float gamma); + STBIDEF void stbi_hdr_to_ldr_scale(float scale); +#endif // STBI_NO_HDR + +#ifndef STBI_NO_LINEAR + STBIDEF void stbi_ldr_to_hdr_gamma(float gamma); + STBIDEF void stbi_ldr_to_hdr_scale(float scale); +#endif // STBI_NO_LINEAR + + // stbi_is_hdr is always defined, but always returns false if STBI_NO_HDR + STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user); + STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len); +#ifndef STBI_NO_STDIO + STBIDEF int stbi_is_hdr(char const *filename); + STBIDEF int stbi_is_hdr_from_file(FILE *f); +#endif // STBI_NO_STDIO + + + // get a VERY brief reason for failure + // NOT THREADSAFE + STBIDEF const char *stbi_failure_reason(void); + + // free the loaded image -- this is just free() + STBIDEF void stbi_image_free(void *retval_from_stbi_load); + + // get image dimensions & components without fully decoding + STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp); + STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp); + +#ifndef STBI_NO_STDIO + STBIDEF int stbi_info(char const *filename, int *x, int *y, int *comp); + STBIDEF int stbi_info_from_file(FILE *f, int *x, int *y, int *comp); + +#endif + + + + // for image formats that explicitly notate that they have premultiplied alpha, + // we just return the colors as stored in the file. set this flag to force + // unpremultiplication. results are undefined if the unpremultiply overflow. 
+ STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply); + + // indicate whether we should process iphone images back to canonical format, + // or just pass them through "as-is" + STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert); + + // flip the image vertically, so the first pixel in the output array is the bottom left + STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip); + + // ZLIB client - used by PNG, available for other purposes + + STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen); + STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header); + STBIDEF char *stbi_zlib_decode_malloc(const char *buffer, int len, int *outlen); + STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, const char *ibuffer, int ilen); + + STBIDEF char *stbi_zlib_decode_noheader_malloc(const char *buffer, int len, int *outlen); + STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen); + + +#ifdef __cplusplus +} +#endif + +// +// +//// end header file ///////////////////////////////////////////////////// +#endif // STBI_INCLUDE_STB_IMAGE_H + +#ifdef STB_IMAGE_IMPLEMENTATION + +#if defined(STBI_ONLY_JPEG) || defined(STBI_ONLY_PNG) || defined(STBI_ONLY_BMP) \ + || defined(STBI_ONLY_TGA) || defined(STBI_ONLY_GIF) || defined(STBI_ONLY_PSD) \ + || defined(STBI_ONLY_HDR) || defined(STBI_ONLY_PIC) || defined(STBI_ONLY_PNM) \ + || defined(STBI_ONLY_ZLIB) +#ifndef STBI_ONLY_JPEG +#define STBI_NO_JPEG +#endif +#ifndef STBI_ONLY_PNG +#define STBI_NO_PNG +#endif +#ifndef STBI_ONLY_BMP +#define STBI_NO_BMP +#endif +#ifndef STBI_ONLY_PSD +#define STBI_NO_PSD +#endif +#ifndef STBI_ONLY_TGA +#define STBI_NO_TGA +#endif +#ifndef STBI_ONLY_GIF +#define STBI_NO_GIF +#endif +#ifndef STBI_ONLY_HDR +#define STBI_NO_HDR +#endif +#ifndef 
STBI_ONLY_PIC +#define STBI_NO_PIC +#endif +#ifndef STBI_ONLY_PNM +#define STBI_NO_PNM +#endif +#endif + +#if defined(STBI_NO_PNG) && !defined(STBI_SUPPORT_ZLIB) && !defined(STBI_NO_ZLIB) +#define STBI_NO_ZLIB +#endif + + +#include +#include // ptrdiff_t on osx +#include +#include +#include + +#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) +#include // ldexp +#endif + +#ifndef STBI_NO_STDIO +#include +#endif + +#ifndef STBI_ASSERT +#include +#define STBI_ASSERT(x) assert(x) +#endif + + +#ifndef _MSC_VER +#ifdef __cplusplus +#define stbi_inline inline +#else +#define stbi_inline +#endif +#else +#define stbi_inline __forceinline +#endif + + +#ifdef _MSC_VER +typedef unsigned short stbi__uint16; +typedef signed short stbi__int16; +typedef unsigned int stbi__uint32; +typedef signed int stbi__int32; +#else +#include +typedef uint16_t stbi__uint16; +typedef int16_t stbi__int16; +typedef uint32_t stbi__uint32; +typedef int32_t stbi__int32; +#endif + +// should produce compiler error if size is wrong +typedef unsigned char validate_uint32[sizeof(stbi__uint32) == 4 ? 1 : -1]; + +#ifdef _MSC_VER +#define STBI_NOTUSED(v) (void)(v) +#else +#define STBI_NOTUSED(v) (void)sizeof(v) +#endif + +#ifdef _MSC_VER +#define STBI_HAS_LROTL +#endif + +#ifdef STBI_HAS_LROTL +#define stbi_lrot(x,y) _lrotl(x,y) +#else +#define stbi_lrot(x,y) (((x) << (y)) | ((x) >> (32 - (y)))) +#endif + +#if defined(STBI_MALLOC) && defined(STBI_FREE) && (defined(STBI_REALLOC) || defined(STBI_REALLOC_SIZED)) +// ok +#elif !defined(STBI_MALLOC) && !defined(STBI_FREE) && !defined(STBI_REALLOC) && !defined(STBI_REALLOC_SIZED) +// ok +#else +#error "Must define all or none of STBI_MALLOC, STBI_FREE, and STBI_REALLOC (or STBI_REALLOC_SIZED)." 
+#endif + +#ifndef STBI_MALLOC +#define STBI_MALLOC(sz) malloc(sz) +#define STBI_REALLOC(p,newsz) realloc(p,newsz) +#define STBI_FREE(p) free(p) +#endif + +#ifndef STBI_REALLOC_SIZED +#define STBI_REALLOC_SIZED(p,oldsz,newsz) STBI_REALLOC(p,newsz) +#endif + +// x86/x64 detection +#if defined(__x86_64__) || defined(_M_X64) +#define STBI__X64_TARGET +#elif defined(__i386) || defined(_M_IX86) +#define STBI__X86_TARGET +#endif + +#if defined(__GNUC__) && (defined(STBI__X86_TARGET) || defined(STBI__X64_TARGET)) && !defined(__SSE2__) && !defined(STBI_NO_SIMD) +// NOTE: not clear do we actually need this for the 64-bit path? +// gcc doesn't support sse2 intrinsics unless you compile with -msse2, +// (but compiling with -msse2 allows the compiler to use SSE2 everywhere; +// this is just broken and gcc are jerks for not fixing it properly +// http://www.virtualdub.org/blog/pivot/entry.php?id=363 ) +#define STBI_NO_SIMD +#endif + +#if defined(__MINGW32__) && defined(STBI__X86_TARGET) && !defined(STBI_MINGW_ENABLE_SSE2) && !defined(STBI_NO_SIMD) +// Note that __MINGW32__ doesn't actually mean 32-bit, so we have to avoid STBI__X64_TARGET +// +// 32-bit MinGW wants ESP to be 16-byte aligned, but this is not in the +// Windows ABI and VC++ as well as Windows DLLs don't maintain that invariant. +// As a result, enabling SSE2 on 32-bit MinGW is dangerous when not +// simultaneously enabling "-mstackrealign". +// +// See https://github.com/nothings/stb/issues/81 for more information. +// +// So default to no SSE2 on 32-bit MinGW. If you've read this far and added +// -mstackrealign to your build settings, feel free to #define STBI_MINGW_ENABLE_SSE2. 
+#define STBI_NO_SIMD +#endif + +#if !defined(STBI_NO_SIMD) && (defined(STBI__X86_TARGET) || defined(STBI__X64_TARGET)) +#define STBI_SSE2 +#include + +#ifdef _MSC_VER + +#if _MSC_VER >= 1400 // not VC6 +#include // __cpuid +static int stbi__cpuid3(void) +{ + int info[4]; + __cpuid(info, 1); + return info[3]; +} +#else +static int stbi__cpuid3(void) +{ + int res; + __asm { + mov eax, 1 + cpuid + mov res, edx + } + return res; +} +#endif + +#define STBI_SIMD_ALIGN(type, name) __declspec(align(16)) type name + +static int stbi__sse2_available() +{ + int info3 = stbi__cpuid3(); + return ((info3 >> 26) & 1) != 0; +} +#else // assume GCC-style if not VC++ +#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16))) + +static int stbi__sse2_available() +{ +#if defined(__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__) >= 408 // GCC 4.8 or later + // GCC 4.8+ has a nice way to do this + return __builtin_cpu_supports("sse2"); +#else + // portable way to do this, preferably without using GCC inline ASM? + // just bail for now. 
+ return 0; +#endif +} +#endif +#endif + +// ARM NEON +#if defined(STBI_NO_SIMD) && defined(STBI_NEON) +#undef STBI_NEON +#endif + +#ifdef STBI_NEON +#include +// assume GCC or Clang on ARM targets +#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16))) +#endif + +#ifndef STBI_SIMD_ALIGN +#define STBI_SIMD_ALIGN(type, name) type name +#endif + +/////////////////////////////////////////////// +// +// stbi__context struct and start_xxx functions + +// stbi__context structure is our basic context used by all images, so it +// contains all the IO context, plus some basic image information +typedef struct +{ + stbi__uint32 img_x, img_y; + int img_n, img_out_n; + + stbi_io_callbacks io; + void *io_user_data; + + int read_from_callbacks; + int buflen; + stbi_uc buffer_start[128]; + + stbi_uc *img_buffer, *img_buffer_end; + stbi_uc *img_buffer_original, *img_buffer_original_end; +} stbi__context; + + +static void stbi__refill_buffer(stbi__context *s); + +// initialize a memory-decode context +static void stbi__start_mem(stbi__context *s, stbi_uc const *buffer, int len) +{ + s->io.read = NULL; + s->read_from_callbacks = 0; + s->img_buffer = s->img_buffer_original = (stbi_uc *)buffer; + s->img_buffer_end = s->img_buffer_original_end = (stbi_uc *)buffer + len; +} + +// initialize a callback-based context +static void stbi__start_callbacks(stbi__context *s, stbi_io_callbacks *c, void *user) +{ + s->io = *c; + s->io_user_data = user; + s->buflen = sizeof(s->buffer_start); + s->read_from_callbacks = 1; + s->img_buffer_original = s->buffer_start; + stbi__refill_buffer(s); + s->img_buffer_original_end = s->img_buffer_end; +} + +#ifndef STBI_NO_STDIO + +static int stbi__stdio_read(void *user, char *data, int size) +{ + return (int)fread(data, 1, size, (FILE*)user); +} + +static void stbi__stdio_skip(void *user, int n) +{ + fseek((FILE*)user, n, SEEK_CUR); +} + +static int stbi__stdio_eof(void *user) +{ + return feof((FILE*)user); +} + +static stbi_io_callbacks 
stbi__stdio_callbacks = +{ + stbi__stdio_read, + stbi__stdio_skip, + stbi__stdio_eof, +}; + +static void stbi__start_file(stbi__context *s, FILE *f) +{ + stbi__start_callbacks(s, &stbi__stdio_callbacks, (void *)f); +} + +//static void stop_file(stbi__context *s) { } + +#endif // !STBI_NO_STDIO + +static void stbi__rewind(stbi__context *s) +{ + // conceptually rewind SHOULD rewind to the beginning of the stream, + // but we just rewind to the beginning of the initial buffer, because + // we only use it after doing 'test', which only ever looks at at most 92 bytes + s->img_buffer = s->img_buffer_original; + s->img_buffer_end = s->img_buffer_original_end; +} + +enum +{ + STBI_ORDER_RGB, + STBI_ORDER_BGR +}; + +typedef struct +{ + int bits_per_channel; + int num_channels; + int channel_order; +} stbi__result_info; + +#ifndef STBI_NO_JPEG +static int stbi__jpeg_test(stbi__context *s); +static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PNG +static int stbi__png_test(stbi__context *s); +static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_BMP +static int stbi__bmp_test(stbi__context *s); +static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_TGA +static int stbi__tga_test(stbi__context *s); +static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PSD +static int stbi__psd_test(stbi__context *s); +static void *stbi__psd_load(stbi__context *s, int *x, 
int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc); +static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_HDR +static int stbi__hdr_test(stbi__context *s); +static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PIC +static int stbi__pic_test(stbi__context *s); +static void *stbi__pic_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_GIF +static int stbi__gif_test(stbi__context *s); +static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PNM +static int stbi__pnm_test(stbi__context *s); +static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +// this is not threadsafe +static const char *stbi__g_failure_reason; + +STBIDEF const char *stbi_failure_reason(void) +{ + return stbi__g_failure_reason; +} + +static int stbi__err(const char *str) +{ + stbi__g_failure_reason = str; + return 0; +} + +static void *stbi__malloc(size_t size) +{ + return STBI_MALLOC(size); +} + +// stb_image uses ints pervasively, including for offset calculations. +// therefore the largest decoded image size we can support with the +// current code, even on 64-bit targets, is INT_MAX. this is not a +// significant limitation for the intended use case. +// +// we do, however, need to make sure our size calculations don't +// overflow. 
hence a few helper functions for size calculations that +// multiply integers together, making sure that they're non-negative +// and no overflow occurs. + +// return 1 if the sum is valid, 0 on overflow. +// negative terms are considered invalid. +static int stbi__addsizes_valid(int a, int b) +{ + if (b < 0) return 0; + // now 0 <= b <= INT_MAX, hence also + // 0 <= INT_MAX - b <= INTMAX. + // And "a + b <= INT_MAX" (which might overflow) is the + // same as a <= INT_MAX - b (no overflow) + return a <= INT_MAX - b; +} + +// returns 1 if the product is valid, 0 on overflow. +// negative factors are considered invalid. +static int stbi__mul2sizes_valid(int a, int b) +{ + if (a < 0 || b < 0) return 0; + if (b == 0) return 1; // mul-by-0 is always safe + // portable way to check for no overflows in a*b + return a <= INT_MAX / b; +} + +// returns 1 if "a*b + add" has no negative terms/factors and doesn't overflow +static int stbi__mad2sizes_valid(int a, int b, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__addsizes_valid(a*b, add); +} + +// returns 1 if "a*b*c + add" has no negative terms/factors and doesn't overflow +static int stbi__mad3sizes_valid(int a, int b, int c, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) && + stbi__addsizes_valid(a*b*c, add); +} + +// returns 1 if "a*b*c*d + add" has no negative terms/factors and doesn't overflow +static int stbi__mad4sizes_valid(int a, int b, int c, int d, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) && + stbi__mul2sizes_valid(a*b*c, d) && stbi__addsizes_valid(a*b*c*d, add); +} + +// mallocs with size overflow checking +static void *stbi__malloc_mad2(int a, int b, int add) +{ + if (!stbi__mad2sizes_valid(a, b, add)) return NULL; + return stbi__malloc(a*b + add); +} + +static void *stbi__malloc_mad3(int a, int b, int c, int add) +{ + if (!stbi__mad3sizes_valid(a, b, c, add)) return NULL; + return stbi__malloc(a*b*c + add); +} + +static void 
*stbi__malloc_mad4(int a, int b, int c, int d, int add) +{ + if (!stbi__mad4sizes_valid(a, b, c, d, add)) return NULL; + return stbi__malloc(a*b*c*d + add); +} + +// stbi__err - error +// stbi__errpf - error returning pointer to float +// stbi__errpuc - error returning pointer to unsigned char + +#ifdef STBI_NO_FAILURE_STRINGS +#define stbi__err(x,y) 0 +#elif defined(STBI_FAILURE_USERMSG) +#define stbi__err(x,y) stbi__err(y) +#else +#define stbi__err(x,y) stbi__err(x) +#endif + +#define stbi__errpf(x,y) ((float *)(size_t) (stbi__err(x,y)?NULL:NULL)) +#define stbi__errpuc(x,y) ((unsigned char *)(size_t) (stbi__err(x,y)?NULL:NULL)) + +STBIDEF void stbi_image_free(void *retval_from_stbi_load) +{ + STBI_FREE(retval_from_stbi_load); +} + +#ifndef STBI_NO_LINEAR +static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp); +#endif + +#ifndef STBI_NO_HDR +static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp); +#endif + +static int stbi__vertically_flip_on_load = 0; + +STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip) +{ + stbi__vertically_flip_on_load = flag_true_if_should_flip; +} + +static void *stbi__load_main(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc) +{ + memset(ri, 0, sizeof(*ri)); // make sure it's initialized if we add new fields + ri->bits_per_channel = 8; // default is 8 so most paths don't have to be changed + ri->channel_order = STBI_ORDER_RGB; // all current input & output are this, but this is here so we can add BGR order + ri->num_channels = 0; + +#ifndef STBI_NO_JPEG + if (stbi__jpeg_test(s)) return stbi__jpeg_load(s, x, y, comp, req_comp, ri); +#endif +#ifndef STBI_NO_PNG + if (stbi__png_test(s)) return stbi__png_load(s, x, y, comp, req_comp, ri); +#endif +#ifndef STBI_NO_BMP + if (stbi__bmp_test(s)) return stbi__bmp_load(s, x, y, comp, req_comp, ri); +#endif +#ifndef STBI_NO_GIF + if (stbi__gif_test(s)) return stbi__gif_load(s, x, y, comp, req_comp, ri); 
+#endif +#ifndef STBI_NO_PSD + if (stbi__psd_test(s)) return stbi__psd_load(s, x, y, comp, req_comp, ri, bpc); +#endif +#ifndef STBI_NO_PIC + if (stbi__pic_test(s)) return stbi__pic_load(s, x, y, comp, req_comp, ri); +#endif +#ifndef STBI_NO_PNM + if (stbi__pnm_test(s)) return stbi__pnm_load(s, x, y, comp, req_comp, ri); +#endif + +#ifndef STBI_NO_HDR + if (stbi__hdr_test(s)) { + float *hdr = stbi__hdr_load(s, x, y, comp, req_comp, ri); + return stbi__hdr_to_ldr(hdr, *x, *y, req_comp ? req_comp : *comp); + } +#endif + +#ifndef STBI_NO_TGA + // test tga last because it's a crappy test! + if (stbi__tga_test(s)) + return stbi__tga_load(s, x, y, comp, req_comp, ri); +#endif + + return stbi__errpuc("unknown image type", "Image not of any known type, or corrupt"); +} + +static stbi_uc *stbi__convert_16_to_8(stbi__uint16 *orig, int w, int h, int channels) +{ + int i; + int img_len = w * h * channels; + stbi_uc *reduced; + + reduced = (stbi_uc *)stbi__malloc(img_len); + if (reduced == NULL) return stbi__errpuc("outofmem", "Out of memory"); + + for (i = 0; i < img_len; ++i) + reduced[i] = (stbi_uc)((orig[i] >> 8) & 0xFF); // top half of each byte is sufficient approx of 16->8 bit scaling + + STBI_FREE(orig); + return reduced; +} + +static stbi__uint16 *stbi__convert_8_to_16(stbi_uc *orig, int w, int h, int channels) +{ + int i; + int img_len = w * h * channels; + stbi__uint16 *enlarged; + + enlarged = (stbi__uint16 *)stbi__malloc(img_len * 2); + if (enlarged == NULL) return (stbi__uint16 *)stbi__errpuc("outofmem", "Out of memory"); + + for (i = 0; i < img_len; ++i) + enlarged[i] = (stbi__uint16)((orig[i] << 8) + orig[i]); // replicate to high and low byte, maps 0->0, 255->0xffff + + STBI_FREE(orig); + return enlarged; +} + +static unsigned char *stbi__load_and_postprocess_8bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi__result_info ri; + void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 8); + + if (result == NULL) + return NULL; + + if 
(ri.bits_per_channel != 8) { + STBI_ASSERT(ri.bits_per_channel == 16); + result = stbi__convert_16_to_8((stbi__uint16 *)result, *x, *y, req_comp == 0 ? *comp : req_comp); + ri.bits_per_channel = 8; + } + + // @TODO: move stbi__convert_format to here + + if (stbi__vertically_flip_on_load) { + int w = *x, h = *y; + int channels = req_comp ? req_comp : *comp; + int row, col, z; + stbi_uc *image = (stbi_uc *)result; + + // @OPTIMIZE: use a bigger temp buffer and memcpy multiple pixels at once + for (row = 0; row < (h >> 1); row++) { + for (col = 0; col < w; col++) { + for (z = 0; z < channels; z++) { + stbi_uc temp = image[(row * w + col) * channels + z]; + image[(row * w + col) * channels + z] = image[((h - row - 1) * w + col) * channels + z]; + image[((h - row - 1) * w + col) * channels + z] = temp; + } + } + } + } + + return (unsigned char *)result; +} + +static stbi__uint16 *stbi__load_and_postprocess_16bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi__result_info ri; + void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 16); + + if (result == NULL) + return NULL; + + if (ri.bits_per_channel != 16) { + STBI_ASSERT(ri.bits_per_channel == 8); + result = stbi__convert_8_to_16((stbi_uc *)result, *x, *y, req_comp == 0 ? *comp : req_comp); + ri.bits_per_channel = 16; + } + + // @TODO: move stbi__convert_format16 to here + // @TODO: special case RGB-to-Y (and RGBA-to-YA) for 8-bit-to-16-bit case to keep more precision + + if (stbi__vertically_flip_on_load) { + int w = *x, h = *y; + int channels = req_comp ? 
req_comp : *comp; + int row, col, z; + stbi__uint16 *image = (stbi__uint16 *)result; + + // @OPTIMIZE: use a bigger temp buffer and memcpy multiple pixels at once + for (row = 0; row < (h >> 1); row++) { + for (col = 0; col < w; col++) { + for (z = 0; z < channels; z++) { + stbi__uint16 temp = image[(row * w + col) * channels + z]; + image[(row * w + col) * channels + z] = image[((h - row - 1) * w + col) * channels + z]; + image[((h - row - 1) * w + col) * channels + z] = temp; + } + } + } + } + + return (stbi__uint16 *)result; +} + +#ifndef STBI_NO_HDR +static void stbi__float_postprocess(float *result, int *x, int *y, int *comp, int req_comp) +{ + if (stbi__vertically_flip_on_load && result != NULL) { + int w = *x, h = *y; + int depth = req_comp ? req_comp : *comp; + int row, col, z; + float temp; + + // @OPTIMIZE: use a bigger temp buffer and memcpy multiple pixels at once + for (row = 0; row < (h >> 1); row++) { + for (col = 0; col < w; col++) { + for (z = 0; z < depth; z++) { + temp = result[(row * w + col) * depth + z]; + result[(row * w + col) * depth + z] = result[((h - row - 1) * w + col) * depth + z]; + result[((h - row - 1) * w + col) * depth + z] = temp; + } + } + } + } +} +#endif + +#ifndef STBI_NO_STDIO + +static FILE *stbi__fopen(char const *filename, char const *mode) +{ + FILE *f; +#if defined(_MSC_VER) && _MSC_VER >= 1400 + if (0 != fopen_s(&f, filename, mode)) + f = 0; +#else + f = fopen(filename, mode); +#endif + return f; +} + + +STBIDEF stbi_uc *stbi_load(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + unsigned char *result; + if (!f) return stbi__errpuc("can't fopen", "Unable to open file"); + result = stbi_load_from_file(f, x, y, comp, req_comp); + fclose(f); + return result; +} + +STBIDEF stbi_uc *stbi_load_from_file(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + unsigned char *result; + stbi__context s; + stbi__start_file(&s, f); + result = 
stbi__load_and_postprocess_8bit(&s, x, y, comp, req_comp); + if (result) { + // need to 'unget' all the characters in the IO buffer + fseek(f, -(int)(s.img_buffer_end - s.img_buffer), SEEK_CUR); + } + return result; +} + +STBIDEF stbi__uint16 *stbi_load_from_file_16(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + stbi__uint16 *result; + stbi__context s; + stbi__start_file(&s, f); + result = stbi__load_and_postprocess_16bit(&s, x, y, comp, req_comp); + if (result) { + // need to 'unget' all the characters in the IO buffer + fseek(f, -(int)(s.img_buffer_end - s.img_buffer), SEEK_CUR); + } + return result; +} + +STBIDEF stbi_us *stbi_load_16(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + stbi__uint16 *result; + if (!f) return (stbi_us *)stbi__errpuc("can't fopen", "Unable to open file"); + result = stbi_load_from_file_16(f, x, y, comp, req_comp); + fclose(f); + return result; +} + + +#endif //!STBI_NO_STDIO + +STBIDEF stbi_uc *stbi_load_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_mem(&s, buffer, len); + return stbi__load_and_postprocess_8bit(&s, x, y, comp, req_comp); +} + +STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *)clbk, user); + return stbi__load_and_postprocess_8bit(&s, x, y, comp, req_comp); +} + +#ifndef STBI_NO_LINEAR +static float *stbi__loadf_main(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + unsigned char *data; +#ifndef STBI_NO_HDR + if (stbi__hdr_test(s)) { + stbi__result_info ri; + float *hdr_data = stbi__hdr_load(s, x, y, comp, req_comp, &ri); + if (hdr_data) + stbi__float_postprocess(hdr_data, x, y, comp, req_comp); + return hdr_data; + } +#endif + data = stbi__load_and_postprocess_8bit(s, x, y, comp, req_comp); + if (data) + return 
stbi__ldr_to_hdr(data, *x, *y, req_comp ? req_comp : *comp); + return stbi__errpf("unknown image type", "Image not of any known type, or corrupt"); +} + +STBIDEF float *stbi_loadf_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_mem(&s, buffer, len); + return stbi__loadf_main(&s, x, y, comp, req_comp); +} + +STBIDEF float *stbi_loadf_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *)clbk, user); + return stbi__loadf_main(&s, x, y, comp, req_comp); +} + +#ifndef STBI_NO_STDIO +STBIDEF float *stbi_loadf(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + float *result; + FILE *f = stbi__fopen(filename, "rb"); + if (!f) return stbi__errpf("can't fopen", "Unable to open file"); + result = stbi_loadf_from_file(f, x, y, comp, req_comp); + fclose(f); + return result; +} + +STBIDEF float *stbi_loadf_from_file(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_file(&s, f); + return stbi__loadf_main(&s, x, y, comp, req_comp); +} +#endif // !STBI_NO_STDIO + +#endif // !STBI_NO_LINEAR + +// these is-hdr-or-not is defined independent of whether STBI_NO_LINEAR is +// defined, for API simplicity; if STBI_NO_LINEAR is defined, it always +// reports false! 
+ +STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len) +{ +#ifndef STBI_NO_HDR + stbi__context s; + stbi__start_mem(&s, buffer, len); + return stbi__hdr_test(&s); +#else + STBI_NOTUSED(buffer); + STBI_NOTUSED(len); + return 0; +#endif +} + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_is_hdr(char const *filename) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result = 0; + if (f) { + result = stbi_is_hdr_from_file(f); + fclose(f); + } + return result; +} + +STBIDEF int stbi_is_hdr_from_file(FILE *f) +{ +#ifndef STBI_NO_HDR + stbi__context s; + stbi__start_file(&s, f); + return stbi__hdr_test(&s); +#else + STBI_NOTUSED(f); + return 0; +#endif +} +#endif // !STBI_NO_STDIO + +STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user) +{ +#ifndef STBI_NO_HDR + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *)clbk, user); + return stbi__hdr_test(&s); +#else + STBI_NOTUSED(clbk); + STBI_NOTUSED(user); + return 0; +#endif +} + +#ifndef STBI_NO_LINEAR +static float stbi__l2h_gamma = 2.2f, stbi__l2h_scale = 1.0f; + +STBIDEF void stbi_ldr_to_hdr_gamma(float gamma) { stbi__l2h_gamma = gamma; } +STBIDEF void stbi_ldr_to_hdr_scale(float scale) { stbi__l2h_scale = scale; } +#endif + +static float stbi__h2l_gamma_i = 1.0f / 2.2f, stbi__h2l_scale_i = 1.0f; + +STBIDEF void stbi_hdr_to_ldr_gamma(float gamma) { stbi__h2l_gamma_i = 1 / gamma; } +STBIDEF void stbi_hdr_to_ldr_scale(float scale) { stbi__h2l_scale_i = 1 / scale; } + + +////////////////////////////////////////////////////////////////////////////// +// +// Common code used by all image loaders +// + +enum +{ + STBI__SCAN_load = 0, + STBI__SCAN_type, + STBI__SCAN_header +}; + +static void stbi__refill_buffer(stbi__context *s) +{ + int n = (s->io.read)(s->io_user_data, (char*)s->buffer_start, s->buflen); + if (n == 0) { + // at end of file, treat same as if from memory, but need to handle case + // where s->img_buffer isn't pointing to safe memory, e.g. 
0-byte file + s->read_from_callbacks = 0; + s->img_buffer = s->buffer_start; + s->img_buffer_end = s->buffer_start + 1; + *s->img_buffer = 0; + } + else { + s->img_buffer = s->buffer_start; + s->img_buffer_end = s->buffer_start + n; + } +} + +stbi_inline static stbi_uc stbi__get8(stbi__context *s) +{ + if (s->img_buffer < s->img_buffer_end) + return *s->img_buffer++; + if (s->read_from_callbacks) { + stbi__refill_buffer(s); + return *s->img_buffer++; + } + return 0; +} + +stbi_inline static int stbi__at_eof(stbi__context *s) +{ + if (s->io.read) { + if (!(s->io.eof)(s->io_user_data)) return 0; + // if feof() is true, check if buffer = end + // special case: we've only got the special 0 character at the end + if (s->read_from_callbacks == 0) return 1; + } + + return s->img_buffer >= s->img_buffer_end; +} + +static void stbi__skip(stbi__context *s, int n) +{ + if (n < 0) { + s->img_buffer = s->img_buffer_end; + return; + } + if (s->io.read) { + int blen = (int)(s->img_buffer_end - s->img_buffer); + if (blen < n) { + s->img_buffer = s->img_buffer_end; + (s->io.skip)(s->io_user_data, n - blen); + return; + } + } + s->img_buffer += n; +} + +static int stbi__getn(stbi__context *s, stbi_uc *buffer, int n) +{ + if (s->io.read) { + int blen = (int)(s->img_buffer_end - s->img_buffer); + if (blen < n) { + int res, count; + + memcpy(buffer, s->img_buffer, blen); + + count = (s->io.read)(s->io_user_data, (char*)buffer + blen, n - blen); + res = (count == (n - blen)); + s->img_buffer = s->img_buffer_end; + return res; + } + } + + if (s->img_buffer + n <= s->img_buffer_end) { + memcpy(buffer, s->img_buffer, n); + s->img_buffer += n; + return 1; + } + else + return 0; +} + +static int stbi__get16be(stbi__context *s) +{ + int z = stbi__get8(s); + return (z << 8) + stbi__get8(s); +} + +static stbi__uint32 stbi__get32be(stbi__context *s) +{ + stbi__uint32 z = stbi__get16be(s); + return (z << 16) + stbi__get16be(s); +} + +#if defined(STBI_NO_BMP) && defined(STBI_NO_TGA) && 
defined(STBI_NO_GIF) +// nothing +#else +static int stbi__get16le(stbi__context *s) +{ + int z = stbi__get8(s); + return z + (stbi__get8(s) << 8); +} +#endif + +#ifndef STBI_NO_BMP +static stbi__uint32 stbi__get32le(stbi__context *s) +{ + stbi__uint32 z = stbi__get16le(s); + return z + (stbi__get16le(s) << 16); +} +#endif + +#define STBI__BYTECAST(x) ((stbi_uc) ((x) & 255)) // truncate int to byte without warnings + + +////////////////////////////////////////////////////////////////////////////// +// +// generic converter from built-in img_n to req_comp +// individual types do this automatically as much as possible (e.g. jpeg +// does all cases internally since it needs to colorspace convert anyway, +// and it never has alpha, so very few cases ). png can automatically +// interleave an alpha=255 channel, but falls back to this for other cases +// +// assume data buffer is malloced, so malloc a new one and free that one +// only failure mode is malloc failing + +static stbi_uc stbi__compute_y(int r, int g, int b) +{ + return (stbi_uc)(((r * 77) + (g * 150) + (29 * b)) >> 8); +} + +static unsigned char *stbi__convert_format(unsigned char *data, int img_n, int req_comp, unsigned int x, unsigned int y) +{ + int i, j; + unsigned char *good; + + if (req_comp == img_n) return data; + STBI_ASSERT(req_comp >= 1 && req_comp <= 4); + + good = (unsigned char *)stbi__malloc_mad3(req_comp, x, y, 0); + if (good == NULL) { + STBI_FREE(data); + return stbi__errpuc("outofmem", "Out of memory"); + } + + for (j = 0; j < (int)y; ++j) { + unsigned char *src = data + j * x * img_n; + unsigned char *dest = good + j * x * req_comp; + +#define STBI__COMBO(a,b) ((a)*8+(b)) +#define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b) + // convert source image with img_n components to one with req_comp components; + // avoid switch per pixel, so use switch per scanline and massive macros + switch (STBI__COMBO(img_n, req_comp)) { + STBI__CASE(1, 2) { dest[0] = 
src[0], dest[1] = 255; } break;
            STBI__CASE(1, 3) { dest[0] = dest[1] = dest[2] = src[0]; } break;
            STBI__CASE(1, 4) { dest[0] = dest[1] = dest[2] = src[0], dest[3] = 255; } break;
            STBI__CASE(2, 1) { dest[0] = src[0]; } break;
            STBI__CASE(2, 3) { dest[0] = dest[1] = dest[2] = src[0]; } break;
            STBI__CASE(2, 4) { dest[0] = dest[1] = dest[2] = src[0], dest[3] = src[1]; } break;
            STBI__CASE(3, 4) { dest[0] = src[0], dest[1] = src[1], dest[2] = src[2], dest[3] = 255; } break;
            STBI__CASE(3, 1) { dest[0] = stbi__compute_y(src[0], src[1], src[2]); } break;
            STBI__CASE(3, 2) { dest[0] = stbi__compute_y(src[0], src[1], src[2]), dest[1] = 255; } break;
            STBI__CASE(4, 1) { dest[0] = stbi__compute_y(src[0], src[1], src[2]); } break;
            STBI__CASE(4, 2) { dest[0] = stbi__compute_y(src[0], src[1], src[2]), dest[1] = src[3]; } break;
            STBI__CASE(4, 3) { dest[0] = src[0], dest[1] = src[1], dest[2] = src[2]; } break;
        default: STBI_ASSERT(0);
        }
#undef STBI__CASE
    }

    STBI_FREE(data);
    return good;
}

// Integer luma for 16-bit samples using Rec.601-style weights (77+150+29 = 256),
// i.e. y = (77r + 150g + 29b) / 256.
static stbi__uint16 stbi__compute_y_16(int r, int g, int b)
{
    return (stbi__uint16)(((r * 77) + (g * 150) + (29 * b)) >> 8);
}

// 16-bit twin of stbi__convert_format: convert `data` (img_n components per
// pixel, x*y pixels) to req_comp components. Consumes (frees) `data` and
// returns a newly allocated buffer — or `data` itself when no conversion is
// needed. Returns NULL (with "outofmem" error set) on allocation failure.
static stbi__uint16 *stbi__convert_format16(stbi__uint16 *data, int img_n, int req_comp, unsigned int x, unsigned int y)
{
    int i, j;
    stbi__uint16 *good;

    if (req_comp == img_n) return data;
    STBI_ASSERT(req_comp >= 1 && req_comp <= 4);

    // NOTE(review): unlike the 8-bit path, this multiply is not overflow-checked
    // (no stbi__malloc_mad*) — presumably callers have validated x*y already; verify.
    good = (stbi__uint16 *)stbi__malloc(req_comp * x * y * 2);
    if (good == NULL) {
        STBI_FREE(data);
        return (stbi__uint16 *)stbi__errpuc("outofmem", "Out of memory");
    }

    for (j = 0; j < (int)y; ++j) {
        stbi__uint16 *src = data + j * x * img_n;
        stbi__uint16 *dest = good + j * x * req_comp;

#define STBI__COMBO(a,b) ((a)*8+(b))
#define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b)
        // convert source image with img_n components to one with req_comp components;
        // avoid switch per pixel, so use switch per scanline and massive macros
        switch (STBI__COMBO(img_n, req_comp)) {
            STBI__CASE(1, 2) { dest[0] = src[0], dest[1] = 0xffff; } break;
            STBI__CASE(1, 3) { dest[0] = dest[1] = dest[2] = src[0]; } break;
            STBI__CASE(1, 4) { dest[0] = dest[1] = dest[2] = src[0], dest[3] = 0xffff; } break;
            STBI__CASE(2, 1) { dest[0] = src[0]; } break;
            STBI__CASE(2, 3) { dest[0] = dest[1] = dest[2] = src[0]; } break;
            STBI__CASE(2, 4) { dest[0] = dest[1] = dest[2] = src[0], dest[3] = src[1]; } break;
            STBI__CASE(3, 4) { dest[0] = src[0], dest[1] = src[1], dest[2] = src[2], dest[3] = 0xffff; } break;
            STBI__CASE(3, 1) { dest[0] = stbi__compute_y_16(src[0], src[1], src[2]); } break;
            STBI__CASE(3, 2) { dest[0] = stbi__compute_y_16(src[0], src[1], src[2]), dest[1] = 0xffff; } break;
            STBI__CASE(4, 1) { dest[0] = stbi__compute_y_16(src[0], src[1], src[2]); } break;
            STBI__CASE(4, 2) { dest[0] = stbi__compute_y_16(src[0], src[1], src[2]), dest[1] = src[3]; } break;
            STBI__CASE(4, 3) { dest[0] = src[0], dest[1] = src[1], dest[2] = src[2]; } break;
        default: STBI_ASSERT(0);
        }
#undef STBI__CASE
    }

    STBI_FREE(data);
    return good;
}

#ifndef STBI_NO_LINEAR
// Promote 8-bit LDR data to float HDR: applies gamma (stbi__l2h_gamma) and
// scale (stbi__l2h_scale) to color channels; alpha (last channel when comp is
// even) is linearly mapped to 0..1. Consumes (frees) `data`.
static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp)
{
    int i, k, n;
    float *output;
    if (!data) return NULL;
    output = (float *)stbi__malloc_mad4(x, y, comp, sizeof(float), 0);
    if (output == NULL) { STBI_FREE(data); return stbi__errpf("outofmem", "Out of memory"); }
    // compute number of non-alpha components
    if (comp & 1) n = comp; else n = comp - 1;
    for (i = 0; i < x*y; ++i) {
        for (k = 0; k < n; ++k) {
            output[i*comp + k] = (float)(pow(data[i*comp + k] / 255.0f, stbi__l2h_gamma) * stbi__l2h_scale);
        }
        // alpha channel: linear, no gamma (relies on k == n after the loop)
        if (k < comp) output[i*comp + k] = data[i*comp + k] / 255.0f;
    }
    STBI_FREE(data);
    return output;
}
#endif

#ifndef STBI_NO_HDR
#define stbi__float2int(x) ((int) (x))
// Tone-map float HDR data down to 8-bit LDR: inverse gamma/scale on color
// channels, clamped to 0..255 with rounding; alpha mapped linearly.
// Consumes (frees) `data`.
static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp)
{
    int i, k, n;
    stbi_uc *output;
    if (!data) return NULL;
    output = (stbi_uc *)stbi__malloc_mad3(x, y, comp, 0);
    if (output == NULL) { STBI_FREE(data); return stbi__errpuc("outofmem", "Out of memory"); }
    // compute number of non-alpha components
    if (comp & 1) n = comp; else n = comp - 1;
    for (i = 0; i < x*y; ++i) {
        for (k = 0; k < n; ++k) {
            float z = (float)pow(data[i*comp + k] * stbi__h2l_scale_i, stbi__h2l_gamma_i) * 255 + 0.5f;
            if (z < 0) z = 0;
            if (z > 255) z = 255;
            output[i*comp + k] = (stbi_uc)stbi__float2int(z);
        }
        if (k < comp) {
            float z = data[i*comp + k] * 255 + 0.5f;
            if (z < 0) z = 0;
            if (z > 255) z = 255;
            output[i*comp + k] = (stbi_uc)stbi__float2int(z);
        }
    }
    STBI_FREE(data);
    return output;
}
#endif

//////////////////////////////////////////////////////////////////////////////
//
//  "baseline" JPEG/JFIF decoder
//
//    simple implementation
//      - doesn't support delayed output of y-dimension
//      - simple interface (only one output format: 8-bit interleaved RGB)
//      - doesn't try to recover corrupt jpegs
//      - doesn't allow partial loading, loading multiple at once
//      - still fast on x86 (copying globals into locals doesn't help x86)
//      - allocates lots of intermediate memory (full size of all components)
//        - non-interleaved case requires this anyway
//      - allows good upsampling (see next)
//    high-quality
//      - upsampled channels are bilinearly interpolated, even across blocks
//      - quality integer IDCT derived from IJG's 'slow'
//    performance
//      - fast huffman; reasonable integer IDCT
//      - some SIMD kernels for common paths on targets with SSE2/NEON
//      - uses a lot of intermediate memory, could cache poorly

#ifndef STBI_NO_JPEG

// huffman decoding acceleration
#define FAST_BITS 9 // larger handles more cases; smaller stomps less cache

// Canonical Huffman table plus a FAST_BITS-wide direct-lookup accelerator.
typedef struct
{
    stbi_uc fast[1 << FAST_BITS];
    // weirdly, repacking this into AoS is a 10% speed loss, instead of a win
    stbi__uint16 code[256];
    stbi_uc values[256];
    stbi_uc size[257];
    unsigned int
maxcode[18];
    int delta[17]; // old 'firstsymbol' - old 'firstcode'
} stbi__huffman;

// Complete decoder state for one JPEG image.
typedef struct
{
    stbi__context *s;
    stbi__huffman huff_dc[4];
    stbi__huffman huff_ac[4];
    stbi_uc dequant[4][64];
    stbi__int16 fast_ac[4][1 << FAST_BITS];

    // sizes for components, interleaved MCUs
    int img_h_max, img_v_max;
    int img_mcu_x, img_mcu_y;
    int img_mcu_w, img_mcu_h;

    // definition of jpeg image component
    struct
    {
        int id;
        int h, v;
        int tq;
        int hd, ha;
        int dc_pred;

        int x, y, w2, h2;
        stbi_uc *data;
        void *raw_data, *raw_coeff;
        stbi_uc *linebuf;
        short *coeff; // progressive only
        int coeff_w, coeff_h; // number of 8x8 coefficient blocks
    } img_comp[4];

    stbi__uint32 code_buffer; // jpeg entropy-coded buffer
    int code_bits; // number of valid bits
    unsigned char marker; // marker seen while filling entropy buffer
    int nomore; // flag if we saw a marker so must stop

    int progressive;
    int spec_start;
    int spec_end;
    int succ_high;
    int succ_low;
    int eob_run;
    int rgb;

    int scan_n, order[4];
    int restart_interval, todo;

    // kernels (selected at setup; SSE2/NEON variants may be plugged in)
    void(*idct_block_kernel)(stbi_uc *out, int out_stride, short data[64]);
    void(*YCbCr_to_RGB_kernel)(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step);
    stbi_uc *(*resample_row_hv_2_kernel)(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs);
} stbi__jpeg;

// Build canonical Huffman decode tables for `h` from the 16 code-length
// counts in `count` (per JPEG spec), plus the non-spec `fast` lookup table.
// Returns 0 with an error set if the lengths are inconsistent.
static int stbi__build_huffman(stbi__huffman *h, int *count)
{
    int i, j, k = 0, code;
    // build size list for each symbol (from JPEG spec)
    for (i = 0; i < 16; ++i)
        for (j = 0; j < count[i]; ++j)
            h->size[k++] = (stbi_uc)(i + 1);
    h->size[k] = 0;

    // compute actual symbols (from jpeg spec)
    code = 0;
    k = 0;
    for (j = 1; j <= 16; ++j) {
        // compute delta to add to code to compute symbol id
        h->delta[j] = k - code;
        if (h->size[k] == j) {
            while (h->size[k] == j)
                h->code[k++] = (stbi__uint16)(code++);
            if (code - 1 >= (1 << j)) return stbi__err("bad code lengths", "Corrupt JPEG");
        }
        // compute largest code + 1 for this size, preshifted as needed later
        h->maxcode[j] = code << (16 - j);
        code <<= 1;
    }
    h->maxcode[j] = 0xffffffff; // sentinel so the decode loop always terminates

    // build non-spec acceleration table; 255 is flag for not-accelerated
    memset(h->fast, 255, 1 << FAST_BITS);
    for (i = 0; i < k; ++i) {
        int s = h->size[i];
        if (s <= FAST_BITS) {
            int c = h->code[i] << (FAST_BITS - s);
            int m = 1 << (FAST_BITS - s);
            for (j = 0; j < m; ++j) {
                h->fast[c + j] = (stbi_uc)i;
            }
        }
    }
    return 1;
}

// build a table that decodes both magnitude and value of small ACs in
// one go.
static void stbi__build_fast_ac(stbi__int16 *fast_ac, stbi__huffman *h)
{
    int i;
    for (i = 0; i < (1 << FAST_BITS); ++i) {
        stbi_uc fast = h->fast[i];
        fast_ac[i] = 0;
        if (fast < 255) {
            int rs = h->values[fast];
            int run = (rs >> 4) & 15;
            int magbits = rs & 15;
            int len = h->size[fast];

            if (magbits && len + magbits <= FAST_BITS) {
                // magnitude code followed by receive_extend code
                int k = ((i << len) & ((1 << FAST_BITS) - 1)) >> (FAST_BITS - magbits);
                int m = 1 << (magbits - 1);
                // NOTE(review): -1 << magbits is a negative left shift (UB in
                // standard C); newer stb_image uses (~0U << magbits) — confirm
                // against upstream before changing vendored code.
                if (k < m) k += (-1 << magbits) + 1;
                // if the result is small enough, we can fit it in fast_ac table
                if (k >= -128 && k <= 127)
                    fast_ac[i] = (stbi__int16)((k << 8) + (run << 4) + (len + magbits));
            }
        }
    }
}

// Refill the entropy bit buffer so it holds more than 24 valid bits.
// Handles 0xFF00 byte stuffing; a real marker stops refilling (nomore=1).
static void stbi__grow_buffer_unsafe(stbi__jpeg *j)
{
    do {
        int b = j->nomore ? 0 : stbi__get8(j->s);
        if (b == 0xff) {
            int c = stbi__get8(j->s);
            if (c != 0) {
                // not byte stuffing: remember the marker and stop consuming input
                j->marker = (unsigned char)c;
                j->nomore = 1;
                return;
            }
        }
        j->code_buffer |= b << (24 - j->code_bits);
        j->code_bits += 8;
    } while (j->code_bits <= 24);
}

// (1 << n) - 1
static stbi__uint32 stbi__bmask[17] = { 0,1,3,7,15,31,63,127,255,511,1023,2047,4095,8191,16383,32767,65535 };

// decode a jpeg huffman value from the bitstream
stbi_inline static int stbi__jpeg_huff_decode(stbi__jpeg *j, stbi__huffman *h)
{
    unsigned int temp;
    int c, k;

    if (j->code_bits < 16) stbi__grow_buffer_unsafe(j);

    // look at the top FAST_BITS and determine what symbol ID it is,
    // if the code is <= FAST_BITS
    c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS) - 1);
    k = h->fast[c];
    if (k < 255) {
        int s = h->size[k];
        if (s > j->code_bits)
            return -1;
        j->code_buffer <<= s;
        j->code_bits -= s;
        return h->values[k];
    }

    // naive test is to shift the code_buffer down so k bits are
    // valid, then test against maxcode. To speed this up, we've
    // preshifted maxcode left so that it has (16-k) 0s at the
    // end; in other words, regardless of the number of bits, it
    // wants to be compared against something shifted to have 16;
    // that way we don't need to shift inside the loop.
    temp = j->code_buffer >> 16;
    for (k = FAST_BITS + 1; ; ++k)
        if (temp < h->maxcode[k])
            break;
    if (k == 17) {
        // error!
code not found + j->code_bits -= 16; + return -1; + } + + if (k > j->code_bits) + return -1; + + // convert the huffman code to the symbol id + c = ((j->code_buffer >> (32 - k)) & stbi__bmask[k]) + h->delta[k]; + STBI_ASSERT((((j->code_buffer) >> (32 - h->size[c])) & stbi__bmask[h->size[c]]) == h->code[c]); + + // convert the id to a symbol + j->code_bits -= k; + j->code_buffer <<= k; + return h->values[c]; +} + +// bias[n] = (-1<code_bits < n) stbi__grow_buffer_unsafe(j); + + sgn = (stbi__int32)j->code_buffer >> 31; // sign bit is always in MSB + k = stbi_lrot(j->code_buffer, n); + STBI_ASSERT(n >= 0 && n < (int)(sizeof(stbi__bmask) / sizeof(*stbi__bmask))); + j->code_buffer = k & ~stbi__bmask[n]; + k &= stbi__bmask[n]; + j->code_bits -= n; + return k + (stbi__jbias[n] & ~sgn); +} + +// get some unsigned bits +stbi_inline static int stbi__jpeg_get_bits(stbi__jpeg *j, int n) +{ + unsigned int k; + if (j->code_bits < n) stbi__grow_buffer_unsafe(j); + k = stbi_lrot(j->code_buffer, n); + j->code_buffer = k & ~stbi__bmask[n]; + k &= stbi__bmask[n]; + j->code_bits -= n; + return k; +} + +stbi_inline static int stbi__jpeg_get_bit(stbi__jpeg *j) +{ + unsigned int k; + if (j->code_bits < 1) stbi__grow_buffer_unsafe(j); + k = j->code_buffer; + j->code_buffer <<= 1; + --j->code_bits; + return k & 0x80000000; +} + +// given a value that's at position X in the zigzag stream, +// where does it appear in the 8x8 matrix coded as row-major? 
static stbi_uc stbi__jpeg_dezigzag[64 + 15] =
{
    0,  1,  8, 16,  9,  2,  3, 10,
   17, 24, 32, 25, 18, 11,  4,  5,
   12, 19, 26, 33, 40, 48, 41, 34,
   27, 20, 13,  6,  7, 14, 21, 28,
   35, 42, 49, 56, 57, 50, 43, 36,
   29, 22, 15, 23, 30, 37, 44, 51,
   58, 59, 52, 45, 38, 31, 39, 46,
   53, 60, 61, 54, 47, 55, 62, 63,
   // let corrupt input sample past end
   63, 63, 63, 63, 63, 63, 63, 63,
   63, 63, 63, 63, 63, 63, 63
};

// decode one 64-entry block--
// Baseline (sequential) decode: DC delta + AC run-lengths into `data`,
// dequantized and de-zigzagged in place. Returns 0 on corrupt input.
static int stbi__jpeg_decode_block(stbi__jpeg *j, short data[64], stbi__huffman *hdc, stbi__huffman *hac, stbi__int16 *fac, int b, stbi_uc *dequant)
{
    int diff, dc, k;
    int t;

    if (j->code_bits < 16) stbi__grow_buffer_unsafe(j);
    t = stbi__jpeg_huff_decode(j, hdc);
    if (t < 0) return stbi__err("bad huffman code", "Corrupt JPEG");

    // 0 all the ac values now so we can do it 32-bits at a time
    memset(data, 0, 64 * sizeof(data[0]));

    diff = t ? stbi__extend_receive(j, t) : 0;
    dc = j->img_comp[b].dc_pred + diff;   // DC is coded as a delta from the previous block
    j->img_comp[b].dc_pred = dc;
    data[0] = (short)(dc * dequant[0]);

    // decode AC components, see JPEG spec
    k = 1;
    do {
        unsigned int zig;
        int c, r, s;
        if (j->code_bits < 16) stbi__grow_buffer_unsafe(j);
        c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS) - 1);
        r = fac[c];
        if (r) { // fast-AC path
            k += (r >> 4) & 15; // run
            s = r & 15; // combined length
            j->code_buffer <<= s;
            j->code_bits -= s;
            // decode into unzigzag'd location
            zig = stbi__jpeg_dezigzag[k++];
            data[zig] = (short)((r >> 8) * dequant[zig]);
        }
        else {
            int rs = stbi__jpeg_huff_decode(j, hac);
            if (rs < 0) return stbi__err("bad huffman code", "Corrupt JPEG");
            s = rs & 15;
            r = rs >> 4;
            if (s == 0) {
                if (rs != 0xf0) break; // end block
                k += 16;               // ZRL: run of 16 zeros
            }
            else {
                k += r;
                // decode into unzigzag'd location
                zig = stbi__jpeg_dezigzag[k++];
                data[zig] = (short)(stbi__extend_receive(j, s) * dequant[zig]);
            }
        }
    } while (k < 64);
    return 1;
}

// Progressive decode, DC coefficient only (first scan zeroes + sets DC,
// refinement scans add one bit of precision).
static int stbi__jpeg_decode_block_prog_dc(stbi__jpeg *j, short data[64], stbi__huffman *hdc, int b)
{
    int diff, dc;
    int t;
    if (j->spec_end != 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG");

    if (j->code_bits < 16) stbi__grow_buffer_unsafe(j);

    if (j->succ_high == 0) {
        // first scan for DC coefficient, must be first
        memset(data, 0, 64 * sizeof(data[0])); // 0 all the ac values now
        t = stbi__jpeg_huff_decode(j, hdc);
        diff = t ? stbi__extend_receive(j, t) : 0;

        dc = j->img_comp[b].dc_pred + diff;
        j->img_comp[b].dc_pred = dc;
        data[0] = (short)(dc << j->succ_low);
    }
    else {
        // refinement scan for DC coefficient
        if (stbi__jpeg_get_bit(j))
            data[0] += (short)(1 << j->succ_low);
    }
    return 1;
}

// @OPTIMIZE: store non-zigzagged during the decode passes,
// and only de-zigzag when dequantizing
// Progressive decode, AC coefficients in band [spec_start, spec_end].
static int stbi__jpeg_decode_block_prog_ac(stbi__jpeg *j, short data[64], stbi__huffman *hac, stbi__int16 *fac)
{
    int k;
    if (j->spec_start == 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG");

    if (j->succ_high == 0) {
        int shift = j->succ_low;

        // a pending end-of-band run skips this entire block
        if (j->eob_run) {
            --j->eob_run;
            return 1;
        }

        k = j->spec_start;
        do {
            unsigned int zig;
            int c, r, s;
            if (j->code_bits < 16) stbi__grow_buffer_unsafe(j);
            c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS) - 1);
            r = fac[c];
            if (r) { // fast-AC path
                k += (r >> 4) & 15; // run
                s = r & 15; // combined length
                j->code_buffer <<= s;
                j->code_bits -= s;
                zig = stbi__jpeg_dezigzag[k++];
                data[zig] = (short)((r >> 8) << shift);
            }
            else {
                int rs = stbi__jpeg_huff_decode(j, hac);
                if (rs < 0) return stbi__err("bad huffman code", "Corrupt JPEG");
                s = rs & 15;
                r = rs >> 4;
                if (s == 0) {
                    if (r < 15) {
                        // EOBn: (1<<r) + extra-bits blocks end here
                        j->eob_run = (1 << r);
                        if (r)
                            j->eob_run += stbi__jpeg_get_bits(j, r);
                        --j->eob_run;
                        break;
                    }
                    k += 16; // ZRL
                }
                else {
                    k += r;
                    zig = stbi__jpeg_dezigzag[k++];
                    data[zig] = (short)(stbi__extend_receive(j, s) << shift);
                }
            }
        } while (k <= j->spec_end);
    }
    else {
        // refinement scan for these AC coefficients

        short bit = (short)(1 << j->succ_low);

        if (j->eob_run) {
            // EOB run: only already-nonzero coefficients receive correction bits
            --j->eob_run;
            for (k = j->spec_start; k <= j->spec_end; ++k) {
                short *p = &data[stbi__jpeg_dezigzag[k]];
                if (*p != 0)
                    if (stbi__jpeg_get_bit(j))
                        if ((*p & bit) == 0) {
                            if (*p > 0)
                                *p += bit;
                            else
                                *p -= bit;
                        }
            }
        }
        else {
            k = j->spec_start;
            do {
                int r, s;
                int rs = stbi__jpeg_huff_decode(j, hac); // @OPTIMIZE see if we can use the fast path here, advance-by-r is so slow, eh
                if (rs < 0) return stbi__err("bad huffman code", "Corrupt JPEG");
                s = rs & 15;
                r = rs >> 4;
                if (s == 0) {
                    if (r < 15) {
                        j->eob_run = (1 << r) - 1;
                        if (r)
                            j->eob_run += stbi__jpeg_get_bits(j, r);
                        r = 64; // force end of block
                    }
                    else {
                        // r=15 s=0 should write 16 0s, so we just do
                        // a run of 15 0s and then write s (which is 0),
                        // so we don't have to do anything special here
                    }
                }
                else {
                    if (s != 1) return stbi__err("bad huffman code", "Corrupt JPEG");
                    // sign bit
                    if (stbi__jpeg_get_bit(j))
                        s = bit;
                    else
                        s = -bit;
                }

                // advance by r
                while (k <= j->spec_end) {
                    short *p = &data[stbi__jpeg_dezigzag[k++]];
                    if (*p != 0) {
                        // nonzero coefficients absorb correction bits as we pass them
                        if (stbi__jpeg_get_bit(j))
                            if ((*p & bit) == 0) {
                                if (*p > 0)
                                    *p += bit;
                                else
                                    *p -= bit;
                            }
                    }
                    else {
                        if (r == 0) {
                            *p = (short)s;
                            break;
                        }
                        --r;
                    }
                }
            } while (k <= j->spec_end);
        }
    }
    return 1;
}

// take a -128..127 value and stbi__clamp it and convert to 0..255
stbi_inline static stbi_uc stbi__clamp(int x)
{
    // trick to use a single test to catch both cases
    if ((unsigned int)x > 255) {
        if (x < 0) return 0;
        if (x > 255) return 255;
    }
    return (stbi_uc)x;
}

#define stbi__f2f(x) ((int) (((x) * 4096 + 0.5)))
#define stbi__fsh(x) ((x) << 12)

// derived from jidctint -- DCT_ISLOW
#define STBI__IDCT_1D(s0,s1,s2,s3,s4,s5,s6,s7) \
    int t0,t1,t2,t3,p1,p2,p3,p4,p5,x0,x1,x2,x3; \
    p2 = s2; \
    p3 = s6; \
    p1 = (p2+p3) * stbi__f2f(0.5411961f); \
    t2 = p1 + p3*stbi__f2f(-1.847759065f); \
    t3 = p1 + p2*stbi__f2f( 0.765366865f); \
    p2 = s0; \
    p3 = s4; \
    t0 = stbi__fsh(p2+p3); \
    t1 = stbi__fsh(p2-p3); \
    x0 = t0+t3; \
    x3 = t0-t3; \
    x1 = t1+t2; \
    x2 = t1-t2; \
    t0 = s7; \
    t1 = s5; \
    t2 = s3; \
    t3 = s1; \
    p3 = t0+t2; \
    p4 = t1+t3; \
    p1 = t0+t3; \
    p2 = t1+t2; \
    p5 = (p3+p4)*stbi__f2f( 1.175875602f); \
    t0 = t0*stbi__f2f( 0.298631336f); \
    t1 = t1*stbi__f2f( 2.053119869f); \
    t2 = t2*stbi__f2f( 3.072711026f); \
    t3 = t3*stbi__f2f( 1.501321110f); \
    p1 = p5 + p1*stbi__f2f(-0.899976223f); \
    p2 = p5 + p2*stbi__f2f(-2.562915447f); \
    p3 = p3*stbi__f2f(-1.961570560f); \
    p4 = p4*stbi__f2f(-0.390180644f); \
    t3 += p1+p4; \
    t2 += p2+p3; \
    t1 += p2+p3; \
    t1 += p2+p4; \
    t0 += p1+p3;

// Generic integer IDCT for one 8x8 block: column pass into `val`, then row
// pass writing clamped 0..255 bytes into `out` (stride `out_stride`).
static void stbi__idct_block(stbi_uc *out, int out_stride, short data[64])
{
    int i, val[64], *v = val;
    stbi_uc *o;
    short *d = data;

    // columns
    for (i = 0; i < 8; ++i, ++d, ++v) {
        // if all zeroes, shortcut -- this avoids dequantizing 0s and IDCTing
        if (d[8] == 0 && d[16] == 0 && d[24] == 0 && d[32] == 0
            && d[40] == 0 && d[48] == 0 && d[56] == 0) {
            //    no shortcut                 0     seconds
            //    (1|2|3|4|5|6|7)==0          0     seconds
            //    all separate               -0.047 seconds
            //    1 && 2|3 && 4|5 && 6|7:    -0.047 seconds
            int dcterm = d[0] << 2;
            v[0] = v[8] = v[16] = v[24] = v[32] = v[40] = v[48] = v[56] = dcterm;
        }
        else {
            STBI__IDCT_1D(d[0], d[8], d[16], d[24], d[32], d[40], d[48], d[56])
            // constants scaled things up by 1<<12; let's bring them back
            // down, but keep 2 extra bits of precision
            x0 += 512; x1 += 512; x2 += 512; x3 += 512;
            v[0] = (x0 + t3) >> 10;
            v[56] = (x0 - t3) >> 10;
            v[8] = (x1 + t2) >> 10;
            v[48] = (x1 - t2) >> 10;
            v[16] = (x2 + t1) >> 10;
            v[40] = (x2 - t1) >> 10;
            v[24] = (x3 + t0) >> 10;
            v[32] = (x3 - t0) >> 10;
        }
    }

    for (i = 0, v = val, o = out; i < 8; ++i, v += 8, o += out_stride) {
        // no fast case since the first 1D IDCT spread components out
        STBI__IDCT_1D(v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7])
        // constants scaled things up by 1<<12, plus we had 1<<2 from first
        // loop, plus horizontal and vertical each scale by sqrt(8) so together
        // we've got an extra 1<<3, so 1<<17 total we need to remove.
        // so we want to round that, which means adding 0.5 * 1<<17,
        // aka 65536. Also, we'll end up with -128 to 127 that we want
        // to encode as 0..255 by adding 128, so we'll add that before the shift
        x0 += 65536 + (128 << 17);
        x1 += 65536 + (128 << 17);
        x2 += 65536 + (128 << 17);
        x3 += 65536 + (128 << 17);
        // tried computing the shifts into temps, or'ing the temps to see
        // if any were out of range, but that was slower
        o[0] = stbi__clamp((x0 + t3) >> 17);
        o[7] = stbi__clamp((x0 - t3) >> 17);
        o[1] = stbi__clamp((x1 + t2) >> 17);
        o[6] = stbi__clamp((x1 - t2) >> 17);
        o[2] = stbi__clamp((x2 + t1) >> 17);
        o[5] = stbi__clamp((x2 - t1) >> 17);
        o[3] = stbi__clamp((x3 + t0) >> 17);
        o[4] = stbi__clamp((x3 - t0) >> 17);
    }
}

#ifdef STBI_SSE2
// sse2 integer IDCT. not the fastest possible implementation but it
// produces bit-identical results to the generic C version so it's
// fully "transparent".
static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64])
{
    // This is constructed to match our regular (generic) integer IDCT exactly.
    __m128i row0, row1, row2, row3, row4, row5, row6, row7;
    __m128i tmp;

    // dot product constant: even elems=x, odd elems=y
#define dct_const(x,y) _mm_setr_epi16((x),(y),(x),(y),(x),(y),(x),(y))

    // out(0) = c0[even]*x + c0[odd]*y (c0, x, y 16-bit, out 32-bit)
    // out(1) = c1[even]*x + c1[odd]*y
#define dct_rot(out0,out1, x,y,c0,c1) \
    __m128i c0##lo = _mm_unpacklo_epi16((x),(y)); \
    __m128i c0##hi = _mm_unpackhi_epi16((x),(y)); \
    __m128i out0##_l = _mm_madd_epi16(c0##lo, c0); \
    __m128i out0##_h = _mm_madd_epi16(c0##hi, c0); \
    __m128i out1##_l = _mm_madd_epi16(c0##lo, c1); \
    __m128i out1##_h = _mm_madd_epi16(c0##hi, c1)

    // out = in << 12 (in 16-bit, out 32-bit)
#define dct_widen(out, in) \
    __m128i out##_l = _mm_srai_epi32(_mm_unpacklo_epi16(_mm_setzero_si128(), (in)), 4); \
    __m128i out##_h = _mm_srai_epi32(_mm_unpackhi_epi16(_mm_setzero_si128(), (in)), 4)

    // wide add
#define dct_wadd(out, a, b) \
    __m128i out##_l = _mm_add_epi32(a##_l, b##_l); \
    __m128i out##_h = _mm_add_epi32(a##_h, b##_h)

    // wide sub
#define dct_wsub(out, a, b) \
    __m128i out##_l = _mm_sub_epi32(a##_l, b##_l); \
    __m128i out##_h = _mm_sub_epi32(a##_h, b##_h)

    // butterfly a/b, add bias, then shift by "s" and pack
#define dct_bfly32o(out0, out1, a,b,bias,s) \
    { \
        __m128i abiased_l = _mm_add_epi32(a##_l, bias); \
        __m128i abiased_h = _mm_add_epi32(a##_h, bias); \
        dct_wadd(sum, abiased, b); \
        dct_wsub(dif, abiased, b); \
        out0 = _mm_packs_epi32(_mm_srai_epi32(sum_l, s), _mm_srai_epi32(sum_h, s)); \
        out1 = _mm_packs_epi32(_mm_srai_epi32(dif_l, s), _mm_srai_epi32(dif_h, s)); \
    }

    // 8-bit interleave step (for transposes)
#define dct_interleave8(a, b) \
    tmp = a; \
    a = _mm_unpacklo_epi8(a, b); \
    b = _mm_unpackhi_epi8(tmp, b)

    // 16-bit interleave step (for transposes)
#define dct_interleave16(a, b) \
    tmp = a; \
    a = _mm_unpacklo_epi16(a, b); \
    b = _mm_unpackhi_epi16(tmp, b)

#define dct_pass(bias,shift) \
    { \
        /* even part */ \
        dct_rot(t2e,t3e, row2,row6, rot0_0,rot0_1); \
        __m128i sum04 = _mm_add_epi16(row0, row4); \
        __m128i dif04 = _mm_sub_epi16(row0, row4); \
        dct_widen(t0e, sum04); \
        dct_widen(t1e, dif04); \
        dct_wadd(x0, t0e, t3e); \
        dct_wsub(x3, t0e, t3e); \
        dct_wadd(x1, t1e, t2e); \
        dct_wsub(x2, t1e, t2e); \
        /* odd part */ \
        dct_rot(y0o,y2o, row7,row3, rot2_0,rot2_1); \
        dct_rot(y1o,y3o, row5,row1, rot3_0,rot3_1); \
        __m128i sum17 = _mm_add_epi16(row1, row7); \
        __m128i sum35 = _mm_add_epi16(row3, row5); \
        dct_rot(y4o,y5o, sum17,sum35, rot1_0,rot1_1); \
        dct_wadd(x4, y0o, y4o); \
        dct_wadd(x5, y1o, y5o); \
        dct_wadd(x6, y2o, y5o); \
        dct_wadd(x7, y3o, y4o); \
        dct_bfly32o(row0,row7, x0,x7,bias,shift); \
        dct_bfly32o(row1,row6, x1,x6,bias,shift); \
        dct_bfly32o(row2,row5, x2,x5,bias,shift); \
        dct_bfly32o(row3,row4, x3,x4,bias,shift); \
    }

    __m128i rot0_0 = dct_const(stbi__f2f(0.5411961f), stbi__f2f(0.5411961f) + stbi__f2f(-1.847759065f));
    __m128i rot0_1 = dct_const(stbi__f2f(0.5411961f) + stbi__f2f(0.765366865f), stbi__f2f(0.5411961f));
    __m128i rot1_0 = dct_const(stbi__f2f(1.175875602f) + stbi__f2f(-0.899976223f), stbi__f2f(1.175875602f));
    __m128i rot1_1 = dct_const(stbi__f2f(1.175875602f), stbi__f2f(1.175875602f) + stbi__f2f(-2.562915447f));
    __m128i rot2_0 = dct_const(stbi__f2f(-1.961570560f) + stbi__f2f(0.298631336f), stbi__f2f(-1.961570560f));
    __m128i rot2_1 = dct_const(stbi__f2f(-1.961570560f), stbi__f2f(-1.961570560f) + stbi__f2f(3.072711026f));
    __m128i rot3_0 = dct_const(stbi__f2f(-0.390180644f) + stbi__f2f(2.053119869f), stbi__f2f(-0.390180644f));
    __m128i rot3_1 = dct_const(stbi__f2f(-0.390180644f), stbi__f2f(-0.390180644f) + stbi__f2f(1.501321110f));

    // rounding biases in column/row passes, see stbi__idct_block for explanation.
    __m128i bias_0 = _mm_set1_epi32(512);
    __m128i bias_1 = _mm_set1_epi32(65536 + (128 << 17));

    // load
    row0 = _mm_load_si128((const __m128i *) (data + 0 * 8));
    row1 = _mm_load_si128((const __m128i *) (data + 1 * 8));
    row2 = _mm_load_si128((const __m128i *) (data + 2 * 8));
    row3 = _mm_load_si128((const __m128i *) (data + 3 * 8));
    row4 = _mm_load_si128((const __m128i *) (data + 4 * 8));
    row5 = _mm_load_si128((const __m128i *) (data + 5 * 8));
    row6 = _mm_load_si128((const __m128i *) (data + 6 * 8));
    row7 = _mm_load_si128((const __m128i *) (data + 7 * 8));

    // column pass
    dct_pass(bias_0, 10);

    {
        // 16bit 8x8 transpose pass 1
        dct_interleave16(row0, row4);
        dct_interleave16(row1, row5);
        dct_interleave16(row2, row6);
        dct_interleave16(row3, row7);

        // transpose pass 2
        dct_interleave16(row0, row2);
        dct_interleave16(row1, row3);
        dct_interleave16(row4, row6);
        dct_interleave16(row5, row7);

        // transpose pass 3
        dct_interleave16(row0, row1);
        dct_interleave16(row2, row3);
        dct_interleave16(row4, row5);
        dct_interleave16(row6, row7);
    }

    // row pass
    dct_pass(bias_1, 17);

    {
        // pack
        __m128i p0 = _mm_packus_epi16(row0, row1); // a0a1a2a3...a7b0b1b2b3...b7
        __m128i p1 = _mm_packus_epi16(row2, row3);
        __m128i p2 = _mm_packus_epi16(row4, row5);
        __m128i p3 = _mm_packus_epi16(row6, row7);

        // 8bit 8x8 transpose pass 1
        dct_interleave8(p0, p2); // a0e0a1e1...
        dct_interleave8(p1, p3); // c0g0c1g1...

        // transpose pass 2
        dct_interleave8(p0, p1); // a0c0e0g0...
        dct_interleave8(p2, p3); // b0d0f0h0...

        // transpose pass 3
        dct_interleave8(p0, p2); // a0b0c0d0...
        dct_interleave8(p1, p3); // a4b4c4d4...

        // store
        _mm_storel_epi64((__m128i *) out, p0); out += out_stride;
        _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p0, 0x4e)); out += out_stride;
        _mm_storel_epi64((__m128i *) out, p2); out += out_stride;
        _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p2, 0x4e)); out += out_stride;
        _mm_storel_epi64((__m128i *) out, p1); out += out_stride;
        _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p1, 0x4e)); out += out_stride;
        _mm_storel_epi64((__m128i *) out, p3); out += out_stride;
        _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p3, 0x4e));
    }

#undef dct_const
#undef dct_rot
#undef dct_widen
#undef dct_wadd
#undef dct_wsub
#undef dct_bfly32o
#undef dct_interleave8
#undef dct_interleave16
#undef dct_pass
}

#endif // STBI_SSE2

#ifdef STBI_NEON

// NEON integer IDCT. should produce bit-identical
// results to the generic C version.
static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64])
{
    int16x8_t row0, row1, row2, row3, row4, row5, row6, row7;

    int16x4_t rot0_0 = vdup_n_s16(stbi__f2f(0.5411961f));
    int16x4_t rot0_1 = vdup_n_s16(stbi__f2f(-1.847759065f));
    int16x4_t rot0_2 = vdup_n_s16(stbi__f2f(0.765366865f));
    int16x4_t rot1_0 = vdup_n_s16(stbi__f2f(1.175875602f));
    int16x4_t rot1_1 = vdup_n_s16(stbi__f2f(-0.899976223f));
    int16x4_t rot1_2 = vdup_n_s16(stbi__f2f(-2.562915447f));
    int16x4_t rot2_0 = vdup_n_s16(stbi__f2f(-1.961570560f));
    int16x4_t rot2_1 = vdup_n_s16(stbi__f2f(-0.390180644f));
    int16x4_t rot3_0 = vdup_n_s16(stbi__f2f(0.298631336f));
    int16x4_t rot3_1 = vdup_n_s16(stbi__f2f(2.053119869f));
    int16x4_t rot3_2 = vdup_n_s16(stbi__f2f(3.072711026f));
    int16x4_t rot3_3 = vdup_n_s16(stbi__f2f(1.501321110f));

#define dct_long_mul(out, inq, coeff) \
    int32x4_t out##_l = vmull_s16(vget_low_s16(inq), coeff); \
    int32x4_t out##_h = vmull_s16(vget_high_s16(inq), coeff)

#define dct_long_mac(out, acc, inq, coeff) \
    int32x4_t out##_l = vmlal_s16(acc##_l, vget_low_s16(inq), \
    coeff); \
    int32x4_t out##_h = vmlal_s16(acc##_h, vget_high_s16(inq), coeff)

    // out = in << 12 (in 16-bit, out 32-bit)
#define dct_widen(out, inq) \
    int32x4_t out##_l = vshll_n_s16(vget_low_s16(inq), 12); \
    int32x4_t out##_h = vshll_n_s16(vget_high_s16(inq), 12)

    // wide add
#define dct_wadd(out, a, b) \
    int32x4_t out##_l = vaddq_s32(a##_l, b##_l); \
    int32x4_t out##_h = vaddq_s32(a##_h, b##_h)

    // wide sub
#define dct_wsub(out, a, b) \
    int32x4_t out##_l = vsubq_s32(a##_l, b##_l); \
    int32x4_t out##_h = vsubq_s32(a##_h, b##_h)

    // butterfly a/b, then shift using "shiftop" by "s" and pack
#define dct_bfly32o(out0,out1, a,b,shiftop,s) \
    { \
        dct_wadd(sum, a, b); \
        dct_wsub(dif, a, b); \
        out0 = vcombine_s16(shiftop(sum_l, s), shiftop(sum_h, s)); \
        out1 = vcombine_s16(shiftop(dif_l, s), shiftop(dif_h, s)); \
    }

#define dct_pass(shiftop, shift) \
    { \
        /* even part */ \
        int16x8_t sum26 = vaddq_s16(row2, row6); \
        dct_long_mul(p1e, sum26, rot0_0); \
        dct_long_mac(t2e, p1e, row6, rot0_1); \
        dct_long_mac(t3e, p1e, row2, rot0_2); \
        int16x8_t sum04 = vaddq_s16(row0, row4); \
        int16x8_t dif04 = vsubq_s16(row0, row4); \
        dct_widen(t0e, sum04); \
        dct_widen(t1e, dif04); \
        dct_wadd(x0, t0e, t3e); \
        dct_wsub(x3, t0e, t3e); \
        dct_wadd(x1, t1e, t2e); \
        dct_wsub(x2, t1e, t2e); \
        /* odd part */ \
        int16x8_t sum15 = vaddq_s16(row1, row5); \
        int16x8_t sum17 = vaddq_s16(row1, row7); \
        int16x8_t sum35 = vaddq_s16(row3, row5); \
        int16x8_t sum37 = vaddq_s16(row3, row7); \
        int16x8_t sumodd = vaddq_s16(sum17, sum35); \
        dct_long_mul(p5o, sumodd, rot1_0); \
        dct_long_mac(p1o, p5o, sum17, rot1_1); \
        dct_long_mac(p2o, p5o, sum35, rot1_2); \
        dct_long_mul(p3o, sum37, rot2_0); \
        dct_long_mul(p4o, sum15, rot2_1); \
        dct_wadd(sump13o, p1o, p3o); \
        dct_wadd(sump24o, p2o, p4o); \
        dct_wadd(sump23o, p2o, p3o); \
        dct_wadd(sump14o, p1o, p4o); \
        dct_long_mac(x4, sump13o, row7, rot3_0); \
        dct_long_mac(x5, sump24o, row5, rot3_1); \
        dct_long_mac(x6, sump23o, row3, rot3_2); \
        dct_long_mac(x7, sump14o, row1, rot3_3); \
        dct_bfly32o(row0,row7, x0,x7,shiftop,shift); \
        dct_bfly32o(row1,row6, x1,x6,shiftop,shift); \
        dct_bfly32o(row2,row5, x2,x5,shiftop,shift); \
        dct_bfly32o(row3,row4, x3,x4,shiftop,shift); \
    }

    // load
    row0 = vld1q_s16(data + 0 * 8);
    row1 = vld1q_s16(data + 1 * 8);
    row2 = vld1q_s16(data + 2 * 8);
    row3 = vld1q_s16(data + 3 * 8);
    row4 = vld1q_s16(data + 4 * 8);
    row5 = vld1q_s16(data + 5 * 8);
    row6 = vld1q_s16(data + 6 * 8);
    row7 = vld1q_s16(data + 7 * 8);

    // add DC bias
    row0 = vaddq_s16(row0, vsetq_lane_s16(1024, vdupq_n_s16(0), 0));

    // column pass
    dct_pass(vrshrn_n_s32, 10);

    // 16bit 8x8 transpose
    {
        // these three map to a single VTRN.16, VTRN.32, and VSWP, respectively.
        // whether compilers actually get this is another story, sadly.
#define dct_trn16(x, y) { int16x8x2_t t = vtrnq_s16(x, y); x = t.val[0]; y = t.val[1]; }
#define dct_trn32(x, y) { int32x4x2_t t = vtrnq_s32(vreinterpretq_s32_s16(x), vreinterpretq_s32_s16(y)); x = vreinterpretq_s16_s32(t.val[0]); y = vreinterpretq_s16_s32(t.val[1]); }
#define dct_trn64(x, y) { int16x8_t x0 = x; int16x8_t y0 = y; x = vcombine_s16(vget_low_s16(x0), vget_low_s16(y0)); y = vcombine_s16(vget_high_s16(x0), vget_high_s16(y0)); }

        // pass 1
        dct_trn16(row0, row1); // a0b0a2b2a4b4a6b6
        dct_trn16(row2, row3);
        dct_trn16(row4, row5);
        dct_trn16(row6, row7);

        // pass 2
        dct_trn32(row0, row2); // a0b0c0d0a4b4c4d4
        dct_trn32(row1, row3);
        dct_trn32(row4, row6);
        dct_trn32(row5, row7);

        // pass 3
        dct_trn64(row0, row4); // a0b0c0d0e0f0g0h0
        dct_trn64(row1, row5);
        dct_trn64(row2, row6);
        dct_trn64(row3, row7);

#undef dct_trn16
#undef dct_trn32
#undef dct_trn64
    }

    // row pass
    // vrshrn_n_s32 only supports shifts up to 16, we need
    // 17. so do a non-rounding shift of 16 first then follow
    // up with a rounding shift by 1.
    dct_pass(vshrn_n_s32, 16);

    {
        // pack and round
        uint8x8_t p0 = vqrshrun_n_s16(row0, 1);
        uint8x8_t p1 = vqrshrun_n_s16(row1, 1);
        uint8x8_t p2 = vqrshrun_n_s16(row2, 1);
        uint8x8_t p3 = vqrshrun_n_s16(row3, 1);
        uint8x8_t p4 = vqrshrun_n_s16(row4, 1);
        uint8x8_t p5 = vqrshrun_n_s16(row5, 1);
        uint8x8_t p6 = vqrshrun_n_s16(row6, 1);
        uint8x8_t p7 = vqrshrun_n_s16(row7, 1);

        // again, these can translate into one instruction, but often don't.
#define dct_trn8_8(x, y) { uint8x8x2_t t = vtrn_u8(x, y); x = t.val[0]; y = t.val[1]; }
#define dct_trn8_16(x, y) { uint16x4x2_t t = vtrn_u16(vreinterpret_u16_u8(x), vreinterpret_u16_u8(y)); x = vreinterpret_u8_u16(t.val[0]); y = vreinterpret_u8_u16(t.val[1]); }
#define dct_trn8_32(x, y) { uint32x2x2_t t = vtrn_u32(vreinterpret_u32_u8(x), vreinterpret_u32_u8(y)); x = vreinterpret_u8_u32(t.val[0]); y = vreinterpret_u8_u32(t.val[1]); }

        // sadly can't use interleaved stores here since we only write
        // 8 bytes to each scan line!

        // 8x8 8-bit transpose pass 1
        dct_trn8_8(p0, p1);
        dct_trn8_8(p2, p3);
        dct_trn8_8(p4, p5);
        dct_trn8_8(p6, p7);

        // pass 2
        dct_trn8_16(p0, p2);
        dct_trn8_16(p1, p3);
        dct_trn8_16(p4, p6);
        dct_trn8_16(p5, p7);

        // pass 3
        dct_trn8_32(p0, p4);
        dct_trn8_32(p1, p5);
        dct_trn8_32(p2, p6);
        dct_trn8_32(p3, p7);

        // store
        vst1_u8(out, p0); out += out_stride;
        vst1_u8(out, p1); out += out_stride;
        vst1_u8(out, p2); out += out_stride;
        vst1_u8(out, p3); out += out_stride;
        vst1_u8(out, p4); out += out_stride;
        vst1_u8(out, p5); out += out_stride;
        vst1_u8(out, p6); out += out_stride;
        vst1_u8(out, p7);

#undef dct_trn8_8
#undef dct_trn8_16
#undef dct_trn8_32
    }

#undef dct_long_mul
#undef dct_long_mac
#undef dct_widen
#undef dct_wadd
#undef dct_wsub
#undef dct_bfly32o
#undef dct_pass
}

#endif // STBI_NEON

#define STBI__MARKER_none 0xff
// if there's a pending marker from the entropy stream, return that
// otherwise, fetch from the stream and get a marker. if there's no
// marker, return 0xff, which is never a valid marker value
static stbi_uc stbi__get_marker(stbi__jpeg *j)
{
    stbi_uc x;
    if (j->marker != STBI__MARKER_none) { x = j->marker; j->marker = STBI__MARKER_none; return x; }
    x = stbi__get8(j->s);
    if (x != 0xff) return STBI__MARKER_none;
    // skip 0xff fill bytes preceding the marker code
    while (x == 0xff)
        x = stbi__get8(j->s);
    return x;
}

// in each scan, we'll have scan_n components, and the order
// of the components is specified by order[]
#define STBI__RESTART(x) ((x) >= 0xd0 && (x) <= 0xd7)

// after a restart interval, stbi__jpeg_reset the entropy decoder and
// the dc prediction
static void stbi__jpeg_reset(stbi__jpeg *j)
{
    j->code_bits = 0;
    j->code_buffer = 0;
    j->nomore = 0;
    j->img_comp[0].dc_pred = j->img_comp[1].dc_pred = j->img_comp[2].dc_pred = 0;
    j->marker = STBI__MARKER_none;
    j->todo = j->restart_interval ?
j->restart_interval : 0x7fffffff; + j->eob_run = 0; + // no more than 1<<31 MCUs if no restart_interal? that's plenty safe, + // since we don't even allow 1<<30 pixels +} + +static int stbi__parse_entropy_coded_data(stbi__jpeg *z) +{ + stbi__jpeg_reset(z); + if (!z->progressive) { + if (z->scan_n == 1) { + int i, j; + STBI_SIMD_ALIGN(short, data[64]); + int n = z->order[0]; + // non-interleaved data, we just need to process one block at a time, + // in trivial scanline order + // number of blocks to do just depends on how many actual "pixels" this + // component has, independent of interleaved MCU blocking and such + int w = (z->img_comp[n].x + 7) >> 3; + int h = (z->img_comp[n].y + 7) >> 3; + for (j = 0; j < h; ++j) { + for (i = 0; i < w; ++i) { + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block(z, data, z->huff_dc + z->img_comp[n].hd, z->huff_ac + ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0; + z->idct_block_kernel(z->img_comp[n].data + z->img_comp[n].w2*j * 8 + i * 8, z->img_comp[n].w2, data); + // every data block is an MCU, so countdown the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + // if it's NOT a restart, then just bail, so we get corrupt data + // rather than no data + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + else { // interleaved + int i, j, k, x, y; + STBI_SIMD_ALIGN(short, data[64]); + for (j = 0; j < z->img_mcu_y; ++j) { + for (i = 0; i < z->img_mcu_x; ++i) { + // scan an interleaved mcu... 
process scan_n components in order + for (k = 0; k < z->scan_n; ++k) { + int n = z->order[k]; + // scan out an mcu's worth of this component; that's just determined + // by the basic H and V specified for the component + for (y = 0; y < z->img_comp[n].v; ++y) { + for (x = 0; x < z->img_comp[n].h; ++x) { + int x2 = (i*z->img_comp[n].h + x) * 8; + int y2 = (j*z->img_comp[n].v + y) * 8; + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block(z, data, z->huff_dc + z->img_comp[n].hd, z->huff_ac + ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0; + z->idct_block_kernel(z->img_comp[n].data + z->img_comp[n].w2*y2 + x2, z->img_comp[n].w2, data); + } + } + } + // after all interleaved components, that's an interleaved MCU, + // so now count down the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + } + else { + if (z->scan_n == 1) { + int i, j; + int n = z->order[0]; + // non-interleaved data, we just need to process one block at a time, + // in trivial scanline order + // number of blocks to do just depends on how many actual "pixels" this + // component has, independent of interleaved MCU blocking and such + int w = (z->img_comp[n].x + 7) >> 3; + int h = (z->img_comp[n].y + 7) >> 3; + for (j = 0; j < h; ++j) { + for (i = 0; i < w; ++i) { + short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); + if (z->spec_start == 0) { + if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) + return 0; + } + else { + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block_prog_ac(z, data, &z->huff_ac[ha], z->fast_ac[ha])) + return 0; + } + // every data block is an MCU, so countdown the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + else 
{ // interleaved + int i, j, k, x, y; + for (j = 0; j < z->img_mcu_y; ++j) { + for (i = 0; i < z->img_mcu_x; ++i) { + // scan an interleaved mcu... process scan_n components in order + for (k = 0; k < z->scan_n; ++k) { + int n = z->order[k]; + // scan out an mcu's worth of this component; that's just determined + // by the basic H and V specified for the component + for (y = 0; y < z->img_comp[n].v; ++y) { + for (x = 0; x < z->img_comp[n].h; ++x) { + int x2 = (i*z->img_comp[n].h + x); + int y2 = (j*z->img_comp[n].v + y); + short *data = z->img_comp[n].coeff + 64 * (x2 + y2 * z->img_comp[n].coeff_w); + if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) + return 0; + } + } + } + // after all interleaved components, that's an interleaved MCU, + // so now count down the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + } +} + +static void stbi__jpeg_dequantize(short *data, stbi_uc *dequant) +{ + int i; + for (i = 0; i < 64; ++i) + data[i] *= dequant[i]; +} + +static void stbi__jpeg_finish(stbi__jpeg *z) +{ + if (z->progressive) { + // dequantize and idct the data + int i, j, n; + for (n = 0; n < z->s->img_n; ++n) { + int w = (z->img_comp[n].x + 7) >> 3; + int h = (z->img_comp[n].y + 7) >> 3; + for (j = 0; j < h; ++j) { + for (i = 0; i < w; ++i) { + short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); + stbi__jpeg_dequantize(data, z->dequant[z->img_comp[n].tq]); + z->idct_block_kernel(z->img_comp[n].data + z->img_comp[n].w2*j * 8 + i * 8, z->img_comp[n].w2, data); + } + } + } + } +} + +static int stbi__process_marker(stbi__jpeg *z, int m) +{ + int L; + switch (m) { + case STBI__MARKER_none: // no marker found + return stbi__err("expected marker", "Corrupt JPEG"); + + case 0xDD: // DRI - specify restart interval + if (stbi__get16be(z->s) != 4) return stbi__err("bad DRI len", 
"Corrupt JPEG"); + z->restart_interval = stbi__get16be(z->s); + return 1; + + case 0xDB: // DQT - define quantization table + L = stbi__get16be(z->s) - 2; + while (L > 0) { + int q = stbi__get8(z->s); + int p = q >> 4; + int t = q & 15, i; + if (p != 0) return stbi__err("bad DQT type", "Corrupt JPEG"); + if (t > 3) return stbi__err("bad DQT table", "Corrupt JPEG"); + for (i = 0; i < 64; ++i) + z->dequant[t][stbi__jpeg_dezigzag[i]] = stbi__get8(z->s); + L -= 65; + } + return L == 0; + + case 0xC4: // DHT - define huffman table + L = stbi__get16be(z->s) - 2; + while (L > 0) { + stbi_uc *v; + int sizes[16], i, n = 0; + int q = stbi__get8(z->s); + int tc = q >> 4; + int th = q & 15; + if (tc > 1 || th > 3) return stbi__err("bad DHT header", "Corrupt JPEG"); + for (i = 0; i < 16; ++i) { + sizes[i] = stbi__get8(z->s); + n += sizes[i]; + } + L -= 17; + if (tc == 0) { + if (!stbi__build_huffman(z->huff_dc + th, sizes)) return 0; + v = z->huff_dc[th].values; + } + else { + if (!stbi__build_huffman(z->huff_ac + th, sizes)) return 0; + v = z->huff_ac[th].values; + } + for (i = 0; i < n; ++i) + v[i] = stbi__get8(z->s); + if (tc != 0) + stbi__build_fast_ac(z->fast_ac[th], z->huff_ac + th); + L -= n; + } + return L == 0; + } + // check for comment block or APP blocks + if ((m >= 0xE0 && m <= 0xEF) || m == 0xFE) { + stbi__skip(z->s, stbi__get16be(z->s) - 2); + return 1; + } + return 0; +} + +// after we see SOS +static int stbi__process_scan_header(stbi__jpeg *z) +{ + int i; + int Ls = stbi__get16be(z->s); + z->scan_n = stbi__get8(z->s); + if (z->scan_n < 1 || z->scan_n > 4 || z->scan_n > (int)z->s->img_n) return stbi__err("bad SOS component count", "Corrupt JPEG"); + if (Ls != 6 + 2 * z->scan_n) return stbi__err("bad SOS len", "Corrupt JPEG"); + for (i = 0; i < z->scan_n; ++i) { + int id = stbi__get8(z->s), which; + int q = stbi__get8(z->s); + for (which = 0; which < z->s->img_n; ++which) + if (z->img_comp[which].id == id) + break; + if (which == z->s->img_n) return 0; // no 
match + z->img_comp[which].hd = q >> 4; if (z->img_comp[which].hd > 3) return stbi__err("bad DC huff", "Corrupt JPEG"); + z->img_comp[which].ha = q & 15; if (z->img_comp[which].ha > 3) return stbi__err("bad AC huff", "Corrupt JPEG"); + z->order[i] = which; + } + + { + int aa; + z->spec_start = stbi__get8(z->s); + z->spec_end = stbi__get8(z->s); // should be 63, but might be 0 + aa = stbi__get8(z->s); + z->succ_high = (aa >> 4); + z->succ_low = (aa & 15); + if (z->progressive) { + if (z->spec_start > 63 || z->spec_end > 63 || z->spec_start > z->spec_end || z->succ_high > 13 || z->succ_low > 13) + return stbi__err("bad SOS", "Corrupt JPEG"); + } + else { + if (z->spec_start != 0) return stbi__err("bad SOS", "Corrupt JPEG"); + if (z->succ_high != 0 || z->succ_low != 0) return stbi__err("bad SOS", "Corrupt JPEG"); + z->spec_end = 63; + } + } + + return 1; +} + +static int stbi__free_jpeg_components(stbi__jpeg *z, int ncomp, int why) +{ + int i; + for (i = 0; i < ncomp; ++i) { + if (z->img_comp[i].raw_data) { + STBI_FREE(z->img_comp[i].raw_data); + z->img_comp[i].raw_data = NULL; + z->img_comp[i].data = NULL; + } + if (z->img_comp[i].raw_coeff) { + STBI_FREE(z->img_comp[i].raw_coeff); + z->img_comp[i].raw_coeff = 0; + z->img_comp[i].coeff = 0; + } + if (z->img_comp[i].linebuf) { + STBI_FREE(z->img_comp[i].linebuf); + z->img_comp[i].linebuf = NULL; + } + } + return why; +} + +static int stbi__process_frame_header(stbi__jpeg *z, int scan) +{ + stbi__context *s = z->s; + int Lf, p, i, q, h_max = 1, v_max = 1, c; + Lf = stbi__get16be(s); if (Lf < 11) return stbi__err("bad SOF len", "Corrupt JPEG"); // JPEG + p = stbi__get8(s); if (p != 8) return stbi__err("only 8-bit", "JPEG format not supported: 8-bit only"); // JPEG baseline + s->img_y = stbi__get16be(s); if (s->img_y == 0) return stbi__err("no header height", "JPEG format not supported: delayed height"); // Legal, but we don't handle it--but neither does IJG + s->img_x = stbi__get16be(s); if (s->img_x == 0) return 
stbi__err("0 width", "Corrupt JPEG"); // JPEG requires + c = stbi__get8(s); + if (c != 3 && c != 1) return stbi__err("bad component count", "Corrupt JPEG"); // JFIF requires + s->img_n = c; + for (i = 0; i < c; ++i) { + z->img_comp[i].data = NULL; + z->img_comp[i].linebuf = NULL; + } + + if (Lf != 8 + 3 * s->img_n) return stbi__err("bad SOF len", "Corrupt JPEG"); + + z->rgb = 0; + for (i = 0; i < s->img_n; ++i) { + static unsigned char rgb[3] = { 'R', 'G', 'B' }; + z->img_comp[i].id = stbi__get8(s); + if (z->img_comp[i].id != i + 1) // JFIF requires + if (z->img_comp[i].id != i) { // some version of jpegtran outputs non-JFIF-compliant files! + // somethings output this (see http://fileformats.archiveteam.org/wiki/JPEG#Color_format) + if (z->img_comp[i].id != rgb[i]) + return stbi__err("bad component ID", "Corrupt JPEG"); + ++z->rgb; + } + q = stbi__get8(s); + z->img_comp[i].h = (q >> 4); if (!z->img_comp[i].h || z->img_comp[i].h > 4) return stbi__err("bad H", "Corrupt JPEG"); + z->img_comp[i].v = q & 15; if (!z->img_comp[i].v || z->img_comp[i].v > 4) return stbi__err("bad V", "Corrupt JPEG"); + z->img_comp[i].tq = stbi__get8(s); if (z->img_comp[i].tq > 3) return stbi__err("bad TQ", "Corrupt JPEG"); + } + + if (scan != STBI__SCAN_load) return 1; + + if (!stbi__mad3sizes_valid(s->img_x, s->img_y, s->img_n, 0)) return stbi__err("too large", "Image too large to decode"); + + for (i = 0; i < s->img_n; ++i) { + if (z->img_comp[i].h > h_max) h_max = z->img_comp[i].h; + if (z->img_comp[i].v > v_max) v_max = z->img_comp[i].v; + } + + // compute interleaved mcu info + z->img_h_max = h_max; + z->img_v_max = v_max; + z->img_mcu_w = h_max * 8; + z->img_mcu_h = v_max * 8; + // these sizes can't be more than 17 bits + z->img_mcu_x = (s->img_x + z->img_mcu_w - 1) / z->img_mcu_w; + z->img_mcu_y = (s->img_y + z->img_mcu_h - 1) / z->img_mcu_h; + + for (i = 0; i < s->img_n; ++i) { + // number of effective pixels (e.g. 
for non-interleaved MCU) + z->img_comp[i].x = (s->img_x * z->img_comp[i].h + h_max - 1) / h_max; + z->img_comp[i].y = (s->img_y * z->img_comp[i].v + v_max - 1) / v_max; + // to simplify generation, we'll allocate enough memory to decode + // the bogus oversized data from using interleaved MCUs and their + // big blocks (e.g. a 16x16 iMCU on an image of width 33); we won't + // discard the extra data until colorspace conversion + // + // img_mcu_x, img_mcu_y: <=17 bits; comp[i].h and .v are <=4 (checked earlier) + // so these muls can't overflow with 32-bit ints (which we require) + z->img_comp[i].w2 = z->img_mcu_x * z->img_comp[i].h * 8; + z->img_comp[i].h2 = z->img_mcu_y * z->img_comp[i].v * 8; + z->img_comp[i].coeff = 0; + z->img_comp[i].raw_coeff = 0; + z->img_comp[i].linebuf = NULL; + z->img_comp[i].raw_data = stbi__malloc_mad2(z->img_comp[i].w2, z->img_comp[i].h2, 15); + if (z->img_comp[i].raw_data == NULL) + return stbi__free_jpeg_components(z, i + 1, stbi__err("outofmem", "Out of memory")); + // align blocks for idct using mmx/sse + z->img_comp[i].data = (stbi_uc*)(((size_t)z->img_comp[i].raw_data + 15) & ~15); + if (z->progressive) { + // w2, h2 are multiples of 8 (see above) + z->img_comp[i].coeff_w = z->img_comp[i].w2 / 8; + z->img_comp[i].coeff_h = z->img_comp[i].h2 / 8; + z->img_comp[i].raw_coeff = stbi__malloc_mad3(z->img_comp[i].w2, z->img_comp[i].h2, sizeof(short), 15); + if (z->img_comp[i].raw_coeff == NULL) + return stbi__free_jpeg_components(z, i + 1, stbi__err("outofmem", "Out of memory")); + z->img_comp[i].coeff = (short*)(((size_t)z->img_comp[i].raw_coeff + 15) & ~15); + } + } + + return 1; +} + +// use comparisons since in some cases we handle more than one case (e.g. 
SOF) +#define stbi__DNL(x) ((x) == 0xdc) +#define stbi__SOI(x) ((x) == 0xd8) +#define stbi__EOI(x) ((x) == 0xd9) +#define stbi__SOF(x) ((x) == 0xc0 || (x) == 0xc1 || (x) == 0xc2) +#define stbi__SOS(x) ((x) == 0xda) + +#define stbi__SOF_progressive(x) ((x) == 0xc2) + +static int stbi__decode_jpeg_header(stbi__jpeg *z, int scan) +{ + int m; + z->marker = STBI__MARKER_none; // initialize cached marker to empty + m = stbi__get_marker(z); + if (!stbi__SOI(m)) return stbi__err("no SOI", "Corrupt JPEG"); + if (scan == STBI__SCAN_type) return 1; + m = stbi__get_marker(z); + while (!stbi__SOF(m)) { + if (!stbi__process_marker(z, m)) return 0; + m = stbi__get_marker(z); + while (m == STBI__MARKER_none) { + // some files have extra padding after their blocks, so ok, we'll scan + if (stbi__at_eof(z->s)) return stbi__err("no SOF", "Corrupt JPEG"); + m = stbi__get_marker(z); + } + } + z->progressive = stbi__SOF_progressive(m); + if (!stbi__process_frame_header(z, scan)) return 0; + return 1; +} + +// decode image to YCbCr format +static int stbi__decode_jpeg_image(stbi__jpeg *j) +{ + int m; + for (m = 0; m < 4; m++) { + j->img_comp[m].raw_data = NULL; + j->img_comp[m].raw_coeff = NULL; + } + j->restart_interval = 0; + if (!stbi__decode_jpeg_header(j, STBI__SCAN_load)) return 0; + m = stbi__get_marker(j); + while (!stbi__EOI(m)) { + if (stbi__SOS(m)) { + if (!stbi__process_scan_header(j)) return 0; + if (!stbi__parse_entropy_coded_data(j)) return 0; + if (j->marker == STBI__MARKER_none) { + // handle 0s at the end of image data from IP Kamera 9060 + while (!stbi__at_eof(j->s)) { + int x = stbi__get8(j->s); + if (x == 255) { + j->marker = stbi__get8(j->s); + break; + } + else if (x != 0) { + return stbi__err("junk before marker", "Corrupt JPEG"); + } + } + // if we reach eof without hitting a marker, stbi__get_marker() below will fail and we'll eventually return 0 + } + } + else { + if (!stbi__process_marker(j, m)) return 0; + } + m = stbi__get_marker(j); + } + if (j->progressive) 
+ stbi__jpeg_finish(j); + return 1; +} + +// static jfif-centered resampling (across block boundaries) + +typedef stbi_uc *(*resample_row_func)(stbi_uc *out, stbi_uc *in0, stbi_uc *in1, + int w, int hs); + +#define stbi__div4(x) ((stbi_uc) ((x) >> 2)) + +static stbi_uc *resample_row_1(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + STBI_NOTUSED(out); + STBI_NOTUSED(in_far); + STBI_NOTUSED(w); + STBI_NOTUSED(hs); + return in_near; +} + +static stbi_uc* stbi__resample_row_v_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate two samples vertically for every one in input + int i; + STBI_NOTUSED(hs); + for (i = 0; i < w; ++i) + out[i] = stbi__div4(3 * in_near[i] + in_far[i] + 2); + return out; +} + +static stbi_uc* stbi__resample_row_h_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate two samples horizontally for every one in input + int i; + stbi_uc *input = in_near; + + if (w == 1) { + // if only one sample, can't do any interpolation + out[0] = out[1] = input[0]; + return out; + } + + out[0] = input[0]; + out[1] = stbi__div4(input[0] * 3 + input[1] + 2); + for (i = 1; i < w - 1; ++i) { + int n = 3 * input[i] + 2; + out[i * 2 + 0] = stbi__div4(n + input[i - 1]); + out[i * 2 + 1] = stbi__div4(n + input[i + 1]); + } + out[i * 2 + 0] = stbi__div4(input[w - 2] * 3 + input[w - 1] + 2); + out[i * 2 + 1] = input[w - 1]; + + STBI_NOTUSED(in_far); + STBI_NOTUSED(hs); + + return out; +} + +#define stbi__div16(x) ((stbi_uc) ((x) >> 4)) + +static stbi_uc *stbi__resample_row_hv_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate 2x2 samples for every one in input + int i, t0, t1; + if (w == 1) { + out[0] = out[1] = stbi__div4(3 * in_near[0] + in_far[0] + 2); + return out; + } + + t1 = 3 * in_near[0] + in_far[0]; + out[0] = stbi__div4(t1 + 2); + for (i = 1; i < w; ++i) { + t0 = t1; + t1 = 3 * in_near[i] + in_far[i]; + out[i * 2 - 1] = 
stbi__div16(3 * t0 + t1 + 8); + out[i * 2] = stbi__div16(3 * t1 + t0 + 8); + } + out[w * 2 - 1] = stbi__div4(t1 + 2); + + STBI_NOTUSED(hs); + + return out; +} + +#if defined(STBI_SSE2) || defined(STBI_NEON) +static stbi_uc *stbi__resample_row_hv_2_simd(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate 2x2 samples for every one in input + int i = 0, t0, t1; + + if (w == 1) { + out[0] = out[1] = stbi__div4(3 * in_near[0] + in_far[0] + 2); + return out; + } + + t1 = 3 * in_near[0] + in_far[0]; + // process groups of 8 pixels for as long as we can. + // note we can't handle the last pixel in a row in this loop + // because we need to handle the filter boundary conditions. + for (; i < ((w - 1) & ~7); i += 8) { +#if defined(STBI_SSE2) + // load and perform the vertical filtering pass + // this uses 3*x + y = 4*x + (y - x) + __m128i zero = _mm_setzero_si128(); + __m128i farb = _mm_loadl_epi64((__m128i *) (in_far + i)); + __m128i nearb = _mm_loadl_epi64((__m128i *) (in_near + i)); + __m128i farw = _mm_unpacklo_epi8(farb, zero); + __m128i nearw = _mm_unpacklo_epi8(nearb, zero); + __m128i diff = _mm_sub_epi16(farw, nearw); + __m128i nears = _mm_slli_epi16(nearw, 2); + __m128i curr = _mm_add_epi16(nears, diff); // current row + + // horizontal filter works the same based on shifted vers of current + // row. "prev" is current row shifted right by 1 pixel; we need to + // insert the previous pixel value (from t1). + // "next" is current row shifted left by 1 pixel, with first pixel + // of next block of 8 pixels added in. + __m128i prv0 = _mm_slli_si128(curr, 2); + __m128i nxt0 = _mm_srli_si128(curr, 2); + __m128i prev = _mm_insert_epi16(prv0, t1, 0); + __m128i next = _mm_insert_epi16(nxt0, 3 * in_near[i + 8] + in_far[i + 8], 7); + + // horizontal filter, polyphase implementation since it's convenient: + // even pixels = 3*cur + prev = cur*4 + (prev - cur) + // odd pixels = 3*cur + next = cur*4 + (next - cur) + // note the shared term. 
+ __m128i bias = _mm_set1_epi16(8); + __m128i curs = _mm_slli_epi16(curr, 2); + __m128i prvd = _mm_sub_epi16(prev, curr); + __m128i nxtd = _mm_sub_epi16(next, curr); + __m128i curb = _mm_add_epi16(curs, bias); + __m128i even = _mm_add_epi16(prvd, curb); + __m128i odd = _mm_add_epi16(nxtd, curb); + + // interleave even and odd pixels, then undo scaling. + __m128i int0 = _mm_unpacklo_epi16(even, odd); + __m128i int1 = _mm_unpackhi_epi16(even, odd); + __m128i de0 = _mm_srli_epi16(int0, 4); + __m128i de1 = _mm_srli_epi16(int1, 4); + + // pack and write output + __m128i outv = _mm_packus_epi16(de0, de1); + _mm_storeu_si128((__m128i *) (out + i * 2), outv); +#elif defined(STBI_NEON) + // load and perform the vertical filtering pass + // this uses 3*x + y = 4*x + (y - x) + uint8x8_t farb = vld1_u8(in_far + i); + uint8x8_t nearb = vld1_u8(in_near + i); + int16x8_t diff = vreinterpretq_s16_u16(vsubl_u8(farb, nearb)); + int16x8_t nears = vreinterpretq_s16_u16(vshll_n_u8(nearb, 2)); + int16x8_t curr = vaddq_s16(nears, diff); // current row + + // horizontal filter works the same based on shifted vers of current + // row. "prev" is current row shifted right by 1 pixel; we need to + // insert the previous pixel value (from t1). + // "next" is current row shifted left by 1 pixel, with first pixel + // of next block of 8 pixels added in. + int16x8_t prv0 = vextq_s16(curr, curr, 7); + int16x8_t nxt0 = vextq_s16(curr, curr, 1); + int16x8_t prev = vsetq_lane_s16(t1, prv0, 0); + int16x8_t next = vsetq_lane_s16(3 * in_near[i + 8] + in_far[i + 8], nxt0, 7); + + // horizontal filter, polyphase implementation since it's convenient: + // even pixels = 3*cur + prev = cur*4 + (prev - cur) + // odd pixels = 3*cur + next = cur*4 + (next - cur) + // note the shared term. 
+ int16x8_t curs = vshlq_n_s16(curr, 2); + int16x8_t prvd = vsubq_s16(prev, curr); + int16x8_t nxtd = vsubq_s16(next, curr); + int16x8_t even = vaddq_s16(curs, prvd); + int16x8_t odd = vaddq_s16(curs, nxtd); + + // undo scaling and round, then store with even/odd phases interleaved + uint8x8x2_t o; + o.val[0] = vqrshrun_n_s16(even, 4); + o.val[1] = vqrshrun_n_s16(odd, 4); + vst2_u8(out + i * 2, o); +#endif + + // "previous" value for next iter + t1 = 3 * in_near[i + 7] + in_far[i + 7]; + } + + t0 = t1; + t1 = 3 * in_near[i] + in_far[i]; + out[i * 2] = stbi__div16(3 * t1 + t0 + 8); + + for (++i; i < w; ++i) { + t0 = t1; + t1 = 3 * in_near[i] + in_far[i]; + out[i * 2 - 1] = stbi__div16(3 * t0 + t1 + 8); + out[i * 2] = stbi__div16(3 * t1 + t0 + 8); + } + out[w * 2 - 1] = stbi__div4(t1 + 2); + + STBI_NOTUSED(hs); + + return out; +} +#endif + +static stbi_uc *stbi__resample_row_generic(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // resample with nearest-neighbor + int i, j; + STBI_NOTUSED(in_far); + for (i = 0; i < w; ++i) + for (j = 0; j < hs; ++j) + out[i*hs + j] = in_near[i]; + return out; +} + +#ifdef STBI_JPEG_OLD +// this is the same YCbCr-to-RGB calculation that stb_image has used +// historically before the algorithm changes in 1.49 +#define float2fixed(x) ((int) ((x) * 65536 + 0.5)) +static void stbi__YCbCr_to_RGB_row(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step) +{ + int i; + for (i = 0; i < count; ++i) { + int y_fixed = (y[i] << 16) + 32768; // rounding + int r, g, b; + int cr = pcr[i] - 128; + int cb = pcb[i] - 128; + r = y_fixed + cr*float2fixed(1.40200f); + g = y_fixed - cr*float2fixed(0.71414f) - cb*float2fixed(0.34414f); + b = y_fixed + cb*float2fixed(1.77200f); + r >>= 16; + g >>= 16; + b >>= 16; + if ((unsigned)r > 255) { if (r < 0) r = 0; else r = 255; } + if ((unsigned)g > 255) { if (g < 0) g = 0; else g = 255; } + if ((unsigned)b > 255) { if (b < 0) b = 0; else b = 255; } + 
out[0] = (stbi_uc)r; + out[1] = (stbi_uc)g; + out[2] = (stbi_uc)b; + out[3] = 255; + out += step; + } +} +#else +// this is a reduced-precision calculation of YCbCr-to-RGB introduced +// to make sure the code produces the same results in both SIMD and scalar +#define float2fixed(x) (((int) ((x) * 4096.0f + 0.5f)) << 8) +static void stbi__YCbCr_to_RGB_row(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step) +{ + int i; + for (i = 0; i < count; ++i) { + int y_fixed = (y[i] << 20) + (1 << 19); // rounding + int r, g, b; + int cr = pcr[i] - 128; + int cb = pcb[i] - 128; + r = y_fixed + cr* float2fixed(1.40200f); + g = y_fixed + (cr*-float2fixed(0.71414f)) + ((cb*-float2fixed(0.34414f)) & 0xffff0000); + b = y_fixed + cb* float2fixed(1.77200f); + r >>= 20; + g >>= 20; + b >>= 20; + if ((unsigned)r > 255) { if (r < 0) r = 0; else r = 255; } + if ((unsigned)g > 255) { if (g < 0) g = 0; else g = 255; } + if ((unsigned)b > 255) { if (b < 0) b = 0; else b = 255; } + out[0] = (stbi_uc)r; + out[1] = (stbi_uc)g; + out[2] = (stbi_uc)b; + out[3] = 255; + out += step; + } +} +#endif + +#if defined(STBI_SSE2) || defined(STBI_NEON) +static void stbi__YCbCr_to_RGB_simd(stbi_uc *out, stbi_uc const *y, stbi_uc const *pcb, stbi_uc const *pcr, int count, int step) +{ + int i = 0; + +#ifdef STBI_SSE2 + // step == 3 is pretty ugly on the final interleave, and i'm not convinced + // it's useful in practice (you wouldn't use it for textures, for example). + // so just accelerate step == 4 case. + if (step == 4) { + // this is a fairly straightforward implementation and not super-optimized. 
+ __m128i signflip = _mm_set1_epi8(-0x80); + __m128i cr_const0 = _mm_set1_epi16((short)(1.40200f*4096.0f + 0.5f)); + __m128i cr_const1 = _mm_set1_epi16(-(short)(0.71414f*4096.0f + 0.5f)); + __m128i cb_const0 = _mm_set1_epi16(-(short)(0.34414f*4096.0f + 0.5f)); + __m128i cb_const1 = _mm_set1_epi16((short)(1.77200f*4096.0f + 0.5f)); + __m128i y_bias = _mm_set1_epi8((char)(unsigned char)128); + __m128i xw = _mm_set1_epi16(255); // alpha channel + + for (; i + 7 < count; i += 8) { + // load + __m128i y_bytes = _mm_loadl_epi64((__m128i *) (y + i)); + __m128i cr_bytes = _mm_loadl_epi64((__m128i *) (pcr + i)); + __m128i cb_bytes = _mm_loadl_epi64((__m128i *) (pcb + i)); + __m128i cr_biased = _mm_xor_si128(cr_bytes, signflip); // -128 + __m128i cb_biased = _mm_xor_si128(cb_bytes, signflip); // -128 + + // unpack to short (and left-shift cr, cb by 8) + __m128i yw = _mm_unpacklo_epi8(y_bias, y_bytes); + __m128i crw = _mm_unpacklo_epi8(_mm_setzero_si128(), cr_biased); + __m128i cbw = _mm_unpacklo_epi8(_mm_setzero_si128(), cb_biased); + + // color transform + __m128i yws = _mm_srli_epi16(yw, 4); + __m128i cr0 = _mm_mulhi_epi16(cr_const0, crw); + __m128i cb0 = _mm_mulhi_epi16(cb_const0, cbw); + __m128i cb1 = _mm_mulhi_epi16(cbw, cb_const1); + __m128i cr1 = _mm_mulhi_epi16(crw, cr_const1); + __m128i rws = _mm_add_epi16(cr0, yws); + __m128i gwt = _mm_add_epi16(cb0, yws); + __m128i bws = _mm_add_epi16(yws, cb1); + __m128i gws = _mm_add_epi16(gwt, cr1); + + // descale + __m128i rw = _mm_srai_epi16(rws, 4); + __m128i bw = _mm_srai_epi16(bws, 4); + __m128i gw = _mm_srai_epi16(gws, 4); + + // back to byte, set up for transpose + __m128i brb = _mm_packus_epi16(rw, bw); + __m128i gxb = _mm_packus_epi16(gw, xw); + + // transpose to interleave channels + __m128i t0 = _mm_unpacklo_epi8(brb, gxb); + __m128i t1 = _mm_unpackhi_epi8(brb, gxb); + __m128i o0 = _mm_unpacklo_epi16(t0, t1); + __m128i o1 = _mm_unpackhi_epi16(t0, t1); + + // store + _mm_storeu_si128((__m128i *) (out + 0), o0); + 
_mm_storeu_si128((__m128i *) (out + 16), o1); + out += 32; + } + } +#endif + +#ifdef STBI_NEON + // in this version, step=3 support would be easy to add. but is there demand? + if (step == 4) { + // this is a fairly straightforward implementation and not super-optimized. + uint8x8_t signflip = vdup_n_u8(0x80); + int16x8_t cr_const0 = vdupq_n_s16((short)(1.40200f*4096.0f + 0.5f)); + int16x8_t cr_const1 = vdupq_n_s16(-(short)(0.71414f*4096.0f + 0.5f)); + int16x8_t cb_const0 = vdupq_n_s16(-(short)(0.34414f*4096.0f + 0.5f)); + int16x8_t cb_const1 = vdupq_n_s16((short)(1.77200f*4096.0f + 0.5f)); + + for (; i + 7 < count; i += 8) { + // load + uint8x8_t y_bytes = vld1_u8(y + i); + uint8x8_t cr_bytes = vld1_u8(pcr + i); + uint8x8_t cb_bytes = vld1_u8(pcb + i); + int8x8_t cr_biased = vreinterpret_s8_u8(vsub_u8(cr_bytes, signflip)); + int8x8_t cb_biased = vreinterpret_s8_u8(vsub_u8(cb_bytes, signflip)); + + // expand to s16 + int16x8_t yws = vreinterpretq_s16_u16(vshll_n_u8(y_bytes, 4)); + int16x8_t crw = vshll_n_s8(cr_biased, 7); + int16x8_t cbw = vshll_n_s8(cb_biased, 7); + + // color transform + int16x8_t cr0 = vqdmulhq_s16(crw, cr_const0); + int16x8_t cb0 = vqdmulhq_s16(cbw, cb_const0); + int16x8_t cr1 = vqdmulhq_s16(crw, cr_const1); + int16x8_t cb1 = vqdmulhq_s16(cbw, cb_const1); + int16x8_t rws = vaddq_s16(yws, cr0); + int16x8_t gws = vaddq_s16(vaddq_s16(yws, cb0), cr1); + int16x8_t bws = vaddq_s16(yws, cb1); + + // undo scaling, round, convert to byte + uint8x8x4_t o; + o.val[0] = vqrshrun_n_s16(rws, 4); + o.val[1] = vqrshrun_n_s16(gws, 4); + o.val[2] = vqrshrun_n_s16(bws, 4); + o.val[3] = vdup_n_u8(255); + + // store, interleaving r/g/b/a + vst4_u8(out, o); + out += 8 * 4; + } + } +#endif + + for (; i < count; ++i) { + int y_fixed = (y[i] << 20) + (1 << 19); // rounding + int r, g, b; + int cr = pcr[i] - 128; + int cb = pcb[i] - 128; + r = y_fixed + cr* float2fixed(1.40200f); + g = y_fixed + cr*-float2fixed(0.71414f) + ((cb*-float2fixed(0.34414f)) & 0xffff0000); + b 
= y_fixed + cb* float2fixed(1.77200f); + r >>= 20; + g >>= 20; + b >>= 20; + if ((unsigned)r > 255) { if (r < 0) r = 0; else r = 255; } + if ((unsigned)g > 255) { if (g < 0) g = 0; else g = 255; } + if ((unsigned)b > 255) { if (b < 0) b = 0; else b = 255; } + out[0] = (stbi_uc)r; + out[1] = (stbi_uc)g; + out[2] = (stbi_uc)b; + out[3] = 255; + out += step; + } +} +#endif + +// set up the kernels +static void stbi__setup_jpeg(stbi__jpeg *j) +{ + j->idct_block_kernel = stbi__idct_block; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_row; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2; + +#ifdef STBI_SSE2 + if (stbi__sse2_available()) { + j->idct_block_kernel = stbi__idct_simd; +#ifndef STBI_JPEG_OLD + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; +#endif + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; + } +#endif + +#ifdef STBI_NEON + j->idct_block_kernel = stbi__idct_simd; +#ifndef STBI_JPEG_OLD + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; +#endif + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; +#endif +} + +// clean up the temporary component buffers +static void stbi__cleanup_jpeg(stbi__jpeg *j) +{ + stbi__free_jpeg_components(j, j->s->img_n, 0); +} + +typedef struct +{ + resample_row_func resample; + stbi_uc *line0, *line1; + int hs, vs; // expansion factor in each axis + int w_lores; // horizontal pixels pre-expansion + int ystep; // how far through vertical expansion we are + int ypos; // which pre-expansion row we're on +} stbi__resample; + +static stbi_uc *load_jpeg_image(stbi__jpeg *z, int *out_x, int *out_y, int *comp, int req_comp) +{ + int n, decode_n; + z->s->img_n = 0; // make stbi__cleanup_jpeg safe + + // validate req_comp + if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error"); + + // load a jpeg image from whichever source, but leave in YCbCr format + if (!stbi__decode_jpeg_image(z)) { stbi__cleanup_jpeg(z); return NULL; } + + // determine actual number of components to 
generate + n = req_comp ? req_comp : z->s->img_n; + + if (z->s->img_n == 3 && n < 3) + decode_n = 1; + else + decode_n = z->s->img_n; + + // resample and color-convert + { + int k; + unsigned int i, j; + stbi_uc *output; + stbi_uc *coutput[4]; + + stbi__resample res_comp[4]; + + for (k = 0; k < decode_n; ++k) { + stbi__resample *r = &res_comp[k]; + + // allocate line buffer big enough for upsampling off the edges + // with upsample factor of 4 + z->img_comp[k].linebuf = (stbi_uc *)stbi__malloc(z->s->img_x + 3); + if (!z->img_comp[k].linebuf) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); } + + r->hs = z->img_h_max / z->img_comp[k].h; + r->vs = z->img_v_max / z->img_comp[k].v; + r->ystep = r->vs >> 1; + r->w_lores = (z->s->img_x + r->hs - 1) / r->hs; + r->ypos = 0; + r->line0 = r->line1 = z->img_comp[k].data; + + if (r->hs == 1 && r->vs == 1) r->resample = resample_row_1; + else if (r->hs == 1 && r->vs == 2) r->resample = stbi__resample_row_v_2; + else if (r->hs == 2 && r->vs == 1) r->resample = stbi__resample_row_h_2; + else if (r->hs == 2 && r->vs == 2) r->resample = z->resample_row_hv_2_kernel; + else r->resample = stbi__resample_row_generic; + } + + // can't error after this so, this is safe + output = (stbi_uc *)stbi__malloc_mad3(n, z->s->img_x, z->s->img_y, 1); + if (!output) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); } + + // now go ahead and resample + for (j = 0; j < z->s->img_y; ++j) { + stbi_uc *out = output + n * z->s->img_x * j; + for (k = 0; k < decode_n; ++k) { + stbi__resample *r = &res_comp[k]; + int y_bot = r->ystep >= (r->vs >> 1); + coutput[k] = r->resample(z->img_comp[k].linebuf, + y_bot ? r->line1 : r->line0, + y_bot ? 
r->line0 : r->line1, + r->w_lores, r->hs); + if (++r->ystep >= r->vs) { + r->ystep = 0; + r->line0 = r->line1; + if (++r->ypos < z->img_comp[k].y) + r->line1 += z->img_comp[k].w2; + } + } + if (n >= 3) { + stbi_uc *y = coutput[0]; + if (z->s->img_n == 3) { + if (z->rgb == 3) { + for (i = 0; i < z->s->img_x; ++i) { + out[0] = y[i]; + out[1] = coutput[1][i]; + out[2] = coutput[2][i]; + out[3] = 255; + out += n; + } + } + else { + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + } + } + else + for (i = 0; i < z->s->img_x; ++i) { + out[0] = out[1] = out[2] = y[i]; + out[3] = 255; // not used if n==3 + out += n; + } + } + else { + stbi_uc *y = coutput[0]; + if (n == 1) + for (i = 0; i < z->s->img_x; ++i) out[i] = y[i]; + else + for (i = 0; i < z->s->img_x; ++i) *out++ = y[i], *out++ = 255; + } + } + stbi__cleanup_jpeg(z); + *out_x = z->s->img_x; + *out_y = z->s->img_y; + if (comp) *comp = z->s->img_n; // report original components, not output + return output; + } +} + +static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + unsigned char* result; + stbi__jpeg* j = (stbi__jpeg*)stbi__malloc(sizeof(stbi__jpeg)); + j->s = s; + stbi__setup_jpeg(j); + result = load_jpeg_image(j, x, y, comp, req_comp); + STBI_FREE(j); + return result; +} + +static int stbi__jpeg_test(stbi__context *s) +{ + int r; + stbi__jpeg j; + j.s = s; + stbi__setup_jpeg(&j); + r = stbi__decode_jpeg_header(&j, STBI__SCAN_type); + stbi__rewind(s); + return r; +} + +static int stbi__jpeg_info_raw(stbi__jpeg *j, int *x, int *y, int *comp) +{ + if (!stbi__decode_jpeg_header(j, STBI__SCAN_header)) { + stbi__rewind(j->s); + return 0; + } + if (x) *x = j->s->img_x; + if (y) *y = j->s->img_y; + if (comp) *comp = j->s->img_n; + return 1; +} + +static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp) +{ + int result; + stbi__jpeg* j = (stbi__jpeg*)(stbi__malloc(sizeof(stbi__jpeg))); + j->s = s; + result = 
stbi__jpeg_info_raw(j, x, y, comp); + STBI_FREE(j); + return result; +} +#endif + +// public domain zlib decode v0.2 Sean Barrett 2006-11-18 +// simple implementation +// - all input must be provided in an upfront buffer +// - all output is written to a single output buffer (can malloc/realloc) +// performance +// - fast huffman + +#ifndef STBI_NO_ZLIB + +// fast-way is faster to check than jpeg huffman, but slow way is slower +#define STBI__ZFAST_BITS 9 // accelerate all cases in default tables +#define STBI__ZFAST_MASK ((1 << STBI__ZFAST_BITS) - 1) + +// zlib-style huffman encoding +// (jpegs packs from left, zlib from right, so can't share code) +typedef struct +{ + stbi__uint16 fast[1 << STBI__ZFAST_BITS]; + stbi__uint16 firstcode[16]; + int maxcode[17]; + stbi__uint16 firstsymbol[16]; + stbi_uc size[288]; + stbi__uint16 value[288]; +} stbi__zhuffman; + +stbi_inline static int stbi__bitreverse16(int n) +{ + n = ((n & 0xAAAA) >> 1) | ((n & 0x5555) << 1); + n = ((n & 0xCCCC) >> 2) | ((n & 0x3333) << 2); + n = ((n & 0xF0F0) >> 4) | ((n & 0x0F0F) << 4); + n = ((n & 0xFF00) >> 8) | ((n & 0x00FF) << 8); + return n; +} + +stbi_inline static int stbi__bit_reverse(int v, int bits) +{ + STBI_ASSERT(bits <= 16); + // to bit reverse n bits, reverse 16 and shift + // e.g. 
11 bits, bit reverse and shift away 5 + return stbi__bitreverse16(v) >> (16 - bits); +} + +static int stbi__zbuild_huffman(stbi__zhuffman *z, stbi_uc *sizelist, int num) +{ + int i, k = 0; + int code, next_code[16], sizes[17]; + + // DEFLATE spec for generating codes + memset(sizes, 0, sizeof(sizes)); + memset(z->fast, 0, sizeof(z->fast)); + for (i = 0; i < num; ++i) + ++sizes[sizelist[i]]; + sizes[0] = 0; + for (i = 1; i < 16; ++i) + if (sizes[i] >(1 << i)) + return stbi__err("bad sizes", "Corrupt PNG"); + code = 0; + for (i = 1; i < 16; ++i) { + next_code[i] = code; + z->firstcode[i] = (stbi__uint16)code; + z->firstsymbol[i] = (stbi__uint16)k; + code = (code + sizes[i]); + if (sizes[i]) + if (code - 1 >= (1 << i)) return stbi__err("bad codelengths", "Corrupt PNG"); + z->maxcode[i] = code << (16 - i); // preshift for inner loop + code <<= 1; + k += sizes[i]; + } + z->maxcode[16] = 0x10000; // sentinel + for (i = 0; i < num; ++i) { + int s = sizelist[i]; + if (s) { + int c = next_code[s] - z->firstcode[s] + z->firstsymbol[s]; + stbi__uint16 fastv = (stbi__uint16)((s << 9) | i); + z->size[c] = (stbi_uc)s; + z->value[c] = (stbi__uint16)i; + if (s <= STBI__ZFAST_BITS) { + int j = stbi__bit_reverse(next_code[s], s); + while (j < (1 << STBI__ZFAST_BITS)) { + z->fast[j] = fastv; + j += (1 << s); + } + } + ++next_code[s]; + } + } + return 1; +} + +// zlib-from-memory implementation for PNG reading +// because PNG allows splitting the zlib stream arbitrarily, +// and it's annoying structurally to have PNG call ZLIB call PNG, +// we require PNG read all the IDATs and combine them into a single +// memory buffer + +typedef struct +{ + stbi_uc *zbuffer, *zbuffer_end; + int num_bits; + stbi__uint32 code_buffer; + + char *zout; + char *zout_start; + char *zout_end; + int z_expandable; + + stbi__zhuffman z_length, z_distance; +} stbi__zbuf; + +stbi_inline static stbi_uc stbi__zget8(stbi__zbuf *z) +{ + if (z->zbuffer >= z->zbuffer_end) return 0; + return *z->zbuffer++; +} + 
// Top up the bit accumulator from the byte stream until it holds at least
// 25 bits. stbi__zget8 returns 0 past end-of-buffer, so running off the end
// pads with zero bits rather than faulting; decode errors are caught later.
static void stbi__fill_bits(stbi__zbuf *z)
{
   do {
      STBI_ASSERT(z->code_buffer < (1U << z->num_bits));
      z->code_buffer |= (unsigned int)stbi__zget8(z) << z->num_bits;
      z->num_bits += 8;
   } while (z->num_bits <= 24);
}

// Pull the next n bits (LSB-first, per DEFLATE bit order) out of the
// accumulator and return them as an unsigned value.
stbi_inline static unsigned int stbi__zreceive(stbi__zbuf *z, int n)
{
   unsigned int k;
   if (z->num_bits < n) stbi__fill_bits(z);
   k = z->code_buffer & ((1 << n) - 1);
   z->code_buffer >>= n;
   z->num_bits -= n;
   return k;
}

// Decode one Huffman symbol the slow way, for codes longer than
// STBI__ZFAST_BITS that missed the fast lookup table.
// Returns the symbol value, or -1 on an invalid code.
static int stbi__zhuffman_decode_slowpath(stbi__zbuf *a, stbi__zhuffman *z)
{
   int b, s, k;
   // not resolved by fast table, so compute it the slow way
   // use jpeg approach, which requires MSbits at top
   k = stbi__bit_reverse(a->code_buffer, 16);
   for (s = STBI__ZFAST_BITS + 1; ; ++s)
      if (k < z->maxcode[s])
         break;
   if (s == 16) return -1; // invalid code!
   // code size is s, so:
   b = (k >> (16 - s)) - z->firstcode[s] + z->firstsymbol[s];
   STBI_ASSERT(z->size[b] == s);
   a->code_buffer >>= s;
   a->num_bits -= s;
   return z->value[b];
}

// Decode one Huffman symbol: try the 9-bit fast table first (entry packs
// length<<9 | symbol), fall back to the slow path for longer codes.
stbi_inline static int stbi__zhuffman_decode(stbi__zbuf *a, stbi__zhuffman *z)
{
   int b, s;
   if (a->num_bits < 16) stbi__fill_bits(a);
   b = z->fast[a->code_buffer & STBI__ZFAST_MASK];
   if (b) {
      s = b >> 9;
      a->code_buffer >>= s;
      a->num_bits -= s;
      return b & 511;
   }
   return stbi__zhuffman_decode_slowpath(a, z);
}

// Grow the output buffer so at least n more bytes fit past zout.
// Doubles the capacity until it is large enough, then reallocs and rebases
// the zout/zout_start/zout_end pointers. Fails if the buffer was supplied
// by the caller and is not expandable.
// NOTE(review): the doubling loop does not guard `limit` against int
// overflow for pathological sizes — later upstream versions add a cap;
// confirm whether inputs here are bounded elsewhere.
static int stbi__zexpand(stbi__zbuf *z, char *zout, int n) // need to make room for n bytes
{
   char *q;
   int cur, limit, old_limit;
   z->zout = zout;
   if (!z->z_expandable) return stbi__err("output buffer limit", "Corrupt PNG");
   cur = (int)(z->zout - z->zout_start);
   limit = old_limit = (int)(z->zout_end - z->zout_start);
   while (cur + n > limit)
      limit *= 2;
   q = (char *)STBI_REALLOC_SIZED(z->zout_start, old_limit, limit);
   STBI_NOTUSED(old_limit);
   if (q == NULL) return stbi__err("outofmem", "Out of memory");
   z->zout_start = q;
   z->zout = q + cur;
   z->zout_end = q + limit;
   return 1;
}

// DEFLATE length/distance code tables (RFC 1951 section 3.2.5):
// base value plus a count of extra bits to read for each symbol.
static int stbi__zlength_base[31] = {
   3,4,5,6,7,8,9,10,11,13,
   15,17,19,23,27,31,35,43,51,59,
   67,83,99,115,131,163,195,227,258,0,0 };

static int stbi__zlength_extra[31] =
{ 0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0 };

static int stbi__zdist_base[32] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,
257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0 };

static int stbi__zdist_extra[32] =
{ 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13 };

// Decode one compressed DEFLATE block: literals are copied to the output,
// length/distance pairs are expanded by copying `len` bytes from `dist`
// bytes back in the already-decoded output. Symbol 256 ends the block.
static int stbi__parse_huffman_block(stbi__zbuf *a)
{
   char *zout = a->zout;
   for (;;) {
      int z = stbi__zhuffman_decode(a, &a->z_length);
      if (z < 256) {
         if (z < 0) return stbi__err("bad huffman code", "Corrupt PNG"); // error in huffman codes
         if (zout >= a->zout_end) {
            if (!stbi__zexpand(a, zout, 1)) return 0;
            zout = a->zout;
         }
         *zout++ = (char)z;
      }
      else {
         stbi_uc *p;
         int len, dist;
         if (z == 256) {
            a->zout = zout;
            return 1;
         }
         z -= 257;
         len = stbi__zlength_base[z];
         if (stbi__zlength_extra[z]) len += stbi__zreceive(a, stbi__zlength_extra[z]);
         z = stbi__zhuffman_decode(a, &a->z_distance);
         if (z < 0) return stbi__err("bad huffman code", "Corrupt PNG");
         dist = stbi__zdist_base[z];
         if (stbi__zdist_extra[z]) dist += stbi__zreceive(a, stbi__zdist_extra[z]);
         if (zout - a->zout_start < dist) return stbi__err("bad dist", "Corrupt PNG");
         if (zout + len > a->zout_end) {
            if (!stbi__zexpand(a, zout, len)) return 0;
            zout = a->zout;
         }
         p = (stbi_uc *)(zout - dist);
         if (dist == 1) { // run of one byte; common in images.
            stbi_uc v = *p;
            if (len) { do *zout++ = v; while (--len); }
         }
         else {
            // overlapping copy is intentional: byte-by-byte forward copy
            // replicates earlier output when len > dist (DEFLATE semantics)
            if (len) { do *zout++ = *p++; while (--len); }
         }
      }
   }
}

// Read a dynamic-Huffman block header (RFC 1951 3.2.7): decode the
// code-length alphabet first, then use it to decode the combined
// literal/length + distance code lengths, and build both trees.
static int stbi__compute_huffman_codes(stbi__zbuf *a)
{
   static stbi_uc length_dezigzag[19] = { 16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15 };
   stbi__zhuffman z_codelength;
   stbi_uc lencodes[286 + 32 + 137];//padding for maximum single op
   stbi_uc codelength_sizes[19];
   int i, n;

   int hlit = stbi__zreceive(a, 5) + 257;
   int hdist = stbi__zreceive(a, 5) + 1;
   int hclen = stbi__zreceive(a, 4) + 4;
   int ntot = hlit + hdist;

   memset(codelength_sizes, 0, sizeof(codelength_sizes));
   for (i = 0; i < hclen; ++i) {
      int s = stbi__zreceive(a, 3);
      codelength_sizes[length_dezigzag[i]] = (stbi_uc)s;
   }
   if (!stbi__zbuild_huffman(&z_codelength, codelength_sizes, 19)) return 0;

   n = 0;
   while (n < ntot) {
      int c = stbi__zhuffman_decode(a, &z_codelength);
      if (c < 0 || c >= 19) return stbi__err("bad codelengths", "Corrupt PNG");
      if (c < 16)
         lencodes[n++] = (stbi_uc)c;
      else {
         stbi_uc fill = 0;
         if (c == 16) {
            // repeat previous code length 3-6 times
            c = stbi__zreceive(a, 2) + 3;
            if (n == 0) return stbi__err("bad codelengths", "Corrupt PNG");
            fill = lencodes[n - 1];
         }
         else if (c == 17)
            // repeat zero 3-10 times
            c = stbi__zreceive(a, 3) + 3;
         else {
            STBI_ASSERT(c == 18);
            // repeat zero 11-138 times
            c = stbi__zreceive(a, 7) + 11;
         }
         if (ntot - n < c) return stbi__err("bad codelengths", "Corrupt PNG");
         memset(lencodes + n, fill, c);
         n += c;
      }
   }
   if (n != ntot) return stbi__err("bad codelengths", "Corrupt PNG");
   if (!stbi__zbuild_huffman(&a->z_length, lencodes, hlit)) return 0;
   if (!stbi__zbuild_huffman(&a->z_distance, lencodes + hlit, hdist)) return 0;
   return 1;
}

// Handle a stored (uncompressed) DEFLATE block: discard bits to the next
// byte boundary, read LEN/NLEN, validate NLEN == ~LEN, then memcpy the raw
// payload straight to the output.
static int stbi__parse_uncompressed_block(stbi__zbuf *a)
{
   stbi_uc header[4];
   int len, nlen, k;
   if (a->num_bits & 7)
      stbi__zreceive(a, a->num_bits & 7); // discard
   // drain the bit-packed data into header
   k = 0;
   while (a->num_bits > 0) {
      header[k++] = (stbi_uc)(a->code_buffer & 255); // suppress MSVC run-time check
      a->code_buffer >>= 8;
      a->num_bits -= 8;
   }
   STBI_ASSERT(a->num_bits == 0);
   // now fill header the normal way
   while (k < 4)
      header[k++] = stbi__zget8(a);
   len = header[1] * 256 + header[0];
   nlen = header[3] * 256 + header[2];
   if (nlen != (len ^ 0xffff)) return stbi__err("zlib corrupt", "Corrupt PNG");
   if (a->zbuffer + len > a->zbuffer_end) return stbi__err("read past buffer", "Corrupt PNG");
   if (a->zout + len > a->zout_end)
      if (!stbi__zexpand(a, a->zout, len)) return 0;
   memcpy(a->zout, a->zbuffer, len);
   a->zbuffer += len;
   a->zout += len;
   return 1;
}

// Validate the two-byte zlib stream header (RFC 1950): checksum of
// CMF/FLG, no preset dictionary, compression method 8 (DEFLATE).
static int stbi__parse_zlib_header(stbi__zbuf *a)
{
   int cmf = stbi__zget8(a);
   int cm = cmf & 15;
   /* int cinfo = cmf >> 4; */
   int flg = stbi__zget8(a);
   if ((cmf * 256 + flg) % 31 != 0) return stbi__err("bad zlib header", "Corrupt PNG"); // zlib spec
   if (flg & 32) return stbi__err("no preset dict", "Corrupt PNG"); // preset dictionary not allowed in png
   if (cm != 8) return stbi__err("bad compression", "Corrupt PNG"); // DEFLATE required for png
   // window = 1 << (8 + cinfo)... but who cares, we fully buffer output
   return 1;
}

// Fixed-Huffman code lengths from RFC 1951 3.2.6, built lazily on first use.
// @TODO: should statically initialize these for optimal thread safety
static stbi_uc stbi__zdefault_length[288], stbi__zdefault_distance[32];
static void stbi__init_zdefaults(void)
{
   int i; // use <= to match clearly with spec
   for (i = 0; i <= 143; ++i) stbi__zdefault_length[i] = 8;
   for (; i <= 255; ++i) stbi__zdefault_length[i] = 9;
   for (; i <= 279; ++i) stbi__zdefault_length[i] = 7;
   for (; i <= 287; ++i) stbi__zdefault_length[i] = 8;

   for (i = 0; i <= 31; ++i) stbi__zdefault_distance[i] = 5;
}

// Decode an entire zlib/DEFLATE stream: optional header check, then a
// sequence of blocks (stored / fixed Huffman / dynamic Huffman) until the
// BFINAL block has been processed. Block type 3 is reserved => error.
static int stbi__parse_zlib(stbi__zbuf *a, int parse_header)
{
   int final, type;
   if (parse_header)
      if (!stbi__parse_zlib_header(a)) return 0;
   a->num_bits = 0;
   a->code_buffer = 0;
   do {
      final = stbi__zreceive(a, 1);
      type = stbi__zreceive(a, 2);
      if (type == 0) {
         if (!stbi__parse_uncompressed_block(a)) return 0;
      }
      else if (type == 3) {
         return 0;
      }
      else {
         if (type == 1) {
            // use fixed code lengths
            // (last distance entry doubles as the "initialized" flag)
            if (!stbi__zdefault_distance[31]) stbi__init_zdefaults();
            if (!stbi__zbuild_huffman(&a->z_length, stbi__zdefault_length, 288)) return 0;
            if (!stbi__zbuild_huffman(&a->z_distance, stbi__zdefault_distance, 32)) return 0;
         }
         else {
            if (!stbi__compute_huffman_codes(a)) return 0;
         }
         if (!stbi__parse_huffman_block(a)) return 0;
      }
   } while (!final);
   return 1;
}

// Set up the output window (fixed caller buffer, or expandable if exp != 0)
// and run the decoder.
static int stbi__do_zlib(stbi__zbuf *a, char *obuf, int olen, int exp, int parse_header)
{
   a->zout_start = obuf;
   a->zout = obuf;
   a->zout_end = obuf + olen;
   a->z_expandable = exp;

   return stbi__parse_zlib(a, parse_header);
}

// Public API: decode a zlib stream into a malloc'd buffer, starting from a
// caller-supplied size guess and growing as needed. Returns NULL on error;
// on success *outlen (if non-NULL) receives the decoded size.
STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen)
{
   stbi__zbuf a;
   char *p = (char *)stbi__malloc(initial_size);
   if (p == NULL) return NULL;
   a.zbuffer = (stbi_uc *)buffer;
   a.zbuffer_end = (stbi_uc *)buffer + len;
   if (stbi__do_zlib(&a, p, initial_size, 1, 1)) {
      if (outlen) *outlen = (int)(a.zout - a.zout_start);
      return a.zout_start;
   }
   else {
      STBI_FREE(a.zout_start);
      return NULL;
   }
}

// Public API: as above with a default 16KB initial guess.
STBIDEF char *stbi_zlib_decode_malloc(char const *buffer, int len, int *outlen)
{
   return stbi_zlib_decode_malloc_guesssize(buffer, len, 16384, outlen);
}

// Public API: as stbi_zlib_decode_malloc_guesssize, but lets the caller
// choose whether the 2-byte zlib header is present (PNG's iPhone CgBI
// variant omits it).
STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header)
{
   stbi__zbuf a;
   char *p = (char *)stbi__malloc(initial_size);
   if (p == NULL) return NULL;
   a.zbuffer = (stbi_uc *)buffer;
   a.zbuffer_end = (stbi_uc *)buffer + len;
   if (stbi__do_zlib(&a, p, initial_size, 1, parse_header)) {
      if (outlen) *outlen = (int)(a.zout - a.zout_start);
      return a.zout_start;
   }
   else {
      STBI_FREE(a.zout_start);
      return NULL;
   }
}

// Public API: decode into a fixed caller buffer (no growing).
// Returns the decoded byte count, or -1 on error/overflow of obuffer.
STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, char const *ibuffer, int ilen)
{
   stbi__zbuf a;
   a.zbuffer = (stbi_uc *)ibuffer;
   a.zbuffer_end = (stbi_uc *)ibuffer + ilen;
   if (stbi__do_zlib(&a, obuffer, olen, 0, 1))
      return (int)(a.zout - a.zout_start);
   else
      return -1;
}

// Public API: malloc'd-output decode of a raw DEFLATE stream (no zlib header).
STBIDEF char *stbi_zlib_decode_noheader_malloc(char const *buffer, int len, int *outlen)
{
   stbi__zbuf a;
   char *p = (char *)stbi__malloc(16384);
   if (p == NULL) return NULL;
   a.zbuffer = (stbi_uc *)buffer;
   a.zbuffer_end = (stbi_uc *)buffer + len;
   if (stbi__do_zlib(&a, p, 16384, 1, 0)) {
      if (outlen) *outlen = (int)(a.zout - a.zout_start);
      return a.zout_start;
   }
   else {
      STBI_FREE(a.zout_start);
      return NULL;
   }
}

// Public API: fixed-buffer decode of a raw DEFLATE stream (no zlib header).
STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen)
{
   stbi__zbuf a;
   a.zbuffer = (stbi_uc *)ibuffer;
   a.zbuffer_end = (stbi_uc *)ibuffer + ilen;
   if (stbi__do_zlib(&a, obuffer, olen, 0, 0))
      return (int)(a.zout - a.zout_start);
   else
      return -1;
}
#endif

// public domain "baseline" PNG decoder v0.10 Sean Barrett 2006-11-18
// simple implementation
// - only 8-bit samples
// - no CRC checking
// - allocates lots
of intermediate memory +// - avoids problem of streaming data between subsystems +// - avoids explicit window management +// performance +// - uses stb_zlib, a PD zlib implementation with fast huffman decoding + +#ifndef STBI_NO_PNG +typedef struct +{ + stbi__uint32 length; + stbi__uint32 type; +} stbi__pngchunk; + +static stbi__pngchunk stbi__get_chunk_header(stbi__context *s) +{ + stbi__pngchunk c; + c.length = stbi__get32be(s); + c.type = stbi__get32be(s); + return c; +} + +static int stbi__check_png_header(stbi__context *s) +{ + static stbi_uc png_sig[8] = { 137,80,78,71,13,10,26,10 }; + int i; + for (i = 0; i < 8; ++i) + if (stbi__get8(s) != png_sig[i]) return stbi__err("bad png sig", "Not a PNG"); + return 1; +} + +typedef struct +{ + stbi__context *s; + stbi_uc *idata, *expanded, *out; + int depth; +} stbi__png; + + +enum { + STBI__F_none = 0, + STBI__F_sub = 1, + STBI__F_up = 2, + STBI__F_avg = 3, + STBI__F_paeth = 4, + // synthetic filters used for first scanline to avoid needing a dummy row of 0s + STBI__F_avg_first, + STBI__F_paeth_first +}; + +static stbi_uc first_row_filter[5] = +{ + STBI__F_none, + STBI__F_sub, + STBI__F_none, + STBI__F_avg_first, + STBI__F_paeth_first +}; + +static int stbi__paeth(int a, int b, int c) +{ + int p = a + b - c; + int pa = abs(p - a); + int pb = abs(p - b); + int pc = abs(p - c); + if (pa <= pb && pa <= pc) return a; + if (pb <= pc) return b; + return c; +} + +static stbi_uc stbi__depth_scale_table[9] = { 0, 0xff, 0x55, 0, 0x11, 0,0,0, 0x01 }; + +// create the png data from post-deflated data +static int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 raw_len, int out_n, stbi__uint32 x, stbi__uint32 y, int depth, int color) +{ + int bytes = (depth == 16 ? 
2 : 1); + stbi__context *s = a->s; + stbi__uint32 i, j, stride = x*out_n*bytes; + stbi__uint32 img_len, img_width_bytes; + int k; + int img_n = s->img_n; // copy it into a local for later + + int output_bytes = out_n*bytes; + int filter_bytes = img_n*bytes; + int width = x; + + STBI_ASSERT(out_n == s->img_n || out_n == s->img_n + 1); + a->out = (stbi_uc *)stbi__malloc_mad3(x, y, output_bytes, 0); // extra bytes to write off the end into + if (!a->out) return stbi__err("outofmem", "Out of memory"); + + img_width_bytes = (((img_n * x * depth) + 7) >> 3); + img_len = (img_width_bytes + 1) * y; + if (s->img_x == x && s->img_y == y) { + if (raw_len != img_len) return stbi__err("not enough pixels", "Corrupt PNG"); + } + else { // interlaced: + if (raw_len < img_len) return stbi__err("not enough pixels", "Corrupt PNG"); + } + + for (j = 0; j < y; ++j) { + stbi_uc *cur = a->out + stride*j; + stbi_uc *prior = cur - stride; + int filter = *raw++; + + if (filter > 4) + return stbi__err("invalid filter", "Corrupt PNG"); + + if (depth < 8) { + STBI_ASSERT(img_width_bytes <= x); + cur += x*out_n - img_width_bytes; // store output to the rightmost img_len bytes, so we can decode in place + filter_bytes = 1; + width = img_width_bytes; + } + + // if first row, use special filter that doesn't sample previous row + if (j == 0) filter = first_row_filter[filter]; + + // handle first byte explicitly + for (k = 0; k < filter_bytes; ++k) { + switch (filter) { + case STBI__F_none: cur[k] = raw[k]; break; + case STBI__F_sub: cur[k] = raw[k]; break; + case STBI__F_up: cur[k] = STBI__BYTECAST(raw[k] + prior[k]); break; + case STBI__F_avg: cur[k] = STBI__BYTECAST(raw[k] + (prior[k] >> 1)); break; + case STBI__F_paeth: cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(0, prior[k], 0)); break; + case STBI__F_avg_first: cur[k] = raw[k]; break; + case STBI__F_paeth_first: cur[k] = raw[k]; break; + } + } + + if (depth == 8) { + if (img_n != out_n) + cur[img_n] = 255; // first pixel + raw += img_n; + cur 
+= out_n; + prior += out_n; + } + else if (depth == 16) { + if (img_n != out_n) { + cur[filter_bytes] = 255; // first pixel top byte + cur[filter_bytes + 1] = 255; // first pixel bottom byte + } + raw += filter_bytes; + cur += output_bytes; + prior += output_bytes; + } + else { + raw += 1; + cur += 1; + prior += 1; + } + + // this is a little gross, so that we don't switch per-pixel or per-component + if (depth < 8 || img_n == out_n) { + int nk = (width - 1)*filter_bytes; +#define STBI__CASE(f) \ + case f: \ + for (k=0; k < nk; ++k) + switch (filter) { + // "none" filter turns into a memcpy here; make that explicit. + case STBI__F_none: memcpy(cur, raw, nk); break; + STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k - filter_bytes]); } break; + STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break; + STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k - filter_bytes]) >> 1)); } break; + STBI__CASE(STBI__F_paeth) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k - filter_bytes], prior[k], prior[k - filter_bytes])); } break; + STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k - filter_bytes] >> 1)); } break; + STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k - filter_bytes], 0, 0)); } break; + } +#undef STBI__CASE + raw += nk; + } + else { + STBI_ASSERT(img_n + 1 == out_n); +#define STBI__CASE(f) \ + case f: \ + for (i=x-1; i >= 1; --i, cur[filter_bytes]=255,raw+=filter_bytes,cur+=output_bytes,prior+=output_bytes) \ + for (k=0; k < filter_bytes; ++k) + switch (filter) { + STBI__CASE(STBI__F_none) { cur[k] = raw[k]; } break; + STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k - output_bytes]); } break; + STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break; + STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k - output_bytes]) >> 1)); } break; + STBI__CASE(STBI__F_paeth) { cur[k] = 
STBI__BYTECAST(raw[k] + stbi__paeth(cur[k - output_bytes], prior[k], prior[k - output_bytes])); } break; + STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k - output_bytes] >> 1)); } break; + STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k - output_bytes], 0, 0)); } break; + } +#undef STBI__CASE + + // the loop above sets the high byte of the pixels' alpha, but for + // 16 bit png files we also need the low byte set. we'll do that here. + if (depth == 16) { + cur = a->out + stride*j; // start at the beginning of the row again + for (i = 0; i < x; ++i, cur += output_bytes) { + cur[filter_bytes + 1] = 255; + } + } + } + } + + // we make a separate pass to expand bits to pixels; for performance, + // this could run two scanlines behind the above code, so it won't + // intefere with filtering but will still be in the cache. + if (depth < 8) { + for (j = 0; j < y; ++j) { + stbi_uc *cur = a->out + stride*j; + stbi_uc *in = a->out + stride*j + x*out_n - img_width_bytes; + // unpack 1/2/4-bit into a 8-bit buffer. allows us to keep the common 8-bit path optimal at minimal cost for 1/2/4-bit + // png guarante byte alignment, if width is not multiple of 8/4/2 we'll decode dummy trailing data that will be skipped in the later loop + stbi_uc scale = (color == 0) ? stbi__depth_scale_table[depth] : 1; // scale grayscale values to 0..255 range + + // note that the final byte might overshoot and write more data than desired. + // we can allocate enough data that this never writes out of memory, but it + // could also overwrite the next scanline. can it overwrite non-empty data + // on the next scanline? yes, consider 1-pixel-wide scanlines with 1-bit-per-pixel. 
+ // so we need to explicitly clamp the final ones + + if (depth == 4) { + for (k = x*img_n; k >= 2; k -= 2, ++in) { + *cur++ = scale * ((*in >> 4)); + *cur++ = scale * ((*in) & 0x0f); + } + if (k > 0) *cur++ = scale * ((*in >> 4)); + } + else if (depth == 2) { + for (k = x*img_n; k >= 4; k -= 4, ++in) { + *cur++ = scale * ((*in >> 6)); + *cur++ = scale * ((*in >> 4) & 0x03); + *cur++ = scale * ((*in >> 2) & 0x03); + *cur++ = scale * ((*in) & 0x03); + } + if (k > 0) *cur++ = scale * ((*in >> 6)); + if (k > 1) *cur++ = scale * ((*in >> 4) & 0x03); + if (k > 2) *cur++ = scale * ((*in >> 2) & 0x03); + } + else if (depth == 1) { + for (k = x*img_n; k >= 8; k -= 8, ++in) { + *cur++ = scale * ((*in >> 7)); + *cur++ = scale * ((*in >> 6) & 0x01); + *cur++ = scale * ((*in >> 5) & 0x01); + *cur++ = scale * ((*in >> 4) & 0x01); + *cur++ = scale * ((*in >> 3) & 0x01); + *cur++ = scale * ((*in >> 2) & 0x01); + *cur++ = scale * ((*in >> 1) & 0x01); + *cur++ = scale * ((*in) & 0x01); + } + if (k > 0) *cur++ = scale * ((*in >> 7)); + if (k > 1) *cur++ = scale * ((*in >> 6) & 0x01); + if (k > 2) *cur++ = scale * ((*in >> 5) & 0x01); + if (k > 3) *cur++ = scale * ((*in >> 4) & 0x01); + if (k > 4) *cur++ = scale * ((*in >> 3) & 0x01); + if (k > 5) *cur++ = scale * ((*in >> 2) & 0x01); + if (k > 6) *cur++ = scale * ((*in >> 1) & 0x01); + } + if (img_n != out_n) { + int q; + // insert alpha = 255 + cur = a->out + stride*j; + if (img_n == 1) { + for (q = x - 1; q >= 0; --q) { + cur[q * 2 + 1] = 255; + cur[q * 2 + 0] = cur[q]; + } + } + else { + STBI_ASSERT(img_n == 3); + for (q = x - 1; q >= 0; --q) { + cur[q * 4 + 3] = 255; + cur[q * 4 + 2] = cur[q * 3 + 2]; + cur[q * 4 + 1] = cur[q * 3 + 1]; + cur[q * 4 + 0] = cur[q * 3 + 0]; + } + } + } + } + } + else if (depth == 16) { + // force the image data from big-endian to platform-native. 
+ // this is done in a separate pass due to the decoding relying + // on the data being untouched, but could probably be done + // per-line during decode if care is taken. + stbi_uc *cur = a->out; + stbi__uint16 *cur16 = (stbi__uint16*)cur; + + for (i = 0; i < x*y*out_n; ++i, cur16++, cur += 2) { + *cur16 = (cur[0] << 8) | cur[1]; + } + } + + return 1; +} + +static int stbi__create_png_image(stbi__png *a, stbi_uc *image_data, stbi__uint32 image_data_len, int out_n, int depth, int color, int interlaced) +{ + int bytes = (depth == 16 ? 2 : 1); + int out_bytes = out_n * bytes; + stbi_uc *final; + int p; + if (!interlaced) + return stbi__create_png_image_raw(a, image_data, image_data_len, out_n, a->s->img_x, a->s->img_y, depth, color); + + // de-interlacing + final = (stbi_uc *)stbi__malloc_mad3(a->s->img_x, a->s->img_y, out_bytes, 0); + for (p = 0; p < 7; ++p) { + int xorig[] = { 0,4,0,2,0,1,0 }; + int yorig[] = { 0,0,4,0,2,0,1 }; + int xspc[] = { 8,8,4,4,2,2,1 }; + int yspc[] = { 8,8,8,4,4,2,2 }; + int i, j, x, y; + // pass1_x[4] = 0, pass1_x[5] = 1, pass1_x[12] = 1 + x = (a->s->img_x - xorig[p] + xspc[p] - 1) / xspc[p]; + y = (a->s->img_y - yorig[p] + yspc[p] - 1) / yspc[p]; + if (x && y) { + stbi__uint32 img_len = ((((a->s->img_n * x * depth) + 7) >> 3) + 1) * y; + if (!stbi__create_png_image_raw(a, image_data, image_data_len, out_n, x, y, depth, color)) { + STBI_FREE(final); + return 0; + } + for (j = 0; j < y; ++j) { + for (i = 0; i < x; ++i) { + int out_y = j*yspc[p] + yorig[p]; + int out_x = i*xspc[p] + xorig[p]; + memcpy(final + out_y*a->s->img_x*out_bytes + out_x*out_bytes, + a->out + (j*x + i)*out_bytes, out_bytes); + } + } + STBI_FREE(a->out); + image_data += img_len; + image_data_len -= img_len; + } + } + a->out = final; + + return 1; +} + +static int stbi__compute_transparency(stbi__png *z, stbi_uc tc[3], int out_n) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi_uc *p = z->out; + + // compute color-based 
transparency, assuming we've + // already got 255 as the alpha value in the output + STBI_ASSERT(out_n == 2 || out_n == 4); + + if (out_n == 2) { + for (i = 0; i < pixel_count; ++i) { + p[1] = (p[0] == tc[0] ? 0 : 255); + p += 2; + } + } + else { + for (i = 0; i < pixel_count; ++i) { + if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) + p[3] = 0; + p += 4; + } + } + return 1; +} + +static int stbi__compute_transparency16(stbi__png *z, stbi__uint16 tc[3], int out_n) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi__uint16 *p = (stbi__uint16*)z->out; + + // compute color-based transparency, assuming we've + // already got 65535 as the alpha value in the output + STBI_ASSERT(out_n == 2 || out_n == 4); + + if (out_n == 2) { + for (i = 0; i < pixel_count; ++i) { + p[1] = (p[0] == tc[0] ? 0 : 65535); + p += 2; + } + } + else { + for (i = 0; i < pixel_count; ++i) { + if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) + p[3] = 0; + p += 4; + } + } + return 1; +} + +static int stbi__expand_png_palette(stbi__png *a, stbi_uc *palette, int len, int pal_img_n) +{ + stbi__uint32 i, pixel_count = a->s->img_x * a->s->img_y; + stbi_uc *p, *temp_out, *orig = a->out; + + p = (stbi_uc *)stbi__malloc_mad2(pixel_count, pal_img_n, 0); + if (p == NULL) return stbi__err("outofmem", "Out of memory"); + + // between here and free(out) below, exitting would leak + temp_out = p; + + if (pal_img_n == 3) { + for (i = 0; i < pixel_count; ++i) { + int n = orig[i] * 4; + p[0] = palette[n]; + p[1] = palette[n + 1]; + p[2] = palette[n + 2]; + p += 3; + } + } + else { + for (i = 0; i < pixel_count; ++i) { + int n = orig[i] * 4; + p[0] = palette[n]; + p[1] = palette[n + 1]; + p[2] = palette[n + 2]; + p[3] = palette[n + 3]; + p += 4; + } + } + STBI_FREE(a->out); + a->out = temp_out; + + STBI_NOTUSED(len); + + return 1; +} + +static int stbi__unpremultiply_on_load = 0; +static int stbi__de_iphone_flag = 0; + +STBIDEF void stbi_set_unpremultiply_on_load(int 
flag_true_if_should_unpremultiply) +{ + stbi__unpremultiply_on_load = flag_true_if_should_unpremultiply; +} + +STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert) +{ + stbi__de_iphone_flag = flag_true_if_should_convert; +} + +static void stbi__de_iphone(stbi__png *z) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi_uc *p = z->out; + + if (s->img_out_n == 3) { // convert bgr to rgb + for (i = 0; i < pixel_count; ++i) { + stbi_uc t = p[0]; + p[0] = p[2]; + p[2] = t; + p += 3; + } + } + else { + STBI_ASSERT(s->img_out_n == 4); + if (stbi__unpremultiply_on_load) { + // convert bgr to rgb and unpremultiply + for (i = 0; i < pixel_count; ++i) { + stbi_uc a = p[3]; + stbi_uc t = p[0]; + if (a) { + p[0] = p[2] * 255 / a; + p[1] = p[1] * 255 / a; + p[2] = t * 255 / a; + } + else { + p[0] = p[2]; + p[2] = t; + } + p += 4; + } + } + else { + // convert bgr to rgb + for (i = 0; i < pixel_count; ++i) { + stbi_uc t = p[0]; + p[0] = p[2]; + p[2] = t; + p += 4; + } + } + } +} + +#define STBI__PNG_TYPE(a,b,c,d) (((a) << 24) + ((b) << 16) + ((c) << 8) + (d)) + +static int stbi__parse_png_file(stbi__png *z, int scan, int req_comp) +{ + stbi_uc palette[1024], pal_img_n = 0; + stbi_uc has_trans = 0, tc[3]; + stbi__uint16 tc16[3]; + stbi__uint32 ioff = 0, idata_limit = 0, i, pal_len = 0; + int first = 1, k, interlace = 0, color = 0, is_iphone = 0; + stbi__context *s = z->s; + + z->expanded = NULL; + z->idata = NULL; + z->out = NULL; + + if (!stbi__check_png_header(s)) return 0; + + if (scan == STBI__SCAN_type) return 1; + + for (;;) { + stbi__pngchunk c = stbi__get_chunk_header(s); + switch (c.type) { + case STBI__PNG_TYPE('C', 'g', 'B', 'I'): + is_iphone = 1; + stbi__skip(s, c.length); + break; + case STBI__PNG_TYPE('I', 'H', 'D', 'R'): { + int comp, filter; + if (!first) return stbi__err("multiple IHDR", "Corrupt PNG"); + first = 0; + if (c.length != 13) return stbi__err("bad IHDR len", "Corrupt PNG"); + s->img_x = 
stbi__get32be(s); if (s->img_x > (1 << 24)) return stbi__err("too large", "Very large image (corrupt?)"); + s->img_y = stbi__get32be(s); if (s->img_y > (1 << 24)) return stbi__err("too large", "Very large image (corrupt?)"); + z->depth = stbi__get8(s); if (z->depth != 1 && z->depth != 2 && z->depth != 4 && z->depth != 8 && z->depth != 16) return stbi__err("1/2/4/8/16-bit only", "PNG not supported: 1/2/4/8/16-bit only"); + color = stbi__get8(s); if (color > 6) return stbi__err("bad ctype", "Corrupt PNG"); + if (color == 3 && z->depth == 16) return stbi__err("bad ctype", "Corrupt PNG"); + if (color == 3) pal_img_n = 3; else if (color & 1) return stbi__err("bad ctype", "Corrupt PNG"); + comp = stbi__get8(s); if (comp) return stbi__err("bad comp method", "Corrupt PNG"); + filter = stbi__get8(s); if (filter) return stbi__err("bad filter method", "Corrupt PNG"); + interlace = stbi__get8(s); if (interlace>1) return stbi__err("bad interlace method", "Corrupt PNG"); + if (!s->img_x || !s->img_y) return stbi__err("0-pixel image", "Corrupt PNG"); + if (!pal_img_n) { + s->img_n = (color & 2 ? 3 : 1) + (color & 4 ? 1 : 0); + if ((1 << 30) / s->img_x / s->img_n < s->img_y) return stbi__err("too large", "Image too large to decode"); + if (scan == STBI__SCAN_header) return 1; + } + else { + // if paletted, then pal_n is our final components, and + // img_n is # components to decompress/filter. 
+ s->img_n = 1; + if ((1 << 30) / s->img_x / 4 < s->img_y) return stbi__err("too large", "Corrupt PNG"); + // if SCAN_header, have to scan to see if we have a tRNS + } + break; + } + + case STBI__PNG_TYPE('P', 'L', 'T', 'E'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (c.length > 256 * 3) return stbi__err("invalid PLTE", "Corrupt PNG"); + pal_len = c.length / 3; + if (pal_len * 3 != c.length) return stbi__err("invalid PLTE", "Corrupt PNG"); + for (i = 0; i < pal_len; ++i) { + palette[i * 4 + 0] = stbi__get8(s); + palette[i * 4 + 1] = stbi__get8(s); + palette[i * 4 + 2] = stbi__get8(s); + palette[i * 4 + 3] = 255; + } + break; + } + + case STBI__PNG_TYPE('t', 'R', 'N', 'S'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (z->idata) return stbi__err("tRNS after IDAT", "Corrupt PNG"); + if (pal_img_n) { + if (scan == STBI__SCAN_header) { s->img_n = 4; return 1; } + if (pal_len == 0) return stbi__err("tRNS before PLTE", "Corrupt PNG"); + if (c.length > pal_len) return stbi__err("bad tRNS len", "Corrupt PNG"); + pal_img_n = 4; + for (i = 0; i < c.length; ++i) + palette[i * 4 + 3] = stbi__get8(s); + } + else { + if (!(s->img_n & 1)) return stbi__err("tRNS with alpha", "Corrupt PNG"); + if (c.length != (stbi__uint32)s->img_n * 2) return stbi__err("bad tRNS len", "Corrupt PNG"); + has_trans = 1; + if (z->depth == 16) { + for (k = 0; k < s->img_n; ++k) tc16[k] = (stbi__uint16)stbi__get16be(s); // copy the values as-is + } + else { + for (k = 0; k < s->img_n; ++k) tc[k] = (stbi_uc)(stbi__get16be(s) & 255) * stbi__depth_scale_table[z->depth]; // non 8-bit images will be larger + } + } + break; + } + + case STBI__PNG_TYPE('I', 'D', 'A', 'T'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (pal_img_n && !pal_len) return stbi__err("no PLTE", "Corrupt PNG"); + if (scan == STBI__SCAN_header) { s->img_n = pal_img_n; return 1; } + if ((int)(ioff + c.length) < (int)ioff) return 0; + if (ioff + c.length > 
idata_limit) { + stbi__uint32 idata_limit_old = idata_limit; + stbi_uc *p; + if (idata_limit == 0) idata_limit = c.length > 4096 ? c.length : 4096; + while (ioff + c.length > idata_limit) + idata_limit *= 2; + STBI_NOTUSED(idata_limit_old); + p = (stbi_uc *)STBI_REALLOC_SIZED(z->idata, idata_limit_old, idata_limit); if (p == NULL) return stbi__err("outofmem", "Out of memory"); + z->idata = p; + } + if (!stbi__getn(s, z->idata + ioff, c.length)) return stbi__err("outofdata", "Corrupt PNG"); + ioff += c.length; + break; + } + + case STBI__PNG_TYPE('I', 'E', 'N', 'D'): { + stbi__uint32 raw_len, bpl; + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (scan != STBI__SCAN_load) return 1; + if (z->idata == NULL) return stbi__err("no IDAT", "Corrupt PNG"); + // initial guess for decoded data size to avoid unnecessary reallocs + bpl = (s->img_x * z->depth + 7) / 8; // bytes per line, per component + raw_len = bpl * s->img_y * s->img_n /* pixels */ + s->img_y /* filter mode per row */; + z->expanded = (stbi_uc *)stbi_zlib_decode_malloc_guesssize_headerflag((char *)z->idata, ioff, raw_len, (int *)&raw_len, !is_iphone); + if (z->expanded == NULL) return 0; // zlib should set error + STBI_FREE(z->idata); z->idata = NULL; + if ((req_comp == s->img_n + 1 && req_comp != 3 && !pal_img_n) || has_trans) + s->img_out_n = s->img_n + 1; + else + s->img_out_n = s->img_n; + if (!stbi__create_png_image(z, z->expanded, raw_len, s->img_out_n, z->depth, color, interlace)) return 0; + if (has_trans) { + if (z->depth == 16) { + if (!stbi__compute_transparency16(z, tc16, s->img_out_n)) return 0; + } + else { + if (!stbi__compute_transparency(z, tc, s->img_out_n)) return 0; + } + } + if (is_iphone && stbi__de_iphone_flag && s->img_out_n > 2) + stbi__de_iphone(z); + if (pal_img_n) { + // pal_img_n == 3 or 4 + s->img_n = pal_img_n; // record the actual colors we had + s->img_out_n = pal_img_n; + if (req_comp >= 3) s->img_out_n = req_comp; + if (!stbi__expand_png_palette(z, 
palette, pal_len, s->img_out_n)) + return 0; + } + STBI_FREE(z->expanded); z->expanded = NULL; + return 1; + } + + default: + // if critical, fail + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if ((c.type & (1 << 29)) == 0) { +#ifndef STBI_NO_FAILURE_STRINGS + // not threadsafe + static char invalid_chunk[] = "XXXX PNG chunk not known"; + invalid_chunk[0] = STBI__BYTECAST(c.type >> 24); + invalid_chunk[1] = STBI__BYTECAST(c.type >> 16); + invalid_chunk[2] = STBI__BYTECAST(c.type >> 8); + invalid_chunk[3] = STBI__BYTECAST(c.type >> 0); +#endif + return stbi__err(invalid_chunk, "PNG not supported: unknown PNG chunk type"); + } + stbi__skip(s, c.length); + break; + } + // end of PNG chunk, read and skip CRC + stbi__get32be(s); + } +} + +static void *stbi__do_png(stbi__png *p, int *x, int *y, int *n, int req_comp, stbi__result_info *ri) +{ + void *result = NULL; + if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error"); + if (stbi__parse_png_file(p, STBI__SCAN_load, req_comp)) { + if (p->depth < 8) + ri->bits_per_channel = 8; + else + ri->bits_per_channel = p->depth; + result = p->out; + p->out = NULL; + if (req_comp && req_comp != p->s->img_out_n) { + if (ri->bits_per_channel == 8) + result = stbi__convert_format((unsigned char *)result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y); + else + result = stbi__convert_format16((stbi__uint16 *)result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y); + p->s->img_out_n = req_comp; + if (result == NULL) return result; + } + *x = p->s->img_x; + *y = p->s->img_y; + if (n) *n = p->s->img_n; + } + STBI_FREE(p->out); p->out = NULL; + STBI_FREE(p->expanded); p->expanded = NULL; + STBI_FREE(p->idata); p->idata = NULL; + + return result; +} + +static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi__png p; + p.s = s; + return stbi__do_png(&p, x, y, comp, req_comp, ri); +} + +static int 
stbi__png_test(stbi__context *s) +{ + int r; + r = stbi__check_png_header(s); + stbi__rewind(s); + return r; +} + +static int stbi__png_info_raw(stbi__png *p, int *x, int *y, int *comp) +{ + if (!stbi__parse_png_file(p, STBI__SCAN_header, 0)) { + stbi__rewind(p->s); + return 0; + } + if (x) *x = p->s->img_x; + if (y) *y = p->s->img_y; + if (comp) *comp = p->s->img_n; + return 1; +} + +static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp) +{ + stbi__png p; + p.s = s; + return stbi__png_info_raw(&p, x, y, comp); +} +#endif + +// Microsoft/Windows BMP image + +#ifndef STBI_NO_BMP +static int stbi__bmp_test_raw(stbi__context *s) +{ + int r; + int sz; + if (stbi__get8(s) != 'B') return 0; + if (stbi__get8(s) != 'M') return 0; + stbi__get32le(s); // discard filesize + stbi__get16le(s); // discard reserved + stbi__get16le(s); // discard reserved + stbi__get32le(s); // discard data offset + sz = stbi__get32le(s); + r = (sz == 12 || sz == 40 || sz == 56 || sz == 108 || sz == 124); + return r; +} + +static int stbi__bmp_test(stbi__context *s) +{ + int r = stbi__bmp_test_raw(s); + stbi__rewind(s); + return r; +} + + +// returns 0..31 for the highest set bit +static int stbi__high_bit(unsigned int z) +{ + int n = 0; + if (z == 0) return -1; + if (z >= 0x10000) n += 16, z >>= 16; + if (z >= 0x00100) n += 8, z >>= 8; + if (z >= 0x00010) n += 4, z >>= 4; + if (z >= 0x00004) n += 2, z >>= 2; + if (z >= 0x00002) n += 1, z >>= 1; + return n; +} + +static int stbi__bitcount(unsigned int a) +{ + a = (a & 0x55555555) + ((a >> 1) & 0x55555555); // max 2 + a = (a & 0x33333333) + ((a >> 2) & 0x33333333); // max 4 + a = (a + (a >> 4)) & 0x0f0f0f0f; // max 8 per 4, now 8 bits + a = (a + (a >> 8)); // max 16 per 8 bits + a = (a + (a >> 16)); // max 32 per 8 bits + return a & 0xff; +} + +static int stbi__shiftsigned(int v, int shift, int bits) +{ + int result; + int z = 0; + + if (shift < 0) v <<= -shift; + else v >>= shift; + result = v; + + z = bits; + while (z < 8) { + 
result += v >> z; + z += bits; + } + return result; +} + +typedef struct +{ + int bpp, offset, hsz; + unsigned int mr, mg, mb, ma, all_a; +} stbi__bmp_data; + +static void *stbi__bmp_parse_header(stbi__context *s, stbi__bmp_data *info) +{ + int hsz; + if (stbi__get8(s) != 'B' || stbi__get8(s) != 'M') return stbi__errpuc("not BMP", "Corrupt BMP"); + stbi__get32le(s); // discard filesize + stbi__get16le(s); // discard reserved + stbi__get16le(s); // discard reserved + info->offset = stbi__get32le(s); + info->hsz = hsz = stbi__get32le(s); + info->mr = info->mg = info->mb = info->ma = 0; + + if (hsz != 12 && hsz != 40 && hsz != 56 && hsz != 108 && hsz != 124) return stbi__errpuc("unknown BMP", "BMP type not supported: unknown"); + if (hsz == 12) { + s->img_x = stbi__get16le(s); + s->img_y = stbi__get16le(s); + } + else { + s->img_x = stbi__get32le(s); + s->img_y = stbi__get32le(s); + } + if (stbi__get16le(s) != 1) return stbi__errpuc("bad BMP", "bad BMP"); + info->bpp = stbi__get16le(s); + if (info->bpp == 1) return stbi__errpuc("monochrome", "BMP type not supported: 1-bit"); + if (hsz != 12) { + int compress = stbi__get32le(s); + if (compress == 1 || compress == 2) return stbi__errpuc("BMP RLE", "BMP type not supported: RLE"); + stbi__get32le(s); // discard sizeof + stbi__get32le(s); // discard hres + stbi__get32le(s); // discard vres + stbi__get32le(s); // discard colorsused + stbi__get32le(s); // discard max important + if (hsz == 40 || hsz == 56) { + if (hsz == 56) { + stbi__get32le(s); + stbi__get32le(s); + stbi__get32le(s); + stbi__get32le(s); + } + if (info->bpp == 16 || info->bpp == 32) { + if (compress == 0) { + if (info->bpp == 32) { + info->mr = 0xffu << 16; + info->mg = 0xffu << 8; + info->mb = 0xffu << 0; + info->ma = 0xffu << 24; + info->all_a = 0; // if all_a is 0 at end, then we loaded alpha channel but it was all 0 + } + else { + info->mr = 31u << 10; + info->mg = 31u << 5; + info->mb = 31u << 0; + } + } + else if (compress == 3) { + info->mr = 
stbi__get32le(s); + info->mg = stbi__get32le(s); + info->mb = stbi__get32le(s); + // not documented, but generated by photoshop and handled by mspaint + if (info->mr == info->mg && info->mg == info->mb) { + // ?!?!? + return stbi__errpuc("bad BMP", "bad BMP"); + } + } + else + return stbi__errpuc("bad BMP", "bad BMP"); + } + } + else { + int i; + if (hsz != 108 && hsz != 124) + return stbi__errpuc("bad BMP", "bad BMP"); + info->mr = stbi__get32le(s); + info->mg = stbi__get32le(s); + info->mb = stbi__get32le(s); + info->ma = stbi__get32le(s); + stbi__get32le(s); // discard color space + for (i = 0; i < 12; ++i) + stbi__get32le(s); // discard color space parameters + if (hsz == 124) { + stbi__get32le(s); // discard rendering intent + stbi__get32le(s); // discard offset of profile data + stbi__get32le(s); // discard size of profile data + stbi__get32le(s); // discard reserved + } + } + } + return (void *)1; +} + + +static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *out; + unsigned int mr = 0, mg = 0, mb = 0, ma = 0, all_a; + stbi_uc pal[256][4]; + int psize = 0, i, j, width; + int flip_vertically, pad, target; + stbi__bmp_data info; + STBI_NOTUSED(ri); + + info.all_a = 255; + if (stbi__bmp_parse_header(s, &info) == NULL) + return NULL; // error code already set + + flip_vertically = ((int)s->img_y) > 0; + s->img_y = abs((int)s->img_y); + + mr = info.mr; + mg = info.mg; + mb = info.mb; + ma = info.ma; + all_a = info.all_a; + + if (info.hsz == 12) { + if (info.bpp < 24) + psize = (info.offset - 14 - 24) / 3; + } + else { + if (info.bpp < 16) + psize = (info.offset - 14 - info.hsz) >> 2; + } + + s->img_n = ma ? 
4 : 3; + if (req_comp && req_comp >= 3) // we can directly decode 3 or 4 + target = req_comp; + else + target = s->img_n; // if they want monochrome, we'll post-convert + + // sanity-check size + if (!stbi__mad3sizes_valid(target, s->img_x, s->img_y, 0)) + return stbi__errpuc("too large", "Corrupt BMP"); + + out = (stbi_uc *)stbi__malloc_mad3(target, s->img_x, s->img_y, 0); + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + if (info.bpp < 16) { + int z = 0; + if (psize == 0 || psize > 256) { STBI_FREE(out); return stbi__errpuc("invalid", "Corrupt BMP"); } + for (i = 0; i < psize; ++i) { + pal[i][2] = stbi__get8(s); + pal[i][1] = stbi__get8(s); + pal[i][0] = stbi__get8(s); + if (info.hsz != 12) stbi__get8(s); + pal[i][3] = 255; + } + stbi__skip(s, info.offset - 14 - info.hsz - psize * (info.hsz == 12 ? 3 : 4)); + if (info.bpp == 4) width = (s->img_x + 1) >> 1; + else if (info.bpp == 8) width = s->img_x; + else { STBI_FREE(out); return stbi__errpuc("bad bpp", "Corrupt BMP"); } + pad = (-width) & 3; + for (j = 0; j < (int)s->img_y; ++j) { + for (i = 0; i < (int)s->img_x; i += 2) { + int v = stbi__get8(s), v2 = 0; + if (info.bpp == 4) { + v2 = v & 15; + v >>= 4; + } + out[z++] = pal[v][0]; + out[z++] = pal[v][1]; + out[z++] = pal[v][2]; + if (target == 4) out[z++] = 255; + if (i + 1 == (int)s->img_x) break; + v = (info.bpp == 8) ? 
stbi__get8(s) : v2; + out[z++] = pal[v][0]; + out[z++] = pal[v][1]; + out[z++] = pal[v][2]; + if (target == 4) out[z++] = 255; + } + stbi__skip(s, pad); + } + } + else { + int rshift = 0, gshift = 0, bshift = 0, ashift = 0, rcount = 0, gcount = 0, bcount = 0, acount = 0; + int z = 0; + int easy = 0; + stbi__skip(s, info.offset - 14 - info.hsz); + if (info.bpp == 24) width = 3 * s->img_x; + else if (info.bpp == 16) width = 2 * s->img_x; + else /* bpp = 32 and pad = 0 */ width = 0; + pad = (-width) & 3; + if (info.bpp == 24) { + easy = 1; + } + else if (info.bpp == 32) { + if (mb == 0xff && mg == 0xff00 && mr == 0x00ff0000 && ma == 0xff000000) + easy = 2; + } + if (!easy) { + if (!mr || !mg || !mb) { STBI_FREE(out); return stbi__errpuc("bad masks", "Corrupt BMP"); } + // right shift amt to put high bit in position #7 + rshift = stbi__high_bit(mr) - 7; rcount = stbi__bitcount(mr); + gshift = stbi__high_bit(mg) - 7; gcount = stbi__bitcount(mg); + bshift = stbi__high_bit(mb) - 7; bcount = stbi__bitcount(mb); + ashift = stbi__high_bit(ma) - 7; acount = stbi__bitcount(ma); + } + for (j = 0; j < (int)s->img_y; ++j) { + if (easy) { + for (i = 0; i < (int)s->img_x; ++i) { + unsigned char a; + out[z + 2] = stbi__get8(s); + out[z + 1] = stbi__get8(s); + out[z + 0] = stbi__get8(s); + z += 3; + a = (easy == 2 ? stbi__get8(s) : 255); + all_a |= a; + if (target == 4) out[z++] = a; + } + } + else { + int bpp = info.bpp; + for (i = 0; i < (int)s->img_x; ++i) { + stbi__uint32 v = (bpp == 16 ? (stbi__uint32)stbi__get16le(s) : stbi__get32le(s)); + int a; + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mr, rshift, rcount)); + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mg, gshift, gcount)); + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mb, bshift, bcount)); + a = (ma ? 
stbi__shiftsigned(v & ma, ashift, acount) : 255); + all_a |= a; + if (target == 4) out[z++] = STBI__BYTECAST(a); + } + } + stbi__skip(s, pad); + } + } + + // if alpha channel is all 0s, replace with all 255s + if (target == 4 && all_a == 0) + for (i = 4 * s->img_x*s->img_y - 1; i >= 0; i -= 4) + out[i] = 255; + + if (flip_vertically) { + stbi_uc t; + for (j = 0; j < (int)s->img_y >> 1; ++j) { + stbi_uc *p1 = out + j *s->img_x*target; + stbi_uc *p2 = out + (s->img_y - 1 - j)*s->img_x*target; + for (i = 0; i < (int)s->img_x*target; ++i) { + t = p1[i], p1[i] = p2[i], p2[i] = t; + } + } + } + + if (req_comp && req_comp != target) { + out = stbi__convert_format(out, target, req_comp, s->img_x, s->img_y); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + + *x = s->img_x; + *y = s->img_y; + if (comp) *comp = s->img_n; + return out; +} +#endif + +// Targa Truevision - TGA +// by Jonathan Dummer +#ifndef STBI_NO_TGA +// returns STBI_rgb or whatever, 0 on error +static int stbi__tga_get_comp(int bits_per_pixel, int is_grey, int* is_rgb16) +{ + // only RGB or RGBA (incl. 
16bit) or grey allowed + if (is_rgb16) *is_rgb16 = 0; + switch (bits_per_pixel) { + case 8: return STBI_grey; + case 16: if (is_grey) return STBI_grey_alpha; + // else: fall-through + case 15: if (is_rgb16) *is_rgb16 = 1; + return STBI_rgb; + case 24: // fall-through + case 32: return bits_per_pixel / 8; + default: return 0; + } +} + +static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp) +{ + int tga_w, tga_h, tga_comp, tga_image_type, tga_bits_per_pixel, tga_colormap_bpp; + int sz, tga_colormap_type; + stbi__get8(s); // discard Offset + tga_colormap_type = stbi__get8(s); // colormap type + if (tga_colormap_type > 1) { + stbi__rewind(s); + return 0; // only RGB or indexed allowed + } + tga_image_type = stbi__get8(s); // image type + if (tga_colormap_type == 1) { // colormapped (paletted) image + if (tga_image_type != 1 && tga_image_type != 9) { + stbi__rewind(s); + return 0; + } + stbi__skip(s, 4); // skip index of first colormap entry and number of entries + sz = stbi__get8(s); // check bits per palette color entry + if ((sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32)) { + stbi__rewind(s); + return 0; + } + stbi__skip(s, 4); // skip image x and y origin + tga_colormap_bpp = sz; + } + else { // "normal" image w/o colormap - only RGB or grey allowed, +/- RLE + if ((tga_image_type != 2) && (tga_image_type != 3) && (tga_image_type != 10) && (tga_image_type != 11)) { + stbi__rewind(s); + return 0; // only RGB or grey allowed, +/- RLE + } + stbi__skip(s, 9); // skip colormap specification and image x/y origin + tga_colormap_bpp = 0; + } + tga_w = stbi__get16le(s); + if (tga_w < 1) { + stbi__rewind(s); + return 0; // test width + } + tga_h = stbi__get16le(s); + if (tga_h < 1) { + stbi__rewind(s); + return 0; // test height + } + tga_bits_per_pixel = stbi__get8(s); // bits per pixel + stbi__get8(s); // ignore alpha bits + if (tga_colormap_bpp != 0) { + if ((tga_bits_per_pixel != 8) && (tga_bits_per_pixel != 16)) { + // when using a 
colormap, tga_bits_per_pixel is the size of the indexes + // I don't think anything but 8 or 16bit indexes makes sense + stbi__rewind(s); + return 0; + } + tga_comp = stbi__tga_get_comp(tga_colormap_bpp, 0, NULL); + } + else { + tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3) || (tga_image_type == 11), NULL); + } + if (!tga_comp) { + stbi__rewind(s); + return 0; + } + if (x) *x = tga_w; + if (y) *y = tga_h; + if (comp) *comp = tga_comp; + return 1; // seems to have passed everything +} + +static int stbi__tga_test(stbi__context *s) +{ + int res = 0; + int sz, tga_color_type; + stbi__get8(s); // discard Offset + tga_color_type = stbi__get8(s); // color type + if (tga_color_type > 1) goto errorEnd; // only RGB or indexed allowed + sz = stbi__get8(s); // image type + if (tga_color_type == 1) { // colormapped (paletted) image + if (sz != 1 && sz != 9) goto errorEnd; // colortype 1 demands image type 1 or 9 + stbi__skip(s, 4); // skip index of first colormap entry and number of entries + sz = stbi__get8(s); // check bits per palette color entry + if ((sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32)) goto errorEnd; + stbi__skip(s, 4); // skip image x and y origin + } + else { // "normal" image w/o colormap + if ((sz != 2) && (sz != 3) && (sz != 10) && (sz != 11)) goto errorEnd; // only RGB or grey allowed, +/- RLE + stbi__skip(s, 9); // skip colormap specification and image x/y origin + } + if (stbi__get16le(s) < 1) goto errorEnd; // test width + if (stbi__get16le(s) < 1) goto errorEnd; // test height + sz = stbi__get8(s); // bits per pixel + if ((tga_color_type == 1) && (sz != 8) && (sz != 16)) goto errorEnd; // for colormapped images, bpp is size of an index + if ((sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32)) goto errorEnd; + + res = 1; // if we got this far, everything's good and we can return 1 instead of 0 + +errorEnd: + stbi__rewind(s); + return res; +} + +// read 16bit value and convert to 24bit RGB 
+static void stbi__tga_read_rgb16(stbi__context *s, stbi_uc* out) +{ + stbi__uint16 px = (stbi__uint16)stbi__get16le(s); + stbi__uint16 fiveBitMask = 31; + // we have 3 channels with 5bits each + int r = (px >> 10) & fiveBitMask; + int g = (px >> 5) & fiveBitMask; + int b = px & fiveBitMask; + // Note that this saves the data in RGB(A) order, so it doesn't need to be swapped later + out[0] = (stbi_uc)((r * 255) / 31); + out[1] = (stbi_uc)((g * 255) / 31); + out[2] = (stbi_uc)((b * 255) / 31); + + // some people claim that the most significant bit might be used for alpha + // (possibly if an alpha-bit is set in the "image descriptor byte") + // but that only made 16bit test images completely translucent.. + // so let's treat all 15 and 16bit TGAs as RGB with no alpha. +} + +static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + // read in the TGA header stuff + int tga_offset = stbi__get8(s); + int tga_indexed = stbi__get8(s); + int tga_image_type = stbi__get8(s); + int tga_is_RLE = 0; + int tga_palette_start = stbi__get16le(s); + int tga_palette_len = stbi__get16le(s); + int tga_palette_bits = stbi__get8(s); + int tga_x_origin = stbi__get16le(s); + int tga_y_origin = stbi__get16le(s); + int tga_width = stbi__get16le(s); + int tga_height = stbi__get16le(s); + int tga_bits_per_pixel = stbi__get8(s); + int tga_comp, tga_rgb16 = 0; + int tga_inverted = stbi__get8(s); + // int tga_alpha_bits = tga_inverted & 15; // the 4 lowest bits - unused (useless?) 
+ // image data + unsigned char *tga_data; + unsigned char *tga_palette = NULL; + int i, j; + unsigned char raw_data[4] = { 0 }; + int RLE_count = 0; + int RLE_repeating = 0; + int read_next_pixel = 1; + STBI_NOTUSED(ri); + + // do a tiny bit of precessing + if (tga_image_type >= 8) + { + tga_image_type -= 8; + tga_is_RLE = 1; + } + tga_inverted = 1 - ((tga_inverted >> 5) & 1); + + // If I'm paletted, then I'll use the number of bits from the palette + if (tga_indexed) tga_comp = stbi__tga_get_comp(tga_palette_bits, 0, &tga_rgb16); + else tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3), &tga_rgb16); + + if (!tga_comp) // shouldn't really happen, stbi__tga_test() should have ensured basic consistency + return stbi__errpuc("bad format", "Can't find out TGA pixelformat"); + + // tga info + *x = tga_width; + *y = tga_height; + if (comp) *comp = tga_comp; + + if (!stbi__mad3sizes_valid(tga_width, tga_height, tga_comp, 0)) + return stbi__errpuc("too large", "Corrupt TGA"); + + tga_data = (unsigned char*)stbi__malloc_mad3(tga_width, tga_height, tga_comp, 0); + if (!tga_data) return stbi__errpuc("outofmem", "Out of memory"); + + // skip to the data's starting position (offset usually = 0) + stbi__skip(s, tga_offset); + + if (!tga_indexed && !tga_is_RLE && !tga_rgb16) { + for (i = 0; i < tga_height; ++i) { + int row = tga_inverted ? tga_height - i - 1 : i; + stbi_uc *tga_row = tga_data + row*tga_width*tga_comp; + stbi__getn(s, tga_row, tga_width * tga_comp); + } + } + else { + // do I need to load a palette? + if (tga_indexed) + { + // any data to skip? 
(offset usually = 0) + stbi__skip(s, tga_palette_start); + // load the palette + tga_palette = (unsigned char*)stbi__malloc_mad2(tga_palette_len, tga_comp, 0); + if (!tga_palette) { + STBI_FREE(tga_data); + return stbi__errpuc("outofmem", "Out of memory"); + } + if (tga_rgb16) { + stbi_uc *pal_entry = tga_palette; + STBI_ASSERT(tga_comp == STBI_rgb); + for (i = 0; i < tga_palette_len; ++i) { + stbi__tga_read_rgb16(s, pal_entry); + pal_entry += tga_comp; + } + } + else if (!stbi__getn(s, tga_palette, tga_palette_len * tga_comp)) { + STBI_FREE(tga_data); + STBI_FREE(tga_palette); + return stbi__errpuc("bad palette", "Corrupt TGA"); + } + } + // load the data + for (i = 0; i < tga_width * tga_height; ++i) + { + // if I'm in RLE mode, do I need to get a RLE stbi__pngchunk? + if (tga_is_RLE) + { + if (RLE_count == 0) + { + // yep, get the next byte as a RLE command + int RLE_cmd = stbi__get8(s); + RLE_count = 1 + (RLE_cmd & 127); + RLE_repeating = RLE_cmd >> 7; + read_next_pixel = 1; + } + else if (!RLE_repeating) + { + read_next_pixel = 1; + } + } + else + { + read_next_pixel = 1; + } + // OK, if I need to read a pixel, do it now + if (read_next_pixel) + { + // load however much data we did have + if (tga_indexed) + { + // read in index, then perform the lookup + int pal_idx = (tga_bits_per_pixel == 8) ? 
stbi__get8(s) : stbi__get16le(s); + if (pal_idx >= tga_palette_len) { + // invalid index + pal_idx = 0; + } + pal_idx *= tga_comp; + for (j = 0; j < tga_comp; ++j) { + raw_data[j] = tga_palette[pal_idx + j]; + } + } + else if (tga_rgb16) { + STBI_ASSERT(tga_comp == STBI_rgb); + stbi__tga_read_rgb16(s, raw_data); + } + else { + // read in the data raw + for (j = 0; j < tga_comp; ++j) { + raw_data[j] = stbi__get8(s); + } + } + // clear the reading flag for the next pixel + read_next_pixel = 0; + } // end of reading a pixel + + // copy data + for (j = 0; j < tga_comp; ++j) + tga_data[i*tga_comp + j] = raw_data[j]; + + // in case we're in RLE mode, keep counting down + --RLE_count; + } + // do I need to invert the image? + if (tga_inverted) + { + for (j = 0; j * 2 < tga_height; ++j) + { + int index1 = j * tga_width * tga_comp; + int index2 = (tga_height - 1 - j) * tga_width * tga_comp; + for (i = tga_width * tga_comp; i > 0; --i) + { + unsigned char temp = tga_data[index1]; + tga_data[index1] = tga_data[index2]; + tga_data[index2] = temp; + ++index1; + ++index2; + } + } + } + // clear my palette, if I had one + if (tga_palette != NULL) + { + STBI_FREE(tga_palette); + } + } + + // swap RGB - if the source data was RGB16, it already is in the right order + if (tga_comp >= 3 && !tga_rgb16) + { + unsigned char* tga_pixel = tga_data; + for (i = 0; i < tga_width * tga_height; ++i) + { + unsigned char temp = tga_pixel[0]; + tga_pixel[0] = tga_pixel[2]; + tga_pixel[2] = temp; + tga_pixel += tga_comp; + } + } + + // convert to target component count + if (req_comp && req_comp != tga_comp) + tga_data = stbi__convert_format(tga_data, tga_comp, req_comp, tga_width, tga_height); + + // the things I do to get rid of an error message, and yet keep + // Microsoft's C compilers happy... 
[8^( + tga_palette_start = tga_palette_len = tga_palette_bits = + tga_x_origin = tga_y_origin = 0; + // OK, done + return tga_data; +} +#endif + +// ************************************************************************************************* +// Photoshop PSD loader -- PD by Thatcher Ulrich, integration by Nicolas Schulz, tweaked by STB + +#ifndef STBI_NO_PSD +static int stbi__psd_test(stbi__context *s) +{ + int r = (stbi__get32be(s) == 0x38425053); + stbi__rewind(s); + return r; +} + +static int stbi__psd_decode_rle(stbi__context *s, stbi_uc *p, int pixelCount) +{ + int count, nleft, len; + + count = 0; + while ((nleft = pixelCount - count) > 0) { + len = stbi__get8(s); + if (len == 128) { + // No-op. + } + else if (len < 128) { + // Copy next len+1 bytes literally. + len++; + if (len > nleft) return 0; // corrupt data + count += len; + while (len) { + *p = stbi__get8(s); + p += 4; + len--; + } + } + else if (len > 128) { + stbi_uc val; + // Next -len+1 bytes in the dest are replicated from next source byte. + // (Interpret len as a negative 8-bit int.) + len = 257 - len; + if (len > nleft) return 0; // corrupt data + val = stbi__get8(s); + count += len; + while (len) { + *p = val; + p += 4; + len--; + } + } + } + + return 1; +} + +static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc) +{ + int pixelCount; + int channelCount, compression; + int channel, i; + int bitdepth; + int w, h; + stbi_uc *out; + STBI_NOTUSED(ri); + + // Check identifier + if (stbi__get32be(s) != 0x38425053) // "8BPS" + return stbi__errpuc("not PSD", "Corrupt PSD image"); + + // Check file type version. + if (stbi__get16be(s) != 1) + return stbi__errpuc("wrong version", "Unsupported version of PSD image"); + + // Skip 6 reserved bytes. + stbi__skip(s, 6); + + // Read the number of channels (R, G, B, A, etc). 
+ channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) + return stbi__errpuc("wrong channel count", "Unsupported number of channels in PSD image"); + + // Read the rows and columns of the image. + h = stbi__get32be(s); + w = stbi__get32be(s); + + // Make sure the depth is 8 bits. + bitdepth = stbi__get16be(s); + if (bitdepth != 8 && bitdepth != 16) + return stbi__errpuc("unsupported bit depth", "PSD bit depth is not 8 or 16 bit"); + + // Make sure the color mode is RGB. + // Valid options are: + // 0: Bitmap + // 1: Grayscale + // 2: Indexed color + // 3: RGB color + // 4: CMYK color + // 7: Multichannel + // 8: Duotone + // 9: Lab color + if (stbi__get16be(s) != 3) + return stbi__errpuc("wrong color format", "PSD is not in RGB color format"); + + // Skip the Mode Data. (It's the palette for indexed color; other info for other modes.) + stbi__skip(s, stbi__get32be(s)); + + // Skip the image resources. (resolution, pen tool paths, etc) + stbi__skip(s, stbi__get32be(s)); + + // Skip the reserved data. + stbi__skip(s, stbi__get32be(s)); + + // Find out if the data is compressed. + // Known values: + // 0: no compression + // 1: RLE compressed + compression = stbi__get16be(s); + if (compression > 1) + return stbi__errpuc("bad compression", "PSD has an unknown compression format"); + + // Check size + if (!stbi__mad3sizes_valid(4, w, h, 0)) + return stbi__errpuc("too large", "Corrupt PSD"); + + // Create the destination image. + + if (!compression && bitdepth == 16 && bpc == 16) { + out = (stbi_uc *)stbi__malloc_mad3(8, w, h, 0); + ri->bits_per_channel = 16; + } + else + out = (stbi_uc *)stbi__malloc(4 * w*h); + + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + pixelCount = w*h; + + // Initialize the data to zero. + //memset( out, 0, pixelCount * 4 ); + + // Finally, the image data. 
+ if (compression) { + // RLE as used by .PSD and .TIFF + // Loop until you get the number of unpacked bytes you are expecting: + // Read the next source byte into n. + // If n is between 0 and 127 inclusive, copy the next n+1 bytes literally. + // Else if n is between -127 and -1 inclusive, copy the next byte -n+1 times. + // Else if n is 128, noop. + // Endloop + + // The RLE-compressed data is preceeded by a 2-byte data count for each row in the data, + // which we're going to just skip. + stbi__skip(s, h * channelCount * 2); + + // Read the RLE data by channel. + for (channel = 0; channel < 4; channel++) { + stbi_uc *p; + + p = out + channel; + if (channel >= channelCount) { + // Fill this channel with default data. + for (i = 0; i < pixelCount; i++, p += 4) + *p = (channel == 3 ? 255 : 0); + } + else { + // Read the RLE data. + if (!stbi__psd_decode_rle(s, p, pixelCount)) { + STBI_FREE(out); + return stbi__errpuc("corrupt", "bad RLE data"); + } + } + } + + } + else { + // We're at the raw image data. It's each channel in order (Red, Green, Blue, Alpha, ...) + // where each channel consists of an 8-bit (or 16-bit) value for each pixel in the image. + + // Read the data by channel. + for (channel = 0; channel < 4; channel++) { + if (channel >= channelCount) { + // Fill this channel with default data. + if (bitdepth == 16 && bpc == 16) { + stbi__uint16 *q = ((stbi__uint16 *)out) + channel; + stbi__uint16 val = channel == 3 ? 65535 : 0; + for (i = 0; i < pixelCount; i++, q += 4) + *q = val; + } + else { + stbi_uc *p = out + channel; + stbi_uc val = channel == 3 ? 
255 : 0; + for (i = 0; i < pixelCount; i++, p += 4) + *p = val; + } + } + else { + if (ri->bits_per_channel == 16) { // output bpc + stbi__uint16 *q = ((stbi__uint16 *)out) + channel; + for (i = 0; i < pixelCount; i++, q += 4) + *q = (stbi__uint16)stbi__get16be(s); + } + else { + stbi_uc *p = out + channel; + if (bitdepth == 16) { // input bpc + for (i = 0; i < pixelCount; i++, p += 4) + *p = (stbi_uc)(stbi__get16be(s) >> 8); + } + else { + for (i = 0; i < pixelCount; i++, p += 4) + *p = stbi__get8(s); + } + } + } + } + } + + // remove weird white matte from PSD + if (channelCount >= 4) { + if (ri->bits_per_channel == 16) { + for (i = 0; i < w*h; ++i) { + stbi__uint16 *pixel = (stbi__uint16 *)out + 4 * i; + if (pixel[3] != 0 && pixel[3] != 65535) { + float a = pixel[3] / 65535.0f; + float ra = 1.0f / a; + float inv_a = 65535.0f * (1 - ra); + pixel[0] = (stbi__uint16)(pixel[0] * ra + inv_a); + pixel[1] = (stbi__uint16)(pixel[1] * ra + inv_a); + pixel[2] = (stbi__uint16)(pixel[2] * ra + inv_a); + } + } + } + else { + for (i = 0; i < w*h; ++i) { + unsigned char *pixel = out + 4 * i; + if (pixel[3] != 0 && pixel[3] != 255) { + float a = pixel[3] / 255.0f; + float ra = 1.0f / a; + float inv_a = 255.0f * (1 - ra); + pixel[0] = (unsigned char)(pixel[0] * ra + inv_a); + pixel[1] = (unsigned char)(pixel[1] * ra + inv_a); + pixel[2] = (unsigned char)(pixel[2] * ra + inv_a); + } + } + } + } + + // convert to desired output format + if (req_comp && req_comp != 4) { + if (ri->bits_per_channel == 16) + out = (stbi_uc *)stbi__convert_format16((stbi__uint16 *)out, 4, req_comp, w, h); + else + out = stbi__convert_format(out, 4, req_comp, w, h); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + + if (comp) *comp = 4; + *y = h; + *x = w; + + return out; +} +#endif + +// ************************************************************************************************* +// Softimage PIC loader +// by Tom Seddon +// +// See 
http://softimage.wiki.softimage.com/index.php/INFO:_PIC_file_format +// See http://ozviz.wasp.uwa.edu.au/~pbourke/dataformats/softimagepic/ + +#ifndef STBI_NO_PIC +static int stbi__pic_is4(stbi__context *s, const char *str) +{ + int i; + for (i = 0; i<4; ++i) + if (stbi__get8(s) != (stbi_uc)str[i]) + return 0; + + return 1; +} + +static int stbi__pic_test_core(stbi__context *s) +{ + int i; + + if (!stbi__pic_is4(s, "\x53\x80\xF6\x34")) + return 0; + + for (i = 0; i<84; ++i) + stbi__get8(s); + + if (!stbi__pic_is4(s, "PICT")) + return 0; + + return 1; +} + +typedef struct +{ + stbi_uc size, type, channel; +} stbi__pic_packet; + +static stbi_uc *stbi__readval(stbi__context *s, int channel, stbi_uc *dest) +{ + int mask = 0x80, i; + + for (i = 0; i<4; ++i, mask >>= 1) { + if (channel & mask) { + if (stbi__at_eof(s)) return stbi__errpuc("bad file", "PIC file too short"); + dest[i] = stbi__get8(s); + } + } + + return dest; +} + +static void stbi__copyval(int channel, stbi_uc *dest, const stbi_uc *src) +{ + int mask = 0x80, i; + + for (i = 0; i<4; ++i, mask >>= 1) + if (channel&mask) + dest[i] = src[i]; +} + +static stbi_uc *stbi__pic_load_core(stbi__context *s, int width, int height, int *comp, stbi_uc *result) +{ + int act_comp = 0, num_packets = 0, y, chained; + stbi__pic_packet packets[10]; + + // this will (should...) cater for even some bizarre stuff like having data + // for the same channel in multiple packets. 
+ do { + stbi__pic_packet *packet; + + if (num_packets == sizeof(packets) / sizeof(packets[0])) + return stbi__errpuc("bad format", "too many packets"); + + packet = &packets[num_packets++]; + + chained = stbi__get8(s); + packet->size = stbi__get8(s); + packet->type = stbi__get8(s); + packet->channel = stbi__get8(s); + + act_comp |= packet->channel; + + if (stbi__at_eof(s)) return stbi__errpuc("bad file", "file too short (reading packets)"); + if (packet->size != 8) return stbi__errpuc("bad format", "packet isn't 8bpp"); + } while (chained); + + *comp = (act_comp & 0x10 ? 4 : 3); // has alpha channel? + + for (y = 0; ytype) { + default: + return stbi__errpuc("bad format", "packet has bad compression type"); + + case 0: {//uncompressed + int x; + + for (x = 0; xchannel, dest)) + return 0; + break; + } + + case 1://Pure RLE + { + int left = width, i; + + while (left>0) { + stbi_uc count, value[4]; + + count = stbi__get8(s); + if (stbi__at_eof(s)) return stbi__errpuc("bad file", "file too short (pure read count)"); + + if (count > left) + count = (stbi_uc)left; + + if (!stbi__readval(s, packet->channel, value)) return 0; + + for (i = 0; ichannel, dest, value); + left -= count; + } + } + break; + + case 2: {//Mixed RLE + int left = width; + while (left>0) { + int count = stbi__get8(s), i; + if (stbi__at_eof(s)) return stbi__errpuc("bad file", "file too short (mixed read count)"); + + if (count >= 128) { // Repeated + stbi_uc value[4]; + + if (count == 128) + count = stbi__get16be(s); + else + count -= 127; + if (count > left) + return stbi__errpuc("bad file", "scanline overrun"); + + if (!stbi__readval(s, packet->channel, value)) + return 0; + + for (i = 0; ichannel, dest, value); + } + else { // Raw + ++count; + if (count>left) return stbi__errpuc("bad file", "scanline overrun"); + + for (i = 0; ichannel, dest)) + return 0; + } + left -= count; + } + break; + } + } + } + } + + return result; +} + +static void *stbi__pic_load(stbi__context *s, int *px, int *py, int 
*comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *result; + int i, x, y; + STBI_NOTUSED(ri); + + for (i = 0; i<92; ++i) + stbi__get8(s); + + x = stbi__get16be(s); + y = stbi__get16be(s); + if (stbi__at_eof(s)) return stbi__errpuc("bad file", "file too short (pic header)"); + if (!stbi__mad3sizes_valid(x, y, 4, 0)) return stbi__errpuc("too large", "PIC image too large to decode"); + + stbi__get32be(s); //skip `ratio' + stbi__get16be(s); //skip `fields' + stbi__get16be(s); //skip `pad' + + // intermediate buffer is RGBA + result = (stbi_uc *)stbi__malloc_mad3(x, y, 4, 0); + memset(result, 0xff, x*y * 4); + + if (!stbi__pic_load_core(s, x, y, comp, result)) { + STBI_FREE(result); + result = 0; + } + *px = x; + *py = y; + if (req_comp == 0) req_comp = *comp; + result = stbi__convert_format(result, 4, req_comp, x, y); + + return result; +} + +static int stbi__pic_test(stbi__context *s) +{ + int r = stbi__pic_test_core(s); + stbi__rewind(s); + return r; +} +#endif + +// ************************************************************************************************* +// GIF loader -- public domain by Jean-Marc Lienher -- simplified/shrunk by stb + +#ifndef STBI_NO_GIF +typedef struct +{ + stbi__int16 prefix; + stbi_uc first; + stbi_uc suffix; +} stbi__gif_lzw; + +typedef struct +{ + int w, h; + stbi_uc *out, *old_out; // output buffer (always 4 components) + int flags, bgindex, ratio, transparent, eflags, delay; + stbi_uc pal[256][4]; + stbi_uc lpal[256][4]; + stbi__gif_lzw codes[4096]; + stbi_uc *color_table; + int parse, step; + int lflags; + int start_x, start_y; + int max_x, max_y; + int cur_x, cur_y; + int line_size; +} stbi__gif; + +static int stbi__gif_test_raw(stbi__context *s) +{ + int sz; + if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') return 0; + sz = stbi__get8(s); + if (sz != '9' && sz != '7') return 0; + if (stbi__get8(s) != 'a') return 0; + return 1; +} + +static int stbi__gif_test(stbi__context 
*s) +{ + int r = stbi__gif_test_raw(s); + stbi__rewind(s); + return r; +} + +static void stbi__gif_parse_colortable(stbi__context *s, stbi_uc pal[256][4], int num_entries, int transp) +{ + int i; + for (i = 0; i < num_entries; ++i) { + pal[i][2] = stbi__get8(s); + pal[i][1] = stbi__get8(s); + pal[i][0] = stbi__get8(s); + pal[i][3] = transp == i ? 0 : 255; + } +} + +static int stbi__gif_header(stbi__context *s, stbi__gif *g, int *comp, int is_info) +{ + stbi_uc version; + if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') + return stbi__err("not GIF", "Corrupt GIF"); + + version = stbi__get8(s); + if (version != '7' && version != '9') return stbi__err("not GIF", "Corrupt GIF"); + if (stbi__get8(s) != 'a') return stbi__err("not GIF", "Corrupt GIF"); + + stbi__g_failure_reason = ""; + g->w = stbi__get16le(s); + g->h = stbi__get16le(s); + g->flags = stbi__get8(s); + g->bgindex = stbi__get8(s); + g->ratio = stbi__get8(s); + g->transparent = -1; + + if (comp != 0) *comp = 4; // can't actually tell whether it's 3 or 4 until we parse the comments + + if (is_info) return 1; + + if (g->flags & 0x80) + stbi__gif_parse_colortable(s, g->pal, 2 << (g->flags & 7), -1); + + return 1; +} + +static int stbi__gif_info_raw(stbi__context *s, int *x, int *y, int *comp) +{ + stbi__gif* g = (stbi__gif*)stbi__malloc(sizeof(stbi__gif)); + if (!stbi__gif_header(s, g, comp, 1)) { + STBI_FREE(g); + stbi__rewind(s); + return 0; + } + if (x) *x = g->w; + if (y) *y = g->h; + STBI_FREE(g); + return 1; +} + +static void stbi__out_gif_code(stbi__gif *g, stbi__uint16 code) +{ + stbi_uc *p, *c; + + // recurse to decode the prefixes, since the linked-list is backwards, + // and working backwards through an interleaved image would be nasty + if (g->codes[code].prefix >= 0) + stbi__out_gif_code(g, g->codes[code].prefix); + + if (g->cur_y >= g->max_y) return; + + p = &g->out[g->cur_x + g->cur_y]; + c = &g->color_table[g->codes[code].suffix * 4]; + + if (c[3] 
>= 128) { + p[0] = c[2]; + p[1] = c[1]; + p[2] = c[0]; + p[3] = c[3]; + } + g->cur_x += 4; + + if (g->cur_x >= g->max_x) { + g->cur_x = g->start_x; + g->cur_y += g->step; + + while (g->cur_y >= g->max_y && g->parse > 0) { + g->step = (1 << g->parse) * g->line_size; + g->cur_y = g->start_y + (g->step >> 1); + --g->parse; + } + } +} + +static stbi_uc *stbi__process_gif_raster(stbi__context *s, stbi__gif *g) +{ + stbi_uc lzw_cs; + stbi__int32 len, init_code; + stbi__uint32 first; + stbi__int32 codesize, codemask, avail, oldcode, bits, valid_bits, clear; + stbi__gif_lzw *p; + + lzw_cs = stbi__get8(s); + if (lzw_cs > 12) return NULL; + clear = 1 << lzw_cs; + first = 1; + codesize = lzw_cs + 1; + codemask = (1 << codesize) - 1; + bits = 0; + valid_bits = 0; + for (init_code = 0; init_code < clear; init_code++) { + g->codes[init_code].prefix = -1; + g->codes[init_code].first = (stbi_uc)init_code; + g->codes[init_code].suffix = (stbi_uc)init_code; + } + + // support no starting clear code + avail = clear + 2; + oldcode = -1; + + len = 0; + for (;;) { + if (valid_bits < codesize) { + if (len == 0) { + len = stbi__get8(s); // start new block + if (len == 0) + return g->out; + } + --len; + bits |= (stbi__int32)stbi__get8(s) << valid_bits; + valid_bits += 8; + } + else { + stbi__int32 code = bits & codemask; + bits >>= codesize; + valid_bits -= codesize; + // @OPTIMIZE: is there some way we can accelerate the non-clear path? 
+ if (code == clear) { // clear code + codesize = lzw_cs + 1; + codemask = (1 << codesize) - 1; + avail = clear + 2; + oldcode = -1; + first = 0; + } + else if (code == clear + 1) { // end of stream code + stbi__skip(s, len); + while ((len = stbi__get8(s)) > 0) + stbi__skip(s, len); + return g->out; + } + else if (code <= avail) { + if (first) return stbi__errpuc("no clear code", "Corrupt GIF"); + + if (oldcode >= 0) { + p = &g->codes[avail++]; + if (avail > 4096) return stbi__errpuc("too many codes", "Corrupt GIF"); + p->prefix = (stbi__int16)oldcode; + p->first = g->codes[oldcode].first; + p->suffix = (code == avail) ? p->first : g->codes[code].first; + } + else if (code == avail) + return stbi__errpuc("illegal code in raster", "Corrupt GIF"); + + stbi__out_gif_code(g, (stbi__uint16)code); + + if ((avail & codemask) == 0 && avail <= 0x0FFF) { + codesize++; + codemask = (1 << codesize) - 1; + } + + oldcode = code; + } + else { + return stbi__errpuc("illegal code in raster", "Corrupt GIF"); + } + } + } +} + +static void stbi__fill_gif_background(stbi__gif *g, int x0, int y0, int x1, int y1) +{ + int x, y; + stbi_uc *c = g->pal[g->bgindex]; + for (y = y0; y < y1; y += 4 * g->w) { + for (x = x0; x < x1; x += 4) { + stbi_uc *p = &g->out[y + x]; + p[0] = c[2]; + p[1] = c[1]; + p[2] = c[0]; + p[3] = 0; + } + } +} + +// this function is designed to support animated gifs, although stb_image doesn't support it +static stbi_uc *stbi__gif_load_next(stbi__context *s, stbi__gif *g, int *comp, int req_comp) +{ + int i; + stbi_uc *prev_out = 0; + + if (g->out == 0 && !stbi__gif_header(s, g, comp, 0)) + return 0; // stbi__g_failure_reason set by stbi__gif_header + + if (!stbi__mad3sizes_valid(g->w, g->h, 4, 0)) + return stbi__errpuc("too large", "GIF too large"); + + prev_out = g->out; + g->out = (stbi_uc *)stbi__malloc_mad3(4, g->w, g->h, 0); + if (g->out == 0) return stbi__errpuc("outofmem", "Out of memory"); + + switch ((g->eflags & 0x1C) >> 2) { + case 0: // unspecified (also 
always used on 1st frame) + stbi__fill_gif_background(g, 0, 0, 4 * g->w, 4 * g->w * g->h); + break; + case 1: // do not dispose + if (prev_out) memcpy(g->out, prev_out, 4 * g->w * g->h); + g->old_out = prev_out; + break; + case 2: // dispose to background + if (prev_out) memcpy(g->out, prev_out, 4 * g->w * g->h); + stbi__fill_gif_background(g, g->start_x, g->start_y, g->max_x, g->max_y); + break; + case 3: // dispose to previous + if (g->old_out) { + for (i = g->start_y; i < g->max_y; i += 4 * g->w) + memcpy(&g->out[i + g->start_x], &g->old_out[i + g->start_x], g->max_x - g->start_x); + } + break; + } + + for (;;) { + switch (stbi__get8(s)) { + case 0x2C: /* Image Descriptor */ + { + int prev_trans = -1; + stbi__int32 x, y, w, h; + stbi_uc *o; + + x = stbi__get16le(s); + y = stbi__get16le(s); + w = stbi__get16le(s); + h = stbi__get16le(s); + if (((x + w) > (g->w)) || ((y + h) > (g->h))) + return stbi__errpuc("bad Image Descriptor", "Corrupt GIF"); + + g->line_size = g->w * 4; + g->start_x = x * 4; + g->start_y = y * g->line_size; + g->max_x = g->start_x + w * 4; + g->max_y = g->start_y + h * g->line_size; + g->cur_x = g->start_x; + g->cur_y = g->start_y; + + g->lflags = stbi__get8(s); + + if (g->lflags & 0x40) { + g->step = 8 * g->line_size; // first interlaced spacing + g->parse = 3; + } + else { + g->step = g->line_size; + g->parse = 0; + } + + if (g->lflags & 0x80) { + stbi__gif_parse_colortable(s, g->lpal, 2 << (g->lflags & 7), g->eflags & 0x01 ? 
g->transparent : -1); + g->color_table = (stbi_uc *)g->lpal; + } + else if (g->flags & 0x80) { + if (g->transparent >= 0 && (g->eflags & 0x01)) { + prev_trans = g->pal[g->transparent][3]; + g->pal[g->transparent][3] = 0; + } + g->color_table = (stbi_uc *)g->pal; + } + else + return stbi__errpuc("missing color table", "Corrupt GIF"); + + o = stbi__process_gif_raster(s, g); + if (o == NULL) return NULL; + + if (prev_trans != -1) + g->pal[g->transparent][3] = (stbi_uc)prev_trans; + + return o; + } + + case 0x21: // Comment Extension. + { + int len; + if (stbi__get8(s) == 0xF9) { // Graphic Control Extension. + len = stbi__get8(s); + if (len == 4) { + g->eflags = stbi__get8(s); + g->delay = stbi__get16le(s); + g->transparent = stbi__get8(s); + } + else { + stbi__skip(s, len); + break; + } + } + while ((len = stbi__get8(s)) != 0) + stbi__skip(s, len); + break; + } + + case 0x3B: // gif stream termination code + return (stbi_uc *)s; // using '1' causes warning on some compilers + + default: + return stbi__errpuc("unknown code", "Corrupt GIF"); + } + } + + STBI_NOTUSED(req_comp); +} + +static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *u = 0; + stbi__gif* g = (stbi__gif*)stbi__malloc(sizeof(stbi__gif)); + memset(g, 0, sizeof(*g)); + STBI_NOTUSED(ri); + + u = stbi__gif_load_next(s, g, comp, req_comp); + if (u == (stbi_uc *)s) u = 0; // end of animated gif marker + if (u) { + *x = g->w; + *y = g->h; + if (req_comp && req_comp != 4) + u = stbi__convert_format(u, 4, req_comp, g->w, g->h); + } + else if (g->out) + STBI_FREE(g->out); + STBI_FREE(g); + return u; +} + +static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp) +{ + return stbi__gif_info_raw(s, x, y, comp); +} +#endif + +// ************************************************************************************************* +// Radiance RGBE HDR loader +// originally by Nicolas Schulz +#ifndef STBI_NO_HDR +static int 
stbi__hdr_test_core(stbi__context *s, const char *signature) +{ + int i; + for (i = 0; signature[i]; ++i) + if (stbi__get8(s) != signature[i]) + return 0; + stbi__rewind(s); + return 1; +} + +static int stbi__hdr_test(stbi__context* s) +{ + int r = stbi__hdr_test_core(s, "#?RADIANCE\n"); + stbi__rewind(s); + if (!r) { + r = stbi__hdr_test_core(s, "#?RGBE\n"); + stbi__rewind(s); + } + return r; +} + +#define STBI__HDR_BUFLEN 1024 +static char *stbi__hdr_gettoken(stbi__context *z, char *buffer) +{ + int len = 0; + char c = '\0'; + + c = (char)stbi__get8(z); + + while (!stbi__at_eof(z) && c != '\n') { + buffer[len++] = c; + if (len == STBI__HDR_BUFLEN - 1) { + // flush to end of line + while (!stbi__at_eof(z) && stbi__get8(z) != '\n') + ; + break; + } + c = (char)stbi__get8(z); + } + + buffer[len] = 0; + return buffer; +} + +static void stbi__hdr_convert(float *output, stbi_uc *input, int req_comp) +{ + if (input[3] != 0) { + float f1; + // Exponent + f1 = (float)ldexp(1.0f, input[3] - (int)(128 + 8)); + if (req_comp <= 2) + output[0] = (input[0] + input[1] + input[2]) * f1 / 3; + else { + output[0] = input[0] * f1; + output[1] = input[1] * f1; + output[2] = input[2] * f1; + } + if (req_comp == 2) output[1] = 1; + if (req_comp == 4) output[3] = 1; + } + else { + switch (req_comp) { + case 4: output[3] = 1; /* fallthrough */ + case 3: output[0] = output[1] = output[2] = 0; + break; + case 2: output[1] = 1; /* fallthrough */ + case 1: output[0] = 0; + break; + } + } +} + +static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + char buffer[STBI__HDR_BUFLEN]; + char *token; + int valid = 0; + int width, height; + stbi_uc *scanline; + float *hdr_data; + int len; + unsigned char count, value; + int i, j, k, c1, c2, z; + const char *headerToken; + STBI_NOTUSED(ri); + + // Check identifier + headerToken = stbi__hdr_gettoken(s, buffer); + if (strcmp(headerToken, "#?RADIANCE") != 0 && strcmp(headerToken, "#?RGBE") != 
0) + return stbi__errpf("not HDR", "Corrupt HDR image"); + + // Parse header + for (;;) { + token = stbi__hdr_gettoken(s, buffer); + if (token[0] == 0) break; + if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1; + } + + if (!valid) return stbi__errpf("unsupported format", "Unsupported HDR format"); + + // Parse width and height + // can't use sscanf() if we're not using stdio! + token = stbi__hdr_gettoken(s, buffer); + if (strncmp(token, "-Y ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format"); + token += 3; + height = (int)strtol(token, &token, 10); + while (*token == ' ') ++token; + if (strncmp(token, "+X ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format"); + token += 3; + width = (int)strtol(token, NULL, 10); + + *x = width; + *y = height; + + if (comp) *comp = 3; + if (req_comp == 0) req_comp = 3; + + if (!stbi__mad4sizes_valid(width, height, req_comp, sizeof(float), 0)) + return stbi__errpf("too large", "HDR image is too large"); + + // Read data + hdr_data = (float *)stbi__malloc_mad4(width, height, req_comp, sizeof(float), 0); + if (!hdr_data) + return stbi__errpf("outofmem", "Out of memory"); + + // Load image data + // image data is stored as some number of sca + if (width < 8 || width >= 32768) { + // Read flat data + for (j = 0; j < height; ++j) { + for (i = 0; i < width; ++i) { + stbi_uc rgbe[4]; + main_decode_loop: + stbi__getn(s, rgbe, 4); + stbi__hdr_convert(hdr_data + j * width * req_comp + i * req_comp, rgbe, req_comp); + } + } + } + else { + // Read RLE-encoded data + scanline = NULL; + + for (j = 0; j < height; ++j) { + c1 = stbi__get8(s); + c2 = stbi__get8(s); + len = stbi__get8(s); + if (c1 != 2 || c2 != 2 || (len & 0x80)) { + // not run-length encoded, so we have to actually use THIS data as a decoded + // pixel (note this can't be a valid pixel--one of RGB must be >= 128) + stbi_uc rgbe[4]; + rgbe[0] = (stbi_uc)c1; + rgbe[1] = (stbi_uc)c2; + rgbe[2] = (stbi_uc)len; + rgbe[3] = 
(stbi_uc)stbi__get8(s); + stbi__hdr_convert(hdr_data, rgbe, req_comp); + i = 1; + j = 0; + STBI_FREE(scanline); + goto main_decode_loop; // yes, this makes no sense + } + len <<= 8; + len |= stbi__get8(s); + if (len != width) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("invalid decoded scanline length", "corrupt HDR"); } + if (scanline == NULL) { + scanline = (stbi_uc *)stbi__malloc_mad2(width, 4, 0); + if (!scanline) { + STBI_FREE(hdr_data); + return stbi__errpf("outofmem", "Out of memory"); + } + } + + for (k = 0; k < 4; ++k) { + int nleft; + i = 0; + while ((nleft = width - i) > 0) { + count = stbi__get8(s); + if (count > 128) { + // Run + value = stbi__get8(s); + count -= 128; + if (count > nleft) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } + for (z = 0; z < count; ++z) + scanline[i++ * 4 + k] = value; + } + else { + // Dump + if (count > nleft) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } + for (z = 0; z < count; ++z) + scanline[i++ * 4 + k] = stbi__get8(s); + } + } + } + for (i = 0; i < width; ++i) + stbi__hdr_convert(hdr_data + (j*width + i)*req_comp, scanline + i * 4, req_comp); + } + if (scanline) + STBI_FREE(scanline); + } + + return hdr_data; +} + +static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp) +{ + char buffer[STBI__HDR_BUFLEN]; + char *token; + int valid = 0; + + if (stbi__hdr_test(s) == 0) { + stbi__rewind(s); + return 0; + } + + for (;;) { + token = stbi__hdr_gettoken(s, buffer); + if (token[0] == 0) break; + if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1; + } + + if (!valid) { + stbi__rewind(s); + return 0; + } + token = stbi__hdr_gettoken(s, buffer); + if (strncmp(token, "-Y ", 3)) { + stbi__rewind(s); + return 0; + } + token += 3; + *y = (int)strtol(token, &token, 10); + while (*token == ' ') ++token; + if (strncmp(token, "+X ", 3)) { + stbi__rewind(s); + return 0; + } + token += 3; 
+ *x = (int)strtol(token, NULL, 10); + *comp = 3; + return 1; +} +#endif // STBI_NO_HDR + +#ifndef STBI_NO_BMP +static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp) +{ + void *p; + stbi__bmp_data info; + + info.all_a = 255; + p = stbi__bmp_parse_header(s, &info); + stbi__rewind(s); + if (p == NULL) + return 0; + *x = s->img_x; + *y = s->img_y; + *comp = info.ma ? 4 : 3; + return 1; +} +#endif + +#ifndef STBI_NO_PSD +static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp) +{ + int channelCount; + if (stbi__get32be(s) != 0x38425053) { + stbi__rewind(s); + return 0; + } + if (stbi__get16be(s) != 1) { + stbi__rewind(s); + return 0; + } + stbi__skip(s, 6); + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) { + stbi__rewind(s); + return 0; + } + *y = stbi__get32be(s); + *x = stbi__get32be(s); + if (stbi__get16be(s) != 8) { + stbi__rewind(s); + return 0; + } + if (stbi__get16be(s) != 3) { + stbi__rewind(s); + return 0; + } + *comp = 4; + return 1; +} +#endif + +#ifndef STBI_NO_PIC +static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp) +{ + int act_comp = 0, num_packets = 0, chained; + stbi__pic_packet packets[10]; + + if (!stbi__pic_is4(s, "\x53\x80\xF6\x34")) { + stbi__rewind(s); + return 0; + } + + stbi__skip(s, 88); + + *x = stbi__get16be(s); + *y = stbi__get16be(s); + if (stbi__at_eof(s)) { + stbi__rewind(s); + return 0; + } + if ((*x) != 0 && (1 << 28) / (*x) < (*y)) { + stbi__rewind(s); + return 0; + } + + stbi__skip(s, 8); + + do { + stbi__pic_packet *packet; + + if (num_packets == sizeof(packets) / sizeof(packets[0])) + return 0; + + packet = &packets[num_packets++]; + chained = stbi__get8(s); + packet->size = stbi__get8(s); + packet->type = stbi__get8(s); + packet->channel = stbi__get8(s); + act_comp |= packet->channel; + + if (stbi__at_eof(s)) { + stbi__rewind(s); + return 0; + } + if (packet->size != 8) { + stbi__rewind(s); + return 0; + } + } while (chained); + + *comp = (act_comp & 
0x10 ? 4 : 3); + + return 1; +} +#endif + +// ************************************************************************************************* +// Portable Gray Map and Portable Pixel Map loader +// by Ken Miller +// +// PGM: http://netpbm.sourceforge.net/doc/pgm.html +// PPM: http://netpbm.sourceforge.net/doc/ppm.html +// +// Known limitations: +// Does not support comments in the header section +// Does not support ASCII image data (formats P2 and P3) +// Does not support 16-bit-per-channel + +#ifndef STBI_NO_PNM + +static int stbi__pnm_test(stbi__context *s) +{ + char p, t; + p = (char)stbi__get8(s); + t = (char)stbi__get8(s); + if (p != 'P' || (t != '5' && t != '6')) { + stbi__rewind(s); + return 0; + } + return 1; +} + +static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *out; + STBI_NOTUSED(ri); + + if (!stbi__pnm_info(s, (int *)&s->img_x, (int *)&s->img_y, (int *)&s->img_n)) + return 0; + + *x = s->img_x; + *y = s->img_y; + *comp = s->img_n; + + if (!stbi__mad3sizes_valid(s->img_n, s->img_x, s->img_y, 0)) + return stbi__errpuc("too large", "PNM too large"); + + out = (stbi_uc *)stbi__malloc_mad3(s->img_n, s->img_x, s->img_y, 0); + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + stbi__getn(s, out, s->img_n * s->img_x * s->img_y); + + if (req_comp && req_comp != s->img_n) { + out = stbi__convert_format(out, s->img_n, req_comp, s->img_x, s->img_y); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + return out; +} + +static int stbi__pnm_isspace(char c) +{ + return c == ' ' || c == '\t' || c == '\n' || c == '\v' || c == '\f' || c == '\r'; +} + +static void stbi__pnm_skip_whitespace(stbi__context *s, char *c) +{ + for (;;) { + while (!stbi__at_eof(s) && stbi__pnm_isspace(*c)) + *c = (char)stbi__get8(s); + + if (stbi__at_eof(s) || *c != '#') + break; + + while (!stbi__at_eof(s) && *c != '\n' && *c != '\r') + *c = (char)stbi__get8(s); + } +} + 
+static int stbi__pnm_isdigit(char c) +{ + return c >= '0' && c <= '9'; +} + +static int stbi__pnm_getinteger(stbi__context *s, char *c) +{ + int value = 0; + + while (!stbi__at_eof(s) && stbi__pnm_isdigit(*c)) { + value = value * 10 + (*c - '0'); + *c = (char)stbi__get8(s); + } + + return value; +} + +static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp) +{ + int maxv; + char c, p, t; + + stbi__rewind(s); + + // Get identifier + p = (char)stbi__get8(s); + t = (char)stbi__get8(s); + if (p != 'P' || (t != '5' && t != '6')) { + stbi__rewind(s); + return 0; + } + + *comp = (t == '6') ? 3 : 1; // '5' is 1-component .pgm; '6' is 3-component .ppm + + c = (char)stbi__get8(s); + stbi__pnm_skip_whitespace(s, &c); + + *x = stbi__pnm_getinteger(s, &c); // read width + stbi__pnm_skip_whitespace(s, &c); + + *y = stbi__pnm_getinteger(s, &c); // read height + stbi__pnm_skip_whitespace(s, &c); + + maxv = stbi__pnm_getinteger(s, &c); // read max value + + if (maxv > 255) + return stbi__err("max value > 255", "PPM image not 8-bit"); + else + return 1; +} +#endif + +static int stbi__info_main(stbi__context *s, int *x, int *y, int *comp) +{ +#ifndef STBI_NO_JPEG + if (stbi__jpeg_info(s, x, y, comp)) return 1; +#endif + +#ifndef STBI_NO_PNG + if (stbi__png_info(s, x, y, comp)) return 1; +#endif + +#ifndef STBI_NO_GIF + if (stbi__gif_info(s, x, y, comp)) return 1; +#endif + +#ifndef STBI_NO_BMP + if (stbi__bmp_info(s, x, y, comp)) return 1; +#endif + +#ifndef STBI_NO_PSD + if (stbi__psd_info(s, x, y, comp)) return 1; +#endif + +#ifndef STBI_NO_PIC + if (stbi__pic_info(s, x, y, comp)) return 1; +#endif + +#ifndef STBI_NO_PNM + if (stbi__pnm_info(s, x, y, comp)) return 1; +#endif + +#ifndef STBI_NO_HDR + if (stbi__hdr_info(s, x, y, comp)) return 1; +#endif + + // test tga last because it's a crappy test! 
+#ifndef STBI_NO_TGA + if (stbi__tga_info(s, x, y, comp)) + return 1; +#endif + return stbi__err("unknown image type", "Image not of any known type, or corrupt"); +} + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_info(char const *filename, int *x, int *y, int *comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result; + if (!f) return stbi__err("can't fopen", "Unable to open file"); + result = stbi_info_from_file(f, x, y, comp); + fclose(f); + return result; +} + +STBIDEF int stbi_info_from_file(FILE *f, int *x, int *y, int *comp) +{ + int r; + stbi__context s; + long pos = ftell(f); + stbi__start_file(&s, f); + r = stbi__info_main(&s, x, y, comp); + fseek(f, pos, SEEK_SET); + return r; +} +#endif // !STBI_NO_STDIO + +STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp) +{ + stbi__context s; + stbi__start_mem(&s, buffer, len); + return stbi__info_main(&s, x, y, comp); +} + +STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *c, void *user, int *x, int *y, int *comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *)c, user); + return stbi__info_main(&s, x, y, comp); +} + +#endif // STB_IMAGE_IMPLEMENTATION + +/* +revision history: +2.13 (2016-11-29) add 16-bit API, only supported for PNG right now +2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes +2.11 (2016-04-02) allocate large structures on the stack +remove white matting for transparent PSD +fix reported channel count for PNG & BMP +re-enable SSE2 in non-gcc 64-bit +support RGB-formatted JPEG +read 16-bit PNGs (only as 8-bit) +2.10 (2016-01-22) avoid warning introduced in 2.09 by STBI_REALLOC_SIZED +2.09 (2016-01-16) allow comments in PNM files +16-bit-per-pixel TGA (not bit-per-component) +info() for TGA could break due to .hdr handling +info() for BMP to shares code instead of sloppy parse +can use STBI_REALLOC_SIZED if allocator doesn't support realloc +code cleanup +2.08 (2015-09-13) fix to 2.07 cleanup, reading RGB PSD as 
RGBA +2.07 (2015-09-13) fix compiler warnings +partial animated GIF support +limited 16-bpc PSD support +#ifdef unused functions +bug with < 92 byte PIC,PNM,HDR,TGA +2.06 (2015-04-19) fix bug where PSD returns wrong '*comp' value +2.05 (2015-04-19) fix bug in progressive JPEG handling, fix warning +2.04 (2015-04-15) try to re-enable SIMD on MinGW 64-bit +2.03 (2015-04-12) extra corruption checking (mmozeiko) +stbi_set_flip_vertically_on_load (nguillemot) +fix NEON support; fix mingw support +2.02 (2015-01-19) fix incorrect assert, fix warning +2.01 (2015-01-17) fix various warnings; suppress SIMD on gcc 32-bit without -msse2 +2.00b (2014-12-25) fix STBI_MALLOC in progressive JPEG +2.00 (2014-12-25) optimize JPG, including x86 SSE2 & NEON SIMD (ryg) +progressive JPEG (stb) +PGM/PPM support (Ken Miller) +STBI_MALLOC,STBI_REALLOC,STBI_FREE +GIF bugfix -- seemingly never worked +STBI_NO_*, STBI_ONLY_* +1.48 (2014-12-14) fix incorrectly-named assert() +1.47 (2014-12-14) 1/2/4-bit PNG support, both direct and paletted (Omar Cornut & stb) +optimize PNG (ryg) +fix bug in interlaced PNG with user-specified channel count (stb) +1.46 (2014-08-26) +fix broken tRNS chunk (colorkey-style transparency) in non-paletted PNG +1.45 (2014-08-16) +fix MSVC-ARM internal compiler error by wrapping malloc +1.44 (2014-08-07) +various warning fixes from Ronny Chevalier +1.43 (2014-07-15) +fix MSVC-only compiler problem in code changed in 1.42 +1.42 (2014-07-09) +don't define _CRT_SECURE_NO_WARNINGS (affects user code) +fixes to stbi__cleanup_jpeg path +added STBI_ASSERT to avoid requiring assert.h +1.41 (2014-06-25) +fix search&replace from 1.36 that messed up comments/error messages +1.40 (2014-06-22) +fix gcc struct-initialization warning +1.39 (2014-06-15) +fix to TGA optimization when req_comp != number of components in TGA; +fix to GIF loading because BMP wasn't rewinding (whoops, no GIFs in my test suite) +add support for BMP version 5 (more ignored fields) +1.38 (2014-06-06) 
+suppress MSVC warnings on integer casts truncating values +fix accidental rename of 'skip' field of I/O +1.37 (2014-06-04) +remove duplicate typedef +1.36 (2014-06-03) +convert to header file single-file library +if de-iphone isn't set, load iphone images color-swapped instead of returning NULL +1.35 (2014-05-27) +various warnings +fix broken STBI_SIMD path +fix bug where stbi_load_from_file no longer left file pointer in correct place +fix broken non-easy path for 32-bit BMP (possibly never used) +TGA optimization by Arseny Kapoulkine +1.34 (unknown) +use STBI_NOTUSED in stbi__resample_row_generic(), fix one more leak in tga failure case +1.33 (2011-07-14) +make stbi_is_hdr work in STBI_NO_HDR (as specified), minor compiler-friendly improvements +1.32 (2011-07-13) +support for "info" function for all supported filetypes (SpartanJ) +1.31 (2011-06-20) +a few more leak fixes, bug in PNG handling (SpartanJ) +1.30 (2011-06-11) +added ability to load files via callbacks to accomidate custom input streams (Ben Wenger) +removed deprecated format-specific test/load functions +removed support for installable file formats (stbi_loader) -- would have been broken for IO callbacks anyway +error cases in bmp and tga give messages and don't leak (Raymond Barbiero, grisha) +fix inefficiency in decoding 32-bit BMP (David Woo) +1.29 (2010-08-16) +various warning fixes from Aurelien Pocheville +1.28 (2010-08-01) +fix bug in GIF palette transparency (SpartanJ) +1.27 (2010-08-01) +cast-to-stbi_uc to fix warnings +1.26 (2010-07-24) +fix bug in file buffering for PNG reported by SpartanJ +1.25 (2010-07-17) +refix trans_data warning (Won Chun) +1.24 (2010-07-12) +perf improvements reading from files on platforms with lock-heavy fgetc() +minor perf improvements for jpeg +deprecated type-specific functions so we'll get feedback if they're needed +attempt to fix trans_data warning (Won Chun) +1.23 fixed bug in iPhone support +1.22 (2010-07-10) +removed image *writing* support +stbi_info 
support from Jetro Lauha +GIF support from Jean-Marc Lienher +iPhone PNG-extensions from James Brown +warning-fixes from Nicolas Schulz and Janez Zemva (i.stbi__err. Janez (U+017D)emva) +1.21 fix use of 'stbi_uc' in header (reported by jon blow) +1.20 added support for Softimage PIC, by Tom Seddon +1.19 bug in interlaced PNG corruption check (found by ryg) +1.18 (2008-08-02) +fix a threading bug (local mutable static) +1.17 support interlaced PNG +1.16 major bugfix - stbi__convert_format converted one too many pixels +1.15 initialize some fields for thread safety +1.14 fix threadsafe conversion bug +header-file-only version (#define STBI_HEADER_FILE_ONLY before including) +1.13 threadsafe +1.12 const qualifiers in the API +1.11 Support installable IDCT, colorspace conversion routines +1.10 Fixes for 64-bit (don't use "unsigned long") +optimized upsampling by Fabian "ryg" Giesen +1.09 Fix format-conversion for PSD code (bad global variables!) +1.08 Thatcher Ulrich's PSD code integrated by Nicolas Schulz +1.07 attempt to fix C++ warning/errors again +1.06 attempt to fix C++ warning/errors again +1.05 fix TGA loading to return correct *comp and use good luminance calc +1.04 default float alpha is 1, not 255; use 'void *' for stbi_image_free +1.03 bugfixes to STBI_NO_STDIO, STBI_NO_HDR +1.02 support for (subset of) HDR files, float interface for preferred access to them +1.01 fix bug: possible bug in handling right-side up bmps... 
not sure +fix bug: the stbi__bmp_load() and stbi__tga_load() functions didn't work at all +1.00 interface to zlib that skips zlib header +0.99 correct handling of alpha in palette +0.98 TGA loader by lonesock; dynamically add loaders (untested) +0.97 jpeg errors on too large a file; also catch another malloc failure +0.96 fix detection of invalid v value - particleman@mollyrocket forum +0.95 during header scan, seek to markers in case of padding +0.94 STBI_NO_STDIO to disable stdio usage; rename all #defines the same +0.93 handle jpegtran output; verbose errors +0.92 read 4,8,16,24,32-bit BMP files of several formats +0.91 output 24-bit Windows 3.0 BMP files +0.90 fix a few more warnings; bump version number to approach 1.0 +0.61 bugfixes due to Marc LeBlanc, Christopher Lloyd +0.60 fix compiling as c++ +0.59 fix warnings: merge Dave Moore's -Wall fixes +0.58 fix bug: zlib uncompressed mode len/nlen was wrong endian +0.57 fix bug: jpg last huffman symbol before marker was >9 bits but less than 16 available +0.56 fix bug: zlib uncompressed mode len vs. 
nlen +0.55 fix bug: restart_interval not initialized to 0 +0.54 allow NULL for 'int *comp' +0.53 fix bug in png 3->4; speedup png decoding +0.52 png handles req_comp=3,4 directly; minor cleanup; jpeg comments +0.51 obey req_comp requests, 1-component jpegs return as 1-component, +on 'test' only check type, not whether we support this variant +0.50 (2006-11-19) +first released version +*/ \ No newline at end of file diff --git a/MVS/ui/CMakeLists.txt b/MVS/ui/CMakeLists.txt new file mode 100644 index 00000000..4fdcecdb --- /dev/null +++ b/MVS/ui/CMakeLists.txt @@ -0,0 +1,66 @@ +cmake_minimum_required(VERSION 3.15) +project(XCVolumeRendererUI2 VERSION 1.0 LANGUAGES CXX) + +set(CMAKE_CXX_STANDARD 17) +set(CMAKE_CXX_STANDARD_REQUIRED ON) + +add_definitions(-DUNICODE -D_UNICODE) +add_definitions(-DIMGUI_ENABLE_DOCKING) + +include(FetchContent) + +FetchContent_Declare( + imgui + GIT_REPOSITORY https://gitee.com/mirrors/imgui.git + GIT_TAG docking + GIT_SHALLOW TRUE +) + +FetchContent_MakeAvailable(imgui) + +set(IMGUI_SOURCES + ${imgui_SOURCE_DIR}/imgui.cpp + ${imgui_SOURCE_DIR}/imgui_demo.cpp + ${imgui_SOURCE_DIR}/imgui_draw.cpp + ${imgui_SOURCE_DIR}/imgui_tables.cpp + ${imgui_SOURCE_DIR}/imgui_widgets.cpp + ${imgui_SOURCE_DIR}/backends/imgui_impl_win32.cpp + ${imgui_SOURCE_DIR}/backends/imgui_impl_dx12.cpp +) + +add_executable(${PROJECT_NAME} WIN32 + src/main.cpp + src/Application.cpp + src/Theme.cpp + src/Managers/SceneManager.cpp + src/Managers/LogSystem.cpp + src/Managers/ProjectManager.cpp + src/panels/Panel.cpp + src/panels/MenuBar.cpp + src/panels/HierarchyPanel.cpp + src/panels/SceneViewPanel.cpp + src/panels/GameViewPanel.cpp + src/panels/InspectorPanel.cpp + src/panels/ConsolePanel.cpp + src/panels/ProjectPanel.cpp + ${IMGUI_SOURCES} +) + +target_include_directories(${PROJECT_NAME} PRIVATE + ${CMAKE_CURRENT_SOURCE_DIR}/src + ${imgui_SOURCE_DIR} + ${imgui_SOURCE_DIR}/backends +) + +target_compile_definitions(${PROJECT_NAME} PRIVATE UNICODE _UNICODE) 
+target_compile_options(${PROJECT_NAME} PRIVATE /utf-8 /MT) + +target_link_libraries(${PROJECT_NAME} PRIVATE + d3d12.lib + dxgi.lib + d3dcompiler.lib +) + +set_target_properties(${PROJECT_NAME} PROPERTIES + RUNTIME_OUTPUT_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/bin" +) \ No newline at end of file diff --git a/MVS/ui/README.md b/MVS/ui/README.md new file mode 100644 index 00000000..0beca2dc --- /dev/null +++ b/MVS/ui/README.md @@ -0,0 +1,183 @@ +# UI Editor + +Unity 风格的编辑器 UI,使用 ImGui 实现。 + +## 简介 + +XCVolumeRenderer UI 是一个仿 Unity 编辑器的桌面应用程序,提供场景管理、层级视图、属性检查器等功能。 + +## 技术栈 + +- **渲染 API**: DirectX 12 +- **UI 框架**: ImGui +- **语言**: C++17 +- **构建系统**: CMake +- **依赖库**: DirectX 12 SDK + +## 项目结构 + +``` +ui/ +├── src/ +│ ├── main.cpp # 程序入口 +│ ├── Application.cpp/h # 应用主类 +│ ├── Theme.cpp/h # 主题系统 +│ ├── Core/ +│ │ ├── GameObject.h # 游戏对象 +│ │ └── LogEntry.h # 日志条目 +│ ├── Managers/ +│ │ ├── LogSystem.cpp/h # 日志系统 +│ │ ├── ProjectManager.cpp/h # 项目管理 +│ │ ├── SceneManager.cpp/h # 场景管理 +│ │ └── SelectionManager.cpp/h # 选择管理 +│ └── panels/ +│ ├── Panel.cpp/h # 面板基类 +│ ├── MenuBar.cpp/h # 菜单栏 +│ ├── HierarchyPanel.cpp/h # 层级面板 +│ ├── InspectorPanel.cpp/h # 检查器面板 +│ ├── SceneViewPanel.cpp/h # 场景视图 +│ ├── GameViewPanel.cpp/h # 游戏视图 +│ ├── ProjectPanel.cpp/h # 项目面板 +│ └── ConsolePanel.cpp/h # 控制台面板 +├── bin/Release/ # 输出目录 +│ ├── XCVolumeRendererUI2.exe # 可执行文件 +│ ├── imgui.ini # ImGui 配置 +│ └── Assets/ +│ └── Models/ +│ └── Character.fbx # 示例模型 +├── build/ # 构建目录 +└── CMakeLists.txt # CMake 配置 +``` + +## 构建方法 + +### 前置要求 + +- Windows 10/11 +- Visual Studio 2019 或更高版本 +- CMake 3.15+ + +### 构建步骤 + +```bash +cd ui +mkdir build && cd build +cmake .. +cmake --build . 
--config Release +``` + +### 运行 + +```bash +# 运行编译好的可执行文件 +.\bin\Release\XCVolumeRendererUI2.exe +``` + +## 功能特性 + +### 编辑器面板 + +#### 菜单栏(MenuBar) +- 文件菜单(新建、打开、保存等) +- 编辑菜单(撤销、重做等) +- 视图菜单(面板显示/隐藏) +- 帮助菜单 + +#### 层级面板(Hierarchy Panel) +- 显示场景中所有游戏对象 +- 树形结构展示父子关系 +- 支持对象选择 +- 对象重命名 + +#### 检查器面板(Inspector Panel) +- 显示选中对象的属性 +- 支持组件编辑 +- 变换组件(位置、旋转、缩放) +- 材质组件 + +#### 场景视图(Scene View) +- 3D 场景预览 +- 相机控制(平移、旋转、缩放) +- 对象选择 +- 辅助工具(网格、轴心) + +#### 游戏视图(Game View) +- 游戏运行时的画面预览 +- 分辨率设置 +- 宽高比选择 + +#### 项目面板(Project Panel) +- 项目文件浏览器 +- 资源组织 +- 搜索过滤 + +#### 控制台面板(Console Panel) +- 日志输出 +- 警告和错误显示 +- 日志级别过滤 +- 清空日志 + +### 管理系统 + +#### 日志系统(LogSystem) +- 分级日志(Info、Warning、Error) +- 时间戳 +- 日志持久化 + +#### 项目管理(ProjectManager) +- 项目创建/打开 +- 资源路径管理 + +#### 场景管理(SceneManager) +- 场景加载/保存 +- 对象生命周期管理 + +#### 选择管理(SelectionManager) +- 当前选中对象追踪 +- 多选支持 + +### 主题系统 + +- 深色主题(Dark Theme) +- 可自定义配色方案 + +## 窗口布局 + +默认布局采用经典的 Unity 编辑器风格: + +``` ++----------------------------------------------------------+ +| 菜单栏 | ++----------+------------------------+----------------------+ +| | | | +| 项目 | 场景视图 | 检查器 | +| 面板 | | | +| | | | ++----------+------------------------+----------------------+ +| 层级面板 | 游戏视图 | +| | | ++------------------------------------+----------------------+ +| 控制台面板 | ++----------------------------------------------------------+ +``` + +## 依赖说明 + +- ImGui - 跨平台 GUI 库 +- DirectX 12 - 渲染 API +- Windows SDK - 窗口管理 + +## 扩展开发 + +### 添加新面板 + +1. 在 `panels/` 目录下创建新的面板类 +2. 继承 `Panel` 基类 +3. 实现 `Render()` 方法 +4. 在 `Application` 中注册新面板 + +### 添加新组件 + +1. 定义组件类 +2. 在 `GameObject` 中注册组件类型 +3. 
在 `InspectorPanel` 中添加属性编辑器 diff --git a/MVS/ui/src/Application.cpp b/MVS/ui/src/Application.cpp new file mode 100644 index 00000000..47074823 --- /dev/null +++ b/MVS/ui/src/Application.cpp @@ -0,0 +1,303 @@ +#include "Application.h" +#include +#include +#include +#include + +extern IMGUI_IMPL_API LRESULT ImGui_ImplWin32_WndProcHandler(HWND hWnd, UINT msg, WPARAM wParam, LPARAM lParam); + +namespace UI { + +Application& Application::Get() { + static Application instance; + return instance; +} + +bool Application::Initialize(HWND hwnd) { + m_hwnd = hwnd; + + if (!CreateDevice()) { + MessageBoxW(hwnd, L"Failed to create D3D12 device", L"Error", MB_OK | MB_ICONERROR); + return false; + } + if (!CreateRenderTarget()) { + MessageBoxW(hwnd, L"Failed to create render target", L"Error", MB_OK | MB_ICONERROR); + return false; + } + + IMGUI_CHECKVERSION(); + ImGui::CreateContext(); + ImGuiIO& io = ImGui::GetIO(); + io.ConfigFlags |= ImGuiConfigFlags_DockingEnable; + + io.Fonts->AddFontFromFileTTF("C:/Windows/Fonts/msyh.ttc", 16.0f); + io.Fonts->AddFontDefault(); + + unsigned char* pixels; + int width, height; + io.Fonts->GetTexDataAsRGBA32(&pixels, &width, &height); + + ApplyUnityDarkTheme(); + + ImGui_ImplWin32_Init(hwnd); + ImGui_ImplDX12_Init(m_device, 3, DXGI_FORMAT_R8G8B8A8_UNORM, m_srvHeap, + m_srvHeap->GetCPUDescriptorHandleForHeapStart(), + m_srvHeap->GetGPUDescriptorHandleForHeapStart()); + + m_menuBar = std::make_unique(); + m_hierarchyPanel = std::make_unique(); + m_sceneViewPanel = std::make_unique(); + m_gameViewPanel = std::make_unique(); + m_inspectorPanel = std::make_unique(); + m_consolePanel = std::make_unique(); + m_projectPanel = std::make_unique(); + + wchar_t exePath[MAX_PATH]; + GetModuleFileNameW(nullptr, exePath, MAX_PATH); + std::wstring exeDirW(exePath); + size_t pos = exeDirW.find_last_of(L"\\/"); + if (pos != std::wstring::npos) { + exeDirW = exeDirW.substr(0, pos); + } + std::string exeDir; + int len = WideCharToMultiByte(CP_UTF8, 0, 
exeDirW.c_str(), -1, nullptr, 0, nullptr, nullptr); + if (len > 0) { + exeDir.resize(len - 1); + WideCharToMultiByte(CP_UTF8, 0, exeDirW.c_str(), -1, &exeDir[0], len, nullptr, nullptr); + } + m_projectPanel->Initialize(exeDir); + + return true; +} + +void Application::Shutdown() { + ImGui_ImplDX12_Shutdown(); + ImGui_ImplWin32_Shutdown(); + ImGui::DestroyContext(); + + CleanupRenderTarget(); + + if (m_fence) m_fence->Release(); + if (m_commandList) m_commandList->Release(); + if (m_commandAllocator) m_commandAllocator->Release(); + if (m_commandQueue) m_commandQueue->Release(); + if (m_rtvHeap) m_rtvHeap->Release(); + if (m_srvHeap) m_srvHeap->Release(); + if (m_swapChain) m_swapChain->Release(); + if (m_device) m_device->Release(); +} + +void Application::Render() { + ImGui_ImplDX12_NewFrame(); + ImGui_ImplWin32_NewFrame(); + ImGui::NewFrame(); + + SetupDockspace(); + RenderUI(); + + ImGui::Render(); + + m_frameIndex = m_swapChain->GetCurrentBackBufferIndex(); + m_commandAllocator->Reset(); + m_commandList->Reset(m_commandAllocator, nullptr); + + D3D12_RESOURCE_BARRIER barrier = {}; + barrier.Type = D3D12_RESOURCE_BARRIER_TYPE_TRANSITION; + barrier.Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE; + barrier.Transition.pResource = m_renderTargets[m_frameIndex]; + barrier.Transition.StateBefore = D3D12_RESOURCE_STATE_PRESENT; + barrier.Transition.StateAfter = D3D12_RESOURCE_STATE_RENDER_TARGET; + barrier.Transition.Subresource = D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES; + m_commandList->ResourceBarrier(1, &barrier); + + float clearColor[4] = { 0.12f, 0.12f, 0.12f, 1.0f }; + D3D12_CPU_DESCRIPTOR_HANDLE rtvHandle = m_rtvHeap->GetCPUDescriptorHandleForHeapStart(); + rtvHandle.ptr += m_frameIndex * m_rtvDescriptorSize; + m_commandList->ClearRenderTargetView(rtvHandle, clearColor, 0, nullptr); + m_commandList->OMSetRenderTargets(1, &rtvHandle, FALSE, nullptr); + + ID3D12DescriptorHeap* heaps[] = { m_srvHeap }; + m_commandList->SetDescriptorHeaps(1, heaps); + + 
ImGui_ImplDX12_RenderDrawData(ImGui::GetDrawData(), m_commandList); + + barrier.Transition.StateBefore = D3D12_RESOURCE_STATE_RENDER_TARGET; + barrier.Transition.StateAfter = D3D12_RESOURCE_STATE_PRESENT; + m_commandList->ResourceBarrier(1, &barrier); + + m_commandList->Close(); + ID3D12CommandList* cmdLists[] = { m_commandList }; + m_commandQueue->ExecuteCommandLists(1, cmdLists); + + m_swapChain->Present(1, 0); + + m_fenceValue++; + m_commandQueue->Signal(m_fence, m_fenceValue); + if (m_fence->GetCompletedValue() < m_fenceValue) { + m_fence->SetEventOnCompletion(m_fenceValue, nullptr); + } +} + +void Application::OnResize(int width, int height) { + if (width <= 0 || height <= 0) return; + + m_width = width; + m_height = height; + + CleanupRenderTarget(); + + if (m_swapChain) { + DXGI_SWAP_CHAIN_DESC desc; + m_swapChain->GetDesc(&desc); + m_swapChain->ResizeBuffers(3, width, height, desc.BufferDesc.Format, desc.Flags); + } + + CreateRenderTarget(); +} + +bool Application::CreateDevice() { + HRESULT hr = D3D12CreateDevice(nullptr, D3D_FEATURE_LEVEL_11_0, IID_PPV_ARGS(&m_device)); + if (FAILED(hr)) { + return false; + } + + D3D12_COMMAND_QUEUE_DESC queueDesc = {}; + queueDesc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT; + queueDesc.Priority = 0; + queueDesc.Flags = D3D12_COMMAND_QUEUE_FLAG_NONE; + queueDesc.NodeMask = 0; + hr = m_device->CreateCommandQueue(&queueDesc, IID_PPV_ARGS(&m_commandQueue)); + if (FAILED(hr)) return false; + + hr = m_device->CreateCommandAllocator(D3D12_COMMAND_LIST_TYPE_DIRECT, IID_PPV_ARGS(&m_commandAllocator)); + if (FAILED(hr)) return false; + + hr = m_device->CreateCommandList(0, D3D12_COMMAND_LIST_TYPE_DIRECT, m_commandAllocator, nullptr, IID_PPV_ARGS(&m_commandList)); + if (FAILED(hr)) return false; + m_commandList->Close(); + + hr = m_device->CreateFence(0, D3D12_FENCE_FLAG_NONE, IID_PPV_ARGS(&m_fence)); + if (FAILED(hr)) return false; + + IDXGIFactory4* factory = nullptr; + hr = CreateDXGIFactory1(IID_PPV_ARGS(&factory)); + if 
(FAILED(hr)) return false; + + DXGI_SWAP_CHAIN_DESC1 swapChainDesc = {}; + swapChainDesc.BufferCount = 3; + swapChainDesc.Width = m_width; + swapChainDesc.Height = m_height; + swapChainDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM; + swapChainDesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT; + swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_DISCARD; + swapChainDesc.SampleDesc.Count = 1; + + IDXGISwapChain1* swapChain1 = nullptr; + hr = factory->CreateSwapChainForHwnd(m_commandQueue, m_hwnd, &swapChainDesc, nullptr, nullptr, &swapChain1); + factory->Release(); + if (FAILED(hr)) return false; + + hr = swapChain1->QueryInterface(IID_PPV_ARGS(&m_swapChain)); + swapChain1->Release(); + if (FAILED(hr)) return false; + + D3D12_DESCRIPTOR_HEAP_DESC rtvDesc = {}; + rtvDesc.Type = D3D12_DESCRIPTOR_HEAP_TYPE_RTV; + rtvDesc.NumDescriptors = 3; + rtvDesc.Flags = D3D12_DESCRIPTOR_HEAP_FLAG_NONE; + hr = m_device->CreateDescriptorHeap(&rtvDesc, IID_PPV_ARGS(&m_rtvHeap)); + if (FAILED(hr)) return false; + m_rtvDescriptorSize = m_device->GetDescriptorHandleIncrementSize(D3D12_DESCRIPTOR_HEAP_TYPE_RTV); + + D3D12_DESCRIPTOR_HEAP_DESC srvDesc = {}; + srvDesc.Type = D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV; + srvDesc.NumDescriptors = 1; + srvDesc.Flags = D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE; + hr = m_device->CreateDescriptorHeap(&srvDesc, IID_PPV_ARGS(&m_srvHeap)); + if (FAILED(hr)) return false; + + return true; +} + +bool Application::CreateRenderTarget() { + if (!m_swapChain || !m_device || !m_rtvHeap) return false; + + for (UINT i = 0; i < 3; i++) { + HRESULT hr = m_swapChain->GetBuffer(i, IID_PPV_ARGS(&m_renderTargets[i])); + if (FAILED(hr)) return false; + + D3D12_CPU_DESCRIPTOR_HANDLE rtvHandle = m_rtvHeap->GetCPUDescriptorHandleForHeapStart(); + rtvHandle.ptr += i * m_rtvDescriptorSize; + m_device->CreateRenderTargetView(m_renderTargets[i], nullptr, rtvHandle); + } + return true; +} + +void Application::CleanupRenderTarget() { + for (UINT i = 0; i < 3; i++) { + if 
(m_renderTargets[i]) { + m_renderTargets[i]->Release(); + m_renderTargets[i] = nullptr; + } + } +} + +void Application::SetupDockspace() { + static ImGuiDockNodeFlags dockspaceFlags = ImGuiDockNodeFlags_NoWindowMenuButton; + + ImGuiWindowFlags windowFlags = ImGuiWindowFlags_MenuBar | ImGuiWindowFlags_NoDocking; + + ImGuiViewport* viewport = ImGui::GetMainViewport(); + ImGui::SetNextWindowPos(viewport->Pos); + ImGui::SetNextWindowSize(viewport->Size); + ImGui::SetNextWindowViewport(viewport->ID); + ImGui::PushStyleVar(ImGuiStyleVar_WindowRounding, 0.0f); + ImGui::PushStyleVar(ImGuiStyleVar_WindowBorderSize, 0.0f); + windowFlags |= ImGuiWindowFlags_NoTitleBar | ImGuiWindowFlags_NoCollapse | ImGuiWindowFlags_NoResize | ImGuiWindowFlags_NoMove; + windowFlags |= ImGuiWindowFlags_NoBringToFrontOnFocus | ImGuiWindowFlags_NoNavFocus; + + ImGui::PushStyleVar(ImGuiStyleVar_WindowPadding, ImVec2(0.0f, 0.0f)); + ImGui::Begin("MainDockspace", nullptr, windowFlags); + ImGui::PopStyleVar(); + ImGui::PopStyleVar(2); + + ImGuiID dockspaceId = ImGui::GetID("MyDockspace"); + ImGui::DockSpace(dockspaceId, ImVec2(0.0f, 0.0f), dockspaceFlags); + + static bool firstTime = true; + if (firstTime) { + firstTime = false; + ImGui::DockBuilderRemoveNode(dockspaceId); + ImGui::DockBuilderAddNode(dockspaceId, dockspaceFlags | ImGuiDockNodeFlags_DockSpace); + ImGui::DockBuilderSetNodeSize(dockspaceId, viewport->Size); + + ImGuiID dockMain = dockspaceId; + ImGuiID dockBottom = ImGui::DockBuilderSplitNode(dockMain, ImGuiDir_Down, 0.25f, nullptr, &dockMain); + ImGuiID dockLeft = ImGui::DockBuilderSplitNode(dockMain, ImGuiDir_Left, 0.15f, nullptr, &dockMain); + ImGuiID dockRight = ImGui::DockBuilderSplitNode(dockMain, ImGuiDir_Right, 0.25f, nullptr, &dockMain); + + ImGui::DockBuilderDockWindow("Hierarchy", dockLeft); + ImGui::DockBuilderDockWindow("Scene", dockMain); + ImGui::DockBuilderDockWindow("Game", dockMain); + ImGui::DockBuilderDockWindow("Inspector", dockRight); + 
ImGui::DockBuilderDockWindow("Console", dockBottom); + ImGui::DockBuilderDockWindow("Project", dockBottom); + + ImGui::DockBuilderFinish(dockspaceId); + } + + ImGui::End(); +} + +void Application::RenderUI() { + m_menuBar->Render(); + + m_hierarchyPanel->Render(); + m_sceneViewPanel->Render(); + m_gameViewPanel->Render(); + m_inspectorPanel->Render(); + m_consolePanel->Render(); + m_projectPanel->Render(); +} + +} \ No newline at end of file diff --git a/MVS/ui/src/Application.h b/MVS/ui/src/Application.h new file mode 100644 index 00000000..8e1d92dd --- /dev/null +++ b/MVS/ui/src/Application.h @@ -0,0 +1,65 @@ +#pragma once + +#include +#include +#include +#include + +#include "Theme.h" +#include "panels/Panel.h" +#include "panels/MenuBar.h" +#include "panels/HierarchyPanel.h" +#include "panels/SceneViewPanel.h" +#include "panels/GameViewPanel.h" +#include "panels/InspectorPanel.h" +#include "panels/ConsolePanel.h" +#include "panels/ProjectPanel.h" + +namespace UI { + +class Application { +public: + static Application& Get(); + + bool Initialize(HWND hwnd); + void Shutdown(); + void Render(); + void OnResize(int width, int height); + +private: + Application() = default; + ~Application() = default; + + bool CreateDevice(); + bool CreateRenderTarget(); + void CleanupRenderTarget(); + void SetupDockspace(); + void RenderUI(); + + HWND m_hwnd = nullptr; + int m_width = 1280; + int m_height = 720; + + ID3D12Device* m_device = nullptr; + ID3D12CommandQueue* m_commandQueue = nullptr; + ID3D12CommandAllocator* m_commandAllocator = nullptr; + ID3D12GraphicsCommandList* m_commandList = nullptr; + IDXGISwapChain3* m_swapChain = nullptr; + ID3D12DescriptorHeap* m_rtvHeap = nullptr; + ID3D12DescriptorHeap* m_srvHeap = nullptr; + ID3D12Resource* m_renderTargets[3] = {}; + ID3D12Fence* m_fence = nullptr; + UINT64 m_fenceValue = 0; + UINT m_rtvDescriptorSize = 0; + UINT m_frameIndex = 0; + + std::unique_ptr m_menuBar; + std::unique_ptr m_hierarchyPanel; + std::unique_ptr 
m_sceneViewPanel; + std::unique_ptr m_gameViewPanel; + std::unique_ptr m_inspectorPanel; + std::unique_ptr m_consolePanel; + std::unique_ptr m_projectPanel; +}; + +} \ No newline at end of file diff --git a/MVS/ui/src/Core/AssetItem.h b/MVS/ui/src/Core/AssetItem.h new file mode 100644 index 00000000..8875bcd3 --- /dev/null +++ b/MVS/ui/src/Core/AssetItem.h @@ -0,0 +1,19 @@ +#pragma once + +#include +#include +#include + +namespace UI { + +struct AssetItem { + std::string name; + std::string type; + bool isFolder; + std::string fullPath; + std::vector> children; +}; + +using AssetItemPtr = std::shared_ptr; + +} \ No newline at end of file diff --git a/MVS/ui/src/Core/Event.h b/MVS/ui/src/Core/Event.h new file mode 100644 index 00000000..2ab193b5 --- /dev/null +++ b/MVS/ui/src/Core/Event.h @@ -0,0 +1,48 @@ +#pragma once + +#include +#include +#include + +namespace UI { + +template +class Event { +public: + using HandlerID = size_t; + using Handler = std::function; + + HandlerID Subscribe(Handler handler) { + HandlerID id = m_nextId++; + m_handlers.emplace_back(id, std::move(handler)); + return id; + } + + void Unsubscribe(HandlerID id) { + m_handlers.erase( + std::remove_if(m_handlers.begin(), m_handlers.end(), + [id](const auto& pair) { return pair.first == id; }), + m_handlers.end() + ); + } + + void Invoke(Args... args) { + for (const auto& pair : m_handlers) { + pair.second(args...); + } + } + + void operator()(Args... 
args) { + Invoke(args...); + } + + void Clear() { + m_handlers.clear(); + } + +private: + HandlerID m_nextId = 0; + std::vector> m_handlers; +}; + +} \ No newline at end of file diff --git a/MVS/ui/src/Core/GameObject.h b/MVS/ui/src/Core/GameObject.h new file mode 100644 index 00000000..f376a040 --- /dev/null +++ b/MVS/ui/src/Core/GameObject.h @@ -0,0 +1,101 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace UI { + +using EntityID = uint64_t; +constexpr EntityID INVALID_ENTITY = 0; + +class Component { +public: + virtual ~Component() = default; + virtual std::string GetName() const = 0; +}; + +class TransformComponent : public Component { +public: + float position[3] = {0.0f, 0.0f, 0.0f}; + float rotation[3] = {0.0f, 0.0f, 0.0f}; + float scale[3] = {1.0f, 1.0f, 1.0f}; + + std::string GetName() const override { return "Transform"; } +}; + +class MeshRendererComponent : public Component { +public: + std::string materialName = "Default-Material"; + std::string meshName = ""; + + std::string GetName() const override { return "Mesh Renderer"; } +}; + +struct Entity { + EntityID id = INVALID_ENTITY; + std::string name; + EntityID parent = INVALID_ENTITY; + std::vector children; + std::vector> components; + bool selected = false; + + template + T* AddComponent(Args&&... 
args) { + auto comp = std::make_unique(std::forward(args)...); + T* ptr = comp.get(); + components.push_back(std::move(comp)); + return ptr; + } + + template + T* GetComponent() { + for (auto& comp : components) { + if (auto casted = dynamic_cast(comp.get())) { + return casted; + } + } + return nullptr; + } +}; + +using ComponentInspectorFn = std::function; + +struct ComponentInspectorInfo { + std::string name; + ComponentInspectorFn renderFn; +}; + +class ComponentRegistry { +public: + static ComponentRegistry& Get() { + static ComponentRegistry instance; + return instance; + } + + template + void RegisterComponent(const std::string& name, ComponentInspectorFn inspectorFn) { + m_inspectors[name] = {name, inspectorFn}; + m_factories[name] = []() -> std::unique_ptr { + return std::make_unique(); + }; + } + + ComponentInspectorInfo* GetInspector(const std::string& name) { + auto it = m_inspectors.find(name); + if (it != m_inspectors.end()) { + return &it->second; + } + return nullptr; + } + +private: + ComponentRegistry() = default; + std::unordered_map m_inspectors; + std::unordered_map()>> m_factories; +}; + +} \ No newline at end of file diff --git a/MVS/ui/src/Core/LogEntry.h b/MVS/ui/src/Core/LogEntry.h new file mode 100644 index 00000000..e4218f09 --- /dev/null +++ b/MVS/ui/src/Core/LogEntry.h @@ -0,0 +1,13 @@ +#pragma once + +#include + +namespace UI { + +struct LogEntry { + enum class Level { Info, Warning, Error }; + Level level; + std::string message; +}; + +} \ No newline at end of file diff --git a/MVS/ui/src/Managers/LogSystem.cpp b/MVS/ui/src/Managers/LogSystem.cpp new file mode 100644 index 00000000..a0e06dcc --- /dev/null +++ b/MVS/ui/src/Managers/LogSystem.cpp @@ -0,0 +1,19 @@ +#include "LogSystem.h" + +namespace UI { + +LogSystem& LogSystem::Get() { + static LogSystem instance; + return instance; +} + +void LogSystem::AddLog(LogEntry::Level level, const std::string& message) { + m_logs.push_back({level, message}); + if (m_callback) m_callback(); +} 
+ +void LogSystem::Clear() { + m_logs.clear(); +} + +} \ No newline at end of file diff --git a/MVS/ui/src/Managers/LogSystem.h b/MVS/ui/src/Managers/LogSystem.h new file mode 100644 index 00000000..b480e907 --- /dev/null +++ b/MVS/ui/src/Managers/LogSystem.h @@ -0,0 +1,26 @@ +#pragma once + +#include "Core/LogEntry.h" +#include +#include + +namespace UI { + +class LogSystem { +public: + static LogSystem& Get(); + + void AddLog(LogEntry::Level level, const std::string& message); + void Clear(); + const std::vector& GetLogs() const { return m_logs; } + + void SetCallback(std::function callback) { m_callback = callback; } + +private: + LogSystem() = default; + + std::vector m_logs; + std::function m_callback; +}; + +} \ No newline at end of file diff --git a/MVS/ui/src/Managers/ProjectManager.cpp b/MVS/ui/src/Managers/ProjectManager.cpp new file mode 100644 index 00000000..15d1d1e9 --- /dev/null +++ b/MVS/ui/src/Managers/ProjectManager.cpp @@ -0,0 +1,246 @@ +#include "ProjectManager.h" +#include +#include +#include +#include + +namespace fs = std::filesystem; + +namespace UI { + +ProjectManager& ProjectManager::Get() { + static ProjectManager instance; + return instance; +} + +std::vector& ProjectManager::GetCurrentItems() { + if (m_path.empty()) { + static std::vector empty; + return empty; + } + return m_path.back()->children; +} + +void ProjectManager::NavigateToFolder(const AssetItemPtr& folder) { + m_path.push_back(folder); + m_selectedIndex = -1; +} + +void ProjectManager::NavigateBack() { + if (m_path.size() > 1) { + m_path.pop_back(); + m_selectedIndex = -1; + } +} + +void ProjectManager::NavigateToIndex(size_t index) { + if (index >= m_path.size()) return; + while (m_path.size() > index + 1) { + m_path.pop_back(); + } + m_selectedIndex = -1; +} + +std::string ProjectManager::GetCurrentPath() const { + if (m_path.empty()) return "Assets"; + std::string result = "Assets"; + for (size_t i = 1; i < m_path.size(); i++) { + result += "/"; + result += 
m_path[i]->name; + } + return result; +} + +std::string ProjectManager::GetPathName(size_t index) const { + if (index >= m_path.size()) return ""; + return m_path[index]->name; +} + +static std::wstring Utf8ToWstring(const std::string& str) { + if (str.empty()) return L""; + int len = MultiByteToWideChar(CP_UTF8, 0, str.c_str(), -1, nullptr, 0); + if (len <= 0) return L""; + std::wstring result(len - 1, 0); + MultiByteToWideChar(CP_UTF8, 0, str.c_str(), -1, &result[0], len); + return result; +} + +static std::string WstringToUtf8(const std::wstring& wstr) { + if (wstr.empty()) return ""; + int len = WideCharToMultiByte(CP_UTF8, 0, wstr.c_str(), -1, nullptr, 0, nullptr, nullptr); + if (len <= 0) return ""; + std::string result(len - 1, 0); + WideCharToMultiByte(CP_UTF8, 0, wstr.c_str(), -1, &result[0], len, nullptr, nullptr); + return result; +} + +void ProjectManager::Initialize(const std::string& projectPath) { + m_projectPath = projectPath; + + std::wstring projectPathW = Utf8ToWstring(projectPath); + fs::path assetsPath = fs::path(projectPathW) / L"Assets"; + + try { + if (!fs::exists(assetsPath)) { + fs::create_directories(assetsPath); + fs::create_directories(assetsPath / L"Textures"); + fs::create_directories(assetsPath / L"Models"); + fs::create_directories(assetsPath / L"Scripts"); + fs::create_directories(assetsPath / L"Materials"); + fs::create_directories(assetsPath / L"Scenes"); + + std::ofstream((assetsPath / L"Textures" / L"Grass.png").wstring()); + std::ofstream((assetsPath / L"Textures" / L"Stone.png").wstring()); + std::ofstream((assetsPath / L"Models" / L"Character.fbx").wstring()); + std::ofstream((assetsPath / L"Scripts" / L"PlayerController.cs").wstring()); + std::ofstream((assetsPath / L"Scenes" / L"Main.unity").wstring()); + } + + m_rootFolder = ScanDirectory(assetsPath.wstring()); + m_rootFolder->name = "Assets"; + m_rootFolder->fullPath = WstringToUtf8(assetsPath.wstring()); + + m_path.clear(); + m_path.push_back(m_rootFolder); + 
m_selectedIndex = -1; + } catch (const std::exception& e) { + m_rootFolder = std::make_shared(); + m_rootFolder->name = "Assets"; + m_rootFolder->isFolder = true; + m_rootFolder->type = "Folder"; + m_path.push_back(m_rootFolder); + } +} + +std::wstring ProjectManager::GetCurrentFullPathW() const { + if (m_path.empty()) return Utf8ToWstring(m_projectPath); + + std::wstring fullPath = Utf8ToWstring(m_projectPath); + for (size_t i = 0; i < m_path.size(); i++) { + fullPath += L"/" + Utf8ToWstring(m_path[i]->name); + } + return fullPath; +} + +void ProjectManager::RefreshCurrentFolder() { + if (m_path.empty()) return; + + try { + auto newFolder = ScanDirectory(GetCurrentFullPathW()); + m_path.back()->children = newFolder->children; + } catch (...) { + } +} + +void ProjectManager::CreateFolder(const std::string& name) { + try { + std::wstring fullPath = GetCurrentFullPathW(); + fs::path newFolderPath = fs::path(fullPath) / Utf8ToWstring(name); + fs::create_directory(newFolderPath); + RefreshCurrentFolder(); + } catch (...) { + } +} + +void ProjectManager::DeleteItem(int index) { + if (m_path.empty()) return; + auto& items = m_path.back()->children; + if (index < 0 || index >= (int)items.size()) return; + + try { + std::wstring fullPath = GetCurrentFullPathW(); + fs::path itemPath = fs::path(fullPath) / Utf8ToWstring(items[index]->name); + fs::remove_all(itemPath); + m_selectedIndex = -1; + RefreshCurrentFolder(); + } catch (...) { + } +} + +bool ProjectManager::MoveItem(const std::string& sourceFullPath, const std::string& destFolderFullPath) { + try { + fs::path sourcePath = Utf8ToWstring(sourceFullPath); + fs::path destPath = fs::path(Utf8ToWstring(destFolderFullPath)) / sourcePath.filename(); + + if (!fs::exists(sourcePath)) { + return false; + } + + if (fs::exists(destPath)) { + return false; + } + + fs::rename(sourcePath, destPath); + RefreshCurrentFolder(); + return true; + } catch (...) 
{ + return false; + } +} + +AssetItemPtr ProjectManager::ScanDirectory(const std::wstring& path) { + auto folder = std::make_shared(); + folder->name = WstringToUtf8(fs::path(path).filename().wstring()); + folder->isFolder = true; + folder->type = "Folder"; + + if (!fs::exists(path)) return folder; + + std::vector items; + + try { + for (const auto& entry : fs::directory_iterator(path)) { + std::wstring nameW = entry.path().filename().wstring(); + bool isFolder = entry.is_directory(); + items.push_back(CreateAssetItem(entry.path().wstring(), nameW, isFolder)); + } + } catch (...) { + } + + std::sort(items.begin(), items.end(), [](const AssetItemPtr& a, const AssetItemPtr& b) { + if (a->isFolder != b->isFolder) return a->isFolder; + return a->name < b->name; + }); + + folder->children = items; + return folder; +} + +AssetItemPtr ProjectManager::CreateAssetItem(const std::wstring& path, const std::wstring& nameW, bool isFolder) { + auto item = std::make_shared(); + item->name = WstringToUtf8(nameW); + item->isFolder = isFolder; + item->fullPath = WstringToUtf8(path); + + if (isFolder) { + item->type = "Folder"; + try { + auto subFolder = ScanDirectory(path); + item->children = subFolder->children; + } catch (...) 
{ + } + } else { + std::wstring ext = fs::path(path).extension().wstring(); + std::transform(ext.begin(), ext.end(), ext.begin(), ::towlower); + + if (ext == L".png" || ext == L".jpg" || ext == L".tga" || ext == L".bmp") { + item->type = "Texture"; + } else if (ext == L".fbx" || ext == L".obj" || ext == L".gltf" || ext == L".glb") { + item->type = "Model"; + } else if (ext == L".cs" || ext == L".cpp" || ext == L".h") { + item->type = "Script"; + } else if (ext == L".mat") { + item->type = "Material"; + } else if (ext == L".unity" || ext == L".scene") { + item->type = "Scene"; + } else if (ext == L".prefab") { + item->type = "Prefab"; + } else { + item->type = "File"; + } + } + + return item; +} + +} \ No newline at end of file diff --git a/MVS/ui/src/Managers/ProjectManager.h b/MVS/ui/src/Managers/ProjectManager.h new file mode 100644 index 00000000..525aa0ea --- /dev/null +++ b/MVS/ui/src/Managers/ProjectManager.h @@ -0,0 +1,49 @@ +#pragma once + +#include "Core/AssetItem.h" +#include +#include +#include + +namespace UI { + +class ProjectManager { +public: + static ProjectManager& Get(); + + std::vector& GetCurrentItems(); + int GetSelectedIndex() const { return m_selectedIndex; } + void SetSelectedIndex(int index) { m_selectedIndex = index; } + + void NavigateToFolder(const AssetItemPtr& folder); + void NavigateBack(); + void NavigateToIndex(size_t index); + bool CanNavigateBack() const { return m_path.size() > 1; } + + std::string GetCurrentPath() const; + size_t GetPathDepth() const { return m_path.size(); } + std::string GetPathName(size_t index) const; + + void Initialize(const std::string& projectPath); + void RefreshCurrentFolder(); + + void CreateFolder(const std::string& name); + void DeleteItem(int index); + bool MoveItem(const std::string& sourceFullPath, const std::string& destFolderFullPath); + + const std::string& GetProjectPath() const { return m_projectPath; } + +private: + ProjectManager() = default; + + AssetItemPtr ScanDirectory(const 
std::wstring& path); + AssetItemPtr CreateAssetItem(const std::wstring& path, const std::wstring& nameW, bool isFolder); + std::wstring GetCurrentFullPathW() const; + + AssetItemPtr m_rootFolder; + std::vector m_path; + int m_selectedIndex = -1; + std::string m_projectPath; +}; + +} \ No newline at end of file diff --git a/MVS/ui/src/Managers/SceneManager.cpp b/MVS/ui/src/Managers/SceneManager.cpp new file mode 100644 index 00000000..ba9d419d --- /dev/null +++ b/MVS/ui/src/Managers/SceneManager.cpp @@ -0,0 +1,185 @@ +#include "SceneManager.h" +#include + +namespace UI { + +EntityID SceneManager::CreateEntity(const std::string& name, EntityID parent) { + EntityID id = m_nextEntityId++; + Entity entity; + entity.id = id; + entity.name = name; + entity.parent = parent; + m_entities[id] = std::move(entity); + + if (parent != INVALID_ENTITY) { + m_entities[parent].children.push_back(id); + } else { + m_rootEntities.push_back(id); + } + + OnEntityCreated.Invoke(id); + return id; +} + +void SceneManager::DeleteEntity(EntityID id) { + auto it = m_entities.find(id); + if (it == m_entities.end()) return; + + Entity& entity = it->second; + + std::vector childrenToDelete = entity.children; + for (EntityID childId : childrenToDelete) { + DeleteEntity(childId); + } + + if (entity.parent != INVALID_ENTITY) { + auto* parent = GetEntity(entity.parent); + if (parent) { + auto& siblings = parent->children; + siblings.erase(std::remove(siblings.begin(), siblings.end(), id), siblings.end()); + } + } else { + m_rootEntities.erase(std::remove(m_rootEntities.begin(), m_rootEntities.end(), id), m_rootEntities.end()); + } + + if (SelectionManager::Get().GetSelectedEntity() == id) { + SelectionManager::Get().ClearSelection(); + } + + m_entities.erase(it); + OnEntityDeleted.Invoke(id); +} + +ClipboardData SceneManager::CopyEntityRecursive(const Entity* entity) { + ClipboardData data; + data.name = entity->name; + + for (const auto& comp : entity->components) { + if (auto* transform = 
dynamic_cast(comp.get())) { + auto newComp = std::make_unique(); + memcpy(newComp->position, transform->position, sizeof(transform->position)); + memcpy(newComp->rotation, transform->rotation, sizeof(transform->rotation)); + memcpy(newComp->scale, transform->scale, sizeof(transform->scale)); + data.components.push_back(std::move(newComp)); + } + else if (auto* meshRenderer = dynamic_cast(comp.get())) { + auto newComp = std::make_unique(); + newComp->materialName = meshRenderer->materialName; + newComp->meshName = meshRenderer->meshName; + data.components.push_back(std::move(newComp)); + } + } + + for (EntityID childId : entity->children) { + const Entity* child = GetEntity(childId); + if (child) { + data.children.push_back(CopyEntityRecursive(child)); + } + } + + return data; +} + +void SceneManager::CopyEntity(EntityID id) { + const Entity* entity = GetEntity(id); + if (!entity) return; + + m_clipboard = CopyEntityRecursive(entity); +} + +EntityID SceneManager::PasteEntityRecursive(const ClipboardData& data, EntityID parent) { + EntityID newId = CreateEntity(data.name, parent); + Entity* newEntity = GetEntity(newId); + + if (newEntity) { + newEntity->components.clear(); + for (const auto& comp : data.components) { + if (auto* transform = dynamic_cast(comp.get())) { + auto newComp = std::make_unique(); + memcpy(newComp->position, transform->position, sizeof(transform->position)); + memcpy(newComp->rotation, transform->rotation, sizeof(transform->rotation)); + memcpy(newComp->scale, transform->scale, sizeof(transform->scale)); + newEntity->components.push_back(std::move(newComp)); + } + else if (auto* meshRenderer = dynamic_cast(comp.get())) { + auto newComp = std::make_unique(); + newComp->materialName = meshRenderer->materialName; + newComp->meshName = meshRenderer->meshName; + newEntity->components.push_back(std::move(newComp)); + } + } + } + + for (const auto& childData : data.children) { + PasteEntityRecursive(childData, newId); + } + + return newId; +} + 
+EntityID SceneManager::PasteEntity(EntityID parent) { + if (!m_clipboard) return INVALID_ENTITY; + return PasteEntityRecursive(*m_clipboard, parent); +} + +EntityID SceneManager::DuplicateEntity(EntityID id) { + CopyEntity(id); + const Entity* entity = GetEntity(id); + if (!entity) return INVALID_ENTITY; + return PasteEntity(entity->parent); +} + +void SceneManager::MoveEntity(EntityID id, EntityID newParent) { + Entity* entity = GetEntity(id); + if (!entity || id == newParent) return; + + if (entity->parent != INVALID_ENTITY) { + Entity* oldParent = GetEntity(entity->parent); + if (oldParent) { + auto& siblings = oldParent->children; + siblings.erase(std::remove(siblings.begin(), siblings.end(), id), siblings.end()); + } + } else { + m_rootEntities.erase(std::remove(m_rootEntities.begin(), m_rootEntities.end(), id), m_rootEntities.end()); + } + + entity->parent = newParent; + + if (newParent != INVALID_ENTITY) { + Entity* newParentEntity = GetEntity(newParent); + if (newParentEntity) { + newParentEntity->children.push_back(id); + } + } else { + m_rootEntities.push_back(id); + } + + OnEntityChanged.Invoke(id); +} + +void SceneManager::CreateDemoScene() { + m_entities.clear(); + m_rootEntities.clear(); + m_nextEntityId = 1; + m_clipboard.reset(); + + EntityID camera = CreateEntity("Main Camera"); + GetEntity(camera)->AddComponent(); + + EntityID light = CreateEntity("Directional Light"); + + EntityID cube = CreateEntity("Cube"); + GetEntity(cube)->AddComponent(); + GetEntity(cube)->AddComponent()->meshName = "Cube Mesh"; + + EntityID sphere = CreateEntity("Sphere"); + GetEntity(sphere)->AddComponent(); + GetEntity(sphere)->AddComponent()->meshName = "Sphere Mesh"; + + EntityID player = CreateEntity("Player"); + EntityID weapon = CreateEntity("Weapon", player); + + OnSceneChanged.Invoke(); +} + +} \ No newline at end of file diff --git a/MVS/ui/src/Managers/SceneManager.h b/MVS/ui/src/Managers/SceneManager.h new file mode 100644 index 00000000..49986e59 --- 
/dev/null +++ b/MVS/ui/src/Managers/SceneManager.h @@ -0,0 +1,86 @@ +#pragma once + +#include "Core/GameObject.h" +#include "SelectionManager.h" +#include +#include +#include +#include + +namespace UI { + +struct ClipboardData { + std::string name; + std::vector> components; + std::vector children; +}; + +class SceneManager { +public: + static SceneManager& Get() { + static SceneManager instance; + return instance; + } + + EntityID CreateEntity(const std::string& name, EntityID parent = INVALID_ENTITY); + + Entity* GetEntity(EntityID id) { + auto it = m_entities.find(id); + if (it != m_entities.end()) { + return &it->second; + } + return nullptr; + } + + const Entity* GetEntity(EntityID id) const { + auto it = m_entities.find(id); + if (it != m_entities.end()) { + return &it->second; + } + return nullptr; + } + + const std::vector& GetRootEntities() const { + return m_rootEntities; + } + + void DeleteEntity(EntityID id); + + void RenameEntity(EntityID id, const std::string& newName) { + auto* entity = GetEntity(id); + if (entity) { + entity->name = newName; + OnEntityChanged.Invoke(id); + } + } + + void CopyEntity(EntityID id); + + EntityID PasteEntity(EntityID parent = INVALID_ENTITY); + + EntityID DuplicateEntity(EntityID id); + + void MoveEntity(EntityID id, EntityID newParent); + + void CreateDemoScene(); + + bool HasClipboardData() const { return m_clipboard.has_value(); } + + Event OnEntityCreated; + Event OnEntityDeleted; + Event OnEntityChanged; + Event<> OnSceneChanged; + +private: + SceneManager() = default; + + ClipboardData CopyEntityRecursive(const Entity* entity); + EntityID PasteEntityRecursive(const ClipboardData& data, EntityID parent); + + EntityID m_nextEntityId = 1; + std::unordered_map m_entities; + std::vector m_rootEntities; + std::optional m_clipboard; +}; + +} \ No newline at end of file diff --git a/MVS/ui/src/Managers/SelectionManager.h b/MVS/ui/src/Managers/SelectionManager.h new file mode 100644 index 00000000..fe17e7b0 --- /dev/null 
+++ b/MVS/ui/src/Managers/SelectionManager.h @@ -0,0 +1,38 @@ +#pragma once + +#include "Core/GameObject.h" +#include "Core/Event.h" +#include + +namespace UI { + +class SelectionManager { +public: + static SelectionManager& Get() { + static SelectionManager instance; + return instance; + } + + EntityID GetSelectedEntity() const { return m_selectedEntity; } + + void SetSelectedEntity(EntityID id) { + m_selectedEntity = id; + OnSelectionChanged.Invoke(id); + } + + void ClearSelection() { + SetSelectedEntity(INVALID_ENTITY); + } + + bool IsSelected(EntityID id) const { + return m_selectedEntity == id; + } + + Event OnSelectionChanged; + +private: + SelectionManager() = default; + EntityID m_selectedEntity = INVALID_ENTITY; +}; + +} \ No newline at end of file diff --git a/MVS/ui/src/Theme.cpp b/MVS/ui/src/Theme.cpp new file mode 100644 index 00000000..6c5d7d93 --- /dev/null +++ b/MVS/ui/src/Theme.cpp @@ -0,0 +1,82 @@ +#include "Theme.h" +#include + +namespace UI { + +void ApplyUnityDarkTheme() { + ImGuiStyle& style = ImGui::GetStyle(); + ImVec4* colors = style.Colors; + + colors[ImGuiCol_Text] = ImVec4(0.90f, 0.90f, 0.90f, 1.00f); + colors[ImGuiCol_TextDisabled] = ImVec4(0.50f, 0.50f, 0.50f, 1.00f); + colors[ImGuiCol_WindowBg] = ImVec4(0.12f, 0.12f, 0.12f, 1.00f); + colors[ImGuiCol_ChildBg] = ImVec4(0.15f, 0.15f, 0.15f, 1.00f); + colors[ImGuiCol_PopupBg] = ImVec4(0.18f, 0.18f, 0.18f, 0.94f); + colors[ImGuiCol_Border] = ImVec4(0.08f, 0.08f, 0.08f, 1.00f); + colors[ImGuiCol_BorderShadow] = ImVec4(0.00f, 0.00f, 0.00f, 0.00f); + colors[ImGuiCol_FrameBg] = ImVec4(0.20f, 0.20f, 0.20f, 1.00f); + colors[ImGuiCol_FrameBgHovered] = ImVec4(0.28f, 0.28f, 0.28f, 1.00f); + colors[ImGuiCol_FrameBgActive] = ImVec4(0.35f, 0.35f, 0.35f, 1.00f); + colors[ImGuiCol_TitleBg] = ImVec4(0.15f, 0.15f, 0.15f, 1.00f); + colors[ImGuiCol_TitleBgActive] = ImVec4(0.20f, 0.20f, 0.20f, 1.00f); + colors[ImGuiCol_TitleBgCollapsed] = ImVec4(0.12f, 0.12f, 0.12f, 0.75f); + colors[ImGuiCol_MenuBarBg] = 
ImVec4(0.14f, 0.14f, 0.14f, 1.00f); + colors[ImGuiCol_ScrollbarBg] = ImVec4(0.10f, 0.10f, 0.10f, 0.53f); + colors[ImGuiCol_ScrollbarGrab] = ImVec4(0.30f, 0.30f, 0.30f, 1.00f); + colors[ImGuiCol_ScrollbarGrabHovered] = ImVec4(0.40f, 0.40f, 0.40f, 1.00f); + colors[ImGuiCol_ScrollbarGrabActive] = ImVec4(0.50f, 0.50f, 0.50f, 1.00f); + colors[ImGuiCol_CheckMark] = ImVec4(0.90f, 0.90f, 0.90f, 1.00f); + colors[ImGuiCol_SliderGrab] = ImVec4(0.60f, 0.60f, 0.60f, 1.00f); + colors[ImGuiCol_SliderGrabActive] = ImVec4(0.80f, 0.80f, 0.80f, 1.00f); + colors[ImGuiCol_Button] = ImVec4(0.25f, 0.25f, 0.25f, 1.00f); + colors[ImGuiCol_ButtonHovered] = ImVec4(0.35f, 0.35f, 0.35f, 1.00f); + colors[ImGuiCol_ButtonActive] = ImVec4(0.45f, 0.45f, 0.45f, 1.00f); + colors[ImGuiCol_Header] = ImVec4(0.25f, 0.25f, 0.25f, 1.00f); + colors[ImGuiCol_HeaderHovered] = ImVec4(0.35f, 0.35f, 0.35f, 1.00f); + colors[ImGuiCol_HeaderActive] = ImVec4(0.45f, 0.45f, 0.45f, 1.00f); + colors[ImGuiCol_Separator] = ImVec4(0.08f, 0.08f, 0.08f, 1.00f); + colors[ImGuiCol_SeparatorHovered] = ImVec4(0.50f, 0.50f, 0.50f, 1.00f); + colors[ImGuiCol_SeparatorActive] = ImVec4(0.60f, 0.60f, 0.60f, 1.00f); + colors[ImGuiCol_ResizeGrip] = ImVec4(0.30f, 0.30f, 0.30f, 1.00f); + colors[ImGuiCol_ResizeGripHovered] = ImVec4(0.50f, 0.50f, 0.50f, 1.00f); + colors[ImGuiCol_ResizeGripActive] = ImVec4(0.60f, 0.60f, 0.60f, 1.00f); + colors[ImGuiCol_Tab] = ImVec4(0.18f, 0.18f, 0.18f, 0.86f); + colors[ImGuiCol_TabHovered] = ImVec4(0.35f, 0.35f, 0.35f, 1.00f); + colors[ImGuiCol_TabActive] = ImVec4(0.20f, 0.20f, 0.20f, 1.00f); + colors[ImGuiCol_TabUnfocused] = ImVec4(0.15f, 0.15f, 0.15f, 0.97f); + colors[ImGuiCol_TabUnfocusedActive] = ImVec4(0.22f, 0.22f, 0.22f, 1.00f); + colors[ImGuiCol_DockingPreview] = ImVec4(0.40f, 0.40f, 0.40f, 0.70f); + colors[ImGuiCol_DockingEmptyBg] = ImVec4(0.12f, 0.12f, 0.12f, 1.00f); + colors[ImGuiCol_PlotLines] = ImVec4(0.61f, 0.61f, 0.61f, 1.00f); + colors[ImGuiCol_PlotLinesHovered] = ImVec4(1.00f, 0.43f, 0.35f, 
1.00f); + colors[ImGuiCol_PlotHistogram] = ImVec4(0.90f, 0.70f, 0.00f, 1.00f); + colors[ImGuiCol_PlotHistogramHovered] = ImVec4(1.00f, 0.60f, 0.00f, 1.00f); + colors[ImGuiCol_TableHeaderBg] = ImVec4(0.19f, 0.19f, 0.20f, 1.00f); + colors[ImGuiCol_TableBorderStrong] = ImVec4(0.31f, 0.31f, 0.35f, 1.00f); + colors[ImGuiCol_TableBorderLight] = ImVec4(0.23f, 0.23f, 0.25f, 1.00f); + colors[ImGuiCol_TableRowBg] = ImVec4(0.00f, 0.00f, 0.00f, 0.00f); + colors[ImGuiCol_TableRowBgAlt] = ImVec4(1.00f, 1.00f, 1.00f, 0.06f); + colors[ImGuiCol_TextSelectedBg] = ImVec4(0.40f, 0.40f, 0.40f, 0.50f); + colors[ImGuiCol_DragDropTarget] = ImVec4(0.60f, 0.60f, 0.60f, 0.90f); + colors[ImGuiCol_NavHighlight] = ImVec4(0.50f, 0.50f, 0.50f, 1.00f); + colors[ImGuiCol_NavWindowingHighlight] = ImVec4(1.00f, 1.00f, 1.00f, 0.70f); + colors[ImGuiCol_NavWindowingDimBg] = ImVec4(0.80f, 0.80f, 0.80f, 0.20f); + colors[ImGuiCol_ModalWindowDimBg] = ImVec4(0.80f, 0.80f, 0.80f, 0.35f); + + style.WindowRounding = 4.0f; + style.ChildRounding = 4.0f; + style.FrameRounding = 4.0f; + style.GrabRounding = 4.0f; + style.PopupRounding = 4.0f; + style.ScrollbarRounding = 4.0f; + style.TabRounding = 4.0f; + style.WindowBorderSize = 1.0f; + style.ChildBorderSize = 1.0f; + style.FrameBorderSize = 0.0f; + style.WindowPadding = ImVec2(8.0f, 8.0f); + style.FramePadding = ImVec2(6.0f, 4.0f); + style.ItemSpacing = ImVec2(8.0f, 4.0f); + style.ItemInnerSpacing = ImVec2(6.0f, 4.0f); +} + +} \ No newline at end of file diff --git a/MVS/ui/src/Theme.h b/MVS/ui/src/Theme.h new file mode 100644 index 00000000..6915ee9c --- /dev/null +++ b/MVS/ui/src/Theme.h @@ -0,0 +1,7 @@ +#pragma once + +namespace UI { + +void ApplyUnityDarkTheme(); + +} \ No newline at end of file diff --git a/MVS/ui/src/main.cpp b/MVS/ui/src/main.cpp new file mode 100644 index 00000000..253643a2 --- /dev/null +++ b/MVS/ui/src/main.cpp @@ -0,0 +1,96 @@ +#include "Application.h" +#include +#include +#include + +LRESULT WINAPI WndProc(HWND hWnd, UINT msg, WPARAM 
wParam, LPARAM lParam); + +int WINAPI wWinMain(HINSTANCE hInstance, HINSTANCE, LPWSTR, int nCmdShow) { + AllocConsole(); + freopen("CONOUT$", "w", stdout); + printf("Starting UI application...\n"); + + WNDCLASSEXW wc = {}; + wc.cbSize = sizeof(wc); + wc.style = CS_CLASSDC; + wc.lpfnWndProc = WndProc; + wc.hInstance = GetModuleHandle(nullptr); + wc.lpszClassName = L"XCVolumeRendererUI2"; + + if (!RegisterClassExW(&wc)) { + printf("Failed to register window class, error: %lu\n", GetLastError()); + return 1; + } + printf("Window class registered.\n"); + + HWND hwnd = CreateWindowExW( + 0, wc.lpszClassName, L"XCVolumeRenderer - Unity Style Editor", + WS_OVERLAPPEDWINDOW, 100, 100, 1280, 720, + nullptr, nullptr, wc.hInstance, nullptr + ); + + if (!hwnd) { + printf("Failed to create window, error: %lu\n", GetLastError()); + return 1; + } + printf("Window created.\n"); + + ShowWindow(hwnd, nCmdShow); + UpdateWindow(hwnd); + printf("Window shown.\n"); + + printf("Initializing application...\n"); + if (!UI::Application::Get().Initialize(hwnd)) { + printf("Failed to initialize application!\n"); + UnregisterClassW(wc.lpszClassName, wc.hInstance); + system("pause"); + return 1; + } + printf("Application initialized successfully.\n"); + + MSG msg = {}; + int frameCount = 0; + while (msg.message != WM_QUIT) { + if (PeekMessageW(&msg, nullptr, 0U, 0U, PM_REMOVE)) { + TranslateMessage(&msg); + DispatchMessageW(&msg); + } else { + UI::Application::Get().Render(); + frameCount++; + if (frameCount % 100 == 0) { + printf("Frame %d\n", frameCount); + } + } + } + + printf("Shutting down...\n"); + UI::Application::Get().Shutdown(); + UnregisterClassW(wc.lpszClassName, wc.hInstance); + + printf("Press any key to exit...\n"); + system("pause"); + return 0; +} + +extern IMGUI_IMPL_API LRESULT ImGui_ImplWin32_WndProcHandler(HWND hWnd, UINT msg, WPARAM wParam, LPARAM lParam); + +LRESULT WINAPI WndProc(HWND hWnd, UINT msg, WPARAM wParam, LPARAM lParam) { + if 
(ImGui_ImplWin32_WndProcHandler(hWnd, msg, wParam, lParam)) + return true; + + switch (msg) { + case WM_SIZE: + if (wParam != SIZE_MINIMIZED) { + UI::Application::Get().OnResize((int)LOWORD(lParam), (int)HIWORD(lParam)); + } + return 0; + case WM_SYSCOMMAND: + if ((wParam & 0xfff0) == SC_KEYMENU) + return 0; + break; + case WM_DESTROY: + PostQuitMessage(0); + return 0; + } + return DefWindowProcW(hWnd, msg, wParam, lParam); +} \ No newline at end of file diff --git a/MVS/ui/src/panels/ConsolePanel.cpp b/MVS/ui/src/panels/ConsolePanel.cpp new file mode 100644 index 00000000..f7df0193 --- /dev/null +++ b/MVS/ui/src/panels/ConsolePanel.cpp @@ -0,0 +1,70 @@ +#include "ConsolePanel.h" +#include "Managers/LogSystem.h" +#include "Core/LogEntry.h" +#include + +namespace UI { + +ConsolePanel::ConsolePanel() : Panel("Console") { + LogSystem::Get().AddLog(LogEntry::Level::Info, "Engine initialized successfully"); + LogSystem::Get().AddLog(LogEntry::Level::Info, "Loading default scene..."); + LogSystem::Get().AddLog(LogEntry::Level::Warning, "Missing material on object 'Cube'"); + LogSystem::Get().AddLog(LogEntry::Level::Error, "Failed to load texture: 'Assets/Textures/missing.png'"); + LogSystem::Get().AddLog(LogEntry::Level::Info, "Scene loaded successfully"); +} + +void ConsolePanel::Render() { + ImGui::Begin(m_name.c_str(), nullptr, ImGuiWindowFlags_None); + + if (ImGui::Button("Clear")) { + LogSystem::Get().Clear(); + } + ImGui::SameLine(); + if (ImGui::Button("Info")) { + LogSystem::Get().AddLog(LogEntry::Level::Info, "Test info message"); + } + ImGui::SameLine(); + if (ImGui::Button("Warn")) { + LogSystem::Get().AddLog(LogEntry::Level::Warning, "Test warning message"); + } + ImGui::SameLine(); + if (ImGui::Button("Error")) { + LogSystem::Get().AddLog(LogEntry::Level::Error, "Test error message"); + } + + ImGui::Separator(); + + ImGui::BeginChild("LogScroll", ImVec2(0, 0), false, ImGuiWindowFlags_HorizontalScrollbar); + + for (const auto& log : 
LogSystem::Get().GetLogs()) { + ImVec4 color; + const char* prefix; + + switch (log.level) { + case LogEntry::Level::Info: + color = ImVec4(0.7f, 0.7f, 0.7f, 1.0f); + prefix = "[Info] "; + break; + case LogEntry::Level::Warning: + color = ImVec4(1.0f, 0.8f, 0.0f, 1.0f); + prefix = "[Warn] "; + break; + case LogEntry::Level::Error: + color = ImVec4(1.0f, 0.3f, 0.3f, 1.0f); + prefix = "[Error]"; + break; + } + + ImGui::TextColored(color, "%s%s", prefix, log.message.c_str()); + } + + if (m_scrollToBottom) { + ImGui::SetScrollHereY(1.0f); + m_scrollToBottom = false; + } + + ImGui::EndChild(); + ImGui::End(); +} + +} \ No newline at end of file diff --git a/MVS/ui/src/panels/ConsolePanel.h b/MVS/ui/src/panels/ConsolePanel.h new file mode 100644 index 00000000..4d73ef58 --- /dev/null +++ b/MVS/ui/src/panels/ConsolePanel.h @@ -0,0 +1,16 @@ +#pragma once + +#include "Panel.h" + +namespace UI { + +class ConsolePanel : public Panel { +public: + ConsolePanel(); + void Render() override; + +private: + bool m_scrollToBottom = false; +}; + +} \ No newline at end of file diff --git a/MVS/ui/src/panels/GameViewPanel.cpp b/MVS/ui/src/panels/GameViewPanel.cpp new file mode 100644 index 00000000..d4389e1e --- /dev/null +++ b/MVS/ui/src/panels/GameViewPanel.cpp @@ -0,0 +1,31 @@ +#include "GameViewPanel.h" +#include +#include + +namespace UI { + +GameViewPanel::GameViewPanel() : Panel("Game") {} + +void GameViewPanel::Render() { + ImGui::Begin(m_name.c_str(), nullptr, ImGuiWindowFlags_None); + + RenderGameView(); + + ImGui::End(); +} + +void GameViewPanel::RenderGameView() { + ImVec2 canvasSize = ImGui::GetContentRegionAvail(); + ImDrawList* drawList = ImGui::GetWindowDrawList(); + ImVec2 canvasPos = ImGui::GetCursorScreenPos(); + + ImU32 bgColor = IM_COL32(20, 20, 25, 255); + drawList->AddRectFilled(canvasPos, ImVec2(canvasPos.x + canvasSize.x, canvasPos.y + canvasSize.y), bgColor); + + const char* text = "Game View (Press Play)"; + ImVec2 textSize = ImGui::CalcTextSize(text); + 
ImVec2 textPos(canvasPos.x + (canvasSize.x - textSize.x) * 0.5f, canvasPos.y + (canvasSize.y - textSize.y) * 0.5f); + drawList->AddText(textPos, IM_COL32(128, 128, 128, 255), text); +} + +} diff --git a/MVS/ui/src/panels/GameViewPanel.h b/MVS/ui/src/panels/GameViewPanel.h new file mode 100644 index 00000000..f0797cdf --- /dev/null +++ b/MVS/ui/src/panels/GameViewPanel.h @@ -0,0 +1,16 @@ +#pragma once + +#include "Panel.h" + +namespace UI { + +class GameViewPanel : public Panel { +public: + GameViewPanel(); + void Render() override; + +private: + void RenderGameView(); +}; + +} diff --git a/MVS/ui/src/panels/HierarchyPanel.cpp b/MVS/ui/src/panels/HierarchyPanel.cpp new file mode 100644 index 00000000..1be32dcc --- /dev/null +++ b/MVS/ui/src/panels/HierarchyPanel.cpp @@ -0,0 +1,345 @@ +#include "HierarchyPanel.h" +#include "Managers/SceneManager.h" +#include "Managers/SelectionManager.h" +#include +#include + +namespace UI { + +HierarchyPanel::HierarchyPanel() : Panel("Hierarchy") { + SceneManager::Get().CreateDemoScene(); + + m_selectionHandlerId = SelectionManager::Get().OnSelectionChanged.Subscribe([this](EntityID) { + }); +} + +HierarchyPanel::~HierarchyPanel() { + SelectionManager::Get().OnSelectionChanged.Unsubscribe(m_selectionHandlerId); +} + +void HierarchyPanel::Render() { + ImGui::Begin(m_name.c_str(), nullptr, ImGuiWindowFlags_None); + + RenderSearchBar(); + + ImGui::Separator(); + + HandleKeyboardShortcuts(); + + std::string filter = m_searchBuffer; + + ImGui::BeginChild("EntityList"); + + for (EntityID id : SceneManager::Get().GetRootEntities()) { + RenderEntity(id, filter); + } + + if (ImGui::IsWindowHovered() && ImGui::IsMouseDown(0) && !ImGui::IsAnyItemHovered()) { + if (!m_renaming) { + SelectionManager::Get().ClearSelection(); + } + } + + if (ImGui::BeginPopupContextWindow("HierarchyContextMenu", ImGuiPopupFlags_MouseButtonRight)) { + RenderCreateMenu(INVALID_ENTITY); + ImGui::EndPopup(); + } + + ImGui::InvisibleButton("##DragTarget", ImVec2(-1, 
-1)); + if (ImGui::BeginDragDropTarget()) { + if (const ImGuiPayload* payload = ImGui::AcceptDragDropPayload("ENTITY_ID")) { + EntityID sourceId = *(const EntityID*)payload->Data; + if (sourceId != INVALID_ENTITY) { + const Entity* sourceEntity = SceneManager::Get().GetEntity(sourceId); + if (sourceEntity && sourceEntity->parent != INVALID_ENTITY) { + SceneManager::Get().MoveEntity(sourceId, INVALID_ENTITY); + } + } + } + ImGui::EndDragDropTarget(); + } + + ImGui::EndChild(); + + ImGui::End(); +} + +void HierarchyPanel::RenderSearchBar() { + ImGui::SetNextItemWidth(-1); + ImGui::InputTextWithHint("##Search", "Search...", m_searchBuffer, sizeof(m_searchBuffer)); +} + +void HierarchyPanel::RenderEntity(EntityID id, const std::string& filter) { + auto& sceneManager = SceneManager::Get(); + Entity* entity = sceneManager.GetEntity(id); + if (!entity) return; + + if (!filter.empty() && !PassesFilter(id, filter)) { + return; + } + + ImGui::PushID(static_cast(id)); + + ImGuiTreeNodeFlags flags = ImGuiTreeNodeFlags_OpenOnArrow | ImGuiTreeNodeFlags_SpanAvailWidth; + + if (entity->children.empty()) { + flags |= ImGuiTreeNodeFlags_Leaf; + } + + if (SelectionManager::Get().IsSelected(id)) { + flags |= ImGuiTreeNodeFlags_Selected; + } + + if (m_renaming && m_renamingEntity == id) { + if (m_renameJustStarted) { + ImGui::SetKeyboardFocusHere(); + m_renameJustStarted = false; + } + + ImGui::SetNextItemWidth(-1); + if (ImGui::InputText("##Rename", m_renameBuffer, sizeof(m_renameBuffer), ImGuiInputTextFlags_EnterReturnsTrue | ImGuiInputTextFlags_AutoSelectAll)) { + if (strlen(m_renameBuffer) > 0) { + sceneManager.RenameEntity(id, m_renameBuffer); + } + m_renaming = false; + m_renamingEntity = INVALID_ENTITY; + } + + if (!ImGui::IsItemActive() && ImGui::IsMouseClicked(0)) { + if (strlen(m_renameBuffer) > 0) { + sceneManager.RenameEntity(id, m_renameBuffer); + } + m_renaming = false; + m_renamingEntity = INVALID_ENTITY; + } + } else { + bool isOpen = 
ImGui::TreeNodeEx(entity->name.c_str(), flags); + + if (ImGui::IsItemClicked() && !ImGui::IsItemToggledOpen()) { + SelectionManager::Get().SetSelectedEntity(id); + } + + if (ImGui::IsItemHovered() && ImGui::IsMouseDoubleClicked(0)) { + m_renaming = true; + m_renamingEntity = id; + strcpy_s(m_renameBuffer, entity->name.c_str()); + m_renameJustStarted = true; + } + + HandleDragDrop(id); + + if (ImGui::BeginPopupContextItem("EntityContextMenu")) { + RenderContextMenu(id); + ImGui::EndPopup(); + } + + if (isOpen) { + for (EntityID childId : entity->children) { + RenderEntity(childId, filter); + } + ImGui::TreePop(); + } + } + + ImGui::PopID(); +} + +void HierarchyPanel::RenderContextMenu(EntityID id) { + auto& sceneManager = SceneManager::Get(); + auto& selectionManager = SelectionManager::Get(); + + if (ImGui::BeginMenu("Create")) { + RenderCreateMenu(id); + ImGui::EndMenu(); + } + + ImGui::Separator(); + + if (ImGui::MenuItem("Rename", "F2")) { + const Entity* entity = sceneManager.GetEntity(id); + if (entity) { + m_renaming = true; + m_renamingEntity = id; + strcpy_s(m_renameBuffer, entity->name.c_str()); + m_renameJustStarted = true; + } + } + + if (ImGui::MenuItem("Delete", "Delete")) { + sceneManager.DeleteEntity(id); + } + + ImGui::Separator(); + + if (ImGui::MenuItem("Copy", "Ctrl+C")) { + sceneManager.CopyEntity(id); + } + + if (ImGui::MenuItem("Paste", "Ctrl+V", false, sceneManager.HasClipboardData())) { + sceneManager.PasteEntity(id); + } + + if (ImGui::MenuItem("Duplicate", "Ctrl+D")) { + EntityID newId = sceneManager.DuplicateEntity(id); + if (newId != INVALID_ENTITY) { + selectionManager.SetSelectedEntity(newId); + } + } +} + +void HierarchyPanel::RenderCreateMenu(EntityID parent) { + auto& sceneManager = SceneManager::Get(); + auto& selectionManager = SelectionManager::Get(); + + if (ImGui::MenuItem("Empty Object")) { + EntityID newId = sceneManager.CreateEntity("GameObject", parent); + selectionManager.SetSelectedEntity(newId); + } + + 
ImGui::Separator(); + + if (ImGui::MenuItem("Camera")) { + EntityID newId = sceneManager.CreateEntity("Camera", parent); + sceneManager.GetEntity(newId)->AddComponent(); + selectionManager.SetSelectedEntity(newId); + } + + if (ImGui::MenuItem("Light")) { + EntityID newId = sceneManager.CreateEntity("Light", parent); + selectionManager.SetSelectedEntity(newId); + } + + ImGui::Separator(); + + if (ImGui::MenuItem("Cube")) { + EntityID newId = sceneManager.CreateEntity("Cube", parent); + sceneManager.GetEntity(newId)->AddComponent(); + sceneManager.GetEntity(newId)->AddComponent()->meshName = "Cube"; + selectionManager.SetSelectedEntity(newId); + } + + if (ImGui::MenuItem("Sphere")) { + EntityID newId = sceneManager.CreateEntity("Sphere", parent); + sceneManager.GetEntity(newId)->AddComponent(); + sceneManager.GetEntity(newId)->AddComponent()->meshName = "Sphere"; + selectionManager.SetSelectedEntity(newId); + } + + if (ImGui::MenuItem("Plane")) { + EntityID newId = sceneManager.CreateEntity("Plane", parent); + sceneManager.GetEntity(newId)->AddComponent(); + sceneManager.GetEntity(newId)->AddComponent()->meshName = "Plane"; + selectionManager.SetSelectedEntity(newId); + } +} + +void HierarchyPanel::HandleDragDrop(EntityID id) { + auto& sceneManager = SceneManager::Get(); + + if (ImGui::BeginDragDropSource(ImGuiDragDropFlags_None)) { + m_dragSource = id; + ImGui::SetDragDropPayload("ENTITY_ID", &id, sizeof(EntityID)); + const Entity* entity = sceneManager.GetEntity(id); + if (entity) { + ImGui::Text("%s", entity->name.c_str()); + } + ImGui::EndDragDropSource(); + } + + if (ImGui::BeginDragDropTarget()) { + if (const ImGuiPayload* payload = ImGui::AcceptDragDropPayload("ENTITY_ID")) { + EntityID sourceId = *(const EntityID*)payload->Data; + if (sourceId != id && sourceId != INVALID_ENTITY) { + const Entity* targetEntity = sceneManager.GetEntity(id); + const Entity* sourceEntity = sceneManager.GetEntity(sourceId); + + bool isValidMove = true; + EntityID checkParent = 
targetEntity ? targetEntity->parent : INVALID_ENTITY; + while (checkParent != INVALID_ENTITY) { + if (checkParent == sourceId) { + isValidMove = false; + break; + } + const Entity* parentEntity = sceneManager.GetEntity(checkParent); + checkParent = parentEntity ? parentEntity->parent : INVALID_ENTITY; + } + + if (isValidMove && sourceEntity && sourceEntity->parent != id) { + sceneManager.MoveEntity(sourceId, id); + } + } + } + ImGui::EndDragDropTarget(); + } +} + +void HierarchyPanel::HandleKeyboardShortcuts() { + auto& sceneManager = SceneManager::Get(); + auto& selectionManager = SelectionManager::Get(); + + EntityID selectedId = selectionManager.GetSelectedEntity(); + + if (ImGui::IsWindowFocused()) { + if (ImGui::IsKeyPressed(ImGuiKey_Delete)) { + if (selectedId != INVALID_ENTITY) { + sceneManager.DeleteEntity(selectedId); + } + } + + if (ImGui::IsKeyPressed(ImGuiKey_F2)) { + if (selectedId != INVALID_ENTITY) { + const Entity* entity = sceneManager.GetEntity(selectedId); + if (entity) { + m_renaming = true; + m_renamingEntity = selectedId; + strcpy_s(m_renameBuffer, entity->name.c_str()); + m_renameJustStarted = true; + } + } + } + + ImGuiIO& io = ImGui::GetIO(); + if (io.KeyCtrl) { + if (ImGui::IsKeyPressed(ImGuiKey_C)) { + if (selectedId != INVALID_ENTITY) { + sceneManager.CopyEntity(selectedId); + } + } + + if (ImGui::IsKeyPressed(ImGuiKey_V)) { + if (sceneManager.HasClipboardData()) { + sceneManager.PasteEntity(selectedId); + } + } + + if (ImGui::IsKeyPressed(ImGuiKey_D)) { + if (selectedId != INVALID_ENTITY) { + EntityID newId = sceneManager.DuplicateEntity(selectedId); + if (newId != INVALID_ENTITY) { + selectionManager.SetSelectedEntity(newId); + } + } + } + } + } +} + +bool HierarchyPanel::PassesFilter(EntityID id, const std::string& filter) { + auto& sceneManager = SceneManager::Get(); + const Entity* entity = sceneManager.GetEntity(id); + if (!entity) return false; + + if (entity->name.find(filter) != std::string::npos) { + return true; + } + + for 
(EntityID childId : entity->children) { + if (PassesFilter(childId, filter)) { + return true; + } + } + + return false; +} + +} \ No newline at end of file diff --git a/MVS/ui/src/panels/HierarchyPanel.h b/MVS/ui/src/panels/HierarchyPanel.h new file mode 100644 index 00000000..2b1ab3ed --- /dev/null +++ b/MVS/ui/src/panels/HierarchyPanel.h @@ -0,0 +1,35 @@ +#pragma once + +#include "Panel.h" +#include "Core/Event.h" +#include "Core/GameObject.h" + +namespace UI { + +class HierarchyPanel : public Panel { +public: + HierarchyPanel(); + ~HierarchyPanel(); + + void Render() override; + +private: + void RenderSearchBar(); + void RenderEntity(EntityID id, const std::string& filter); + void RenderContextMenu(EntityID id); + void RenderCreateMenu(EntityID parent); + void HandleDragDrop(EntityID id); + void HandleKeyboardShortcuts(); + bool PassesFilter(EntityID id, const std::string& filter); + + Event::HandlerID m_selectionHandlerId = 0; + + char m_searchBuffer[256] = ""; + bool m_renaming = false; + EntityID m_renamingEntity = INVALID_ENTITY; + char m_renameBuffer[256] = ""; + bool m_renameJustStarted = false; + EntityID m_dragSource = INVALID_ENTITY; +}; + +} \ No newline at end of file diff --git a/MVS/ui/src/panels/InspectorPanel.cpp b/MVS/ui/src/panels/InspectorPanel.cpp new file mode 100644 index 00000000..b3899edf --- /dev/null +++ b/MVS/ui/src/panels/InspectorPanel.cpp @@ -0,0 +1,94 @@ +#include "InspectorPanel.h" +#include "Managers/SceneManager.h" +#include "Managers/SelectionManager.h" +#include +#include + +namespace UI { + +InspectorPanel::InspectorPanel() : Panel("Inspector") { + m_selectionHandlerId = SelectionManager::Get().OnSelectionChanged.Subscribe([this](EntityID) { + }); +} + +InspectorPanel::~InspectorPanel() { + SelectionManager::Get().OnSelectionChanged.Unsubscribe(m_selectionHandlerId); +} + +void InspectorPanel::Render() { + ImGui::Begin(m_name.c_str(), nullptr, ImGuiWindowFlags_None); + + EntityID selectedId = 
SelectionManager::Get().GetSelectedEntity(); + Entity* entity = SceneManager::Get().GetEntity(selectedId); + + if (entity) { + RenderEntity(entity); + } else { + ImGui::Text("No object selected"); + ImGui::TextColored(ImVec4(0.5f, 0.5f, 0.5f, 1.0f), "Select an object in Hierarchy"); + } + + ImGui::End(); +} + +void InspectorPanel::RenderEntity(Entity* entity) { + ImGui::Text("%s", entity->name.c_str()); + ImGui::Separator(); + + for (auto& component : entity->components) { + RenderComponent(component.get()); + ImGui::Separator(); + } +} + +void InspectorPanel::RenderComponent(Component* component) { + if (!component) return; + + const char* name = component->GetName().c_str(); + + std::string headerId = name + std::string("##") + std::to_string(reinterpret_cast(component)); + + if (ImGui::CollapsingHeader(headerId.c_str(), ImGuiTreeNodeFlags_DefaultOpen)) { + ImGui::Indent(10.0f); + + if (auto* transform = dynamic_cast(component)) { + ImGui::Text("Position"); + ImGui::SameLine(80); + ImGui::SetNextItemWidth(180); + ImGui::DragFloat3("##Position", transform->position, 0.1f); + + ImGui::Text("Rotation"); + ImGui::SameLine(80); + ImGui::SetNextItemWidth(180); + ImGui::DragFloat3("##Rotation", transform->rotation, 1.0f); + + ImGui::Text("Scale"); + ImGui::SameLine(80); + ImGui::SetNextItemWidth(180); + ImGui::DragFloat3("##Scale", transform->scale, 0.1f); + } + else if (auto* meshRenderer = dynamic_cast(component)) { + char materialBuffer[256] = {}; + strncpy_s(materialBuffer, meshRenderer->materialName.c_str(), sizeof(materialBuffer) - 1); + ImGui::Text("Material"); + ImGui::SameLine(80); + ImGui::SetNextItemWidth(180); + if (ImGui::InputText("##Material", materialBuffer, sizeof(materialBuffer))) { + meshRenderer->materialName = materialBuffer; + } + + char meshBuffer[256] = {}; + strncpy_s(meshBuffer, meshRenderer->meshName.c_str(), sizeof(meshBuffer) - 1); + ImGui::Text("Mesh"); + ImGui::SameLine(80); + ImGui::SetNextItemWidth(180); + if (ImGui::InputText("##Mesh", 
meshBuffer, sizeof(meshBuffer))) { + meshRenderer->meshName = meshBuffer; + } + } + + ImGui::Unindent(10.0f); + } +} + +} \ No newline at end of file diff --git a/MVS/ui/src/panels/InspectorPanel.h b/MVS/ui/src/panels/InspectorPanel.h new file mode 100644 index 00000000..74945608 --- /dev/null +++ b/MVS/ui/src/panels/InspectorPanel.h @@ -0,0 +1,23 @@ +#pragma once + +#include "Panel.h" +#include "Core/Event.h" +#include "Core/GameObject.h" + +namespace UI { + +class InspectorPanel : public Panel { +public: + InspectorPanel(); + ~InspectorPanel(); + + void Render() override; + +private: + void RenderEntity(Entity* entity); + void RenderComponent(Component* component); + + Event::HandlerID m_selectionHandlerId = 0; +}; + +} \ No newline at end of file diff --git a/MVS/ui/src/panels/MenuBar.cpp b/MVS/ui/src/panels/MenuBar.cpp new file mode 100644 index 00000000..9376a06f --- /dev/null +++ b/MVS/ui/src/panels/MenuBar.cpp @@ -0,0 +1,55 @@ +#include "MenuBar.h" +#include <imgui.h> + +namespace UI { + +MenuBar::MenuBar() : Panel("MenuBar") {} + +void MenuBar::Render() { + if (ImGui::BeginMainMenuBar()) { + ShowFileMenu(); + ShowEditMenu(); + ShowViewMenu(); + ShowHelpMenu(); + ImGui::EndMainMenuBar(); + } +} + +void MenuBar::ShowFileMenu() { + if (ImGui::BeginMenu("File")) { + if (ImGui::MenuItem("New Scene", "Ctrl+N")) {} + if (ImGui::MenuItem("Open Scene", "Ctrl+O")) {} + if (ImGui::MenuItem("Save Scene", "Ctrl+S")) {} + ImGui::Separator(); + if (ImGui::MenuItem("Exit", "Alt+F4")) {} + ImGui::EndMenu(); + } +} + +void MenuBar::ShowEditMenu() { + if (ImGui::BeginMenu("Edit")) { + if (ImGui::MenuItem("Undo", "Ctrl+Z")) {} + if (ImGui::MenuItem("Redo", "Ctrl+Y")) {} + ImGui::Separator(); + if (ImGui::MenuItem("Cut", "Ctrl+X")) {} + if (ImGui::MenuItem("Copy", "Ctrl+C")) {} + if (ImGui::MenuItem("Paste", "Ctrl+V")) {} + ImGui::EndMenu(); + } +} + +void MenuBar::ShowViewMenu() { + if (ImGui::BeginMenu("View")) { + if (ImGui::MenuItem("Reset Layout")) {} + ImGui::EndMenu(); + } +} + 
+void MenuBar::ShowHelpMenu() { + if (ImGui::BeginMenu("Help")) { + if (ImGui::MenuItem("About")) {} + ImGui::EndMenu(); + } +} + +} \ No newline at end of file diff --git a/MVS/ui/src/panels/MenuBar.h b/MVS/ui/src/panels/MenuBar.h new file mode 100644 index 00000000..b4481422 --- /dev/null +++ b/MVS/ui/src/panels/MenuBar.h @@ -0,0 +1,19 @@ +#pragma once + +#include "Panel.h" + +namespace UI { + +class MenuBar : public Panel { +public: + MenuBar(); + void Render() override; + +private: + void ShowFileMenu(); + void ShowEditMenu(); + void ShowViewMenu(); + void ShowHelpMenu(); +}; + +} \ No newline at end of file diff --git a/MVS/ui/src/panels/Panel.cpp b/MVS/ui/src/panels/Panel.cpp new file mode 100644 index 00000000..bd3d0c3b --- /dev/null +++ b/MVS/ui/src/panels/Panel.cpp @@ -0,0 +1,4 @@ +#include "Panel.h" + +namespace UI { +} \ No newline at end of file diff --git a/MVS/ui/src/panels/Panel.h b/MVS/ui/src/panels/Panel.h new file mode 100644 index 00000000..81c4b501 --- /dev/null +++ b/MVS/ui/src/panels/Panel.h @@ -0,0 +1,25 @@ +#pragma once + +#include <string> +#include <memory> + +namespace UI { + +class Panel { +public: + Panel(const std::string& name) : m_name(name), m_isOpen(true) {} + virtual ~Panel() = default; + + virtual void Render() = 0; + + const std::string& GetName() const { return m_name; } + bool IsOpen() const { return m_isOpen; } + void SetOpen(bool open) { m_isOpen = open; } + void Toggle() { m_isOpen = !m_isOpen; } + +protected: + std::string m_name; + bool m_isOpen; +}; + +} \ No newline at end of file diff --git a/MVS/ui/src/panels/ProjectPanel.cpp b/MVS/ui/src/panels/ProjectPanel.cpp new file mode 100644 index 00000000..a3b2e6e9 --- /dev/null +++ b/MVS/ui/src/panels/ProjectPanel.cpp @@ -0,0 +1,297 @@ +#include "ProjectPanel.h" +#include "Managers/ProjectManager.h" +#include "Core/AssetItem.h" +#include <imgui.h> +#include <algorithm> + +namespace UI { + +const char* DRAG_DROP_TYPE = "ASSET_ITEM"; + +ProjectPanel::ProjectPanel() : Panel("Project") { +} + +void 
ProjectPanel::Initialize(const std::string& projectPath) { + ProjectManager::Get().Initialize(projectPath); +} + +void ProjectPanel::Render() { + const ImGuiPayload* payload = ImGui::GetDragDropPayload(); + if (payload && payload->IsDataType(DRAG_DROP_TYPE)) { + m_draggingPath = (const char*)payload->Data; + } else if (!ImGui::IsMouseDown(0)) { + m_draggingPath.clear(); + } + + ImGui::Begin(m_name.c_str(), nullptr, ImGuiWindowFlags_None); + + auto& manager = ProjectManager::Get(); + + bool canGoBack = manager.CanNavigateBack(); + ImGui::BeginDisabled(!canGoBack); + if (ImGui::Button("<")) { + if (canGoBack) { + manager.NavigateBack(); + } + } + ImGui::EndDisabled(); + ImGui::SameLine(); + + ImGui::PushStyleColor(ImGuiCol_Button, ImVec4(0, 0, 0, 0)); + ImGui::PushStyleColor(ImGuiCol_ButtonHovered, ImVec4(0, 0, 0, 0)); + size_t pathDepth = manager.GetPathDepth(); + for (size_t i = 0; i < pathDepth; i++) { + if (i > 0) { + ImGui::SameLine(); + ImGui::Text("/"); + ImGui::SameLine(); + } + std::string name = manager.GetPathName(i); + if (i < pathDepth - 1) { + if (ImGui::Button(name.c_str())) { + manager.NavigateToIndex(i); + } + } else { + ImGui::Text("%s", name.c_str()); + } + } + ImGui::PopStyleColor(2); + + ImGui::SameLine(); + ImGui::SetCursorPosX(ImGui::GetWindowWidth() - 80.0f); + if (ImGui::Button("Refresh")) { + manager.RefreshCurrentFolder(); + } + + ImGui::Separator(); + + ImGui::PushItemWidth(-1); + ImGui::InputTextWithHint("##Search", "Search...", m_searchBuffer, sizeof(m_searchBuffer)); + ImGui::PopItemWidth(); + + ImGui::Separator(); + + ImGui::PushStyleVar(ImGuiStyleVar_ItemSpacing, ImVec2(10, 10)); + + float buttonWidth = 80.0f; + float padding = 10.0f; + float panelWidth = ImGui::GetContentRegionAvail().x; + int columns = (int)(panelWidth / (buttonWidth + padding)); + if (columns < 1) columns = 1; + + auto& items = manager.GetCurrentItems(); + std::string searchStr = m_searchBuffer; + int itemIndex = 0; + + for (int i = 0; i < (int)items.size(); i++) { 
+ if (!searchStr.empty()) { + if (items[i]->name.find(searchStr) == std::string::npos) { + continue; + } + } + + if (itemIndex > 0 && itemIndex % columns != 0) { + ImGui::SameLine(); + } + RenderAssetItem(items[i], itemIndex); + itemIndex++; + } + + ImGui::PopStyleVar(); + + if (ImGui::IsWindowHovered() && ImGui::IsMouseClicked(0) && !ImGui::IsAnyItemHovered()) { + manager.SetSelectedIndex(-1); + } + + if (ImGui::BeginPopup("ItemContextMenu")) { + if (m_contextMenuIndex >= 0 && m_contextMenuIndex < (int)items.size()) { + auto& item = items[m_contextMenuIndex]; + if (item->isFolder) { + if (ImGui::MenuItem("Open")) { + manager.NavigateToFolder(item); + } + ImGui::Separator(); + } + if (ImGui::MenuItem("Delete")) { + manager.DeleteItem(m_contextMenuIndex); + m_contextMenuIndex = -1; + } + } + ImGui::EndPopup(); + } + + if (ImGui::IsWindowHovered() && ImGui::IsMouseClicked(1) && !ImGui::IsAnyItemHovered()) { + ImGui::OpenPopup("EmptyContextMenu"); + } + + if (ImGui::BeginPopup("EmptyContextMenu")) { + if (ImGui::MenuItem("Create Folder")) { + m_showCreateFolderPopup = true; + strcpy_s(m_newFolderName, "NewFolder"); + } + ImGui::Separator(); + if (ImGui::MenuItem("Refresh")) { + manager.RefreshCurrentFolder(); + } + ImGui::EndPopup(); + } + + ImGui::End(); + + if (m_showCreateFolderPopup) { + ImGui::OpenPopup("Create Folder"); + m_showCreateFolderPopup = false; + } + + if (ImGui::BeginPopupModal("Create Folder", nullptr, ImGuiWindowFlags_AlwaysAutoResize)) { + ImGui::InputText("Name", m_newFolderName, sizeof(m_newFolderName)); + ImGui::Separator(); + if (ImGui::Button("Create", ImVec2(80, 0))) { + CreateNewFolder(m_newFolderName); + ImGui::CloseCurrentPopup(); + } + ImGui::SameLine(); + if (ImGui::Button("Cancel", ImVec2(80, 0))) { + ImGui::CloseCurrentPopup(); + } + ImGui::EndPopup(); + } +} + +void ProjectPanel::RenderAssetItem(const AssetItemPtr& item, int index) { + auto& manager = ProjectManager::Get(); + bool isSelected = (manager.GetSelectedIndex() == index); + 
+ ImGui::PushID(index); + + if (isSelected) { + ImGui::PushStyleColor(ImGuiCol_Button, ImVec4(0.40f, 0.40f, 0.40f, 0.50f)); + } else { + ImGui::PushStyleColor(ImGuiCol_Button, ImVec4(0, 0, 0, 0)); + ImGui::PushStyleColor(ImGuiCol_ButtonHovered, ImVec4(0.30f, 0.30f, 0.30f, 0.40f)); + } + + ImVec2 buttonSize(80.0f, 90.0f); + + if (ImGui::Button("##AssetBtn", buttonSize)) { + manager.SetSelectedIndex(index); + } + + bool doubleClicked = false; + if (ImGui::IsItemHovered() && ImGui::IsMouseDoubleClicked(0)) { + doubleClicked = true; + } + + bool openContextMenu = false; + if (ImGui::IsItemClicked(1)) { + manager.SetSelectedIndex(index); + m_contextMenuIndex = index; + openContextMenu = true; + } + + if (isSelected) { + ImGui::PopStyleColor(); + } else { + ImGui::PopStyleColor(2); + } + + ImVec2 min = ImGui::GetItemRectMin(); + ImVec2 max = ImVec2(min.x + buttonSize.x, min.y + buttonSize.y); + ImDrawList* drawList = ImGui::GetWindowDrawList(); + + if (!m_draggingPath.empty() && item->fullPath == m_draggingPath) { + drawList->AddRectFilled(min, max, IM_COL32(0, 0, 0, 60), 0.0f); + } + + ImU32 iconColor; + if (item->isFolder) { + iconColor = IM_COL32(200, 180, 100, 255); + } else if (item->type == "Texture") { + iconColor = IM_COL32(150, 200, 150, 255); + } else if (item->type == "Model") { + iconColor = IM_COL32(150, 150, 200, 255); + } else if (item->type == "Script") { + iconColor = IM_COL32(200, 150, 150, 255); + } else if (item->type == "Scene") { + iconColor = IM_COL32(200, 200, 150, 255); + } else { + iconColor = IM_COL32(100, 150, 200, 255); + } + + float iconSize = 40.0f; + ImVec2 iconMin(min.x + (80.0f - iconSize) * 0.5f, min.y + 10.0f); + ImVec2 iconMax(iconMin.x + iconSize, iconMin.y + iconSize); + drawList->AddRectFilled(iconMin, iconMax, iconColor, 4.0f); + + ImVec4 textColor = isSelected ? 
ImVec4(1.0f, 1.0f, 1.0f, 1.0f) : ImVec4(0.8f, 0.8f, 0.8f, 1.0f); + ImVec2 textSize = ImGui::CalcTextSize(item->name.c_str()); + float textOffset = std::max(0.0f, (80.0f - textSize.x) * 0.5f); + + ImGui::PushClipRect(min, ImVec2(min.x + 80.0f, min.y + 90.0f), true); + drawList->AddText(ImVec2(min.x + textOffset, min.y + 60.0f), ImGui::GetColorU32(textColor), item->name.c_str()); + ImGui::PopClipRect(); + + if (item->isFolder) { + if (ImGui::BeginDragDropTarget()) { + if (const ImGuiPayload* payload = ImGui::AcceptDragDropPayload(DRAG_DROP_TYPE)) { + const char* draggedPath = (const char*)payload->Data; + std::string sourcePath(draggedPath); + manager.MoveItem(sourcePath, item->fullPath); + } + ImGui::EndDragDropTarget(); + } + + if (ImGui::IsItemHovered(ImGuiHoveredFlags_AllowWhenBlockedByActiveItem)) { + ImDrawList* hoverDrawList = ImGui::GetWindowDrawList(); + hoverDrawList->AddRect(min, ImVec2(min.x + buttonSize.x, min.y + buttonSize.y), IM_COL32(255, 255, 255, 80), 4.0f); + } + } + + if (!item->fullPath.empty()) { + if (ImGui::BeginDragDropSource(ImGuiDragDropFlags_None)) { + ImGui::SetDragDropPayload(DRAG_DROP_TYPE, item->fullPath.c_str(), item->fullPath.length() + 1); + + ImU32 iconColor; + if (item->isFolder) { + iconColor = IM_COL32(200, 180, 100, 100); + } else if (item->type == "Texture") { + iconColor = IM_COL32(150, 200, 150, 100); + } else if (item->type == "Model") { + iconColor = IM_COL32(150, 150, 200, 100); + } else if (item->type == "Script") { + iconColor = IM_COL32(200, 150, 150, 100); + } else if (item->type == "Scene") { + iconColor = IM_COL32(200, 200, 150, 100); + } else { + iconColor = IM_COL32(100, 150, 200, 100); + } + + ImVec2 previewMin = ImGui::GetMousePos(); + ImVec2 previewMax = ImVec2(previewMin.x + 40, previewMin.y + 40); + ImGui::GetForegroundDrawList()->AddRectFilled(previewMin, previewMax, iconColor, 4.0f); + + ImGui::EndDragDropSource(); + } + } + + if (doubleClicked && item->isFolder) { + manager.NavigateToFolder(item); + } + + 
ImGui::PopID(); + + if (openContextMenu) { + ImGui::OpenPopup("ItemContextMenu"); + } +} + +void ProjectPanel::CreateNewFolder(const std::string& name) { + auto& manager = ProjectManager::Get(); + manager.CreateFolder(name); +} + +bool ProjectPanel::HandleDrop(const AssetItemPtr& targetFolder) { + return false; +} + +} \ No newline at end of file diff --git a/MVS/ui/src/panels/ProjectPanel.h b/MVS/ui/src/panels/ProjectPanel.h new file mode 100644 index 00000000..5aea84d7 --- /dev/null +++ b/MVS/ui/src/panels/ProjectPanel.h @@ -0,0 +1,26 @@ +#pragma once + +#include "Panel.h" +#include "Core/AssetItem.h" + +namespace UI { + +class ProjectPanel : public Panel { +public: + ProjectPanel(); + void Render() override; + void Initialize(const std::string& projectPath); + +private: + void RenderAssetItem(const AssetItemPtr& item, int index); + void CreateNewFolder(const std::string& name); + bool HandleDrop(const AssetItemPtr& targetFolder); + + char m_searchBuffer[256] = ""; + bool m_showCreateFolderPopup = false; + char m_newFolderName[256] = "NewFolder"; + int m_contextMenuIndex = -1; + std::string m_draggingPath; +}; + +} \ No newline at end of file diff --git a/MVS/ui/src/panels/SceneViewPanel.cpp b/MVS/ui/src/panels/SceneViewPanel.cpp new file mode 100644 index 00000000..6fc8d98d --- /dev/null +++ b/MVS/ui/src/panels/SceneViewPanel.cpp @@ -0,0 +1,54 @@ +#include "SceneViewPanel.h" +#include <imgui.h> +#include <cmath> + +namespace UI { + +SceneViewPanel::SceneViewPanel() : Panel("Scene") {} + +void SceneViewPanel::Render() { + ImGui::Begin(m_name.c_str(), nullptr, ImGuiWindowFlags_None); + + ImVec2 canvasSize = ImGui::GetContentRegionAvail(); + ImDrawList* drawList = ImGui::GetWindowDrawList(); + ImVec2 canvasPos = ImGui::GetCursorScreenPos(); + + ImU32 bgColor = IM_COL32(30, 30, 30, 255); + drawList->AddRectFilled(canvasPos, ImVec2(canvasPos.x + canvasSize.x, canvasPos.y + canvasSize.y), bgColor); + + RenderGrid(); + + ImGui::End(); +} + +void SceneViewPanel::RenderGrid() { + 
ImDrawList* drawList = ImGui::GetWindowDrawList(); + ImVec2 canvasPos = ImGui::GetCursorScreenPos(); + ImVec2 canvasSize = ImGui::GetContentRegionAvail(); + + float gridSize = 50.0f; + ImU32 gridColor = IM_COL32(50, 50, 50, 255); + + for (float x = fmodf(0, gridSize); x < canvasSize.x; x += gridSize) { + drawList->AddLine( + ImVec2(canvasPos.x + x, canvasPos.y), + ImVec2(canvasPos.x + x, canvasPos.y + canvasSize.y), + gridColor + ); + } + + for (float y = fmodf(0, gridSize); y < canvasSize.y; y += gridSize) { + drawList->AddLine( + ImVec2(canvasPos.x, canvasPos.y + y), + ImVec2(canvasPos.x + canvasSize.x, canvasPos.y + y), + gridColor + ); + } + + const char* label = "Scene View"; + ImVec2 labelSize = ImGui::CalcTextSize(label); + ImVec2 labelPos(canvasPos.x + 10, canvasPos.y + 10); + drawList->AddText(labelPos, IM_COL32(100, 100, 100, 255), label); +} + +} \ No newline at end of file diff --git a/MVS/ui/src/panels/SceneViewPanel.h b/MVS/ui/src/panels/SceneViewPanel.h new file mode 100644 index 00000000..d81c47ec --- /dev/null +++ b/MVS/ui/src/panels/SceneViewPanel.h @@ -0,0 +1,16 @@ +#pragma once + +#include "Panel.h" + +namespace UI { + +class SceneViewPanel : public Panel { +public: + SceneViewPanel(); + void Render() override; + +private: + void RenderGrid(); +}; + +} \ No newline at end of file diff --git a/README.md b/README.md index 2153790a..4696d01f 100644 --- a/README.md +++ b/README.md @@ -1,37 +1,77 @@ -# XCVolumeRenderer +# XCEngine -基于 DirectX 12 的体积渲染器,使用 NanoVDB 格式实现云、烟雾等体积数据的实时渲染。 +基于 DirectX 12 的渲染引擎项目,包含体积渲染器、基础渲染示例和 Unity 风格编辑器 UI。 + +## 项目概述 + +XCEngine 是一个正在开发中的图形渲染引擎,使用 DirectX 12 作为底层渲染 API。项目采用模块化设计,包含多个独立演示项目(MVS - Multiple Version Samples)。 ## 技术栈 -- **渲染API**: DirectX 12 +- **渲染 API**: DirectX 12 - **语言**: C++17 - **构建系统**: CMake -- **依赖库**: DirectX 12 SDK, stb_image, NanoVDB +- **UI 框架**: ImGui ## 项目结构 ``` -XCVolumeRenderer/ -├── main.cpp # 主程序入口 -├── BattleFireDirect.cpp/h # DirectX 12 核心渲染实现 -├── NanoVDBLoader.cpp/h # NanoVDB 体积数据加载器 
-├── StaticMeshComponent.cpp/h # 静态网格组件 -├── Utils.cpp/h # 工具函数 -├── stbi/ # 图像加载库 -├── Res/ -│ ├── Shader/ # HLSL 着色器 -│ │ ├── volume.hlsl # 体积渲染着色器 -│ │ ├── gs.hlsl # 几何着色器 -│ │ └── PNanoVDB.hlsl # NanoVDB GPU 解析 -│ ├── Model/ # 模型文件 -│ ├── Image/ # 纹理图片 -│ └── NanoVDB/ # NanoVDB 体积数据 -├── CMakeLists.txt # CMake 构建配置 -└── build/ # 构建目录 +XCEngine/ +├── MVS/ # 多个示例版本 +│ ├── HelloEarth/ # 基础渲染示例 +│ │ ├── main.cpp # 程序入口 +│ │ ├── BattleFireDirect.cpp/h # DirectX 12 核心 +│ │ ├── StaticMeshComponent.cpp/h # 静态网格组件 +│ │ ├── Utils.cpp/h # 工具函数 +│ │ ├── stbi/ # 图像加载库 +│ │ └── Res/ # 资源文件 +│ │ ├── Shader/ # HLSL 着色器 +│ │ ├── Model/ # 模型文件 +│ │ └── Image/ # 纹理图片 +│ │ +│ ├── VolumeRenderer/ # 体积渲染器 +│ │ ├── main.cpp # 主程序入口 +│ │ ├── BattleFireDirect.cpp/h # DirectX 12 核心渲染 +│ │ ├── NanoVDBLoader.cpp/h # NanoVDB 体积数据加载 +│ │ ├── StaticMeshComponent.cpp/h # 静态网格组件 +│ │ ├── Utils.cpp/h # 工具函数 +│ │ ├── stbi/ # 图像加载库 +│ │ ├── CMakeLists.txt # CMake 构建配置 +│ │ ├── Res/ # 资源文件 +│ │ │ ├── Shader/ # HLSL 着色器 +│ │ │ ├── Model/ # 模型文件 +│ │ │ ├── Image/ # 纹理图片 +│ │ │ └── NanoVDB/ # NanoVDB 体积数据 +│ │ └── README.md # 详细文档 +│ │ +│ └── ui/ # Unity 风格编辑器 UI +│ ├── src/ +│ │ ├── main.cpp # 程序入口 +│ │ ├── Application.cpp/h # 应用主类 +│ │ ├── Theme.cpp/h # 主题系统 +│ │ ├── Core/ # 核心类 +│ │ ├── Managers/ # 管理系统 +│ │ │ ├── LogSystem # 日志系统 +│ │ │ ├── ProjectManager # 项目管理 +│ │ │ └── SceneManager # 场景管理 +│ │ └── panels/ # UI 面板 +│ │ ├── HierarchyPanel # 层级面板 +│ │ ├── InspectorPanel # 检查器面板 +│ │ ├── SceneViewPanel # 场景视图 +│ │ ├── GameViewPanel # 游戏视图 +│ │ ├── ProjectPanel # 项目面板 +│ │ ├── ConsolePanel # 控制台面板 +│ │ └── MenuBar # 菜单栏 +│ ├── bin/Release/ # 输出目录 +│ └── CMakeLists.txt +│ +└── docs/ # 设计文档 + ├── XCVolumeRenderer渲染引擎架构设计.md + ├── XCGameEngine架构设计.md + └── ... 
``` -## 构建方法 +## 快速开始 ### 前置要求 @@ -39,49 +79,52 @@ XCVolumeRenderer/ - Visual Studio 2019 或更高版本 - CMake 3.15+ -### 构建步骤 +### 构建项目 + +每个子项目都可以独立构建,以 HelloEarth 为例: ```bash -# 创建并进入构建目录 +cd MVS/HelloEarth mkdir build && cd build - -# 配置项目 cmake .. - -# 编译 cmake --build . --config Release ``` -### 运行 +## 各模块说明 -编译完成后,运行 `XCVolumeRenderer.exe` 或使用 `run.bat` +### HelloEarth -## 功能特性 - -### 核心渲染 -- DirectX 12 渲染管线 -- 几何着色器(GS)支持 +基础的 DirectX 12 渲染示例,展示如何: +- 初始化 DirectX 12 渲染环境 +- 加载和渲染静态网格 +- 使用几何着色器 - 纹理映射 -- 常量缓冲区(CBV/SRV) +- 常量缓冲区更新 -### 体积渲染 -- NanoVDB 格式支持 -- 光线步进(Ray Marching) +### VolumeRenderer + +基于 NanoVDB 的体积渲染器,支持: +- NanoVDB 格式体积数据加载 +- 光线步进(Ray Marching)渲染 - HDDA 空间跳跃加速 -- 体积阴影(Volumetric Shadow) -- 指数步长抖动采样 -- Gamma 校正 +- 体积阴影 +- 多种渲染参数配置 -### 参数配置 -- DensityScale - 密度缩放 -- StepSize - 步进大小 -- MaxSteps - 最大步数 -- LightDir - 光照方向 -- LightSamples - 阴影采样数 +### ui -## 资源说明 +Unity 风格的编辑器 UI,包含: +- 层级面板(Hierarchy Panel) +- 检查器面板(Inspector Panel) +- 场景视图(Scene View) +- 游戏视图(Game View) +- 项目面板(Project Panel) +- 控制台面板(Console Panel) +- 日志系统 -- 着色器文件位于 `Res/Shader/` 目录 -- 模型文件为 `.lhsm` 格式 -- 纹理支持 PNG、JPG 格式 -- 体积数据支持 NanoVDB `.nvdb` 格式 \ No newline at end of file +## 文档 + +更多详细设计文档请参考 `docs/` 目录。 + +## 许可证 + +MIT License