Overview
cwl-engine-v1
I've already forgotten most of it.
Wireframe cube
commit 72a0221e954ab7302be64538a8e5cd9fc0180e9a
Merge: 6ba285a d818a65
Author: cwl\chenweilin <weilin1013@qq.com>
Date: Sun Nov 19 06:08:34 2023 +0800
Merge branch 'test'
An early runnable version.
When run, a wireframe cube appears on screen; there is no camera and nothing can move.
The main entry point is this WinMain:
Source/CWLEngine/Engine/CWLEngine.cpp
#include "EngineMinimal.h"
#include "Timer.h"
int Init(CEngine *InEngine, HINSTANCE hInstance, HINSTANCE hPrevInstance, LPSTR lpCmdLine, int nCmdShow) {
#if defined(_WIN32)
FWinMainCmdParams CmdParams(hInstance, hPrevInstance, lpCmdLine, nCmdShow);
#endif
// PreInitialize
int ReturnValue = InEngine->PreInitialize(
#if defined(_WIN32)
CmdParams
#endif
);
if (ReturnValue != 0) {
ENGINE_LOG_ERROR("PreInitialize Fail");
return ReturnValue;
}
// Initialize
ReturnValue = InEngine->Initialize(
#if defined(_WIN32)
CmdParams
#endif
);
if (ReturnValue != 0) {
ENGINE_LOG_ERROR("Initialize Fail");
return ReturnValue;
}
// PostInitialize
ReturnValue = InEngine->PostInitialize();
if (ReturnValue != 0) {
ENGINE_LOG_ERROR("PostInitialize Fail");
return ReturnValue;
}
return ReturnValue;
}
void Tick(CEngine *InEngine, float DeltaTime) {
InEngine->Tick(DeltaTime);
Sleep(30);
}
int Exit(CEngine *InEngine) {
int ReturnValue = InEngine->PreExit();
if (ReturnValue != 0) {
ENGINE_LOG_ERROR("PreExit Fail");
return ReturnValue;
}
ReturnValue = InEngine->Exit();
if (ReturnValue != 0) {
ENGINE_LOG_ERROR("Exit Fail");
return ReturnValue;
}
ReturnValue = InEngine->PostExit();
if (ReturnValue != 0) {
ENGINE_LOG_ERROR("PostExit Fail");
return ReturnValue;
}
if (GEngine != nullptr) {
delete GEngine;
GEngine = nullptr;
}
return ReturnValue;
}
CEngine *GEngine = nullptr;
std::unique_ptr<FTimer> Timer = std::make_unique<FTimer>();
int WINAPI WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPSTR lpCmdLine, int nCmdShow) {
int ReturnValue = 0;
GEngine = FEngineFactory::CreateEngine();
if (!GEngine->InitLogSystem())
{
return 1;
}
ENGINE_LOG_INFO("Engine Start. Version: {}", "1.0")
if (GEngine == nullptr) {
ENGINE_LOG_ERROR("Engine Create Fail")
return 1;
}
ReturnValue = Init(GEngine, hInstance, hPrevInstance, lpCmdLine, nCmdShow);
if (ReturnValue != 0) {
ENGINE_LOG_ERROR("Engine Init Fail")
return ReturnValue;
}
ENGINE_LOG_INFO("Engine Init Success")
MSG EngineMsg = {nullptr};
Timer->Reset();
while (EngineMsg.message != WM_QUIT) {
if (PeekMessage(&EngineMsg, nullptr, 0, 0, PM_REMOVE)) {
TranslateMessage(&EngineMsg);
DispatchMessage(&EngineMsg);
} else {
Timer->Tick();
Tick(GEngine, Timer->GetDeltaTime());
}
}
ReturnValue = Exit(GEngine);
return ReturnValue;
}
int main(int argc, char *argv[]) {
return 0;
}
The code is decent, but let's get to the core parts faster; I won't show complete code from here on.
Next step: the core.
CWinEngine's PreInitialize, Initialize, PostInitialize, Tick, PreExit, Exit, PostExit
- InitWindows: platform window initialization, saving the window handle.
- std::vector<CCoreMinimalObject*> GObjects; a global object array; every object derived from CCoreMinimalObject lives in it, making it easy to tick everything.
- CWorld: the global world object is created here.
Apart from that, the whole rendering flow lives in
CDirectXRenderingEngine's PreInitialize, Initialize, PostInitialize, Tick, PreExit, Exit, PostExit.
CWorld contains a Camera object; we'll look at that later.
Init
-
CDirectXRenderingEngine::PreInitialize
-
CDirectXRenderingEngine::Initialize
- CDirectXRenderingEngine::InitDirect3D
- DX debug layer
- Create the device
- Create the fence
- Create the command queue, command list, and command allocator
- Create the swap chain
- Create the RTV heap and the DSV heap (note: the RTV heap has the swap chain's size, 2, and the DSV heap has size 1)
- A heap is an array of descriptors, a block of memory that stores descriptors.
- Descriptor and view are synonyms.
- CDirectXRenderingEngine::PostInitDirect3D
- Initialize the swap chain and depth/stencil resources (ID3D12Resource)
- Create the Render Target View and Depth Stencil View (again: view and descriptor are synonyms)
- Submit the commands
- Set the viewport and scissor rectangle sizes
- PostInitDirect3D can be called again later, e.g. when the window is resized; in the Luna book this is called OnResize.
- CMeshMgr::Init()
- Builds the ProjectionMatrix ahead of time
- CDirectXRenderingEngine::InitDirect3D
-
CDirectXRenderingEngine::PostInitialize()
- CMeshMgr::CreateBoxMesh
- new FBoxMesh
- Calls FBoxMesh::CreateMesh to obtain an FMeshRenderingData, i.e. the vertex and index data DX needs
- Calls the Box object's BeginInit (empty)
- Calls BuildMesh(&MeshData);
- [There is quite a bit here; expanded below]
- Calls the Box object's Init (empty)
- CMeshMgr::CreateBoxMesh
-
CMeshMgr::BuildMesh
- Creates the CBVs used to upload the MVP matrices
- Root signature setup and creation
- PSO setup (see the sketch below)
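The PSO step is essentially filling in a D3D12_GRAPHICS_PIPELINE_STATE_DESC and calling CreateGraphicsPipelineState. A minimal sketch of that shape; the names here (Device, RootSignature, VertexShaderBlob, PixelShaderBlob, InputElementDesc, PSO) and the RTV/DSV formats are placeholders rather than this engine's exact members:
D3D12_GRAPHICS_PIPELINE_STATE_DESC PsoDesc = {};
PsoDesc.InputLayout = {InputElementDesc.data(), (UINT)InputElementDesc.size()};
PsoDesc.pRootSignature = RootSignature.Get();
PsoDesc.VS = {VertexShaderBlob->GetBufferPointer(), VertexShaderBlob->GetBufferSize()};
PsoDesc.PS = {PixelShaderBlob->GetBufferPointer(), PixelShaderBlob->GetBufferSize()};
PsoDesc.RasterizerState = CD3DX12_RASTERIZER_DESC(D3D12_DEFAULT);
PsoDesc.RasterizerState.FillMode = D3D12_FILL_MODE_WIREFRAME; // this early version draws a wireframe cube
PsoDesc.BlendState = CD3DX12_BLEND_DESC(D3D12_DEFAULT);
PsoDesc.DepthStencilState = CD3DX12_DEPTH_STENCIL_DESC(D3D12_DEFAULT);
PsoDesc.SampleMask = UINT_MAX;
PsoDesc.PrimitiveTopologyType = D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE;
PsoDesc.NumRenderTargets = 1;
PsoDesc.RTVFormats[0] = DXGI_FORMAT_R8G8B8A8_UNORM;  // assumed back buffer format
PsoDesc.DSVFormat = DXGI_FORMAT_D24_UNORM_S8_UINT;   // assumed depth/stencil format
PsoDesc.SampleDesc = {1, 0};
Device->CreateGraphicsPipelineState(&PsoDesc, IID_PPV_ARGS(PSO.GetAddressOf()));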
Tick
CDirectXRenderingEngine::Tick
- CMeshMgr::PreDraw
- ANALYSIS_HRESULT(GetD3dGraphicsCommandList()->Reset(GetCommandAllocator().Get(), PSO.Get()))
- Barrier Begin
- Some preparation
- RSSetViewports
- RSSetScissorRects
- ClearRenderTargetView
- ClearDepthStencilView
- OMSetRenderTargets
- Draw
- SetDescriptorHeaps: the descriptor heap (a descriptor array), the ConstantBufferViewHeap from earlier
- SetGraphicsRootSignature: the root signature
- IASetVertexBuffers: set the vertex buffer
- IASetIndexBuffer: set the index buffer
- IASetPrimitiveTopology: set the primitive topology
- SetGraphicsRootDescriptorTable
- DrawIndexedInstanced: only at this step does actual drawing start
- PostDraw
- Upload the MVP matrices
- Some preparation
- Barrier End
- Other: submit the commands, flip the swap chain buffers, wait for the GPU to finish (see the sketch below)
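Those last steps follow the standard D3D12 end-of-frame pattern: close and execute the command list, present, then wait on the fence. A rough sketch under that assumption, with CommandList, CommandQueue, SwapChain, Fence and the index/fence counters as stand-ins for the engine's members:
CommandList->Close();
ID3D12CommandList *Lists[] = {CommandList.Get()};
CommandQueue->ExecuteCommandLists(_countof(Lists), Lists);
SwapChain->Present(0, 0);                                   // flip the back buffers
CurrentBackBufferIndex = (CurrentBackBufferIndex + 1) % 2;  // swap chain size is 2
const UINT64 WaitValue = ++CurrentFenceValue;               // flush: block until the GPU reaches this point
CommandQueue->Signal(Fence.Get(), WaitValue);
if (Fence->GetCompletedValue() < WaitValue) {
    HANDLE Event = CreateEventEx(nullptr, nullptr, 0, EVENT_ALL_ACCESS);
    Fence->SetEventOnCompletion(WaitValue, Event);
    WaitForSingleObject(Event, INFINITE);
    CloseHandle(Event);
}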
Camera
PS D:\UE_Project\cwlengine> git log
commit cef3a0f4b0aa8bacdb59b0eeb4119395b50486cb (HEAD -> master)
Author: cwl\chenweilin <weilin1013@qq.com>
Date: Sat Nov 25 00:55:13 2023 +0800
9-finish
Picking the second runnable version.
When this version runs, besides the wireframe cube you can now control the camera's movement.
WinEngine
Starting from this layer, let's see what this class is.
Source/CWLEngine/Engine/Platform/Win/WinEngine.cpp
- CWinEngine::CWinEngine constructor
- CDirectXRenderingEngine *RenderingEngine; // new CDirectX12RenderingEngine()
- CWorld *World;
- CwlLibrary::FLogSystem *Logger{}; // a thin spdlog wrapper, copied from Games104
- HWND MainWindowsHandle{};
- Initialization
- CWinEngine::PreInitialize
- RenderingEngine->PreInitialize(InParams); // empty
- CWinEngine::Initialize
- InitWindows(InParams) // Windows window initialization; keeps the window handle
- RenderingEngine->Initialize(InParams);
- World = CreateObject(new CWorld());
- CWinEngine::PostInitialize
- RenderingEngine->PostInitialize();
- Calls the interface to create the meshes in the scene, e.g. the Box
- Submits the DX commands
- for (auto &Obj: GObjects)
- Obj->BeginInit();
- RenderingEngine->PostInitialize();
- CWinEngine::PostInitialize
- Tick
- for (auto &Obj: GObjects)
- Obj->Tick(DeltaTime);
- RenderingEngine->UpdateCalculations(DeltaTime, Info)
- RenderingEngine->Tick(DeltaTime);
- for (auto &Obj: GObjects)
CDirectXRenderingEngine
- Constructor and members
- CMeshMgr *MeshMgr;
- Everything else is DX-related: Factory, Device, Fence, Command, SwapChain, RTVHeap, DSVHeap...
- Initialization
- Initialize (the DX initialization was covered clearly in the previous part, so I won't repeat it)
- InitDirect3D();
- PostInitDirect3D();
- MeshMgr->Init();
- PostInitialize
- MeshMgr->CreateBoxMesh: creates the model in the scene
- Submit the commands
- Initialize (the DX initialization was covered clearly in the previous part, so I won't repeat it)
- Tick
- MeshMgr->PreDraw(DeltaTime);
- MeshMgr->Draw(DeltaTime);
- MeshMgr->PostDraw(DeltaTime);
So where are the main changes?
- Computation of the Projection Matrix
void FViewport::ViewportInit() {
float AspectRatio = FEngineRenderConfig::GetRenderConfig()->AspectRatio;
auto Project = XMMatrixPerspectiveFovLH(
0.25f * XM_PI, // Field of View Angle
AspectRatio, // Aspect ratio
1.0f, // Near clip plane distance
1000.0f); // Far clip plane distance
XMStoreFloat4x4(&ProjectionMatrix, Project);
}
- A look at the input events
void CCamera::BeginInit() {
ViewportInit();
InputComponent->CaptureKeyboardInfoDelegate.Bind(this, &CCamera::ExecuteKeyboardInput);
InputComponent->OnRightMouseButtonUpDelegate.Bind(this, &CCamera::OnRightMouseButtonUp);
InputComponent->OnRightMouseButtonDownDelegate.Bind(this, &CCamera::OnRightMouseButtonDown);
InputComponent->OnMouseMoveDelegate.Bind(this, &CCamera::OnMouseMove);
InputComponent->OnMousesWheelDelegate.Bind(this, &CCamera::OnMouseWheel);
}
- BuildViewMatrix is called continuously inside Tick to compute the view matrix
void CCamera::BuildViewMatrix(float DeltaTime) {
if (CameraType == ECameraType::CameraRoaming) {
// Compute and correct the axes
TransformationComponent->CorrectionVector();
// Work out the intended movement along the camera's own axes
float X = 0, Y = 0, Z = 0;
TransformationComponent->GetCorrectionPosition(X, Y, Z);
// Build the view matrix
XMFLOAT3 RightVector = TransformationComponent->GetRightVector();
XMFLOAT3 UpVector = TransformationComponent->GetUpVector();
XMFLOAT3 ForwardVector = TransformationComponent->GetForwardVector();
ViewMatrix =
{
RightVector.x, UpVector.x, ForwardVector.x, 0.0f,
RightVector.y, UpVector.y, ForwardVector.y, 0.0f,
RightVector.z, UpVector.z, ForwardVector.z, 0.0f,
X, Y, Z, 1.0f
};
} else if (CameraType == ECameraType::ObservationObject) {
// Build the view matrix
// 。。。
}
}
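For reference, the matrix assembled above is the standard row-vector view matrix. With R, U, F the camera's right/up/forward axes and e its position, and assuming X, Y, Z hold the negated dot products of e with those axes (which is presumably what GetCorrectionPosition produces), it is
$$
V=\begin{pmatrix}
R_x & U_x & F_x & 0\\
R_y & U_y & F_y & 0\\
R_z & U_z & F_z & 0\\
-\,e\cdot R & -\,e\cdot U & -\,e\cdot F & 1
\end{pmatrix}
$$
which is the same matrix XMMatrixLookToLH(EyePosition, ForwardVector, UpVector) would produce.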
-
What about the world matrix? There is none yet; it is still the identity matrix, so model space is effectively world space.
-
How the data is passed
In HLSL the matrices come in through b0 and b1, and the MVP transform is done on the GPU.
cbuffer ObjectConstantBuffer : register(b0) // b0~b14
{
float4x4 WorldMatrix;
};
cbuffer ViewportConstantBuffer : register(b1) // b0~b14
{
float4x4 ViewProjectionMatrix;
};
MeshVertexOut VSMain(MeshVertexIn MV)
{
MeshVertexOut Out;
float4 Position = mul(float4(MV.Position, 1.0f), WorldMatrix);
Out.Position = mul(Position, ViewProjectionMatrix);
Out.Color = MV.Color;
return Out;
}
Here, before each frame's tick, the viewport info is pulled over:
void CMeshMgr::UpdateCalculations(float DeltaTime, const FViewportInfo &Info) {
XMMATRIX ViewMatrix = XMLoadFloat4x4(&Info.ViewMatrix);
XMMATRIX ATRIXWorld = XMLoadFloat4x4(&WorldMatrix);
XMMATRIX ATRIXProjection = XMLoadFloat4x4(&Info.ProjectionMatrix);
FObjectTransformation ObjectTransformation;
XMStoreFloat4x4(&ObjectTransformation.World, XMMatrixTranspose(ATRIXWorld));
ObjectConstantBuffer->Update(0, &ObjectTransformation);
// ViewPort
XMMATRIX ViewProject = XMMatrixMultiply(ViewMatrix, ATRIXProjection);
FViewportTransformation ViewportTransformation;
XMStoreFloat4x4(&ViewportTransformation.ViewProjectionMatrix, XMMatrixTranspose(ViewProject));
ViewConstantBuffer->Update(0, &ViewportTransformation);
}
The data is uploaded through these two calls:
ObjectConstantBuffer->Update(0, &ObjectTransformation);
ViewConstantBuffer->Update(0, &ViewportTransformation);
These are FRenderingResUpdate objects:
shared_ptr<FRenderingResUpdate> ObjectConstantBuffer;
shared_ptr<FRenderingResUpdate> ViewConstantBuffer;
Where are they created and initialized?
After CreateXXMesh is called and before Tick.
A descriptor heap of size 2 is created via CreateDescriptorHeap.
Then, inside a single brace block, there are two CreateConstantBufferView calls.
Note how the DescHandle offsets are handled.
Beyond that, FRenderingResUpdate is a wrapper around a ComPtr;
through its Init and Update operations it simplifies the flow a bit.
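As a rough sketch of what such a wrapper typically does (this is an assumption based on the standard D3D12 upload-buffer pattern, not the engine's actual class): Init creates a persistently mapped upload-heap buffer whose element size is rounded up to 256 bytes, and Update memcpy's one element into it. The real CMeshMgr::BuildMesh follows right after.
// assumes <d3d12.h>, "d3dx12.h", <wrl.h>; names are illustrative
struct FUploadBufferSketch {
    void Init(ID3D12Device *Device, UINT InElementSize, UINT InElementCount) {
        ElementSize = (InElementSize + 255) & ~255u;  // CBV data must be 256-byte aligned
        CD3DX12_HEAP_PROPERTIES HeapProps(D3D12_HEAP_TYPE_UPLOAD);
        CD3DX12_RESOURCE_DESC BufferDesc =
            CD3DX12_RESOURCE_DESC::Buffer(UINT64(ElementSize) * InElementCount);
        Device->CreateCommittedResource(
            &HeapProps, D3D12_HEAP_FLAG_NONE, &BufferDesc,
            D3D12_RESOURCE_STATE_GENERIC_READ, nullptr,
            IID_PPV_ARGS(UploadBuffer.GetAddressOf()));
        UploadBuffer->Map(0, nullptr, reinterpret_cast<void **>(&MappedData));  // keep it mapped
    }
    void Update(int Index, const void *InData) {
        memcpy(MappedData + Index * ElementSize, InData, ElementSize);  // copy one element in place
    }
    Microsoft::WRL::ComPtr<ID3D12Resource> UploadBuffer;
    BYTE *MappedData = nullptr;
    UINT ElementSize = 0;
};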
void CMeshMgr::BuildMesh(const FMeshRenderingData *InMeshData) {
// const heap
D3D12_DESCRIPTOR_HEAP_DESC HeapDesc;
HeapDesc.NumDescriptors = 2;
HeapDesc.Type = D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV;
HeapDesc.Flags = D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE;
HeapDesc.NodeMask = 0;
ANALYSIS_HRESULT(GetD3dDevice()->CreateDescriptorHeap(&HeapDesc, IID_PPV_ARGS(ConstantBufferViewHeap.GetAddressOf())))
// Constant buffers
{
UINT DescriptorOffset = GetD3dDevice()->GetDescriptorHandleIncrementSize(D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
// OBJ const buffer
ObjectConstantBuffer = make_shared<FRenderingResUpdate>();
ObjectConstantBuffer->Init(GetD3dDevice().Get(), sizeof(FObjectTransformation), 1);
D3D12_GPU_VIRTUAL_ADDRESS AddressObj = ObjectConstantBuffer->GetUploadBuffer()->GetGPUVirtualAddress();
D3D12_CONSTANT_BUFFER_VIEW_DESC ObjConstantBufferViewDesc;
ObjConstantBufferViewDesc.BufferLocation = AddressObj;
ObjConstantBufferViewDesc.SizeInBytes = ObjectConstantBuffer->GetConstantBufferByteSize();
// get offset b0
CD3DX12_CPU_DESCRIPTOR_HANDLE DescHandle(ConstantBufferViewHeap->GetCPUDescriptorHandleForHeapStart());
DescHandle.Offset(0, DescriptorOffset);
GetD3dDevice()->CreateConstantBufferView(
&ObjConstantBufferViewDesc,
DescHandle);
// ViewPort const buffer
ViewConstantBuffer = make_shared<FRenderingResUpdate>();
ViewConstantBuffer->Init(GetD3dDevice().Get(), sizeof(FViewportTransformation), 1);
D3D12_GPU_VIRTUAL_ADDRESS AddressViewPort = ViewConstantBuffer->GetUploadBuffer()->GetGPUVirtualAddress();
// Not the first one any more, so compute the offset
DescHandle.Offset(1, DescriptorOffset);
D3D12_CONSTANT_BUFFER_VIEW_DESC ViewportConstantBufferViewDesc;
ViewportConstantBufferViewDesc.BufferLocation = AddressViewPort;
ViewportConstantBufferViewDesc.SizeInBytes = ViewConstantBuffer->GetConstantBufferByteSize();
GetD3dDevice()->CreateConstantBufferView(
&ViewportConstantBufferViewDesc,
DescHandle);
}
// 。。。
}
Then b0 and b1 are bound to these.
Here DescriptorRangeObjCBV.Init(D3D12_DESCRIPTOR_RANGE_TYPE_CBV, 1, 0);
means: starting at b0, occupying one slot.
Put together:
RootParameters[0].InitAsDescriptorTable(1, &DescriptorRangeObjCBV);
RootParameters[1].InitAsDescriptorTable(1, &DescriptorRangeViewPortCBV);
Root parameter 0 uses b0;
root parameter 1 uses b1.
// root signature
CD3DX12_ROOT_PARAMETER RootParameters[2];
// OBJ CBV descriptor table
CD3DX12_DESCRIPTOR_RANGE DescriptorRangeObjCBV;
DescriptorRangeObjCBV.Init(D3D12_DESCRIPTOR_RANGE_TYPE_CBV, 1, 0); // baseShaderRegister is shader register
// ViewPort CBV descriptor table
CD3DX12_DESCRIPTOR_RANGE DescriptorRangeViewPortCBV;
DescriptorRangeViewPortCBV.Init(D3D12_DESCRIPTOR_RANGE_TYPE_CBV, 1, 1);
RootParameters[0].InitAsDescriptorTable(1, &DescriptorRangeObjCBV);
RootParameters[1].InitAsDescriptorTable(1, &DescriptorRangeViewPortCBV);
ComPtr<ID3DBlob> SerializeRootSignature;
ComPtr<ID3DBlob> ErrorBlob;
CD3DX12_ROOT_SIGNATURE_DESC RootSignatureDesc;
RootSignatureDesc.Init(
2,
RootParameters,
0,
nullptr,
D3D12_ROOT_SIGNATURE_FLAG_ALLOW_INPUT_ASSEMBLER_INPUT_LAYOUT);
D3D12SerializeRootSignature(
&RootSignatureDesc,
D3D_ROOT_SIGNATURE_VERSION_1,
SerializeRootSignature.GetAddressOf(),
ErrorBlob.GetAddressOf());
if (ErrorBlob) {
ENGINE_LOG_ERROR("ErrorBlob: {}", ErrorBlob->GetBufferPointer())
OutputDebugStringA((char *) ErrorBlob->GetBufferPointer());
}
ANALYSIS_HRESULT(GetD3dDevice()->CreateRootSignature(
0,
SerializeRootSignature->GetBufferPointer(),
SerializeRootSignature->GetBufferSize(),
IID_PPV_ARGS(RootSignature.GetAddressOf())))
// 。。。
Then, at draw time in Tick:
void CMeshMgr::Draw(float DeltaTime) {
// 。。。
CD3DX12_GPU_DESCRIPTOR_HANDLE DescHandle(ConstantBufferViewHeap->GetGPUDescriptorHandleForHeapStart());
UINT DescriptorOffset = GetD3dDevice()->GetDescriptorHandleIncrementSize(D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
// root 1
DescHandle.Offset(0, DescriptorOffset);
GetD3dGraphicsCommandList()->SetGraphicsRootDescriptorTable(0, DescHandle);
// root 2
DescHandle.Offset(1, DescriptorOffset);
GetD3dGraphicsCommandList()->SetGraphicsRootDescriptorTable(1, DescHandle);
GetD3dGraphicsCommandList()->SetGraphicsRootDescriptorTable(0, DescHandle); feeds whatever DescHandle points at into root parameter 0,
and DescHandle is a handle offset into the descriptor heap, so at this point the whole chain is connected.
Multiple models
PS D:\UE_Project\cwlengine> git log
commit b188767973207e333581a09bfb59c73c0d31dd5d (HEAD -> master)
Author: cwl\chenweilin <weilin1013@qq.com>
Date: Tue Dec 5 02:05:04 2023 +0800
平面创建
Compile and run: two models in the scene.
CWinEngine is unchanged.
CDirectXRenderingEngine
- Initialize
- InitDirect3D();
- PostInitDirect3D();
- PostInitialize
- Calls CreateXXXMesh to create several models
- MeshMgr->BuildMesh();
- Submit the commands
- Tick
- MeshMgr->PreDraw(DeltaTime);
- MeshMgr->Draw(DeltaTime);
- MeshMgr->PostDraw(DeltaTime);
The main changes at this layer:
CreateXXXMesh is called to create several models,
then
MeshMgr->BuildMesh();
which essentially merges the vertex/index data of the models; the BuildMesh step also does some PSO setup.
Finally, the three Draw calls on MeshMgr in Tick.
Merging the vertex/index data of multiple models
template<typename T, typename ...ParamTypes>
T* CreateMesh(ParamTypes &&... Params) {
T* Mesh = new T();
FMeshRenderingData MeshData;
Mesh->CreateMesh(MeshData, std::forward<ParamTypes>(Params)...);
Mesh->BeginInit();
RenderingPipeLine.BuildMesh(Mesh, MeshData);
Mesh->Init();
return Mesh;
}
This happens at
RenderingPipeLine.BuildMesh(Mesh, MeshData);
There is a Mesh object here whose type T can be BoxMesh, SphereMesh, etc.; it represents the C++-side mesh object.
And an FMeshRenderingData MeshData;
struct FMeshRenderingData {
vector<FVertex> VertexData;
vector<uint16_t> IndexData;
public:
UINT GetVertexSizeInBytes() const { return VertexData.size() * sizeof(FVertex); }
UINT GetIndexSizeInBytes() const { return IndexData.size() * sizeof(uint16_t); }
};
the vertex and index data prepared for DX.
In
void FRenderingPipeLine::BuildMesh(GMesh *InMesh, const FMeshRenderingData &MeshData) {
GeometryMap.BuildMesh(InMesh, MeshData);
}
void FGeometryMap::BuildMesh(GMesh *InMesh, const FMeshRenderingData &MeshData) {
FGeometry &Geometry = Geometries[0];
Geometry.BuildMesh(InMesh, MeshData);
}
The 0 here looks like a reserved slot in the original author's design; only 0 is used for now.
struct FGeometry : public IDirectXDeviceInterface_Struct {
// ...
protected:
// ...
FMeshRenderingData MeshRenderingData;
std::vector<FRenderingData> DescribeMeshRenderingData;
};
Every mesh is added to the DescribeMeshRenderingData vector, so its size equals the number of models.
MeshRenderingData, on the other hand, merges the vertex and index data of all these models together; the FMeshRenderingData struct is just the vectors.
So when we need to walk all the meshes we go through DescribeMeshRenderingData,
while MeshRenderingData.VertexData and MeshRenderingData.IndexData can be handed to DX directly.
void FGeometry::BuildMesh(GMesh *InMesh, const FMeshRenderingData &InMeshData) {
if (IsRenderingDataExist(InMesh)) {
return;
}
DescribeMeshRenderingData.push_back(std::move(FRenderingData()));
auto &RenderingData = DescribeMeshRenderingData.back();
RenderingData.Mesh = InMesh;
RenderingData.IndexSize = InMeshData.IndexData.size();
RenderingData.VertexSize = InMeshData.VertexData.size();
RenderingData.IndexOffsetPosition = MeshRenderingData.IndexData.size();
RenderingData.VertexOffsetPosition = MeshRenderingData.VertexData.size();
MeshRenderingData.IndexData.insert(MeshRenderingData.IndexData.end(), InMeshData.IndexData.begin(), InMeshData.IndexData.end());
MeshRenderingData.VertexData.insert(MeshRenderingData.VertexData.end(), InMeshData.VertexData.begin(), InMeshData.VertexData.end());
}
Descriptor heap and constant buffers
Creating the descriptor heap
void FGeometryMap::BuildDescriptorHeap() {
DescriptorHeap.BuildDescriptorHeap(GetDrawObjectNumber() + 1);
}
DescriptorHeap is a wrapper around ID3D12DescriptorHeap.
GetDrawObjectNumber is exactly the size of the DescribeMeshRenderingData vector above, i.e. the number of models; the +1 is for the VP matrix.
void FGeometryMap::BuildConstantBuffer() {
ObjectConstantBufferView.CreateConstantBufferView(sizeof(FObjectTransformation), GetDrawObjectNumber());
CD3DX12_CPU_DESCRIPTOR_HANDLE DescHandle = CD3DX12_CPU_DESCRIPTOR_HANDLE(GetDescriptorHeap()->GetCPUDescriptorHandleForHeapStart());
ObjectConstantBufferView.BuildConstantBufferView(DescHandle, GetDrawObjectNumber());
}
void FGeometryMap::BuildViewportConstantBufferView() {
ViewportConstantBufferView.CreateConstantBufferView(sizeof(FViewportTransformation), 1);
CD3DX12_CPU_DESCRIPTOR_HANDLE DescHandle = CD3DX12_CPU_DESCRIPTOR_HANDLE(GetDescriptorHeap()->GetCPUDescriptorHandleForHeapStart());
ViewportConstantBufferView.BuildConstantBufferView(DescHandle, 1, GetDrawObjectNumber());
}
ObjectConstantBufferView is the per-model constant buffer: GetDrawObjectNumber() of them are created, each sizeof(FObjectTransformation) bytes.
They are created starting from the beginning of the descriptor heap (GetCPUDescriptorHandleForHeapStart), that many of them.
The ViewportConstantBufferView that follows is for the VP matrix: one entry, of size sizeof(FViewportTransformation).
Since the first GetDrawObjectNumber() slots went to the models, the ViewportConstantBufferView goes at the slot offset by GetDrawObjectNumber() into the descriptor heap (the offset arithmetic is sketched below).
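The heap layout is therefore: slots 0 .. N-1 hold the per-object CBVs and slot N holds the viewport CBV, with N = GetDrawObjectNumber(). The offsets are plain handle arithmetic (a sketch, not the engine's exact code):
UINT Increment = Device->GetDescriptorHandleIncrementSize(D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
// per-object CBV i lives at slot i
CD3DX12_CPU_DESCRIPTOR_HANDLE ObjectHandle(Heap->GetCPUDescriptorHandleForHeapStart());
ObjectHandle.Offset(ObjectIndex, Increment);            // start + ObjectIndex * Increment
// the viewport (VP matrix) CBV lives at slot N
CD3DX12_CPU_DESCRIPTOR_HANDLE ViewportHandle(Heap->GetCPUDescriptorHandleForHeapStart());
ViewportHandle.Offset(GetDrawObjectNumber(), Increment);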
Updating the data behind the descriptor heap
With those two steps we have created the descriptor heap and decided what each region is for, but there is no content in it yet.
So:
void FGeometryMap::UpdateCalculations(float DeltaTime, const FViewportInfo &ViewportInfo) {
XMMATRIX ViewMatrix = XMLoadFloat4x4(&ViewportInfo.ViewMatrix);
XMMATRIX ATRIXProjection = XMLoadFloat4x4(&ViewportInfo.ProjectionMatrix);
for (int i = 0; i < Geometries.size(); i++) {
for (size_t j = 0; j < Geometries[i].DescribeMeshRenderingData.size(); j++) {
FRenderingData &RenderingData = Geometries[i].DescribeMeshRenderingData[j];
// world matrix
{
XMFLOAT3 Position = RenderingData.Mesh->GetPosition();
XMFLOAT3 Scale = RenderingData.Mesh->GetScale();
XMFLOAT3 ForwardVector = RenderingData.Mesh->GetForwardVector();
XMFLOAT3 RightVector = RenderingData.Mesh->GetRightVector();
XMFLOAT3 UpVector = RenderingData.Mesh->GetUpVector();
RenderingData.WorldMatrix = {
RightVector.x * Scale.x, UpVector.x, ForwardVector.x, 0.0f,
RightVector.y, UpVector.y * Scale.x, ForwardVector.y, 0.0f,
RightVector.z, UpVector.z, ForwardVector.z * Scale.x, 0.0f,
Position.x, Position.y, Position.z, 1.0f
};
}
XMMATRIX ATRIXWorld = XMLoadFloat4x4(&RenderingData.WorldMatrix);
FObjectTransformation ObjectTransformation;
XMStoreFloat4x4(&ObjectTransformation.World, XMMatrixTranspose(ATRIXWorld));
ObjectConstantBufferView.Update(j, &ObjectTransformation);
}
}
// ViewPort
XMMATRIX ViewProject = XMMatrixMultiply(ViewMatrix, ATRIXProjection);
FViewportTransformation ViewportTransformation;
XMStoreFloat4x4(&ViewportTransformation.ViewProjectionMatrix, XMMatrixTranspose(ViewProject));
ViewportConstantBufferView.Update(0, &ViewportTransformation);
}
The outer for-i loop is effectively a no-op, because std::map<int, FGeometry> Geometries; currently has size 1.
So it boils down to
for (size_t j = 0; j < Geometries[i].DescribeMeshRenderingData.size(); j++)
ObjectConstantBufferView.Update(j, &ObjectTransformation);
i.e. for the j-th model constant buffer, write its world matrix in,
and the
ViewportConstantBufferView.Update(0, &ViewportTransformation); below works the same way:
there is only one, so slot 0 is updated directly.
An address is passed here; the function parameter is a void*.
Then it is the usual DX routine: copy the memory to the right offset inside the constant buffer, memcpy(&Data[Index * ElementSize], InData, ElementSize);
At this point we have created the descriptor heap, defined the size and meaning of each of its regions, and the data is in place too.
These UpdateCalculations happen in the function called right before the engine's Tick.
Root signature
void FDirectXRootSignature::BuildRootSignature() {
// root signature
CD3DX12_ROOT_PARAMETER RootParameters[2];
// OBJ CBV descriptor table
CD3DX12_DESCRIPTOR_RANGE DescriptorRangeObjCBV;
DescriptorRangeObjCBV.Init(D3D12_DESCRIPTOR_RANGE_TYPE_CBV, 1, 0); // baseShaderRegister is shader register
// ViewPort CBV descriptor table
CD3DX12_DESCRIPTOR_RANGE DescriptorRangeViewPortCBV;
DescriptorRangeViewPortCBV.Init(D3D12_DESCRIPTOR_RANGE_TYPE_CBV, 1, 1);
RootParameters[0].InitAsDescriptorTable(1, &DescriptorRangeObjCBV);
RootParameters[1].InitAsDescriptorTable(1, &DescriptorRangeViewPortCBV);
// 。。。
}
Set up in advance:
DescriptorRangeObjCBV.Init(D3D12_DESCRIPTOR_RANGE_TYPE_CBV, 1, 0);
means b0, using one descriptor;
DescriptorRangeViewPortCBV.Init(D3D12_DESCRIPTOR_RANGE_TYPE_CBV, 1, 1);
means b1, using one descriptor.
The root-parameter index here corresponds to
GetD3dGraphicsCommandList()->SetGraphicsRootDescriptorTable(1, DescHandle);
the first parameter of this call.
Lambert
commit e2f9a42979309a236dc64c9b8be558d8b2bf98af (HEAD -> master)
Author: cwl\chenweilin <weilin1013@qq.com>
Date: Wed Dec 13 00:16:51 2023 +0800
兰伯特材质
I jumped to this version.
Compile and run: the sphere is now shaded; everything else is unchanged.
CWinEngine
CDirectXRenderingEngine
CMeshMgr
FRenderingPipeLine
FGeometryMap
A quick glance shows none of these changed.
- Descriptor heap
void FGeometryMap::BuildDescriptorHeap() {
DescriptorHeap.BuildDescriptorHeap(GetDrawMeshObjectNumber() + GetDrawMaterialNumber() + GetDrawLightNumber() + 1);
}
void FGeometryMap::BuildMeshConstantBuffer() {
MeshConstantBufferView.CreateConstantBufferView(sizeof(FObjectTransformation), GetDrawMeshObjectNumber());
CD3DX12_CPU_DESCRIPTOR_HANDLE DescHandle = CD3DX12_CPU_DESCRIPTOR_HANDLE(GetDescriptorHeap()->GetCPUDescriptorHandleForHeapStart());
MeshConstantBufferView.BuildConstantBufferView(DescHandle, GetDrawMeshObjectNumber());
}
void FGeometryMap::BuildMaterialConstantBuffer() {
MaterialConstantBufferView.CreateConstantBufferView(sizeof(FMaterialConstantBuffer), GetDrawMaterialNumber());
CD3DX12_CPU_DESCRIPTOR_HANDLE DescHandle = CD3DX12_CPU_DESCRIPTOR_HANDLE(GetDescriptorHeap()->GetCPUDescriptorHandleForHeapStart());
MaterialConstantBufferView.BuildConstantBufferView(DescHandle, GetDrawMaterialNumber(), GetDrawMeshObjectNumber());
}
void FGeometryMap::BuildLightConstantBuffer() {
LightConstantBufferView.CreateConstantBufferView(sizeof(FLightConstantBuffer), GetDrawLightNumber());
CD3DX12_CPU_DESCRIPTOR_HANDLE DescHandle = CD3DX12_CPU_DESCRIPTOR_HANDLE(GetDescriptorHeap()->GetCPUDescriptorHandleForHeapStart());
LightConstantBufferView.BuildConstantBufferView(
DescHandle,
GetDrawLightNumber(),
GetDrawMeshObjectNumber() + GetDrawMaterialNumber());
}
The creation, size, and structure of the descriptor heap changed a bit:
it is now GetDrawMeshObjectNumber() + GetDrawMaterialNumber() + GetDrawLightNumber() + 1 slots; the final 1 is for the VP matrix.
- Constant buffer updates
Located in the function right before Tick,
which writes the contents into the constant buffers:
void FGeometryMap::UpdateCalculations(float DeltaTime, const FViewportInfo &ViewportInfo) {
XMMATRIX ViewMatrix = XMLoadFloat4x4(&ViewportInfo.ViewMatrix);
XMMATRIX ATRIXProjection = XMLoadFloat4x4(&ViewportInfo.ProjectionMatrix);
for (int i = 0; i < Geometries.size(); i++) {
for (size_t j = 0; j < Geometries[i].DescribeMeshRenderingData.size(); j++) {
FRenderingData &RenderingData = Geometries[i].DescribeMeshRenderingData[j];
// world matrix
{
XMFLOAT3 Position = RenderingData.Mesh->GetPosition();
XMFLOAT3 Scale = RenderingData.Mesh->GetScale();
XMFLOAT3 ForwardVector = RenderingData.Mesh->GetForwardVector();
XMFLOAT3 RightVector = RenderingData.Mesh->GetRightVector();
XMFLOAT3 UpVector = RenderingData.Mesh->GetUpVector();
RenderingData.WorldMatrix = {
RightVector.x * Scale.x, UpVector.x, ForwardVector.x, 0.0f,
RightVector.y, UpVector.y * Scale.x, ForwardVector.y, 0.0f,
RightVector.z, UpVector.z, ForwardVector.z * Scale.x, 0.0f,
Position.x, Position.y, Position.z, 1.0f
};
}
XMMATRIX ATRIXWorld = XMLoadFloat4x4(&RenderingData.WorldMatrix);
FObjectTransformation ObjectTransformation;
XMStoreFloat4x4(&ObjectTransformation.World, XMMatrixTranspose(ATRIXWorld));
MeshConstantBufferView.Update(j, &ObjectTransformation);
// material
FMaterialConstantBuffer MaterialConstantBuffer;
{
// TODO: only a single material for now
if (RenderingData.Mesh->GetMaterials()->size() > 0) {
CMaterial *Material = (*RenderingData.Mesh->GetMaterials())[0];
FVector4D BaseColor = Material->GetBaseColor();
MaterialConstantBuffer.BaseColor = XMFLOAT4(BaseColor.X, BaseColor.Y, BaseColor.Z, BaseColor.W);
} else {
MaterialConstantBuffer.BaseColor = {0.5f, 0.5f, 0.5f, 1.f};
}
}
MaterialConstantBufferView.Update(j, &MaterialConstantBuffer);
}
}
// light
FLightConstantBuffer LightConstantBuffer;
{
}
LightConstantBufferView.Update(0, &LightConstantBuffer);
// ViewPort
XMMATRIX ViewProject = XMMatrixMultiply(ViewMatrix, ATRIXProjection);
FViewportTransformation ViewportTransformation;
XMStoreFloat4x4(&ViewportTransformation.ViewProjectionMatrix, XMMatrixTranspose(ViewProject));
ViewportConstantBufferView.Update(0, &ViewportTransformation);
}
As an aside, adding the material system really just means
adding a vector to the Mesh object's members:
vector<CMaterial*> Materials;
A material class needs at least the following:
FVector4D BaseColor;
float MaterialRoughness;
EMaterialType MaterialType;
base color, roughness, and a lighting type (standing for the various lighting algorithms: Lambert, PBR, and so on).
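A minimal sketch of what such a material class could look like (the names and enumerators here are illustrative, not the engine's exact declaration):
enum class EMaterialType { Lambert, HalfLambert, BlinnPhong, PBR };

class CMaterial {
public:
    void SetBaseColor(const FVector4D &InColor) { BaseColor = InColor; }
    void SetMaterialRoughness(float InRoughness) { MaterialRoughness = InRoughness; }
    void SetMaterialType(EMaterialType InType) { MaterialType = InType; }
private:
    FVector4D BaseColor{1.f, 1.f, 1.f, 1.f};              // rgba
    float MaterialRoughness = 0.5f;                        // roughness
    EMaterialType MaterialType = EMaterialType::Lambert;   // which lighting algorithm to use
};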
- Root signature
void FDirectXRootSignature::BuildRootSignature() {
const int CountBuffer = 4;
// root signature
CD3DX12_ROOT_PARAMETER RootParameters[CountBuffer];
// OBJ CBV descriptor table
CD3DX12_DESCRIPTOR_RANGE DescriptorRangeObjCBV;
DescriptorRangeObjCBV.Init(D3D12_DESCRIPTOR_RANGE_TYPE_CBV, 1, 0); // baseShaderRegister is shader register
// ViewPort CBV descriptor table
CD3DX12_DESCRIPTOR_RANGE DescriptorRangeViewPortCBV;
DescriptorRangeViewPortCBV.Init(D3D12_DESCRIPTOR_RANGE_TYPE_CBV, 1, 1);
// Material CBV descriptor table
CD3DX12_DESCRIPTOR_RANGE DescriptorRangeMaterialCBV;
DescriptorRangeMaterialCBV.Init(D3D12_DESCRIPTOR_RANGE_TYPE_CBV, 1, 2);
// Light CBV descriptor table
CD3DX12_DESCRIPTOR_RANGE DescriptorRangeLightCBV;
DescriptorRangeLightCBV.Init(D3D12_DESCRIPTOR_RANGE_TYPE_CBV, 1, 3);
RootParameters[0].InitAsDescriptorTable(1, &DescriptorRangeObjCBV);
RootParameters[1].InitAsDescriptorTable(1, &DescriptorRangeViewPortCBV);
RootParameters[2].InitAsDescriptorTable(1, &DescriptorRangeMaterialCBV);
RootParameters[3].InitAsDescriptorTable(1, &DescriptorRangeLightCBV);
A quick read of the root signature:
starting at b0, using one slot (DescriptorRangeObjCBV.Init(D3D12_DESCRIPTOR_RANGE_TYPE_CBV, 1, 0);)
starting at b1, using one slot (DescriptorRangeViewPortCBV.Init(D3D12_DESCRIPTOR_RANGE_TYPE_CBV, 1, 1);)
...
Together with the descriptor heap creation from earlier:
DescriptorHeap.BuildDescriptorHeap(GetDrawMeshObjectNumber() + GetDrawMaterialNumber() + GetDrawLightNumber() + 1);
and the constant-buffer content writes for those slots:
MeshConstantBufferView.Update(j, &ObjectTransformation);
LightConstantBufferView.Update(0, &LightConstantBuffer);
ViewportConstantBufferView.Update(0, &ViewportTransformation);
Finally, in Tick -> Draw:
void FGeometryMap::DrawLight(float DeltaTime) {
UINT DescriptorOffset = GetD3dDevice()->GetDescriptorHandleIncrementSize(D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
CD3DX12_GPU_DESCRIPTOR_HANDLE DescHandle(GetDescriptorHeap()->GetGPUDescriptorHandleForHeapStart());
DescHandle.Offset(GetDrawMeshObjectNumber() + GetDrawMaterialNumber(), DescriptorOffset);
GetD3dGraphicsCommandList()->SetGraphicsRootDescriptorTable(3, DescHandle);
}
void FGeometryMap::DrawViewport(float DeltaTime) {
UINT DescriptorOffset = GetD3dDevice()->GetDescriptorHandleIncrementSize(D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
CD3DX12_GPU_DESCRIPTOR_HANDLE DescHandle(GetDescriptorHeap()->GetGPUDescriptorHandleForHeapStart());
DescHandle.Offset(GetDrawMeshObjectNumber() + GetDrawMaterialNumber() + GetDrawLightNumber(), DescriptorOffset);
GetD3dGraphicsCommandList()->SetGraphicsRootDescriptorTable(1, DescHandle);
}
void FGeometryMap::DrawMesh(float DeltaTime) {
UINT DescriptorOffset = GetD3dDevice()->GetDescriptorHandleIncrementSize(D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
for (auto &Iter: Geometries) {
D3D12_VERTEX_BUFFER_VIEW VBV = Iter.second.GetVertexBufferView();
D3D12_INDEX_BUFFER_VIEW IBV = Iter.second.GetIndexBufferView();
for (int i = 0; i < Iter.second.DescribeMeshRenderingData.size(); i++) {
// Necessary; otherwise the offsets would keep accumulating. Every frame starts again from the heap start.
auto DesMeshHandle = CD3DX12_GPU_DESCRIPTOR_HANDLE(GetDescriptorHeap()->GetGPUDescriptorHandleForHeapStart());
auto DesMaterialHandle = CD3DX12_GPU_DESCRIPTOR_HANDLE(GetDescriptorHeap()->GetGPUDescriptorHandleForHeapStart());
FRenderingData &RenderingData = Iter.second.DescribeMeshRenderingData[i];
GetD3dGraphicsCommandList()->IASetIndexBuffer(&IBV);
GetD3dGraphicsCommandList()->IASetVertexBuffers(
0, // StartSlot, input slot 0~15
1, // number of vertex buffers (slots k, k+1, ..., k+n-1)
&VBV);
GetD3dGraphicsCommandList()->IASetPrimitiveTopology(D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
// mesh offset
DesMeshHandle.Offset(i, DescriptorOffset);
GetD3dGraphicsCommandList()->SetGraphicsRootDescriptorTable(0, DesMeshHandle);
// material offset; TODO: needs adjusting once a mesh has multiple materials
DesMaterialHandle.Offset(i + GetDrawMeshObjectNumber(), DescriptorOffset);
GetD3dGraphicsCommandList()->SetGraphicsRootDescriptorTable(2, DesMaterialHandle);
GetD3dGraphicsCommandList()->DrawIndexedInstanced(
RenderingData.IndexSize, // IndexCountPerInstance: number of indices to draw
1, // InstanceCount
RenderingData.IndexOffsetPosition, // StartIndexLocation: first index read from the index buffer
RenderingData.VertexOffsetPosition, // BaseVertexLocation: value added to each index before reading the vertex buffer
0 // StartInstanceLocation
);
}
}
}
Now a look at the shader:
#include "Light.hlsl"
#include "Material.hlsl"
cbuffer MeshConstantBuffer : register(b0) // b0~b14
{
float4x4 WorldMatrix;
};
cbuffer ViewportConstantBuffer : register(b1)
{
float4x4 ViewProjectionMatrix;
};
cbuffer MaterialConstantBuffer : register(b2)
{
float4 BaseColor;
float4x4 TransformInfo;
};
cbuffer LightConstantBuffer : register(b3)
{
float3 LightIntensity;
float3 LightDirection;
};
struct MeshVertexIn
{
float3 Position : POSITION;
float4 Color : COLOR;
float3 Normal : NORMAL;
};
struct MeshVertexOut
{
float4 Position : SV_POSITION;
float4 Color : COLOR;
float3 Normal : NORMAL;
};
MeshVertexOut VSMain(MeshVertexIn MV)
{
MeshVertexOut Out;
float4 Position = mul(float4(MV.Position, 1.0f), WorldMatrix);
Out.Position = mul(Position, ViewProjectionMatrix);
Out.Normal = mul(MV.Normal, (float3x3)WorldMatrix);
Out.Color = MV.Color;
return Out;
}
float4 PSMain(MeshVertexOut MVOut) : SV_TARGET
{
float4 Ambient = float4(0.05f, 0.05f, 0.15f, 1.0f);
float3 ModelNormal = normalize(MVOut.Normal);
float3 ModelLightDirection = normalize(-LightDirection);
float DotValue = max(dot(ModelNormal, ModelLightDirection), 0.0);
FMaterial Material;
Material.BaseColor = BaseColor;
MVOut.Color = Material.BaseColor * DotValue + Ambient * MVOut.Color;
return MVOut.Color;
}
The main part is PSMain.
More lighting
commit 2193c98aaaa62b2728f5ea080c826ec5bdfc3e63 (HEAD -> master)
Author: cwl\chenweilin <weilin1013@qq.com>
Date: Sun Dec 17 21:48:19 2023 +0800
常量缓冲区对其调整
At this point we should be at the last part of 13-, which covers the various lighting effects.
The main points:
- Multiple PSOs
struct FDirectXPipelineState : public IDirectXDeviceInterface_Struct {
public:
FDirectXPipelineState();
// ...
private:
unordered_map<int, ComPtr<ID3D12PipelineState>> PSOMap;
D3D12_GRAPHICS_PIPELINE_STATE_DESC GPSDesc;
EPipelineState PipelineState;
};
There is a map here;
before drawing, the matching PSO is picked according to the selected state variable.
void FDirectXPipelineState::PreDraw(float DeltaTime) {
auto PSO = PSOMap[PipelineState];
ANALYSIS_HRESULT(GetD3dGraphicsCommandList()->Reset(GetCommandAllocator().Get(), PSO.Get()))
}
There is actually not much in this part;
it is mostly the implementation of the various lighting algorithms. The main changes are in the shaders, plus adjustments to the various constant buffers, for example adding a roughness field to the material.
Lights
commit 0fc798536c2615c5caf849b1e54d97520d8252fc (HEAD -> master)
Author: cwl\chenweilin <weilin1013@qq.com>
Date: Mon Dec 25 03:13:06 2023 +0800
调整
commit f7a6f0016e00054a40d593e8d71cfe1a11bf0acf
Author: cwl\chenweilin <weilin1013@qq.com>
Date: Mon Dec 25 03:08:50 2023 +0800
16-18 聚光灯
At this version, running it shows a flashlight model, a spotlight.
The lighting so far was really just directional light; this chapter implements point lights and spotlights (the flashlight).
Again, start from the outer layers.
-
CWinEngine: no changes.
-
CDirectXRenderingEngine
LightMgr = CreateObject
The constructor now also initializes the light manager.
In PostInitialize,
objects can be created via World->CreateActorObject;
everything else is unchanged.
Looking at this part again:
if (auto *Sphere = World->CreateActorObject<GSphereMesh>()) {
Sphere->CreateMesh(1.f, 20.f, 30.f);
Sphere->SetPosition(XMFLOAT3(2, 4, 0));
if (CMaterial *Material = Sphere->GetMaterials()->at(0)) {
Material->SetBaseColor(Color);
Material->SetMaterialType(EMaterialType::GradualBanded);
}
}
This creates a GSphereMesh object. All the mesh types derive from GMesh, the C++-side description of a mesh, which holds position, rotation, scale, materials, and so on. Where needed, these are read out and pushed to DX.
- CMeshMgr
- FRenderingPipeLine
- FGeometryMap
The code is unchanged.
- The creation of the descriptor heap has not changed either:
void FGeometryMap::BuildDescriptorHeap() {
DescriptorHeap.BuildDescriptorHeap(GetDrawMeshObjectNumber() + GetDrawMaterialNumber() + GetDrawLightNumber() + 1);
}
The value of GetDrawLightNumber:
UINT FGeometryMap::GetDrawLightNumber() {
return 1;
}
A glance at the root signature shows it unchanged too.
class GLight : public GActorObject {
typedef GActorObject Super;
public:
GLight();
protected:
CVARIABLE()
CLightComponent* LightComponent;
};
The light object; it has a CLightComponent.
In the concrete subclasses, this component is instantiated as different component types.
For example, the point light's constructor:
CPointLightComponent::CPointLightComponent()
: Super() {
LightType = ELightType::PointLight;
const string &Path = "Assets/SpotMesh.obj";
SetLightMesh(GetMeshMgr()->CreateCustomMeshComponent(Path));
if (GetLightMesh()) {
for (auto &Material: *GetLightMesh()->GetMaterials()) {
Material->SetMaterialType(EMaterialType::BaseColor);
Material->SetMaterialDisplayStatus(EMaterialDisplayStatusType::WireframeDisplay);
Material->SetBaseColor(FVector4D(1.0f, 0.7f, 1.0f, 1.0f));
}
GetLightMesh()->SetScale(XMFLOAT3(0.3f, 0.3f, 0.3f));
}
}
It sets the light type, loads a wireframe mesh for the point light, and then sets up its material.
As for the lighting logic itself, a light first needs its intensity plus the start and end of its attenuation range:
float StartAttenuation;
float EndAttenuation;
- Passing the light data
The descriptor heap side has been covered many times already, so here we only look at the light part.
Every object carrying a CLightComponent adds itself to the LightMgr.
void FGeometryMap::UpdateCalculations(float DeltaTime, const FViewportInfo &ViewportInfo) {
// 。。。
// light
auto Lights = GetLightMgr()->GetLights();
FLightConstantBuffer LightConstantBuffer;
for (int i = 0; i < Lights.size(); i++) {
{
if (CLightComponent* Light = Lights[i]) {
FVector3D LightIntensity = Light->GetLightIntensity();
LightConstantBuffer.SceneLights[i].LightIntensity = XMFLOAT3(LightIntensity.X, LightIntensity.Y, LightIntensity.Z);
LightConstantBuffer.SceneLights[i].LightDirection = Light->GetForwardVector();
LightConstantBuffer.SceneLights[i].LightPosition = Light->GetPosition();
LightConstantBuffer.SceneLights[i].LightType = static_cast<int>(Light->GetLightType());
switch (Light->GetLightType()) {
case ELightType::DirectionalLight: {
break;
}
case ELightType::PointLight:
case ELightType::SpotLight:
{
if (CRangeLightComponent *RangeLight = dynamic_cast<CRangeLightComponent *>(Light)) {
LightConstantBuffer.SceneLights[i].StartAttenuation = RangeLight->GetStartAttenuation();
LightConstantBuffer.SceneLights[i].EndAttenuation = RangeLight->GetEndAttenuation();
}
if (Light->GetLightType() == ELightType::SpotLight) {
if (CSpotLightComponent *SpotLight = dynamic_cast<CSpotLightComponent *>(Light)) {
LightConstantBuffer.SceneLights[i].ConicalInnerAngle = CwlMath::DegreesToRadians(SpotLight->GetConicalInnerAngle());
LightConstantBuffer.SceneLights[i].ConicalOuterAngle = CwlMath::DegreesToRadians(SpotLight->GetConicalOuterAngle());
}
}
break;
}
default: {
break;
}
}
}
}
}
LightConstantBufferView.Update(0, &LightConstantBuffer);
// 。。。
}
The data is then written into the constant buffer:
struct FLight
{
FLight();
XMFLOAT3 LightIntensity;
float StartAttenuation;
XMFLOAT3 LightDirection;
float EndAttenuation;
XMFLOAT3 LightPosition;
int LightType;
float ConicalInnerAngle; // radians
float ConicalOuterAngle; // radians
float _Padding[2];
};
struct FLightConstantBuffer {
FLightConstantBuffer();
FLight SceneLights[16];
};
On the shader side, the matching struct:
struct Light
{
float3 LightIntensity;
float StartAttenuation;
float3 LightDirection;
float EndAttenuation;
float3 LightPosition;
int LightType;
float ConicalInnerAngle; // radians
float ConicalOuterAngle; // radians
float _Padding1;
float _Padding2;
};
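The field order on both sides is dictated by HLSL constant-buffer packing: members are packed into 16-byte (float4) registers and no member may straddle a register boundary, so each float3 is paired with a 4-byte scalar, and the trailing angles get two floats of explicit padding. A quick check of the layout, assuming the usual packing rules:
// register 0: LightIntensity.xyz (12) + StartAttenuation (4) = 16 bytes
// register 1: LightDirection.xyz (12) + EndAttenuation   (4) = 16 bytes
// register 2: LightPosition.xyz  (12) + LightType        (4) = 16 bytes
// register 3: ConicalInnerAngle (4) + ConicalOuterAngle (4) + 2 * 4 padding = 16 bytes
// so sizeof(FLight) == 64 on the C++ side matches one Light element in HLSL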
The data has reached the shader; how is the lighting actually implemented?
float4 PSMain(MeshVertexOut MVOut) : SV_TARGET
{
FMaterial Material;
Material.BaseColor = BaseColor;
float4 Ambient = float4(0.15f, 0.15f, 0.25f, 1.0f);
float4 LightStrengths = float4(0.0f, 0.0f, 0.0f, 1.0f);
float3 ModelNormal = normalize(MVOut.Normal);
float4 Specular = {0.f, 0.f, 0.f, 1.f};
// multiple lights
for (int i = 0; i < 16; i++) {
if (length(SceneLights[i].LightIntensity.xyz) <= 0.f) {
continue;
}
float3 ModelLightDirection = normalize(GetLightDirection(SceneLights[i], MVOut.WorldPosition));
float4 LightStrength = ComputeLightStrength(SceneLights[0], ModelNormal, MVOut.WorldPosition, ModelLightDirection);
Material.BaseColor = BaseColor;
float DotValue = 0;
float Diffuse = dot(ModelNormal, ModelLightDirection);
if (MaterialType == 0) { // Lambert
DotValue = pow(max(Diffuse, 0.0), 2.f);
}
else
{
// ...
}
LightStrengths += LightStrength * DotValue * float4(SceneLights[i].LightIntensity, 1.f);
LightStrengths.w = 1.f;
}
MVOut.Color = LightStrengths * (Material.BaseColor + Specular * Material.BaseColor) + // diffuse + specular
Ambient * Material.BaseColor;
return MVOut.Color;
}
For every object, all 16 lights are iterated over to compute each light's contribution to that pixel.
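Stripping away the material-specific variants, the per-pixel computation has the familiar shape of a summed Lambert-style term plus ambient:
$$
\text{color} \;=\; \sum_{i=0}^{15} A_i \, I_i \, f\!\big(\max(N\cdot L_i,\,0)\big)\,\text{BaseColor} \;+\; \text{Ambient}\cdot\text{BaseColor}
$$
where $I_i$ is the light intensity, $A_i$ the attenuation/strength factor coming out of ComputeLightStrength, and $f$ the material-dependent shaping (identity for textbook Lambert; the shader above squares it).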
Materials
commit da6488c7a87c4931a8e0f8189df0c4d62fd46136 (HEAD -> master)
Author: cwl\chenweilin <weilin1013@qq.com>
Date: Fri Jan 5 02:34:26 2024 +0800
18-end fix bug
But at this point I started losing the thread, so I walked forward commit by commit from the earlier ones.
Root signature and material buffer changes
According to commits 18-19, the main changes are:
void FDirectXRootSignature::BuildRootSignature() {
const int CountBuffer = 5;
// root signature
CD3DX12_ROOT_PARAMETER RootParameters[CountBuffer];
// OBJ CBV descriptor table
CD3DX12_DESCRIPTOR_RANGE DescriptorRangeObjCBV;
DescriptorRangeObjCBV.Init(D3D12_DESCRIPTOR_RANGE_TYPE_CBV, 1, 0); // baseShaderRegister is shader register
// ViewPort CBV descriptor table
CD3DX12_DESCRIPTOR_RANGE DescriptorRangeViewPortCBV;
DescriptorRangeViewPortCBV.Init(D3D12_DESCRIPTOR_RANGE_TYPE_CBV, 1, 1);
// Material CBV descriptor table
CD3DX12_DESCRIPTOR_RANGE DescriptorRangeMaterialCBV;
DescriptorRangeMaterialCBV.Init(D3D12_DESCRIPTOR_RANGE_TYPE_CBV, 1, 2);
// Light CBV descriptor table
CD3DX12_DESCRIPTOR_RANGE DescriptorRangeLightCBV;
DescriptorRangeLightCBV.Init(D3D12_DESCRIPTOR_RANGE_TYPE_CBV, 1, 3);
// SRV
CD3DX12_DESCRIPTOR_RANGE DescriptorRangeTextureSRV;
DescriptorRangeTextureSRV.Init(D3D12_DESCRIPTOR_RANGE_TYPE_SRV, 1, 4);
RootParameters[0].InitAsDescriptorTable(1, &DescriptorRangeObjCBV);
RootParameters[1].InitAsDescriptorTable(1, &DescriptorRangeViewPortCBV);
RootParameters[2].InitAsDescriptorTable(1, &DescriptorRangeMaterialCBV);
RootParameters[3].InitAsDescriptorTable(1, &DescriptorRangeLightCBV);
RootParameters[4].InitAsDescriptorTable(1, &DescriptorRangeTextureSRV, D3D12_SHADER_VISIBILITY_PIXEL);
// static samplers
vector<CD3DX12_STATIC_SAMPLER_DESC> StaticSamplers;
StaticSamplers.push_back(CD3DX12_STATIC_SAMPLER_DESC(
0, // shaderRegister, s0
D3D12_FILTER_MIN_MAG_MIP_LINEAR)); // 18-16
ComPtr<ID3DBlob> SerializeRootSignature;
ComPtr<ID3DBlob> ErrorBlob;
CD3DX12_ROOT_SIGNATURE_DESC RootSignatureDesc;
RootSignatureDesc.Init(
CountBuffer,
RootParameters,
StaticSamplers.size(),
StaticSamplers.data(),
D3D12_ROOT_SIGNATURE_FLAG_ALLOW_INPUT_ASSEMBLER_INPUT_LAYOUT);
D3D12SerializeRootSignature(
&RootSignatureDesc,
D3D_ROOT_SIGNATURE_VERSION_1,
SerializeRootSignature.GetAddressOf(),
ErrorBlob.GetAddressOf());
if (ErrorBlob) {
ENGINE_LOG_ERROR("ErrorBlob: {}", ErrorBlob->GetBufferPointer())
OutputDebugStringA((char *) ErrorBlob->GetBufferPointer());
}
ANALYSIS_HRESULT(GetD3dDevice()->CreateRootSignature(
0,
SerializeRootSignature->GetBufferPointer(),
SerializeRootSignature->GetBufferSize(),
IID_PPV_ARGS(RootSignature.GetAddressOf())))
}
What is newly added here is
RootParameters[4].InitAsDescriptorTable(1, &DescriptorRangeTextureSRV, D3D12_SHADER_VISIBILITY_PIXEL);
plus the static sampler, which determines how the texture should be sampled when it gets magnified or minified:
// static samplers
vector<CD3DX12_STATIC_SAMPLER_DESC> StaticSamplers;
StaticSamplers.push_back(CD3DX12_STATIC_SAMPLER_DESC(
0, // shaderRegister, s0
D3D12_FILTER_MIN_MAG_MIP_LINEAR)); // 18-16
The first parameter is 0,
which refers to register 0 on the shader side,
i.e. in Hello.hlsl:
SamplerState SimpleTextureState : register(s0)
Newly added:
Source/CWLEngine/Engine/Rendering/Core/RenderingTextureResUpdate.h
struct FRenderingTexture {
ComPtr<ID3D12Resource> UploadBuffer;
ComPtr<ID3D12Resource> Data;
wstring Name;
wstring FileName; // path
};
Each texture has a name, the resource itself, and an upload buffer.
The actual loading flow:
void FRenderingTextureResUpdate::LoadTextureResource(const std::wstring &InFileName) {
unique_ptr<FRenderingTexture> Texture = make_unique<FRenderingTexture>();
Texture->FileName = InFileName;
Texture->Name = InFileName.substr(InFileName.find_last_of(L"/\\") + 1);
CreateDDSTextureFromFile12(
GetD3dDevice().Get(),
GetD3dGraphicsCommandList().Get(),
Texture->FileName.c_str(),
Texture->Data,
Texture->UploadBuffer);
TextureMap[Texture->Name] = std::move(Texture);
}
It uses the DDS texture loader.
Texture "constant buffer" (actually a Shader Resource View)
18-20
void FGeometryMap::BuildTextureConstantBuffer() {
// the final 1 is the viewport
TextureResUpdate->BuildTextureConstantBuffer(GetDescriptorHeap(), GetDrawMeshObjectNumber() + GetDrawMaterialNumber() + GetDrawLightNumber() + 1);
}
This chunk of the descriptor heap is allocated to SRVs; the view is described with a
D3D12_SHADER_RESOURCE_VIEW_DESC
followed by CreateShaderResourceView:
void FRenderingTextureResUpdate::BuildTextureConstantBuffer(ID3D12DescriptorHeap *InDescriptorHeap, int Offset) {
UINT DescriptorOffset = GetD3dDevice()->GetDescriptorHandleIncrementSize(D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
CD3DX12_CPU_DESCRIPTOR_HANDLE CPUHandle(InDescriptorHeap->GetCPUDescriptorHandleForHeapStart());
CPUHandle.Offset(Offset, DescriptorOffset);
D3D12_SHADER_RESOURCE_VIEW_DESC SRVDesc = {}; // 18-20
SRVDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
SRVDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE2D;
SRVDesc.Shader4ComponentMapping = D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING;
SRVDesc.Texture2D.MipLevels = 1;
SRVDesc.Texture2D.MostDetailedMip = 0;
SRVDesc.Texture2D.ResourceMinLODClamp = 0.0f;
for (auto &Texture : TextureMap) {
SRVDesc.Format = Texture.second->Data->GetDesc().Format;
SRVDesc.Texture2D.MipLevels = Texture.second->Data->GetDesc().MipLevels;
GetD3dDevice()->CreateShaderResourceView(Texture.second->Data.Get(), &SRVDesc, CPUHandle);
CPUHandle.Offset(1, DescriptorOffset);
}
}
How many are created? As many as there are entries in TextureMap;
at the start all the textures were loaded in via LoadTextureResource.
Computing the texture coordinates
Version 18-21.
void FGeometryMap::UpdateCalculations(float DeltaTime, const FViewportInfo &ViewportInfo) {
XMMATRIX ViewMatrix = XMLoadFloat4x4(&ViewportInfo.ViewMatrix);
XMMATRIX ATRIXProjection = XMLoadFloat4x4(&ViewportInfo.ProjectionMatrix);
for (int i = 0; i < Geometries.size(); i++) {
for (size_t j = 0; j < Geometries[i].DescribeMeshRenderingData.size(); j++) {
FRenderingData &RenderingData = Geometries[i].DescribeMeshRenderingData[j];
// world matrix
{
// 。。。
RenderingData.WorldMatrix = {
// 。。。
};
}
XMMATRIX ATRIXWorld = XMLoadFloat4x4(&RenderingData.WorldMatrix);
XMMATRIX ATRIXTexTransform = XMLoadFloat4x4(&RenderingData.TexTransform);
FObjectTransformation ObjectTransformation;
XMStoreFloat4x4(&ObjectTransformation.World, XMMatrixTranspose(ATRIXWorld));
XMStoreFloat4x4(&ObjectTransformation.TexTransform, XMMatrixTranspose(ATRIXTexTransform));
MeshConstantBufferView.Update(j, &ObjectTransformation);
// 。。。
}
}
// 。。。
}
What is new here is
XMMATRIX ATRIXTexTransform = XMLoadFloat4x4(&RenderingData.TexTransform);
TexTransform does not really do anything yet (it is still the identity matrix), but in any case it gets passed along to
void FGeometryMap::DrawTexture(float DeltaTime) {
UINT DescriptorOffset = GetD3dDevice()->GetDescriptorHandleIncrementSize(D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
CD3DX12_GPU_DESCRIPTOR_HANDLE DescHandle(GetDescriptorHeap()->GetGPUDescriptorHandleForHeapStart());
DescHandle.Offset(GetDrawMeshObjectNumber() + GetDrawMaterialNumber() + GetDrawLightNumber() + 1, DescriptorOffset);
GetD3dGraphicsCommandList()->SetGraphicsRootDescriptorTable(4, DescHandle);
}
SetGraphicsRootDescriptorTable(4, DescHandle);
Note that this 4 is the index of RootParameters[4].
LoadTexture was only called once, so the offset DrawTexture uses is based on this:
void FGeometryMap::LoadTexture() {
TextureResUpdate->LoadTextureResource(L"Assets/Tex/Wood2.dds");
}
The data has been submitted; now look at the shader:
SamplerState SimpleTextureState : register(s0);
Texture2D BaseColorMap: register(t4); // the 4 matches the 4 in the root signature
The t4 here comes from this:
// SRV
CD3DX12_DESCRIPTOR_RANGE DescriptorRangeTextureSRV;
DescriptorRangeTextureSRV.Init(D3D12_DESCRIPTOR_RANGE_TYPE_SRV, 1, 4);
P.S. As mentioned earlier, s0 refers to the static sampler.
MeshVertexOut VSMain(MeshVertexIn MV)
{
// 。。。
// uv
// ObjectTextureTransform comes in per object (each object has one); currently it is the identity matrix
// MV.TexCoord was assigned when the object was built
float4 MyTexCoord = mul(float4(MV.TexCoord, 0.0f, 1.0f), ObjectTextureTransform);
// TransformInfo lives in MaterialConstantBuffer; for now it defaults to the identity matrix
Out.TexCoord = mul(MyTexCoord, TransformInfo).xy;
return Out;
}
float4 PSMain(MeshVertexOut MVOut) : SV_TARGET
{
// float4 TestColor = BaseColorMap.Sample(SimpleTextureState, MVOut.TexCoord);
// if (all(TestColor == float4(0.f, 0.f, 0.f, 0.f))) {
// return float4(1.f, 0.f, 0.f, 1.f);
// }
FMaterial Material;
Material.BaseColor = BaseColorMap.Sample(SimpleTextureState, MVOut.TexCoord) * BaseColor;
// sample BaseColorMap (t4)
// in the root signature we did DescriptorRangeTextureSRV.Init(D3D12_DESCRIPTOR_RANGE_TYPE_SRV, 1, 4);
// this is the texture we loaded, already on the GPU; Sample reads from it
// the sampling mode is SimpleTextureState (s0), the static sampler StaticSamplers defined when the root signature was created
// plus a coordinate (data the model already carried at creation time, multiplied by identity matrices, so it is used as-is)
return BaseColorMap.Sample(SimpleTextureState, MVOut.TexCoord);
// 。。。
}
Sphere UVs
Progress is at 18-23.
Compile and run: the sphere's texture is applied now.
Every point (x, y, z) on the sphere maps to a (u, v) on the texture.
First, how is that coordinate computed?
Using spherical coordinates: take a half circle "D" and spin it around the axis "|" once to get the sphere; the half circle spans 180 degrees and the revolution spans 360 degrees, and that gives the mapping (written out below).
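Written out, the mapping that the loop below implements is the standard spherical parameterization, with theta sweeping the half circle (0..pi) and phi sweeping the full revolution (0..2pi):
$$
x = r\sin\theta\cos\phi,\qquad y = r\cos\theta,\qquad z = r\sin\theta\sin\phi,\qquad
u = \frac{\phi}{2\pi},\qquad v = \frac{\theta}{\pi}
$$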
void CSphereMeshComponent::CreateMesh(FMeshRenderingData &InMeshData, float InRadius, uint32_t InAxisSubdivisions, uint32_t InHeightSubdivisions) {
InAxisSubdivisions = CwlMath::Max(3u, InAxisSubdivisions);
InHeightSubdivisions = CwlMath::Max(2u, InHeightSubdivisions);
float ThetaValue = XM_PI / InHeightSubdivisions;
float PhiValue = XM_2PI / InAxisSubdivisions;
InMeshData.VertexData.emplace_back(FVertex(
XMFLOAT3(0, InRadius, 0),
XMFLOAT4(1, 0, 0, 1),
XMFLOAT3(0, 1, 0),
XMFLOAT2(0.5f, 0.f))); // bottom-left is (0, 0), up is the x axis, right is the y axis [this is the top vertex's UV]
for (int i = 1; i < InHeightSubdivisions; i++) { // InHeightSubdivisions - 1 rings
float Theta = ThetaValue * i;
for (int j = 0; j <= InAxisSubdivisions; j++) { // InAxisSubdivisions + 1 points per ring (seam duplicated)
float Phi = PhiValue * j;
float x = InRadius * sinf(Theta) * cosf(Phi);
float y = InRadius * cosf(Theta);
float z = InRadius * sinf(Theta) * sinf(Phi);
InMeshData.VertexData.emplace_back(FVertex(XMFLOAT3(x, y, z), XMFLOAT4(1, 1, 1, 1)));
FVertex &CurVertex = InMeshData.VertexData[InMeshData.VertexData.size() - 1];
// position
XMVECTOR Pos = XMLoadFloat3(&CurVertex.Position);
XMStoreFloat3(&CurVertex.Normal, XMVector3Normalize(Pos));
// UV
CurVertex.TexCoord.x = Phi / XM_2PI;
CurVertex.TexCoord.y = Theta / XM_PI;
// tangent in the U direction
CurVertex.UTangent.x = -z;
CurVertex.UTangent.y = 0;
CurVertex.UTangent.z = x;
XMVECTOR Tangent = XMLoadFloat3(&CurVertex.UTangent);
XMStoreFloat3(&CurVertex.UTangent, XMVector3Normalize(Tangent));
}
}
InMeshData.VertexData.emplace_back(FVertex(
XMFLOAT3(0, -InRadius, 0),
XMFLOAT4(0, 0, 1, 1),
XMFLOAT3(0, -1, 0),
XMFLOAT2(0, 0.5f)));
// draw
for (int i = 0; i <= InAxisSubdivisions; i++) {
InMeshData.IndexData.push_back(0);
InMeshData.IndexData.push_back(i + 1);
InMeshData.IndexData.push_back(i);
}
// draw middle
int VertexCircleNum = InAxisSubdivisions + 1;
int BaseIndex = 1;
for (int i = 0; i < InHeightSubdivisions - 2; i++) {
int CurCycleFirstIndex = i * VertexCircleNum;
int NextCycleFirstIndex = (i + 1) * VertexCircleNum;
for (int j = 0; j < InAxisSubdivisions; j++) {
InMeshData.IndexData.push_back(BaseIndex + CurCycleFirstIndex + j);
InMeshData.IndexData.push_back(BaseIndex + CurCycleFirstIndex + j + 1);
InMeshData.IndexData.push_back(BaseIndex + NextCycleFirstIndex + j);
InMeshData.IndexData.push_back(BaseIndex + NextCycleFirstIndex + j);
InMeshData.IndexData.push_back(BaseIndex + CurCycleFirstIndex + j + 1);
InMeshData.IndexData.push_back(BaseIndex + NextCycleFirstIndex + j + 1);
}
}
int Top = InMeshData.VertexData.size() - 1;
BaseIndex = Top - VertexCircleNum;
for (int i = 0; i < InAxisSubdivisions; i++) {
InMeshData.IndexData.push_back(Top);
InMeshData.IndexData.push_back(BaseIndex + i);
InMeshData.IndexData.push_back(BaseIndex + i + 1);
}
}
Plane UVs
18-24
Fairly simple; skipped.
Automatic DDS loading, shader macros, assigning material textures
18-26
void FGeometryMap::LoadTexture() {
// TODO: walk the directory and load every texture
TextureResUpdate->LoadTextureResource(L"Content/Tex/Wood.dds");
TextureResUpdate->LoadTextureResource(L"Content/Tex/Wood2.dds");
}
This is mainly about walking the Content directory and loading every file with a .dds suffix.
18-27
void FDirectXRootSignature::BuildRootSignature(UINT InTextureNumber) {
const int CountBuffer = 5;
// ...
// SRV
CD3DX12_DESCRIPTOR_RANGE DescriptorRangeTextureSRV;
DescriptorRangeTextureSRV.Init(D3D12_DESCRIPTOR_RANGE_TYPE_SRV, InTextureNumber, 4);
RootParameters[0].InitAsDescriptorTable(1, &DescriptorRangeObjCBV);
RootParameters[1].InitAsDescriptorTable(1, &DescriptorRangeViewPortCBV);
RootParameters[2].InitAsDescriptorTable(1, &DescriptorRangeMaterialCBV);
RootParameters[3].InitAsDescriptorTable(1, &DescriptorRangeLightCBV);
RootParameters[4].InitAsDescriptorTable(1, &DescriptorRangeTextureSRV, D3D12_SHADER_VISIBILITY_PIXEL);
How does this InTextureNumber get passed to the shader?
Through this:
void FRenderingPipeLine::BuildPipeline() {
// 。。。
D3D_SHADER_MACRO ShaderMacro[] = {
{"TEX_2D_MAP_NUM", "2"},
{nullptr, nullptr}
};
// build the shaders
VertexShader.BuildShaders(L"Shader/Hello.hlsl","VSMain","vs_5_0",ShaderMacro);
PixelShader.BuildShaders(L"Shader/Hello.hlsl","PSMain","ps_5_0",ShaderMacro);
PipelineState.BindShader(VertexShader, PixelShader);
Essentially it ends up as one of the parameters of D3DCompileFromFile.
You can see that once there are multiple textures,
SamplerState SimpleTextureState : register(s0);
Texture2D SimpleTexture2DMap[TEX_2D_MAP_NUM]: register(t4); // the 4 matches the 4 in the root signature
access becomes array-style:
Material.BaseColor = SimpleTexture2DMap[0].Sample(SimpleTextureState, MVOut.TexCoord) * BaseColor;
18-28 adjustments
string TextureNumber = to_string(GeometryMap.GetDrawTextureNumber());
D3D_SHADER_MACRO ShaderMacro[] = {
{"TEX_2D_MAP_NUM", TextureNumber.c_str()},
{nullptr, nullptr}
};
The Material class gains a new field, BaseColorIndexKey, which acts as the index key for this material's texture. Material also now supports having a texture assigned to it.
void FGeometryMap::UpdateCalculations(float DeltaTime, const FViewportInfo &ViewportInfo) {
XMMATRIX ViewMatrix = XMLoadFloat4x4(&ViewportInfo.ViewMatrix);
XMMATRIX ATRIXProjection = XMLoadFloat4x4(&ViewportInfo.ProjectionMatrix);
for (int i = 0; i < Geometries.size(); i++) {
for (size_t j = 0; j < Geometries[i].DescribeMeshRenderingData.size(); j++) {
FRenderingData &RenderingData = Geometries[i].DescribeMeshRenderingData[j];
// 。。。
// material
if (CMaterial *Material = (*RenderingData.Mesh->GetMaterials())[0]) {
FMaterialConstantBuffer MaterialConstantBuffer;
if (Material->IsDirty()) {
FVector4D BaseColor = Material->GetBaseColor();
MaterialConstantBuffer.BaseColor = XMFLOAT4(BaseColor.X, BaseColor.Y, BaseColor.Z, BaseColor.W);
MaterialConstantBuffer.MaterialType = static_cast<int>(Material->GetMaterialType());
MaterialConstantBuffer.MaterialRoughness = Material->GetMaterialRoughness();
MaterialConstantBuffer.TransformInfo = Material->GetMaterialTransform();
XMStoreFloat4x4(&MaterialConstantBuffer.TransformInfo, XMMatrixTranspose(XMLoadFloat4x4(&MaterialConstantBuffer.TransformInfo)));
if (auto Ptr = TextureResUpdate->FindRenderingTexture(Material->GetBaseColorIndexKey())) {
MaterialConstantBuffer.BaseColorID = (*Ptr)->RenderingTextureID;
} else {
MaterialConstantBuffer.BaseColorID = -1;
}
Material->SetDirty(false);
}
MaterialConstantBufferView.Update(j, &MaterialConstantBuffer);
}
}
}
// 。。。
}
Splitting the material out into an SRV
18-29/31/end texture/end fix bug
Previously the material went through b2, a constant buffer; in effect we defined our own data structure and passed it to the shader.
cbuffer MaterialConstantBuffer : register(b2)
{
int MaterialType;
float MaterialRoughness; // roughness
int BaseColorID;
int _MPadding2; // padding for alignment
float4 BaseColor;
float4x4 TransformInfo;
};
RootParameters[2].InitAsDescriptorTable(1, &DescriptorRangeMaterialCBV);
Now it is this:
RootParameters[2].InitAsShaderResourceView(0, 1);
which means that chunk of the descriptor heap is no longer needed and is removed, so all the offset computations that depended on it have to change.
struct MaterialConstantBuffer
{
int MaterialType;
float MaterialRoughness; // roughness
int BaseColorID;
int _MPadding2; // padding for alignment
float4 BaseColor;
float4x4 TransformInfo;
};
StructuredBuffer<MaterialConstantBuffer> Materials : register(t0, space1); // InitAsShaderResourceView(0, 1);
Note the t0, space1 here.
The former constant buffer has turned into a StructuredBuffer,
which is then used just like an array.
The false here means it is no longer a CBV, so the 256-byte alignment is not required (see the note below).
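For context, D3D12 requires a constant buffer view's size to be a multiple of 256 bytes, so CBV-backed buffers normally round each element up along the lines of the well-known helper below; a structured buffer read through an SRV has no such rule, only the element stride matters. (The helper name is illustrative.)
// typical CBV size rounding; not needed on the structured-buffer (SRV) path
inline UINT AlignConstantBufferByteSize(UINT ByteSize) {
    return (ByteSize + 255) & ~255u;  // round up to the next multiple of 256
}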
Then the materials are counted:
void FGeometryMap::BuildMaterialShaderResourceView() {
// create the constant buffer
MaterialConstantBufferView.CreateConstantBufferView(sizeof(FMaterialConstantBuffer), GetDrawMaterialNumber(), false);
// collect the materials
int ShaderIndex = 0;
for (auto &Iter: Geometries) {
for (auto &Iter2: Iter.second.DescribeMeshRenderingData) {
for (auto &Iter3: *Iter2.Mesh->GetMaterials()) {
Iter3->SetMaterialIndex(ShaderIndex);
Materials.push_back(Iter3);
ShaderIndex++;
}
}
}
}
Inside UpdateCalculations it becomes this:
void FGeometryMap::UpdateMaterialShaderResourceView(float DeltaTime, const FViewportInfo &ViewportInfo) {
// material
FMaterialConstantBuffer MaterialConstantBuffer;
for(int i = 0; i < Materials.size(); i++) {
CMaterial *Material = Materials[i];
if (Material->IsDirty()) {
FVector4D BaseColor = Material->GetBaseColor();
MaterialConstantBuffer.BaseColor = XMFLOAT4(BaseColor.X, BaseColor.Y, BaseColor.Z, BaseColor.W);
MaterialConstantBuffer.MaterialType = static_cast<int>(Material->GetMaterialType());
MaterialConstantBuffer.MaterialRoughness = Material->GetMaterialRoughness();
MaterialConstantBuffer.TransformInfo = Material->GetMaterialTransform();
XMStoreFloat4x4(&MaterialConstantBuffer.TransformInfo, XMMatrixTranspose(XMLoadFloat4x4(&MaterialConstantBuffer.TransformInfo)));
if (auto Ptr = TextureResUpdate->FindRenderingTexture(Material->GetBaseColorIndexKey())) {
MaterialConstantBuffer.BaseColorID = (*Ptr)->RenderingTextureID;
} else {
MaterialConstantBuffer.BaseColorID = -1;
}
Material->SetDirty(false);
}
MaterialConstantBufferView.Update(Material->GetMaterialIndex(), &MaterialConstantBuffer);
}
}
And at draw time:
void FGeometryMap::DrawMaterial(float DeltaTime) {
GetD3dGraphicsCommandList()->SetGraphicsRootShaderResourceView(
2,
MaterialConstantBufferView.GetUploadBuffer()->GetGPUVirtualAddress());
}
To summarize:
BuildMaterialShaderResourceView creates this block of memory for the materials;
UpdateMaterialShaderResourceView writes the data into it;
DrawMaterial binds it in the draw commands;
and the root signature's RootParameters[2].InitAsShaderResourceView(0, 1); defines the register.
Wrapping up the material work
Version:
3d1e5dacff36fb819f52c94576b5068819f3c637
An overall look at the material side.
My memory of this part was muddled, which is why the per-commit review above is so detailed; let me summarize it here.
- CDirectXRenderingEngine
- Initialize (DX initialization)
- PostInitialize
- Create the models
- MeshMgr->BuildMesh();
- Submit the commands
- Tick (ignoring some of the DX plumbing)
- MeshMgr->PreDraw(DeltaTime);
- MeshMgr->Draw(DeltaTime);
- MeshMgr->PostDraw(DeltaTime);
At the CDirectXRenderingEngine level it is:
- mainly the DX initialization
- loading models and creating the vertex/index data
- building
- drawing in Tick
The DX-related initialization is skipped; it has been covered many times.
Starting from model loading.
Model creation
if (auto* Sphere = World->CreateActorObject<GSphereMesh>()) {
Sphere->CreateMesh(1.f, 20.f, 30.f);
Sphere->SetPosition(XMFLOAT3(0, 2, 0));
if (CMaterial* Material = Sphere->GetMaterials()->at(0)) {
Material->SetMaterialType(EMaterialType::BlinnPhong);
Material->SetBaseColor("Content/Tex/Wood2.dds");
Material->SetNormal("Content/Tex/Wood2_Nor.dds");
// Material->SetSpecular("Content/Tex/Wood2_SPEC.dds");
Material->SetSpecular(FVector3D(1.f, 1.f, 1.f));
}
}
Models are created this way: create an object and set its mesh info; for a sphere, for example, you pass in the radius and the subdivision levels.
Then set its position, lighting algorithm, textures, specular, and other data.
Inside CreateMesh, behind a pile of fancy templates and component machinery, what ultimately has to come out is the vertex and index data DX needs.
struct FVertex {
XMFLOAT3 Position;
XMFLOAT4 Color;
XMFLOAT3 Normal;
XMFLOAT3 UTangent;
XMFLOAT2 TexCoord;
};
struct FMeshRenderingData {
vector<FVertex> VertexData;
vector<uint16_t> IndexData;
};
Each vertex carries this data: position, color, normal, tangent along the U direction (for the TBN basis), and texture UV.
Each model produces one FMeshRenderingData object.
Position: xyz
Color: rgba
Normal: a vector, xyz
UTangent: the tangent, also an xyz vector, XMFLOAT3 (TODO: explain how it is computed)
TexCoord: the texture UV coordinates (TODO: expand on this)
I won't go into the mesh-generation algorithms (too long-winded); roughly, given a sphere radius, how to procedurally generate the points on the sphere, or how to load this data from a model file.
CMaterial, the material object
Every mesh type shares the base class GMesh and carries a MeshComponent-style base component, which holds a vector<CMaterial*> Materials;
That is the object-oriented description: one model has some number of materials.
CMaterial has a few member variables
describing the material: its color as an FVector4D (rgba), its roughness, and which texture it uses.
Textures were covered earlier: we load several images from under Content, and CMaterial keeps an ID association to one of them; it can also reference, say, a base color map or a normal map.
FVertex.TexCoord then says which (u, v) position of that texture this vertex takes its color from.
Summary: the model creation step is just object-oriented design work, defining model and material classes and properties and preparing some data; it has little to do with DX itself.
Building
The core calls are
MeshMgr->BuildMesh();
RenderingPipeLine.BuildPipeline();
Inside these we do a lot of DX-related work:
- Reset the PSO: PipelineState.ResetGPSDesc();
- Load the textures: GeometryMap.LoadTexture();
- Reads and loads every dds file under the Content asset directory, yielding DX resources of type ID3D12Resource
- Build the root signature: FDirectXRootSignature::BuildRootSignature
- This decides how CPU data binds to GPU registers; simply put, when C++ has a struct the GPU needs to read, the shader side knows it can fetch it from, say, b0.
- b0 is for the model, b1 for the viewport, b3 for the lights
- t4 is the textures; why it is t4 was explained above
- register(t0, space1) is for the materials, also explained above
- StaticSamplerObject, the static sampler, used by the Sample calls in the shader; it describes the sampling behavior (Luna book 9.7.3)
- Finally create the root signature
- Prepare the macro constants for the shaders; this has to happen before the shaders are built, so it is prepared up front
- Build the vertex and pixel shaders
- VertexShader.BuildShaders(L"Shader/Hello.hlsl","VSMain","vs_5_1",ShaderMacro);
- PixelShader.BuildShaders(L"Shader/Hello.hlsl","PSMain","ps_5_1",ShaderMacro);
- VertexShader comes from the file Shader/Hello.hlsl with entry point VSMain; the predefined macros in ShaderMacro pass constants in. PixelShader works the same way.
- Building essentially compiles the hlsl file into an ID3DBlob, which is needed later when filling in the PSO description
- Prepare the input layout, also needed later for the PSO description; hard to describe in words, but obvious once you see it (the offsets are checked right after the listing):
InputElementDesc = {
{"POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0},
{"COLOR", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 0, 12, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0},
{"NORMAL", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 28, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0},
{"TANGENT", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 40, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0},
{"TEXCOORD", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 52, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0}
};
-
Build the default buffers for the Vertex and Index data => GeometryMap.Build();
- Essentially a ComPtr
- Needed when issuing the draw commands ->IASetIndexBuffer(&IBV); and IASetVertexBuffers(0, 1, &VBV);
- VBV and IBV are a D3D12_VERTEX_BUFFER_VIEW and a D3D12_INDEX_BUFFER_VIEW,
- views/descriptors: pointer-like objects that point at and describe the vertex/index buffer resources
- In the model-creation stage earlier this was just plain data; here it is created as an ID3D12Resource, which is what gets used later.
- Essentially a ComPtr
-
Build the descriptor heap => GeometryMap.BuildDescriptorHeap();
- DescriptorHeap.BuildDescriptorHeap(GetDrawMeshObjectNumber() + GetDrawLightNumber() + 1 + GetDrawTextureNumber());
- We get a block of memory called the descriptor heap; a descriptor heap is essentially an array of descriptors (descriptor and view are synonyms), and each descriptor points at and describes an ID3D12Resource
- It is divided into GetDrawMeshObjectNumber() + GetDrawLightNumber() + 1 + GetDrawTextureNumber() slots
- Each slot is the size of one D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV descriptor (the handle increment size)
- This step only creates the descriptor heap
- Create the mesh constant buffers => GeometryMap.BuildMeshConstantBuffer();
    - starting at the heap's GetCPUDescriptorHandleForHeapStart address, the first GetDrawMeshObjectNumber() slots are used to pass per-mesh data to the GPU
    - there is a class called FConstantBufferView that wraps the constant buffer; one is built here
- In the same way, build the material, light, viewport and texture resources:
    - GeometryMap.BuildMaterialShaderResourceView();
    - GeometryMap.BuildLightConstantBuffer();
    - GeometryMap.BuildViewportConstantBufferView();
    - GeometryMap.BuildTextureConstantBuffer();
- Finally, build the PSO => PipelineState.Build();
    - which is really just filling in the D3D12_GRAPHICS_PIPELINE_STATE_DESC struct
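For reference, the sketch promised above: the classic default-buffer upload pattern that GeometryMap.Build() presumably follows (the d3dUtil::CreateDefaultBuffer idea from the Luna book; this is not the engine's actual code, the function name is illustrative, and error handling is omitted).

// Copy CPU-side vertex/index data into a DEFAULT-heap buffer through an intermediate
// UPLOAD-heap buffer. Requires the d3dx12.h helper header. The upload buffer must stay
// alive until the command list has actually executed on the GPU.
#include <d3dx12.h>
#include <wrl/client.h>
using Microsoft::WRL::ComPtr;

ComPtr<ID3D12Resource> CreateDefaultBuffer(
    ID3D12Device* Device, ID3D12GraphicsCommandList* CmdList,
    const void* InitData, UINT64 ByteSize, ComPtr<ID3D12Resource>& UploadBuffer)
{
    ComPtr<ID3D12Resource> DefaultBuffer;
    auto DefaultHeap = CD3DX12_HEAP_PROPERTIES(D3D12_HEAP_TYPE_DEFAULT);
    auto UploadHeap = CD3DX12_HEAP_PROPERTIES(D3D12_HEAP_TYPE_UPLOAD);
    auto BufferDesc = CD3DX12_RESOURCE_DESC::Buffer(ByteSize);
    // The GPU-only buffer that the VBV/IBV will later point at.
    Device->CreateCommittedResource(&DefaultHeap, D3D12_HEAP_FLAG_NONE, &BufferDesc,
        D3D12_RESOURCE_STATE_COMMON, nullptr, IID_PPV_ARGS(&DefaultBuffer));
    // The CPU-visible staging buffer.
    Device->CreateCommittedResource(&UploadHeap, D3D12_HEAP_FLAG_NONE, &BufferDesc,
        D3D12_RESOURCE_STATE_GENERIC_READ, nullptr, IID_PPV_ARGS(&UploadBuffer));

    D3D12_SUBRESOURCE_DATA SubResourceData = {};
    SubResourceData.pData = InitData;
    SubResourceData.RowPitch = static_cast<LONG_PTR>(ByteSize);
    SubResourceData.SlicePitch = SubResourceData.RowPitch;

    auto ToCopyDest = CD3DX12_RESOURCE_BARRIER::Transition(DefaultBuffer.Get(),
        D3D12_RESOURCE_STATE_COMMON, D3D12_RESOURCE_STATE_COPY_DEST);
    CmdList->ResourceBarrier(1, &ToCopyDest);
    // Schedules the copy from the upload buffer into the default buffer.
    UpdateSubresources<1>(CmdList, DefaultBuffer.Get(), UploadBuffer.Get(), 0, 0, 1, &SubResourceData);
    auto ToRead = CD3DX12_RESOURCE_BARRIER::Transition(DefaultBuffer.Get(),
        D3D12_RESOURCE_STATE_COPY_DEST, D3D12_RESOURCE_STATE_GENERIC_READ);
    CmdList->ResourceBarrier(1, &ToRead);
    return DefaultBuffer;
}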
Drawing in Tick
- PreDraw
    - PipelineState.PreDraw(DeltaTime);
        - resets the PSO
- Draw
    - GeometryMap.PreDraw(DeltaTime); sets the descriptor heap to the one created in Init above
    - RootSignature.PreDraw(DeltaTime); sets the root signature to the one created in Init above
    - GeometryMap.Draw(DeltaTime);
        - DrawViewport(DeltaTime);
        - DrawLight(DeltaTime);
        - DrawTexture(DeltaTime);
        - DrawMaterial(DeltaTime);
        - DrawMesh(DeltaTime);
        - i.e. draw the viewport, lights, textures, materials and meshes
        - essentially, each Tick calls SetGraphicsRootDescriptorTable to hand the descriptor-heap data to the GPU registers
        - when data changes, you modify the C++ object, the FConstantBufferView constant buffer gets the new bytes copied in through a void* pointer, and since the constant buffer is itself a slot in the descriptor heap, the next SetGraphicsRootDescriptorTable in Tick delivers it to the GPU register again; that is the whole loop (see the sketch after this list)
    - PipelineState.Draw(DeltaTime);
        - currently only handles key-input related work
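To make the "copy a void* into the constant buffer" item above concrete, here is a minimal sketch of the usual D3D12 persistently mapped upload-buffer update path (illustrative names and types only; the engine's FConstantBufferView is not quoted in this excerpt and may differ):

// One slot per object; the CBV built in Init already points at the slot, so a memcpy
// here plus the per-Tick SetGraphicsRootDescriptorTable is all an update takes.
#include <d3d12.h>
#include <cstdint>
#include <cstring>

// Constant buffer sizes must be multiples of 256 bytes.
inline unsigned int AlignConstantBufferByteSize(unsigned int ByteSize)
{
    return (ByteSize + 255u) & ~255u;
}

struct FConstantBufferSliceSketch
{
    ID3D12Resource* UploadBuffer = nullptr; // created on D3D12_HEAP_TYPE_UPLOAD, Map() called once
    std::uint8_t* MappedData = nullptr;     // pointer returned by Map(0, nullptr, ...)
    unsigned int ElementByteSize = 0;       // aligned size of one slot

    // Copy the CPU-side struct for slot `Index` into the mapped upload memory.
    void Update(unsigned int Index, const void* Data, std::size_t Size) const
    {
        std::memcpy(MappedData + std::size_t(Index) * ElementByteSize, Data, Size);
    }
};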
Right after this the log moves into 22-; what follows are only small adjustments...
Layered rendering
From 22-02 through 22-end.
This part is about layered rendering.
D:\UE_Project\cwlengine>git log
commit 6bb1395b175b0116d4250dca741f6daeaff35052 (HEAD -> master)
Author: cwl\chenweilin <weilin1013@qq.com>
Date: Wed Jan 10 00:05:19 2024 +0800
22-end
What this chapter implements is having multiple PSOs. The point of multiple PSOs, intuitively, is that different objects can use different settings:
one group of objects uses shader A while another uses shader B, or one group is drawn as wireframe while another is solid-filled, and so on.
The naive idea is a render-layer manager where each render layer owns one PSO. When an object is created it must be assigned a render layer, and a reference to the object is added to that layer.
Another question: does the order in which the layers are rendered matter? What happens when object A and object B render to the same place? What if part of an object's texture has an alpha of 0?
That question corresponds to 22-15, the transparent render layer, and to chapter 10 of the Luna book (blending). A sketch of the blend state such a layer would typically use is just below.
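For reference, the standard alpha-blend render-target description from Luna chapter 10 (an assumption about what this engine's transparent layer sets, not code from the repo):

// SrcAlpha / InvSrcAlpha blending for the first render target of a transparent layer's PSO.
#include <d3d12.h>
D3D12_RENDER_TARGET_BLEND_DESC MakeTransparencyBlendDesc()
{
    D3D12_RENDER_TARGET_BLEND_DESC Desc = {};
    Desc.BlendEnable = TRUE;
    Desc.LogicOpEnable = FALSE;
    Desc.SrcBlend = D3D12_BLEND_SRC_ALPHA;      // color = src.a * src.rgb
    Desc.DestBlend = D3D12_BLEND_INV_SRC_ALPHA; //       + (1 - src.a) * dest.rgb
    Desc.BlendOp = D3D12_BLEND_OP_ADD;
    Desc.SrcBlendAlpha = D3D12_BLEND_ONE;
    Desc.DestBlendAlpha = D3D12_BLEND_ZERO;
    Desc.BlendOpAlpha = D3D12_BLEND_OP_ADD;
    Desc.LogicOp = D3D12_LOGIC_OP_NOOP;
    Desc.RenderTargetWriteMask = D3D12_COLOR_WRITE_ENABLE_ALL;
    return Desc; // assigned into GPSDesc.BlendState.RenderTarget[0] when building that layer's PSO
}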
When building the data, a copy is pushed into the InRenderLayer render layer:
void FGeometry::BuildMesh(size_t HashKey, CMeshComponent *InMesh, const FMeshRenderingData &InMeshData, int InGeometryKey) {
if (IsRenderingDataExist(InMesh)) {
return;
}
if (std::shared_ptr<FRenderLayer> InRenderLayer = FRenderLayerMgr::FindRenderLayer(InMesh->GetMeshRenderingType())) {
InRenderLayer->RenderingDatas.push_back(std::move(FRenderingData()));
auto &RenderingData = InRenderLayer->RenderingDatas.back();
// ...
}
}
When DrawMesh runs, each FRenderLayer simply Draws in order, and that is it:
void FRenderLayer::Draw(float DeltaTime) {
UINT DescriptorOffset = GetD3dDevice()->GetDescriptorHandleIncrementSize(D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
for (auto &InRenderingData: RenderingDatas) {
D3D12_VERTEX_BUFFER_VIEW VBV = GeometryMap->Geometries[InRenderingData.GeometryKey].GetVertexBufferView();
D3D12_INDEX_BUFFER_VIEW IBV = GeometryMap->Geometries[InRenderingData.GeometryKey].GetIndexBufferView();
auto DesMeshHandle = CD3DX12_GPU_DESCRIPTOR_HANDLE(GeometryMap->GetDescriptorHeap()->GetGPUDescriptorHandleForHeapStart());
GetD3dGraphicsCommandList()->IASetIndexBuffer(&IBV);
GetD3dGraphicsCommandList()->IASetVertexBuffers(
0, // StartSlot: input slot, 0 ~ 15
1, // NumViews: number of vertex buffers bound to slots StartSlot .. StartSlot + NumViews - 1
&VBV);
if (InRenderingData.Mesh->GetMaterials()->size() > 0) {
CMaterial *Material = (*InRenderingData.Mesh->GetMaterials())[0];
EMaterialDisplayStatusType MaterialDisplayStatus = Material->GetMaterialDisplayStatus();
GetD3dGraphicsCommandList()->IASetPrimitiveTopology(static_cast<D3D12_PRIMITIVE_TOPOLOGY>(MaterialDisplayStatus));
} else {
GetD3dGraphicsCommandList()->IASetPrimitiveTopology(D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
}
// mesh offset
DesMeshHandle.Offset(InRenderingData.MeshObjectIndex, DescriptorOffset);
GetD3dGraphicsCommandList()->SetGraphicsRootDescriptorTable(0, DesMeshHandle);
GetD3dGraphicsCommandList()->DrawIndexedInstanced(
InRenderingData.IndexSize, // IndexCountPerInstance: number of indices drawn per instance
1, // InstanceCount: number of instances
InRenderingData.IndexOffsetPosition, // StartIndexLocation: first index the GPU reads from the index buffer
InRenderingData.VertexOffsetPosition, // BaseVertexLocation: value added to each index before fetching from the vertex buffer
0 // StartInstanceLocation
);
}
}
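The layer manager itself is not quoted above, so here is a minimal sketch of the idea described earlier (layers kept sorted by priority and drawn in order, which is what makes "transparent after opaque" work; names are illustrative, not the engine's):

#include <algorithm>
#include <memory>
#include <vector>

struct IRenderLayerSketch
{
    virtual ~IRenderLayerSketch() = default;
    virtual int GetRenderPriority() const = 0;
    virtual void Draw(float DeltaTime) = 0;
};

struct FRenderLayerMgrSketch
{
    std::vector<std::shared_ptr<IRenderLayerSketch>> Layers;

    void Register(std::shared_ptr<IRenderLayerSketch> Layer)
    {
        Layers.push_back(std::move(Layer));
        // Lower priority draws first (background/opaque), higher priority later (transparent).
        std::sort(Layers.begin(), Layers.end(),
            [](const auto& A, const auto& B) { return A->GetRenderPriority() < B->GetRenderPriority(); });
    }

    void DrawAll(float DeltaTime)
    {
        for (auto& Layer : Layers) { Layer->Draw(DeltaTime); }
    }
};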
Skybox and fog
commit d3bd3ce6a977dc1c95d95d2c41381952b678a8d9 (HEAD -> master)
Author: cwl\chenweilin <weilin1013@qq.com>
Date: Wed Jan 24 01:23:40 2024 +0800
Hahahaha.
The version has arrived here.
This should be around 24-
A skybox is really just a big inverted box with a texture on it. Nothing new.
The fog implementation was covered in an earlier note.
Rendering reflections
The log starts from 26-
There is also an earlier note on this; here is a quick recap.
26-02/03: sampling the CubeMap
The C++ part is skipped; the material constant buffer gains two new fields:
SpecularColor, the specular reflection color, and FresnelF0, the Fresnel reflectance at normal incidence.
struct MaterialConstantBuffer
{
int MaterialType;
float MaterialRoughness; // roughness
int BaseColorIndex;
int NormalIndex;
int SpecularIndex;
int MPadding1;
int MPadding2;
int MPadding3;
float4 BaseColor;
float3 SpecularColor;
int MPadding4;
float3 FresnelF0;
float MPadding5;
float4x4 TransformInfo;
};
StructuredBuffer<MaterialConstantBuffer> Materials : register(t0, space1); // InitAsShaderResourceView(0, 1);
GetReflectionColor is implemented as the static reflection.
The result is simple: the reflected color is added onto the final color.
float3 ReflectionColor = GetReflectionColor(MatConstantBuffer, ModelNormal, MVOut.WorldPosition);
MVOut.Color.xyz += ReflectionColor;
The parameters are: the material buffer, i.e. the material parameters we just passed over as an SRV (the C++ struct above),
and the model's normal in world space (see the beginning of this blog).
Getting the reflection vector does exactly what the name says:
float3 GetReflec(float3 InUnitWorldNormal, float3 WorldPosition)
{
float3 ViewDirection = normalize(ViewportPosition.xyz - WorldPosition);
return reflect(-ViewDirection, InUnitWorldNormal).xyz;
}
float3 GetReflectionSampleColor(float3 NewReflect)
{
return SimpleCubeMap.Sample(TextureSamplerState, NewReflect).rgb;
}
Its parameter is the return value of the previous function. So what exactly does this Sample sample?
TextureCube SimpleCubeMap: register(t0);
And what on earth is t0?
It is in the root signature:
// SRV
CD3DX12_DESCRIPTOR_RANGE DescriptorRangeTextureSRV;
DescriptorRangeTextureSRV.Init(D3D12_DESCRIPTOR_RANGE_TYPE_SRV, InTextureNumber, 1);
CD3DX12_DESCRIPTOR_RANGE DescriptorRangeCubeMapSRV;
DescriptorRangeCubeMapSRV.Init(D3D12_DESCRIPTOR_RANGE_TYPE_SRV, 1, 0);
// ...
RootParameters[4].InitAsShaderResourceView(0, 1); // Material
RootParameters[5].InitAsDescriptorTable(1, &DescriptorRangeTextureSRV, D3D12_SHADER_VISIBILITY_PIXEL); // Texture
RootParameters[6].InitAsDescriptorTable(1, &DescriptorRangeCubeMapSRV, D3D12_SHADER_VISIBILITY_PIXEL); // CubeMap
So what lives at t0? Looking at the descriptor ranges:
starting at t0 there is one entry, DescriptorRangeCubeMapSRV.Init(D3D12_DESCRIPTOR_RANGE_TYPE_SRV, 1, 0);
starting at t1 there are InTextureNumber entries,
matching Texture2D SimpleTexture2DMap[TEX_2D_MAP_NUM]: register(t1);
And when does the data actually get in there?
In Draw, where the descriptor-heap contents are handed from CPU to GPU:
void FGeometryMap::DrawTexture(float DeltaTime)
{
UINT DescriptorOffset = GetD3dDevice()->GetDescriptorHandleIncrementSize(D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
// Texture
CD3DX12_GPU_DESCRIPTOR_HANDLE DescHandle(GetDescriptorHeap()->GetGPUDescriptorHandleForHeapStart());
DescHandle.Offset(0, DescriptorOffset);
GetD3dGraphicsCommandList()->SetGraphicsRootDescriptorTable(5, DescHandle);
// CubeMap
CD3DX12_GPU_DESCRIPTOR_HANDLE CubeMapDescHandle(GetDescriptorHeap()->GetGPUDescriptorHandleForHeapStart());
CubeMapDescHandle.Offset(static_cast<int>(GetDrawTextureNumber()), DescriptorOffset);
GetD3dGraphicsCommandList()->SetGraphicsRootDescriptorTable(6, CubeMapDescHandle);
}
Now find where the data was written:
void FGeometryMap::BuildTextureConstantBuffer()
{
TextureResUpdate->BuildTextureConstantBuffer(GetDescriptorHeap(), 0);
CubeMapResUpdate->BuildTextureConstantBuffer(GetDescriptorHeap(), GetDrawTextureNumber());
}
TextureResUpdate writes the textures starting at slot 0,
and CubeMapResUpdate starts writing at slot GetDrawTextureNumber().
Both are FRenderingTextureResUpdate objects, a manager mapping texture names to texture objects,
each built by loading a dds through CreateDDSTextureFromFile12.
float3 GetReflectionColor(MaterialConstantBuffer MatConstantBuffer, float3 InUnitWorldNormal, float3 InWorldPosition)
{
float3 InReflect = GetReflec(InUnitWorldNormal, InWorldPosition);
float3 SampleReflectionColor = GetReflectionSampleColor(InUnitWorldNormal, InReflect);
float3 Shininess = GetShininess(MatConstantBuffer);
float3 FresnelFactor = FresnelSchlickFactor(MatConstantBuffer, InUnitWorldNormal, InReflect);
return SampleReflectionColor * Shininess * FresnelFactor;
}
Step one: get the reflection vector.
Step two: use that vector to fetch a color from the cubemap; that is what a TextureCube is for, you give it a direction and get a color.
The color then goes through some processing, e.g. roughness and the Fresnel effect, before being applied to the final result. A sketch of the usual Schlick Fresnel term follows.
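GetShininess and FresnelSchlickFactor are not quoted above. Assuming the engine uses the usual Schlick approximation from the Luna book, the Fresnel part looks roughly like this (a CPU-side C++ sketch of the math, not the engine's HLSL):

// Schlick approximation: R(theta) = F0 + (1 - F0) * (1 - cos(theta))^5
#include <algorithm>
#include <cmath>

struct Float3 { float x, y, z; };

Float3 SchlickFresnel(const Float3& F0, float CosIncidentAngle)
{
    const float F = std::pow(1.0f - std::clamp(CosIncidentAngle, 0.0f, 1.0f), 5.0f);
    return { F0.x + (1.0f - F0.x) * F,
             F0.y + (1.0f - F0.y) * F,
             F0.z + (1.0f - F0.z) * F };
}
// The result is then multiplied with the sampled reflection color (and a shininess term),
// which is what GetReflectionColor does in the shader above.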
26-5
A reflection setup like this can only reflect whatever is baked into the cubemap. We want a camera placed at the object that captures the six face directions, so those captures can be applied to the reflective object.
GCamera
GClientViewport
A WinEngine owns one World object, and the World owns one GCamera object.
The camera's base class chain includes FViewport:
class FViewport
{
public:
XMFLOAT4X4 ViewMatrix;
XMFLOAT4X4 ProjectionMatrix;
};
Once it holds the view and projection matrices, the pipeline knows where the camera is.
class GClientViewport : public GActorObject, public FViewport
{
private:
float Fov;
float Aspect;
float Near;
float Far;
bool bDirty;
};
GClientViewport then adds the parameters a camera ought to have, such as the FOV.
class GCamera
: public GClientViewport
, public IDirectXDeviceInterface
{
using Super = GClientViewport;
public:
GCamera();
// ...
protected:
POINT LastMousePosition;
bool bRightMouseDown;
private:
CVARIABLE()
CInputComponent* InputComponent;
ECameraType CameraType;
// spherical coordinates (R, Theta, Phi)
float Radius;
float Theta;
float Phi;
};
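Given the (Radius, Theta, Phi) members, the orbit camera presumably converts spherical coordinates to a position and builds a look-at view matrix. A sketch of that conversion (the conventions are assumed and may differ from GCamera's):

#include <DirectXMath.h>
#include <cmath>
using namespace DirectX;

XMMATRIX SphericalOrbitView(float Radius, float Theta, float Phi, FXMVECTOR Target)
{
    // Convert spherical coordinates to a Cartesian eye position around the target.
    const float x = Radius * sinf(Phi) * cosf(Theta);
    const float z = Radius * sinf(Phi) * sinf(Theta);
    const float y = Radius * cosf(Phi);
    const XMVECTOR Eye = XMVectorAdd(Target, XMVectorSet(x, y, z, 0.0f));
    const XMVECTOR Up = XMVectorSet(0.0f, 1.0f, 0.0f, 0.0f);
    return XMMatrixLookAtLH(Eye, Target, Up); // this is the ViewMatrix stored in FViewport
}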
26-7 ~ 26-9 implement CubeMapRenderTarget.
It is a wrapper around an ID3D12Resource, plus width, height, format, viewport and scissor-rect parameters, describing one texture:
class FCubeMapRenderTarget
: public IDirectXDeviceInterface,
std::enable_shared_from_this<FCubeMapRenderTarget>
{
private:
UINT Width;
UINT Height;
DXGI_FORMAT Format;
D3D12_VIEWPORT Viewport;
D3D12_RECT ScissorRect;
ComPtr<ID3D12Resource> RenderTargetMap;
CD3DX12_CPU_DESCRIPTOR_HANDLE CPUShaderResourceView;
CD3DX12_CPU_DESCRIPTOR_HANDLE CPURenderTargetView[6];
};
We create 6 render target views; recall that RTVs were used earlier for the swap chain.
void FCubeMapRenderTarget::BuildRTVDescriptors()
{
for (size_t i = 0; i < 6; i++)
{
D3D12_RENDER_TARGET_VIEW_DESC RTVDesc = {};
RTVDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2DARRAY;
RTVDesc.Format = Format;
// MipSlice + ArraySlice * MipLevels ???
RTVDesc.Texture2DArray.MipSlice = 0;
RTVDesc.Texture2DArray.PlaneSlice = 0;
RTVDesc.Texture2DArray.FirstArraySlice = i;
RTVDesc.Texture2DArray.ArraySize = 1; // only one texture for each RTV
GetD3dDevice()->CreateRenderTargetView(RenderTargetMap.Get(), &RTVDesc, CPURenderTargetView[i]);
}
}
And the SRV:
recall that FRenderingTextureResUpdate, which creates the textures, also used CreateShaderResourceView.
void FCubeMapRenderTarget::BuildSRVDescriptors()
{
D3D12_SHADER_RESOURCE_VIEW_DESC SRVDesc = {};
SRVDesc.Shader4ComponentMapping = D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING;
SRVDesc.Format = Format;
SRVDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURECUBE;
SRVDesc.TextureCube.MostDetailedMip = 0;
SRVDesc.TextureCube.MipLevels = 1;
SRVDesc.TextureCube.ResourceMinLODClamp = 0.f;
GetD3dDevice()->CreateShaderResourceView(RenderTargetMap.Get(), &SRVDesc, CPUShaderResourceView);
}
26-11
Additions and changes:
DynamicCubeMap.h
CubeMapRenderTarget.h
What they are for will be summarized together later.
26-12
Adds DynamicCubeMap's BuildDepthStencil.
26-13
The pipeline flow was adjusted; really the code just moved to another place.
void FRenderingPipeLine::Draw(float DeltaTime)
{
// ...
// main viewport
GeometryMap.DrawViewport(DeltaTime);
// ...
}
Other changes are skipped for now.
26-14: moving code around
26-16: increases the RTV and DSV descriptor counts, adding the CubeMap's 6 RTVs and 1 DSV
26-17: DynamicCubeMap adjustments
...
26-19: a later log entry fixes this; no visible effect yet, keep reading
26-20: adds a render layer for reflective opaque objects
A new render layer: after the opaque objects are rendered, the opaque reflective objects (e.g. mirrors) are rendered, so the mirror has something to reflect.
26-21
Sets the reflective sphere's parameters and adds the DrawCubeMapTexture step.
...
26-23
Adds the dynamic-reflection flag; only objects with this flag set will capture and reflect their surroundings.
Progress has reached this point; reflections should be complete here. Time for an overall review.
D:\UE_Project\cwlengine>git log
commit 8ddea6becc5565a16479e2c34ee4816b97d01158 (HEAD -> master)
Author: cwl\chenweilin <weilin1013@qq.com>
Date: Sun Feb 4 03:08:35 2024 +0800
调整保存
Summary: reflections
Starting from CDirectXRenderingEngine:
the RTV descriptor heap size is now [swap chain: 2] + [CubeMap: 6], and the DSV heap is 1 + 1, the extra one also being for the CubeMap.
[Luna book 4.1.6]
:::info
A heap is an array of descriptors, a block of memory that stores descriptors.
View and descriptor are synonyms.
:::
Every resource DX touches is uniformly an ID3D12Resource, which is just a block of memory.
Different views/descriptors can interpret it for different purposes; e.g. the same ID3D12Resource can back both an RTV and an SRV (or a DSV and an SRV, as the shadow map does later).
The scene has two test spheres.
They are set to eOpaqueReflect, i.e. they live in the opaque-reflective layer.
SetDynamicReflection(true) gives the object dynamic-reflection capability.
Because of the layered rendering from before, the skybox and the opaque objects are rendered first, so the objects rendered afterwards have something to reflect.
Run it: two spheres in the scene reflecting their surroundings.
The rest is just setting the Fresnel parameters, roughness, and so on:
if (auto* Sphere = World->CreateActorObject<GSphereMesh>())
{
Sphere->SetMeshRenderingType(eOpaqueReflect);
Sphere->CreateMesh(1.f, 20.f, 30.f);
Sphere->SetPosition(XMFLOAT3(6, 4, 0));
if (CMaterial* Material = Sphere->GetMaterials()->at(0))
{
Material->SetDynamicReflection(true);
Material->SetMaterialType(BlinnPhong);
// Material->SetBaseColor("Content/Tex/Earth.dds");
Material->SetSpecular(FVector3D(1.f, 1.f, 1.f));
Material->SetFresnelF0(FVector3D(0.07f, 0.07f, 0.07f));
Material->SetMaterialRoughness(0.1f);
}
}
After the scene objects are created, and before Tick, it is time to initialize the pipeline.
The flow reaches FRenderingPipeLine::BuildPipeline().
What is new compared to before?
The creation of DynamicCubeMap:
void FRenderingPipeLine::BuildPipeline()
{
// ...
// build the dynamic CubeMap
DynamicCubeMap.Init(&GeometryMap, &PipelineState, &RenderLayerMgr);
DynamicCubeMap.BuildDepthStencilDesc();
DynamicCubeMap.BuildDepthStencil();
// ...
GeometryMap.BuildDynamicReflectionMesh();
// compare this in a moment with the root signature and the descriptor heap contents
GeometryMap.BuildDescriptorHeap();
// any position works here, because UpdateCalculations will refresh it later
DynamicCubeMap.BuildViewport(FVector3D(0, 0, 0));
// build the CubeMap render target
DynamicCubeMap.BuildRenderTargetDesc();
// ...
}
Init is just passing references.
void FDynamicCubeMap::Init(FGeometryMap* InGeometryMap, FDirectXPipelineState* InIDirectXPipelineState, FRenderLayerMgr* InRenderLayerMgr)
{
GeometryMap = InGeometryMap;
IDirectXPipelineState = InIDirectXPipelineState;
RenderLayerMgr = InRenderLayerMgr;
}
The dynamic cube map's depth-stencil descriptor:
void FDynamicCubeMap::BuildDepthStencilDesc()
{
DSVDesc = CD3DX12_CPU_DESCRIPTOR_HANDLE(
GetDsvHeap()->GetCPUDescriptorHandleForHeapStart(),
1,
GetDescriptorHandleIncrementSizeDSV());
}
BuildDepthStencilDesc uses the extra DSV slot added earlier, hence the offset of 1.
BuildDepthStencil then creates the corresponding depth-stencil resource. [Important: this is where the DSV is created]
BuildDynamicReflectionMesh walks all the Actors looking for objects flagged as reflective, and only counts them; after all, each reflective object needs 6 cameras, which affects the size of the viewport constant buffer.
BuildDescriptorHeap: check what goes into the descriptor heap. Size-wise, just one extra slot is appended for the cubemap.
void FGeometryMap::BuildDescriptorHeap()
{
DescriptorHeap.BuildDescriptorHeap(
0 + // just for tidy formatting
GetDrawTextureNumber() + // Texture
GetCubeMapNumber() + // Static CubeMap like Sky
1); // DynamicCubeMap
}
But the root signature does not seem to use this slot???
FDynamicCubeMap::BuildViewport
Once a reflective object's position is fixed, the six direction-facing camera objects can be derived; they are pre-created here so their view matrices are easy to fetch later (a sketch of the usual six-face setup follows).
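A sketch of the usual six-face camera setup (in the spirit of the Luna book's dynamic cube map sample; this is an assumption about what BuildViewport prepares, not the engine's code):

#include <DirectXMath.h>
using namespace DirectX;

void BuildCubeFaceViews(FXMVECTOR Center, XMMATRIX OutViews[6])
{
    XMFLOAT3 C;
    XMStoreFloat3(&C, Center);
    // Look-at targets along +X, -X, +Y, -Y, +Z, -Z.
    const XMFLOAT3 Targets[6] = {
        {C.x + 1.f, C.y, C.z}, {C.x - 1.f, C.y, C.z},
        {C.x, C.y + 1.f, C.z}, {C.x, C.y - 1.f, C.z},
        {C.x, C.y, C.z + 1.f}, {C.x, C.y, C.z - 1.f}};
    // Up vectors: +Y everywhere except the two vertical faces, which would be degenerate.
    const XMFLOAT3 Ups[6] = {
        {0.f, 1.f, 0.f}, {0.f, 1.f, 0.f},
        {0.f, 0.f, -1.f}, {0.f, 0.f, 1.f},
        {0.f, 1.f, 0.f}, {0.f, 1.f, 0.f}};
    for (int i = 0; i < 6; ++i)
    {
        OutViews[i] = XMMatrixLookAtLH(Center, XMLoadFloat3(&Targets[i]), XMLoadFloat3(&Ups[i]));
    }
    // The matching projection is a 90-degree FOV, aspect-1 perspective:
    // XMMatrixPerspectiveFovLH(XM_PIDIV2, 1.0f, NearZ, FarZ).
}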
The next function has quite a lot going on:
void FDynamicCubeMap::BuildRenderTargetDesc()
{
BuildRenderTargetRTV();
BuildRenderTargetSRV();
RenderTarget->Init(Width, Height, DXGI_FORMAT_R8G8B8A8_UNORM);
}
[Important: this is where the RTVs are created]
Here is the thing to get straight: what exactly does rendering an image need? RTV, DSV, SRV, it gets confusing fast.
void FDynamicCubeMap::BuildRenderTargetRTV()
{
FEngineRenderConfig* RenderConfig = FEngineRenderConfig::GetRenderConfig();
UINT RTVDescSize = GetDescriptorHandleIncrementSizeRTV();
auto RTVDescAddr = GetRtvHeap()->GetCPUDescriptorHandleForHeapStart();
for (size_t i = 0; i < 6; i++)
{
RenderTarget->CPURenderTargetView[i] = CD3DX12_CPU_DESCRIPTOR_HANDLE(
RTVDescAddr,
RenderConfig->SwapChainCount + i,
RTVDescSize);
}
}
The offset RenderConfig->SwapChainCount + i lands exactly in the 6 extra RTV slots reserved earlier. What is created here is the view (think of it as a pointer that points at and describes the DX resource).
Then comes BuildRenderTargetSRV:
void FDynamicCubeMap::BuildRenderTargetSRV()
{
auto CPUStart = GeometryMap->GetDescriptorHeap()->GetCPUDescriptorHandleForHeapStart();
auto GPUStart = GeometryMap->GetDescriptorHeap()->GetGPUDescriptorHandleForHeapStart();
auto CBVDescSize = GetDescriptorHandleIncrementSizeCBV_SRV_UAV();
RenderTarget->CPUShaderResourceView = CD3DX12_CPU_DESCRIPTOR_HANDLE(
CPUStart,
GeometryMap->GetDrawTextureNumber() + GeometryMap->GetCubeMapNumber(),
CBVDescSize);
RenderTarget->GPUShaderResourceView = CD3DX12_GPU_DESCRIPTOR_HANDLE(
GPUStart,
GeometryMap->GetDrawTextureNumber() + GeometryMap->GetCubeMapNumber(),
CBVDescSize);
}
Here CPUShaderResourceView and GPUShaderResourceView
use the offset GeometryMap->GetDrawTextureNumber() + GeometryMap->GetCubeMapNumber(),
which matches the extra +1 slot that BuildDescriptorHeap created for the dynamic cubemap.
Continuing, the next call is
RenderTarget->Init(Width, Height, DXGI_FORMAT_R8G8B8A8_UNORM);
with some plain member assignments stripped out:
void FCubeMapRenderTarget::ResetRenderTarget(const UINT& InWidth, const UINT& InHeight)
{
if (Width == InWidth && Height == InHeight)
{
return;
}
Width = InWidth;
Height = InHeight;
ResetViewport(Width, Height);
ResetScissorRect(Width, Height);
// the important part is these three
BuildRenderTargetMap();
BuildRTVDescriptors();
BuildSRVDescriptors();
}
BuildRenderTargetMap creates the resource, a ComPtr<ID3D12Resource>, via CreateCommittedResource:
void FCubeMapRenderTarget::BuildRenderTargetMap()
{
D3D12_RESOURCE_DESC RenderTargetMapDesc = {};
RenderTargetMapDesc.Dimension = D3D12_RESOURCE_DIMENSION_TEXTURE2D;
RenderTargetMapDesc.Alignment = 0;
RenderTargetMapDesc.Width = Width;
RenderTargetMapDesc.Height = Height;
RenderTargetMapDesc.DepthOrArraySize = 6;
RenderTargetMapDesc.MipLevels = 1;
RenderTargetMapDesc.Format = Format;
RenderTargetMapDesc.SampleDesc.Count = 1;
RenderTargetMapDesc.SampleDesc.Quality = 0;
RenderTargetMapDesc.Layout = D3D12_TEXTURE_LAYOUT_UNKNOWN;
RenderTargetMapDesc.Flags = D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET;
D3D12_CLEAR_VALUE RenderTargetMapClearValue = {};
RenderTargetMapClearValue.Format = Format;
RenderTargetMapClearValue.Color[0] = 0.0f;
RenderTargetMapClearValue.Color[1] = 0.0f;
RenderTargetMapClearValue.Color[2] = 0.0f;
RenderTargetMapClearValue.Color[3] = 1.0f;
auto Properties = CD3DX12_HEAP_PROPERTIES(D3D12_HEAP_TYPE_DEFAULT);
ThrowIfFailed(GetD3dDevice()->CreateCommittedResource(
&Properties,
D3D12_HEAP_FLAG_NONE,
&RenderTargetMapDesc,
D3D12_RESOURCE_STATE_COMMON,
&RenderTargetMapClearValue,
IID_PPV_ARGS(&RenderTargetMap)
));
}
The resource is described by D3D12_RESOURCE_DESC RenderTargetMapDesc:
Dimension is D3D12_RESOURCE_DIMENSION_TEXTURE2D and Flags is D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET (it may be used as a render target).
BuildSRVDescriptors
void FCubeMapRenderTarget::BuildSRVDescriptors()
{
D3D12_SHADER_RESOURCE_VIEW_DESC SRVDesc = {};
SRVDesc.Shader4ComponentMapping = D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING;
SRVDesc.Format = Format;
SRVDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURECUBE;
SRVDesc.TextureCube.MostDetailedMip = 0;
SRVDesc.TextureCube.MipLevels = 1;
SRVDesc.TextureCube.ResourceMinLODClamp = 0.f;
GetD3dDevice()->CreateShaderResourceView(RenderTargetMap.Get(), &SRVDesc, CPUShaderResourceView);
}
CreateShaderResourceView is the call that creates the SRV descriptor for that ComPtr<ID3D12Resource>.
BuildRTVDescriptors
void FCubeMapRenderTarget::BuildRTVDescriptors()
{
for (size_t i = 0; i < 6; i++)
{
D3D12_RENDER_TARGET_VIEW_DESC RTVDesc = {};
RTVDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2DARRAY;
RTVDesc.Format = Format;
// MipSlice + ArraySlice * MipLevels ???
RTVDesc.Texture2DArray.MipSlice = 0;
RTVDesc.Texture2DArray.PlaneSlice = 0;
RTVDesc.Texture2DArray.FirstArraySlice = i;
RTVDesc.Texture2DArray.ArraySize = 1; // only one texture for each RTV
GetD3dDevice()->CreateRenderTargetView(RenderTargetMap.Get(), &RTVDesc, CPURenderTargetView[i]);
}
}
Created via CreateRenderTargetView.
So the pattern is clear: we created a single resource, a ComPtr<ID3D12Resource>,
and then, with different calls,
CreateRenderTargetView
CreateShaderResourceView
created different descriptors for it, an RTV and an SRV.
What remains is the Tick-time part:
void FRenderingPipeLine::PreDraw(float DeltaTime)
void FRenderingPipeLine::Draw(float DeltaTime)
void FRenderingPipeLine::PostDraw(float DeltaTime)
which, put like that, says a lot and nothing at all.
float3 GetReflectionSampleColor(float3 NewReflect)
{
return SimpleCubeMap.Sample(TextureSamplerState, NewReflect).rgb;
}
The reflected color ultimately comes from this sample.
SamplerState TextureSamplerState : register(s0);
SamplerState AnisotropicSamplerState : register(s1);
Texture2D SimpleTexture2DMap[TEX_2D_MAP_NUM]: register(t1);
TextureCube SimpleCubeMap: register(t0);
SamplerState: the samplers live at s0 and s1, built here:
void FStaticSamplerObject::BuildStaticSampler()
{
StaticSamplers.push_back(CD3DX12_STATIC_SAMPLER_DESC(
0, // shaderRegister, s0
D3D12_FILTER_MIN_MAG_MIP_POINT)); // 18-16
StaticSamplers.push_back(CD3DX12_STATIC_SAMPLER_DESC(
1,
D3D12_FILTER_ANISOTROPIC,
D3D12_TEXTURE_ADDRESS_MODE_WRAP,
D3D12_TEXTURE_ADDRESS_MODE_WRAP,
D3D12_TEXTURE_ADDRESS_MODE_WRAP,
0,
8));
}
And the t0 / t1 side:
CD3DX12_DESCRIPTOR_RANGE DescriptorRangeTextureSRV;
DescriptorRangeTextureSRV.Init(D3D12_DESCRIPTOR_RANGE_TYPE_SRV, InTextureNumber, 1);
CD3DX12_DESCRIPTOR_RANGE DescriptorRangeCubeMapSRV;
DescriptorRangeCubeMapSRV.Init(D3D12_DESCRIPTOR_RANGE_TYPE_SRV, 1, 0);
Every frame the data is pushed into these, and the shader then reads its colors from them.
Now for how the cubemap itself gets updated.
It comes down to finding the DynamicCubeMap-related parts of
void FRenderingPipeLine::PreDraw(float DeltaTime)
void FRenderingPipeLine::Draw(float DeltaTime)
void FRenderingPipeLine::PostDraw(float DeltaTime)
inside Tick:
if (DynamicCubeMap.IsExitDynamicReflectionMesh())
{
DynamicCubeMap.PreDraw(DeltaTime);
}
void FDynamicCubeMap::PreDraw(float DeltaTime)
{
for (size_t j = 0; j < GeometryMap->DynamicReflectionMeshes.size(); j++)
{
// transition resource state
// ... barrier
// set view port and scissor rect
// ...
UINT CBVSize = GeometryMap->ViewportConstantBufferView.GetConstantBufferByteSize();
for (size_t i = 0; i < CUBE_MAP_VIEWPORT_NUM; i++)
{
// clear render target view
// ...
// clear depth stencil view
// ..
// OMSetRenderTargets
// ...
auto ViewportAddr = GeometryMap->ViewportConstantBufferView.GetUploadBuffer()->GetGPUVirtualAddress();
ViewportAddr += (1 + i + j * CUBE_MAP_VIEWPORT_NUM) * CBVSize; // MainViewport:1 + CubeMapViewport:6 for each mesh
GetD3dGraphicsCommandList()->SetGraphicsRootConstantBufferView(1, ViewportAddr); // 1 root parameter view port
RenderLayerMgr->Draw(EMeshRenderingType::eBackground, DeltaTime);
RenderLayerMgr->Draw(EMeshRenderingType::eOpaque, DeltaTime);
RenderLayerMgr->Draw(EMeshRenderingType::eTransparent, DeltaTime);
}
// finish
// ... barrier
{
StartSetMainViewportRenderTarget();
GeometryMap->DrawViewport(DeltaTime);
// draw the cube map
Draw(DeltaTime);
// render this particular model
RenderLayerMgr->FindObjectDraw(DeltaTime, EMeshRenderingType::eOpaqueReflect, GeometryMap->DynamicReflectionMeshes[j]);
// reset the cube map binding
GeometryMap->DrawCubeMapTexture(DeltaTime);
EndSetMainViewportRenderTarget();
}
}
}
How to read this: for each dynamically reflective object, and for each of its (6) cameras,
the camera's viewport constants are set:
ViewportAddr += (1 + i + j * CUBE_MAP_VIEWPORT_NUM) * CBVSize; // MainViewport:1 + CubeMapViewport:6 for each mesh
GetD3dGraphicsCommandList()->SetGraphicsRootConstantBufferView(1, ViewportAddr); // 1 root parameter view port
then everything is drawn:
RenderLayerMgr->Draw(EMeshRenderingType::eBackground, DeltaTime);
RenderLayerMgr->Draw(EMeshRenderingType::eOpaque, DeltaTime);
RenderLayerMgr->Draw(EMeshRenderingType::eTransparent, DeltaTime);
Drawn to where?
GetD3dGraphicsCommandList()->OMSetRenderTargets(
1,
&RenderTarget->CPURenderTargetView[i],
true,
&DSVDesc);
That line, &RenderTarget->CPURenderTargetView[i], is the answer: the i-th cube-face RTV.
Then we return to the main camera:
GeometryMap->DrawViewport(DeltaTime);
{
StartSetMainViewportRenderTarget();
GeometryMap->DrawViewport(DeltaTime);
// draw the cube map
Draw(DeltaTime);
// render this particular model
RenderLayerMgr->FindObjectDraw(DeltaTime, EMeshRenderingType::eOpaqueReflect, GeometryMap->DynamicReflectionMeshes[j]);
// reset the cube map binding
GeometryMap->DrawCubeMapTexture(DeltaTime);
EndSetMainViewportRenderTarget();
}
The CubeMap data is handed over inside Draw:
Draw(DeltaTime);
GetD3dGraphicsCommandList()->SetGraphicsRootDescriptorTable(6, RenderTarget->GPUShaderResourceView); // 6 root parameter cube map
and then just this one dynamic-reflection model is drawn by itself:
RenderLayerMgr->FindObjectDraw(DeltaTime, EMeshRenderingType::eOpaqueReflect, GeometryMap->DynamicReflectionMeshes[j]);
Shadows
The main changes start at 28-03.
D:\UE_Project\cwlengine>git log
commit 1cbac34a6a90428cdb8a29a5aa273c57c5eeaaf4 (HEAD -> master)
Author: cwl\chenweilin <weilin1013@qq.com>
Date: Sat Mar 9 21:49:20 2024 +0800
save shadow code
Let's see whether it works to start reading directly from here.
Start from CDirectXRenderingEngine:
bool CDirectXRenderingEngine::InitDirect3D()
{
// ...
D3D12_DESCRIPTOR_HEAP_DESC DSVHeapDesc = {};
DSVHeapDesc.Type = D3D12_DESCRIPTOR_HEAP_TYPE_DSV;
DSVHeapDesc.Flags = D3D12_DESCRIPTOR_HEAP_FLAG_NONE;
DSVHeapDesc.NumDescriptors = 1 + 1 + 1; // depth stencil buffer + cube map + shadow map
DSVHeapDesc.NodeMask = 0;
ThrowIfFailed(Device->CreateDescriptorHeap(&DSVHeapDesc, IID_PPV_ARGS(DSVHeap.GetAddressOf())))
return true;
}
Aha, the DSV heap gets one more descriptor here.
Nothing else changes
until
CDirectXRenderingEngine::PostInitialize
MeshMgr->BuildMesh();
RenderingPipeLine.BuildPipeline();
void FRenderingPipeLine::BuildPipeline()
{
// ...
// initialize the dynamic shadow map; again just passing references
GeometryMap.DynamicShadowMap.Init(&GeometryMap, &PipelineState, &RenderLayerMgr);
// ...
}
class FDynamicMap : public IDirectXDeviceInterface
{
public:
// ...
protected:
std::shared_ptr<FRenderTarget> RenderTarget;
FGeometryMap* GeometryMap; // later to be changed to a weak pointer
FDirectXPipelineState* IDirectXPipelineState; // later to be changed to a weak pointer
FRenderLayerMgr* RenderLayerMgr; // later to be changed to a weak pointer
UINT Width;
UINT Height;
};
FDynamicMap mainly exists so the two share a render target:
class FDynamicShadowMap : public FDynamicMap {}
class FDynamicCubeMap : public FDynamicMap {}
Two subclasses. Looking at FDynamicCubeMap together with the earlier flow, it is essentially the same as before.
RootSignature.BuildRootSignature(GeometryMap.GetDrawTextureNumber());
// SRV
CD3DX12_DESCRIPTOR_RANGE DescriptorRangeTextureSRV;
CD3DX12_DESCRIPTOR_RANGE DescriptorRangeCubeMapSRV;
CD3DX12_DESCRIPTOR_RANGE DescriptorRangeShadowMapSRV;
// start from register 2, InTextureNumber descriptor. t2
DescriptorRangeTextureSRV.Init(D3D12_DESCRIPTOR_RANGE_TYPE_SRV, InTextureNumber, 2);
// start from register 0, 1 descriptor. t0
DescriptorRangeCubeMapSRV.Init(D3D12_DESCRIPTOR_RANGE_TYPE_SRV, 1, 0);
// start from register 1, 1 descriptor. t1
DescriptorRangeShadowMapSRV.Init(D3D12_DESCRIPTOR_RANGE_TYPE_SRV, 1, 1);
RootParameters[0].InitAsConstantBufferView(0); // OBJ
RootParameters[1].InitAsConstantBufferView(1); // ViewPort
RootParameters[2].InitAsConstantBufferView(2); // Light
RootParameters[3].InitAsConstantBufferView(3); // Fog
RootParameters[4].InitAsShaderResourceView(0, 1); // Material
RootParameters[5].InitAsDescriptorTable(1, &DescriptorRangeTextureSRV, D3D12_SHADER_VISIBILITY_PIXEL); // Texture
RootParameters[6].InitAsDescriptorTable(1, &DescriptorRangeCubeMapSRV, D3D12_SHADER_VISIBILITY_PIXEL); // CubeMap
RootParameters[7].InitAsDescriptorTable(1, &DescriptorRangeShadowMapSRV, D3D12_SHADER_VISIBILITY_PIXEL); // ShadowMap
The root signature gains one more parameter: t0 is the CubeMap, t1 the ShadowMap, and starting at t2 there are InTextureNumber texture registers.
Continuing:
GeometryMap.BuildDescriptorHeap();
void FGeometryMap::BuildDescriptorHeap()
{
DescriptorHeap.BuildDescriptorHeap(
GetDrawTextureNumber() + // Texture
GetCubeMapNumber() + // Static CubeMap like Sky
1 + // DynamicCubeMap for DynamicReflection
1 // ShadowMap
);
}
The descriptor heap likewise gains one slot for the shadow map.
// build the dynamic shadow map
GeometryMap.BuildShadow();
void FGeometryMap::BuildShadow()
{
DynamicShadowMap.Init(2048, 2048);
DynamicShadowMap.BuildViewport(FVector3D(0, 0, 0));
DynamicShadowMap.BuildDepthStencilDescriptor();
DynamicShadowMap.BuildRenderTargetDescriptor();
}
Init just sets the width and height.
void FDynamicShadowMap::BuildViewport(const FVector3D& InCenterPoint)
{
ShadowViewport = CreateObject<GClientViewport>(new GClientViewport());
ShadowViewport->SetPosition(XMFLOAT3(InCenterPoint.X, InCenterPoint.Y, InCenterPoint.Z));
ShadowViewport->FaceTarget(InCenterPoint, FVector3D(0, 0, 0), FVector3D(0, 1, 0));
ShadowViewport->SetFrustum(0.5f * XM_PI, 1.0f, 0.1f, 10000.0f);
BuildViewMatrix(0.3f);
}
Initialize a throwaway camera, then build the view matrix.
void FDynamicShadowMap::BuildDepthStencilDescriptor()
{
UINT DescriptorHandleIncrementSize = GetD3dDevice()->GetDescriptorHandleIncrementSize(D3D12_DESCRIPTOR_HEAP_TYPE_DSV);
if (auto* InRenderTarget = dynamic_cast<FShadowMapRenderTarget*>(RenderTarget.get()))
{
InRenderTarget->DSVDes = CD3DX12_CPU_DESCRIPTOR_HANDLE(
GetDsvHeap()->GetCPUDescriptorHandleForHeapStart(),
2, // main viewport dsv, cube map dsv, shadow map dsv. shadow map dsv is the third one.
DescriptorHandleIncrementSize);
}
}
That is the DSV descriptor.
And the SRV creation:
void FDynamicShadowMap::BuildRenderTargetDescriptor()
{
BuildRenderTargetSRV();
RenderTarget->Init(Width, Height, DXGI_FORMAT_D24_UNORM_S8_UINT);
}
Here FRenderTarget is a resource, a ComPtr<ID3D12Resource>, plus width/height information:
class FRenderTarget : public IDirectXDeviceInterface, public std::enable_shared_from_this<FRenderTarget>
{
// ...
protected:
UINT Width;
UINT Height;
DXGI_FORMAT Format;
D3D12_VIEWPORT Viewport;
D3D12_RECT ScissorRect;
ComPtr<ID3D12Resource> RenderTargetMap;
CD3DX12_CPU_DESCRIPTOR_HANDLE CPUShaderResourceView;
CD3DX12_GPU_DESCRIPTOR_HANDLE GPUShaderResourceView;
};
Its subclasses are responsible for creating the descriptors, e.g. SRV and RTV:
class FCubeMapRenderTarget : public FRenderTarget {}
class FShadowMapRenderTarget : public FRenderTarget {}
For the shadow map, the base class runs this initialization flow:
void FRenderTarget::ResetRenderTarget(const UINT& InWidth, const UINT& InHeight)
{
if (Width == InWidth && Height == InHeight)
{
return;
}
Width = InWidth;
Height = InHeight;
ResetViewport(Width, Height);
ResetScissorRect(Width, Height);
BuildRenderTargetMap();
BuildRTVDescriptors();
BuildSRVDescriptors();
BuildDSVDescriptors();
}
BuildRenderTargetMap, BuildRTVDescriptors, BuildSRVDescriptors, BuildDSVDescriptors:
these four are implemented by the subclasses.
BuildRenderTargetMap is the essential one, creating the ComPtr<ID3D12Resource>;
some of its parameters differ from CubeMapRenderTarget:
void FShadowMapRenderTarget::BuildRenderTargetMap()
{
D3D12_RESOURCE_DESC RenderTargetMapDesc = {};
RenderTargetMapDesc.Dimension = D3D12_RESOURCE_DIMENSION_TEXTURE2D;
RenderTargetMapDesc.Alignment = 0;
RenderTargetMapDesc.Width = Width;
RenderTargetMapDesc.Height = Height;
RenderTargetMapDesc.DepthOrArraySize = 1; // different from CubeMapRenderTarget
RenderTargetMapDesc.MipLevels = 1;
RenderTargetMapDesc.Format = Format;
RenderTargetMapDesc.SampleDesc.Count = 1;
RenderTargetMapDesc.SampleDesc.Quality = 0;
RenderTargetMapDesc.Layout = D3D12_TEXTURE_LAYOUT_UNKNOWN;
RenderTargetMapDesc.Flags = D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL; // different from CubeMapRenderTarget
D3D12_CLEAR_VALUE ClearValue = {}; // different from CubeMapRenderTarget
ClearValue.DepthStencil.Depth = 1.0f;
ClearValue.DepthStencil.Stencil = 0;
ClearValue.Format = DXGI_FORMAT_D24_UNORM_S8_UINT;
auto Properties = CD3DX12_HEAP_PROPERTIES(D3D12_HEAP_TYPE_DEFAULT);
ThrowIfFailed(GetD3dDevice()->CreateCommittedResource(
&Properties,
D3D12_HEAP_FLAG_NONE,
&RenderTargetMapDesc,
D3D12_RESOURCE_STATE_GENERIC_READ, // different from CubeMapRenderTarget
&ClearValue,
IID_PPV_ARGS(&RenderTargetMap)
));
}
It then needs two descriptors, a DSV and an SRV, to describe that resource:
void FShadowMapRenderTarget::BuildDSVDescriptors()
{
D3D12_DEPTH_STENCIL_VIEW_DESC DSVDesc = {};
DSVDesc.Format = DXGI_FORMAT_D24_UNORM_S8_UINT;
DSVDesc.Texture2D.MipSlice = 0;
DSVDesc.ViewDimension = D3D12_DSV_DIMENSION_TEXTURE2D;
DSVDesc.Flags = D3D12_DSV_FLAG_NONE;
GetD3dDevice()->CreateDepthStencilView(RenderTargetMap.Get(), &DSVDesc, DSVDes);
}
void FShadowMapRenderTarget::BuildShadowConstantBuffer()
{
D3D12_SHADER_RESOURCE_VIEW_DESC SRVDesc = {};
SRVDesc.Shader4ComponentMapping = D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING;
SRVDesc.Format = DXGI_FORMAT_R24_UNORM_X8_TYPELESS; // different from CubeMapRenderTarget
SRVDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE2D; // different from CubeMapRenderTarget
SRVDesc.Texture2D.MostDetailedMip = 0;
SRVDesc.Texture2D.MipLevels = 1;
SRVDesc.Texture2D.ResourceMinLODClamp = 0.f;
SRVDesc.Texture2D.PlaneSlice = 0;
GetD3dDevice()->CreateShaderResourceView(RenderTargetMap.Get(), &SRVDesc, CPUShaderResourceView);
}
The exact parameters can wait; the point is that it is these two descriptors.
Back in
FRenderingPipeLine::BuildPipeline
there is nothing else new, so on to Tick.
From here the main thing to look at is
the FDynamicShadowMap DynamicShadowMap inside FGeometryMap GeometryMap;
// Shadow
DynamicShadowMap.UpdateCalculations(DeltaTime, ViewportInfo);
UpdateCalculations is there to compute some data before the draw each Tick.
void FDynamicShadowMap::UpdateCalculations(float DeltaTime, const FViewportInfo& Info)
{
Super::UpdateCalculations(DeltaTime, Info);
if (ShadowViewport)
{
FViewportInfo ShadowViewportInfo;
GetViewportMatrix(ShadowViewportInfo.ViewMatrix, ShadowViewportInfo.ProjectionMatrix);
XMFLOAT3 Pos = ShadowViewport->GetPosition();
ShadowViewportInfo.ViewportPosition = XMFLOAT4(Pos.x, Pos.y, Pos.z, 1.0f);
GeometryMap->UpdateCalculationsViewport(
DeltaTime,
ShadowViewportInfo,
GeometryMap->GetDynamicReflectionViewportNumber() + 1);
}
}
Reading this far, it turns out that
void FGeometryMap::BuildViewportConstantBufferView(UINT InViewportOffset)
{
// create the constant buffer view
UINT MainViewportCount = 1;
UINT ShadowViewportCount = 1;
ViewportConstantBufferView.CreateConstantBufferView(
sizeof(FViewportTransformation),
MainViewportCount + GetDynamicReflectionViewportNumber() + ShadowViewportCount + InViewportOffset
);
the viewport CBV count also grows by one, for the shadow camera.
When updating the lights, if the light is a directional light:
void FGeometryMap::UpdateLightConstantBuffer(float DeltaTime)
{
auto Lights = GetLightMgr()->GetLights();
FLightConstantBuffer LightConstantBuffer;
for (int i = 0; i < Lights.size(); i++)
{
{
if (CLightComponent* Light = Lights[i])
{
FVector3D LightIntensity = Light->GetLightIntensity();
LightConstantBuffer.SceneLights[i].LightIntensity = XMFLOAT3(LightIntensity.X, LightIntensity.Y, LightIntensity.Z);
LightConstantBuffer.SceneLights[i].LightDirection = Light->GetForwardVector();
LightConstantBuffer.SceneLights[i].LightPosition = Light->GetPosition();
LightConstantBuffer.SceneLights[i].LightType = static_cast<int>(Light->GetLightType());
switch (Light->GetLightType())
{
case DirectionalLight:
{
DynamicShadowMap.BuildParallelLightMatrix(
EngineMath::ToVector3(Light->GetForwardVector()),
FVector3D(0.F),
200.F);
XMFLOAT4X4 ShadowViewMatrix;
XMFLOAT4X4 ShadowProjectionMatrix;
DynamicShadowMap.GetViewportMatrix(ShadowViewMatrix, ShadowProjectionMatrix);
XMMATRIX ShadowViewMatrixXM = XMLoadFloat4x4(&ShadowViewMatrix);
XMMATRIX ShadowProjectionMatrixXM = XMLoadFloat4x4(&ShadowProjectionMatrix);
// NDC [-1, 1] -> [0, 1]
XMMATRIX Transform = {
0.5f, 0.0f, 0.0f, 0.0f,
0.0f, -0.5f, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, 0.0f,
0.5f, 0.5f, 0.0f, 1.0f,
};
XMMATRIX ShadowViewProject = ShadowViewMatrixXM * ShadowProjectionMatrixXM * Transform;
// store the ShadowTransform
XMStoreFloat4x4(&LightConstantBuffer.SceneLights[i].ShadowTransform, XMMatrixTranspose(ShadowViewProject));
break;
}
LightConstantBuffer.SceneLights[i].ShadowTransform: what gets computed here is a transform matrix
that takes a world-space point into the shadow camera's space and on into [0, 1] shadow-map texture space (a sketch of how such a matrix is typically assembled follows).
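BuildParallelLightMatrix itself is not quoted here. Assuming it follows the Luna book's directional-light approach, the whole ShadowTransform could be assembled roughly like this (an illustrative sketch; names and conventions are not the engine's):

#include <DirectXMath.h>
using namespace DirectX;

XMMATRIX BuildDirectionalShadowTransform(FXMVECTOR LightDirection, FXMVECTOR SceneCenter, float SceneRadius)
{
    // Place the shadow "camera" far away along the light direction, looking at the scene center.
    const XMVECTOR LightPos = XMVectorSubtract(
        SceneCenter, XMVectorScale(XMVector3Normalize(LightDirection), 2.0f * SceneRadius));
    const XMVECTOR Up = XMVectorSet(0.f, 1.f, 0.f, 0.f); // degenerate for a straight-down light
    const XMMATRIX View = XMMatrixLookAtLH(LightPos, SceneCenter, Up);

    // Orthographic box around the scene, expressed in light space.
    const XMVECTOR CenterLS = XMVector3TransformCoord(SceneCenter, View);
    const float cx = XMVectorGetX(CenterLS), cy = XMVectorGetY(CenterLS), cz = XMVectorGetZ(CenterLS);
    const XMMATRIX Proj = XMMatrixOrthographicOffCenterLH(
        cx - SceneRadius, cx + SceneRadius,
        cy - SceneRadius, cy + SceneRadius,
        cz - SceneRadius, cz + SceneRadius);

    // Same NDC [-1, 1] -> texture [0, 1] remap as the Transform matrix in the code above.
    const XMMATRIX T(
        0.5f,  0.0f, 0.0f, 0.0f,
        0.0f, -0.5f, 0.0f, 0.0f,
        0.0f,  0.0f, 1.0f, 0.0f,
        0.5f,  0.5f, 0.0f, 1.0f);
    return View * Proj * T;
}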
A new render layer is added:
FOpaqueShadowRenderLayer
#include "OpaqueShadowRenderLayer.h"
#include "CWLEngine/Engine/Rendering/Core/DirectX/RenderingPipeline/Geometry/GeometryMap.h"
#include "CWLEngine/Engine/Rendering/Core/DirectX/RenderingPipeline/PipelineState/DirectXPipelineState.h"
#include "CWLEngine/Engine/Shader/Core/ShaderType.h"
FOpaqueShadowRenderLayer::FOpaqueShadowRenderLayer()
{
RenderPriority = 10086; // arbitrary
}
void FOpaqueShadowRenderLayer::BuildShader()
{
std::vector<ShaderType::FShaderMacro> ShaderMacro;
BuildingShaderMacro(ShaderMacro);
std::vector<D3D_SHADER_MACRO> ShaderMacroD3D;
ShaderType::ToD3DShaderMacro(ShaderMacro, ShaderMacroD3D);
// build the shaders
VertexShader.BuildShaders(L"Shader/Shadow.hlsl", "VSMain", "vs_5_1", ShaderMacroD3D.data());
PixelShader.BuildShaders(L"Shader/Shadow.hlsl", "PSMain", "ps_5_1", ShaderMacroD3D.data());
IDirectXPipelineState->BindShader(VertexShader, PixelShader);
// input layout
InputElementDesc = {
{"POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0},
{"TEXCOORD", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 12, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0}
};
IDirectXPipelineState->BindInputLayout(InputElementDesc.data(), InputElementDesc.size());
}
void FOpaqueShadowRenderLayer::BuildPSO()
{
Super::BuildPSO();
// slope-scaled depth bias
// d = 1 / pow(2, 24)
// bias = DepthBias (constant bias) * d + SlopeScaledDepthBias (scale factor) * MaxDepthSlope (max depth slope)
// the bias is there to fight shadow acne, Luna book chapter 20
auto& GPSDesc = IDirectXPipelineState->GetGPSDesc();
GPSDesc.RasterizerState.DepthBias = 100000;
GPSDesc.RasterizerState.DepthBiasClamp = 0.0f;
GPSDesc.RasterizerState.SlopeScaledDepthBias = 1.0f;
// disable the render target (depth-only pass)
GPSDesc.RTVFormats[0] = DXGI_FORMAT_UNKNOWN;
GPSDesc.NumRenderTargets = 0;
IDirectXPipelineState->Build(Shadow);
}
This layer draws with the Shadow shader.
ps: shadow acne and slope-scaled depth bias deserve their own explanation; see the Luna book 20.4.2, it is very clear.
Now look at the shadow drawing pass:
void FDynamicShadowMap::Draw(float DeltaTime)
{
Super::Draw(DeltaTime);
auto* InRenderTarget = dynamic_cast<FShadowMapRenderTarget*>(RenderTarget.get());
if (!InRenderTarget)
{
ENGINE_LOG_ERROR("FDynamicShadowMap::Draw RenderTarget is not FShadowMapRenderTarget")
return;
}
// ... barrier
GetD3dGraphicsCommandList()->ResourceBarrier(1, &Barrier);
auto RenderTargetViewport = InRenderTarget->GetViewport();
auto RenderTargetScissorRect = InRenderTarget->GetScissorRect();
GetD3dGraphicsCommandList()->RSSetViewports(1, &RenderTargetViewport);
GetD3dGraphicsCommandList()->RSSetScissorRects(1, &RenderTargetScissorRect);
GetD3dGraphicsCommandList()->ClearDepthStencilView(
InRenderTarget->DSVDes,
D3D12_CLEAR_FLAG_DEPTH | D3D12_CLEAR_FLAG_STENCIL,
1.0f,
0,
0,
nullptr);
GetD3dGraphicsCommandList()->OMSetRenderTargets(
0,
nullptr,
false,
&InRenderTarget->DSVDes);
UINT CBVSize = GeometryMap->ViewportConstantBufferView.GetConstantBufferByteSize();
auto ViewportAddr = GeometryMap->ViewportConstantBufferView.GetUploadBuffer()->GetGPUVirtualAddress();
ViewportAddr += (1 + GeometryMap->GetDynamicReflectionViewportNumber()) * CBVSize;
GetD3dGraphicsCommandList()->SetGraphicsRootConstantBufferView(1, ViewportAddr);
DrawShadowMapTexture(DeltaTime);
RenderLayerMgr->ResetPSO(EMeshRenderingType::eOpaqueShadow);
RenderLayerMgr->DrawMesh(DeltaTime, EMeshRenderingType::eOpaque);
RenderLayerMgr->DrawMesh(DeltaTime, EMeshRenderingType::eTransparent);
RenderLayerMgr->DrawMesh(DeltaTime, EMeshRenderingType::eOpaqueReflect);
// ... barrier
GetD3dGraphicsCommandList()->ResourceBarrier(1, &Barrier2);
}
The part to focus on is this:
GetD3dGraphicsCommandList()->OMSetRenderTargets(
0,
nullptr,
false,
&InRenderTarget->DSVDes);
UINT CBVSize = GeometryMap->ViewportConstantBufferView.GetConstantBufferByteSize();
auto ViewportAddr = GeometryMap->ViewportConstantBufferView.GetUploadBuffer()->GetGPUVirtualAddress();
ViewportAddr += (1 + GeometryMap->GetDynamicReflectionViewportNumber()) * CBVSize;
GetD3dGraphicsCommandList()->SetGraphicsRootConstantBufferView(1, ViewportAddr);
DrawShadowMapTexture(DeltaTime); // .SetGraphicsRootDescriptorTable(7, ...)
OMSetRenderTargets switches the DSV to the one we just prepared for the shadow map,
and the viewport constants are switched to the shadow camera (a point far away along the directional light's direction).
Running Shadow.hlsl with that setup fills our depth-stencil with the scene's depth as seen from the shadow camera.
Reset the PSO, then start drawing:
RenderLayerMgr->ResetPSO(EMeshRenderingType::eOpaqueShadow);
RenderLayerMgr->DrawMesh(DeltaTime, EMeshRenderingType::eOpaque);
RenderLayerMgr->DrawMesh(DeltaTime, EMeshRenderingType::eTransparent);
RenderLayerMgr->DrawMesh(DeltaTime, EMeshRenderingType::eOpaqueReflect);
and the meshes are drawn with this shader:
#include "Material.hlsl"
#include "ShaderCommon.hlsl"
struct MeshVertexIn
{
float3 Position : POSITION;
float2 TexCoord : TEXCOORD;
};
struct MeshVertexOut
{
float4 PositionH : SV_POSITION;
float2 TexCoord : TEXCOORD;
};
MeshVertexOut VSMain(MeshVertexIn In)
{
MeshVertexOut Out = (MeshVertexOut)0;
MaterialConstantBuffer MatConstantBuffer = Materials[MaterialIndex];
float4 PositionWorld = mul(float4(In.Position, 1.0f), WorldMatrix);
Out.PositionH = mul(PositionWorld, ViewProjectionMatrix);
float4 MyTexCoord = mul(float4(In.TexCoord, 0.0f, 1.0f), ObjectTextureTransform);
Out.TexCoord = mul(MyTexCoord, MatConstantBuffer.TransformInfo).xy;
return Out;
}
void PSMain(MeshVertexOut In)
{
}
This renders an image into the DSV,
i.e. the DSV bound by OMSetRenderTargets above; the resource it points at also has an SRV,
which we bind here:
void FDynamicShadowMap::DrawShadowMapTexture(float DeltaTime)
{
GetD3dGraphicsCommandList()->SetGraphicsRootDescriptorTable(7, RenderTarget->GetGpuSRVOffset());
}
matching RootParameters[7].InitAsDescriptorTable in the root signature.
#ifndef _SHADER_COMMON_HLSL_
#define _SHADER_COMMON_HLSL_
#include "Light.hlsl"
SamplerState TextureSamplerState : register(s0);
SamplerState AnisotropicSamplerState : register(s1);
SamplerComparisonState ShadowSampler : register(s2);
TextureCube SimpleCubeMap: register(t0);
Texture2D SimpleShadowMap: register(t1);
Texture2D SimpleTexture2DMap[TEX_2D_MAP_NUM]: register(t2);
which corresponds to t1 here.
The simplest version: take the world-space point, transform it with the shadow camera (the one placed far away along the directional light) through clip space into shadow-map texture space,
then sample and compare the depth:
float GetShadowFactor(float4 InWorldPosition, float4x4 InShadowMatrix)
{
float4 ShadowPointHome = mul(InWorldPosition, InShadowMatrix);
float ShadowDepth = ShadowPointHome.z;
return SimpleShadowMap.SampleCmpLevelZero(ShadowSampler, ShadowPointHome.xy, ShadowDepth);
}
TODO: other sampling algorithms
TODO: shadows for spot lights, point lights, etc.
After that comes the IMGUI log editor. Finally caught up with where things stood before.