cleanup and refactoring

This commit is contained in:
CDaut 2024-05-25 11:53:25 +02:00
parent 2302158928
commit 76f6bf62a4
Signed by: clara
GPG key ID: 223391B52FAD4463
1285 changed files with 757994 additions and 8 deletions


@ -0,0 +1,80 @@
#*****************************************************************************
# Copyright 2020 NVIDIA Corporation. All rights reserved.
#*****************************************************************************
cmake_minimum_required(VERSION 3.9.6 FATAL_ERROR)
#--------------------------------------------------------------------------------------------------
# Project setting
get_filename_component(PROJNAME ${CMAKE_CURRENT_SOURCE_DIR} NAME)
set(PROJNAME vk_${PROJNAME}_KHR)
project(${PROJNAME} LANGUAGES C CXX)
message(STATUS "-------------------------------")
message(STATUS "Processing Project ${PROJNAME}:")
#--------------------------------------------------------------------------------------------------
# C++ target and defines
set(CMAKE_CXX_STANDARD 17)
add_executable(${PROJNAME})
_add_project_definitions(${PROJNAME})
#--------------------------------------------------------------------------------------------------
# Source files for this project
#
file(GLOB SOURCE_FILES *.cpp *.hpp *.inl *.h *.c)
file(GLOB EXTRA_COMMON ${TUTO_KHR_DIR}/common/*.*)
list(APPEND COMMON_SOURCE_FILES ${EXTRA_COMMON})
include_directories(${TUTO_KHR_DIR}/common)
#--------------------------------------------------------------------------------------------------
# GLSL to SPIR-V custom build
compile_glsl_directory(
SRC "${CMAKE_CURRENT_SOURCE_DIR}/shaders"
DST "${CMAKE_CURRENT_SOURCE_DIR}/spv"
VULKAN_TARGET "vulkan1.2"
DEPENDENCY ${VULKAN_BUILD_DEPENDENCIES}
)
#--------------------------------------------------------------------------------------------------
# Sources
target_sources(${PROJNAME} PUBLIC ${SOURCE_FILES} ${HEADER_FILES})
target_sources(${PROJNAME} PUBLIC ${COMMON_SOURCE_FILES})
target_sources(${PROJNAME} PUBLIC ${PACKAGE_SOURCE_FILES})
target_sources(${PROJNAME} PUBLIC ${GLSL_SOURCES} ${GLSL_HEADERS})
#--------------------------------------------------------------------------------------------------
# Sub-folders in Visual Studio
#
source_group("Common" FILES ${COMMON_SOURCE_FILES} ${PACKAGE_SOURCE_FILES})
source_group("Sources" FILES ${SOURCE_FILES})
source_group("Headers" FILES ${HEADER_FILES})
source_group("Shader Sources" FILES ${GLSL_SOURCES})
source_group("Shader Headers" FILES ${GLSL_HEADERS})
#--------------------------------------------------------------------------------------------------
# Linkage
#
target_link_libraries(${PROJNAME} ${PLATFORM_LIBRARIES} nvpro_core)
foreach(DEBUGLIB ${LIBRARIES_DEBUG})
target_link_libraries(${PROJNAME} debug ${DEBUGLIB})
endforeach(DEBUGLIB)
foreach(RELEASELIB ${LIBRARIES_OPTIMIZED})
target_link_libraries(${PROJNAME} optimized ${RELEASELIB})
endforeach(RELEASELIB)
#--------------------------------------------------------------------------------------------------
# copies binaries that need to be put next to the exe files (ZLib, etc.)
#
_finalize_target( ${PROJNAME} )
install(FILES ${SPV_OUTPUT} CONFIGURATIONS Release DESTINATION "bin_${ARCH}/${PROJNAME}/spv")
install(FILES ${SPV_OUTPUT} CONFIGURATIONS Debug DESTINATION "bin_${ARCH}_debug/${PROJNAME}/spv")


@ -0,0 +1,601 @@
# NVIDIA Vulkan Ray Tracing Tutorial - glTF Scene
![img](images/vk_ray_tracing_gltf_KHR.png)
This example is the result of modifying the [simple ray tracing](../ray_tracing__simple) tutorial.
Instead of loading separate OBJ objects, the example was modified to load glTF scene files containing multiple objects.
This example is not about shading, but about handling more complex data than OBJ.
For a more complete version, see
* https://github.com/nvpro-samples/vk_raytrace
* https://github.com/nvpro-samples/vk_shaded_gltfscene
## Scene Data
The OBJ models were loaded and stored in four buffers:
* vertices: an array of structures holding position, normal, texcoord and color
* indices: vertex indices, every three forming a triangle
* materials: the Wavefront material structure
* material index: the material index per triangle
Since we could have multiple OBJ files, we had arrays of those buffers.
With a glTF scene, the data is organized differently, a choice made for convenience. Instead of a structure of vertices,
the positions, normals and other attributes are kept in separate buffers. There is a single position buffer
for all geometries of the scene, and the same holds for indices and the other attributes. For each geometry we only store
the number of elements and the offsets into those buffers.
From the source tutorial, we no longer need the following members and can remove them:
~~~~C
std::vector<ObjModel> m_objModel; // Model on host
std::vector<ObjDesc> m_objDesc; // Model description for device access
std::vector<ObjInstance> m_instances; // Scene model instances
~~~~
In `host_device.h` we will add new host/device structures: PrimMeshInfo, SceneDesc and GltfShadeMaterial.
~~~~C
// Structure used for retrieving the primitive information in the closest hit
struct PrimMeshInfo
{
uint indexOffset;
uint vertexOffset;
int materialIndex;
};
// Scene buffer addresses
struct SceneDesc
{
uint64_t vertexAddress; // Address of the Vertex buffer
uint64_t normalAddress; // Address of the Normal buffer
uint64_t uvAddress; // Address of the texture coordinates buffer
uint64_t indexAddress; // Address of the triangle indices buffer
uint64_t materialAddress; // Address of the Materials buffer (GltfShadeMaterial)
uint64_t primInfoAddress; // Address of the mesh primitives buffer (PrimMeshInfo)
};
~~~~
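As an illustration, a hit shader can locate the triangle data of the current instance by combining these per-primitive offsets with the shared buffers. The following is a sketch only, not the shader code of this sample; the names `primInfo`, `indices` and `positions` are assumed bound views of the buffers described by `SceneDesc`.
~~~~C
// Sketch (GLSL-like): fetching the first vertex of the hit triangle using the
// per-primitive offsets and the shared index/position buffers.
PrimMeshInfo pinfo = primInfo[gl_InstanceCustomIndexEXT];   // which primitive was hit
uint i0 = indices[pinfo.indexOffset + 3 * gl_PrimitiveID + 0] + pinfo.vertexOffset;
vec3 p0 = positions[i0];                                    // first vertex of the triangle
~~~~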
We also add our glTF material representation for shading. This is a stripped-down version of the glTF PBR material. If you are interested in a
correct PBR implementation, check out [vk_raytrace](https://github.com/nvpro-samples/vk_raytrace).
~~~~ C
struct GltfShadeMaterial
{
vec4 pbrBaseColorFactor;
vec3 emissiveFactor;
int pbrBaseColorTexture;
};
~~~~
All the buffers allocated to represent the scene are held in the following members.
~~~~C
nvh::GltfScene m_gltfScene;
nvvk::Buffer m_vertexBuffer;
nvvk::Buffer m_normalBuffer;
nvvk::Buffer m_uvBuffer;
nvvk::Buffer m_indexBuffer;
nvvk::Buffer m_materialBuffer;
nvvk::Buffer m_primInfo;
nvvk::Buffer m_sceneDesc;
~~~~
## Loading glTF scene
To load the scene, we use [TinyGLTF](https://github.com/syoyo/tinygltf) from Syoyo Fujita. To avoid traversing
the scene graph, the information is then flattened using the [gltfScene](https://github.com/nvpro-samples/nvpro_core/tree/master/nvh#gltfscenehpp) helper.
### Loading Scene
Instead of loading a model, we are loading a scene, so `loadModel()` is replaced by `loadScene()`.
In the source file, `loadScene()` starts with the glTF import using TinyGLTF.
~~~~C
tinygltf::Model tmodel;
tinygltf::TinyGLTF tcontext;
std::string warn, error;
if(!tcontext.LoadASCIIFromFile(&tmodel, &error, &warn, filename))
assert(!"Error while loading scene");
~~~~
Then we flatten the scene graph and grab the information we need using the gltfScene helper.
~~~~C
m_gltfScene.importMaterials(tmodel);
m_gltfScene.importDrawableNodes(tmodel,
nvh::GltfAttributes::Normal | nvh::GltfAttributes::Texcoord_0);
~~~~
The next part is to allocate the buffers to hold the information, such as the positions, normals, texture coordinates, etc.
~~~~C
// Create the buffers on Device and copy vertices, indices and materials
nvvk::CommandPool cmdBufGet(m_device, m_graphicsQueueIndex);
VkCommandBuffer cmdBuf = cmdBufGet.createCommandBuffer();
m_vertexBuffer = m_alloc.createBuffer(cmdBuf, m_gltfScene.m_positions,
VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT
| VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR);
m_indexBuffer = m_alloc.createBuffer(cmdBuf, m_gltfScene.m_indices,
VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT
| VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR);
m_normalBuffer = m_alloc.createBuffer(cmdBuf, m_gltfScene.m_normals,
VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT
| VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT);
m_uvBuffer = m_alloc.createBuffer(cmdBuf, m_gltfScene.m_texcoords0,
VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT
| VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT);
~~~~
We are making a simple material, extracting only a few members from the glTF material.
~~~~ C++
// Copying all materials, only the elements we need
std::vector<GltfShadeMaterial> shadeMaterials;
for(auto& m : m_gltfScene.m_materials)
{
shadeMaterials.emplace_back(GltfShadeMaterial{m.baseColorFactor, m.emissiveFactor, m.baseColorTexture});
}
m_materialBuffer = m_alloc.createBuffer(cmdBuf, shadeMaterials,
VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT);
~~~~
To find the position of the hit triangle in the closest hit shader, as well as its other
attributes, we store the offset information for each geometry.
~~~~C
// The following is used to find the primitive mesh information in the CHIT
std::vector<PrimMeshInfo> primLookup;
for(auto& primMesh : m_gltfScene.m_primMeshes)
{
primLookup.push_back({primMesh.firstIndex, primMesh.vertexOffset, primMesh.materialIndex});
}
m_primInfo =
    m_alloc.createBuffer(cmdBuf, primLookup, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT);
~~~~
Finally, we create a buffer holding the addresses of all the buffers above.
~~~~ C++
SceneDesc sceneDesc;
sceneDesc.vertexAddress = nvvk::getBufferDeviceAddress(m_device, m_vertexBuffer.buffer);
sceneDesc.indexAddress = nvvk::getBufferDeviceAddress(m_device, m_indexBuffer.buffer);
sceneDesc.normalAddress = nvvk::getBufferDeviceAddress(m_device, m_normalBuffer.buffer);
sceneDesc.uvAddress = nvvk::getBufferDeviceAddress(m_device, m_uvBuffer.buffer);
sceneDesc.materialAddress = nvvk::getBufferDeviceAddress(m_device, m_materialBuffer.buffer);
sceneDesc.primInfoAddress = nvvk::getBufferDeviceAddress(m_device, m_primInfo.buffer);
m_sceneDesc = m_alloc.createBuffer(cmdBuf, sizeof(SceneDesc), &sceneDesc,
VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT);
~~~~
Before closing the function, we create the textures (none in the default scene) and submit the command buffer.
Finalizing and releasing the staging buffers waits until all data has been copied to the GPU.
~~~~ C
// Creates all textures found
createTextureImages(cmdBuf, tmodel);
cmdBufGet.submitAndWait(cmdBuf);
m_alloc.finalizeAndReleaseStaging();
NAME_VK(m_vertexBuffer.buffer);
NAME_VK(m_indexBuffer.buffer);
NAME_VK(m_normalBuffer.buffer);
NAME_VK(m_uvBuffer.buffer);
NAME_VK(m_materialBuffer.buffer);
NAME_VK(m_primInfo.buffer);
NAME_VK(m_sceneDesc.buffer);
}
~~~~
**:warning: NOTE**: the macro `NAME_VK` is a convenience for naming Vulkan objects, making them easy to identify in Nsight Graphics and to know where they were created.
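For reference, such a naming helper boils down to `vkSetDebugUtilsObjectNameEXT`. The following is a minimal sketch under that assumption, not the actual nvpro_core definition (which also stringifies the variable name).
~~~~ C++
// Sketch: naming a buffer for debuggers; the VK_EXT_debug_utils function must have
// been loaded (e.g. by the framework) for this call to be available.
void nameBuffer(VkDevice device, VkBuffer buffer, const char* name)
{
  VkDebugUtilsObjectNameInfoEXT info{VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT};
  info.objectType   = VK_OBJECT_TYPE_BUFFER;
  info.objectHandle = (uint64_t)buffer;
  info.pObjectName  = name;  // shows up in Nsight Graphics and in validation messages
  vkSetDebugUtilsObjectNameEXT(device, &info);
}
~~~~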
## Converting geometry to BLAS
Instead of `objectToVkGeometryKHR()`, we use `primitiveToVkGeometry(const nvh::GltfPrimMesh& prim)`.
The function is similar; only the input differs, except for `VkAccelerationStructureBuildRangeInfoKHR`, where
we also include the offsets.
~~~~C
//--------------------------------------------------------------------------------------------------
// Converting a GLTF primitive in the Raytracing Geometry used for the BLAS
//
auto HelloVulkan::primitiveToVkGeometry(const nvh::GltfPrimMesh& prim)
{
// BLAS builder requires raw device addresses.
VkDeviceAddress vertexAddress = nvvk::getBufferDeviceAddress(m_device, m_vertexBuffer.buffer);
VkDeviceAddress indexAddress = nvvk::getBufferDeviceAddress(m_device, m_indexBuffer.buffer);
uint32_t maxPrimitiveCount = prim.indexCount / 3;
// Describe buffer as array of VertexObj.
VkAccelerationStructureGeometryTrianglesDataKHR triangles{VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR};
triangles.vertexFormat = VK_FORMAT_R32G32B32_SFLOAT; // vec3 vertex position data.
triangles.vertexData.deviceAddress = vertexAddress;
triangles.vertexStride = sizeof(glm::vec3);
// Describe index data (32-bit unsigned int)
triangles.indexType = VK_INDEX_TYPE_UINT32;
triangles.indexData.deviceAddress = indexAddress;
// Indicate identity transform by setting transformData to null device pointer.
//triangles.transformData = {};
triangles.maxVertex = prim.vertexCount - 1;
// Identify the above data as containing opaque triangles.
VkAccelerationStructureGeometryKHR asGeom{VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_KHR};
asGeom.geometryType = VK_GEOMETRY_TYPE_TRIANGLES_KHR;
asGeom.flags = VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_KHR; // For AnyHit
asGeom.geometry.triangles = triangles;
VkAccelerationStructureBuildRangeInfoKHR offset;
offset.firstVertex = prim.vertexOffset;
offset.primitiveCount = maxPrimitiveCount;
offset.primitiveOffset = prim.firstIndex * sizeof(uint32_t);
offset.transformOffset = 0;
// Our blas is made from only one geometry, but could be made of many geometries
nvvk::RaytracingBuilderKHR::BlasInput input;
input.asGeometry.emplace_back(asGeom);
input.asBuildOffsetInfo.emplace_back(offset);
return input;
}
~~~~
## Top Level creation
There are almost no differences, besides the fact that the index of the geometry is stored in `node.primMesh`.
~~~~C
for(auto& node : m_gltfScene.m_nodes)
{
VkAccelerationStructureInstanceKHR rayInst{};
rayInst.transform = nvvk::toTransformMatrixKHR(node.worldMatrix);
rayInst.instanceCustomIndex = node.primMesh; // gl_InstanceCustomIndexEXT: to find which primitive
rayInst.accelerationStructureReference = m_rtBuilder.getBlasDeviceAddress(node.primMesh);
rayInst.flags = VK_GEOMETRY_INSTANCE_TRIANGLE_FACING_CULL_DISABLE_BIT_KHR;
rayInst.mask = 0xFF;
rayInst.instanceShaderBindingTableRecordOffset = 0; // We will use the same hit group for all objects
tlas.emplace_back(rayInst);
}
~~~~
## Raster Rendering
Raster rendering is simple. The shaders were changed to use separate position, normal and texture coordinate streams. For
each node, we push the material ID used by its primitive. Since the scene graph has been flattened,
we can loop over all drawable nodes.
~~~~C
std::vector<VkDeviceSize> offsets       = {0, 0, 0};
std::vector<VkBuffer>     vertexBuffers = {m_vertexBuffer.buffer, m_normalBuffer.buffer, m_uvBuffer.buffer};
vkCmdBindVertexBuffers(cmdBuf, 0, static_cast<uint32_t>(vertexBuffers.size()), vertexBuffers.data(), offsets.data());
vkCmdBindIndexBuffer(cmdBuf, m_indexBuffer.buffer, 0, VK_INDEX_TYPE_UINT32);
uint32_t idxNode = 0;
for(auto& node : m_gltfScene.m_nodes)
{
auto& primitive = m_gltfScene.m_primMeshes[node.primMesh];
m_pcRaster.modelMatrix = node.worldMatrix;
m_pcRaster.objIndex = node.primMesh;
m_pcRaster.materialId = primitive.materialIndex;
vkCmdPushConstants(cmdBuf, m_pipelineLayout, VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT, 0,
sizeof(PushConstantRaster), &m_pcRaster);
vkCmdDrawIndexed(cmdBuf, primitive.indexCount, 1, primitive.firstIndex, primitive.vertexOffset, 0);
}
~~~~
## Ray tracing change
In `createRtDescriptorSet()`, the only change is the addition of the primitive info buffer, used to retrieve
the data when hitting a triangle.
~~~~C
m_rtDescSetLayoutBind.addBinding(ePrimLookup, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1,
VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR | VK_SHADER_STAGE_ANY_HIT_BIT_KHR); // Primitive info
// ...
VkDescriptorBufferInfo primitiveInfoDesc{m_primInfo.buffer, 0, VK_WHOLE_SIZE};
// ...
writes.emplace_back(m_rtDescSetLayoutBind.makeWrite(m_rtDescSet, ePrimLookup, &primitiveInfoDesc));
~~~~
## Descriptors and Pipeline Changes
Since the vertex is no longer a single struct but uses three different buffers for position, normal and texture coordinates,
the methods `createDescriptorSetLayout()`, `updateDescriptorSet()` and `createGraphicsPipeline()`
are changed accordingly.
See [hello_vulkan.cpp](hello_vulkan.cpp).
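For instance, the vertex input of the raster pipeline now declares three bindings, one per attribute stream, instead of a single interleaved vertex (excerpt from `createGraphicsPipeline()`):
~~~~C
// One binding per attribute stream: positions, normals and texture coordinates
gpb.addBindingDescriptions({{0, sizeof(glm::vec3)}, {1, sizeof(glm::vec3)}, {2, sizeof(glm::vec2)}});
gpb.addAttributeDescriptions({
    {0, 0, VK_FORMAT_R32G32B32_SFLOAT, 0},  // Position
    {1, 1, VK_FORMAT_R32G32B32_SFLOAT, 0},  // Normal
    {2, 2, VK_FORMAT_R32G32_SFLOAT, 0},     // Texcoord0
});
~~~~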
## Shaders
The shading is the same and does not reflect the glTF PBR shading model, but the shaders were nevertheless
changed to fit the new incoming format.
* Raster : [vertex](shaders/vert_shader.vert), [fragment](shaders/frag_shader.frag)
* Ray Trace: [RayGen](shaders/raytrace.rgen), [ClosestHit](shaders/raytrace.rchit)
## Other changes
A few other small changes were made: a different scene, and different camera and light positions.
Camera position
~~~~C
CameraManip.setLookat(glm::vec3(0, 0, 15), glm::vec3(0, 0, 0), glm::vec3(0, 1, 0));
~~~~
Scene
~~~~C
helloVk.loadScene(nvh::findFile("media/scenes/cornellBox.gltf", defaultSearchPaths, true));
~~~~
Light Position
~~~~C
glm::vec3 lightPosition{0.f, 4.5f, 0.f};
~~~~
# Simple Path Tracing
To convert this example to a simple path tracer (see Wikipedia [Path Tracing](https://en.wikipedia.org/wiki/Path_tracing)), we need to change the `RayGen` and `ClosestHit` shaders.
Before doing this, we modify the application to send the current rendering frame, which allows samples
to be accumulated over time.
![img](images/vk_ray_tracing_gltf_KHR_2.png)
Add the following two functions in `hello_vulkan.cpp`:
~~~~C
//--------------------------------------------------------------------------------------------------
// If the camera matrix has changed, resets the frame.
// otherwise, increments frame.
//
void HelloVulkan::updateFrame()
{
static glm::mat4 refCamMatrix;
static float refFov{CameraManip.getFov()};
const auto& m = CameraManip.getMatrix();
const auto fov = CameraManip.getFov();
if(refCamMatrix != m || refFov != fov)
{
resetFrame();
refCamMatrix = m;
refFov = fov;
}
m_pcRay.frame++;
}
void HelloVulkan::resetFrame()
{
m_pcRay.frame = -1;
}
~~~~
And call `updateFrame()` at the beginning of the `raytrace()` function.
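The beginning of `raytrace()` then looks like this:
~~~~C
void HelloVulkan::raytrace(const VkCommandBuffer& cmdBuf, const glm::vec4& clearColor)
{
  updateFrame();  // reset or increment the accumulation frame before recording the trace
  m_debug.beginLabel(cmdBuf, "Ray trace");
  // ... rest of the function is unchanged
}
~~~~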
In `hello_vulkan.h`, add the function declarations
~~~~C
void updateFrame();
void resetFrame();
~~~~
And add a new `frame` member at the end of the `PushConstantRay` structure.
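The structure then looks roughly like the following; the exact member list is an assumption based on the values pushed in `raytrace()`.
~~~~C
// Sketch of host_device.h: only 'frame' is new, the other members are those set in raytrace()
struct PushConstantRay
{
  vec4  clearColor;
  vec3  lightPosition;
  float lightIntensity;
  int   lightType;
  int   frame;  // current accumulation frame; -1 right after a camera move
};
~~~~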
## Ray Generation
There are a few modifications to be done in the ray generation shader. First, it uses the clock as the seed for its random numbers.
This is done by adding the [`GL_ARB_shader_clock`](https://www.khronos.org/registry/OpenGL/extensions/ARB/ARB_shader_clock.txt) extension.
~~~~C
#extension GL_ARB_shader_clock : enable
~~~~
The random number generator is in `sampling.glsl`; `#include` this file.
In `main()`, initialize the random seed like this (see the tutorial on jitter camera):
~~~~C
// Initialize the random number
uint seed = tea(gl_LaunchIDEXT.y * gl_LaunchSizeEXT.x + gl_LaunchIDEXT.x, int(clockARB()));
~~~~
To accumulate samples, instead of only writing to the image, we also read back the previous frame and blend with it.
~~~~C
// Do accumulation over time
if(pcRay.frame > 0)
{
float a = 1.0f / float(pcRay.frame + 1);
vec3 old_color = imageLoad(image, ivec2(gl_LaunchIDEXT.xy)).xyz;
imageStore(image, ivec2(gl_LaunchIDEXT.xy), vec4(mix(old_color, hitValue, a), 1.f));
}
else
{
// First frame, replace the value in the buffer
imageStore(image, ivec2(gl_LaunchIDEXT.xy), vec4(hitValue, 1.f));
}
~~~~
Extra information is needed in the ray payload `hitPayload`: the `seed` and the `depth`.
The modification in `raycommon.glsl`:
~~~~C
struct hitPayload
{
vec3 hitValue;
uint seed;
uint depth;
};
~~~~
## Closest Hit Shader
This modification traces recursively until the `depth` reaches 10 (hardcoded) or the ray hits an emissive element (light).
The only part we keep from the original shader is the calculation of the hit state: the position and the normal. All
code from `// Vector toward the light` to the end can be removed and replaced by the following.
~~~~C
// https://en.wikipedia.org/wiki/Path_tracing
// Material of the object
GltfMaterial mat = materials[nonuniformEXT(matIndex)];
vec3 emittance = mat.emissiveFactor;
// Pick a random direction from here and keep going.
vec3 tangent, bitangent;
createCoordinateSystem(world_normal, tangent, bitangent);
vec3 rayOrigin = world_position;
vec3 rayDirection = samplingHemisphere(prd.seed, tangent, bitangent, world_normal);
const float cos_theta = dot(rayDirection, world_normal);
// Probability density function of samplingHemisphere choosing this rayDirection
const float p = cos_theta / M_PI;
// Compute the BRDF for this ray (assuming Lambertian reflection)
vec3 albedo = mat.pbrBaseColorFactor.xyz;
if(mat.pbrBaseColorTexture > -1)
{
uint txtId = mat.pbrBaseColorTexture;
albedo *= texture(texturesMap[nonuniformEXT(txtId)], texcoord0).xyz;
}
vec3 BRDF = albedo / M_PI;
// Recursively trace reflected light sources.
if(prd.depth < 10)
{
prd.depth++;
float tMin = 0.001;
float tMax = 100000000.0;
uint flags = gl_RayFlagsOpaqueEXT;
traceRayEXT(topLevelAS, // acceleration structure
flags, // rayFlags
0xFF, // cullMask
0, // sbtRecordOffset
0, // sbtRecordStride
0, // missIndex
rayOrigin, // ray origin
tMin, // ray min range
rayDirection, // ray direction
tMax, // ray max range
0 // payload (location = 0)
);
}
vec3 incoming = prd.hitValue;
// Apply the Rendering Equation here.
prd.hitValue = emittance + (BRDF * incoming * cos_theta / p);
~~~~
:warning: **Note:** We do not implement the point light as in the rasterizer. Therefore, only emissive geometry emits energy to illuminate the scene.
## Miss Shader
To limit the contribution from the environment, the miss shader is changed as follows.
~~~~C
void main()
{
if(prd.depth == 0)
prd.hitValue = clearColor.xyz * 0.8;
else
prd.hitValue = vec3(0.01); // Tiny contribution from environment
prd.depth = 100; // Ending trace
}
~~~~
# Faster Path Tracer
The implementation above is recursive, which is far from optimal. As described in the [reflections](../ray_tracing_reflections)
tutorial, it is best to break the recursion and do most of the work in the `RayGen` shader.
The following change can make rendering up to **3 times faster**.
To do this, we need to extend the ray `payload` to carry data from the `Closest Hit` back to the `RayGen`: the next
ray origin and direction, and the BRDF weight.
~~~~C
struct hitPayload
{
vec3 hitValue;
uint seed;
uint depth;
vec3 rayOrigin;
vec3 rayDirection;
vec3 weight;
};
~~~~
## Closest Hit
We no longer trace from the closest hit shader, so instead of tracing a new ray, we store the information in
the `payload` and return just before the recursion code.
~~~~C
prd.rayOrigin = rayOrigin;
prd.rayDirection = rayDirection;
prd.hitValue = emittance;
prd.weight = BRDF * cos_theta / p;
return;
~~~~
## Ray Generation
The ray generation shader is now the one doing the trace loop.
First, initialize the `payload` and the variables used to compute the accumulation.
~~~~C
prd.rayOrigin = origin.xyz;
prd.rayDirection = direction.xyz;
prd.weight = vec3(0);
vec3 curWeight = vec3(1);
vec3 hitValue = vec3(0);
~~~~
The loop calling the trace function then looks like the following.
:warning: **Note:** the depth is hardcoded, but it could be passed as a `push constant` parameter.
~~~~C
for(; prd.depth < 10; prd.depth++)
{
traceRayEXT(topLevelAS, // acceleration structure
rayFlags, // rayFlags
0xFF, // cullMask
0, // sbtRecordOffset
0, // sbtRecordStride
0, // missIndex
prd.rayOrigin, // ray origin
tMin, // ray min range
prd.rayDirection, // ray direction
tMax, // ray max range
0 // payload (location = 0)
);
hitValue += prd.hitValue * curWeight;
curWeight *= prd.weight;
}
~~~~
:warning: **Note:** do not forget to use `hitValue` in the `imageStore`.
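Concretely, the accumulation block from the ray generation shader now blends the loop result `hitValue`:
~~~~C
// Same accumulation as before, but writing the accumulated 'hitValue' of the loop
if(pcRay.frame > 0)
{
  float a         = 1.0f / float(pcRay.frame + 1);
  vec3  old_color = imageLoad(image, ivec2(gl_LaunchIDEXT.xy)).xyz;
  imageStore(image, ivec2(gl_LaunchIDEXT.xy), vec4(mix(old_color, hitValue, a), 1.f));
}
else
{
  imageStore(image, ivec2(gl_LaunchIDEXT.xy), vec4(hitValue, 1.f));
}
~~~~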


@ -0,0 +1,885 @@
/*
* Copyright (c) 2014-2023, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION
* SPDX-License-Identifier: Apache-2.0
*/
#include <sstream>
#define TINYGLTF_IMPLEMENTATION
#define STB_IMAGE_IMPLEMENTATION
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "hello_vulkan.h"
#include "nvh/cameramanipulator.hpp"
#include "nvh/fileoperations.hpp"
#include "nvh/gltfscene.hpp"
#include "nvh/nvprint.hpp"
#include "nvvk/commands_vk.hpp"
#include "nvvk/descriptorsets_vk.hpp"
#include "nvvk/images_vk.hpp"
#include "nvvk/pipeline_vk.hpp"
#include "nvvk/renderpasses_vk.hpp"
#include "nvvk/shaders_vk.hpp"
#include "nvh/alignment.hpp"
#include "nvvk/buffers_vk.hpp"
extern std::vector<std::string> defaultSearchPaths;
//--------------------------------------------------------------------------------------------------
// Keep the handle on the device
// Initialize the tool to do all our allocations: buffers, images
//
void HelloVulkan::setup(const VkInstance& instance, const VkDevice& device, const VkPhysicalDevice& physicalDevice, uint32_t queueFamily)
{
AppBaseVk::setup(instance, device, physicalDevice, queueFamily);
m_alloc.init(instance, device, physicalDevice);
m_debug.setup(m_device);
m_offscreenDepthFormat = nvvk::findDepthFormat(physicalDevice);
}
//--------------------------------------------------------------------------------------------------
// Called at each frame to update the camera matrix
//
void HelloVulkan::updateUniformBuffer(const VkCommandBuffer& cmdBuf)
{
// Prepare new UBO contents on host.
const float aspectRatio = m_size.width / static_cast<float>(m_size.height);
GlobalUniforms hostUBO = {};
const auto& view = CameraManip.getMatrix();
glm::mat4 proj = glm::perspectiveRH_ZO(glm::radians(CameraManip.getFov()), aspectRatio, 0.1f, 1000.0f);
proj[1][1] *= -1; // Inverting Y for Vulkan (not needed with perspectiveVK).
hostUBO.viewProj = proj * view;
hostUBO.viewInverse = glm::inverse(view);
hostUBO.projInverse = glm::inverse(proj);
// UBO on the device, and what stages access it.
VkBuffer deviceUBO = m_bGlobals.buffer;
auto uboUsageStages = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR;
// Ensure that the modified UBO is not visible to previous frames.
VkBufferMemoryBarrier beforeBarrier{VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER};
beforeBarrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT;
beforeBarrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
beforeBarrier.buffer = deviceUBO;
beforeBarrier.offset = 0;
beforeBarrier.size = sizeof(hostUBO);
vkCmdPipelineBarrier(cmdBuf, uboUsageStages, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_DEPENDENCY_DEVICE_GROUP_BIT, 0,
nullptr, 1, &beforeBarrier, 0, nullptr);
// Schedule the host-to-device upload. (hostUBO is copied into the cmd
// buffer so it is okay to deallocate when the function returns).
vkCmdUpdateBuffer(cmdBuf, m_bGlobals.buffer, 0, sizeof(GlobalUniforms), &hostUBO);
// Making sure the updated UBO will be visible.
VkBufferMemoryBarrier afterBarrier{VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER};
afterBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
afterBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
afterBarrier.buffer = deviceUBO;
afterBarrier.offset = 0;
afterBarrier.size = sizeof(hostUBO);
vkCmdPipelineBarrier(cmdBuf, VK_PIPELINE_STAGE_TRANSFER_BIT, uboUsageStages, VK_DEPENDENCY_DEVICE_GROUP_BIT, 0,
nullptr, 1, &afterBarrier, 0, nullptr);
}
//--------------------------------------------------------------------------------------------------
// Describing the layout pushed when rendering
//
void HelloVulkan::createDescriptorSetLayout()
{
auto& bind = m_descSetLayoutBind;
// Camera matrices
bind.addBinding(SceneBindings::eGlobals, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1,
VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_RAYGEN_BIT_KHR);
// Array of textures
auto nbTextures = static_cast<uint32_t>(m_textures.size());
bind.addBinding(SceneBindings::eTextures, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, nbTextures,
VK_SHADER_STAGE_FRAGMENT_BIT | VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR | VK_SHADER_STAGE_ANY_HIT_BIT_KHR);
// Scene buffers
bind.addBinding(eSceneDesc, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1,
VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT | VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR
| VK_SHADER_STAGE_ANY_HIT_BIT_KHR);
m_descSetLayout = m_descSetLayoutBind.createLayout(m_device);
m_descPool = m_descSetLayoutBind.createPool(m_device, 1);
m_descSet = nvvk::allocateDescriptorSet(m_device, m_descPool, m_descSetLayout);
}
//--------------------------------------------------------------------------------------------------
// Setting up the buffers in the descriptor set
//
void HelloVulkan::updateDescriptorSet()
{
std::vector<VkWriteDescriptorSet> writes;
// Camera matrices and scene description
VkDescriptorBufferInfo dbiUnif{m_bGlobals.buffer, 0, VK_WHOLE_SIZE};
VkDescriptorBufferInfo sceneDesc{m_sceneDesc.buffer, 0, VK_WHOLE_SIZE};
writes.emplace_back(m_descSetLayoutBind.makeWrite(m_descSet, SceneBindings::eGlobals, &dbiUnif));
writes.emplace_back(m_descSetLayoutBind.makeWrite(m_descSet, eSceneDesc, &sceneDesc));
// All texture samplers
std::vector<VkDescriptorImageInfo> diit;
for(auto& texture : m_textures)
diit.emplace_back(texture.descriptor);
writes.emplace_back(m_descSetLayoutBind.makeWriteArray(m_descSet, SceneBindings::eTextures, diit.data()));
// Writing the information
vkUpdateDescriptorSets(m_device, static_cast<uint32_t>(writes.size()), writes.data(), 0, nullptr);
}
//--------------------------------------------------------------------------------------------------
// Creating the pipeline layout
//
void HelloVulkan::createGraphicsPipeline()
{
VkPushConstantRange pushConstantRanges = {VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT, 0, sizeof(PushConstantRaster)};
// Creating the Pipeline Layout
VkPipelineLayoutCreateInfo createInfo{VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO};
createInfo.setLayoutCount = 1;
createInfo.pSetLayouts = &m_descSetLayout;
createInfo.pushConstantRangeCount = 1;
createInfo.pPushConstantRanges = &pushConstantRanges;
vkCreatePipelineLayout(m_device, &createInfo, nullptr, &m_pipelineLayout);
// Creating the Pipeline
std::vector<std::string> paths = defaultSearchPaths;
nvvk::GraphicsPipelineGeneratorCombined gpb(m_device, m_pipelineLayout, m_offscreenRenderPass);
gpb.depthStencilState.depthTestEnable = true;
gpb.addShader(nvh::loadFile("spv/vert_shader.vert.spv", true, paths, true), VK_SHADER_STAGE_VERTEX_BIT);
gpb.addShader(nvh::loadFile("spv/frag_shader.frag.spv", true, paths, true), VK_SHADER_STAGE_FRAGMENT_BIT);
gpb.addBindingDescriptions({{0, sizeof(glm::vec3)}, {1, sizeof(glm::vec3)}, {2, sizeof(glm::vec2)}});
gpb.addAttributeDescriptions({
{0, 0, VK_FORMAT_R32G32B32_SFLOAT, 0}, // Position
{1, 1, VK_FORMAT_R32G32B32_SFLOAT, 0}, // Normal
{2, 2, VK_FORMAT_R32G32_SFLOAT, 0}, // Texcoord0
});
m_graphicsPipeline = gpb.createPipeline();
m_debug.setObjectName(m_graphicsPipeline, "Graphics");
}
//--------------------------------------------------------------------------------------------------
// Loading the glTF file and setting up all buffers
//
void HelloVulkan::loadScene(const std::string& filename)
{
using vkBU = VkBufferUsageFlagBits;
tinygltf::Model tmodel;
tinygltf::TinyGLTF tcontext;
std::string warn, error;
LOGI("Loading file: %s", filename.c_str());
if(!tcontext.LoadASCIIFromFile(&tmodel, &error, &warn, filename))
{
assert(!"Error while loading scene");
}
LOGW("%s", warn.c_str());
LOGE("%s", error.c_str());
m_gltfScene.importMaterials(tmodel);
m_gltfScene.importDrawableNodes(tmodel, nvh::GltfAttributes::Normal | nvh::GltfAttributes::Texcoord_0);
// Create the buffers on Device and copy vertices, indices and materials
nvvk::CommandPool cmdBufGet(m_device, m_graphicsQueueIndex);
VkCommandBuffer cmdBuf = cmdBufGet.createCommandBuffer();
m_vertexBuffer = m_alloc.createBuffer(cmdBuf, m_gltfScene.m_positions,
VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT
| VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR);
m_indexBuffer = m_alloc.createBuffer(cmdBuf, m_gltfScene.m_indices,
VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT
| VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR);
m_normalBuffer = m_alloc.createBuffer(cmdBuf, m_gltfScene.m_normals,
VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT
| VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT);
m_uvBuffer = m_alloc.createBuffer(cmdBuf, m_gltfScene.m_texcoords0,
VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT
| VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT);
// Copying all materials, only the elements we need
std::vector<GltfShadeMaterial> shadeMaterials;
for(const auto& m : m_gltfScene.m_materials)
{
shadeMaterials.emplace_back(GltfShadeMaterial{m.baseColorFactor, m.emissiveFactor, m.baseColorTexture});
}
m_materialBuffer = m_alloc.createBuffer(cmdBuf, shadeMaterials,
VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT);
// The following is used to find the primitive mesh information in the CHIT
std::vector<PrimMeshInfo> primLookup;
for(auto& primMesh : m_gltfScene.m_primMeshes)
{
primLookup.push_back({primMesh.firstIndex, primMesh.vertexOffset, primMesh.materialIndex});
}
m_primInfo = m_alloc.createBuffer(cmdBuf, primLookup, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT);
SceneDesc sceneDesc;
sceneDesc.vertexAddress = nvvk::getBufferDeviceAddress(m_device, m_vertexBuffer.buffer);
sceneDesc.indexAddress = nvvk::getBufferDeviceAddress(m_device, m_indexBuffer.buffer);
sceneDesc.normalAddress = nvvk::getBufferDeviceAddress(m_device, m_normalBuffer.buffer);
sceneDesc.uvAddress = nvvk::getBufferDeviceAddress(m_device, m_uvBuffer.buffer);
sceneDesc.materialAddress = nvvk::getBufferDeviceAddress(m_device, m_materialBuffer.buffer);
sceneDesc.primInfoAddress = nvvk::getBufferDeviceAddress(m_device, m_primInfo.buffer);
m_sceneDesc = m_alloc.createBuffer(cmdBuf, sizeof(SceneDesc), &sceneDesc,
VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT);
// Creates all textures found
createTextureImages(cmdBuf, tmodel);
cmdBufGet.submitAndWait(cmdBuf);
m_alloc.finalizeAndReleaseStaging();
NAME_VK(m_vertexBuffer.buffer);
NAME_VK(m_indexBuffer.buffer);
NAME_VK(m_normalBuffer.buffer);
NAME_VK(m_uvBuffer.buffer);
NAME_VK(m_materialBuffer.buffer);
NAME_VK(m_primInfo.buffer);
NAME_VK(m_sceneDesc.buffer);
}
//--------------------------------------------------------------------------------------------------
// Creating the uniform buffer holding the camera matrices
// - Buffer is host visible
//
void HelloVulkan::createUniformBuffer()
{
m_bGlobals = m_alloc.createBuffer(sizeof(GlobalUniforms), VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
m_debug.setObjectName(m_bGlobals.buffer, "Globals");
}
//--------------------------------------------------------------------------------------------------
// Creating all textures and samplers
//
void HelloVulkan::createTextureImages(const VkCommandBuffer& cmdBuf, tinygltf::Model& gltfModel)
{
VkSamplerCreateInfo samplerCreateInfo{VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO};
samplerCreateInfo.minFilter = VK_FILTER_LINEAR;
samplerCreateInfo.magFilter = VK_FILTER_LINEAR;
samplerCreateInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
samplerCreateInfo.maxLod = FLT_MAX;
VkFormat format = VK_FORMAT_R8G8B8A8_SRGB;
auto addDefaultTexture = [this]() {
// Make dummy image(1,1), needed as we cannot have an empty array
nvvk::ScopeCommandBuffer cmdBuf(m_device, m_graphicsQueueIndex);
std::array<uint8_t, 4> white = {255, 255, 255, 255};
VkSamplerCreateInfo sampler{VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO};
m_textures.emplace_back(m_alloc.createTexture(cmdBuf, 4, white.data(), nvvk::makeImage2DCreateInfo(VkExtent2D{1, 1}), sampler));
m_debug.setObjectName(m_textures.back().image, "dummy");
};
if(gltfModel.images.empty())
{
addDefaultTexture();
return;
}
m_textures.reserve(gltfModel.images.size());
for(size_t i = 0; i < gltfModel.images.size(); i++)
{
auto& gltfimage = gltfModel.images[i];
void* buffer = &gltfimage.image[0];
VkDeviceSize bufferSize = gltfimage.image.size();
auto imgSize = VkExtent2D{(uint32_t)gltfimage.width, (uint32_t)gltfimage.height};
if(bufferSize == 0 || gltfimage.width == -1 || gltfimage.height == -1)
{
addDefaultTexture();
continue;
}
VkImageCreateInfo imageCreateInfo = nvvk::makeImage2DCreateInfo(imgSize, format, VK_IMAGE_USAGE_SAMPLED_BIT, true);
nvvk::Image image = m_alloc.createImage(cmdBuf, bufferSize, buffer, imageCreateInfo);
nvvk::cmdGenerateMipmaps(cmdBuf, image.image, format, imgSize, imageCreateInfo.mipLevels);
VkImageViewCreateInfo ivInfo = nvvk::makeImageViewCreateInfo(image.image, imageCreateInfo);
m_textures.emplace_back(m_alloc.createTexture(image, ivInfo, samplerCreateInfo));
m_debug.setObjectName(m_textures[i].image, std::string("Txt" + std::to_string(i)));
}
}
//--------------------------------------------------------------------------------------------------
// Destroying all allocations
//
void HelloVulkan::destroyResources()
{
vkDestroyPipeline(m_device, m_graphicsPipeline, nullptr);
vkDestroyPipelineLayout(m_device, m_pipelineLayout, nullptr);
vkDestroyDescriptorPool(m_device, m_descPool, nullptr);
vkDestroyDescriptorSetLayout(m_device, m_descSetLayout, nullptr);
m_alloc.destroy(m_bGlobals);
m_alloc.destroy(m_vertexBuffer);
m_alloc.destroy(m_normalBuffer);
m_alloc.destroy(m_uvBuffer);
m_alloc.destroy(m_indexBuffer);
m_alloc.destroy(m_materialBuffer);
m_alloc.destroy(m_primInfo);
m_alloc.destroy(m_sceneDesc);
for(auto& t : m_textures)
{
m_alloc.destroy(t);
}
//#Post
m_alloc.destroy(m_offscreenColor);
m_alloc.destroy(m_offscreenDepth);
vkDestroyPipeline(m_device, m_postPipeline, nullptr);
vkDestroyPipelineLayout(m_device, m_postPipelineLayout, nullptr);
vkDestroyDescriptorPool(m_device, m_postDescPool, nullptr);
vkDestroyDescriptorSetLayout(m_device, m_postDescSetLayout, nullptr);
vkDestroyRenderPass(m_device, m_offscreenRenderPass, nullptr);
vkDestroyFramebuffer(m_device, m_offscreenFramebuffer, nullptr);
// #VKRay
m_rtBuilder.destroy();
m_sbtWrapper.destroy();
vkDestroyPipeline(m_device, m_rtPipeline, nullptr);
vkDestroyPipelineLayout(m_device, m_rtPipelineLayout, nullptr);
vkDestroyDescriptorPool(m_device, m_rtDescPool, nullptr);
vkDestroyDescriptorSetLayout(m_device, m_rtDescSetLayout, nullptr);
m_alloc.deinit();
}
//--------------------------------------------------------------------------------------------------
// Drawing the scene in raster mode
//
void HelloVulkan::rasterize(const VkCommandBuffer& cmdBuf)
{
using vkPBP = VkPipelineBindPoint;
std::vector<VkDeviceSize> offsets = {0, 0, 0};
m_debug.beginLabel(cmdBuf, "Rasterize");
// Dynamic Viewport
setViewport(cmdBuf);
// Drawing all triangles
vkCmdBindPipeline(cmdBuf, VK_PIPELINE_BIND_POINT_GRAPHICS, m_graphicsPipeline);
vkCmdBindDescriptorSets(cmdBuf, VK_PIPELINE_BIND_POINT_GRAPHICS, m_pipelineLayout, 0, 1, &m_descSet, 0, nullptr);
std::vector<VkBuffer> vertexBuffers = {m_vertexBuffer.buffer, m_normalBuffer.buffer, m_uvBuffer.buffer};
vkCmdBindVertexBuffers(cmdBuf, 0, static_cast<uint32_t>(vertexBuffers.size()), vertexBuffers.data(), offsets.data());
vkCmdBindIndexBuffer(cmdBuf, m_indexBuffer.buffer, 0, VK_INDEX_TYPE_UINT32);
uint32_t idxNode = 0;
for(auto& node : m_gltfScene.m_nodes)
{
auto& primitive = m_gltfScene.m_primMeshes[node.primMesh];
m_pcRaster.modelMatrix = node.worldMatrix;
m_pcRaster.objIndex = node.primMesh;
m_pcRaster.materialId = primitive.materialIndex;
vkCmdPushConstants(cmdBuf, m_pipelineLayout, VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT, 0,
sizeof(PushConstantRaster), &m_pcRaster);
vkCmdDrawIndexed(cmdBuf, primitive.indexCount, 1, primitive.firstIndex, primitive.vertexOffset, 0);
}
m_debug.endLabel(cmdBuf);
}
//--------------------------------------------------------------------------------------------------
// Handling resize of the window
//
void HelloVulkan::onResize(int /*w*/, int /*h*/)
{
createOffscreenRender();
updatePostDescriptorSet();
updateRtDescriptorSet();
resetFrame();
}
//////////////////////////////////////////////////////////////////////////
// Post-processing
//////////////////////////////////////////////////////////////////////////
//--------------------------------------------------------------------------------------------------
// Creating an offscreen frame buffer and the associated render pass
//
void HelloVulkan::createOffscreenRender()
{
m_alloc.destroy(m_offscreenColor);
m_alloc.destroy(m_offscreenDepth);
// Creating the color image
{
auto colorCreateInfo = nvvk::makeImage2DCreateInfo(m_size, m_offscreenColorFormat,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT
| VK_IMAGE_USAGE_STORAGE_BIT);
nvvk::Image image = m_alloc.createImage(colorCreateInfo);
VkImageViewCreateInfo ivInfo = nvvk::makeImageViewCreateInfo(image.image, colorCreateInfo);
VkSamplerCreateInfo sampler{VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO};
m_offscreenColor = m_alloc.createTexture(image, ivInfo, sampler);
m_offscreenColor.descriptor.imageLayout = VK_IMAGE_LAYOUT_GENERAL;
}
// Creating the depth buffer
auto depthCreateInfo = nvvk::makeImage2DCreateInfo(m_size, m_offscreenDepthFormat, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);
{
nvvk::Image image = m_alloc.createImage(depthCreateInfo);
VkImageViewCreateInfo depthStencilView{VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO};
depthStencilView.viewType = VK_IMAGE_VIEW_TYPE_2D;
depthStencilView.format = m_offscreenDepthFormat;
depthStencilView.subresourceRange = {VK_IMAGE_ASPECT_DEPTH_BIT, 0, 1, 0, 1};
depthStencilView.image = image.image;
m_offscreenDepth = m_alloc.createTexture(image, depthStencilView);
}
// Setting the image layout for both color and depth
{
nvvk::CommandPool genCmdBuf(m_device, m_graphicsQueueIndex);
auto cmdBuf = genCmdBuf.createCommandBuffer();
nvvk::cmdBarrierImageLayout(cmdBuf, m_offscreenColor.image, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL);
nvvk::cmdBarrierImageLayout(cmdBuf, m_offscreenDepth.image, VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, VK_IMAGE_ASPECT_DEPTH_BIT);
genCmdBuf.submitAndWait(cmdBuf);
}
// Creating a renderpass for the offscreen
if(!m_offscreenRenderPass)
{
m_offscreenRenderPass = nvvk::createRenderPass(m_device, {m_offscreenColorFormat}, m_offscreenDepthFormat, 1, true,
true, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL);
}
// Creating the frame buffer for offscreen
std::vector<VkImageView> attachments = {m_offscreenColor.descriptor.imageView, m_offscreenDepth.descriptor.imageView};
vkDestroyFramebuffer(m_device, m_offscreenFramebuffer, nullptr);
VkFramebufferCreateInfo info{VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO};
info.renderPass = m_offscreenRenderPass;
info.attachmentCount = 2;
info.pAttachments = attachments.data();
info.width = m_size.width;
info.height = m_size.height;
info.layers = 1;
vkCreateFramebuffer(m_device, &info, nullptr, &m_offscreenFramebuffer);
}
//--------------------------------------------------------------------------------------------------
// The pipeline is how things are rendered, which shaders, type of primitives, depth test and more
//
void HelloVulkan::createPostPipeline()
{
// Push constants in the fragment shader
VkPushConstantRange pushConstantRanges = {VK_SHADER_STAGE_FRAGMENT_BIT, 0, sizeof(float)};
// Creating the pipeline layout
VkPipelineLayoutCreateInfo createInfo{VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO};
createInfo.setLayoutCount = 1;
createInfo.pSetLayouts = &m_postDescSetLayout;
createInfo.pushConstantRangeCount = 1;
createInfo.pPushConstantRanges = &pushConstantRanges;
vkCreatePipelineLayout(m_device, &createInfo, nullptr, &m_postPipelineLayout);
// Pipeline: completely generic, no vertices
nvvk::GraphicsPipelineGeneratorCombined pipelineGenerator(m_device, m_postPipelineLayout, m_renderPass);
pipelineGenerator.addShader(nvh::loadFile("spv/passthrough.vert.spv", true, defaultSearchPaths, true), VK_SHADER_STAGE_VERTEX_BIT);
pipelineGenerator.addShader(nvh::loadFile("spv/post.frag.spv", true, defaultSearchPaths, true), VK_SHADER_STAGE_FRAGMENT_BIT);
pipelineGenerator.rasterizationState.cullMode = VK_CULL_MODE_NONE;
m_postPipeline = pipelineGenerator.createPipeline();
m_debug.setObjectName(m_postPipeline, "post");
}
//--------------------------------------------------------------------------------------------------
// The descriptor layout is the description of the data that is passed to the vertex or the
// fragment program.
//
void HelloVulkan::createPostDescriptor()
{
m_postDescSetLayoutBind.addBinding(0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT);
m_postDescSetLayout = m_postDescSetLayoutBind.createLayout(m_device);
m_postDescPool = m_postDescSetLayoutBind.createPool(m_device);
m_postDescSet = nvvk::allocateDescriptorSet(m_device, m_postDescPool, m_postDescSetLayout);
}
//--------------------------------------------------------------------------------------------------
// Update the output
//
void HelloVulkan::updatePostDescriptorSet()
{
VkWriteDescriptorSet writeDescriptorSets = m_postDescSetLayoutBind.makeWrite(m_postDescSet, 0, &m_offscreenColor.descriptor);
vkUpdateDescriptorSets(m_device, 1, &writeDescriptorSets, 0, nullptr);
}
//--------------------------------------------------------------------------------------------------
// Draw a full screen quad with the attached image
//
void HelloVulkan::drawPost(VkCommandBuffer cmdBuf)
{
m_debug.beginLabel(cmdBuf, "Post");
setViewport(cmdBuf);
auto aspectRatio = static_cast<float>(m_size.width) / static_cast<float>(m_size.height);
vkCmdPushConstants(cmdBuf, m_postPipelineLayout, VK_SHADER_STAGE_FRAGMENT_BIT, 0, sizeof(float), &aspectRatio);
vkCmdBindPipeline(cmdBuf, VK_PIPELINE_BIND_POINT_GRAPHICS, m_postPipeline);
vkCmdBindDescriptorSets(cmdBuf, VK_PIPELINE_BIND_POINT_GRAPHICS, m_postPipelineLayout, 0, 1, &m_postDescSet, 0, nullptr);
vkCmdDraw(cmdBuf, 3, 1, 0, 0);
m_debug.endLabel(cmdBuf);
}
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
//--------------------------------------------------------------------------------------------------
// Initialize Vulkan ray tracing
// #VKRay
void HelloVulkan::initRayTracing()
{
// Requesting ray tracing properties
VkPhysicalDeviceProperties2 prop2{VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2};
prop2.pNext = &m_rtProperties;
vkGetPhysicalDeviceProperties2(m_physicalDevice, &prop2);
m_rtBuilder.setup(m_device, &m_alloc, m_graphicsQueueIndex);
m_sbtWrapper.setup(m_device, m_graphicsQueueIndex, &m_alloc, m_rtProperties);
}
//--------------------------------------------------------------------------------------------------
// Converting a GLTF primitive in the Raytracing Geometry used for the BLAS
//
auto HelloVulkan::primitiveToVkGeometry(const nvh::GltfPrimMesh& prim)
{
// BLAS builder requires raw device addresses.
VkDeviceAddress vertexAddress = nvvk::getBufferDeviceAddress(m_device, m_vertexBuffer.buffer);
VkDeviceAddress indexAddress = nvvk::getBufferDeviceAddress(m_device, m_indexBuffer.buffer);
uint32_t maxPrimitiveCount = prim.indexCount / 3;
// Describe buffer as array of VertexObj.
VkAccelerationStructureGeometryTrianglesDataKHR triangles{VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR};
triangles.vertexFormat = VK_FORMAT_R32G32B32_SFLOAT; // vec3 vertex position data.
triangles.vertexData.deviceAddress = vertexAddress;
triangles.vertexStride = sizeof(glm::vec3);
// Describe index data (32-bit unsigned int)
triangles.indexType = VK_INDEX_TYPE_UINT32;
triangles.indexData.deviceAddress = indexAddress;
// Indicate identity transform by setting transformData to null device pointer.
//triangles.transformData = {};
triangles.maxVertex = prim.vertexCount - 1;
// Identify the above data as containing opaque triangles.
VkAccelerationStructureGeometryKHR asGeom{VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_KHR};
asGeom.geometryType = VK_GEOMETRY_TYPE_TRIANGLES_KHR;
asGeom.flags = VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_KHR; // For AnyHit
asGeom.geometry.triangles = triangles;
VkAccelerationStructureBuildRangeInfoKHR offset;
offset.firstVertex = prim.vertexOffset;
offset.primitiveCount = maxPrimitiveCount;
offset.primitiveOffset = prim.firstIndex * sizeof(uint32_t);
offset.transformOffset = 0;
// Our blas is made from only one geometry, but could be made of many geometries
nvvk::RaytracingBuilderKHR::BlasInput input;
input.asGeometry.emplace_back(asGeom);
input.asBuildOffsetInfo.emplace_back(offset);
return input;
}
//--------------------------------------------------------------------------------------------------
//
//
void HelloVulkan::createBottomLevelAS()
{
// BLAS - Storing each primitive in a geometry
std::vector<nvvk::RaytracingBuilderKHR::BlasInput> allBlas;
allBlas.reserve(m_gltfScene.m_primMeshes.size());
for(auto& primMesh : m_gltfScene.m_primMeshes)
{
auto geo = primitiveToVkGeometry(primMesh);
allBlas.push_back({geo});
}
m_rtBuilder.buildBlas(allBlas, VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_KHR);
}
//--------------------------------------------------------------------------------------------------
//
//
void HelloVulkan::createTopLevelAS()
{
std::vector<VkAccelerationStructureInstanceKHR> tlas;
tlas.reserve(m_gltfScene.m_nodes.size());
for(auto& node : m_gltfScene.m_nodes)
{
VkAccelerationStructureInstanceKHR rayInst{};
rayInst.transform = nvvk::toTransformMatrixKHR(node.worldMatrix);
rayInst.instanceCustomIndex = node.primMesh; // gl_InstanceCustomIndexEXT: to find which primitive
rayInst.accelerationStructureReference = m_rtBuilder.getBlasDeviceAddress(node.primMesh);
rayInst.flags = VK_GEOMETRY_INSTANCE_TRIANGLE_FACING_CULL_DISABLE_BIT_KHR;
rayInst.mask = 0xFF;
rayInst.instanceShaderBindingTableRecordOffset = 0; // We will use the same hit group for all objects
tlas.emplace_back(rayInst);
}
m_rtBuilder.buildTlas(tlas, VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_KHR);
}
//--------------------------------------------------------------------------------------------------
// This descriptor set holds the Acceleration structure and the output image
//
void HelloVulkan::createRtDescriptorSet()
{
// Top-level acceleration structure, usable by both the ray generation and the closest hit (to shoot shadow rays)
m_rtDescSetLayoutBind.addBinding(RtxBindings::eTlas, VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR, 1,
VK_SHADER_STAGE_RAYGEN_BIT_KHR | VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR); // TLAS
m_rtDescSetLayoutBind.addBinding(RtxBindings::eOutImage, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1,
VK_SHADER_STAGE_RAYGEN_BIT_KHR); // Output image
m_rtDescSetLayoutBind.addBinding(RtxBindings::ePrimLookup, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1,
VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR | VK_SHADER_STAGE_ANY_HIT_BIT_KHR); // Primitive info
m_rtDescPool = m_rtDescSetLayoutBind.createPool(m_device);
m_rtDescSetLayout = m_rtDescSetLayoutBind.createLayout(m_device);
VkDescriptorSetAllocateInfo allocateInfo{VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO};
allocateInfo.descriptorPool = m_rtDescPool;
allocateInfo.descriptorSetCount = 1;
allocateInfo.pSetLayouts = &m_rtDescSetLayout;
vkAllocateDescriptorSets(m_device, &allocateInfo, &m_rtDescSet);
VkAccelerationStructureKHR tlas = m_rtBuilder.getAccelerationStructure();
VkWriteDescriptorSetAccelerationStructureKHR descASInfo{VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR};
descASInfo.accelerationStructureCount = 1;
descASInfo.pAccelerationStructures = &tlas;
VkDescriptorImageInfo imageInfo{{}, m_offscreenColor.descriptor.imageView, VK_IMAGE_LAYOUT_GENERAL};
VkDescriptorBufferInfo primitiveInfoDesc{m_primInfo.buffer, 0, VK_WHOLE_SIZE};
std::vector<VkWriteDescriptorSet> writes;
writes.emplace_back(m_rtDescSetLayoutBind.makeWrite(m_rtDescSet, RtxBindings::eTlas, &descASInfo));
writes.emplace_back(m_rtDescSetLayoutBind.makeWrite(m_rtDescSet, RtxBindings::eOutImage, &imageInfo));
writes.emplace_back(m_rtDescSetLayoutBind.makeWrite(m_rtDescSet, RtxBindings::ePrimLookup, &primitiveInfoDesc));
vkUpdateDescriptorSets(m_device, static_cast<uint32_t>(writes.size()), writes.data(), 0, nullptr);
}
//--------------------------------------------------------------------------------------------------
// Writes the output image to the descriptor set
// - Required when changing resolution
//
void HelloVulkan::updateRtDescriptorSet()
{
// (1) Output buffer
VkDescriptorImageInfo imageInfo{{}, m_offscreenColor.descriptor.imageView, VK_IMAGE_LAYOUT_GENERAL};
VkWriteDescriptorSet wds = m_rtDescSetLayoutBind.makeWrite(m_rtDescSet, RtxBindings::eOutImage, &imageInfo);
vkUpdateDescriptorSets(m_device, 1, &wds, 0, nullptr);
}
//--------------------------------------------------------------------------------------------------
// Pipeline for the ray tracer: all shaders, raygen, chit, miss
//
void HelloVulkan::createRtPipeline()
{
enum StageIndices
{
eRaygen,
eMiss,
eMiss2,
eClosestHit,
eShaderGroupCount
};
// All stages
std::array<VkPipelineShaderStageCreateInfo, eShaderGroupCount> stages{};
VkPipelineShaderStageCreateInfo stage{VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO};
stage.pName = "main"; // All the same entry point
// Raygen
stage.module = nvvk::createShaderModule(m_device, nvh::loadFile("spv/raytrace.rgen.spv", true, defaultSearchPaths, true));
stage.stage = VK_SHADER_STAGE_RAYGEN_BIT_KHR;
stages[eRaygen] = stage;
// Miss
stage.module = nvvk::createShaderModule(m_device, nvh::loadFile("spv/raytrace.rmiss.spv", true, defaultSearchPaths, true));
stage.stage = VK_SHADER_STAGE_MISS_BIT_KHR;
stages[eMiss] = stage;
// The second miss shader is invoked when a shadow ray misses the geometry. It simply indicates that no occlusion has been found
stage.module =
nvvk::createShaderModule(m_device, nvh::loadFile("spv/raytraceShadow.rmiss.spv", true, defaultSearchPaths, true));
stage.stage = VK_SHADER_STAGE_MISS_BIT_KHR;
stages[eMiss2] = stage;
// Hit Group - Closest Hit
stage.module = nvvk::createShaderModule(m_device, nvh::loadFile("spv/raytrace.rchit.spv", true, defaultSearchPaths, true));
stage.stage = VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR;
stages[eClosestHit] = stage;
// Shader groups
VkRayTracingShaderGroupCreateInfoKHR group{VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_KHR};
group.anyHitShader = VK_SHADER_UNUSED_KHR;
group.closestHitShader = VK_SHADER_UNUSED_KHR;
group.generalShader = VK_SHADER_UNUSED_KHR;
group.intersectionShader = VK_SHADER_UNUSED_KHR;
// Raygen
group.type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR;
group.generalShader = eRaygen;
m_rtShaderGroups.push_back(group);
// Miss
group.type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR;
group.generalShader = eMiss;
m_rtShaderGroups.push_back(group);
// Shadow Miss
group.type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR;
group.generalShader = eMiss2;
m_rtShaderGroups.push_back(group);
// closest hit shader
group.type = VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR;
group.generalShader = VK_SHADER_UNUSED_KHR;
group.closestHitShader = eClosestHit;
m_rtShaderGroups.push_back(group);
// Push constant: we want to be able to update constants used by the shaders
VkPushConstantRange pushConstant{VK_SHADER_STAGE_RAYGEN_BIT_KHR | VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR | VK_SHADER_STAGE_MISS_BIT_KHR,
0, sizeof(PushConstantRay)};
VkPipelineLayoutCreateInfo pipelineLayoutCreateInfo{VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO};
pipelineLayoutCreateInfo.pushConstantRangeCount = 1;
pipelineLayoutCreateInfo.pPushConstantRanges = &pushConstant;
// Descriptor sets: one specific to ray tracing, and one shared with the rasterization pipeline
std::vector<VkDescriptorSetLayout> rtDescSetLayouts = {m_rtDescSetLayout, m_descSetLayout};
pipelineLayoutCreateInfo.setLayoutCount = static_cast<uint32_t>(rtDescSetLayouts.size());
pipelineLayoutCreateInfo.pSetLayouts = rtDescSetLayouts.data();
vkCreatePipelineLayout(m_device, &pipelineLayoutCreateInfo, nullptr, &m_rtPipelineLayout);
// Assemble the shader stages and recursion depth info into the ray tracing pipeline
VkRayTracingPipelineCreateInfoKHR rayPipelineInfo{VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_KHR};
rayPipelineInfo.stageCount = static_cast<uint32_t>(stages.size()); // Stages are shaders
rayPipelineInfo.pStages = stages.data();
// In this case, m_rtShaderGroups.size() == 4: we have one raygen group,
// two miss shader groups, and one hit group.
rayPipelineInfo.groupCount = static_cast<uint32_t>(m_rtShaderGroups.size());
rayPipelineInfo.pGroups = m_rtShaderGroups.data();
// The ray tracing process can shoot rays from the camera, and a shadow ray can be shot from the
// hit points of the camera rays, hence a recursion level of 2. This number should be kept as low
// as possible for performance reasons. Even recursive ray tracing should be flattened into a loop
// in the ray generation to avoid deep recursion.
rayPipelineInfo.maxPipelineRayRecursionDepth = 2; // Ray depth
rayPipelineInfo.layout = m_rtPipelineLayout;
vkCreateRayTracingPipelinesKHR(m_device, {}, {}, 1, &rayPipelineInfo, nullptr, &m_rtPipeline);
// Creating the SBT
m_sbtWrapper.create(m_rtPipeline, rayPipelineInfo);
for(auto& s : stages)
vkDestroyShaderModule(m_device, s.module, nullptr);
}
//--------------------------------------------------------------------------------------------------
// Ray Tracing the scene
//
void HelloVulkan::raytrace(const VkCommandBuffer& cmdBuf, const glm::vec4& clearColor)
{
updateFrame();
m_debug.beginLabel(cmdBuf, "Ray trace");
// Initializing push constant values
m_pcRay.clearColor = clearColor;
m_pcRay.lightPosition = m_pcRaster.lightPosition;
m_pcRay.lightIntensity = m_pcRaster.lightIntensity;
m_pcRay.lightType = m_pcRaster.lightType;
std::vector<VkDescriptorSet> descSets{m_rtDescSet, m_descSet};
vkCmdBindPipeline(cmdBuf, VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR, m_rtPipeline);
vkCmdBindDescriptorSets(cmdBuf, VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR, m_rtPipelineLayout, 0,
(uint32_t)descSets.size(), descSets.data(), 0, nullptr);
vkCmdPushConstants(cmdBuf, m_rtPipelineLayout,
VK_SHADER_STAGE_RAYGEN_BIT_KHR | VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR | VK_SHADER_STAGE_MISS_BIT_KHR,
0, sizeof(PushConstantRay), &m_pcRay);
auto& regions = m_sbtWrapper.getRegions();
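// vkCmdTraceRaysKHR expects the SBT regions in this order: ray generation, miss, hit, callable.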
vkCmdTraceRaysKHR(cmdBuf, &regions[0], &regions[1], &regions[2], &regions[3], m_size.width, m_size.height, 1);
m_debug.endLabel(cmdBuf);
}
//--------------------------------------------------------------------------------------------------
// If the camera matrix or the field of view has changed, resets the frame counter;
// otherwise, increments the frame.
//
void HelloVulkan::updateFrame()
{
static glm::mat4 refCamMatrix;
static float refFov{CameraManip.getFov()};
const auto& m = CameraManip.getMatrix();
const auto fov = CameraManip.getFov();
if(refCamMatrix != m || refFov != fov)
{
resetFrame();
refCamMatrix = m;
refFov = fov;
}
m_pcRay.frame++;
}
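//--------------------------------------------------------------------------------------------------
// Reset the frame counter so accumulation restarts.
// -1 is used because the next call to updateFrame() increments it to 0, the first accumulated frame.
//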
void HelloVulkan::resetFrame()
{
m_pcRay.frame = -1;
}

View file

@@ -0,0 +1,136 @@
/*
* Copyright (c) 2014-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include "shaders/host_device.h"
#include "nvvkhl/appbase_vk.hpp"
#include "nvvk/debug_util_vk.hpp"
#include "nvvk/descriptorsets_vk.hpp"
#include "nvvk/memallocator_dma_vk.hpp"
#include "nvvk/resourceallocator_vk.hpp"
// #VKRay
#include "nvh/gltfscene.hpp"
#include "nvvk/raytraceKHR_vk.hpp"
#include "nvvk/sbtwrapper_vk.hpp"
//--------------------------------------------------------------------------------------------------
// Simple rasterizer and ray tracer of glTF scenes
// - The glTF scene is loaded into an `nvh::GltfScene` and stored in shared vertex, index and material buffers
// - Several scene nodes may reference the same primitive mesh
// - Rendering is done in an offscreen framebuffer
// - The image of the framebuffer is displayed in post-process in a full-screen quad
//
class HelloVulkan : public nvvkhl::AppBaseVk
{
public:
void setup(const VkInstance& instance, const VkDevice& device, const VkPhysicalDevice& physicalDevice, uint32_t queueFamily) override;
void createDescriptorSetLayout();
void createGraphicsPipeline();
void loadScene(const std::string& filename);
void updateDescriptorSet();
void createUniformBuffer();
void createTextureImages(const VkCommandBuffer& cmdBuf, tinygltf::Model& gltfModel);
void updateUniformBuffer(const VkCommandBuffer& cmdBuf);
void onResize(int /*w*/, int /*h*/) override;
void destroyResources();
void rasterize(const VkCommandBuffer& cmdBuff);
nvh::GltfScene m_gltfScene;
nvvk::Buffer m_vertexBuffer;
nvvk::Buffer m_normalBuffer;
nvvk::Buffer m_uvBuffer;
nvvk::Buffer m_indexBuffer;
nvvk::Buffer m_materialBuffer;
nvvk::Buffer m_primInfo;
nvvk::Buffer m_sceneDesc;
// Information pushed at each draw call
PushConstantRaster m_pcRaster{
{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1}, // Identity matrix
{0.f, 100.f, 0.f}, // light position
0, // instance Id
1000.f, // light intensity
0, // light type
0 // material id
};
// Graphics pipeline
VkPipelineLayout m_pipelineLayout;
VkPipeline m_graphicsPipeline;
nvvk::DescriptorSetBindings m_descSetLayoutBind;
VkDescriptorPool m_descPool;
VkDescriptorSetLayout m_descSetLayout;
VkDescriptorSet m_descSet;
nvvk::Buffer m_bGlobals;  // Device buffer holding the camera matrices (GlobalUniforms)
std::vector<nvvk::Texture> m_textures; // vector of all textures of the scene
nvvk::ResourceAllocatorDma m_alloc;  // Allocator for buffers, images, acceleration structures
nvvk::DebugUtil m_debug; // Utility to name objects
// #Post - Draw the rendered image on a quad using a tonemapper
void createOffscreenRender();
void createPostPipeline();
void createPostDescriptor();
void updatePostDescriptorSet();
void drawPost(VkCommandBuffer cmdBuf);
nvvk::DescriptorSetBindings m_postDescSetLayoutBind;
VkDescriptorPool m_postDescPool{VK_NULL_HANDLE};
VkDescriptorSetLayout m_postDescSetLayout{VK_NULL_HANDLE};
VkDescriptorSet m_postDescSet{VK_NULL_HANDLE};
VkPipeline m_postPipeline{VK_NULL_HANDLE};
VkPipelineLayout m_postPipelineLayout{VK_NULL_HANDLE};
VkRenderPass m_offscreenRenderPass{VK_NULL_HANDLE};
VkFramebuffer m_offscreenFramebuffer{VK_NULL_HANDLE};
nvvk::Texture m_offscreenColor;
nvvk::Texture m_offscreenDepth;
VkFormat m_offscreenColorFormat{VK_FORMAT_R32G32B32A32_SFLOAT};
VkFormat m_offscreenDepthFormat{VK_FORMAT_X8_D24_UNORM_PACK32};
// #VKRay
void initRayTracing();
auto primitiveToVkGeometry(const nvh::GltfPrimMesh& prim);
void createBottomLevelAS();
void createTopLevelAS();
void createRtDescriptorSet();
void updateRtDescriptorSet();
void createRtPipeline();
void raytrace(const VkCommandBuffer& cmdBuf, const glm::vec4& clearColor);
void updateFrame();
void resetFrame();
VkPhysicalDeviceRayTracingPipelinePropertiesKHR m_rtProperties{VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_PROPERTIES_KHR};
nvvk::RaytracingBuilderKHR m_rtBuilder;
nvvk::DescriptorSetBindings m_rtDescSetLayoutBind;
VkDescriptorPool m_rtDescPool;
VkDescriptorSetLayout m_rtDescSetLayout;
VkDescriptorSet m_rtDescSet;
std::vector<VkRayTracingShaderGroupCreateInfoKHR> m_rtShaderGroups;
VkPipelineLayout m_rtPipelineLayout;
VkPipeline m_rtPipeline;
nvvk::SBTWrapper m_sbtWrapper;
PushConstantRay m_pcRay{};
};

Binary file not shown (added image, 60 KiB).

Binary file not shown (added image, 320 KiB).

View file

@@ -0,0 +1,309 @@
/*
* Copyright (c) 2014-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION
* SPDX-License-Identifier: Apache-2.0
*/
// ImGui - standalone example application for GLFW + Vulkan, using the programmable
// pipeline. If you are new to ImGui, see examples/README.txt and the documentation
// at the top of imgui.cpp.
#include <array>
#define IMGUI_DEFINE_MATH_OPERATORS
#include "backends/imgui_impl_glfw.h"
#include "backends/imgui_impl_vulkan.h"
#include "imgui.h"
#include "imgui/imgui_helper.h"
#include "hello_vulkan.h"
#include "imgui/imgui_camera_widget.h"
#include "nvh/cameramanipulator.hpp"
#include "nvh/fileoperations.hpp"
#include "nvpsystem.hpp"
#include "nvvk/commands_vk.hpp"
#include "nvvk/context_vk.hpp"
//////////////////////////////////////////////////////////////////////////
#define UNUSED(x) (void)(x)
//////////////////////////////////////////////////////////////////////////
// Default search path for shaders
std::vector<std::string> defaultSearchPaths;
// GLFW Callback functions
static void onErrorCallback(int error, const char* description)
{
fprintf(stderr, "GLFW Error %d: %s\n", error, description);
}
// Extra UI
void renderUI(HelloVulkan& helloVk, bool useRaytracer)
{
ImGuiH::CameraWidget();
if(!useRaytracer && ImGui::CollapsingHeader("Light"))
{
ImGui::RadioButton("Point", &helloVk.m_pcRaster.lightType, 0);
ImGui::SameLine();
ImGui::RadioButton("Infinite", &helloVk.m_pcRaster.lightType, 1);
ImGui::SliderFloat3("Position", &helloVk.m_pcRaster.lightPosition.x, -20.f, 20.f);
ImGui::SliderFloat("Intensity", &helloVk.m_pcRaster.lightIntensity, 0.f, 150.f);
}
}
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
static int const SAMPLE_WIDTH = 1280;
static int const SAMPLE_HEIGHT = 720;
//--------------------------------------------------------------------------------------------------
// Application Entry
//
int main(int argc, char** argv)
{
UNUSED(argc);
// Setup GLFW window
glfwSetErrorCallback(onErrorCallback);
if(!glfwInit())
{
return 1;
}
glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
GLFWwindow* window = glfwCreateWindow(SAMPLE_WIDTH, SAMPLE_HEIGHT, PROJECT_NAME, nullptr, nullptr);
// Setup Vulkan
if(!glfwVulkanSupported())
{
printf("GLFW: Vulkan Not Supported\n");
return 1;
}
// Set up basic utilities for the sample, such as the logging file
NVPSystem system(PROJECT_NAME);
// Search path for shaders and other media
defaultSearchPaths = {
NVPSystem::exePath() + PROJECT_RELDIRECTORY,
NVPSystem::exePath() + PROJECT_RELDIRECTORY "..",
std::string(PROJECT_NAME),
};
// Vulkan required extensions
assert(glfwVulkanSupported() == 1);
uint32_t count{0};
auto reqExtensions = glfwGetRequiredInstanceExtensions(&count);
// Requesting Vulkan extensions and layers
nvvk::ContextCreateInfo contextInfo;
contextInfo.setVersion(1, 2); // Using Vulkan 1.2
for(uint32_t ext_id = 0; ext_id < count; ext_id++) // Adding required extensions (surface, win32, linux, ..)
contextInfo.addInstanceExtension(reqExtensions[ext_id]);
contextInfo.addInstanceExtension(VK_EXT_DEBUG_UTILS_EXTENSION_NAME, true); // Allow debug names
contextInfo.addDeviceExtension(VK_KHR_SWAPCHAIN_EXTENSION_NAME); // Enabling ability to present rendering
// #VKRay: Activate the ray tracing extension
VkPhysicalDeviceAccelerationStructureFeaturesKHR accelFeature{VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_FEATURES_KHR};
contextInfo.addDeviceExtension(VK_KHR_ACCELERATION_STRUCTURE_EXTENSION_NAME, false, &accelFeature); // To build acceleration structures
VkPhysicalDeviceRayTracingPipelineFeaturesKHR rtPipelineFeature{VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_FEATURES_KHR};
contextInfo.addDeviceExtension(VK_KHR_RAY_TRACING_PIPELINE_EXTENSION_NAME, false, &rtPipelineFeature); // To use vkCmdTraceRaysKHR
contextInfo.addDeviceExtension(VK_KHR_DEFERRED_HOST_OPERATIONS_EXTENSION_NAME); // Required by ray tracing pipeline
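// Buffer device addresses let the shaders reference the scene buffers (vertices, indices, materials) directly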
contextInfo.addDeviceExtension(VK_KHR_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME);
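// The shader clock is used to seed the path tracer's random number generator (clockARB)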
VkPhysicalDeviceShaderClockFeaturesKHR clockFeature{VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR};
contextInfo.addDeviceExtension(VK_KHR_SHADER_CLOCK_EXTENSION_NAME, false, &clockFeature);
// Creating Vulkan base application
nvvk::Context vkctx{};
vkctx.initInstance(contextInfo);
// Find all compatible devices
auto compatibleDevices = vkctx.getCompatibleDevices(contextInfo);
assert(!compatibleDevices.empty());
// Use a compatible device
vkctx.initDevice(compatibleDevices[0], contextInfo);
// Create example
HelloVulkan helloVk;
// The window needs to be opened to get the surface on which to draw
const VkSurfaceKHR surface = helloVk.getVkSurface(vkctx.m_instance, window);
vkctx.setGCTQueueWithPresent(surface);
helloVk.setup(vkctx.m_instance, vkctx.m_device, vkctx.m_physicalDevice, vkctx.m_queueGCT.familyIndex);
helloVk.createSwapchain(surface, SAMPLE_WIDTH, SAMPLE_HEIGHT);
helloVk.createDepthBuffer();
helloVk.createRenderPass();
helloVk.createFrameBuffers();
// Setup Imgui
helloVk.initGUI(0); // Using sub-pass 0
// Creation of the example
helloVk.loadScene(nvh::findFile("media/scenes/grid.gltf", defaultSearchPaths, true));
// Setup camera
CameraManip.setWindowSize(SAMPLE_WIDTH, SAMPLE_HEIGHT);
// Use a default camera if the scene does not define one
glm::vec3 eye, center, up;
if(helloVk.m_gltfScene.m_cameras.empty())
{
eye = {0.f, 0.f, 15.f};
center = glm::vec3(0.f);
up = {0.f, 1.f, 0.f};
}
else
{
auto main_camera = helloVk.m_gltfScene.m_cameras[0];
eye = main_camera.eye;
center = main_camera.center;
up = main_camera.up;
}
//set camera
CameraManip.setLookat(eye, center, up);
helloVk.createOffscreenRender();
helloVk.createDescriptorSetLayout();
helloVk.createGraphicsPipeline();
helloVk.createUniformBuffer();
helloVk.updateDescriptorSet();
// #VKRay
helloVk.initRayTracing();
helloVk.createBottomLevelAS();
helloVk.createTopLevelAS();
helloVk.createRtDescriptorSet();
helloVk.createRtPipeline();
helloVk.createPostDescriptor();
helloVk.createPostPipeline();
helloVk.updatePostDescriptorSet();
glm::vec4 clearColor = glm::vec4(1, 1, 1, 1.00f);
bool useRaytracer = true;
helloVk.setupGlfwCallbacks(window);
ImGui_ImplGlfw_InitForVulkan(window, true);
// Main loop
while(!glfwWindowShouldClose(window))
{
glfwPollEvents();
if(helloVk.isMinimized())
continue;
// Start the Dear ImGui frame
ImGui_ImplGlfw_NewFrame();
ImGui::NewFrame();
// Show UI window.
if(helloVk.showGui())
{
ImGuiH::Panel::Begin();
ImGui::ColorEdit3("Clear color", reinterpret_cast<float*>(&clearColor));
if(ImGui::Checkbox("Ray Tracer mode", &useRaytracer)) // Switch between raster and ray tracing
helloVk.resetFrame();
renderUI(helloVk, useRaytracer);
ImGui::Text("Application average %.3f ms/frame (%.1f FPS)", 1000.0f / ImGui::GetIO().Framerate, ImGui::GetIO().Framerate);
ImGuiH::Control::Info("", "", "(F10) Toggle Pane", ImGuiH::Control::Flags::Disabled);
ImGuiH::Panel::End();
}
// Start rendering the scene
helloVk.prepareFrame();
// Start command buffer of this frame
auto curFrame = helloVk.getCurFrame();
const VkCommandBuffer& cmdBuf = helloVk.getCommandBuffers()[curFrame];
VkCommandBufferBeginInfo beginInfo{VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO};
beginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
vkBeginCommandBuffer(cmdBuf, &beginInfo);
// Updating camera buffer
helloVk.updateUniformBuffer(cmdBuf);
// Clearing screen
std::array<VkClearValue, 2> clearValues{};
clearValues[0].color = {{clearColor[0], clearColor[1], clearColor[2], clearColor[3]}};
clearValues[1].depthStencil = {1.0f, 0};
// Offscreen render pass
{
VkRenderPassBeginInfo offscreenRenderPassBeginInfo{VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO};
offscreenRenderPassBeginInfo.clearValueCount = 2;
offscreenRenderPassBeginInfo.pClearValues = clearValues.data();
offscreenRenderPassBeginInfo.renderPass = helloVk.m_offscreenRenderPass;
offscreenRenderPassBeginInfo.framebuffer = helloVk.m_offscreenFramebuffer;
offscreenRenderPassBeginInfo.renderArea = {{0, 0}, helloVk.getSize()};
// Rendering Scene
if(useRaytracer)
{
helloVk.raytrace(cmdBuf, clearColor);
}
else
{
vkCmdBeginRenderPass(cmdBuf, &offscreenRenderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);
helloVk.rasterize(cmdBuf);
vkCmdEndRenderPass(cmdBuf);
}
}
// 2nd rendering pass: tone mapper, UI
{
VkRenderPassBeginInfo postRenderPassBeginInfo{VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO};
postRenderPassBeginInfo.clearValueCount = 2;
postRenderPassBeginInfo.pClearValues = clearValues.data();
postRenderPassBeginInfo.renderPass = helloVk.getRenderPass();
postRenderPassBeginInfo.framebuffer = helloVk.getFramebuffers()[curFrame];
postRenderPassBeginInfo.renderArea = {{0, 0}, helloVk.getSize()};
// Rendering tonemapper
vkCmdBeginRenderPass(cmdBuf, &postRenderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);
helloVk.drawPost(cmdBuf);
// Rendering UI
ImGui::Render();
ImGui_ImplVulkan_RenderDrawData(ImGui::GetDrawData(), cmdBuf);
vkCmdEndRenderPass(cmdBuf);
}
// Submit for display
vkEndCommandBuffer(cmdBuf);
helloVk.submitFrame();
}
// Cleanup
vkDeviceWaitIdle(helloVk.getDevice());
helloVk.destroyResources();
helloVk.destroy();
vkctx.deinit();
glfwDestroyWindow(window);
glfwTerminate();
return 0;
}

View file

@@ -0,0 +1,90 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION
* SPDX-License-Identifier: Apache-2.0
*/
#version 450
#extension GL_ARB_separate_shader_objects : enable
#extension GL_EXT_nonuniform_qualifier : enable
#extension GL_GOOGLE_include_directive : enable
#extension GL_EXT_scalar_block_layout : enable
#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
#extension GL_EXT_buffer_reference2 : require
#include "gltf.glsl"
#include "host_device.h"
layout(push_constant) uniform _PushConstantRaster
{
PushConstantRaster pcRaster;
};
// clang-format off
// Incoming
layout(location = 1) in vec3 i_worldPos;
layout(location = 2) in vec3 i_worldNrm;
layout(location = 3) in vec3 i_viewDir;
layout(location = 4) in vec2 i_texCoord;
// Outgoing
layout(location = 0) out vec4 o_color;
// Buffers
layout(buffer_reference, scalar) buffer GltfMaterial { GltfShadeMaterial m[]; };
layout(set = 0, binding = eSceneDesc ) readonly buffer SceneDesc_ { SceneDesc sceneDesc; } ;
layout(set = 0, binding = eTextures) uniform sampler2D[] textureSamplers;
// clang-format on
void main()
{
// Material of the object
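// It is fetched through the buffer reference whose device address is stored in the scene description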
GltfMaterial gltfMat = GltfMaterial(sceneDesc.materialAddress);
GltfShadeMaterial mat = gltfMat.m[pcRaster.materialId];
vec3 N = normalize(i_worldNrm);
// Vector toward light
vec3 L;
float lightIntensity = pcRaster.lightIntensity;
if(pcRaster.lightType == 0)
{
vec3 lDir = pcRaster.lightPosition - i_worldPos;
float d = length(lDir);
lightIntensity = pcRaster.lightIntensity / (d * d);
L = normalize(lDir);
}
else
{
L = normalize(pcRaster.lightPosition);
}
// Diffuse
vec3 diffuse = computeDiffuse(mat, L, N);
if(mat.pbrBaseColorTexture > -1)
{
uint txtId = mat.pbrBaseColorTexture;
vec3 diffuseTxt = texture(textureSamplers[nonuniformEXT(txtId)], i_texCoord).xyz;
diffuse *= diffuseTxt;
}
// Specular
vec3 specular = computeSpecular(mat, i_viewDir, L, N);
// Result
o_color = vec4(lightIntensity * (diffuse + specular), 1);
}

View file

@@ -0,0 +1,43 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION
* SPDX-License-Identifier: Apache-2.0
*/
#include "host_device.h"
vec3 computeDiffuse(GltfShadeMaterial mat, vec3 lightDir, vec3 normal)
{
// Lambertian
float dotNL = max(dot(normal, lightDir), 0.0);
return mat.pbrBaseColorFactor.xyz * dotNL;
}
vec3 computeSpecular(GltfShadeMaterial mat, vec3 viewDir, vec3 lightDir, vec3 normal)
{
// Normalized Phong specular term
const float kPi = 3.14159265;
const float kShininess = 60.0;
// Specular
const float kEnergyConservation = (2.0 + kShininess) / (2.0 * kPi);
vec3 V = normalize(-viewDir);
vec3 R = reflect(-lightDir, normal);
float specular = kEnergyConservation * pow(max(dot(V, R), 0.0), kShininess);
return vec3(specular);
}

View file

@@ -0,0 +1,113 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef COMMON_HOST_DEVICE
#define COMMON_HOST_DEVICE
#ifdef __cplusplus
#include <glm/glm.hpp>
#include <stdint.h> /* for uint64_t */
// GLSL Type
using vec2 = glm::vec2;
using vec3 = glm::vec3;
using vec4 = glm::vec4;
using mat4 = glm::mat4;
using uint = unsigned int;
#endif
// clang-format off
#ifdef __cplusplus // Descriptor binding helper for C++ and GLSL
#define START_BINDING(a) enum a {
#define END_BINDING() }
#else
#define START_BINDING(a) const uint
#define END_BINDING()
#endif
START_BINDING(SceneBindings)
eGlobals = 0, // Global uniform containing camera matrices
eSceneDesc = 1, // Access to the scene buffers
eTextures = 2 // Access to textures
END_BINDING();
START_BINDING(RtxBindings)
eTlas = 0, // Top-level acceleration structure
eOutImage = 1, // Ray tracer output image
ePrimLookup = 2 // Lookup of objects
END_BINDING();
// clang-format on
// Scene buffer addresses
struct SceneDesc
{
uint64_t vertexAddress; // Address of the Vertex buffer
uint64_t normalAddress; // Address of the Normal buffer
uint64_t uvAddress; // Address of the texture coordinates buffer
uint64_t indexAddress; // Address of the triangle indices buffer
uint64_t materialAddress; // Address of the Materials buffer (GltfShadeMaterial)
uint64_t primInfoAddress; // Address of the mesh primitives buffer (PrimMeshInfo)
};
// Uniform buffer set at each frame
struct GlobalUniforms
{
mat4 viewProj; // Camera view * projection
mat4 viewInverse; // Camera inverse view matrix
mat4 projInverse; // Camera inverse projection matrix
};
// Push constant structure for the raster
struct PushConstantRaster
{
mat4 modelMatrix; // matrix of the instance
vec3 lightPosition;
uint objIndex;
float lightIntensity;
int lightType;
int materialId;
};
// Push constant structure for the ray tracer
struct PushConstantRay
{
vec4 clearColor;
vec3 lightPosition;
float lightIntensity;
int lightType;
int frame;
};
// Structure used for retrieving the primitive information in the closest hit
struct PrimMeshInfo
{
uint indexOffset;
uint vertexOffset;
int materialIndex;
};
struct GltfShadeMaterial
{
vec4 pbrBaseColorFactor;
vec3 emissiveFactor;
int pbrBaseColorTexture;
};
#endif

View file

@@ -0,0 +1,34 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION
* SPDX-License-Identifier: Apache-2.0
*/
#version 450
layout(location = 0) out vec2 outUV;
out gl_PerVertex
{
vec4 gl_Position;
};
void main()
{
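// Full-screen triangle: gl_VertexIndex 0,1,2 yields UVs (0,0), (2,0), (0,2), so one triangle covers the whole viewport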
outUV = vec2((gl_VertexIndex << 1) & 2, gl_VertexIndex & 2);
gl_Position = vec4(outUV * 2.0f - 1.0f, 1.0f, 1.0f);
}

View file

@@ -0,0 +1,156 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION
* SPDX-License-Identifier: Apache-2.0
*/
#version 460
#extension GL_EXT_ray_tracing : require
#extension GL_EXT_nonuniform_qualifier : enable
#extension GL_EXT_scalar_block_layout : enable
#extension GL_GOOGLE_include_directive : enable
#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
#extension GL_EXT_buffer_reference2 : require
#include "gltf.glsl"
#include "raycommon.glsl"
#include "sampling.glsl"
#include "host_device.h"
hitAttributeEXT vec2 attribs;
// clang-format off
layout(location = 0) rayPayloadInEXT hitPayload prd;
layout(location = 1) rayPayloadEXT bool isShadowed;
layout(set = 0, binding = 0 ) uniform accelerationStructureEXT topLevelAS;
layout(set = 0, binding = 2) readonly buffer _InstanceInfo {PrimMeshInfo primInfo[];};
layout(buffer_reference, scalar) readonly buffer Vertices { vec3 v[]; };
layout(buffer_reference, scalar) readonly buffer Indices { ivec3 i[]; };
layout(buffer_reference, scalar) readonly buffer Normals { vec3 n[]; };
layout(buffer_reference, scalar) readonly buffer TexCoords { vec2 t[]; };
layout(buffer_reference, scalar) readonly buffer Materials { GltfShadeMaterial m[]; };
layout(set = 1, binding = eSceneDesc ) readonly buffer SceneDesc_ { SceneDesc sceneDesc; };
layout(set = 1, binding = eTextures) uniform sampler2D texturesMap[]; // all textures
layout(push_constant) uniform _PushConstantRay { PushConstantRay pcRay; };
// clang-format on
void main()
{
// Retrieve the Primitive mesh buffer information
PrimMeshInfo pinfo = primInfo[gl_InstanceCustomIndexEXT];
// Getting the 'first index' for this mesh (offset of the mesh + offset of the triangle)
uint indexOffset = (pinfo.indexOffset / 3) + gl_PrimitiveID;
uint vertexOffset = pinfo.vertexOffset; // Vertex offset as defined in glTF
uint matIndex = max(0, pinfo.materialIndex); // material of primitive mesh
Materials gltfMat = Materials(sceneDesc.materialAddress);
Vertices vertices = Vertices(sceneDesc.vertexAddress);
Indices indices = Indices(sceneDesc.indexAddress);
Normals normals = Normals(sceneDesc.normalAddress);
TexCoords texCoords = TexCoords(sceneDesc.uvAddress);
Materials materials = Materials(sceneDesc.materialAddress);
// Getting the 3 indices of the triangle (local)
ivec3 triangleIndex = indices.i[indexOffset];
triangleIndex += ivec3(vertexOffset); // (global)
const vec3 barycentrics = vec3(1.0 - attribs.x - attribs.y, attribs.x, attribs.y);
// Vertex of the triangle
const vec3 pos0 = vertices.v[triangleIndex.x];
const vec3 pos1 = vertices.v[triangleIndex.y];
const vec3 pos2 = vertices.v[triangleIndex.z];
const vec3 position = pos0 * barycentrics.x + pos1 * barycentrics.y + pos2 * barycentrics.z;
const vec3 world_position = vec3(gl_ObjectToWorldEXT * vec4(position, 1.0));
// Normal
const vec3 nrm0 = normals.n[triangleIndex.x];
const vec3 nrm1 = normals.n[triangleIndex.y];
const vec3 nrm2 = normals.n[triangleIndex.z];
vec3 normal = normalize(nrm0 * barycentrics.x + nrm1 * barycentrics.y + nrm2 * barycentrics.z);
const vec3 world_normal = normalize(vec3(normal * gl_WorldToObjectEXT));
const vec3 geom_normal = normalize(cross(pos1 - pos0, pos2 - pos0));
// TexCoord
const vec2 uv0 = texCoords.t[triangleIndex.x];
const vec2 uv1 = texCoords.t[triangleIndex.y];
const vec2 uv2 = texCoords.t[triangleIndex.z];
const vec2 texcoord0 = uv0 * barycentrics.x + uv1 * barycentrics.y + uv2 * barycentrics.z;
// https://en.wikipedia.org/wiki/Path_tracing
// Material of the object
GltfShadeMaterial mat = materials.m[matIndex];
vec3 emittance = mat.emissiveFactor;
// Pick a random direction from here and keep going.
vec3 tangent, bitangent;
createCoordinateSystem(world_normal, tangent, bitangent);
vec3 rayOrigin = world_position;
vec3 rayDirection = samplingHemisphere(prd.seed, tangent, bitangent, world_normal);
const float cos_theta = dot(rayDirection, world_normal);
// Probability density function of samplingHemisphere choosing this rayDirection
const float p = cos_theta / M_PI;
// Compute the BRDF for this ray (assuming Lambertian reflection)
vec3 albedo = mat.pbrBaseColorFactor.xyz;
if(mat.pbrBaseColorTexture > -1)
{
uint txtId = mat.pbrBaseColorTexture;
albedo *= texture(texturesMap[nonuniformEXT(txtId)], texcoord0).xyz;
}
vec3 BRDF = albedo / M_PI;
prd.rayOrigin = rayOrigin;
prd.rayDirection = rayDirection;
prd.hitValue = emittance;
prd.weight = BRDF * cos_theta / p;
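// With a Lambertian BRDF and cosine-weighted sampling, this weight simplifies to the albedo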
return;
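// NOTE: the code below never executes. The path is extended iteratively in the ray generation shader;
// the recursive formulation is kept here only for reference.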
// Recursively trace reflected light sources.
if(prd.depth < 10)
{
prd.depth++;
float tMin = 0.001;
float tMax = 100000000.0;
uint flags = gl_RayFlagsOpaqueEXT;
traceRayEXT(topLevelAS, // acceleration structure
flags, // rayFlags
0xFF, // cullMask
0, // sbtRecordOffset
0, // sbtRecordStride
0, // missIndex
rayOrigin, // ray origin
tMin, // ray min range
rayDirection, // ray direction
tMax, // ray max range
0 // payload (location = 0)
);
}
vec3 incoming = prd.hitValue;
// Apply the Rendering Equation here.
prd.hitValue = emittance + (BRDF * incoming * cos_theta / p);
}

View file

@@ -0,0 +1,99 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION
* SPDX-License-Identifier: Apache-2.0
*/
#version 460
#extension GL_EXT_ray_tracing : require
#extension GL_GOOGLE_include_directive : enable
#extension GL_ARB_shader_clock : enable
#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
#include "raycommon.glsl"
#include "sampling.glsl"
#include "host_device.h"
// clang-format off
layout(location = 0) rayPayloadEXT hitPayload prd;
layout(set = 0, binding = 0) uniform accelerationStructureEXT topLevelAS;
layout(set = 0, binding = 1, rgba32f) uniform image2D image;
layout(set = 1, binding = 0) uniform _GlobalUniforms { GlobalUniforms uni; };
layout(push_constant) uniform _PushConstantRay { PushConstantRay pcRay; };
// clang-format on
void main()
{
// Initialize the random number seed from the pixel index and the GPU clock
uint seed = tea(gl_LaunchIDEXT.y * gl_LaunchSizeEXT.x + gl_LaunchIDEXT.x, int(clockARB()));
const vec2 pixelCenter = vec2(gl_LaunchIDEXT.xy) + vec2(0.5);
const vec2 inUV = pixelCenter / vec2(gl_LaunchSizeEXT.xy);
vec2 d = inUV * 2.0 - 1.0;
vec4 origin = uni.viewInverse * vec4(0, 0, 0, 1);
vec4 target = uni.projInverse * vec4(d.x, d.y, 1, 1);
vec4 direction = uni.viewInverse * vec4(normalize(target.xyz), 0);
uint rayFlags = gl_RayFlagsOpaqueEXT;
float tMin = 0.001;
float tMax = 10000.0;
prd.hitValue = vec3(0);
prd.seed = seed;
prd.depth = 0;
prd.rayOrigin = origin.xyz;
prd.rayDirection = direction.xyz;
prd.weight = vec3(0);
vec3 curWeight = vec3(1);
vec3 hitValue = vec3(0);
for(; prd.depth < 100; prd.depth++)
{
traceRayEXT(topLevelAS, // acceleration structure
rayFlags, // rayFlags
0xFF, // cullMask
0, // sbtRecordOffset
0, // sbtRecordStride
0, // missIndex
prd.rayOrigin, // ray origin
tMin, // ray min range
prd.rayDirection, // ray direction
tMax, // ray max range
0 // payload (location = 0)
);
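// Add the radiance returned by this segment, weighted by the current path throughput,
// then fold this bounce's weight into the throughput for the next segment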
hitValue += prd.hitValue * curWeight;
curWeight *= prd.weight;
}
// Do accumulation over time
if(pcRay.frame > 0)
{
float a = 1.0f / float(pcRay.frame + 1);
vec3 old_color = imageLoad(image, ivec2(gl_LaunchIDEXT.xy)).xyz;
imageStore(image, ivec2(gl_LaunchIDEXT.xy), vec4(mix(old_color, hitValue, a), 1.f));
}
else
{
// First frame, replace the value in the buffer
imageStore(image, ivec2(gl_LaunchIDEXT.xy), vec4(hitValue, 1.f));
}
}

View file

@@ -0,0 +1,39 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION
* SPDX-License-Identifier: Apache-2.0
*/
#version 460
#extension GL_EXT_ray_tracing : require
#extension GL_GOOGLE_include_directive : enable
#include "raycommon.glsl"
layout(location = 0) rayPayloadInEXT hitPayload prd;
layout(push_constant) uniform Constants
{
vec4 clearColor;
};
void main()
{
if(prd.depth == 0)
prd.hitValue = clearColor.xyz * 0.8;
else
prd.hitValue = vec3(0.01); // No contribution from environment
prd.depth = 100; // Ending trace
}

View file

@@ -0,0 +1,37 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION
* SPDX-License-Identifier: Apache-2.0
*/
#version 450
layout(location = 0) in vec2 outUV;
layout(location = 0) out vec4 fragColor;
layout(set = 0, binding = 0) uniform sampler2D noisyTxt;
layout(push_constant) uniform shaderInformation
{
float aspectRatio;
}
pushc;
void main()
{
vec2 uv = outUV;
float gamma = 1. / 2.2;
fragColor = pow(texture(noisyTxt, uv).rgba, vec4(gamma));
}

View file

@@ -0,0 +1,28 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION
* SPDX-License-Identifier: Apache-2.0
*/
struct hitPayload
{
vec3 hitValue;      // Radiance returned by the hit and miss shaders for this path segment
uint seed;          // Random number generator state
uint depth;         // Current bounce count along the path
vec3 rayOrigin;     // Origin of the next ray to trace
vec3 rayDirection;  // Direction of the next ray to trace
vec3 weight;        // Throughput of this bounce (BRDF * cos_theta / pdf)
};

View file

@@ -0,0 +1,177 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION
* SPDX-License-Identifier: Apache-2.0
*/
#version 460
#extension GL_EXT_ray_tracing : require
#extension GL_EXT_nonuniform_qualifier : enable
#extension GL_EXT_scalar_block_layout : enable
#extension GL_GOOGLE_include_directive : enable
#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
#extension GL_EXT_buffer_reference2 : require
#include "gltf.glsl"
#include "raycommon.glsl"
#include "host_device.h"
hitAttributeEXT vec2 attribs;
// clang-format off
layout(location = 0) rayPayloadInEXT hitPayload prd;
layout(location = 1) rayPayloadEXT bool isShadowed;
layout(set = 0, binding = 0 ) uniform accelerationStructureEXT topLevelAS;
layout(set = 0, binding = 2) readonly buffer _InstanceInfo {PrimMeshInfo primInfo[];};
//layout(set = 1, binding = B_MATERIALS) readonly buffer _MaterialBuffer {GltfShadeMaterial materials[];};
layout(buffer_reference, scalar) readonly buffer Vertices { vec3 v[]; };
layout(buffer_reference, scalar) readonly buffer Indices { uint i[]; };
layout(buffer_reference, scalar) readonly buffer Normals { vec3 n[]; };
layout(buffer_reference, scalar) readonly buffer TexCoords { vec2 t[]; };
layout(buffer_reference, scalar) readonly buffer Materials { GltfShadeMaterial m[]; };
layout(set = 1, binding = eSceneDesc ) readonly buffer SceneDesc_ { SceneDesc sceneDesc; };
layout(set = 1, binding = eTextures) uniform sampler2D texturesMap[]; // all textures
// clang-format on
layout(push_constant) uniform Constants
{
vec4 clearColor;
vec3 lightPosition;
float lightIntensity;
int lightType;
}
pushC;
void main()
{
// Retrieve the Primitive mesh buffer information
PrimMeshInfo pinfo = primInfo[gl_InstanceCustomIndexEXT];
// Getting the 'first index' for this mesh (offset of the mesh + offset of the triangle)
uint indexOffset = pinfo.indexOffset + (3 * gl_PrimitiveID);
uint vertexOffset = pinfo.vertexOffset; // Vertex offset as defined in glTF
uint matIndex = max(0, pinfo.materialIndex); // material of primitive mesh
Materials gltfMat = Materials(sceneDesc.materialAddress);
Vertices vertices = Vertices(sceneDesc.vertexAddress);
Indices indices = Indices(sceneDesc.indexAddress);
Normals normals = Normals(sceneDesc.normalAddress);
TexCoords texCoords = TexCoords(sceneDesc.uvAddress);
Materials materials = Materials(sceneDesc.materialAddress);
// Getting the 3 indices of the triangle (local)
ivec3 triangleIndex = ivec3(indices.i[indexOffset + 0], indices.i[indexOffset + 1], indices.i[indexOffset + 2]);
triangleIndex += ivec3(vertexOffset); // (global)
const vec3 barycentrics = vec3(1.0 - attribs.x - attribs.y, attribs.x, attribs.y);
// Vertex of the triangle
const vec3 pos0 = vertices.v[triangleIndex.x];
const vec3 pos1 = vertices.v[triangleIndex.y];
const vec3 pos2 = vertices.v[triangleIndex.z];
const vec3 position = pos0 * barycentrics.x + pos1 * barycentrics.y + pos2 * barycentrics.z;
const vec3 world_position = vec3(gl_ObjectToWorldEXT * vec4(position, 1.0));
// Normal
const vec3 nrm0 = normals.n[triangleIndex.x];
const vec3 nrm1 = normals.n[triangleIndex.y];
const vec3 nrm2 = normals.n[triangleIndex.z];
vec3 normal = normalize(nrm0 * barycentrics.x + nrm1 * barycentrics.y + nrm2 * barycentrics.z);
const vec3 world_normal = normalize(vec3(normal * gl_WorldToObjectEXT));
const vec3 geom_normal = normalize(cross(pos1 - pos0, pos2 - pos0));
// TexCoord
const vec2 uv0 = texCoords.t[triangleIndex.x];
const vec2 uv1 = texCoords.t[triangleIndex.y];
const vec2 uv2 = texCoords.t[triangleIndex.z];
const vec2 texcoord0 = uv0 * barycentrics.x + uv1 * barycentrics.y + uv2 * barycentrics.z;
// Vector toward the light
vec3 L;
float lightIntensity = pushC.lightIntensity;
float lightDistance = 100000.0;
// Point light
if(pushC.lightType == 0)
{
vec3 lDir = pushC.lightPosition - world_position;
lightDistance = length(lDir);
lightIntensity = pushC.lightIntensity / (lightDistance * lightDistance);
L = normalize(lDir);
}
else // Directional light
{
L = normalize(pushC.lightPosition - vec3(0));
}
// Material of the object
GltfShadeMaterial mat = materials.m[matIndex];
// Diffuse
vec3 diffuse = computeDiffuse(mat, L, world_normal);
if(mat.pbrBaseColorTexture > -1)
{
uint txtId = mat.pbrBaseColorTexture;
diffuse *= texture(texturesMap[nonuniformEXT(txtId)], texcoord0).xyz;
}
vec3 specular = vec3(0);
float attenuation = 1;
// Tracing shadow ray only if the light is visible from the surface
if(dot(world_normal, L) > 0)
{
float tMin = 0.001;
float tMax = lightDistance;
vec3 origin = gl_WorldRayOriginEXT + gl_WorldRayDirectionEXT * gl_HitTEXT;
vec3 rayDir = L;
uint flags = gl_RayFlagsTerminateOnFirstHitEXT | gl_RayFlagsOpaqueEXT | gl_RayFlagsSkipClosestHitShaderEXT;
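// Any hit terminates the ray and the closest-hit shader is skipped, so only the shadow
// miss shader runs (clearing isShadowed) when the light is reachable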
isShadowed = true;
traceRayEXT(topLevelAS, // acceleration structure
flags, // rayFlags
0xFF, // cullMask
0, // sbtRecordOffset
0, // sbtRecordStride
1, // missIndex
origin, // ray origin
tMin, // ray min range
rayDir, // ray direction
tMax, // ray max range
1 // payload (location = 1)
);
if(isShadowed)
{
attenuation = 0.3;
}
else
{
// Specular
specular = computeSpecular(mat, gl_WorldRayDirectionEXT, L, world_normal);
}
}
prd.hitValue = vec3(lightIntensity * attenuation * (diffuse + specular));
}

View file

@@ -0,0 +1,67 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION
* SPDX-License-Identifier: Apache-2.0
*/
#version 460
#extension GL_EXT_ray_tracing : require
#extension GL_GOOGLE_include_directive : enable
#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
#include "raycommon.glsl"
#include "sampling.glsl"
#include "host_device.h"
// clang-format off
layout(location = 0) rayPayloadEXT hitPayload prd;
layout(set = 0, binding = 0) uniform accelerationStructureEXT topLevelAS;
layout(set = 0, binding = 1, rgba32f) uniform image2D image;
layout(set = 1, binding = 0) uniform _GlobalUniforms { GlobalUniforms uni; };
layout(push_constant) uniform _PushConstantRay { PushConstantRay pcRay; };
// clang-format on
void main()
{
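// Map the pixel to NDC, unproject it with the inverse projection matrix, and transform
// the origin and direction into world space with the inverse view matrix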
const vec2 pixelCenter = vec2(gl_LaunchIDEXT.xy) + vec2(0.5);
const vec2 inUV = pixelCenter / vec2(gl_LaunchSizeEXT.xy);
vec2 d = inUV * 2.0 - 1.0;
vec4 origin = uni.viewInverse * vec4(0, 0, 0, 1);
vec4 target = uni.projInverse * vec4(d.x, d.y, 1, 1);
vec4 direction = uni.viewInverse * vec4(normalize(target.xyz), 0);
uint rayFlags = gl_RayFlagsOpaqueEXT;
float tMin = 0.001;
float tMax = 10000.0;
traceRayEXT(topLevelAS, // acceleration structure
rayFlags, // rayFlags
0xFF, // cullMask
0, // sbtRecordOffset
0, // sbtRecordStride
0, // missIndex
origin.xyz, // ray origin
tMin, // ray min range
direction.xyz, // ray direction
tMax, // ray max range
0 // payload (location = 0)
);
imageStore(image, ivec2(gl_LaunchIDEXT.xy), vec4(prd.hitValue, 1.0));
}

View file

@@ -0,0 +1,35 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION
* SPDX-License-Identifier: Apache-2.0
*/
#version 460
#extension GL_EXT_ray_tracing : require
#extension GL_GOOGLE_include_directive : enable
#include "raycommon.glsl"
layout(location = 0) rayPayloadInEXT hitPayload prd;
layout(push_constant) uniform Constants
{
vec4 clearColor;
};
void main()
{
prd.hitValue = clearColor.xyz * 0.8;
}

View file

@@ -0,0 +1,29 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION
* SPDX-License-Identifier: Apache-2.0
*/
#version 460
#extension GL_EXT_ray_tracing : require
layout(location = 1) rayPayloadInEXT bool isShadowed;
void main()
{
isShadowed = false;
}

View file

@@ -0,0 +1,84 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION
* SPDX-License-Identifier: Apache-2.0
*/
// Generate a random unsigned int from two unsigned int values, using 16 pairs
// of rounds of the Tiny Encryption Algorithm. See Zafar, Olano, and Curtis,
// "GPU Random Numbers via the Tiny Encryption Algorithm"
uint tea(uint val0, uint val1)
{
uint v0 = val0;
uint v1 = val1;
uint s0 = 0;
for(uint n = 0; n < 16; n++)
{
s0 += 0x9e3779b9;
v0 += ((v1 << 4) + 0xa341316c) ^ (v1 + s0) ^ ((v1 >> 5) + 0xc8013ea4);
v1 += ((v0 << 4) + 0xad90777d) ^ (v0 + s0) ^ ((v0 >> 5) + 0x7e95761e);
}
return v0;
}
// Generate a random unsigned int in [0, 2^24) given the previous RNG state
// using the Numerical Recipes linear congruential generator
uint lcg(inout uint prev)
{
uint LCG_A = 1664525u;
uint LCG_C = 1013904223u;
prev = (LCG_A * prev + LCG_C);
return prev & 0x00FFFFFF;
}
// Generate a random float in [0, 1) given the previous RNG state
float rnd(inout uint prev)
{
return (float(lcg(prev)) / float(0x01000000));
}
//-------------------------------------------------------------------------------------------------
// Sampling
//-------------------------------------------------------------------------------------------------
// Randomly samples from a cosine-weighted hemisphere oriented in the `z` direction.
// From Ray Tracing Gems section 16.6.1, "Cosine-Weighted Hemisphere Oriented to the Z-Axis"
vec3 samplingHemisphere(inout uint seed, in vec3 x, in vec3 y, in vec3 z)
{
#define M_PI 3.14159265
float r1 = rnd(seed);
float r2 = rnd(seed);
float sq = sqrt(r1);
vec3 direction = vec3(cos(2 * M_PI * r2) * sq, sin(2 * M_PI * r2) * sq, sqrt(1. - r1));
direction = direction.x * x + direction.y * y + direction.z * z;
return direction;
}
// Return the tangent and binormal from the incoming normal
void createCoordinateSystem(in vec3 N, out vec3 Nt, out vec3 Nb)
{
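// Build the tangent from the axis with the larger normal component so the denominator never approaches zero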
if(abs(N.x) > abs(N.y))
Nt = vec3(N.z, 0, -N.x) / sqrt(N.x * N.x + N.z * N.z);
else
Nt = vec3(0, -N.z, N.y) / sqrt(N.y * N.y + N.z * N.z);
Nb = cross(N, Nt);
}

View file

@@ -0,0 +1,65 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION
* SPDX-License-Identifier: Apache-2.0
*/
#version 450
#extension GL_ARB_separate_shader_objects : enable
#extension GL_EXT_scalar_block_layout : enable
#extension GL_GOOGLE_include_directive : enable
#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
#include "gltf.glsl"
#include "host_device.h"
layout(binding = 0) uniform _GlobalUniforms
{
GlobalUniforms uni;
};
layout(push_constant) uniform _PushConstantRaster
{
PushConstantRaster pcRaster;
};
layout(location = 0) in vec3 i_position;
layout(location = 1) in vec3 i_normal;
layout(location = 2) in vec2 i_texCoord;
layout(location = 1) out vec3 o_worldPos;
layout(location = 2) out vec3 o_worldNrm;
layout(location = 3) out vec3 o_viewDir;
layout(location = 4) out vec2 o_texCoord;
out gl_PerVertex
{
vec4 gl_Position;
};
void main()
{
vec3 origin = vec3(uni.viewInverse * vec4(0, 0, 0, 1));
o_worldPos = vec3(pcRaster.modelMatrix * vec4(i_position, 1.0));
o_viewDir = vec3(o_worldPos - origin);
o_texCoord = i_texCoord;
o_worldNrm = mat3(pcRaster.modelMatrix) * i_normal;
gl_Position = uni.viewProj * vec4(o_worldPos, 1.0);
}