New version of the samples and tutorials based on KHR_ray_tracing

This commit is contained in:
mklefrancois 2020-03-31 17:51:08 +02:00
parent 2fd15056a2
commit b6402f0c09
271 changed files with 134108 additions and 2 deletions

View file

@@ -0,0 +1,102 @@
cmake_minimum_required(VERSION 2.8)
get_filename_component(PROJNAME ${CMAKE_CURRENT_SOURCE_DIR} NAME)
SET(PROJNAME vk_${PROJNAME}_KHR)
Project(${PROJNAME})
Message(STATUS "-------------------------------")
Message(STATUS "Processing Project ${PROJNAME}:")
#####################################################################################
_add_project_definitions(${PROJNAME})
#####################################################################################
# Source files for this project
#
file(GLOB SOURCE_FILES *.cpp *.hpp *.inl *.h *.c)
file(GLOB EXTRA_COMMON "../common/*.*")
list(APPEND COMMON_SOURCE_FILES ${EXTRA_COMMON})
include_directories("../common")
#####################################################################################
# GLSL to SPIR-V custom build
#
# more than one file can be given: _compile_GLSL("GLSL_mesh.vert;GLSL_mesh.frag" "GLSL_mesh.spv" GLSL_SOURCES)
# the SPIR-V validator is fine as long as files are for different pipeline stages (entry points still need to be main())
#_compile_GLSL(<source(s)> <target spv> <LIST where files are appended>)
UNSET(GLSL_SOURCES)
UNSET(SPV_OUTPUT)
file(GLOB_RECURSE GLSL_HEADER_FILES "shaders/*.h" "shaders/*.glsl")
file(GLOB_RECURSE GLSL_SOURCE_FILES
"shaders/*.comp"
"shaders/*.frag"
"shaders/*.vert"
"shaders/*.rchit"
"shaders/*.rahit"
"shaders/*.rmiss"
"shaders/*.rgen"
)
foreach(GLSL ${GLSL_SOURCE_FILES})
get_filename_component(FILE_NAME ${GLSL} NAME)
_compile_GLSL(${GLSL} "shaders/${FILE_NAME}.spv" GLSL_SOURCES SPV_OUTPUT)
endforeach(GLSL)
list(APPEND GLSL_SOURCES ${GLSL_HEADER_FILES})
source_group(Shader_Files FILES ${GLSL_SOURCES})
#####################################################################################
# Executable
#
# if(WIN32 AND NOT GLUT_FOUND)
# add_definitions(/wd4996) #remove printf warning
# add_definitions(/wd4244) #remove double to float conversion warning
# add_definitions(/wd4305) #remove double to float truncation warning
# else()
# add_definitions(-fpermissive)
# endif()
add_executable(${PROJNAME} ${SOURCE_FILES} ${COMMON_SOURCE_FILES} ${PACKAGE_SOURCE_FILES} ${GLSL_SOURCES} ${CUDA_FILES} ${CUBIN_SOURCES})
#_set_subsystem_console(${PROJNAME})
#####################################################################################
# common source code needed for this sample
#
source_group(common FILES
${COMMON_SOURCE_FILES}
${PACKAGE_SOURCE_FILES}
)
source_group("Source Files" FILES ${SOURCE_FILES})
# if(UNIX)
# set(UNIXLINKLIBS dl pthread)
# else()
# set(UNIXLINKLIBS)
# endif()
#####################################################################################
# Linkage
#
target_link_libraries(${PROJNAME} ${PLATFORM_LIBRARIES} shared_sources)
foreach(DEBUGLIB ${LIBRARIES_DEBUG})
target_link_libraries(${PROJNAME} debug ${DEBUGLIB})
endforeach(DEBUGLIB)
foreach(RELEASELIB ${LIBRARIES_OPTIMIZED})
target_link_libraries(${PROJNAME} optimized ${RELEASELIB})
endforeach(RELEASELIB)
#####################################################################################
# copies binaries that need to be put next to the exe files (ZLib, etc.)
#
_copy_binaries_to_target( ${PROJNAME} )
install(FILES ${SPV_OUTPUT} CONFIGURATIONS Release DESTINATION "bin_${ARCH}/${PROJNAME}/shaders")
install(FILES ${SPV_OUTPUT} CONFIGURATIONS Debug DESTINATION "bin_${ARCH}_debug/${PROJNAME}/shaders")
install(FILES ${CUBIN_SOURCES} CONFIGURATIONS Release DESTINATION "bin_${ARCH}/${PROJNAME}")
install(FILES ${CUBIN_SOURCES} CONFIGURATIONS Debug DESTINATION "bin_${ARCH}_debug/${PROJNAME}")
install(DIRECTORY "../media" CONFIGURATIONS Release DESTINATION "bin_${ARCH}/${PROJNAME}")
install(DIRECTORY "../media" CONFIGURATIONS Debug DESTINATION "bin_${ARCH}_debug/${PROJNAME}")

View file

@@ -0,0 +1,14 @@
# NVIDIA Vulkan Ray Tracing Tutorial
This example is the result of the ray tracing tutorial.
The tutorial adds ray tracing capability to an OBJ rasterizer written in Vulkan.
If you haven't completed it yet, see [**Start Ray Tracing Tutorial**](https://nvpro-samples.github.io/vk_raytracing_tutorial/).
![resultRaytraceShadowMedieval](../docs/Images/resultRaytraceShadowMedieval.png)
## Going Further
Once the tutorial is completed and the basics of ray tracing are in place, additional tutorials build on this code base.
See the other [additional ray tracing tutorials](https://nvpro-samples.github.io/vk_raytracing_tutorial/vkrt_tuto_further.md.html).

Binary file not shown.

Binary file not shown.

View file

@@ -0,0 +1,915 @@
/* Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sstream>
#include <vulkan/vulkan.hpp>
extern std::vector<std::string> defaultSearchPaths;
#define STB_IMAGE_IMPLEMENTATION
#include "fileformats/stb_image.h"
#include "obj_loader.h"
#include "hello_vulkan.h"
#include "nvh//cameramanipulator.hpp"
#include "nvvkpp/descriptorsets_vkpp.hpp"
#include "nvvkpp/pipeline_vkpp.hpp"
#include "nvh/fileoperations.hpp"
#include "nvvkpp/commands_vkpp.hpp"
#include "nvvkpp/renderpass_vkpp.hpp"
#include "nvvkpp/utilities_vkpp.hpp"
// Holding the camera matrices
struct CameraMatrices
{
nvmath::mat4f view;
nvmath::mat4f proj;
nvmath::mat4f viewInverse;
// #VKRay
nvmath::mat4f projInverse;
};
//--------------------------------------------------------------------------------------------------
// Keeps the handle to the device
// Initializes the allocator used for all our allocations: buffers, images
//
void HelloVulkan::setup(const vk::Device& device,
const vk::PhysicalDevice& physicalDevice,
uint32_t queueFamily)
{
AppBase::setup(device, physicalDevice, queueFamily);
m_alloc.init(device, physicalDevice);
m_debug.setup(m_device);
}
//--------------------------------------------------------------------------------------------------
// Called at each frame to update the camera matrix
//
void HelloVulkan::updateUniformBuffer()
{
const float aspectRatio = m_size.width / static_cast<float>(m_size.height);
CameraMatrices ubo = {};
ubo.view = CameraManip.getMatrix();
ubo.proj = nvmath::perspectiveVK(CameraManip.getFov(), aspectRatio, 0.1f, 1000.0f);
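// Note: nvmath::perspectiveVK is expected to already produce a Vulkan-style projection
// (Y flipped, depth in [0, 1]), which is why the manual flip below stays disabled.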
// ubo.proj[1][1] *= -1; // Inverting Y for Vulkan
ubo.viewInverse = nvmath::invert(ubo.view);
// #VKRay
ubo.projInverse = nvmath::invert(ubo.proj);
void* data = m_device.mapMemory(m_cameraMat.allocation, 0, sizeof(ubo));
memcpy(data, &ubo, sizeof(ubo));
m_device.unmapMemory(m_cameraMat.allocation);
}
//--------------------------------------------------------------------------------------------------
// Describing the descriptor layout of the resources used when rendering
//
void HelloVulkan::createDescriptorSetLayout()
{
using vkDS = vk::DescriptorSetLayoutBinding;
using vkDT = vk::DescriptorType;
using vkSS = vk::ShaderStageFlagBits;
uint32_t nbTxt = static_cast<uint32_t>(m_textures.size());
uint32_t nbObj = static_cast<uint32_t>(m_objModel.size());
// Camera matrices (binding = 0)
m_descSetLayoutBind.emplace_back(
vkDS(0, vkDT::eUniformBuffer, 1, vkSS::eVertex | vkSS::eRaygenKHR));
// Materials (binding = 1)
m_descSetLayoutBind.emplace_back(
vkDS(1, vkDT::eStorageBuffer, nbObj, vkSS::eVertex | vkSS::eFragment | vkSS::eClosestHitKHR));
// Scene description (binding = 2)
m_descSetLayoutBind.emplace_back( //
vkDS(2, vkDT::eStorageBuffer, 1, vkSS::eVertex | vkSS::eFragment | vkSS::eClosestHitKHR));
// Textures (binding = 3)
m_descSetLayoutBind.emplace_back(
vkDS(3, vkDT::eCombinedImageSampler, nbTxt, vkSS::eFragment | vkSS::eClosestHitKHR));
// Material indices (binding = 4)
m_descSetLayoutBind.emplace_back(
vkDS(4, vkDT::eStorageBuffer, nbObj, vkSS::eFragment | vkSS::eClosestHitKHR));
// Storing vertices (binding = 5)
m_descSetLayoutBind.emplace_back( //
vkDS(5, vkDT::eStorageBuffer, nbObj, vkSS::eClosestHitKHR));
// Storing indices (binding = 6)
m_descSetLayoutBind.emplace_back( //
vkDS(6, vkDT::eStorageBuffer, nbObj, vkSS::eClosestHitKHR));
m_descSetLayout = nvvkpp::util::createDescriptorSetLayout(m_device, m_descSetLayoutBind);
m_descPool = nvvkpp::util::createDescriptorPool(m_device, m_descSetLayoutBind, 1);
m_descSet = nvvkpp::util::createDescriptorSet(m_device, m_descPool, m_descSetLayout);
}
//--------------------------------------------------------------------------------------------------
// Setting up the buffers in the descriptor set
//
void HelloVulkan::updateDescriptorSet()
{
std::vector<vk::WriteDescriptorSet> writes;
// Camera matrices and scene description
vk::DescriptorBufferInfo dbiUnif{m_cameraMat.buffer, 0, VK_WHOLE_SIZE};
writes.emplace_back(nvvkpp::util::createWrite(m_descSet, m_descSetLayoutBind[0], &dbiUnif));
vk::DescriptorBufferInfo dbiSceneDesc{m_sceneDesc.buffer, 0, VK_WHOLE_SIZE};
writes.emplace_back(nvvkpp::util::createWrite(m_descSet, m_descSetLayoutBind[2], &dbiSceneDesc));
// All material buffers, 1 buffer per OBJ
std::vector<vk::DescriptorBufferInfo> dbiMat;
std::vector<vk::DescriptorBufferInfo> dbiMatIdx;
std::vector<vk::DescriptorBufferInfo> dbiVert;
std::vector<vk::DescriptorBufferInfo> dbiIdx;
for(auto& obj : m_objModel)
{
dbiMat.emplace_back(obj.matColorBuffer.buffer, 0, VK_WHOLE_SIZE);
dbiMatIdx.emplace_back(obj.matIndexBuffer.buffer, 0, VK_WHOLE_SIZE);
dbiVert.emplace_back(obj.vertexBuffer.buffer, 0, VK_WHOLE_SIZE);
dbiIdx.emplace_back(obj.indexBuffer.buffer, 0, VK_WHOLE_SIZE);
}
writes.emplace_back(nvvkpp::util::createWrite(m_descSet, m_descSetLayoutBind[1], dbiMat.data()));
writes.emplace_back(
nvvkpp::util::createWrite(m_descSet, m_descSetLayoutBind[4], dbiMatIdx.data()));
writes.emplace_back(nvvkpp::util::createWrite(m_descSet, m_descSetLayoutBind[5], dbiVert.data()));
writes.emplace_back(nvvkpp::util::createWrite(m_descSet, m_descSetLayoutBind[6], dbiIdx.data()));
// All texture samplers
std::vector<vk::DescriptorImageInfo> diit;
for(auto& texture : m_textures)
{
diit.push_back(texture.descriptor);
}
writes.emplace_back(nvvkpp::util::createWrite(m_descSet, m_descSetLayoutBind[3], diit.data()));
// Writing the information
m_device.updateDescriptorSets(static_cast<uint32_t>(writes.size()), writes.data(), 0, nullptr);
}
//--------------------------------------------------------------------------------------------------
// Creating the pipeline layout
//
void HelloVulkan::createGraphicsPipeline()
{
using vkSS = vk::ShaderStageFlagBits;
vk::PushConstantRange pushConstantRanges = {vkSS::eVertex | vkSS::eFragment, 0,
sizeof(ObjPushConstant)};
// Creating the Pipeline Layout
vk::PipelineLayoutCreateInfo pipelineLayoutCreateInfo;
vk::DescriptorSetLayout descSetLayout(m_descSetLayout);
pipelineLayoutCreateInfo.setSetLayoutCount(1);
pipelineLayoutCreateInfo.setPSetLayouts(&descSetLayout);
pipelineLayoutCreateInfo.setPushConstantRangeCount(1);
pipelineLayoutCreateInfo.setPPushConstantRanges(&pushConstantRanges);
m_pipelineLayout = m_device.createPipelineLayout(pipelineLayoutCreateInfo);
// Creating the Pipeline
std::vector<std::string> paths = defaultSearchPaths;
nvvkpp::GraphicsPipelineGenerator gpb(m_device, m_pipelineLayout, m_offscreenRenderPass);
gpb.depthStencilState = {true};
gpb.addShader(nvh::loadFile("shaders/vert_shader.vert.spv", true, paths), vkSS::eVertex);
gpb.addShader(nvh::loadFile("shaders/frag_shader.frag.spv", true, paths), vkSS::eFragment);
gpb.vertexInputState.bindingDescriptions = {{0, sizeof(VertexObj)}};
gpb.vertexInputState.attributeDescriptions = {
{0, 0, vk::Format::eR32G32B32Sfloat, offsetof(VertexObj, pos)},
{1, 0, vk::Format::eR32G32B32Sfloat, offsetof(VertexObj, nrm)},
{2, 0, vk::Format::eR32G32B32Sfloat, offsetof(VertexObj, color)},
{3, 0, vk::Format::eR32G32Sfloat, offsetof(VertexObj, texCoord)}};
m_graphicsPipeline = gpb.create();
m_debug.setObjectName(m_graphicsPipeline, "Graphics");
}
//--------------------------------------------------------------------------------------------------
// Loading the OBJ file and setting up all buffers
//
void HelloVulkan::loadModel(const std::string& filename, nvmath::mat4f transform)
{
using vkBU = vk::BufferUsageFlagBits;
ObjLoader loader;
loader.loadModel(filename);
// Converting from sRGB to linear
for(auto& m : loader.m_materials)
{
m.ambient = nvmath::pow(m.ambient, 2.2f);
m.diffuse = nvmath::pow(m.diffuse, 2.2f);
m.specular = nvmath::pow(m.specular, 2.2f);
}
ObjInstance instance;
instance.objIndex = static_cast<uint32_t>(m_objModel.size());
instance.transform = transform;
instance.transformIT = nvmath::transpose(nvmath::invert(transform));
instance.txtOffset = static_cast<uint32_t>(m_textures.size());
ObjModel model;
model.nbIndices = static_cast<uint32_t>(loader.m_indices.size());
model.nbVertices = static_cast<uint32_t>(loader.m_vertices.size());
// Create the buffers on Device and copy vertices, indices and materials
nvvkpp::SingleCommandBuffer cmdBufGet(m_device, m_graphicsQueueIndex);
vk::CommandBuffer cmdBuf = cmdBufGet.createCommandBuffer();
model.vertexBuffer =
m_alloc.createBuffer(cmdBuf, loader.m_vertices,
vkBU::eVertexBuffer | vkBU::eStorageBuffer | vkBU::eShaderDeviceAddress);
model.indexBuffer =
m_alloc.createBuffer(cmdBuf, loader.m_indices,
vkBU::eIndexBuffer | vkBU::eStorageBuffer | vkBU::eShaderDeviceAddress);
model.matColorBuffer = m_alloc.createBuffer(cmdBuf, loader.m_materials, vkBU::eStorageBuffer);
model.matIndexBuffer = m_alloc.createBuffer(cmdBuf, loader.m_matIndx, vkBU::eStorageBuffer);
// Creates all textures found
createTextureImages(cmdBuf, loader.m_textures);
cmdBufGet.flushCommandBuffer(cmdBuf);
m_alloc.flushStaging();
std::string objNb = std::to_string(instance.objIndex);
m_debug.setObjectName(model.vertexBuffer.buffer, (std::string("vertex_" + objNb).c_str()));
m_debug.setObjectName(model.indexBuffer.buffer, (std::string("index_" + objNb).c_str()));
m_debug.setObjectName(model.matColorBuffer.buffer, (std::string("mat_" + objNb).c_str()));
m_debug.setObjectName(model.matIndexBuffer.buffer, (std::string("matIdx_" + objNb).c_str()));
m_objModel.emplace_back(model);
m_objInstance.emplace_back(instance);
}
//--------------------------------------------------------------------------------------------------
// Creating the uniform buffer holding the camera matrices
// - Buffer is host visible
//
void HelloVulkan::createUniformBuffer()
{
using vkBU = vk::BufferUsageFlagBits;
using vkMP = vk::MemoryPropertyFlagBits;
m_cameraMat = m_alloc.createBuffer(sizeof(CameraMatrices), vkBU::eUniformBuffer,
vkMP::eHostVisible | vkMP::eHostCoherent);
m_debug.setObjectName(m_cameraMat.buffer, "cameraMat");
}
//--------------------------------------------------------------------------------------------------
// Create a storage buffer containing the description of the scene elements
// - Which geometry is used by which instance
// - Transformation
// - Offset for texture
//
void HelloVulkan::createSceneDescriptionBuffer()
{
using vkBU = vk::BufferUsageFlagBits;
nvvkpp::SingleCommandBuffer cmdGen(m_device, m_graphicsQueueIndex);
auto cmdBuf = cmdGen.createCommandBuffer();
m_sceneDesc = m_alloc.createBuffer(cmdBuf, m_objInstance, vkBU::eStorageBuffer);
cmdGen.flushCommandBuffer(cmdBuf);
m_alloc.flushStaging();
m_debug.setObjectName(m_sceneDesc.buffer, "sceneDesc");
}
//--------------------------------------------------------------------------------------------------
// Creating all textures and samplers
//
void HelloVulkan::createTextureImages(const vk::CommandBuffer& cmdBuf,
const std::vector<std::string>& textures)
{
using vkIU = vk::ImageUsageFlagBits;
vk::SamplerCreateInfo samplerCreateInfo{
{}, vk::Filter::eLinear, vk::Filter::eLinear, vk::SamplerMipmapMode::eLinear};
samplerCreateInfo.setMaxLod(FLT_MAX);
vk::Format format = vk::Format::eR8G8B8A8Srgb;
// If no textures are present, create a dummy one to accommodate the pipeline layout
if(textures.empty() && m_textures.empty())
{
nvvkTexture texture;
std::array<uint8_t, 4> color{255u, 255u, 255u, 255u};
vk::DeviceSize bufferSize = sizeof(color);
auto imgSize = vk::Extent2D(1, 1);
auto imageCreateInfo = nvvkpp::image::create2DInfo(imgSize, format);
// Creating the VkImage
texture = m_alloc.createImage(cmdBuf, bufferSize, color.data(), imageCreateInfo);
// Setting up the descriptor used by the shader
texture.descriptor =
nvvkpp::image::create2DDescriptor(m_device, texture.image, samplerCreateInfo, format);
// The image layout must be VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
nvvkpp::image::setImageLayout(cmdBuf, texture.image, vk::ImageLayout::eUndefined,
vk::ImageLayout::eShaderReadOnlyOptimal);
m_textures.push_back(texture);
}
else
{
// Uploading all images
for(const auto& texture : textures)
{
std::stringstream o;
int texWidth, texHeight, texChannels;
o << "media/textures/" << texture;
std::string txtFile = nvh::findFile(o.str(), defaultSearchPaths);
stbi_uc* pixels =
stbi_load(txtFile.c_str(), &texWidth, &texHeight, &texChannels, STBI_rgb_alpha);
// Handle failure
if(!pixels)
{
texWidth = texHeight = 1;
texChannels = 4;
std::array<uint8_t, 4> color{255u, 0u, 255u, 255u};
pixels = reinterpret_cast<stbi_uc*>(color.data());
}
vk::DeviceSize bufferSize = static_cast<uint64_t>(texWidth) * texHeight * sizeof(uint8_t) * 4;
auto imgSize = vk::Extent2D(texWidth, texHeight);
auto imageCreateInfo = nvvkpp::image::create2DInfo(imgSize, format, vkIU::eSampled, true);
{
nvvkTexture texture;
texture = m_alloc.createImage(cmdBuf, bufferSize, pixels, imageCreateInfo);
nvvkpp::image::generateMipmaps(cmdBuf, texture.image, format, imgSize,
imageCreateInfo.mipLevels);
texture.descriptor =
nvvkpp::image::create2DDescriptor(m_device, texture.image, samplerCreateInfo, format);
m_textures.push_back(texture);
}
}
}
}
//--------------------------------------------------------------------------------------------------
// Destroying all allocations
//
void HelloVulkan::destroyResources()
{
m_device.destroy(m_graphicsPipeline);
m_device.destroy(m_pipelineLayout);
m_device.destroy(m_descPool);
m_device.destroy(m_descSetLayout);
m_alloc.destroy(m_cameraMat);
m_alloc.destroy(m_sceneDesc);
for(auto& m : m_objModel)
{
m_alloc.destroy(m.vertexBuffer);
m_alloc.destroy(m.indexBuffer);
m_alloc.destroy(m.matColorBuffer);
m_alloc.destroy(m.matIndexBuffer);
}
for(auto& t : m_textures)
{
m_alloc.destroy(t);
}
//#Post
m_device.destroy(m_postPipeline);
m_device.destroy(m_postPipelineLayout);
m_device.destroy(m_postDescPool);
m_device.destroy(m_postDescSetLayout);
m_alloc.destroy(m_offscreenColor);
m_alloc.destroy(m_offscreenDepth);
m_device.destroy(m_offscreenRenderPass);
m_device.destroy(m_offscreenFramebuffer);
// #VKRay
m_rtBuilder.destroy();
m_device.destroy(m_rtDescPool);
m_device.destroy(m_rtDescSetLayout);
m_device.destroy(m_rtPipeline);
m_device.destroy(m_rtPipelineLayout);
m_alloc.destroy(m_rtSBTBuffer);
}
//--------------------------------------------------------------------------------------------------
// Drawing the scene in raster mode
//
void HelloVulkan::rasterize(const vk::CommandBuffer& cmdBuf)
{
using vkPBP = vk::PipelineBindPoint;
using vkSS = vk::ShaderStageFlagBits;
vk::DeviceSize offset{0};
m_debug.beginLabel(cmdBuf, "Rasterize");
// Dynamic Viewport
cmdBuf.setViewport(0, {vk::Viewport(0, 0, (float)m_size.width, (float)m_size.height, 0, 1)});
cmdBuf.setScissor(0, {{{0, 0}, {m_size.width, m_size.height}}});
// Drawing all triangles
cmdBuf.bindPipeline(vkPBP::eGraphics, m_graphicsPipeline);
cmdBuf.bindDescriptorSets(vkPBP::eGraphics, m_pipelineLayout, 0, {m_descSet}, {});
for(int i = 0; i < m_objInstance.size(); ++i)
{
auto& inst = m_objInstance[i];
auto& model = m_objModel[inst.objIndex];
m_pushConstant.instanceId = i; // Telling which instance is drawn
cmdBuf.pushConstants<ObjPushConstant>(m_pipelineLayout, vkSS::eVertex | vkSS::eFragment, 0,
m_pushConstant);
cmdBuf.bindVertexBuffers(0, 1, &model.vertexBuffer.buffer, &offset);
cmdBuf.bindIndexBuffer(model.indexBuffer.buffer, 0, vk::IndexType::eUint32);
cmdBuf.drawIndexed(model.nbIndices, 1, 0, 0, 0);
}
m_debug.endLabel(cmdBuf);
}
//--------------------------------------------------------------------------------------------------
// Handling resize of the window
//
void HelloVulkan::onResize(int /*w*/, int /*h*/)
{
createOffscreenRender();
updatePostDescriptorSet();
updateRtDescriptorSet();
}
//////////////////////////////////////////////////////////////////////////
// Post-processing
//////////////////////////////////////////////////////////////////////////
//--------------------------------------------------------------------------------------------------
// Creating an offscreen frame buffer and the associated render pass
//
void HelloVulkan::createOffscreenRender()
{
m_alloc.destroy(m_offscreenColor);
m_alloc.destroy(m_offscreenDepth);
// Creating the color image
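// (eStorage is included so the ray tracing pipeline can write to this image as a storage image)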
auto colorCreateInfo = nvvkpp::image::create2DInfo(m_size, m_offscreenColorFormat,
vk::ImageUsageFlagBits::eColorAttachment
| vk::ImageUsageFlagBits::eSampled
| vk::ImageUsageFlagBits::eStorage);
m_offscreenColor = m_alloc.createImage(colorCreateInfo);
m_offscreenColor.descriptor =
nvvkpp::image::create2DDescriptor(m_device, m_offscreenColor.image, vk::SamplerCreateInfo{},
m_offscreenColorFormat, vk::ImageLayout::eGeneral);
// Creating the depth buffer
auto depthCreateInfo =
nvvkpp::image::create2DInfo(m_size, m_offscreenDepthFormat,
vk::ImageUsageFlagBits::eDepthStencilAttachment);
m_offscreenDepth = m_alloc.createImage(depthCreateInfo);
vk::ImageViewCreateInfo depthStencilView;
depthStencilView.setViewType(vk::ImageViewType::e2D);
depthStencilView.setFormat(m_offscreenDepthFormat);
depthStencilView.setSubresourceRange({vk::ImageAspectFlagBits::eDepth, 0, 1, 0, 1});
depthStencilView.setImage(m_offscreenDepth.image);
m_offscreenDepth.descriptor.imageView = m_device.createImageView(depthStencilView);
// Setting the image layout for both color and depth
{
nvvkpp::SingleCommandBuffer genCmdBuf(m_device, m_graphicsQueueIndex);
auto cmdBuf = genCmdBuf.createCommandBuffer();
nvvkpp::image::setImageLayout(cmdBuf, m_offscreenColor.image, vk::ImageLayout::eUndefined,
vk::ImageLayout::eGeneral);
nvvkpp::image::setImageLayout(cmdBuf, m_offscreenDepth.image, vk::ImageAspectFlagBits::eDepth,
vk::ImageLayout::eUndefined,
vk::ImageLayout::eDepthStencilAttachmentOptimal);
genCmdBuf.flushCommandBuffer(cmdBuf);
}
// Creating a renderpass for the offscreen
if(!m_offscreenRenderPass)
{
m_offscreenRenderPass =
nvvkpp::util::createRenderPass(m_device, {m_offscreenColorFormat}, m_offscreenDepthFormat,
1, true, true, vk::ImageLayout::eGeneral,
vk::ImageLayout::eGeneral);
}
// Creating the frame buffer for offscreen
std::vector<vk::ImageView> attachments = {m_offscreenColor.descriptor.imageView,
m_offscreenDepth.descriptor.imageView};
m_device.destroy(m_offscreenFramebuffer);
vk::FramebufferCreateInfo info;
info.setRenderPass(m_offscreenRenderPass);
info.setAttachmentCount(2);
info.setPAttachments(attachments.data());
info.setWidth(m_size.width);
info.setHeight(m_size.height);
info.setLayers(1);
m_offscreenFramebuffer = m_device.createFramebuffer(info);
}
//--------------------------------------------------------------------------------------------------
// The pipeline defines how things are rendered: which shaders, primitive types, depth test and more
//
void HelloVulkan::createPostPipeline()
{
// Push constants in the fragment shader
vk::PushConstantRange pushConstantRanges = {vk::ShaderStageFlagBits::eFragment, 0, sizeof(float)};
// Creating the pipeline layout
vk::PipelineLayoutCreateInfo pipelineLayoutCreateInfo;
pipelineLayoutCreateInfo.setSetLayoutCount(1);
pipelineLayoutCreateInfo.setPSetLayouts(&m_postDescSetLayout);
pipelineLayoutCreateInfo.setPushConstantRangeCount(1);
pipelineLayoutCreateInfo.setPPushConstantRanges(&pushConstantRanges);
m_postPipelineLayout = m_device.createPipelineLayout(pipelineLayoutCreateInfo);
// Pipeline: completely generic, no vertices
std::vector<std::string> paths = defaultSearchPaths;
nvvkpp::GraphicsPipelineGenerator pipelineGenerator(m_device, m_postPipelineLayout, m_renderPass);
pipelineGenerator.addShader(nvh::loadFile("shaders/passthrough.vert.spv", true, paths),
vk::ShaderStageFlagBits::eVertex);
pipelineGenerator.addShader(nvh::loadFile("shaders/post.frag.spv", true, paths),
vk::ShaderStageFlagBits::eFragment);
pipelineGenerator.rasterizationState.setCullMode(vk::CullModeFlagBits::eNone);
m_postPipeline = pipelineGenerator.create();
m_debug.setObjectName(m_postPipeline, "post");
}
//--------------------------------------------------------------------------------------------------
// The descriptor layout is the description of the data that is passed to the vertex or the
// fragment program.
//
void HelloVulkan::createPostDescriptor()
{
using vkDS = vk::DescriptorSetLayoutBinding;
using vkDT = vk::DescriptorType;
using vkSS = vk::ShaderStageFlagBits;
m_postDescSetLayoutBind.emplace_back(vkDS(0, vkDT::eCombinedImageSampler, 1, vkSS::eFragment));
m_postDescSetLayout = nvvkpp::util::createDescriptorSetLayout(m_device, m_postDescSetLayoutBind);
m_postDescPool = nvvkpp::util::createDescriptorPool(m_device, m_postDescSetLayoutBind);
m_postDescSet = nvvkpp::util::createDescriptorSet(m_device, m_postDescPool, m_postDescSetLayout);
}
//--------------------------------------------------------------------------------------------------
// Update the output image in the descriptor set
//
void HelloVulkan::updatePostDescriptorSet()
{
vk::WriteDescriptorSet writeDescriptorSets =
nvvkpp::util::createWrite(m_postDescSet, m_postDescSetLayoutBind[0],
&m_offscreenColor.descriptor);
m_device.updateDescriptorSets(writeDescriptorSets, nullptr);
}
//--------------------------------------------------------------------------------------------------
// Draw a full-screen triangle with the attached image
//
void HelloVulkan::drawPost(vk::CommandBuffer cmdBuf)
{
m_debug.beginLabel(cmdBuf, "Post");
cmdBuf.setViewport(0, {vk::Viewport(0, 0, (float)m_size.width, (float)m_size.height, 0, 1)});
cmdBuf.setScissor(0, {{{0, 0}, {m_size.width, m_size.height}}});
auto aspectRatio = static_cast<float>(m_size.width) / static_cast<float>(m_size.height);
cmdBuf.pushConstants<float>(m_postPipelineLayout, vk::ShaderStageFlagBits::eFragment, 0,
aspectRatio);
cmdBuf.bindPipeline(vk::PipelineBindPoint::eGraphics, m_postPipeline);
cmdBuf.bindDescriptorSets(vk::PipelineBindPoint::eGraphics, m_postPipelineLayout, 0,
m_postDescSet, {});
cmdBuf.draw(3, 1, 0, 0);
m_debug.endLabel(cmdBuf);
}
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
//--------------------------------------------------------------------------------------------------
// Initialize Vulkan ray tracing
// #VKRay
void HelloVulkan::initRayTracing()
{
// Requesting ray tracing properties
auto properties = m_physicalDevice.getProperties2<vk::PhysicalDeviceProperties2,
vk::PhysicalDeviceRayTracingPropertiesKHR>();
m_rtProperties = properties.get<vk::PhysicalDeviceRayTracingPropertiesKHR>();
m_rtBuilder.setup(m_device, m_physicalDevice, m_graphicsQueueIndex);
}
//--------------------------------------------------------------------------------------------------
// Converting an OBJ primitive to the ray tracing geometry used for the BLAS
//
nvvkpp::RaytracingBuilderKHR::Blas HelloVulkan::objectToVkGeometryKHR(const ObjModel& model)
{
// Setting up the creation info of acceleration structure
vk::AccelerationStructureCreateGeometryTypeInfoKHR asCreate;
asCreate.setGeometryType(vk::GeometryTypeKHR::eTriangles);
asCreate.setIndexType(vk::IndexType::eUint32);
asCreate.setVertexFormat(vk::Format::eR32G32B32Sfloat);
asCreate.setMaxPrimitiveCount(model.nbIndices / 3); // Nb triangles
asCreate.setMaxVertexCount(model.nbVertices);
asCreate.setAllowsTransforms(VK_FALSE);  // Not adding transformation matrices
// Building part
vk::DeviceAddress vertexAddress = m_device.getBufferAddress({model.vertexBuffer.buffer});
vk::DeviceAddress indexAddress = m_device.getBufferAddress({model.indexBuffer.buffer});
vk::AccelerationStructureGeometryTrianglesDataKHR triangles;
triangles.setVertexFormat(asCreate.vertexFormat);
triangles.setVertexData(vertexAddress);
triangles.setVertexStride(sizeof(VertexObj));
triangles.setIndexType(asCreate.indexType);
triangles.setIndexData(indexAddress);
triangles.setTransformData({});
// Setting up the build info of the acceleration
vk::AccelerationStructureGeometryKHR asGeom;
asGeom.setGeometryType(asCreate.geometryType);
asGeom.setFlags(vk::GeometryFlagBitsKHR::eOpaque);
asGeom.geometry.setTriangles(triangles);
// The primitive itself
vk::AccelerationStructureBuildOffsetInfoKHR offset;
offset.setFirstVertex(0);
offset.setPrimitiveCount(asCreate.maxPrimitiveCount);
offset.setPrimitiveOffset(0);
offset.setTransformOffset(0);
// Our BLAS is made of only one geometry, but it could be made of many geometries
nvvkpp::RaytracingBuilderKHR::Blas blas;
blas.asGeometry.emplace_back(asGeom);
blas.asCreateGeometryInfo.emplace_back(asCreate);
blas.asBuildOffsetInfo.emplace_back(offset);
return blas;
}
//--------------------------------------------------------------------------------------------------
// Creating the bottom-level acceleration structures: one BLAS per OBJ model
//
void HelloVulkan::createBottomLevelAS()
{
// BLAS - Storing each primitive in a geometry
std::vector<nvvkpp::RaytracingBuilderKHR::Blas> allBlas;
allBlas.reserve(m_objModel.size());
for(const auto& obj : m_objModel)
{
auto blas = objectToVkGeometryKHR(obj);
// We could add more geometry in each BLAS, but we add only one for now
allBlas.emplace_back(blas);
}
m_rtBuilder.buildBlas(allBlas, vk::BuildAccelerationStructureFlagBitsKHR::ePreferFastTrace);
}
void HelloVulkan::createTopLevelAS()
{
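// The TLAS contains one instance per ObjInstance, each pointing to its BLAS through blasId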
std::vector<nvvkpp::RaytracingBuilderKHR::Instance> tlas;
tlas.reserve(m_objInstance.size());
for(int i = 0; i < static_cast<int>(m_objInstance.size()); i++)
{
nvvkpp::RaytracingBuilderKHR::Instance rayInst;
rayInst.transform = m_objInstance[i].transform; // Position of the instance
rayInst.instanceId = i; // gl_InstanceID
rayInst.blasId = m_objInstance[i].objIndex;
rayInst.hitGroupId = 0; // We will use the same hit group for all objects
rayInst.flags = vk::GeometryInstanceFlagBitsKHR::eTriangleCullDisable;
tlas.emplace_back(rayInst);
}
m_rtBuilder.buildTlas(tlas, vk::BuildAccelerationStructureFlagBitsKHR::ePreferFastTrace);
}
//--------------------------------------------------------------------------------------------------
// This descriptor set holds the Acceleration structure and the output image
//
void HelloVulkan::createRtDescriptorSet()
{
using vkDT = vk::DescriptorType;
using vkSS = vk::ShaderStageFlagBits;
using vkDSLB = vk::DescriptorSetLayoutBinding;
m_rtDescSetLayoutBind.emplace_back(vkDSLB(0, vkDT::eAccelerationStructureKHR, 1,
vkSS::eRaygenKHR | vkSS::eClosestHitKHR)); // TLAS
m_rtDescSetLayoutBind.emplace_back(
vkDSLB(1, vkDT::eStorageImage, 1, vkSS::eRaygenKHR)); // Output image
m_rtDescPool = nvvkpp::util::createDescriptorPool(m_device, m_rtDescSetLayoutBind);
m_rtDescSetLayout = nvvkpp::util::createDescriptorSetLayout(m_device, m_rtDescSetLayoutBind);
m_rtDescSet = m_device.allocateDescriptorSets({m_rtDescPool, 1, &m_rtDescSetLayout})[0];
vk::WriteDescriptorSetAccelerationStructureKHR descASInfo;
descASInfo.setAccelerationStructureCount(1);
descASInfo.setPAccelerationStructures(&m_rtBuilder.getAccelerationStructure());
vk::DescriptorImageInfo imageInfo{
{}, m_offscreenColor.descriptor.imageView, vk::ImageLayout::eGeneral};
std::vector<vk::WriteDescriptorSet> writes;
writes.emplace_back(
nvvkpp::util::createWrite(m_rtDescSet, m_rtDescSetLayoutBind[0], &descASInfo));
writes.emplace_back(nvvkpp::util::createWrite(m_rtDescSet, m_rtDescSetLayoutBind[1], &imageInfo));
m_device.updateDescriptorSets(static_cast<uint32_t>(writes.size()), writes.data(), 0, nullptr);
}
//--------------------------------------------------------------------------------------------------
// Writes the output image to the descriptor set
// - Required when changing resolution
//
void HelloVulkan::updateRtDescriptorSet()
{
using vkDT = vk::DescriptorType;
// (1) Output buffer
vk::DescriptorImageInfo imageInfo{
{}, m_offscreenColor.descriptor.imageView, vk::ImageLayout::eGeneral};
vk::WriteDescriptorSet wds{m_rtDescSet, 1, 0, 1, vkDT::eStorageImage, &imageInfo};
m_device.updateDescriptorSets(wds, nullptr);
}
//--------------------------------------------------------------------------------------------------
// Pipeline for the ray tracer: all shaders, raygen, chit, miss
//
void HelloVulkan::createRtPipeline()
{
std::vector<std::string> paths = defaultSearchPaths;
vk::ShaderModule raygenSM =
nvvkpp::util::createShaderModule(m_device, //
nvh::loadFile("shaders/raytrace.rgen.spv", true, paths));
vk::ShaderModule missSM =
nvvkpp::util::createShaderModule(m_device, //
nvh::loadFile("shaders/raytrace.rmiss.spv", true, paths));
// The second miss shader is invoked when a shadow ray misses the geometry. It
// simply indicates that no occlusion has been found
vk::ShaderModule shadowmissSM = nvvkpp::util::createShaderModule(
m_device, nvh::loadFile("shaders/raytraceShadow.rmiss.spv", true, paths));
std::vector<vk::PipelineShaderStageCreateInfo> stages;
// Raygen
vk::RayTracingShaderGroupCreateInfoKHR rg{vk::RayTracingShaderGroupTypeKHR::eGeneral,
VK_SHADER_UNUSED_KHR, VK_SHADER_UNUSED_KHR,
VK_SHADER_UNUSED_KHR, VK_SHADER_UNUSED_KHR};
stages.push_back({{}, vk::ShaderStageFlagBits::eRaygenKHR, raygenSM, "main"});
rg.setGeneralShader(static_cast<uint32_t>(stages.size() - 1));
m_rtShaderGroups.push_back(rg);
// Miss
vk::RayTracingShaderGroupCreateInfoKHR mg{vk::RayTracingShaderGroupTypeKHR::eGeneral,
VK_SHADER_UNUSED_KHR, VK_SHADER_UNUSED_KHR,
VK_SHADER_UNUSED_KHR, VK_SHADER_UNUSED_KHR};
stages.push_back({{}, vk::ShaderStageFlagBits::eMissKHR, missSM, "main"});
mg.setGeneralShader(static_cast<uint32_t>(stages.size() - 1));
m_rtShaderGroups.push_back(mg);
// Shadow Miss
stages.push_back({{}, vk::ShaderStageFlagBits::eMissKHR, shadowmissSM, "main"});
mg.setGeneralShader(static_cast<uint32_t>(stages.size() - 1));
m_rtShaderGroups.push_back(mg);
// Hit Group - Closest Hit + AnyHit
vk::ShaderModule chitSM =
nvvkpp::util::createShaderModule(m_device, //
nvh::loadFile("shaders/raytrace.rchit.spv", true, paths));
vk::RayTracingShaderGroupCreateInfoKHR hg{vk::RayTracingShaderGroupTypeKHR::eTrianglesHitGroup,
VK_SHADER_UNUSED_KHR, VK_SHADER_UNUSED_KHR,
VK_SHADER_UNUSED_KHR, VK_SHADER_UNUSED_KHR};
stages.push_back({{}, vk::ShaderStageFlagBits::eClosestHitKHR, chitSM, "main"});
hg.setClosestHitShader(static_cast<uint32_t>(stages.size() - 1));
m_rtShaderGroups.push_back(hg);
vk::PipelineLayoutCreateInfo pipelineLayoutCreateInfo;
// Push constant: we want to be able to update constants used by the shaders
vk::PushConstantRange pushConstant{vk::ShaderStageFlagBits::eRaygenKHR
| vk::ShaderStageFlagBits::eClosestHitKHR
| vk::ShaderStageFlagBits::eMissKHR,
0, sizeof(RtPushConstant)};
pipelineLayoutCreateInfo.setPushConstantRangeCount(1);
pipelineLayoutCreateInfo.setPPushConstantRanges(&pushConstant);
// Descriptor sets: one specific to ray tracing, and one shared with the rasterization pipeline
std::vector<vk::DescriptorSetLayout> rtDescSetLayouts = {m_rtDescSetLayout, m_descSetLayout};
pipelineLayoutCreateInfo.setSetLayoutCount(static_cast<uint32_t>(rtDescSetLayouts.size()));
pipelineLayoutCreateInfo.setPSetLayouts(rtDescSetLayouts.data());
m_rtPipelineLayout = m_device.createPipelineLayout(pipelineLayoutCreateInfo);
// Assemble the shader stages and recursion depth info into the ray tracing pipeline
vk::RayTracingPipelineCreateInfoKHR rayPipelineInfo;
rayPipelineInfo.setStageCount(static_cast<uint32_t>(stages.size())); // Stages are shaders
rayPipelineInfo.setPStages(stages.data());
rayPipelineInfo.setGroupCount(static_cast<uint32_t>(
m_rtShaderGroups.size())); // 1-raygen, n-miss, n-(hit[+anyhit+intersect])
rayPipelineInfo.setPGroups(m_rtShaderGroups.data());
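// Depth 2: primary rays from the raygen shader plus a shadow ray traced from the closest-hit shader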
rayPipelineInfo.setMaxRecursionDepth(2); // Ray depth
rayPipelineInfo.setLayout(m_rtPipelineLayout);
m_rtPipeline = m_device.createRayTracingPipelineKHR({}, rayPipelineInfo).value;
m_device.destroy(raygenSM);
m_device.destroy(missSM);
m_device.destroy(shadowmissSM);
m_device.destroy(chitSM);
}
//--------------------------------------------------------------------------------------------------
// The Shader Binding Table (SBT)
// - Gets all shader handles and writes them into an SBT buffer
// - With few exceptions, this can always be done the same way
//   See how the SBT buffer is used in raytrace()
//
void HelloVulkan::createRtShaderBindingTable()
{
auto groupCount =
static_cast<uint32_t>(m_rtShaderGroups.size());  // 4 groups: raygen, miss, shadow miss, hit
uint32_t groupHandleSize = m_rtProperties.shaderGroupHandleSize; // Size of a program identifier
// Fetch all the shader handles used in the pipeline, so that they can be written in the SBT
uint32_t sbtSize = groupCount * groupHandleSize;
std::vector<uint8_t> shaderHandleStorage(sbtSize);
m_device.getRayTracingShaderGroupHandlesKHR(m_rtPipeline, 0, groupCount, sbtSize,
shaderHandleStorage.data());
// Write the handles in the SBT
nvvkpp::SingleCommandBuffer genCmdBuf(m_device, m_graphicsQueueIndex);
vk::CommandBuffer cmdBuf = genCmdBuf.createCommandBuffer();
m_rtSBTBuffer =
m_alloc.createBuffer(cmdBuf, shaderHandleStorage, vk::BufferUsageFlagBits::eRayTracingKHR);
m_debug.setObjectName(m_rtSBTBuffer.buffer, "SBT");
genCmdBuf.flushCommandBuffer(cmdBuf);
m_alloc.flushStaging();
}
//--------------------------------------------------------------------------------------------------
// Ray Tracing the scene
//
void HelloVulkan::raytrace(const vk::CommandBuffer& cmdBuf, const nvmath::vec4f& clearColor)
{
m_debug.beginLabel(cmdBuf, "Ray trace");
// Initializing push constant values
m_rtPushConstants.clearColor = clearColor;
m_rtPushConstants.lightPosition = m_pushConstant.lightPosition;
m_rtPushConstants.lightIntensity = m_pushConstant.lightIntensity;
m_rtPushConstants.lightType = m_pushConstant.lightType;
cmdBuf.bindPipeline(vk::PipelineBindPoint::eRayTracingKHR, m_rtPipeline);
cmdBuf.bindDescriptorSets(vk::PipelineBindPoint::eRayTracingKHR, m_rtPipelineLayout, 0,
{m_rtDescSet, m_descSet}, {});
cmdBuf.pushConstants<RtPushConstant>(m_rtPipelineLayout,
vk::ShaderStageFlagBits::eRaygenKHR
| vk::ShaderStageFlagBits::eClosestHitKHR
| vk::ShaderStageFlagBits::eMissKHR,
0, m_rtPushConstants);
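// The SBT layout follows the order in which the groups were added in createRtPipeline():
// group 0 = raygen, groups 1-2 = miss and shadow miss, group 3 = hit group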
vk::DeviceSize progSize = m_rtProperties.shaderGroupHandleSize; // Size of a program identifier
vk::DeviceSize rayGenOffset = 0u * progSize; // Start at the beginning of m_sbtBuffer
vk::DeviceSize missOffset = 1u * progSize; // Jump over raygen
vk::DeviceSize hitGroupOffset = 3u * progSize; // Jump over the previous shaders
vk::DeviceSize sbtSize = progSize * (vk::DeviceSize)m_rtShaderGroups.size();
const vk::StridedBufferRegionKHR raygenShaderBindingTable = {m_rtSBTBuffer.buffer, rayGenOffset,
progSize, sbtSize};
const vk::StridedBufferRegionKHR missShaderBindingTable = {m_rtSBTBuffer.buffer, missOffset,
progSize, sbtSize};
const vk::StridedBufferRegionKHR hitShaderBindingTable = {m_rtSBTBuffer.buffer, hitGroupOffset,
progSize, sbtSize};
const vk::StridedBufferRegionKHR callableShaderBindingTable;
cmdBuf.traceRaysKHR(&raygenShaderBindingTable, &missShaderBindingTable, &hitShaderBindingTable,
&callableShaderBindingTable, //
m_size.width, m_size.height, 1); //
m_debug.endLabel(cmdBuf);
}

View file

@@ -0,0 +1,165 @@
/* Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#include "nvvkpp/allocator_dedicated_vkpp.hpp"
#include "nvvkpp/appbase_vkpp.hpp"
#include "nvvkpp/debug_util_vkpp.hpp"
// #VKRay
#define ALLOC_DEDICATED
#include "nvvkpp/raytraceKHR_vkpp.hpp"
using nvvkBuffer = nvvkpp::BufferDedicated;
using nvvkTexture = nvvkpp::TextureDedicated;
//--------------------------------------------------------------------------------------------------
// Simple rasterizer of OBJ objects
// - Each OBJ loaded is stored in an `ObjModel` and referenced by an `ObjInstance`
// - It is possible to have many `ObjInstance` referencing the same `ObjModel`
// - Rendering is done in an offscreen framebuffer
// - The image of the framebuffer is displayed in post-process in a full-screen quad
//
class HelloVulkan : public nvvkpp::AppBase
{
public:
void setup(const vk::Device& device,
const vk::PhysicalDevice& physicalDevice,
uint32_t queueFamily) override;
void createDescriptorSetLayout();
void createGraphicsPipeline();
void loadModel(const std::string& filename, nvmath::mat4f transform = nvmath::mat4f(1));
void updateDescriptorSet();
void createUniformBuffer();
void createSceneDescriptionBuffer();
void createTextureImages(const vk::CommandBuffer& cmdBuf,
const std::vector<std::string>& textures);
void updateUniformBuffer();
void onResize(int /*w*/, int /*h*/) override;
void destroyResources();
void rasterize(const vk::CommandBuffer& cmdBuff);
// The OBJ model
struct ObjModel
{
uint32_t nbIndices{0};
uint32_t nbVertices{0};
nvvkBuffer vertexBuffer; // Device buffer of all 'Vertex'
nvvkBuffer indexBuffer; // Device buffer of the indices forming triangles
nvvkBuffer matColorBuffer; // Device buffer of array of 'Wavefront material'
nvvkBuffer matIndexBuffer;  // Device buffer of array of material indices (per triangle)
};
// Instance of the OBJ
struct ObjInstance
{
uint32_t objIndex{0}; // Reference to the `m_objModel`
uint32_t txtOffset{0}; // Offset in `m_textures`
nvmath::mat4f transform{1}; // Position of the instance
nvmath::mat4f transformIT{1}; // Inverse transpose
};
// Information pushed at each draw call
struct ObjPushConstant
{
nvmath::vec3f lightPosition{10.f, 15.f, 8.f};
int instanceId{0}; // To retrieve the transformation matrix
float lightIntensity{100.f};
int lightType{0}; // 0: point, 1: infinite
};
ObjPushConstant m_pushConstant;
// Array of objects and instances in the scene
std::vector<ObjModel> m_objModel;
std::vector<ObjInstance> m_objInstance;
// Graphic pipeline
vk::PipelineLayout m_pipelineLayout;
vk::Pipeline m_graphicsPipeline;
std::vector<vk::DescriptorSetLayoutBinding> m_descSetLayoutBind;
vk::DescriptorPool m_descPool;
vk::DescriptorSetLayout m_descSetLayout;
vk::DescriptorSet m_descSet;
nvvkBuffer m_cameraMat;  // Device buffer holding the camera matrices (host visible)
nvvkBuffer m_sceneDesc; // Device buffer of the OBJ instances
std::vector<nvvkTexture> m_textures; // vector of all textures of the scene
nvvkpp::AllocatorDedicated m_alloc;  // Allocator for buffers, images and acceleration structures
nvvkpp::DebugUtil m_debug; // Utility to name objects
// #Post
void createOffscreenRender();
void createPostPipeline();
void createPostDescriptor();
void updatePostDescriptorSet();
void drawPost(vk::CommandBuffer cmdBuf);
std::vector<vk::DescriptorSetLayoutBinding> m_postDescSetLayoutBind;
vk::DescriptorPool m_postDescPool;
vk::DescriptorSetLayout m_postDescSetLayout;
vk::DescriptorSet m_postDescSet;
vk::Pipeline m_postPipeline;
vk::PipelineLayout m_postPipelineLayout;
vk::RenderPass m_offscreenRenderPass;
vk::Framebuffer m_offscreenFramebuffer;
nvvkTexture m_offscreenColor;
vk::Format m_offscreenColorFormat{vk::Format::eR32G32B32A32Sfloat};
nvvkTexture m_offscreenDepth;
vk::Format m_offscreenDepthFormat{vk::Format::eD32Sfloat};
// #VKRay
void initRayTracing();
nvvkpp::RaytracingBuilderKHR::Blas objectToVkGeometryKHR(const ObjModel& model);
void createBottomLevelAS();
void createTopLevelAS();
void createRtDescriptorSet();
void updateRtDescriptorSet();
void createRtPipeline();
void createRtShaderBindingTable();
void raytrace(const vk::CommandBuffer& cmdBuf, const nvmath::vec4f& clearColor);
vk::PhysicalDeviceRayTracingPropertiesKHR m_rtProperties;
nvvkpp::RaytracingBuilderKHR m_rtBuilder;
std::vector<vk::DescriptorSetLayoutBinding> m_rtDescSetLayoutBind;
vk::DescriptorPool m_rtDescPool;
vk::DescriptorSetLayout m_rtDescSetLayout;
vk::DescriptorSet m_rtDescSet;
std::vector<vk::RayTracingShaderGroupCreateInfoKHR> m_rtShaderGroups;
vk::PipelineLayout m_rtPipelineLayout;
vk::Pipeline m_rtPipeline;
nvvkBuffer m_rtSBTBuffer;
struct RtPushConstant
{
nvmath::vec4f clearColor;
nvmath::vec3f lightPosition;
float lightIntensity;
int lightType;
} m_rtPushConstants;
};

View file

@@ -0,0 +1,304 @@
/* Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// ImGui - standalone example application for GLFW + Vulkan, using the programmable
// pipeline. If you are new to ImGui, see examples/README.txt and the documentation
// at the top of imgui.cpp.
#include <array>
#include <vulkan/vulkan.hpp>
#include "imgui.h"
#include "imgui_impl_glfw.h"
#include "hello_vulkan.h"
#include "nvh/cameramanipulator.hpp"
#include "nvh/fileoperations.hpp"
#include "nvpsystem.hpp"
#include "nvvkpp/appbase_vkpp.hpp"
#include "nvvkpp/commands_vkpp.hpp"
#include "nvvkpp/context_vkpp.hpp"
#include "nvvkpp/utilities_vkpp.hpp"
//////////////////////////////////////////////////////////////////////////
#define UNUSED(x) (void)(x)
//////////////////////////////////////////////////////////////////////////
// Default search path for shaders
std::vector<std::string> defaultSearchPaths;
// GLFW Callback functions
static void onErrorCallback(int error, const char* description)
{
fprintf(stderr, "GLFW Error %d: %s\n", error, description);
}
// Extra UI
void renderUI(HelloVulkan& helloVk)
{
static int item = 1;
if(ImGui::Combo("Up Vector", &item, "X\0Y\0Z\0\0"))
{
nvmath::vec3f pos, eye, up;
CameraManip.getLookat(pos, eye, up);
up = nvmath::vec3f(item == 0, item == 1, item == 2);
CameraManip.setLookat(pos, eye, up);
}
ImGui::SliderFloat3("Light Position", &helloVk.m_pushConstant.lightPosition.x, -20.f, 20.f);
ImGui::SliderFloat("Light Intensity", &helloVk.m_pushConstant.lightIntensity, 0.f, 100.f);
ImGui::RadioButton("Point", &helloVk.m_pushConstant.lightType, 0);
ImGui::SameLine();
ImGui::RadioButton("Infinite", &helloVk.m_pushConstant.lightType, 1);
}
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
static int const SAMPLE_WIDTH = 1280;
static int const SAMPLE_HEIGHT = 720;
//--------------------------------------------------------------------------------------------------
// Application Entry
//
int main(int argc, char** argv)
{
UNUSED(argc);
// Setup GLFW window
glfwSetErrorCallback(onErrorCallback);
if(!glfwInit())
{
return 1;
}
glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
GLFWwindow* window = glfwCreateWindow(SAMPLE_WIDTH, SAMPLE_HEIGHT,
"NVIDIA Vulkan Raytracing Tutorial", nullptr, nullptr);
// Setup camera
CameraManip.setWindowSize(SAMPLE_WIDTH, SAMPLE_HEIGHT);
CameraManip.setLookat(nvmath::vec3f(5, 4, -4), nvmath::vec3f(0, 1, 0), nvmath::vec3f(0, 1, 0));
// Setup Vulkan
if(!glfwVulkanSupported())
{
printf("GLFW: Vulkan Not Supported\n");
return 1;
}
// Set up some basics for the sample, such as the logging file
NVPSystem system(argv[0], PROJECT_NAME);
// Search path for shaders and other media
defaultSearchPaths = {
PROJECT_ABSDIRECTORY,
PROJECT_ABSDIRECTORY "../",
NVPSystem::exePath() + std::string(PROJECT_RELDIRECTORY),
NVPSystem::exePath() + std::string(PROJECT_RELDIRECTORY) + std::string("../"),
};
// Enabling the extension feature
vk::PhysicalDeviceRayTracingFeaturesKHR raytracingFeature;
// Requesting Vulkan extensions and layers
nvvkpp::ContextCreateInfo contextInfo(true);
contextInfo.setVersion(1, 2);
contextInfo.addInstanceLayer("VK_LAYER_LUNARG_monitor", true);
contextInfo.addInstanceExtension(VK_KHR_SURFACE_EXTENSION_NAME);
#ifdef WIN32
contextInfo.addInstanceExtension(VK_KHR_WIN32_SURFACE_EXTENSION_NAME);
#else
contextInfo.addInstanceExtension(VK_KHR_XLIB_SURFACE_EXTENSION_NAME);
contextInfo.addInstanceExtension(VK_KHR_XCB_SURFACE_EXTENSION_NAME);
#endif
contextInfo.addInstanceExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
contextInfo.addDeviceExtension(VK_KHR_SWAPCHAIN_EXTENSION_NAME);
contextInfo.addDeviceExtension(VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME);
contextInfo.addDeviceExtension(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
// #VKRay: Activate the ray tracing extension
contextInfo.addDeviceExtension(VK_KHR_RAY_TRACING_EXTENSION_NAME, false, &raytracingFeature);
contextInfo.addDeviceExtension(VK_KHR_MAINTENANCE3_EXTENSION_NAME);
contextInfo.addDeviceExtension(VK_KHR_PIPELINE_LIBRARY_EXTENSION_NAME);
contextInfo.addDeviceExtension(VK_KHR_DEFERRED_HOST_OPERATIONS_EXTENSION_NAME);
contextInfo.addDeviceExtension(VK_KHR_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME);
// Creating Vulkan base application
nvvkpp::Context vkctx{};
vkctx.initInstance(contextInfo);
// Find all compatible devices
auto compatibleDevices = vkctx.getCompatibleDevices(contextInfo);
assert(!compatibleDevices.empty());
// Use a compatible device
vkctx.initDevice(compatibleDevices[0], contextInfo);
// Create example
HelloVulkan helloVk;
// The window needs to be opened to get the surface on which to draw
const vk::SurfaceKHR surface = helloVk.getVkSurface(vkctx.m_instance, window);
vkctx.setGCTQueueWithPresent(surface);
helloVk.setup(vkctx.m_device, vkctx.m_physicalDevice, vkctx.m_queueGCT.familyIndex);
helloVk.createSurface(surface, SAMPLE_WIDTH, SAMPLE_HEIGHT);
helloVk.createDepthBuffer();
helloVk.createRenderPass();
helloVk.createFrameBuffers();
// Setup Imgui
helloVk.initGUI(0); // Using sub-pass 0
// Creation of the example
helloVk.loadModel(nvh::findFile("media/scenes/plane.obj", defaultSearchPaths));
helloVk.loadModel(nvh::findFile("media/scenes/Medieval_building.obj", defaultSearchPaths));
helloVk.createOffscreenRender();
helloVk.createDescriptorSetLayout();
helloVk.createGraphicsPipeline();
helloVk.createUniformBuffer();
helloVk.createSceneDescriptionBuffer();
helloVk.updateDescriptorSet();
// #VKRay
helloVk.initRayTracing();
helloVk.createBottomLevelAS();
helloVk.createTopLevelAS();
helloVk.createRtDescriptorSet();
helloVk.createRtPipeline();
helloVk.createRtShaderBindingTable();
helloVk.createPostDescriptor();
helloVk.createPostPipeline();
helloVk.updatePostDescriptorSet();
nvmath::vec4f clearColor = nvmath::vec4f(1, 1, 1, 1.00f);
bool useRaytracer = true;
helloVk.setupGlfwCallbacks(window);
ImGui_ImplGlfw_InitForVulkan(window, true);
// Main loop
while(!glfwWindowShouldClose(window))
{
glfwPollEvents();
if(helloVk.isMinimized())
continue;
// Start the Dear ImGui frame
ImGui_ImplGlfw_NewFrame();
ImGui::NewFrame();
// Updating camera buffer
helloVk.updateUniformBuffer();
// Show UI window.
if(1 == 1)
{
ImGui::ColorEdit3("Clear color", reinterpret_cast<float*>(&clearColor));
ImGui::Checkbox("Ray Tracer mode", &useRaytracer); // Switch between raster and ray tracing
renderUI(helloVk);
ImGui::Text("Application average %.3f ms/frame (%.1f FPS)",
1000.0f / ImGui::GetIO().Framerate, ImGui::GetIO().Framerate);
ImGui::Render();
}
// Start rendering the scene
helloVk.prepareFrame();
// Start command buffer of this frame
auto curFrame = helloVk.getCurFrame();
const vk::CommandBuffer& cmdBuff = helloVk.getCommandBuffers()[curFrame];
cmdBuff.begin({vk::CommandBufferUsageFlagBits::eOneTimeSubmit});
// Clearing screen
vk::ClearValue clearValues[2];
clearValues[0].setColor(nvvkpp::util::clearColor(clearColor));
clearValues[1].setDepthStencil({1.0f, 0});
// Offscreen render pass
{
vk::RenderPassBeginInfo offscreenRenderPassBeginInfo;
offscreenRenderPassBeginInfo.setClearValueCount(2);
offscreenRenderPassBeginInfo.setPClearValues(clearValues);
offscreenRenderPassBeginInfo.setRenderPass(helloVk.m_offscreenRenderPass);
offscreenRenderPassBeginInfo.setFramebuffer(helloVk.m_offscreenFramebuffer);
offscreenRenderPassBeginInfo.setRenderArea({{}, helloVk.getSize()});
// Rendering Scene
if(useRaytracer)
{
helloVk.raytrace(cmdBuff, clearColor);
}
else
{
cmdBuff.beginRenderPass(offscreenRenderPassBeginInfo, vk::SubpassContents::eInline);
helloVk.rasterize(cmdBuff);
cmdBuff.endRenderPass();
}
}
// 2nd rendering pass: tone mapper, UI
{
vk::RenderPassBeginInfo postRenderPassBeginInfo;
postRenderPassBeginInfo.setClearValueCount(2);
postRenderPassBeginInfo.setPClearValues(clearValues);
postRenderPassBeginInfo.setRenderPass(helloVk.getRenderPass());
postRenderPassBeginInfo.setFramebuffer(helloVk.getFramebuffers()[curFrame]);
postRenderPassBeginInfo.setRenderArea({{}, helloVk.getSize()});
cmdBuff.beginRenderPass(postRenderPassBeginInfo, vk::SubpassContents::eInline);
// Rendering tonemapper
helloVk.drawPost(cmdBuff);
// Rendering UI
ImGui::RenderDrawDataVK(cmdBuff, ImGui::GetDrawData());
cmdBuff.endRenderPass();
}
// Submit for display
cmdBuff.end();
helloVk.submitFrame();
}
// Cleanup
helloVk.getDevice().waitIdle();
helloVk.destroyResources();
helloVk.destroy();
vkctx.m_instance.destroySurfaceKHR(surface);
vkctx.deinit();
glfwDestroyWindow(window);
glfwTerminate();
return 0;
}

View file

@ -0,0 +1,79 @@
#version 450
#extension GL_ARB_separate_shader_objects : enable
#extension GL_EXT_nonuniform_qualifier : enable
#extension GL_GOOGLE_include_directive : enable
#extension GL_EXT_scalar_block_layout : enable
#include "wavefront.glsl"
layout(push_constant) uniform shaderInformation
{
vec3 lightPosition;
uint instanceId;
float lightIntensity;
int lightType;
}
pushC;
// clang-format off
// Incoming
//layout(location = 0) flat in int matIndex;
layout(location = 1) in vec2 fragTexCoord;
layout(location = 2) in vec3 fragNormal;
layout(location = 3) in vec3 viewDir;
layout(location = 4) in vec3 worldPos;
// Outgoing
layout(location = 0) out vec4 outColor;
// Buffers
layout(binding = 1, scalar) buffer MatColorBufferObject { WaveFrontMaterial m[]; } materials[];
layout(binding = 2, scalar) buffer ScnDesc { sceneDesc i[]; } scnDesc;
layout(binding = 3) uniform sampler2D[] textureSamplers;
layout(binding = 4, scalar) buffer MatIndex { int i[]; } matIdx[];
// clang-format on
void main()
{
// Object of this instance
int objId = scnDesc.i[pushC.instanceId].objId;
// Material of the object
int matIndex = matIdx[objId].i[gl_PrimitiveID];
WaveFrontMaterial mat = materials[objId].m[matIndex];
vec3 N = normalize(fragNormal);
// Vector toward light
vec3 L;
float lightIntensity = pushC.lightIntensity;
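// Point light: intensity falls off with the squared distance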
if(pushC.lightType == 0)
{
vec3 lDir = pushC.lightPosition - worldPos;
float d = length(lDir);
lightIntensity = pushC.lightIntensity / (d * d);
L = normalize(lDir);
}
else
{
L = normalize(pushC.lightPosition - vec3(0));
}
// Diffuse
vec3 diffuse = computeDiffuse(mat, L, N);
if(mat.textureId >= 0)
{
int txtOffset = scnDesc.i[pushC.instanceId].txtOffset;
uint txtId = txtOffset + mat.textureId;
vec3 diffuseTxt = texture(textureSamplers[txtId], fragTexCoord).xyz;
diffuse *= diffuseTxt;
}
// Specular
vec3 specular = computeSpecular(mat, viewDir, L, N);
// Result
outColor = vec4(lightIntensity * (diffuse + specular), 1);
}

View file

@ -0,0 +1,15 @@
#version 450
layout (location = 0) out vec2 outUV;
out gl_PerVertex
{
vec4 gl_Position;
};
void main()
{
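// Fullscreen-triangle trick: vertex indices 0,1,2 produce UVs (0,0), (2,0), (0,2),
// so a single triangle covers the whole viewport without any vertex buffer.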
outUV = vec2((gl_VertexIndex << 1) & 2, gl_VertexIndex & 2);
gl_Position = vec4(outUV * 2.0f - 1.0f, 1.0f, 1.0f);
}

View file

@ -0,0 +1,18 @@
#version 450
layout(location = 0) in vec2 outUV;
layout(location = 0) out vec4 fragColor;
layout(set = 0, binding = 0) uniform sampler2D noisyTxt;
layout(push_constant) uniform shaderInformation
{
float aspectRatio;
}
pushc;
void main()
{
vec2 uv = outUV;
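// Apply a simple gamma correction (1/2.2) when writing the linear offscreen image to the swapchain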
float gamma = 1. / 2.2;
fragColor = pow(texture(noisyTxt, uv).rgba, vec4(gamma));
}

View file

@ -0,0 +1,4 @@
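// Payload carried by the primary rays between the ray generation, closest-hit and miss shaders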
struct hitPayload
{
vec3 hitValue;
};

View file

@ -0,0 +1,132 @@
#version 460
#extension GL_EXT_ray_tracing : require
#extension GL_EXT_nonuniform_qualifier : enable
#extension GL_EXT_scalar_block_layout : enable
#extension GL_GOOGLE_include_directive : enable
#include "raycommon.glsl"
#include "wavefront.glsl"
hitAttributeEXT vec3 attribs;
// clang-format off
layout(location = 0) rayPayloadInEXT hitPayload prd;
layout(location = 1) rayPayloadEXT bool isShadowed;
layout(binding = 0, set = 0) uniform accelerationStructureEXT topLevelAS;
layout(binding = 1, set = 1, scalar) buffer MatColorBufferObject { WaveFrontMaterial m[]; } materials[];
layout(binding = 2, set = 1, scalar) buffer ScnDesc { sceneDesc i[]; } scnDesc;
layout(binding = 3, set = 1) uniform sampler2D textureSamplers[];
layout(binding = 4, set = 1) buffer MatIndexColorBuffer { int i[]; } matIndex[];
layout(binding = 5, set = 1, scalar) buffer Vertices { Vertex v[]; } vertices[];
layout(binding = 6, set = 1) buffer Indices { uint i[]; } indices[];
// clang-format on
layout(push_constant) uniform Constants
{
vec4 clearColor;
vec3 lightPosition;
float lightIntensity;
int lightType;
}
pushC;
void main()
{
// Object of this instance
uint objId = scnDesc.i[gl_InstanceID].objId;
// Indices of the triangle
ivec3 ind = ivec3(indices[objId].i[3 * gl_PrimitiveID + 0], //
indices[objId].i[3 * gl_PrimitiveID + 1], //
indices[objId].i[3 * gl_PrimitiveID + 2]); //
// Vertex of the triangle
Vertex v0 = vertices[objId].v[ind.x];
Vertex v1 = vertices[objId].v[ind.y];
Vertex v2 = vertices[objId].v[ind.z];
const vec3 barycentrics = vec3(1.0 - attribs.x - attribs.y, attribs.x, attribs.y);
// Computing the normal at hit position
vec3 normal = v0.nrm * barycentrics.x + v1.nrm * barycentrics.y + v2.nrm * barycentrics.z;
// Transforming the normal to world space
normal = normalize(vec3(scnDesc.i[gl_InstanceID].transfoIT * vec4(normal, 0.0)));
// Computing the coordinates of the hit position
vec3 worldPos = v0.pos * barycentrics.x + v1.pos * barycentrics.y + v2.pos * barycentrics.z;
// Transforming the position to world space
worldPos = vec3(scnDesc.i[gl_InstanceID].transfo * vec4(worldPos, 1.0));
// Vector toward the light
vec3 L;
float lightIntensity = pushC.lightIntensity;
float lightDistance = 100000.0;
// Point light
if(pushC.lightType == 0)
{
vec3 lDir = pushC.lightPosition - worldPos;
lightDistance = length(lDir);
lightIntensity = pushC.lightIntensity / (lightDistance * lightDistance);
L = normalize(lDir);
}
else // Directional light
{
L = normalize(pushC.lightPosition - vec3(0));
}
// Material of the object
int matIdx = matIndex[objId].i[gl_PrimitiveID];
WaveFrontMaterial mat = materials[objId].m[matIdx];
// Diffuse
vec3 diffuse = computeDiffuse(mat, L, normal);
if(mat.textureId >= 0)
{
uint txtId = mat.textureId + scnDesc.i[gl_InstanceID].txtOffset;
vec2 texCoord =
v0.texCoord * barycentrics.x + v1.texCoord * barycentrics.y + v2.texCoord * barycentrics.z;
diffuse *= texture(textureSamplers[txtId], texCoord).xyz;
}
vec3 specular = vec3(0);
float attenuation = 1;
// Tracing shadow ray only if the light is visible from the surface
if(dot(normal, L) > 0)
{
float tMin = 0.001;
float tMax = lightDistance;
vec3 origin = gl_WorldRayOriginEXT + gl_WorldRayDirectionEXT * gl_HitTEXT;
vec3 rayDir = L;
uint flags = gl_RayFlagsTerminateOnFirstHitEXT | gl_RayFlagsOpaqueEXT
| gl_RayFlagsSkipClosestHitShaderEXT;
isShadowed = true;
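// With these flags the closest-hit stage is skipped: only the shadow miss shader (missIndex = 1)
// runs and sets isShadowed back to false when nothing blocks the light.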
traceRayEXT(topLevelAS, // acceleration structure
flags, // rayFlags
0xFF, // cullMask
0, // sbtRecordOffset
0, // sbtRecordStride
1, // missIndex
origin, // ray origin
tMin, // ray min range
rayDir, // ray direction
tMax, // ray max range
1 // payload (location = 1)
);
if(isShadowed)
{
attenuation = 0.3;
}
else
{
// Specular
specular = computeSpecular(mat, gl_WorldRayDirectionEXT, L, normal);
}
}
prd.hitValue = vec3(lightIntensity * attenuation * (diffuse + specular));
}

View file

@ -0,0 +1,48 @@
#version 460
#extension GL_EXT_ray_tracing : require
#extension GL_GOOGLE_include_directive : enable
#include "raycommon.glsl"
layout(binding = 0, set = 0) uniform accelerationStructureEXT topLevelAS;
layout(binding = 1, set = 0, rgba32f) uniform image2D image;
layout(location = 0) rayPayloadEXT hitPayload prd;
layout(binding = 0, set = 1) uniform CameraProperties
{
mat4 view;
mat4 proj;
mat4 viewInverse;
mat4 projInverse;
}
cam;
void main()
{
const vec2 pixelCenter = vec2(gl_LaunchIDEXT.xy) + vec2(0.5);
const vec2 inUV = pixelCenter / vec2(gl_LaunchSizeEXT.xy);
vec2 d = inUV * 2.0 - 1.0;
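// Reconstruct the camera ray: un-project the pixel (in NDC) with the inverse projection matrix,
// then transform origin and direction to world space with the inverse view matrix.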
vec4 origin = cam.viewInverse * vec4(0, 0, 0, 1);
vec4 target = cam.projInverse * vec4(d.x, d.y, 1, 1);
vec4 direction = cam.viewInverse * vec4(normalize(target.xyz), 0);
uint rayFlags = gl_RayFlagsOpaqueEXT;
float tMin = 0.001;
float tMax = 10000.0;
traceRayEXT(topLevelAS, // acceleration structure
rayFlags, // rayFlags
0xFF, // cullMask
0, // sbtRecordOffset
0, // sbtRecordStride
0, // missIndex
origin.xyz, // ray origin
tMin, // ray min range
direction.xyz, // ray direction
tMax, // ray max range
0 // payload (location = 0)
);
imageStore(image, ivec2(gl_LaunchIDEXT.xy), vec4(prd.hitValue, 1.0));
}

View file

@ -0,0 +1,16 @@
#version 460
#extension GL_EXT_ray_tracing : require
#extension GL_GOOGLE_include_directive : enable
#include "raycommon.glsl"
layout(location = 0) rayPayloadInEXT hitPayload prd;
layout(push_constant) uniform Constants
{
vec4 clearColor;
};
void main()
{
prd.hitValue = clearColor.xyz * 0.8;
}

View file

@ -0,0 +1,9 @@
#version 460
#extension GL_EXT_ray_tracing : require
layout(location = 1) rayPayloadInEXT bool isShadowed;
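// Reaching the miss shader means the shadow ray hit nothing: the point is lit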
void main()
{
isShadowed = false;
}

View file

@ -0,0 +1,158 @@
//#include "raycommon.hlsl"
//#include "wavefront.hlsl"
struct MyAttrib
{
float3 attribs;
};
struct Payload
{
bool isShadowed;
};
[[vk::binding(0,0)]] RaytracingAccelerationStructure topLevelAS;
[[vk::binding(2,1)]] RWStructuredBuffer<sceneDesc> scnDesc;
[[vk::binding(5,1)]] StructuredBuffer<Vertex> vertices[];
[[vk::binding(6,1)]] StructuredBuffer<uint> indices[];
[[vk::binding(1,1)]] StructuredBuffer<WaveFrontMaterial> materials[];
[[vk::binding(3,1)]] Texture2D textures[];
[[vk::binding(3,1)]] SamplerState samplers[];
[[vk::binding(4,1)]] StructuredBuffer<int> matIndex[];
// clang-format on
struct Constants
{
float4 clearColor;
float3 lightPosition;
float lightIntensity;
int lightType;
};
[[vk::push_constant]] Constants pushC;
[shader("closesthit")]
void main(inout hitPayload prd, in MyAttrib attr)
{
// Object of this instance
uint objId = scnDesc[InstanceIndex()].objId;
// Indices of the triangle
int3 ind = int3(indices[objId][3 * PrimitiveIndex() + 0],
indices[objId][3 * PrimitiveIndex() + 1],
indices[objId][3 * PrimitiveIndex() + 2]);
// Vertex of the triangle
Vertex v0 = vertices[objId][ind.x];
Vertex v1 = vertices[objId][ind.y];
Vertex v2 = vertices[objId][ind.z];
const float3 barycentrics = float3(1.0 - attr.attribs.x - attr.attribs.y, attr.attribs.x, attr.attribs.y);
// Computing the normal at hit position
float3 normal = v0.nrm * barycentrics.x + v1.nrm * barycentrics.y + v2.nrm * barycentrics.z;
// Transforming the normal to world space
normal = normalize(mul(scnDesc[InstanceIndex()].transfoIT, float4(normal, 0.0)).xyz);
// Computing the coordinates of the hit position
float3 worldPos = v0.pos * barycentrics.x + v1.pos * barycentrics.y + v2.pos * barycentrics.z;
// Transforming the position to world space
worldPos = mul(scnDesc[InstanceIndex()].transfo, float4(worldPos, 1.0)).xyz;
// Vector toward the light
float3 L;
float lightIntensity = pushC.lightIntensity;
float lightDistance = 100000.0;
// Point light
if(pushC.lightType == 0)
{
float3 lDir = pushC.lightPosition - worldPos;
lightDistance = length(lDir);
lightIntensity = pushC.lightIntensity / (lightDistance * lightDistance);
L = normalize(lDir);
}
else // Directional light
{
L = normalize(pushC.lightPosition - float3(0,0,0));
}
// Material of the object
int matIdx = matIndex[objId][PrimitiveIndex()];
WaveFrontMaterial mat = materials[objId][matIdx];
// Diffuse
float3 diffuse = computeDiffuse(mat, L, normal);
if(mat.textureId >= 0)
{
uint txtId = mat.textureId + scnDesc[InstanceIndex()].txtOffset;
float2 texCoord = v0.texCoord * barycentrics.x + v1.texCoord * barycentrics.y + v2.texCoord * barycentrics.z;
diffuse *= textures[txtId].SampleLevel(samplers[txtId], texCoord, 0).xyz;
}
float3 specular = float3(0,0,0);
float attenuation = 1;
// Tracing shadow ray only if the light is visible from the surface
if(dot(normal, L) > 0)
{
float tMin = 0.001;
float tMax = lightDistance;
float3 origin = WorldRayOrigin() + WorldRayDirection() * RayTCurrent();
float3 rayDir = L;
uint flags =
RAY_FLAG_ACCEPT_FIRST_HIT_AND_END_SEARCH |
RAY_FLAG_FORCE_OPAQUE |
RAY_FLAG_SKIP_CLOSEST_HIT_SHADER;
RayDesc desc;
desc.Origin = origin;
desc.Direction = rayDir;
desc.TMin = tMin;
desc.TMax = tMax;
Payload shadowPayload;
shadowPayload.isShadowed = true;
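// Miss shader index 1 (shadow miss) resets isShadowed when no geometry is hit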
TraceRay(topLevelAS,
flags,
0xFF,
0,
0,
1,
desc,
shadowPayload
);
if(shadowPayload.isShadowed)
{
attenuation = 0.9;
}
else
{
// Specular
specular = computeSpecular(mat, WorldRayDirection(), L, normal);
}
}
prd.hitValue = float3(lightIntensity * attenuation * (diffuse + specular));
}

View file

@ -0,0 +1,61 @@
#version 450
#extension GL_ARB_separate_shader_objects : enable
#extension GL_EXT_scalar_block_layout : enable
#extension GL_GOOGLE_include_directive : enable
#include "wavefront.glsl"
// clang-format off
layout(binding = 2, set = 0, scalar) buffer ScnDesc { sceneDesc i[]; } scnDesc;
// clang-format on
layout(binding = 0) uniform UniformBufferObject
{
mat4 view;
mat4 proj;
mat4 viewI;
}
ubo;
layout(push_constant) uniform shaderInformation
{
vec3 lightPosition;
uint instanceId;
float lightIntensity;
int lightType;
}
pushC;
layout(location = 0) in vec3 inPosition;
layout(location = 1) in vec3 inNormal;
layout(location = 2) in vec3 inColor;
layout(location = 3) in vec2 inTexCoord;
//layout(location = 0) flat out int matIndex;
layout(location = 1) out vec2 fragTexCoord;
layout(location = 2) out vec3 fragNormal;
layout(location = 3) out vec3 viewDir;
layout(location = 4) out vec3 worldPos;
out gl_PerVertex
{
vec4 gl_Position;
};
void main()
{
mat4 objMatrix = scnDesc.i[pushC.instanceId].transfo;
mat4 objMatrixIT = scnDesc.i[pushC.instanceId].transfoIT;
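// transfoIT (inverse-transpose of the object matrix) keeps normals correct under non-uniform scaling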
vec3 origin = vec3(ubo.viewI * vec4(0, 0, 0, 1));
worldPos = vec3(objMatrix * vec4(inPosition, 1.0));
viewDir = vec3(worldPos - origin);
fragTexCoord = inTexCoord;
fragNormal = vec3(objMatrixIT * vec4(inNormal, 0.0));
// matIndex = inMatID;
gl_Position = ubo.proj * ubo.view * vec4(worldPos, 1.0);
}

View file

@ -0,0 +1,57 @@
struct Vertex
{
vec3 pos;
vec3 nrm;
vec3 color;
vec2 texCoord;
};
struct WaveFrontMaterial
{
vec3 ambient;
vec3 diffuse;
vec3 specular;
vec3 transmittance;
vec3 emission;
float shininess;
float ior; // index of refraction
float dissolve; // 1 == opaque; 0 == fully transparent
int illum; // illumination model (see http://www.fileformat.info/format/material/)
int textureId;
};
struct sceneDesc
{
int objId;
int txtOffset;
mat4 transfo;
mat4 transfoIT;
};
vec3 computeDiffuse(WaveFrontMaterial mat, vec3 lightDir, vec3 normal)
{
// Lambertian
float dotNL = max(dot(normal, lightDir), 0.0);
vec3 c = mat.diffuse * dotNL;
if(mat.illum >= 1)
return c + mat.ambient;
return c;  // illum == 0: diffuse term only, no ambient
}
vec3 computeSpecular(WaveFrontMaterial mat, vec3 viewDir, vec3 lightDir, vec3 normal)
{
if(mat.illum < 2)
return vec3(0);
// Compute specular only if not in shadow
const float kPi = 3.14159265;
const float kShininess = max(mat.shininess, 4.0);
// Specular
const float kEnergyConservation = (2.0 + kShininess) / (2.0 * kPi);
vec3 V = normalize(-viewDir);
vec3 R = reflect(-lightDir, normal);
float specular = kEnergyConservation * pow(max(dot(V, R), 0.0), kShininess);
return vec3(mat.specular * specular);
}