Refactoring

mklefrancois 2021-09-07 09:42:21 +02:00
parent 3e399adf0a
commit d90ce79135
222 changed files with 9045 additions and 5734 deletions


@@ -0,0 +1,80 @@
#*****************************************************************************
# Copyright 2020 NVIDIA Corporation. All rights reserved.
#*****************************************************************************
cmake_minimum_required(VERSION 3.9.6 FATAL_ERROR)
#--------------------------------------------------------------------------------------------------
# Project setting
get_filename_component(PROJNAME ${CMAKE_CURRENT_SOURCE_DIR} NAME)
set(PROJNAME vk_${PROJNAME}_KHR)
project(${PROJNAME} LANGUAGES C CXX)
message(STATUS "-------------------------------")
message(STATUS "Processing Project ${PROJNAME}:")
#--------------------------------------------------------------------------------------------------
# C++ target and defines
set(CMAKE_CXX_STANDARD 17)
add_executable(${PROJNAME})
_add_project_definitions(${PROJNAME})
#--------------------------------------------------------------------------------------------------
# Source files for this project
#
file(GLOB SOURCE_FILES *.cpp *.hpp *.inl *.h *.c)
file(GLOB EXTRA_COMMON ${TUTO_KHR_DIR}/common/*.*)
list(APPEND COMMON_SOURCE_FILES ${EXTRA_COMMON})
include_directories(${TUTO_KHR_DIR}/common)
#--------------------------------------------------------------------------------------------------
# GLSL to SPIR-V custom build
compile_glsl_directory(
SRC "${CMAKE_CURRENT_SOURCE_DIR}/shaders"
DST "${CMAKE_CURRENT_SOURCE_DIR}/spv"
VULKAN_TARGET "vulkan1.2"
DEPENDENCY ${VULKAN_BUILD_DEPENDENCIES}
)
#--------------------------------------------------------------------------------------------------
# Sources
target_sources(${PROJNAME} PUBLIC ${SOURCE_FILES} ${HEADER_FILES})
target_sources(${PROJNAME} PUBLIC ${COMMON_SOURCE_FILES})
target_sources(${PROJNAME} PUBLIC ${PACKAGE_SOURCE_FILES})
target_sources(${PROJNAME} PUBLIC ${GLSL_SOURCES} ${GLSL_HEADERS})
#--------------------------------------------------------------------------------------------------
# Sub-folders in Visual Studio
#
source_group("Common" FILES ${COMMON_SOURCE_FILES} ${PACKAGE_SOURCE_FILES})
source_group("Sources" FILES ${SOURCE_FILES})
source_group("Headers" FILES ${HEADER_FILES})
source_group("Shader Sources" FILES ${GLSL_SOURCES})
source_group("Shader Headers" FILES ${GLSL_HEADERS})
#--------------------------------------------------------------------------------------------------
# Linkage
#
target_link_libraries(${PROJNAME} ${PLATFORM_LIBRARIES} nvpro_core)
foreach(DEBUGLIB ${LIBRARIES_DEBUG})
target_link_libraries(${PROJNAME} debug ${DEBUGLIB})
endforeach(DEBUGLIB)
foreach(RELEASELIB ${LIBRARIES_OPTIMIZED})
target_link_libraries(${PROJNAME} optimized ${RELEASELIB})
endforeach(RELEASELIB)
#--------------------------------------------------------------------------------------------------
# copies binaries that need to be put next to the exe files (ZLib, etc.)
#
_finalize_target( ${PROJNAME} )
install(FILES ${SPV_OUTPUT} CONFIGURATIONS Release DESTINATION "bin_${ARCH}/${PROJNAME}/spv")
install(FILES ${SPV_OUTPUT} CONFIGURATIONS Debug DESTINATION "bin_${ARCH}_debug/${PROJNAME}/spv")


@@ -0,0 +1,260 @@
# Motion Blur
![](images/motionblur.png)
This is an extension of the Vulkan ray tracing [tutorial](https://nvpro-samples.github.io/vk_raytracing_tutorial_KHR).
If you haven't compiled it before, here is the [setup](../docs/setup.md).
## VK_NV_ray_tracing_motion_blur
This sample shows the usage of the [motion blur extension](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VK_NV_ray_tracing_motion_blur.html). Compared to the original sample, we will do the following:
* Use the trace call with a time parameter.
* Use the various flags to enable motion support in an acceleration structure.
* Support time-varying vertex positions in a geometry.
* Add motion over time to instances, using both matrix motion and scaling, shearing, rotation, and translation (SRT) motion, while keeping others static.
Defining an animation works by describing the state of the scene at a start time, T0, and an end time, T1. For instance, T0 could be the start of a frame and T1 the end of the frame. Rays can then be traced at any intermediate time, such as t = 0.5, halfway through the frame, and motion blur is obtained by choosing a random t for each ray.
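Conceptually, at the ray's time t the implementation interpolates the animated data between the two keyframes: vertex motion and matrix motion interpolate component-wise, while SRT motion interpolates its individual components before composing the transform. As a rough illustration only (not code from this sample), a matrix-motion transform evaluated at time t would look like this:
````C
// Rough illustration: a matrix-motion transform at time t in [0, 1] is the
// component-wise linear interpolation of the two keyframe matrices.
VkTransformMatrixKHR interpolateTransform(const VkTransformMatrixKHR& t0, const VkTransformMatrixKHR& t1, float t)
{
  VkTransformMatrixKHR result{};
  for(int row = 0; row < 3; row++)
    for(int col = 0; col < 4; col++)
      result.matrix[row][col] = (1.0f - t) * t0.matrix[row][col] + t * t1.matrix[row][col];
  return result;
}
````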
## Enabling Motion Blur
### Extensions
In main.cpp, we add the device extension `VK_NV_ray_tracing_motion_blur` and enable all features.
```` C
// #NV_Motion_blur
VkPhysicalDeviceRayTracingMotionBlurFeaturesNV rtMotionBlurFeatures{VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_MOTION_BLUR_FEATURES_NV};
contextInfo.addDeviceExtension(VK_NV_RAY_TRACING_MOTION_BLUR_EXTENSION_NAME, false, &rtMotionBlurFeatures); // Required for motion blur
````
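The nvvk context queries and fills the chained feature structure during device creation, so we can verify support afterwards. Below is a minimal sketch; the exact placement (after `vkctx.initDevice()`) and the fallback behavior are assumptions to adapt as needed:
````C
// Sketch: after device creation the chained feature struct has been queried;
// if the feature is absent, the motion blur path cannot be used.
if(rtMotionBlurFeatures.rayTracingMotionBlur == VK_FALSE)
{
  printf("VK_NV_ray_tracing_motion_blur is not supported on this device\n");
  return 1;
}
````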
### Pipeline
When creating the ray tracing pipeline, the VkRayTracingPipelineCreateInfoKHR struct's flags must include `VK_PIPELINE_CREATE_RAY_TRACING_ALLOW_MOTION_BIT_NV`.
```` C
rayPipelineInfo.flags = VK_PIPELINE_CREATE_RAY_TRACING_ALLOW_MOTION_BIT_NV;
````
### Scene Objects
We will use the following four models. The later sections will add matrix motion to one instance of the cube_multi.obj model and SRT motion to another,
while the plane.obj instance will stay static. The third and fourth models are the keyframes for a vertex animation: cube.obj is the
cube at time 0 (T0), and cube_modif.obj is the cube at time 1 (T1).
```` C
// Creation of the example
helloVk.loadModel(nvh::findFile("media/scenes/cube_multi.obj", defaultSearchPaths, true));
helloVk.loadModel(nvh::findFile("media/scenes/plane.obj", defaultSearchPaths, true));
helloVk.loadModel(nvh::findFile("media/scenes/cube.obj", defaultSearchPaths, true));
helloVk.loadModel(nvh::findFile("media/scenes/cube_modif.obj", defaultSearchPaths, true));
````
## Vertex Varying Motion
As seen in the picture, the vertices of the left green cube change positions over time.
We specify this by providing the BLAS builder with vertex positions for both keyframes. Setting the geometry at T0
is done the same way as before. To add the destination keyframe at T1, we make the
`VkAccelerationStructureGeometryTrianglesDataKHR` structure's `pNext` field point to a
`VkAccelerationStructureGeometryMotionTrianglesDataNV` structure. Additionally, we must add
`VK_BUILD_ACCELERATION_STRUCTURE_MOTION_BIT_NV` to the BLAS build info flags.
First, we add cube_multi and the plane. The cube_multi object's geometry doesn't animate,
but its transform does, so we will set up its animation in the TLAS in the Instance Motion section.
````C
void HelloVulkan::createBottomLevelAS()
{
// Static geometries
std::vector<nvvk::RaytracingBuilderKHR::BlasInput> allBlas;
allBlas.emplace_back(objectToVkGeometryKHR(m_objModel[0]));
allBlas.emplace_back(objectToVkGeometryKHR(m_objModel[1]));
````
Then we add the cube along with its motion information: the reference to the geometry at T1 and the flag
indicating that this geometry has motion.
````C
// Animated geometry
allBlas.emplace_back(objectToVkGeometryKHR(m_objModel[2]));
// Adding the m_objModel[3] as the destination of m_objModel[2]
VkAccelerationStructureGeometryMotionTrianglesDataNV motionTriangles{
VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_MOTION_TRIANGLES_DATA_NV};
motionTriangles.vertexData.deviceAddress = nvvk::getBufferDeviceAddress(m_device, m_objModel[3].vertexBuffer.buffer);
allBlas[2].asGeometry[0].geometry.triangles.pNext = &motionTriangles;
// Telling that this geometry has motion
allBlas[2].flags = VK_BUILD_ACCELERATION_STRUCTURE_MOTION_BIT_NV;
````
Building all the BLAS stays the same. Note that `motionTriangles` must remain in scope until `buildBlas` has consumed the geometry description.
````C
m_rtBuilder.buildBlas(allBlas, VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_KHR);
}
````
## Instance Motion
Instance motion describes motion in the TLAS, where objects move as a whole. There are 3 types:
* Static
* Matrix motion
* SRT motion
The array of instances uses [`VkAccelerationStructureMotionInstanceNV`](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkAccelerationStructureMotionInstanceNV.html) instead of `VkAccelerationStructureInstanceKHR`.
````C
std::vector<VkAccelerationStructureMotionInstanceNVPad> tlas;
````
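The `...NVPad` variant is a small helper from nvpro_core: the Vulkan motion instance structure is 152 bytes, but the instance buffer of a motion TLAS is laid out with a 160-byte stride, so the helper pads it. Roughly (the exact definition lives in nvvk's `raytraceKHR_vk.hpp` and may differ slightly):
````C
// Sketch of the padded wrapper: motion instances use a 160-byte stride,
// while the raw Vulkan struct is only 152 bytes.
struct VkAccelerationStructureMotionInstanceNVPad : VkAccelerationStructureMotionInstanceNV
{
  uint64_t _pad{0};
};
static_assert(sizeof(VkAccelerationStructureMotionInstanceNVPad) == 160, "Motion instance stride must be 160 bytes");
````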
### Matrix Motion
For matrix motion, we fill the [`VkAccelerationStructureMatrixMotionInstanceNV`](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkAccelerationStructureMatrixMotionInstanceNV.html) structure.
```` C
// Cube (moving/matrix translation)
objId = 0;
{
// Position of the instance at T0 and T1
nvmath::mat4f matT0(1); // Identity
nvmath::mat4f matT1 = nvmath::translation_mat4(nvmath::vec3f(0.30f, 0.0f, 0.0f));
VkAccelerationStructureMatrixMotionInstanceNV data;
data.transformT0 = nvvk::toTransformMatrixKHR(matT0);
data.transformT1 = nvvk::toTransformMatrixKHR(matT1);
data.instanceCustomIndex = objId; // gl_InstanceCustomIndexEXT
data.accelerationStructureReference = m_rtBuilder.getBlasDeviceAddress(m_objInstance[objId].objIndex);
data.instanceShaderBindingTableRecordOffset = 0; // We will use the same hit group for all objects
data.flags = VK_GEOMETRY_INSTANCE_TRIANGLE_FACING_CULL_DISABLE_BIT_KHR;
data.mask = 0xFF;
VkAccelerationStructureMotionInstanceNVPad rayInst;
rayInst.type = VK_ACCELERATION_STRUCTURE_MOTION_INSTANCE_TYPE_MATRIX_MOTION_NV;
rayInst.data.matrixMotionInstance = data;
tlas.emplace_back(rayInst);
}
````
### SRT Motion
SRT motion uses the [`VkAccelerationStructureSRTMotionInstanceNV`](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkAccelerationStructureSRTMotionInstanceNV.html)
structure, which interpolates between two [`VkSRTDataNV`](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkSRTDataNV.html) structures.
````C
// Cube (moving/SRT rotation)
objId = 0;
{
nvmath::quatf rot;
rot.from_euler_xyz({0, 0, 0});
// Position of the instance at T0 and T1
VkSRTDataNV matT0{}; // Translated to 0,0,2
matT0.sx = 1.0f;
matT0.sy = 1.0f;
matT0.sz = 1.0f;
matT0.tz = 2.0f;
matT0.qx = rot.x;
matT0.qy = rot.y;
matT0.qz = rot.z;
matT0.qw = rot.w;
VkSRTDataNV matT1 = matT0; // Setting a rotation
rot.from_euler_xyz({deg2rad(10.0f), deg2rad(30.0f), 0.0f});
matT1.qx = rot.x;
matT1.qy = rot.y;
matT1.qz = rot.z;
matT1.qw = rot.w;
VkAccelerationStructureSRTMotionInstanceNV data{};
data.transformT0 = matT0;
data.transformT1 = matT1;
data.instanceCustomIndex = objId; // gl_InstanceCustomIndexEXT
data.accelerationStructureReference = m_rtBuilder.getBlasDeviceAddress(m_objInstance[objId].objIndex);
data.instanceShaderBindingTableRecordOffset = 0; // We will use the same hit group for all objects
data.flags = VK_GEOMETRY_INSTANCE_TRIANGLE_FACING_CULL_DISABLE_BIT_KHR;
data.mask = 0xFF;
VkAccelerationStructureMotionInstanceNVPad rayInst;
rayInst.type = VK_ACCELERATION_STRUCTURE_MOTION_INSTANCE_TYPE_SRT_MOTION_NV;
rayInst.data.srtMotionInstance = data;
tlas.emplace_back(rayInst);
}
````
### Static
Static instances use the same structure as we normally use with static scenes, `VkAccelerationStructureInstanceKHR`.
```` C
// Plane (static)
objId = 1;
{
nvmath::mat4f matT0 = nvmath::translation_mat4(nvmath::vec3f(0, -1, 0));
VkAccelerationStructureInstanceKHR data{};
data.transform = nvvk::toTransformMatrixKHR(matT0); // Position of the instance
data.instanceCustomIndex = objId; // gl_InstanceCustomIndexEXT
data.accelerationStructureReference = m_rtBuilder.getBlasDeviceAddress(m_objInstance[objId].objIndex);
data.instanceShaderBindingTableRecordOffset = 0; // We will use the same hit group for all objects
data.flags = VK_GEOMETRY_INSTANCE_TRIANGLE_FACING_CULL_DISABLE_BIT_KHR;
data.mask = 0xFF;
VkAccelerationStructureMotionInstanceNVPad rayInst;
rayInst.type = VK_ACCELERATION_STRUCTURE_MOTION_INSTANCE_TYPE_STATIC_NV;
rayInst.data.staticInstance = data;
tlas.emplace_back(rayInst);
}
````
### Building
The build call is similar; only the flags change.
````C
m_rtBuilder.buildTlas(tlas, VK_BUILD_ACCELERATION_STRUCTURE_MOTION_BIT_NV, false, true);
````
## Shader
In the shader, we enable the `GL_NV_ray_tracing_motion_blur` extension.
```` C
#extension GL_NV_ray_tracing_motion_blur : require
````
Then we call `traceRayMotionNV` instead of `traceRayEXT`. The additional `time` argument varies between 0 and 1.
````C
traceRayMotionNV(topLevelAS, // acceleration structure
rayFlags, // rayFlags
0xFF, // cullMask
0, // sbtRecordOffset
0, // sbtRecordStride
0, // missIndex
origin.xyz, // ray origin
tMin, // ray min range
direction.xyz, // ray direction
tMax, // ray max range
time, // time
0 // payload (location = 0)
);
````
## Other
We reuse the technique from the [jitter cam](../ray_tracing_jitter_cam) tutorial to sample time randomly.
Using a random time value for each pixel at each frame gives a nicer look when accumulated over time than using a single time per frame.
Below is how stuttered motion would look instead.
![](images/rotary_disc_shutter.png)
https://en.wikipedia.org/wiki/Rotary_disc_shutter
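The time selection itself lives in `raytrace.rgen` (included in full further down in this change); switching to the commented-out line produces the stuttered look shown above:
````C
float time = rnd(seed);
// float time = float(smpl)/float(NBSAMPLES); // stuttered motion
````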
:warning: Using the motion blur pipeline with all instances static will be slower than using the static pipeline. Not by much, but for best performance it is better to use the appropriate pipeline.
:warning: Calling `traceRayEXT` from `raytrace.rchit` works: we get motion-blurred shadows without having to call `traceRayMotionNV` in the closest-hit shader. However, this works only if `traceRayEXT` is called within the execution of a motion trace call.

File diff suppressed because it is too large


@@ -0,0 +1,156 @@
/*
* Copyright (c) 2014-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include "nvvk/appbase_vk.hpp"
#include "nvvk/debug_util_vk.hpp"
#include "nvvk/descriptorsets_vk.hpp"
#include "nvvk/memallocator_dma_vk.hpp"
#include "nvvk/resourceallocator_vk.hpp"
#include "shaders/host_device.h"
// #VKRay
#include "nvvk/raytraceKHR_vk.hpp"
//--------------------------------------------------------------------------------------------------
// Simple rasterizer of OBJ objects
// - Each OBJ loaded is stored in an `ObjModel` and referenced by an `ObjInstance`
// - It is possible to have many `ObjInstance` referencing the same `ObjModel`
// - Rendering is done in an offscreen framebuffer
// - The image of the framebuffer is displayed in post-process in a full-screen quad
//
class HelloVulkan : public nvvk::AppBaseVk
{
public:
void setup(const VkInstance& instance, const VkDevice& device, const VkPhysicalDevice& physicalDevice, uint32_t queueFamily) override;
void createDescriptorSetLayout();
void createGraphicsPipeline();
void loadModel(const std::string& filename, nvmath::mat4f transform = nvmath::mat4f(1));
void updateDescriptorSet();
void createUniformBuffer();
void createObjDescriptionBuffer();
void createTextureImages(const VkCommandBuffer& cmdBuf, const std::vector<std::string>& textures);
void updateUniformBuffer(const VkCommandBuffer& cmdBuf);
void onResize(int /*w*/, int /*h*/) override;
void destroyResources();
void rasterize(const VkCommandBuffer& cmdBuff);
// The OBJ model
struct ObjModel
{
uint32_t nbIndices{0};
uint32_t nbVertices{0};
nvvk::Buffer vertexBuffer; // Device buffer of all 'Vertex'
nvvk::Buffer indexBuffer; // Device buffer of the indices forming triangles
nvvk::Buffer matColorBuffer; // Device buffer of array of 'Wavefront material'
nvvk::Buffer matIndexBuffer; // Device buffer of the per-face material indices
};
// Instance of the OBJ
struct ObjInstance
{
nvmath::mat4f transform; // Matrix of the instance
uint32_t objIndex{0}; // Model index reference
};
// Information pushed at each draw call
PushConstantRaster m_pcRaster{
{1}, // Identity matrix
{9.5f, 5.5f, -6.5f}, // light position
0, // instance Id
100.f, // light intensity
0 // light type
};
// Array of objects and instances in the scene
std::vector<ObjModel> m_objModel; // Model on host
std::vector<ObjDesc> m_objDesc; // Model description for device access
std::vector<ObjInstance> m_instances; // Scene model instances
// Graphic pipeline
VkPipelineLayout m_pipelineLayout;
VkPipeline m_graphicsPipeline;
nvvk::DescriptorSetBindings m_descSetLayoutBind;
VkDescriptorPool m_descPool;
VkDescriptorSetLayout m_descSetLayout;
VkDescriptorSet m_descSet;
nvvk::Buffer m_bGlobals; // Device-Host of the camera matrices
nvvk::Buffer m_bObjDesc; // Device buffer of the OBJ descriptions
std::vector<nvvk::Texture> m_textures; // vector of all textures of the scene
nvvk::ResourceAllocatorDma m_alloc; // Allocator for buffer, images, acceleration structures
nvvk::DebugUtil m_debug; // Utility to name objects
// #Post - Draw the rendered image on a quad using a tonemapper
void createOffscreenRender();
void createPostPipeline();
void createPostDescriptor();
void updatePostDescriptorSet();
void drawPost(VkCommandBuffer cmdBuf);
nvvk::DescriptorSetBindings m_postDescSetLayoutBind;
VkDescriptorPool m_postDescPool{VK_NULL_HANDLE};
VkDescriptorSetLayout m_postDescSetLayout{VK_NULL_HANDLE};
VkDescriptorSet m_postDescSet{VK_NULL_HANDLE};
VkPipeline m_postPipeline{VK_NULL_HANDLE};
VkPipelineLayout m_postPipelineLayout{VK_NULL_HANDLE};
VkRenderPass m_offscreenRenderPass{VK_NULL_HANDLE};
VkFramebuffer m_offscreenFramebuffer{VK_NULL_HANDLE};
nvvk::Texture m_offscreenColor;
nvvk::Texture m_offscreenDepth;
VkFormat m_offscreenColorFormat{VK_FORMAT_R32G32B32A32_SFLOAT};
VkFormat m_offscreenDepthFormat{VK_FORMAT_X8_D24_UNORM_PACK32};
// #VKRay
void initRayTracing();
auto objectToVkGeometryKHR(const ObjModel& model);
void createBottomLevelAS();
void createTopLevelAS();
void createRtDescriptorSet();
void updateRtDescriptorSet();
void createRtPipeline();
void createRtShaderBindingTable();
void raytrace(const VkCommandBuffer& cmdBuf, const nvmath::vec4f& clearColor);
void updateFrame();
void resetFrame();
VkPhysicalDeviceRayTracingPipelinePropertiesKHR m_rtProperties{VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_PROPERTIES_KHR};
nvvk::RaytracingBuilderKHR m_rtBuilder;
nvvk::DescriptorSetBindings m_rtDescSetLayoutBind;
VkDescriptorPool m_rtDescPool;
VkDescriptorSetLayout m_rtDescSetLayout;
VkDescriptorSet m_rtDescSet;
std::vector<VkRayTracingShaderGroupCreateInfoKHR> m_rtShaderGroups;
VkPipelineLayout m_rtPipelineLayout;
VkPipeline m_rtPipeline;
nvvk::Buffer m_rtSBTBuffer;
// Push constant for ray tracer
PushConstantRay m_pcRay{};
int m_maxFrames{1000};
};



@@ -0,0 +1,319 @@
/*
* Copyright (c) 2014-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION
* SPDX-License-Identifier: Apache-2.0
*/
// ImGui - standalone example application for Glfw + Vulkan, using programmable
// pipeline. If you are new to ImGui, see examples/README.txt and documentation
// at the top of imgui.cpp.
#include <array>
#include "backends/imgui_impl_glfw.h"
#include "imgui.h"
#include "hello_vulkan.h"
#include "imgui/imgui_camera_widget.h"
#include "nvh/cameramanipulator.hpp"
#include "nvh/fileoperations.hpp"
#include "nvpsystem.hpp"
#include "nvvk/commands_vk.hpp"
#include "nvvk/context_vk.hpp"
//////////////////////////////////////////////////////////////////////////
#define UNUSED(x) (void)(x)
//////////////////////////////////////////////////////////////////////////
// Default search path for shaders
std::vector<std::string> defaultSearchPaths;
// GLFW Callback functions
static void onErrorCallback(int error, const char* description)
{
fprintf(stderr, "GLFW Error %d: %s\n", error, description);
}
// Extra UI
void renderUI(HelloVulkan& helloVk)
{
bool changed{false};
ImGuiH::CameraWidget();
if(ImGui::CollapsingHeader("Light"))
{
auto& pc = helloVk.m_pcRaster;
changed |= ImGui::RadioButton("Point", &pc.lightType, 0);
ImGui::SameLine();
changed |= ImGui::RadioButton("Infinite", &pc.lightType, 1);
changed |= ImGui::SliderFloat3("Position", &pc.lightPosition.x, -20.f, 20.f);
changed |= ImGui::SliderFloat("Intensity", &pc.lightIntensity, 0.f, 150.f);
}
changed |= ImGui::SliderInt("Max Frames", &helloVk.m_maxFrames, 1, 100);
if(changed)
helloVk.resetFrame();
}
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
static int const SAMPLE_WIDTH = 1280;
static int const SAMPLE_HEIGHT = 720;
//--------------------------------------------------------------------------------------------------
// Application Entry
//
int main(int argc, char** argv)
{
UNUSED(argc);
// Setup GLFW window
glfwSetErrorCallback(onErrorCallback);
if(!glfwInit())
{
return 1;
}
glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
GLFWwindow* window = glfwCreateWindow(SAMPLE_WIDTH, SAMPLE_HEIGHT, PROJECT_NAME, nullptr, nullptr);
// Setup camera
CameraManip.setWindowSize(SAMPLE_WIDTH, SAMPLE_HEIGHT);
CameraManip.setLookat({3.445, 2.151, -2.098}, {0.435, -0.431, 0.705}, {0.000, 1.000, 0.000});
// Setup Vulkan
if(!glfwVulkanSupported())
{
printf("GLFW: Vulkan Not Supported\n");
return 1;
}
// setup some basic things for the sample, logging file for example
NVPSystem system(PROJECT_NAME);
// Search path for shaders and other media
defaultSearchPaths = {
NVPSystem::exePath() + PROJECT_RELDIRECTORY,
NVPSystem::exePath() + PROJECT_RELDIRECTORY "..",
std::string(PROJECT_NAME),
};
// Vulkan required extensions
assert(glfwVulkanSupported() == 1);
uint32_t count{0};
auto reqExtensions = glfwGetRequiredInstanceExtensions(&count);
// Requesting Vulkan extensions and layers
nvvk::ContextCreateInfo contextInfo;
contextInfo.setVersion(1, 2); // Using Vulkan 1.2
for(uint32_t ext_id = 0; ext_id < count; ext_id++) // Adding required extensions (surface, win32, linux, ..)
contextInfo.addInstanceExtension(reqExtensions[ext_id]);
contextInfo.addInstanceLayer("VK_LAYER_LUNARG_monitor", true); // FPS in titlebar
contextInfo.addInstanceExtension(VK_EXT_DEBUG_UTILS_EXTENSION_NAME, true); // Allow debug names
contextInfo.addDeviceExtension(VK_KHR_SWAPCHAIN_EXTENSION_NAME); // Enabling ability to present rendering
// #VKRay: Activate the ray tracing extension
VkPhysicalDeviceAccelerationStructureFeaturesKHR accelFeature{VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_FEATURES_KHR};
contextInfo.addDeviceExtension(VK_KHR_ACCELERATION_STRUCTURE_EXTENSION_NAME, false, &accelFeature); // To build acceleration structures
VkPhysicalDeviceRayTracingPipelineFeaturesKHR rtPipelineFeature{VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_FEATURES_KHR};
contextInfo.addDeviceExtension(VK_KHR_RAY_TRACING_PIPELINE_EXTENSION_NAME, false, &rtPipelineFeature); // To use vkCmdTraceRaysKHR
contextInfo.addDeviceExtension(VK_KHR_DEFERRED_HOST_OPERATIONS_EXTENSION_NAME); // Required by ray tracing pipeline
// #NV_Motion_blur
VkPhysicalDeviceRayTracingMotionBlurFeaturesNV rtMotionBlurFeatures{VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_MOTION_BLUR_FEATURES_NV};
contextInfo.addDeviceExtension(VK_NV_RAY_TRACING_MOTION_BLUR_EXTENSION_NAME, false, &rtMotionBlurFeatures); // Required for motion blur
// Creating Vulkan base application
nvvk::Context vkctx{};
vkctx.ignoreDebugMessage(0x79de34d4); // Missing Device Extension "VK_NV_ray_tracing_motion_blur"
vkctx.initInstance(contextInfo);
// Find all compatible devices
auto compatibleDevices = vkctx.getCompatibleDevices(contextInfo);
assert(!compatibleDevices.empty());
// Use a compatible device
vkctx.initDevice(compatibleDevices[0], contextInfo);
// Create example
HelloVulkan helloVk;
// Window needs to be opened to get the surface on which to draw
const VkSurfaceKHR surface = helloVk.getVkSurface(vkctx.m_instance, window);
vkctx.setGCTQueueWithPresent(surface);
helloVk.setup(vkctx.m_instance, vkctx.m_device, vkctx.m_physicalDevice, vkctx.m_queueGCT.familyIndex);
helloVk.createSwapchain(surface, SAMPLE_WIDTH, SAMPLE_HEIGHT);
helloVk.createDepthBuffer();
helloVk.createRenderPass();
helloVk.createFrameBuffers();
// Setup Imgui
helloVk.initGUI(0); // Using sub-pass 0
// Creation of the example
helloVk.loadModel(nvh::findFile("media/scenes/cube_multi.obj", defaultSearchPaths, true));
helloVk.loadModel(nvh::findFile("media/scenes/plane.obj", defaultSearchPaths, true));
helloVk.loadModel(nvh::findFile("media/scenes/cube.obj", defaultSearchPaths, true));
helloVk.loadModel(nvh::findFile("media/scenes/cube_modif.obj", defaultSearchPaths, true));
// Set the positions of the instances and re-point the last instance (originally cube_modif) to the cube_multi model instead
helloVk.m_instances[1].transform = nvmath::translation_mat4(nvmath::vec3f(0, -1, 0));
helloVk.m_instances[2].transform = nvmath::translation_mat4(nvmath::vec3f(2, 0, 2));
helloVk.m_instances[3].objIndex = 0;
helloVk.m_instances[3].transform = nvmath::translation_mat4(nvmath::vec3f(0, 0, 2)); // SRT - unused
helloVk.createOffscreenRender();
helloVk.createDescriptorSetLayout();
helloVk.createGraphicsPipeline();
helloVk.createUniformBuffer();
helloVk.createObjDescriptionBuffer();
helloVk.updateDescriptorSet();
// #VKRay
helloVk.initRayTracing();
helloVk.createBottomLevelAS();
helloVk.createTopLevelAS();
helloVk.createRtDescriptorSet();
helloVk.createRtPipeline();
helloVk.createRtShaderBindingTable();
helloVk.createPostDescriptor();
helloVk.createPostPipeline();
helloVk.updatePostDescriptorSet();
nvmath::vec4f clearColor = nvmath::vec4f(1, 1, 1, 1.00f);
bool useRaytracer = true;
helloVk.setupGlfwCallbacks(window);
ImGui_ImplGlfw_InitForVulkan(window, true);
// Main loop
while(!glfwWindowShouldClose(window))
{
glfwPollEvents();
if(helloVk.isMinimized())
continue;
// Start the Dear ImGui frame
ImGui_ImplGlfw_NewFrame();
ImGui::NewFrame();
// Show UI window.
if(helloVk.showGui())
{
ImGuiH::Panel::Begin();
bool changed = false;
// Edit 3 floats representing a color
changed |= ImGui::ColorEdit3("Clear color", reinterpret_cast<float*>(&clearColor));
// Switch between raster and ray tracing
changed |= ImGui::Checkbox("Ray Tracer mode", &useRaytracer);
if(changed)
helloVk.resetFrame();
renderUI(helloVk);
ImGui::Text("Application average %.3f ms/frame (%.1f FPS)", 1000.0f / ImGui::GetIO().Framerate, ImGui::GetIO().Framerate);
ImGuiH::Control::Info("", "", "(F10) Toggle Pane", ImGuiH::Control::Flags::Disabled);
ImGuiH::Panel::End();
}
// Start rendering the scene
helloVk.prepareFrame();
// Start command buffer of this frame
auto curFrame = helloVk.getCurFrame();
const VkCommandBuffer& cmdBuf = helloVk.getCommandBuffers()[curFrame];
VkCommandBufferBeginInfo beginInfo{VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO};
beginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
vkBeginCommandBuffer(cmdBuf, &beginInfo);
// Updating camera buffer
helloVk.updateUniformBuffer(cmdBuf);
// Clearing screen
std::array<VkClearValue, 2> clearValues{};
clearValues[0].color = {{clearColor[0], clearColor[1], clearColor[2], clearColor[3]}};
clearValues[1].depthStencil = {1.0f, 0};
// Offscreen render pass
{
VkRenderPassBeginInfo offscreenRenderPassBeginInfo{VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO};
offscreenRenderPassBeginInfo.clearValueCount = 2;
offscreenRenderPassBeginInfo.pClearValues = clearValues.data();
offscreenRenderPassBeginInfo.renderPass = helloVk.m_offscreenRenderPass;
offscreenRenderPassBeginInfo.framebuffer = helloVk.m_offscreenFramebuffer;
offscreenRenderPassBeginInfo.renderArea = {{0, 0}, helloVk.getSize()};
// Rendering Scene
if(useRaytracer)
{
helloVk.raytrace(cmdBuf, clearColor);
}
else
{
vkCmdBeginRenderPass(cmdBuf, &offscreenRenderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);
helloVk.rasterize(cmdBuf);
vkCmdEndRenderPass(cmdBuf);
}
}
// 2nd rendering pass: tone mapper, UI
{
VkRenderPassBeginInfo postRenderPassBeginInfo{VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO};
postRenderPassBeginInfo.clearValueCount = 2;
postRenderPassBeginInfo.pClearValues = clearValues.data();
postRenderPassBeginInfo.renderPass = helloVk.getRenderPass();
postRenderPassBeginInfo.framebuffer = helloVk.getFramebuffers()[curFrame];
postRenderPassBeginInfo.renderArea = {{0, 0}, helloVk.getSize()};
// Rendering tonemapper
vkCmdBeginRenderPass(cmdBuf, &postRenderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);
helloVk.drawPost(cmdBuf);
// Rendering UI
ImGui::Render();
ImGui_ImplVulkan_RenderDrawData(ImGui::GetDrawData(), cmdBuf);
vkCmdEndRenderPass(cmdBuf);
}
// Submit for display
vkEndCommandBuffer(cmdBuf);
helloVk.submitFrame();
}
// Cleanup
vkDeviceWaitIdle(helloVk.getDevice());
helloVk.destroyResources();
helloVk.destroy();
vkctx.deinit();
glfwDestroyWindow(window);
glfwTerminate();
return 0;
}


@@ -0,0 +1,99 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION
* SPDX-License-Identifier: Apache-2.0
*/
#version 450
#extension GL_ARB_separate_shader_objects : enable
#extension GL_EXT_nonuniform_qualifier : enable
#extension GL_GOOGLE_include_directive : enable
#extension GL_EXT_scalar_block_layout : enable
#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
#extension GL_EXT_buffer_reference2 : require
#include "wavefront.glsl"
layout(push_constant) uniform _PushConstantRaster
{
PushConstantRaster pcRaster;
};
// clang-format off
// Incoming
layout(location = 1) in vec3 i_worldPos;
layout(location = 2) in vec3 i_worldNrm;
layout(location = 3) in vec3 i_viewDir;
layout(location = 4) in vec2 i_texCoord;
// Outgoing
layout(location = 0) out vec4 o_color;
layout(buffer_reference, scalar) buffer Vertices {Vertex v[]; }; // Positions of an object
layout(buffer_reference, scalar) buffer Indices {uint i[]; }; // Triangle indices
layout(buffer_reference, scalar) buffer Materials {WaveFrontMaterial m[]; }; // Array of all materials on an object
layout(buffer_reference, scalar) buffer MatIndices {int i[]; }; // Material ID for each triangle
layout(binding = eObjDescs, scalar) buffer ObjDesc_ { ObjDesc i[]; } objDesc;
layout(binding = eTextures) uniform sampler2D[] textureSamplers;
// clang-format on
void main()
{
// Material of the object
ObjDesc objResource = objDesc.i[pcRaster.objIndex];
MatIndices matIndices = MatIndices(objResource.materialIndexAddress);
Materials materials = Materials(objResource.materialAddress);
int matIndex = matIndices.i[gl_PrimitiveID];
WaveFrontMaterial mat = materials.m[matIndex];
vec3 N = normalize(i_worldNrm);
// Vector toward light
vec3 L;
float lightIntensity = pcRaster.lightIntensity;
if(pcRaster.lightType == 0)
{
vec3 lDir = pcRaster.lightPosition - i_worldPos;
float d = length(lDir);
lightIntensity = pcRaster.lightIntensity / (d * d);
L = normalize(lDir);
}
else
{
L = normalize(pcRaster.lightPosition);
}
// Diffuse
vec3 diffuse = computeDiffuse(mat, L, N);
if(mat.textureId >= 0)
{
int txtOffset = objDesc.i[pcRaster.objIndex].txtOffset;
uint txtId = txtOffset + mat.textureId;
vec3 diffuseTxt = texture(textureSamplers[nonuniformEXT(txtId)], i_texCoord).xyz;
diffuse *= diffuseTxt;
}
// Specular
vec3 specular = computeSpecular(mat, i_viewDir, L, N);
// Result
o_color = vec4(lightIntensity * (diffuse + specular), 1);
}


@@ -0,0 +1,118 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef COMMON_HOST_DEVICE
#define COMMON_HOST_DEVICE
#ifdef __cplusplus
#include "nvmath/nvmath.h"
// GLSL Type
using vec2 = nvmath::vec2f;
using vec3 = nvmath::vec3f;
using vec4 = nvmath::vec4f;
using mat4 = nvmath::mat4f;
using uint = unsigned int;
#endif
// clang-format off
#ifdef __cplusplus // Descriptor binding helper for C++ and GLSL
#define START_BINDING(a) enum a {
#define END_BINDING() }
#else
#define START_BINDING(a) const uint
#define END_BINDING()
#endif
START_BINDING(SceneBindings)
eGlobals = 0, // Global uniform containing camera matrices
eObjDescs = 1, // Access to the object descriptions
eTextures = 2 // Access to textures
END_BINDING();
START_BINDING(RtxBindings)
eTlas = 0, // Top-level acceleration structure
eOutImage = 1 // Ray tracer output image
END_BINDING();
// clang-format on
// Information of an OBJ model when referenced in a shader
struct ObjDesc
{
int txtOffset; // Texture index offset in the array of textures
uint64_t vertexAddress; // Address of the Vertex buffer
uint64_t indexAddress; // Address of the index buffer
uint64_t materialAddress; // Address of the material buffer
uint64_t materialIndexAddress; // Address of the triangle material index buffer
};
// Uniform buffer set at each frame
struct GlobalUniforms
{
mat4 viewProj; // Camera view * projection
mat4 viewInverse; // Camera inverse view matrix
mat4 projInverse; // Camera inverse projection matrix
};
// Push constant structure for the raster
struct PushConstantRaster
{
mat4 modelMatrix; // matrix of the instance
vec3 lightPosition;
uint objIndex;
float lightIntensity;
int lightType;
};
// Push constant structure for the ray tracer
struct PushConstantRay
{
vec4 clearColor;
vec3 lightPosition;
float lightIntensity;
int lightType;
int frame;
};
struct Vertex // See ObjLoader, copy of VertexObj, could be compressed for device
{
vec3 pos;
vec3 nrm;
vec3 color;
vec2 texCoord;
};
struct WaveFrontMaterial // See ObjLoader, copy of MaterialObj, could be compressed for device
{
vec3 ambient;
vec3 diffuse;
vec3 specular;
vec3 transmittance;
vec3 emission;
float shininess;
float ior; // index of refraction
float dissolve; // 1 == opaque; 0 == fully transparent
int illum; // illumination model (see http://www.fileformat.info/format/material/)
int textureId;
};
#endif


@@ -0,0 +1,34 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION
* SPDX-License-Identifier: Apache-2.0
*/
#version 450
layout(location = 0) out vec2 outUV;
out gl_PerVertex
{
vec4 gl_Position;
};
void main()
{
outUV = vec2((gl_VertexIndex << 1) & 2, gl_VertexIndex & 2);
gl_Position = vec4(outUV * 2.0f - 1.0f, 1.0f, 1.0f);
}


@@ -0,0 +1,37 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION
* SPDX-License-Identifier: Apache-2.0
*/
#version 450
layout(location = 0) in vec2 outUV;
layout(location = 0) out vec4 fragColor;
layout(set = 0, binding = 0) uniform sampler2D noisyTxt;
layout(push_constant) uniform shaderInformation
{
float aspectRatio;
}
pushc;
void main()
{
vec2 uv = outUV;
float gamma = 1. / 2.2;
fragColor = pow(texture(noisyTxt, uv).rgba, vec4(gamma));
}


@@ -0,0 +1,53 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION
* SPDX-License-Identifier: Apache-2.0
*/
// Generate a random unsigned int from two unsigned int values, using 16 pairs
// of rounds of the Tiny Encryption Algorithm. See Zafar, Olano, and Curtis,
// "GPU Random Numbers via the Tiny Encryption Algorithm"
uint tea(uint val0, uint val1)
{
uint v0 = val0;
uint v1 = val1;
uint s0 = 0;
for(uint n = 0; n < 16; n++)
{
s0 += 0x9e3779b9;
v0 += ((v1 << 4) + 0xa341316c) ^ (v1 + s0) ^ ((v1 >> 5) + 0xc8013ea4);
v1 += ((v0 << 4) + 0xad90777d) ^ (v0 + s0) ^ ((v0 >> 5) + 0x7e95761e);
}
return v0;
}
// Generate a random unsigned int in [0, 2^24) given the previous RNG state
// using the Numerical Recipes linear congruential generator
uint lcg(inout uint prev)
{
uint LCG_A = 1664525u;
uint LCG_C = 1013904223u;
prev = (LCG_A * prev + LCG_C);
return prev & 0x00FFFFFF;
}
// Generate a random float in [0, 1) given the previous RNG state
float rnd(inout uint prev)
{
return (float(lcg(prev)) / float(0x01000000));
}


@@ -0,0 +1,23 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION
* SPDX-License-Identifier: Apache-2.0
*/
struct hitPayload
{
vec3 hitValue;
};


@@ -0,0 +1,145 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION
* SPDX-License-Identifier: Apache-2.0
*/
#version 460
#extension GL_EXT_ray_tracing : require
#extension GL_EXT_nonuniform_qualifier : enable
#extension GL_EXT_scalar_block_layout : enable
#extension GL_GOOGLE_include_directive : enable
#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
#extension GL_EXT_buffer_reference2 : require
#include "raycommon.glsl"
#include "wavefront.glsl"
hitAttributeEXT vec2 attribs;
// clang-format off
layout(location = 0) rayPayloadInEXT hitPayload prd;
layout(location = 1) rayPayloadEXT bool isShadowed;
layout(buffer_reference, scalar) buffer Vertices {Vertex v[]; }; // Positions of an object
layout(buffer_reference, scalar) buffer Indices {ivec3 i[]; }; // Triangle indices
layout(buffer_reference, scalar) buffer Materials {WaveFrontMaterial m[]; }; // Array of all materials on an object
layout(buffer_reference, scalar) buffer MatIndices {int i[]; }; // Material ID for each triangle
layout(set = 0, binding = eTlas) uniform accelerationStructureEXT topLevelAS;
layout(set = 1, binding = eObjDescs, scalar) buffer ObjDesc_ { ObjDesc i[]; } objDesc;
layout(set = 1, binding = eTextures) uniform sampler2D textureSamplers[];
layout(push_constant) uniform _PushConstantRay { PushConstantRay pcRay; };
// clang-format on
void main()
{
// Object data
ObjDesc objResource = objDesc.i[gl_InstanceCustomIndexEXT];
MatIndices matIndices = MatIndices(objResource.materialIndexAddress);
Materials materials = Materials(objResource.materialAddress);
Indices indices = Indices(objResource.indexAddress);
Vertices vertices = Vertices(objResource.vertexAddress);
// Indices of the triangle
ivec3 ind = indices.i[gl_PrimitiveID];
// Vertex of the triangle
Vertex v0 = vertices.v[ind.x];
Vertex v1 = vertices.v[ind.y];
Vertex v2 = vertices.v[ind.z];
const vec3 barycentrics = vec3(1.0 - attribs.x - attribs.y, attribs.x, attribs.y);
// Computing the coordinates of the hit position
const vec3 pos = v0.pos * barycentrics.x + v1.pos * barycentrics.y + v2.pos * barycentrics.z;
const vec3 worldPos = vec3(gl_ObjectToWorldEXT * vec4(pos, 1.0)); // Transforming the position to world space
// Computing the normal at hit position
const vec3 nrm = v0.nrm * barycentrics.x + v1.nrm * barycentrics.y + v2.nrm * barycentrics.z;
const vec3 worldNrm = normalize(vec3(nrm * gl_WorldToObjectEXT)); // Transforming the normal to world space
// Vector toward the light
vec3 L;
float lightIntensity = pcRay.lightIntensity;
float lightDistance = 100000.0;
// Point light
if(pcRay.lightType == 0)
{
vec3 lDir = pcRay.lightPosition - worldPos;
lightDistance = length(lDir);
lightIntensity = pcRay.lightIntensity / (lightDistance * lightDistance);
L = normalize(lDir);
}
else // Directional light
{
L = normalize(pcRay.lightPosition);
}
// Material of the object
int matIdx = matIndices.i[gl_PrimitiveID];
WaveFrontMaterial mat = materials.m[matIdx];
// Diffuse
vec3 diffuse = computeDiffuse(mat, L, worldNrm);
if(mat.textureId >= 0)
{
uint txtId = mat.textureId + objDesc.i[gl_InstanceCustomIndexEXT].txtOffset;
vec2 texCoord = v0.texCoord * barycentrics.x + v1.texCoord * barycentrics.y + v2.texCoord * barycentrics.z;
diffuse *= texture(textureSamplers[nonuniformEXT(txtId)], texCoord).xyz;
}
vec3 specular = vec3(0);
float attenuation = 1;
// Tracing shadow ray only if the light is visible from the surface
if(dot(worldNrm, L) > 0)
{
float tMin = 0.001;
float tMax = lightDistance;
vec3 origin = gl_WorldRayOriginEXT + gl_WorldRayDirectionEXT * gl_HitTEXT;
vec3 rayDir = L;
uint flags = gl_RayFlagsTerminateOnFirstHitEXT | gl_RayFlagsOpaqueEXT | gl_RayFlagsSkipClosestHitShaderEXT;
isShadowed = true;
traceRayEXT(topLevelAS, // acceleration structure
flags, // rayFlags
0xFF, // cullMask
0, // sbtRecordOffset
0, // sbtRecordStride
1, // missIndex
origin, // ray origin
tMin, // ray min range
rayDir, // ray direction
tMax, // ray max range
1 // payload (location = 1)
);
if(isShadowed)
{
attenuation = 0.3;
}
else
{
// Specular
specular = computeSpecular(mat, gl_WorldRayDirectionEXT, L, worldNrm);
}
}
prd.hitValue = vec3(lightIntensity * attenuation * (diffuse + specular));
}


@@ -0,0 +1,101 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION
* SPDX-License-Identifier: Apache-2.0
*/
#version 460
#extension GL_EXT_ray_tracing : require
#extension GL_GOOGLE_include_directive : enable
#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
#extension GL_NV_ray_tracing_motion_blur : require
#include "raycommon.glsl"
#include "host_device.h"
#include "random.glsl"
// clang-format off
layout(location = 0) rayPayloadEXT hitPayload prd;
layout(set = 0, binding = eTlas) uniform accelerationStructureEXT topLevelAS;
layout(set = 0, binding = eOutImage, rgba32f) uniform image2D image;
layout(set = 1, binding = eGlobals) uniform _GlobalUniforms { GlobalUniforms uni; };
layout(push_constant) uniform _PushConstantRay { PushConstantRay pcRay; };
// clang-format on
const int NBSAMPLES = 10;
void main()
{
// Initialize the random number
uint seed = tea(gl_LaunchIDEXT.y * gl_LaunchSizeEXT.x + gl_LaunchIDEXT.x, pcRay.frame);
vec3 hitValues = vec3(0);
for(int smpl = 0; smpl < NBSAMPLES; smpl++)
{
float r1 = rnd(seed);
float r2 = rnd(seed);
// Subpixel jitter: send the ray through a different position inside the pixel
// each time, to provide antialiasing.
vec2 subpixel_jitter = pcRay.frame == 0 ? vec2(0.5f, 0.5f) : vec2(r1, r2);
const vec2 pixelCenter = vec2(gl_LaunchIDEXT.xy) + subpixel_jitter;
const vec2 inUV = pixelCenter / vec2(gl_LaunchSizeEXT.xy);
vec2 d = inUV * 2.0 - 1.0;
vec4 origin = uni.viewInverse * vec4(0, 0, 0, 1);
vec4 target = uni.projInverse * vec4(d.x, d.y, 1, 1);
vec4 direction = uni.viewInverse * vec4(normalize(target.xyz), 0);
uint rayFlags = gl_RayFlagsOpaqueEXT;
float tMin = 0.001;
float tMax = 10000.0;
float time = rnd(seed);
// float time = float(smpl)/float(NBSAMPLES); // stuttered motion
prd.hitValue = vec3(0, 0, 0);
traceRayMotionNV(topLevelAS, // acceleration structure
rayFlags, // rayFlags
0xFF, // cullMask
0, // sbtRecordOffset
0, // sbtRecordStride
0, // missIndex
origin.xyz, // ray origin
tMin, // ray min range
direction.xyz, // ray direction
tMax, // ray max range
time, // time
0 // payload (location = 0)
);
hitValues += prd.hitValue;
}
prd.hitValue = hitValues / NBSAMPLES;
// Do accumulation over time
if(pcRay.frame > 0)
{
float a = 1.0f / float(pcRay.frame + 1);
vec3 old_color = imageLoad(image, ivec2(gl_LaunchIDEXT.xy)).xyz;
imageStore(image, ivec2(gl_LaunchIDEXT.xy), vec4(mix(old_color, prd.hitValue, a), 1.f));
}
else
{
// First frame, replace the value in the buffer
imageStore(image, ivec2(gl_LaunchIDEXT.xy), vec4(prd.hitValue, 1.f));
}
}


@@ -0,0 +1,38 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION
* SPDX-License-Identifier: Apache-2.0
*/
#version 460
#extension GL_EXT_ray_tracing : require
#extension GL_GOOGLE_include_directive : enable
#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
#include "raycommon.glsl"
#include "wavefront.glsl"
layout(location = 0) rayPayloadInEXT hitPayload prd;
layout(push_constant) uniform _PushConstantRay
{
PushConstantRay pcRay;
};
void main()
{
prd.hitValue = pcRay.clearColor.xyz * 0.8;
}


@@ -0,0 +1,28 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION
* SPDX-License-Identifier: Apache-2.0
*/
#version 460
#extension GL_EXT_ray_tracing : require
layout(location = 1) rayPayloadInEXT bool isShadowed;
void main()
{
isShadowed = false;
}


@@ -0,0 +1,66 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION
* SPDX-License-Identifier: Apache-2.0
*/
#version 450
#extension GL_ARB_separate_shader_objects : enable
#extension GL_EXT_scalar_block_layout : enable
#extension GL_GOOGLE_include_directive : enable
#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
#include "wavefront.glsl"
layout(binding = 0) uniform _GlobalUniforms
{
GlobalUniforms uni;
};
layout(push_constant) uniform _PushConstantRaster
{
PushConstantRaster pcRaster;
};
layout(location = 0) in vec3 i_position;
layout(location = 1) in vec3 i_normal;
layout(location = 2) in vec3 i_color;
layout(location = 3) in vec2 i_texCoord;
layout(location = 1) out vec3 o_worldPos;
layout(location = 2) out vec3 o_worldNrm;
layout(location = 3) out vec3 o_viewDir;
layout(location = 4) out vec2 o_texCoord;
out gl_PerVertex
{
vec4 gl_Position;
};
void main()
{
vec3 origin = vec3(uni.viewInverse * vec4(0, 0, 0, 1));
o_worldPos = vec3(pcRaster.modelMatrix * vec4(i_position, 1.0));
o_viewDir = vec3(o_worldPos - origin);
o_texCoord = i_texCoord;
o_worldNrm = mat3(pcRaster.modelMatrix) * i_normal;
gl_Position = uni.viewProj * vec4(o_worldPos, 1.0);
}


@@ -0,0 +1,48 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION
* SPDX-License-Identifier: Apache-2.0
*/
#include "host_device.h"
vec3 computeDiffuse(WaveFrontMaterial mat, vec3 lightDir, vec3 normal)
{
// Lambertian
float dotNL = max(dot(normal, lightDir), 0.0);
vec3 c = mat.diffuse * dotNL;
if(mat.illum >= 1)
c += mat.ambient;
return c;
}
vec3 computeSpecular(WaveFrontMaterial mat, vec3 viewDir, vec3 lightDir, vec3 normal)
{
if(mat.illum < 2)
return vec3(0);
// Compute specular only if not in shadow
const float kPi = 3.14159265;
const float kShininess = max(mat.shininess, 4.0);
// Specular
const float kEnergyConservation = (2.0 + kShininess) / (2.0 * kPi);
vec3 V = normalize(-viewDir);
vec3 R = reflect(-lightDir, normal);
float specular = kEnergyConservation * pow(max(dot(V, R), 0.0), kShininess);
return vec3(mat.specular * specular);
}