New version of the samples and tutorials based on KHR_ray_tracing

mklefrancois 2020-03-31 17:51:08 +02:00
parent 2fd15056a2
commit b6402f0c09
271 changed files with 134108 additions and 2 deletions


@@ -0,0 +1,102 @@
cmake_minimum_required(VERSION 2.8)
get_filename_component(PROJNAME ${CMAKE_CURRENT_SOURCE_DIR} NAME)
SET(PROJNAME vk_${PROJNAME}_KHR)
Project(${PROJNAME})
Message(STATUS "-------------------------------")
Message(STATUS "Processing Project ${PROJNAME}:")
#####################################################################################
_add_project_definitions(${PROJNAME})
#####################################################################################
# Source files for this project
#
file(GLOB SOURCE_FILES *.cpp *.hpp *.inl *.h *.c)
file(GLOB EXTRA_COMMON "../common/*.*")
list(APPEND COMMON_SOURCE_FILES ${EXTRA_COMMON})
include_directories("../common")
#####################################################################################
# GLSL to SPIR-V custom build
#
# more than one file can be given: _compile_GLSL("GLSL_mesh.vert;GLSL_mesh.frag" "GLSL_mesh.spv" GLSL_SOURCES)
# the SpirV validator is fine as long as files are for different pipeline stages (entry points still need to be main())
#_compile_GLSL(<source(s)> <target spv> <LIST where files are appended>)
UNSET(GLSL_SOURCES)
UNSET(SPV_OUTPUT)
file(GLOB_RECURSE GLSL_HEADER_FILES "shaders/*.h" "shaders/*.glsl")
file(GLOB_RECURSE GLSL_SOURCE_FILES
"shaders/*.comp"
"shaders/*.frag"
"shaders/*.vert"
"shaders/*.rchit"
"shaders/*.rahit"
"shaders/*.rmiss"
"shaders/*.rgen"
)
foreach(GLSL ${GLSL_SOURCE_FILES})
get_filename_component(FILE_NAME ${GLSL} NAME)
_compile_GLSL(${GLSL} "shaders/${FILE_NAME}.spv" GLSL_SOURCES SPV_OUTPUT)
endforeach(GLSL)
list(APPEND GLSL_SOURCES ${GLSL_HEADER_FILES})
source_group(Shader_Files FILES ${GLSL_SOURCES})
#####################################################################################
# Executable
#
# if(WIN32 AND NOT GLUT_FOUND)
# add_definitions(/wd4996) #remove printf warning
# add_definitions(/wd4244) #remove double to float conversion warning
# add_definitions(/wd4305) #remove double to float truncation warning
# else()
# add_definitions(-fpermissive)
# endif()
add_executable(${PROJNAME} ${SOURCE_FILES} ${COMMON_SOURCE_FILES} ${PACKAGE_SOURCE_FILES} ${GLSL_SOURCES} ${CUDA_FILES} ${CUBIN_SOURCES})
#_set_subsystem_console(${PROJNAME})
#####################################################################################
# common source code needed for this sample
#
source_group(common FILES
${COMMON_SOURCE_FILES}
${PACKAGE_SOURCE_FILES}
)
source_group("Source Files" FILES ${SOURCE_FILES})
# if(UNIX)
# set(UNIXLINKLIBS dl pthread)
# else()
# set(UNIXLINKLIBS)
# endif()
#####################################################################################
# Linkage
#
target_link_libraries(${PROJNAME} ${PLATFORM_LIBRARIES} shared_sources)
foreach(DEBUGLIB ${LIBRARIES_DEBUG})
target_link_libraries(${PROJNAME} debug ${DEBUGLIB})
endforeach(DEBUGLIB)
foreach(RELEASELIB ${LIBRARIES_OPTIMIZED})
target_link_libraries(${PROJNAME} optimized ${RELEASELIB})
endforeach(RELEASELIB)
#####################################################################################
# copies binaries that need to be put next to the exe files (ZLib, etc.)
#
_copy_binaries_to_target( ${PROJNAME} )
install(FILES ${SPV_OUTPUT} CONFIGURATIONS Release DESTINATION "bin_${ARCH}/${PROJNAME}/shaders")
install(FILES ${SPV_OUTPUT} CONFIGURATIONS Debug DESTINATION "bin_${ARCH}_debug/${PROJNAME}/shaders")
install(FILES ${CUBIN_SOURCES} CONFIGURATIONS Release DESTINATION "bin_${ARCH}/${PROJNAME}")
install(FILES ${CUBIN_SOURCES} CONFIGURATIONS Debug DESTINATION "bin_${ARCH}_debug/${PROJNAME}")
install(DIRECTORY "../media" CONFIGURATIONS Release DESTINATION "bin_${ARCH}/${PROJNAME}")
install(DIRECTORY "../media" CONFIGURATIONS Debug DESTINATION "bin_${ARCH}_debug/${PROJNAME}")


@@ -0,0 +1,8 @@
# NVIDIA Vulkan Ray Tracing Tutorial
This example is a simple OBJ viewer in Vulkan, without any ray tracing functionality.
It is the starting point of the ray tracing tutorial: the base application to which ray tracing will be added.
## [**Start Ray Tracing Tutorial**](https://nvpro-samples.github.io/vk_raytracing_tutorial/)
![resultRasterCube](../docs/Images/resultRasterCube.png)


@@ -0,0 +1,550 @@
/* Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sstream>
#include <vulkan/vulkan.hpp>
extern std::vector<std::string> defaultSearchPaths;
#define STB_IMAGE_IMPLEMENTATION
#include "fileformats/stb_image.h"
#include "obj_loader.h"
#include "hello_vulkan.h"
#include "nvh//cameramanipulator.hpp"
#include "nvvkpp/descriptorsets_vkpp.hpp"
#include "nvvkpp/pipeline_vkpp.hpp"
#include "nvh/fileoperations.hpp"
#include "nvvkpp/commands_vkpp.hpp"
#include "nvvkpp/renderpass_vkpp.hpp"
#include "nvvkpp/utilities_vkpp.hpp"
// Holding the camera matrices
struct CameraMatrices
{
nvmath::mat4f view;
nvmath::mat4f proj;
nvmath::mat4f viewInverse;
};
//--------------------------------------------------------------------------------------------------
// Keep a handle on the device
// Initialize the allocator that will handle all our allocations: buffers, images
//
void HelloVulkan::setup(const vk::Device& device, const vk::PhysicalDevice& physicalDevice, uint32_t queueFamily)
{
AppBase::setup(device, physicalDevice, queueFamily);
m_alloc.init(device, physicalDevice);
m_debug.setup(m_device);
}
//--------------------------------------------------------------------------------------------------
// Called at each frame to update the camera matrix
//
void HelloVulkan::updateUniformBuffer()
{
const float aspectRatio = m_size.width / static_cast<float>(m_size.height);
CameraMatrices ubo = {};
ubo.view = CameraManip.getMatrix();
ubo.proj = nvmath::perspectiveVK(CameraManip.getFov(), aspectRatio, 0.1f, 1000.0f);
//ubo.proj[1][1] *= -1; // Inverting Y for Vulkan
ubo.viewInverse = nvmath::invert(ubo.view);
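// The buffer was created host-visible and host-coherent (see createUniformBuffer),
// so a simple map / memcpy / unmap is enough to update it every frame.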
void* data = m_device.mapMemory(m_cameraMat.allocation, 0, sizeof(ubo));
memcpy(data, &ubo, sizeof(ubo));
m_device.unmapMemory(m_cameraMat.allocation);
}
//--------------------------------------------------------------------------------------------------
// Describing the resources bound when rendering (descriptor set layout, pool and set)
//
void HelloVulkan::createDescriptorSetLayout()
{
using vkDS = vk::DescriptorSetLayoutBinding;
using vkDT = vk::DescriptorType;
using vkSS = vk::ShaderStageFlagBits;
uint32_t nbTxt = static_cast<uint32_t>(m_textures.size());
uint32_t nbObj = static_cast<uint32_t>(m_objModel.size());
// Camera matrices (binding = 0)
m_descSetLayoutBind.emplace_back(vkDS(0, vkDT::eUniformBuffer, 1, vkSS::eVertex));
// Materials (binding = 1)
m_descSetLayoutBind.emplace_back(vkDS(1, vkDT::eStorageBuffer, nbObj, vkSS::eVertex | vkSS::eFragment));
// Scene description (binding = 2)
m_descSetLayoutBind.emplace_back( //
vkDS(2, vkDT::eStorageBuffer, 1, vkSS::eVertex | vkSS::eFragment));
// Textures (binding = 3)
m_descSetLayoutBind.emplace_back(vkDS(3, vkDT::eCombinedImageSampler, nbTxt, vkSS::eFragment));
// Material indices (binding = 4)
m_descSetLayoutBind.emplace_back(vkDS(4, vkDT::eStorageBuffer, nbObj, vkSS::eFragment));
m_descSetLayout = nvvkpp::util::createDescriptorSetLayout(m_device, m_descSetLayoutBind);
m_descPool = nvvkpp::util::createDescriptorPool(m_device, m_descSetLayoutBind, 1);
m_descSet = nvvkpp::util::createDescriptorSet(m_device, m_descPool, m_descSetLayout);
}
//--------------------------------------------------------------------------------------------------
// Setting up the buffers in the descriptor set
//
void HelloVulkan::updateDescriptorSet()
{
std::vector<vk::WriteDescriptorSet> writes;
// Camera matrices and scene description
vk::DescriptorBufferInfo dbiUnif{m_cameraMat.buffer, 0, VK_WHOLE_SIZE};
writes.emplace_back(nvvkpp::util::createWrite(m_descSet, m_descSetLayoutBind[0], &dbiUnif));
vk::DescriptorBufferInfo dbiSceneDesc{m_sceneDesc.buffer, 0, VK_WHOLE_SIZE};
writes.emplace_back(nvvkpp::util::createWrite(m_descSet, m_descSetLayoutBind[2], &dbiSceneDesc));
// All material buffers, 1 buffer per OBJ
std::vector<vk::DescriptorBufferInfo> dbiMat;
std::vector<vk::DescriptorBufferInfo> dbiMatIdx;
for(size_t i = 0; i < m_objModel.size(); ++i)
{
dbiMat.push_back({m_objModel[i].matColorBuffer.buffer, 0, VK_WHOLE_SIZE});
dbiMatIdx.push_back({m_objModel[i].matIndexBuffer.buffer, 0, VK_WHOLE_SIZE});
}
writes.emplace_back(nvvkpp::util::createWrite(m_descSet, m_descSetLayoutBind[1], dbiMat.data()));
writes.emplace_back(nvvkpp::util::createWrite(m_descSet, m_descSetLayoutBind[4], dbiMatIdx.data()));
// All texture samplers
std::vector<vk::DescriptorImageInfo> diit;
for(size_t i = 0; i < m_textures.size(); ++i)
{
diit.push_back(m_textures[i].descriptor);
}
writes.emplace_back(nvvkpp::util::createWrite(m_descSet, m_descSetLayoutBind[3], diit.data()));
// Writing the information
m_device.updateDescriptorSets(static_cast<uint32_t>(writes.size()), writes.data(), 0, nullptr);
}
//--------------------------------------------------------------------------------------------------
// Creating the pipeline layout
//
void HelloVulkan::createGraphicsPipeline()
{
using vkSS = vk::ShaderStageFlagBits;
vk::PushConstantRange pushConstantRanges = {vkSS::eVertex | vkSS::eFragment, 0, sizeof(ObjPushConstant)};
// Creating the Pipeline Layout
vk::PipelineLayoutCreateInfo pipelineLayoutCreateInfo;
vk::DescriptorSetLayout descSetLayout(m_descSetLayout);
pipelineLayoutCreateInfo.setSetLayoutCount(1);
pipelineLayoutCreateInfo.setPSetLayouts(&descSetLayout);
pipelineLayoutCreateInfo.setPushConstantRangeCount(1);
pipelineLayoutCreateInfo.setPPushConstantRanges(&pushConstantRanges);
m_pipelineLayout = m_device.createPipelineLayout(pipelineLayoutCreateInfo);
// Creating the Pipeline
std::vector<std::string> paths = defaultSearchPaths;
nvvkpp::GraphicsPipelineGenerator gpb(m_device, m_pipelineLayout, m_offscreenRenderPass);
gpb.depthStencilState = {true};
gpb.addShader(nvh::loadFile("shaders/vert_shader.vert.spv", true, paths), vkSS::eVertex);
gpb.addShader(nvh::loadFile("shaders/frag_shader.frag.spv", true, paths), vkSS::eFragment);
gpb.vertexInputState.bindingDescriptions = {{0, sizeof(VertexObj)}};
gpb.vertexInputState.attributeDescriptions = {{0, 0, vk::Format::eR32G32B32Sfloat, offsetof(VertexObj, pos)},
{1, 0, vk::Format::eR32G32B32Sfloat, offsetof(VertexObj, nrm)},
{2, 0, vk::Format::eR32G32B32Sfloat, offsetof(VertexObj, color)},
{3, 0, vk::Format::eR32G32Sfloat, offsetof(VertexObj, texCoord)}};
m_graphicsPipeline = gpb.create();
m_debug.setObjectName(m_graphicsPipeline, "Graphics");
}
//--------------------------------------------------------------------------------------------------
// Loading the OBJ file and setting up all buffers
//
void HelloVulkan::loadModel(const std::string& filename, nvmath::mat4f transform)
{
using vkBU = vk::BufferUsageFlagBits;
ObjLoader loader;
loader.loadModel(filename);
// Converting from sRGB to linear
for(auto& m : loader.m_materials)
{
m.ambient = nvmath::pow(m.ambient, 2.2f);
m.diffuse = nvmath::pow(m.diffuse, 2.2f);
m.specular = nvmath::pow(m.specular, 2.2f);
}
ObjInstance instance;
instance.objIndex = static_cast<uint32_t>(m_objModel.size());
instance.transform = transform;
instance.transformIT = nvmath::transpose(nvmath::invert(transform));
instance.txtOffset = static_cast<uint32_t>(m_textures.size());
ObjModel model;
model.nbIndices = static_cast<uint32_t>(loader.m_indices.size());
model.nbVertices = static_cast<uint32_t>(loader.m_vertices.size());
// Create the buffers on Device and copy vertices, indices and materials
nvvkpp::SingleCommandBuffer cmdBufGet(m_device, m_graphicsQueueIndex);
vk::CommandBuffer cmdBuf = cmdBufGet.createCommandBuffer();
model.vertexBuffer = m_alloc.createBuffer(cmdBuf, loader.m_vertices, vkBU::eVertexBuffer);
model.indexBuffer = m_alloc.createBuffer(cmdBuf, loader.m_indices, vkBU::eIndexBuffer);
model.matColorBuffer = m_alloc.createBuffer(cmdBuf, loader.m_materials, vkBU::eStorageBuffer);
model.matIndexBuffer = m_alloc.createBuffer(cmdBuf, loader.m_matIndx, vkBU::eStorageBuffer);
// Creates all textures found
createTextureImages(cmdBuf, loader.m_textures);
cmdBufGet.flushCommandBuffer(cmdBuf);
m_alloc.flushStaging();
std::string objNb = std::to_string(instance.objIndex);
m_debug.setObjectName(model.vertexBuffer.buffer, (std::string("vertex_" + objNb).c_str()));
m_debug.setObjectName(model.indexBuffer.buffer, (std::string("index_" + objNb).c_str()));
m_debug.setObjectName(model.matColorBuffer.buffer, (std::string("mat_" + objNb).c_str()));
m_debug.setObjectName(model.matIndexBuffer.buffer, (std::string("matIdx_" + objNb).c_str()));
m_objModel.emplace_back(model);
m_objInstance.emplace_back(instance);
}
//--------------------------------------------------------------------------------------------------
// Creating the uniform buffer holding the camera matrices
// - Buffer is host visible
//
void HelloVulkan::createUniformBuffer()
{
using vkBU = vk::BufferUsageFlagBits;
using vkMP = vk::MemoryPropertyFlagBits;
m_cameraMat = m_alloc.createBuffer(sizeof(CameraMatrices), vkBU::eUniformBuffer, vkMP::eHostVisible | vkMP::eHostCoherent);
m_debug.setObjectName(m_cameraMat.buffer, "cameraMat");
}
//--------------------------------------------------------------------------------------------------
// Create a storage buffer containing the description of the scene elements
// - Which geometry is used by which instance
// - Transformation
// - Offset for texture
//
void HelloVulkan::createSceneDescriptionBuffer()
{
using vkBU = vk::BufferUsageFlagBits;
nvvkpp::SingleCommandBuffer cmdGen(m_device, m_graphicsQueueIndex);
auto cmdBuf = cmdGen.createCommandBuffer();
m_sceneDesc = m_alloc.createBuffer(cmdBuf, m_objInstance, vkBU::eStorageBuffer);
cmdGen.flushCommandBuffer(cmdBuf);
m_alloc.flushStaging();
m_debug.setObjectName(m_sceneDesc.buffer, "sceneDesc");
}
//--------------------------------------------------------------------------------------------------
// Creating all textures and samplers
//
void HelloVulkan::createTextureImages(const vk::CommandBuffer& cmdBuf, const std::vector<std::string>& textures)
{
using vkIU = vk::ImageUsageFlagBits;
vk::SamplerCreateInfo samplerCreateInfo{{}, vk::Filter::eLinear, vk::Filter::eLinear, vk::SamplerMipmapMode::eLinear};
samplerCreateInfo.setMaxLod(FLT_MAX);
vk::Format format = vk::Format::eR8G8B8A8Srgb;
// If no textures are present, create a dummy one to accommodate the pipeline layout
if(textures.empty() && m_textures.empty())
{
nvvkTexture texture;
std::array<uint8_t, 4> color{255u, 255u, 255u, 255u};
vk::DeviceSize bufferSize = sizeof(color);
auto imgSize = vk::Extent2D(1, 1);
auto imageCreateInfo = nvvkpp::image::create2DInfo(imgSize, format);
// Creating the VKImage
texture = m_alloc.createImage(cmdBuf, bufferSize, color.data(), imageCreateInfo);
// Setting up the descriptor used by the shader
texture.descriptor = nvvkpp::image::create2DDescriptor(m_device, texture.image, samplerCreateInfo, format);
// The image must be in VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL to be sampled by the shaders
nvvkpp::image::setImageLayout(cmdBuf, texture.image, vk::ImageLayout::eUndefined, vk::ImageLayout::eShaderReadOnlyOptimal);
m_textures.push_back(texture);
}
else
{
// Uploading all images
for(const auto& texture : textures)
{
std::stringstream o;
int texWidth, texHeight, texChannels;
o << "media/textures/" << texture;
std::string txtFile = nvh::findFile(o.str(), defaultSearchPaths);
stbi_uc* pixels = stbi_load(txtFile.c_str(), &texWidth, &texHeight, &texChannels, STBI_rgb_alpha);
// Handle failure: fall back to a 1x1 magenta texture.
// Note: `color` must stay alive until the upload below, since `pixels` may point into it.
std::array<uint8_t, 4> color{255u, 0u, 255u, 255u};
if(!pixels)
{
texWidth = texHeight = 1;
texChannels = 4;
pixels = reinterpret_cast<stbi_uc*>(color.data());
}
vk::DeviceSize bufferSize = static_cast<uint64_t>(texWidth) * texHeight * sizeof(uint8_t) * 4;
auto imgSize = vk::Extent2D(texWidth, texHeight);
auto imageCreateInfo = nvvkpp::image::create2DInfo(imgSize, format, vkIU::eSampled, true);
{
nvvkTexture texture;
texture = m_alloc.createImage(cmdBuf, bufferSize, pixels, imageCreateInfo);
nvvkpp::image::generateMipmaps(cmdBuf, texture.image, format, imgSize, imageCreateInfo.mipLevels);
texture.descriptor = nvvkpp::image::create2DDescriptor(m_device, texture.image, samplerCreateInfo, format);
m_textures.push_back(texture);
}
}
}
}
//--------------------------------------------------------------------------------------------------
// Destroying all allocations
//
void HelloVulkan::destroyResources()
{
m_device.destroy(m_graphicsPipeline);
m_device.destroy(m_pipelineLayout);
m_device.destroy(m_descPool);
m_device.destroy(m_descSetLayout);
m_alloc.destroy(m_cameraMat);
m_alloc.destroy(m_sceneDesc);
for(auto& m : m_objModel)
{
m_alloc.destroy(m.vertexBuffer);
m_alloc.destroy(m.indexBuffer);
m_alloc.destroy(m.matColorBuffer);
m_alloc.destroy(m.matIndexBuffer);
}
for(auto& t : m_textures)
{
m_alloc.destroy(t);
}
//#Post
m_device.destroy(m_postPipeline);
m_device.destroy(m_postPipelineLayout);
m_device.destroy(m_postDescPool);
m_device.destroy(m_postDescSetLayout);
m_alloc.destroy(m_offscreenColor);
m_alloc.destroy(m_offscreenDepth);
m_device.destroy(m_offscreenRenderPass);
m_device.destroy(m_offscreenFramebuffer);
}
//--------------------------------------------------------------------------------------------------
// Drawing the scene in raster mode
//
void HelloVulkan::rasterize(const vk::CommandBuffer& cmdBuf)
{
using vkPBP = vk::PipelineBindPoint;
using vkSS = vk::ShaderStageFlagBits;
vk::DeviceSize offset{0};
m_debug.beginLabel(cmdBuf, "Rasterize");
// Dynamic Viewport
cmdBuf.setViewport(0, {vk::Viewport(0, 0, (float)m_size.width, (float)m_size.height, 0, 1)});
cmdBuf.setScissor(0, {{{0, 0}, {m_size.width, m_size.height}}});
// Drawing all triangles
cmdBuf.bindPipeline(vkPBP::eGraphics, m_graphicsPipeline);
cmdBuf.bindDescriptorSets(vkPBP::eGraphics, m_pipelineLayout, 0, {m_descSet}, {});
for(int i = 0; i < m_objInstance.size(); ++i)
{
auto& inst = m_objInstance[i];
auto& model = m_objModel[inst.objIndex];
m_pushConstant.instanceId = i; // Telling which instance is drawn
cmdBuf.pushConstants<ObjPushConstant>(m_pipelineLayout, vkSS::eVertex | vkSS::eFragment, 0, m_pushConstant);
cmdBuf.bindVertexBuffers(0, 1, &model.vertexBuffer.buffer, &offset);
cmdBuf.bindIndexBuffer(model.indexBuffer.buffer, 0, vk::IndexType::eUint32);
cmdBuf.drawIndexed(model.nbIndices, 1, 0, 0, 0);
}
m_debug.endLabel(cmdBuf);
}
//--------------------------------------------------------------------------------------------------
// Handling resize of the window
//
void HelloVulkan::onResize(int /*w*/, int /*h*/)
{
createOffscreenRender();
updatePostDescriptorSet();
}
//////////////////////////////////////////////////////////////////////////
// Post-processing
//////////////////////////////////////////////////////////////////////////
//--------------------------------------------------------------------------------------------------
// Creating an offscreen frame buffer and the associated render pass
//
void HelloVulkan::createOffscreenRender()
{
m_alloc.destroy(m_offscreenColor);
m_alloc.destroy(m_offscreenDepth);
// Creating the color image
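// Besides being a color attachment, the image requests eSampled (for the post-process
// pass) and eStorage usage and stays in eGeneral layout, so that later tutorial steps
// (e.g. the ray tracer added in the next chapters) can also write to it directly.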
auto colorCreateInfo = nvvkpp::image::create2DInfo(m_size, m_offscreenColorFormat,
vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eSampled
| vk::ImageUsageFlagBits::eStorage);
m_offscreenColor = m_alloc.createImage(colorCreateInfo);
m_offscreenColor.descriptor = nvvkpp::image::create2DDescriptor(m_device, m_offscreenColor.image, vk::SamplerCreateInfo{},
m_offscreenColorFormat, vk::ImageLayout::eGeneral);
// Creating the depth buffer
auto depthCreateInfo =
nvvkpp::image::create2DInfo(m_size, m_offscreenDepthFormat, vk::ImageUsageFlagBits::eDepthStencilAttachment);
m_offscreenDepth = m_alloc.createImage(depthCreateInfo);
vk::ImageViewCreateInfo depthStencilView;
depthStencilView.setViewType(vk::ImageViewType::e2D);
depthStencilView.setFormat(m_offscreenDepthFormat);
depthStencilView.setSubresourceRange({vk::ImageAspectFlagBits::eDepth, 0, 1, 0, 1});
depthStencilView.setImage(m_offscreenDepth.image);
m_offscreenDepth.descriptor.imageView = m_device.createImageView(depthStencilView);
// Setting the image layout for both color and depth
{
nvvkpp::SingleCommandBuffer genCmdBuf(m_device, m_graphicsQueueIndex);
auto cmdBuf = genCmdBuf.createCommandBuffer();
nvvkpp::image::setImageLayout(cmdBuf, m_offscreenColor.image, vk::ImageLayout::eUndefined, vk::ImageLayout::eGeneral);
nvvkpp::image::setImageLayout(cmdBuf, m_offscreenDepth.image, vk::ImageAspectFlagBits::eDepth,
vk::ImageLayout::eUndefined, vk::ImageLayout::eDepthStencilAttachmentOptimal);
genCmdBuf.flushCommandBuffer(cmdBuf);
}
// Creating a render pass for the offscreen rendering
if(!m_offscreenRenderPass)
{
m_offscreenRenderPass = nvvkpp::util::createRenderPass(m_device, {m_offscreenColorFormat}, m_offscreenDepthFormat, 1,
true, true, vk::ImageLayout::eGeneral, vk::ImageLayout::eGeneral);
}
// Creating the framebuffer for the offscreen rendering
std::vector<vk::ImageView> attachments = {m_offscreenColor.descriptor.imageView, m_offscreenDepth.descriptor.imageView};
m_device.destroy(m_offscreenFramebuffer);
vk::FramebufferCreateInfo info;
info.setRenderPass(m_offscreenRenderPass);
info.setAttachmentCount(2);
info.setPAttachments(attachments.data());
info.setWidth(m_size.width);
info.setHeight(m_size.height);
info.setLayers(1);
m_offscreenFramebuffer = m_device.createFramebuffer(info);
}
//--------------------------------------------------------------------------------------------------
// The pipeline defines how things are rendered: which shaders, the type of primitives, depth test, and more
//
void HelloVulkan::createPostPipeline()
{
// Push constants in the fragment shader
vk::PushConstantRange pushConstantRanges = {vk::ShaderStageFlagBits::eFragment, 0, sizeof(float)};
// Creating the pipeline layout
vk::PipelineLayoutCreateInfo pipelineLayoutCreateInfo;
pipelineLayoutCreateInfo.setSetLayoutCount(1);
pipelineLayoutCreateInfo.setPSetLayouts(&m_postDescSetLayout);
pipelineLayoutCreateInfo.setPushConstantRangeCount(1);
pipelineLayoutCreateInfo.setPPushConstantRanges(&pushConstantRanges);
m_postPipelineLayout = m_device.createPipelineLayout(pipelineLayoutCreateInfo);
// Pipeline: completely generic, no vertices
std::vector<std::string> paths = defaultSearchPaths;
nvvkpp::GraphicsPipelineGenerator pipelineGenerator(m_device, m_postPipelineLayout, m_renderPass);
pipelineGenerator.addShader(nvh::loadFile("shaders/passthrough.vert.spv", true, paths), vk::ShaderStageFlagBits::eVertex);
pipelineGenerator.addShader(nvh::loadFile("shaders/post.frag.spv", true, paths), vk::ShaderStageFlagBits::eFragment);
pipelineGenerator.rasterizationState.setCullMode(vk::CullModeFlagBits::eNone);
m_postPipeline = pipelineGenerator.create();
m_debug.setObjectName(m_postPipeline, "post");
}
//--------------------------------------------------------------------------------------------------
// The descriptor layout is the description of the data that is passed to the vertex or the
// fragment program.
//
void HelloVulkan::createPostDescriptor()
{
using vkDS = vk::DescriptorSetLayoutBinding;
using vkDT = vk::DescriptorType;
using vkSS = vk::ShaderStageFlagBits;
m_postDescSetLayoutBind.emplace_back(vkDS(0, vkDT::eCombinedImageSampler, 1, vkSS::eFragment));
m_postDescSetLayout = nvvkpp::util::createDescriptorSetLayout(m_device, m_postDescSetLayoutBind);
m_postDescPool = nvvkpp::util::createDescriptorPool(m_device, m_postDescSetLayoutBind);
m_postDescSet = nvvkpp::util::createDescriptorSet(m_device, m_postDescPool, m_postDescSetLayout);
}
//--------------------------------------------------------------------------------------------------
// Update the descriptor so it points to the offscreen color image (the rendering output)
//
void HelloVulkan::updatePostDescriptorSet()
{
vk::WriteDescriptorSet writeDescriptorSets =
nvvkpp::util::createWrite(m_postDescSet, m_postDescSetLayoutBind[0], &m_offscreenColor.descriptor);
m_device.updateDescriptorSets(writeDescriptorSets, nullptr);
}
//--------------------------------------------------------------------------------------------------
// Draw a full screen quad with the attached image
//
void HelloVulkan::drawPost(vk::CommandBuffer cmdBuf)
{
m_debug.beginLabel(cmdBuf, "Post");
cmdBuf.setViewport(0, {vk::Viewport(0, 0, (float)m_size.width, (float)m_size.height, 0, 1)});
cmdBuf.setScissor(0, {{{0, 0}, {m_size.width, m_size.height}}});
auto aspectRatio = static_cast<float>(m_size.width) / static_cast<float>(m_size.height);
cmdBuf.pushConstants<float>(m_postPipelineLayout, vk::ShaderStageFlagBits::eFragment, 0, aspectRatio);
cmdBuf.bindPipeline(vk::PipelineBindPoint::eGraphics, m_postPipeline);
cmdBuf.bindDescriptorSets(vk::PipelineBindPoint::eGraphics, m_postPipelineLayout, 0, m_postDescSet, {});
cmdBuf.draw(3, 1, 0, 0);
m_debug.endLabel(cmdBuf);
}


@@ -0,0 +1,130 @@
/* Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#include "nvvkpp/allocator_dedicated_vkpp.hpp"
#include "nvvkpp/appbase_vkpp.hpp"
#include "nvvkpp/debug_util_vkpp.hpp"
using nvvkBuffer = nvvkpp::BufferDedicated;
using nvvkTexture = nvvkpp::TextureDedicated;
//--------------------------------------------------------------------------------------------------
// Simple rasterizer of OBJ objects
// - Each loaded OBJ is stored in an `ObjModel` and referenced by an `ObjInstance`
// - Many `ObjInstance`s can reference the same `ObjModel`
// - Rendering is done in an offscreen framebuffer
// - The image of the framebuffer is displayed in post-process in a full-screen quad
//
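// Typical call order (see main.cpp): setup(), loadModel(), createOffscreenRender(),
// createDescriptorSetLayout(), createGraphicsPipeline(), createUniformBuffer(),
// createSceneDescriptionBuffer(), updateDescriptorSet(), then createPostDescriptor(),
// createPostPipeline() and updatePostDescriptorSet() for the post-processing pass.
//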
class HelloVulkan : public nvvkpp::AppBase
{
public:
void setup(const vk::Device& device, const vk::PhysicalDevice& physicalDevice, uint32_t queueFamily) override;
void createDescriptorSetLayout();
void createGraphicsPipeline();
void loadModel(const std::string& filename, nvmath::mat4f transform = nvmath::mat4f(1));
void updateDescriptorSet();
void createUniformBuffer();
void createSceneDescriptionBuffer();
void createTextureImages(const vk::CommandBuffer& cmdBuf, const std::vector<std::string>& textures);
void updateUniformBuffer();
void onResize(int /*w*/, int /*h*/) override;
void destroyResources();
void rasterize(const vk::CommandBuffer& cmdBuff);
// The OBJ model
struct ObjModel
{
uint32_t nbIndices{0};
uint32_t nbVertices{0};
nvvkBuffer vertexBuffer; // Device buffer of all 'Vertex'
nvvkBuffer indexBuffer; // Device buffer of the indices forming triangles
nvvkBuffer matColorBuffer; // Device buffer of array of 'Wavefront material'
nvvkBuffer matIndexBuffer; // Device buffer of the material index of each triangle
};
// Instance of the OBJ
struct ObjInstance
{
uint32_t objIndex{0}; // Reference to the `m_objModel`
uint32_t txtOffset{0}; // Offset in `m_textures`
nvmath::mat4f transform{1}; // Position of the instance
nvmath::mat4f transformIT{1}; // Inverse transpose
};
// Information pushed at each draw call
struct ObjPushConstant
{
nvmath::vec3f lightPosition{10.f, 15.f, 8.f};
int instanceId{0}; // To retrieve the transformation matrix
float lightIntensity{100.f};
int lightType{0}; // 0: point, 1: infinite
};
ObjPushConstant m_pushConstant;
// Array of objects and instances in the scene
std::vector<ObjModel> m_objModel;
std::vector<ObjInstance> m_objInstance;
// Graphic pipeline
vk::PipelineLayout m_pipelineLayout;
vk::Pipeline m_graphicsPipeline;
std::vector<vk::DescriptorSetLayoutBinding> m_descSetLayoutBind;
vk::DescriptorPool m_descPool;
vk::DescriptorSetLayout m_descSetLayout;
vk::DescriptorSet m_descSet;
nvvkBuffer m_cameraMat; // Host-visible buffer holding the camera matrices
nvvkBuffer m_sceneDesc; // Device buffer of the OBJ instances
std::vector<nvvkTexture> m_textures; // Vector of all textures of the scene
nvvkpp::AllocatorDedicated m_alloc; // Allocator for buffers, images, acceleration structures
nvvkpp::DebugUtil m_debug; // Utility to name objects
// #Post
void createOffscreenRender();
void createPostPipeline();
void createPostDescriptor();
void updatePostDescriptorSet();
void drawPost(vk::CommandBuffer cmdBuf);
std::vector<vk::DescriptorSetLayoutBinding> m_postDescSetLayoutBind;
vk::DescriptorPool m_postDescPool;
vk::DescriptorSetLayout m_postDescSetLayout;
vk::DescriptorSet m_postDescSet;
vk::Pipeline m_postPipeline;
vk::PipelineLayout m_postPipelineLayout;
vk::RenderPass m_offscreenRenderPass;
vk::Framebuffer m_offscreenFramebuffer;
nvvkTexture m_offscreenColor;
vk::Format m_offscreenColorFormat{vk::Format::eR32G32B32A32Sfloat};
nvvkTexture m_offscreenDepth;
vk::Format m_offscreenDepthFormat{vk::Format::eD32Sfloat};
};


@@ -0,0 +1,277 @@
/* Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// ImGui - standalone example application for GLFW + Vulkan, using the programmable
// pipeline. If you are new to ImGui, see examples/README.txt and the documentation
// at the top of imgui.cpp.
#include <array>
#include <vulkan/vulkan.hpp>
#include "imgui.h"
#include "imgui_impl_glfw.h"
#include "hello_vulkan.h"
#include "nvh/cameramanipulator.hpp"
#include "nvh/fileoperations.hpp"
#include "nvpsystem.hpp"
#include "nvvkpp/appbase_vkpp.hpp"
#include "nvvkpp/commands_vkpp.hpp"
#include "nvvkpp/context_vkpp.hpp"
#include "nvvkpp/utilities_vkpp.hpp"
//////////////////////////////////////////////////////////////////////////
#define UNUSED(x) (void)(x)
//////////////////////////////////////////////////////////////////////////
// Default search path for shaders
std::vector<std::string> defaultSearchPaths;
// GLFW Callback functions
static void onErrorCallback(int error, const char* description)
{
fprintf(stderr, "GLFW Error %d: %s\n", error, description);
}
// Extra UI
void renderUI(HelloVulkan& helloVk)
{
static int item = 1;
if(ImGui::Combo("Up Vector", &item, "X\0Y\0Z\0\0"))
{
nvmath::vec3f pos, eye, up;
CameraManip.getLookat(pos, eye, up);
up = nvmath::vec3f(item == 0, item == 1, item == 2);
CameraManip.setLookat(pos, eye, up);
}
ImGui::SliderFloat3("Light Position", &helloVk.m_pushConstant.lightPosition.x, -20.f, 20.f);
ImGui::SliderFloat("Light Intensity", &helloVk.m_pushConstant.lightIntensity, 0.f, 100.f);
ImGui::RadioButton("Point", &helloVk.m_pushConstant.lightType, 0);
ImGui::SameLine();
ImGui::RadioButton("Infinite", &helloVk.m_pushConstant.lightType, 1);
}
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
static int const SAMPLE_WIDTH = 1280;
static int const SAMPLE_HEIGHT = 720;
//--------------------------------------------------------------------------------------------------
// Application Entry
//
int main(int argc, char** argv)
{
UNUSED(argc);
// Setup GLFW window
glfwSetErrorCallback(onErrorCallback);
if(!glfwInit())
{
return 1;
}
glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
GLFWwindow* window = glfwCreateWindow(SAMPLE_WIDTH, SAMPLE_HEIGHT, "NVIDIA Vulkan Raytracing Tutorial", nullptr, nullptr);
// Setup camera
CameraManip.setWindowSize(SAMPLE_WIDTH, SAMPLE_HEIGHT);
CameraManip.setLookat(nvmath::vec3f(2.0f, 2.0f, 2.0f), nvmath::vec3f(0, 0, 0), nvmath::vec3f(0, 1, 0));
// Setup Vulkan
if(!glfwVulkanSupported())
{
printf("GLFW: Vulkan Not Supported\n");
return 1;
}
// Set up some basics for the sample, such as the logging file
NVPSystem system(argv[0], PROJECT_NAME);
// Search path for shaders and other media
defaultSearchPaths = {
PROJECT_ABSDIRECTORY,
PROJECT_ABSDIRECTORY "../",
NVPSystem::exePath() + std::string(PROJECT_RELDIRECTORY),
NVPSystem::exePath() + std::string(PROJECT_RELDIRECTORY) + std::string("../"),
};
// Feature structures for the device extensions enabled below
vk::PhysicalDeviceDescriptorIndexingFeaturesEXT indexFeature;
vk::PhysicalDeviceScalarBlockLayoutFeaturesEXT scalarFeature;
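// These feature structs back the extensions used by the fragment shader: descriptor
// indexing for the unsized arrays of material buffers and texture samplers
// (GL_EXT_nonuniform_qualifier) and scalar block layout for the `scalar` buffer layouts
// (GL_EXT_scalar_block_layout).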
// Requesting Vulkan extensions and layers
nvvkpp::ContextCreateInfo contextInfo;
contextInfo.addInstanceLayer("VK_LAYER_LUNARG_monitor", true);
contextInfo.addInstanceExtension(VK_KHR_SURFACE_EXTENSION_NAME);
#ifdef _WIN32
contextInfo.addInstanceExtension(VK_KHR_WIN32_SURFACE_EXTENSION_NAME);
#else
contextInfo.addInstanceExtension(VK_KHR_XLIB_SURFACE_EXTENSION_NAME);
contextInfo.addInstanceExtension(VK_KHR_XCB_SURFACE_EXTENSION_NAME);
#endif
contextInfo.addInstanceExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
contextInfo.addDeviceExtension(VK_KHR_SWAPCHAIN_EXTENSION_NAME);
contextInfo.addDeviceExtension(VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME);
contextInfo.addDeviceExtension(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
contextInfo.addDeviceExtension(VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME, false, &indexFeature);
contextInfo.addDeviceExtension(VK_EXT_SCALAR_BLOCK_LAYOUT_EXTENSION_NAME, false, &scalarFeature);
// Creating Vulkan base application
nvvkpp::Context vkctx{};
vkctx.initInstance(contextInfo);
// Find all compatible devices
auto compatibleDevices = vkctx.getCompatibleDevices(contextInfo);
assert(!compatibleDevices.empty());
// Use a compatible device
vkctx.initDevice(compatibleDevices[0], contextInfo);
// Create example
HelloVulkan helloVk;
// The window needs to be opened to get the surface on which we will draw
const vk::SurfaceKHR surface = helloVk.getVkSurface(vkctx.m_instance, window);
vkctx.setGCTQueueWithPresent(surface);
helloVk.setup(vkctx.m_device, vkctx.m_physicalDevice, vkctx.m_queueGCT.familyIndex);
helloVk.createSurface(surface, SAMPLE_WIDTH, SAMPLE_HEIGHT);
helloVk.createDepthBuffer();
helloVk.createRenderPass();
helloVk.createFrameBuffers();
// Setup Imgui
helloVk.initGUI(0); // Using sub-pass 0
// Creation of the example
helloVk.loadModel(nvh::findFile("media/scenes/cube_multi.obj", defaultSearchPaths));
helloVk.createOffscreenRender();
helloVk.createDescriptorSetLayout();
helloVk.createGraphicsPipeline();
helloVk.createUniformBuffer();
helloVk.createSceneDescriptionBuffer();
helloVk.updateDescriptorSet();
helloVk.createPostDescriptor();
helloVk.createPostPipeline();
helloVk.updatePostDescriptorSet();
nvmath::vec4f clearColor = nvmath::vec4f(1, 1, 1, 1.00f);
helloVk.setupGlfwCallbacks(window);
ImGui_ImplGlfw_InitForVulkan(window, true);
// Main loop
while(!glfwWindowShouldClose(window))
{
glfwPollEvents();
if(helloVk.isMinimized())
continue;
// Start the Dear ImGui frame
ImGui_ImplGlfw_NewFrame();
ImGui::NewFrame();
// Updating camera buffer
helloVk.updateUniformBuffer();
// Show UI window.
if(1 == 1)
{
ImGui::ColorEdit3("Clear color", reinterpret_cast<float*>(&clearColor));
renderUI(helloVk);
ImGui::Text("Application average %.3f ms/frame (%.1f FPS)", 1000.0f / ImGui::GetIO().Framerate, ImGui::GetIO().Framerate);
ImGui::Render();
}
// Start rendering the scene
helloVk.prepareFrame();
// Start command buffer of this frame
auto curFrame = helloVk.getCurFrame();
const vk::CommandBuffer& cmdBuff = helloVk.getCommandBuffers()[curFrame];
cmdBuff.begin({vk::CommandBufferUsageFlagBits::eOneTimeSubmit});
// Clearing screen
vk::ClearValue clearValues[2];
clearValues[0].setColor(nvvkpp::util::clearColor(clearColor));
clearValues[1].setDepthStencil({1.0f, 0});
// Offscreen render pass
{
vk::RenderPassBeginInfo offscreenRenderPassBeginInfo;
offscreenRenderPassBeginInfo.setClearValueCount(2);
offscreenRenderPassBeginInfo.setPClearValues(clearValues);
offscreenRenderPassBeginInfo.setRenderPass(helloVk.m_offscreenRenderPass);
offscreenRenderPassBeginInfo.setFramebuffer(helloVk.m_offscreenFramebuffer);
offscreenRenderPassBeginInfo.setRenderArea({{}, helloVk.getSize()});
// Rendering Scene
cmdBuff.beginRenderPass(offscreenRenderPassBeginInfo, vk::SubpassContents::eInline);
helloVk.rasterize(cmdBuff);
cmdBuff.endRenderPass();
}
// 2nd rendering pass: tone mapper, UI
{
vk::RenderPassBeginInfo postRenderPassBeginInfo;
postRenderPassBeginInfo.setClearValueCount(2);
postRenderPassBeginInfo.setPClearValues(clearValues);
postRenderPassBeginInfo.setRenderPass(helloVk.getRenderPass());
postRenderPassBeginInfo.setFramebuffer(helloVk.getFramebuffers()[curFrame]);
postRenderPassBeginInfo.setRenderArea({{}, helloVk.getSize()});
cmdBuff.beginRenderPass(postRenderPassBeginInfo, vk::SubpassContents::eInline);
// Rendering tonemapper
helloVk.drawPost(cmdBuff);
// Rendering UI
ImGui::RenderDrawDataVK(cmdBuff, ImGui::GetDrawData());
cmdBuff.endRenderPass();
}
// Submit for display
cmdBuff.end();
helloVk.submitFrame();
}
// Cleanup
helloVk.getDevice().waitIdle();
helloVk.destroyResources();
helloVk.destroy();
vkctx.m_instance.destroySurfaceKHR(surface);
vkctx.deinit();
glfwDestroyWindow(window);
glfwTerminate();
return 0;
}


@@ -0,0 +1,79 @@
#version 450
#extension GL_ARB_separate_shader_objects : enable
#extension GL_EXT_nonuniform_qualifier : enable
#extension GL_GOOGLE_include_directive : enable
#extension GL_EXT_scalar_block_layout : enable
#include "wavefront.glsl"
layout(push_constant) uniform shaderInformation
{
vec3 lightPosition;
uint instanceId;
float lightIntensity;
int lightType;
}
pushC;
// clang-format off
// Incoming
//layout(location = 0) flat in int matIndex;
layout(location = 1) in vec2 fragTexCoord;
layout(location = 2) in vec3 fragNormal;
layout(location = 3) in vec3 viewDir;
layout(location = 4) in vec3 worldPos;
// Outgoing
layout(location = 0) out vec4 outColor;
// Buffers
layout(binding = 1, scalar) buffer MatColorBufferObject { WaveFrontMaterial m[]; } materials[];
layout(binding = 2, scalar) buffer ScnDesc { sceneDesc i[]; } scnDesc;
layout(binding = 3) uniform sampler2D[] textureSamplers;
layout(binding = 4, scalar) buffer MatIndex { int i[]; } matIdx[];
// clang-format on
void main()
{
// Object of this instance
int objId = scnDesc.i[pushC.instanceId].objId;
// Material of the object
int matIndex = matIdx[objId].i[gl_PrimitiveID];
WaveFrontMaterial mat = materials[objId].m[matIndex];
vec3 N = normalize(fragNormal);
// Vector toward light
vec3 L;
float lightIntensity = pushC.lightIntensity;
if(pushC.lightType == 0)
{
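// Point light: the intensity falls off with the square of the distance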
vec3 lDir = pushC.lightPosition - worldPos;
float d = length(lDir);
lightIntensity = pushC.lightIntensity / (d * d);
L = normalize(lDir);
}
else
{
L = normalize(pushC.lightPosition - vec3(0));
}
// Diffuse
vec3 diffuse = computeDiffuse(mat, L, N);
if(mat.textureId >= 0)
{
int txtOffset = scnDesc.i[pushC.instanceId].txtOffset;
uint txtId = txtOffset + mat.textureId;
vec3 diffuseTxt = texture(textureSamplers[txtId], fragTexCoord).xyz;
diffuse *= diffuseTxt;
}
// Specular
vec3 specular = computeSpecular(mat, viewDir, L, N);
// Result
outColor = vec4(lightIntensity * (diffuse + specular), 1);
}


@@ -0,0 +1,15 @@
#version 450
layout (location = 0) out vec2 outUV;
out gl_PerVertex
{
vec4 gl_Position;
};
void main()
{
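// Full-screen triangle: indices 0,1,2 yield UVs (0,0), (2,0), (0,2), i.e. clip-space
// positions (-1,-1), (3,-1), (-1,3), covering the whole viewport without a vertex buffer.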
outUV = vec2((gl_VertexIndex << 1) & 2, gl_VertexIndex & 2);
gl_Position = vec4(outUV * 2.0f - 1.0f, 1.0f, 1.0f);
}


@@ -0,0 +1,18 @@
#version 450
layout(location = 0) in vec2 outUV;
layout(location = 0) out vec4 fragColor;
layout(set = 0, binding = 0) uniform sampler2D noisyTxt;
layout(push_constant) uniform shaderInformation
{
float aspectRatio;
}
pushc;
void main()
{
vec2 uv = outUV;
float gamma = 1. / 2.2;
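// The offscreen image is linear (RGBA 32-bit float); a 1/2.2 power is a simple
// approximation of the linear-to-sRGB conversion for display.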
fragColor = pow(texture(noisyTxt, uv).rgba, vec4(gamma));
}


@@ -0,0 +1,61 @@
#version 450
#extension GL_ARB_separate_shader_objects : enable
#extension GL_EXT_scalar_block_layout : enable
#extension GL_GOOGLE_include_directive : enable
#include "wavefront.glsl"
// clang-format off
layout(binding = 2, set = 0, scalar) buffer ScnDesc { sceneDesc i[]; } scnDesc;
// clang-format on
layout(binding = 0) uniform UniformBufferObject
{
mat4 view;
mat4 proj;
mat4 viewI;
}
ubo;
layout(push_constant) uniform shaderInformation
{
vec3 lightPosition;
uint instanceId;
float lightIntensity;
int lightType;
}
pushC;
layout(location = 0) in vec3 inPosition;
layout(location = 1) in vec3 inNormal;
layout(location = 2) in vec3 inColor;
layout(location = 3) in vec2 inTexCoord;
//layout(location = 0) flat out int matIndex;
layout(location = 1) out vec2 fragTexCoord;
layout(location = 2) out vec3 fragNormal;
layout(location = 3) out vec3 viewDir;
layout(location = 4) out vec3 worldPos;
out gl_PerVertex
{
vec4 gl_Position;
};
void main()
{
mat4 objMatrix = scnDesc.i[pushC.instanceId].transfo;
mat4 objMatrixIT = scnDesc.i[pushC.instanceId].transfoIT;
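// transfoIT is the inverse-transpose of the instance transform, used to transform
// normals correctly even under non-uniform scaling.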
vec3 origin = vec3(ubo.viewI * vec4(0, 0, 0, 1));
worldPos = vec3(objMatrix * vec4(inPosition, 1.0));
viewDir = vec3(worldPos - origin);
fragTexCoord = inTexCoord;
fragNormal = vec3(objMatrixIT * vec4(inNormal, 0.0));
// matIndex = inMatID;
gl_Position = ubo.proj * ubo.view * vec4(worldPos, 1.0);
}


@@ -0,0 +1,57 @@
struct Vertex
{
vec3 pos;
vec3 nrm;
vec3 color;
vec2 texCoord;
};
struct WaveFrontMaterial
{
vec3 ambient;
vec3 diffuse;
vec3 specular;
vec3 transmittance;
vec3 emission;
float shininess;
float ior; // index of refraction
float dissolve; // 1 == opaque; 0 == fully transparent
int illum; // illumination model (see http://www.fileformat.info/format/material/)
int textureId;
};
struct sceneDesc
{
int objId;
int txtOffset;
mat4 transfo;
mat4 transfoIT;
};
vec3 computeDiffuse(WaveFrontMaterial mat, vec3 lightDir, vec3 normal)
{
// Lambertian
float dotNL = max(dot(normal, lightDir), 0.0);
vec3 c = mat.diffuse * dotNL;
if(mat.illum >= 1)
c += mat.ambient;
return c;
}
vec3 computeSpecular(WaveFrontMaterial mat, vec3 viewDir, vec3 lightDir, vec3 normal)
{
if(mat.illum < 2)
return vec3(0);
// Compute specular only if not in shadow
const float kPi = 3.14159265;
const float kShininess = max(mat.shininess, 4.0);
// Specular
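// Phong normalization factor (n + 2) / (2 * pi): keeps the reflected energy roughly
// constant as the shininess exponent grows.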
const float kEnergyConservation = (2.0 + kShininess) / (2.0 * kPi);
vec3 V = normalize(-viewDir);
vec3 R = reflect(-lightDir, normal);
float specular = kEnergyConservation * pow(max(dot(V, R), 0.0), kShininess);
return vec3(mat.specular * specular);
}