Update on Overleaf.

commit cd463f4a54
uxwmp, 2023-06-15 09:26:12 +00:00 (committed by node)
56 changed files with 1974 additions and 0 deletions

[Binary files (images and videos) added; contents not shown in the diff]

presentation/modules/basic_terms.tex
@@ -0,0 +1,78 @@
\section{Basic terms}
\begin{frame}
\centering
\Huge
Basic terms
\end{frame}
\subsection{Raytracing}
\begin{frame}{Raytracing}
\begin{center}
\begin{minipage}{0.4\linewidth}
\flushleft Turn 3D model...
\includegraphics[width=\linewidth]{presentation/img/proseminar_workbench.png}
\end{minipage}
\pause
$\rightarrow$
\hspace{10mm}
\begin{minipage}{0.4\linewidth}
\flushright ...into a physically accurate image
\includegraphics[width=0.8\linewidth]{presentation/img/proseminar_cycles.png}
\end{minipage}
\end{center}
\end{frame}
\begin{frame}{Raytracing}
\begin{block}{Task}
\begin{itemize}
\item Determine the color of each pixel in the image
\item The color (light intensity) is given by the rendering equation:\\
\[
\underbrace{I(x,x^{\prime})}_{\text{Light transport intensity from }x \text{ to } x^{\prime}}=
\underbrace{g(x,x')}_{\text{geometry term}}
\left[
\underbrace{\epsilon(x,x')}_{\text{emissive light}}+\underbrace{\int_S \rho(x,x',x'')I(x',x'')dx''}_{\text{light scattered towards the point}}
\right]
\]
\begin{itemize}
\item Attempts to capture the physical light transport phenomenon in a single equation
\end{itemize}
[\cite{ACM:rendering_equation}]
\item Problem: this equation has no analytic solution\\
$\rightarrow$ solve it numerically using Monte Carlo integration (i.e. raytracing)
\end{itemize}
\end{block}
\end{frame}
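% Hedged note (an added aside, not from the original slides): the ``solve
% numerically'' step can be made concrete with the standard Monte Carlo
% estimator of the scattering integral, drawing $N$ samples $x''_k$ from a
% sampling density $p$:
% \[
%   \int_S \rho(x,x',x'')I(x',x'')\,dx''
%   \approx \frac{1}{N}\sum_{k=1}^{N}\frac{\rho(x,x',x''_k)\,I(x',x''_k)}{p(x''_k)}
% \]
% The estimator is unbiased whenever $p>0$ wherever the integrand is nonzero.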
\begin{frame}{Raytracing}
\setbeamercovered{transparent}
\begin{block}{Principle (simplified)}
\begin{itemize}
\item Cast rays from the camera towards the scene
\item Calculate geometry intersection
\item Trace rays from intersection point to all light sources
\item Calculate the color from emission and the sampled reflected light, taking geometry (e.g. occlusion) into account
\item Have the ray ``bounce around'' to account for global illumination
\end{itemize}
\end{block}
\pause
\begin{block}{Variables}
The scene depends on many variables:
\begin{itemize}
\item Material properties (roughness, emission strength, color, transmission, ...)
\item Vertex positions
\end{itemize}
\end{block}
\end{frame}
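% A minimal sketch of the simplified principle above, as commented-out
% Python-style pseudocode (an illustrative assumption, not the paper's code;
% intersect, is_visible, brdf and sample_bounce are hypothetical helpers):
%
% def radiance(ray, scene, depth=0, max_depth=4):
%     hit = intersect(ray, scene)                  # geometry intersection
%     if hit is None or depth == max_depth:
%         return scene.background
%     color = hit.material.emission                # emissive term
%     for light in scene.lights:                   # trace rays to all lights
%         if is_visible(hit.point, light, scene):  # occlusion test
%             color += brdf(hit, light.direction) * light.intensity
%     bounce, pdf = sample_bounce(hit)             # "bounce around" for GI
%     color += brdf(hit, bounce.direction) * radiance(bounce, scene, depth + 1) / pdf
%     return color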
\begin{frame}{Image synthesis - optical phenomena}
\centering
\includegraphics[width=0.38\linewidth]{presentation/img/proseminar_cycles_annotated.png}
\end{frame}
\subsection{Differentiable rendering}
\begin{frame}{Differentiable rendering}
\begin{itemize}
\item Given: a function mapping a 3D scene to a real number (e.g. an error function)
\item Target: calculate the gradient of that function
\item Required: differentiation with respect to any scene parameter
\end{itemize}
\end{frame}
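% Hedged note (an added aside): written out, the task is to compute, for a
% renderer $R$ mapping scene parameters $\theta$ (vertices, materials,
% lights, camera) to an image and a scalar function $L$ on images,
% \[
%   \nabla_\theta\, L\bigl(R(\theta)\bigr)
%   = \frac{\partial L}{\partial R}\,\frac{\partial R}{\partial\theta},
% \]
% i.e. the renderer's Jacobian composed with the loss gradient via the chain rule.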

presentation/modules/motivation.tex
@@ -0,0 +1,104 @@
\section{Motivation - why differentiable rendering is important}
\begin{frame}
\centering
\Huge
Motivation - why differentiable rendering is important
\end{frame}
\begin{frame}{Importance of differentiable rendering}
\begin{block}{Examples for Applications}
\begin{itemize}
\item Learning-based Inverse Rendering of Complex Indoor Scenes
with Differentiable Monte Carlo Raytracing [\cite{ACM:inverse_rendering}]\\
$\rightarrow$ Inverse rendering
\item Generating Semantic Adversarial Examples with Differentiable Rendering [\cite{DBLP:journals/corr/abs-1910-00727}]\\
$\rightarrow$ Machine learning
\item Real-Time Lighting Estimation for Augmented Reality [\cite{IEEE:AR_lighting_estimation}]\\
$\rightarrow$ Realistic real time shading for AR applications
\item Acoustic Camera Pose Refinement [\cite{IEEE:Ac_cam_refinment}]\\
$\rightarrow$ Optimize six degrees of freedom for acoustic underwater cameras
\end{itemize}
\end{block}
\end{frame}
\subsection{Inverse rendering}
\begin{frame}{Inverse rendering}
\begin{itemize}
\item Conventional rendering: synthesize an image from a 3D scene
\item Inverse rendering solves the inverse problem: reconstruct a 3D scene from images
\item 3D modelling by hand can be hard and time-consuming
\item Approach:
\begin{itemize}
\item Approximate the 3D scene (often very coarse)
\item Render the approximation differentiably
\item Calculate the error between the render and the images
\item Use Adam or a comparable gradient-descent algorithm to minimize this error
\end{itemize}
\end{itemize}
\end{frame}
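% A hedged sketch of the optimization loop above in PyTorch-style Python (an
% illustration, not this paper's code; render, target and n_params are
% hypothetical stand-ins for a differentiable renderer and its inputs):
%
% import torch
% params = torch.randn(n_params, requires_grad=True)   # coarse initial scene
% optimizer = torch.optim.Adam([params], lr=1e-2)
% for step in range(500):
%     optimizer.zero_grad()
%     image = render(params)                  # differentiable forward render
%     loss = (image - target).abs().mean()    # error between render and photo
%     loss.backward()                         # gradients w.r.t. scene params
%     optimizer.step()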
\begin{frame}{Inverse rendering - current example}
\centering
\includemedia[
width=0.62\linewidth,height=0.35\linewidth,
activate=onclick,
addresource=proseminar_chair.mp4,
playbutton=fancy,
transparent,
passcontext,
flashvars={
source=proseminar_chair.mp4
&autoPlay=true
}
]{}{VPlayer.swf}
\\
Source: \cite{ACM:inverse_rendering_signed_distance_function}
\end{frame}
\subsection{Adversarial image generation}
\begin{frame}{Adversarial image generation}
\begin{center}
\begin{minipage}{0.4\linewidth}
\begin{itemize}
\item Common problem in machine learning: classification\\
$\implies$ Given a set of labels and a set of data, assign a label to each element in the dataset
\item Labeled data is needed to train the classifier network
\end{itemize}
\pause
\vspace{15mm}
Image source: Auth0, \href{https://auth0.com/blog/captcha-can-ruin-your-ux-here-s-how-to-use-it-right/}{CAPTCHA Can Ruin Your UX. Here's How to Use It Right}
\end{minipage}
\begin{minipage}{0.5\linewidth}
\centering
\includegraphics[width=0.5\linewidth]{presentation/img/recaptcha_example.png}
\end{minipage}
\end{center}
\end{frame}
\begin{frame}{Adversarial image generation}
\begin{itemize}
\item Problem: Labeling training data is tedious and expensive\\
$\implies$ We want to automatically generate training data
\item One solution: generative adversarial networks, which let two neural nets ``compete'': a generator and a classifier (e.g. AutoGAN [\cite{DBLP:AutoGAN}])\\
$\implies$ Impossible to make semantic changes to the image (e.g. lighting), since no knowledge of the 3D scene exists
\end{gr_placeholder_never_emitted}
\item Different solution: generate the image using differentiable raytracing and use gradient descent to optimize the resulting image to fall into a specific class\\
$\implies$ Scene parameters can be manipulated
\end{itemize}
\end{frame}
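% Hedged note (an added aside): structurally this reuses the inverse-rendering
% loop; only the objective changes, e.g. minimizing
%     loss = -classifier(render(semantic_params))[target_class]
% pushes the rendered image into the target class while touching only
% semantic scene parameters (classifier and render are hypothetical names).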
\begin{frame}{Adversarial image generation - example [\cite{DBLP:journals/corr/abs-1910-00727}]}
\begin{center}
\begin{figure}
\begin{minipage}{0.45\linewidth}
\includegraphics[width=\linewidth]{presentation/img/adversarial_rendering_results/correct_car.png}
\includegraphics[width=\linewidth]{presentation/img/adversarial_rendering_results/correct_pedestrian.png}
\end{minipage}
\begin{minipage}{0.45\linewidth}
\includegraphics[width=\linewidth]{presentation/img/adversarial_rendering_results/incorrect_car.png}
\includegraphics[width=\linewidth]{presentation/img/adversarial_rendering_results/incorrect_pedestrian.png}
\end{minipage}
\centering
\caption{Left: Original images; features are correctly identified.\\
Right: Adversarial examples; the silver car is not recognized and pedestrians are detected where there are none. Only semantic features (color, position, rotation) have been changed.}
\label{fig:adv_img_example}
\end{figure}
\end{center}
\end{frame}

presentation/modules/problems.tex
@@ -0,0 +1,137 @@
\section{Problems}
\begin{frame}
\centering
\Huge
Problems
\end{frame}
\subsection{Why differentiable rendering is hard}
\begin{frame}{Why differentiable rendering is hard}
\begin{itemize}
\item The rendering integral contains the geometry (visibility) term, which is not differentiable
\item The gradient of the visibility can contain Dirac delta terms, which have zero probability of being sampled correctly [\cite{ACM:diracdelta},\cite{ACM:diffable_raytracing}]
\item Differentiation with respect to certain scene parameters is possible, but we need to differentiate with respect to any scene parameter
\end{itemize}
\pause
\begin{center}
\begin{minipage}{0.4\linewidth}
\begin{figure}
\centering
\begin{minipage}{0.5\linewidth}
\begin{tikzpicture}[domain=-0.5:2]
\draw[very thin,color=gray] (-0.1,-0.6) grid (2.1,2.1);
\draw[->] (-0.2,-0.5) -- (2.2,-0.5) node[right] {$\omega$};
\draw[->] (0,-0.7) -- (0,2.2) node[above] {$V(x,\omega)$};
\draw (0,0) -- (1,0) plot coordinates {(0,0) (1,0)}[color=red];
\draw (1,1) -- (2,1) plot coordinates {(1,1) (2,1)}[color=red];
\draw (0,0) node[left] {$0$};
\draw (0,1) node[left] {$1$};
\draw (1,-0.7) node[below] {$\omega_0$};
\end{tikzpicture}
\end{minipage}
\hspace{3mm}
\begin{minipage}{0.4\linewidth}
\caption{Visibility of a point $x$ with respect to $\omega$. Observe the discontinuity at $\omega_0$.}
\label{fig:visibility}
\end{minipage}
\end{figure}
\end{minipage}
% second diagram
\begin{minipage}{0.5\linewidth}
\begin{figure}
\centering
\begin{minipage}{0.5\linewidth}
\begin{tikzpicture}[domain=-0.5:2]
\draw[very thin,color=gray] (-0.1,-0.6) grid (2.1,2.1);
\draw[->] (-0.2,-0.5) -- (2.2,-0.5) node[right] {$\omega$};
\draw[->] (0,-0.7) -- (0,2.2) node[above] {$\frac{\partial}{\partial\omega}V(x,\omega)$};
\draw (0,0) -- (2,0) plot coordinates {(0,0) (2,0)}[color=red];
\draw (0,0) node[left] {$0$};
\draw (0,1) node[left] {$1$};
\draw (0,2) node[left] {$\infty$};
\draw (1,2) node[color=red] {$\bullet$};
\draw (1,-0.7) node[below] {$\omega_0$};
\end{tikzpicture}
\end{minipage}
\begin{minipage}{0.4\linewidth}
\caption{Derivative of the left graph with respect to $\omega$. The discontinuity at $\omega_0$ in the left graph leads to a Dirac delta spike at $\omega_0$ in the derivative.}
\label{fig:dirac-delta-spike}
\end{minipage}
\end{figure}
\end{minipage}
\end{center}
\end{frame}
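% Hedged note tying the two graphs together (an added aside): with the
% Heaviside step $H$, the left graph is $V(x,\omega)=H(\omega-\omega_0)$, so
% \[
%   \frac{\partial}{\partial\omega}V(x,\omega)=\delta(\omega-\omega_0),
% \]
% a Dirac delta that point sampling in $\omega$ hits with probability zero,
% which is why edges must be sampled explicitly.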
\begin{frame}{Geometry term}
\centering
\begin{minipage}{0.4\linewidth}
\includegraphics[width=\linewidth]{presentation/img/blockers.png}
\end{minipage}
\hspace{15mm}
\begin{minipage}{0.4\linewidth}
\includegraphics[width=\linewidth]{presentation/img/blockers_diff.png}
\end{minipage}
\end{frame}
\subsection{Former methods}
\begin{frame}{Former methods}
\begin{block}{Previous differentiable renderers considered by this paper}
\begin{itemize}
\item OpenDR [\cite{DBLP:OpenDR}]
\item Neural 3D Mesh Renderer [\cite{DBLP:Neural3DKatoetal}]
\item Both are rasterization-based (first render the image using rasterization, then approximate the gradients from the resulting color buffer)
\item Focused on speed rather than precision
\end{itemize}
\end{block}
\end{frame}
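% Hedged note on ``approximate the gradients from the color buffer'' (an
% added aside, paraphrasing, not these renderers' exact method): differences
% between neighboring pixels of the finished image stand in for the true
% derivatives, in essence a screen-space difference over pixel coordinate $p$,
% \[
%   \frac{\partial I}{\partial p} \approx I(p+1)-I(p),
% \]
% so effects that do not appear as color differences in the buffer (notably
% visibility changes at silhouette edges) are missed.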
\begin{frame}{Former methods - visualization}
\begin{figure}
\begin{minipage}{0.12\linewidth}
\begin{figure}
\centering
\includegraphics[width=\linewidth]{presentation/img/comparisons/plane.png}
\caption{planar scene}
\label{fig:planar-scene}
\end{figure}
\end{minipage}
\hspace{2mm}
\begin{minipage}{0.12\linewidth}
\begin{figure}
\centering
\includegraphics[width=\linewidth]{presentation/img/comparisons/opendr.png}
\caption{OpenDR}
\label{fig:grad-OpenDR}
\end{figure}
\end{minipage}
\hspace{2mm}
\begin{minipage}{0.12\linewidth}
\begin{figure}
\centering
\includegraphics[width=\linewidth]{presentation/img/comparisons/Neural.png}
\caption{Neural}
\label{fig:grad-Neural3DMesh}
\end{figure}
\end{minipage}
\hspace{2mm}
\begin{minipage}{0.12\linewidth}
\begin{figure}
\centering
\includegraphics[width=\linewidth]{presentation/img/comparisons/ours.png}
\caption{this paper}
\label{fig:grad-this}
\end{figure}
\end{minipage}
\hspace{4mm}
\begin{minipage}{0.3\linewidth}
\caption{
%A plane lit by a point light source. Images visualize a gradient with respect to the plane moving right. Since the light source remains static the gradient should only be $\ne 0$ at the boundaries. OpenDR and Neural are not able to correctly calculate the gradients as they are based on color buffer differences.\\
Visualizations of gradients calculated by different differentiable renderers.\\
Images: \cite{ACM:diffable_raytracing}
}
\label{fig:grad-explanation}
\end{minipage}
\end{figure}
\pause
$\implies$ Problems arise at the edges and from the approximation via color buffers
\end{frame}

presentation/modules/this_method.tex
@@ -0,0 +1,226 @@
\section{This method}
\begin{frame}
\centering
\Huge
This method
\end{frame}
\subsection{Edge sampling}
\subsection{Hierarchical edge sampling}
\subsection{Conclusion - what can this method do?}
% talk about limitations here!
\begin{frame}{Inverse rendering - Results in this paper}
\begin{block}{Setup in this paper}
\begin{itemize}
\item Fit camera pose, material parameters and light source intensity
\item Scene: strong indirect illumination and non-Lambertian materials
\item Initial guess: assign almost all objects a white color and an arbitrary camera pose
\item 177 parameters in total
\item Absolute difference as loss function, optimized with Adam
\item Start at a resolution of $64\times 64$ and increase linearly to $512\times 512$ in 8 steps\\
$\implies$ Avoids getting stuck in local minima of the loss function
\end{itemize}
\end{block}
\end{frame}
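% Hedged sketch of the coarse-to-fine schedule above as commented-out Python
% (resolutions follow the 8 linear steps stated on the slide; steps_per_level,
% downsample, render, params and optimizer are illustrative assumptions):
%
% for res in [64, 128, 192, 256, 320, 384, 448, 512]:  # 8 linear steps
%     target_res = downsample(target, res)
%     for step in range(steps_per_level):
%         optimizer.zero_grad()
%         image = render(params, resolution=res)
%         loss = (image - target_res).abs().mean()     # absolute difference
%         loss.backward()
%         optimizer.step()                             # Adam update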
\begin{frame}{Inverse rendering - Results in this paper}
\begin{center}
\begin{minipage}{0.25\linewidth}
\begin{figure}
\centering
\includegraphics[width=\linewidth]{presentation/img/results/guess.png}
\caption{Initial guess}
\label{fig:results-guess}
\end{figure}
\end{minipage}
\hspace{2mm}
\begin{minipage}{0.25\linewidth}
\begin{figure}
\centering
\includegraphics[width=\linewidth]{presentation/img/results/photo.png}
\caption{Target (photograph)}
\label{fig:results-target}
\end{figure}
\end{minipage}
\hspace{2mm}
\begin{minipage}{0.25\linewidth}
\begin{figure}
\centering
\includegraphics[width=\linewidth]{presentation/img/results/result.png}
\caption{Optimized image}
\label{fig:results-optimized}
\end{figure}
\end{minipage}
\end{center}
\end{frame}
\begin{frame}
\begin{figure}
\centering
\begin{minipage}{0.14\linewidth}
\includegraphics[width=\linewidth]{presentation/img/render_optimization/triangles/img-027.png}
\end{minipage}
\begin{minipage}{0.14\linewidth}
\includegraphics[width=\linewidth]{presentation/img/render_optimization/shade/img-028.png}
\end{minipage}
\begin{minipage}{0.14\linewidth}
\includegraphics[width=\linewidth]{presentation/img/render_optimization/glossy/img-029.png}
\end{minipage}
\begin{minipage}{0.14\linewidth}
\includegraphics[width=\linewidth]{presentation/img/render_optimization/glossy_recv/img-030.png}
\end{minipage}
\begin{minipage}{0.14\linewidth}
\includegraphics[width=\linewidth]{presentation/img/render_optimization/specular/img-031.png}
\end{minipage}
\begin{minipage}{0.14\linewidth}
\includegraphics[width=\linewidth]{presentation/img/render_optimization/global_illumination/img-032.png}
\end{minipage}
\begin{minipage}{0.1\linewidth}
\caption{initial guess}
\label{fig:grid_init_guess}
\end{minipage}
\end{figure}
%%%%%%%%%%%%%%%%%%%%%%%%% second row %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{figure}
\centering
\begin{minipage}{0.14\linewidth}
\includegraphics[width=\linewidth]{presentation/img/render_optimization/triangles/img-033.png}
\end{minipage}
\begin{minipage}{0.14\linewidth}
\includegraphics[width=\linewidth]{presentation/img/render_optimization/shade/img-034.png}
\end{minipage}
\begin{minipage}{0.14\linewidth}
\includegraphics[width=\linewidth]{presentation/img/render_optimization/glossy/img-035.png}
\end{minipage}
\begin{minipage}{0.14\linewidth}
\includegraphics[width=\linewidth]{presentation/img/render_optimization/glossy_recv/img-036.png}
\end{minipage}
\begin{minipage}{0.14\linewidth}
\includegraphics[width=\linewidth]{presentation/img/render_optimization/specular/img-037.png}
\end{minipage}
\begin{minipage}{0.14\linewidth}
\includegraphics[width=\linewidth]{presentation/img/render_optimization/global_illumination/img-038.png}
\end{minipage}
\begin{minipage}{0.1\linewidth}
\caption{target images}
\label{fig:grid_target}
\end{minipage}
\end{figure}
%%%%%%%%%%%%%%%%%%%%%%%%% third row %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{figure}
\centering
\begin{minipage}{0.15\linewidth}
\begin{figure}
\centering
\includegraphics[width=\linewidth]{presentation/img/render_optimization/triangles/img-039.png}
\caption{primary occlusion}
\end{figure}
\end{minipage}
\begin{minipage}{0.14\linewidth}
\begin{figure}
\centering
\includegraphics[width=\linewidth]{presentation/img/render_optimization/shade/img-040.png}
\caption{shadow}
\end{figure}
\end{minipage}
\begin{minipage}{0.14\linewidth}
\begin{figure}
\centering
\includegraphics[width=\linewidth]{presentation/img/render_optimization/glossy/img-041.png}
\caption{glossy}
\end{figure}
\end{minipage}
\begin{minipage}{0.14\linewidth}
\begin{figure}
\centering
\includegraphics[width=\linewidth]{presentation/img/render_optimization/glossy_recv/img-042.png}
\caption{glossy receiver}
\end{figure}
\end{minipage}
\begin{minipage}{0.14\linewidth}
\begin{figure}
\centering
\includegraphics[width=\linewidth]{presentation/img/render_optimization/specular/img-043.png}
\caption{near-specular}
\end{figure}
\end{minipage}
\begin{minipage}{0.15\linewidth}
\begin{figure}
\centering
\includegraphics[width=\linewidth]{presentation/img/render_optimization/global_illumination/img-044.png}
\caption{global illumination}
\end{figure}
\end{minipage}
\begin{minipage}{0.08\linewidth}
\caption{optimized result}
\label{fig:grid_optimized}
\end{minipage}
\end{figure}
\end{frame}
\begin{frame}{Inverse rendering - example from this paper}
\centering
\begin{minipage}{0.19\linewidth}
\begin{figure}
\centering
\includegraphics[width=\linewidth]{presentation/img/teapot_video/teapot_init.png}
\vspace{0mm}
\caption{initial guess}
\label{fig:teapot_init}
\end{figure}
\end{minipage}
\begin{minipage}{0.19\linewidth}
\begin{figure}
\centering
\includegraphics[width=\linewidth]{presentation/img/teapot_video/teapot_init_diff.png}
\caption{difference\\
initial $\leftrightarrow$ target}
\label{fig:teapot_init_diff}
\end{figure}
\end{minipage}
\begin{minipage}{0.19\linewidth}
\begin{figure}
\centering
\includegraphics[width=\linewidth]{presentation/img/teapot_video/teapot_target.png}
\vspace{0mm}
\caption{target image}
\label{fig:teapot_target}
\end{figure}
\end{minipage}
\begin{minipage}{0.19\linewidth}
\begin{figure}
\centering
\includegraphics[width=\linewidth]{presentation/img/teapot_video/teapot_final_diff.png}
\caption{difference\\
final $\leftrightarrow$ target}
\label{fig:teapot_final_diff}
\end{figure}
\end{minipage}
\begin{minipage}{0.19\linewidth}
\begin{figure}
\centering
\includegraphics[width=\linewidth]{presentation/img/teapot_video/teapot_final.png}
\vspace{0mm}
\caption{final image}
\label{fig:teapot_final}
\end{figure}
\end{minipage}
\end{frame}
\begin{frame}{Inverse rendering - example from this paper}
\centering
\includemedia[
width=0.62\linewidth,height=0.35\linewidth,
activate=onclick,
addresource=teapot.mp4,
playbutton=fancy,
transparent,
passcontext,
flashvars={
source=teapot.mp4
&autoPlay=true
}
]{}{VPlayer.swf}
\\
All media in this section taken from \cite{ACM:diffable_raytracing}
\end{frame}

presentation.bib
@@ -0,0 +1,212 @@
@article{ACM:diffable_raytracing,
author = {Li, Tzu-Mao and Aittala, Miika and Durand, Fr\'{e}do and Lehtinen, Jaakko},
title = {Differentiable Monte Carlo Ray Tracing through Edge Sampling},
year = {2018},
issue_date = {December 2018},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
volume = {37},
number = {6},
issn = {0730-0301},
url = {https://doi.org/10.1145/3272127.3275109},
doi = {10.1145/3272127.3275109},
abstract = {Gradient-based methods are becoming increasingly important for computer graphics, machine learning, and computer vision. The ability to compute gradients is crucial to optimization, inverse problems, and deep learning. In rendering, the gradient is required with respect to variables such as camera parameters, light sources, scene geometry, or material appearance. However, computing the gradient of rendering is challenging because the rendering integral includes visibility terms that are not differentiable. Previous work on differentiable rendering has focused on approximate solutions. They often do not handle secondary effects such as shadows or global illumination, or they do not provide the gradient with respect to variables other than pixel coordinates.We introduce a general-purpose differentiable ray tracer, which, to our knowledge, is the first comprehensive solution that is able to compute derivatives of scalar functions over a rendered image with respect to arbitrary scene parameters such as camera pose, scene geometry, materials, and lighting parameters. The key to our method is a novel edge sampling algorithm that directly samples the Dirac delta functions introduced by the derivatives of the discontinuous integrand. We also develop efficient importance sampling methods based on spatial hierarchies. Our method can generate gradients in times running from seconds to minutes depending on scene complexity and desired precision.We interface our differentiable ray tracer with the deep learning library PyTorch and show prototype applications in inverse rendering and the generation of adversarial examples for neural networks.},
journal = {ACM Trans. Graph.},
articleno = {222},
numpages = {11},
keywords = {differentiable programming, inverse rendering, ray tracing}
}
@inproceedings{ACM:rendering_equation,
author = {Kajiya, James T.},
title = {The Rendering Equation},
year = {1986},
isbn = {0897911962},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
url = {https://doi.org/10.1145/15922.15902},
doi = {10.1145/15922.15902},
abstract = {We present an integral equation which generalizes a variety of known rendering algorithms. In the course of discussing a monte carlo solution we also present a new form of variance reduction, called Hierarchical sampling and give a number of elaborations shows that it may be an efficient new technique for a wide variety of monte carlo procedures. The resulting rendering algorithm extends the range of optical phenomena which can be effectively simulated.},
booktitle = {Proceedings of the 13th Annual Conference on Computer Graphics and Interactive Techniques},
pages = {143--150},
numpages = {8},
series = {SIGGRAPH '86}
}
@article{ACM:diracdelta,
author = {Ramamoorthi, Ravi and Mahajan, Dhruv and Belhumeur, Peter},
title = {A First-Order Analysis of Lighting, Shading, and Shadows},
year = {2007},
issue_date = {January 2007},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
volume = {26},
number = {1},
issn = {0730-0301},
url = {https://doi.org/10.1145/1189762.1189764},
doi = {10.1145/1189762.1189764},
abstract = {The shading in a scene depends on a combination of many factors---how the lighting varies spatially across a surface, how it varies along different directions, the geometric curvature and reflectance properties of objects, and the locations of soft shadows. In this article, we conduct a complete first-order or gradient analysis of lighting, shading, and shadows, showing how each factor separately contributes to scene appearance, and when it is important. Gradients are well-suited to analyzing the intricate combination of appearance effects, since each gradient term corresponds directly to variation in a specific factor. First, we show how the spatial and directional gradients of the light field change as light interacts with curved objects. This extends the recent frequency analysis of Durand et al. [2005] to gradients, and has many advantages for operations, like bump mapping, that are difficult to analyze in the Fourier domain. Second, we consider the individual terms responsible for shading gradients, such as lighting variation, convolution with the surface BRDF, and the object's curvature. This analysis indicates the relative importance of various terms, and shows precisely how they combine in shading. Third, we understand the effects of soft shadows, computing accurate visibility gradients, and generalizing previous work to arbitrary curved occluders. As one practical application, our visibility gradients can be directly used with conventional ray-tracing methods in practical gradient interpolation methods for efficient rendering. Moreover, our theoretical framework can be used to adaptively sample images in high-gradient regions for efficient rendering.},
journal = {ACM Trans. Graph.},
pages = {2es},
numpages = {21},
keywords = {reflectance, Gradients, Fourier analysis, shadows}
}
@ARTICLE{IEEE:AR_lighting_estimation,
author={Liu, Celong and Wang, Lingyu and Li, Zhong and Quan, Shuxue and Xu, Yi},
journal={IEEE Transactions on Visualization and Computer Graphics},
title={Real-Time Lighting Estimation for Augmented Reality via Differentiable Screen-Space Rendering},
year={2023},
volume={29},
number={4},
pages={2132-2145},
doi={10.1109/TVCG.2022.3141943}
}
@INPROCEEDINGS{IEEE:Ac_cam_refinment,
author={Wu, Chujie and Wang, Yusheng and Ji, Yonghoon and Tsuchiya, Hiroshi and Asama, Hajime and Yamashita, Atsushi},
booktitle={2023 IEEE/SICE International Symposium on System Integration (SII)},
title={Acoustic Camera Pose Refinement Using Differentiable Rendering},
year={2023},
pages={1-6},
abstract={Acoustic cameras, also known as 2D forward looking sonars, show high reliability in underwater environments as they can produce high resolution images even if the illumination is limited. However, due to the unique imaging principle, it is hard to estimate ground-truth-level extrinsic parameters even in a known 3D scene. Usually, there are methods such as direct measurements by rulers to acquire a rough pose with centimeter-level error. It is necessary to refine the pose to millimeter-level error. In this work, we develop a novel differentiable acoustic camera simulator, which can be applied for estimating accurate 6 degrees of freedom pose of the acoustic cameras. We calculate the derivatives of synthetic acoustic images with respect to camera pose, and further integrated them into a gradient-based optimization pipeline to refine the pose. To mitigate the domain gap between real and synthetic images, an unpaired image translation method is used to transfer the real image to synthetic domain. Experiments prove the feasibility of the proposed method. It outperforms methods of previous research for higher efficiency and accuracy.},
doi={10.1109/SII55687.2023.10039267},
ISSN={2474-2325},
}
@inproceedings{ACM:inverse_rendering,
author = {Zhu, Jingsen and Luan, Fujun and Huo, Yuchi and Lin, Zihao and Zhong, Zhihua and Xi, Dianbing and Wang, Rui and Bao, Hujun and Zheng, Jiaxiang and Tang, Rui},
title = {Learning-Based Inverse Rendering of Complex Indoor Scenes with Differentiable Monte Carlo Raytracing},
year = {2022},
isbn = {9781450394703},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
url = {https://doi.org/10.1145/3550469.3555407},
doi = {10.1145/3550469.3555407},
abstract = {Indoor scenes typically exhibit complex, spatially-varying appearance from global illumination, making inverse rendering a challenging ill-posed problem. This work presents an end-to-end, learning-based inverse rendering framework incorporating differentiable Monte Carlo raytracing with importance sampling. The framework takes a single image as input to jointly recover the underlying geometry, spatially-varying lighting, and photorealistic materials. Specifically, we introduce a physically-based differentiable rendering layer with screen-space ray tracing, resulting in more realistic specular reflections that match the input photo. In addition, we create a large-scale, photorealistic indoor scene dataset with significantly richer details like complex furniture and dedicated decorations. Further, we design a novel out-of-view lighting network with uncertainty-aware refinement leveraging hypernetwork-based neural radiance fields to predict lighting outside the view of the input photo. Through extensive evaluations on common benchmark datasets, we demonstrate superior inverse rendering quality of our method compared to state-of-the-art baselines, enabling various applications such as complex object insertion and material editing with high fidelity. Code and data will be made available at https://jingsenzhu.github.io/invrend},
booktitle = {SIGGRAPH Asia 2022 Conference Papers},
articleno = {6},
numpages = {8},
keywords = {inverse rendering, ray tracing, lighting estimation},
location = {Daegu, Republic of Korea},
series = {SA '22}
}
@article{DBLP:journals/corr/abs-1910-00727,
author = {Lakshya Jain and
Wilson Wu and
Steven Chen and
Uyeong Jang and
Varun Chandrasekaran and
Sanjit A. Seshia and
Somesh Jha},
title = {Generating Semantic Adversarial Examples with Differentiable Rendering},
journal = {CoRR},
volume = {abs/1910.00727},
year = {2019},
url = {http://arxiv.org/abs/1910.00727},
eprinttype = {arXiv},
eprint = {1910.00727},
timestamp = {Fri, 04 Oct 2019 12:28:06 +0200},
biburl = {https://dblp.org/rec/journals/corr/abs-1910-00727.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@article{ACM:inverse_rendering_signed_distance_function,
author = {Vicini, Delio and Speierer, S\'{e}bastien and Jakob, Wenzel},
title = {Differentiable Signed Distance Function Rendering},
year = {2022},
issue_date = {July 2022},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
volume = {41},
number = {4},
issn = {0730-0301},
url = {https://doi.org/10.1145/3528223.3530139},
doi = {10.1145/3528223.3530139},
abstract = {Physically-based differentiable rendering has recently emerged as an attractive new technique for solving inverse problems that recover complete 3D scene representations from images. The inversion of shape parameters is of particular interest but also poses severe challenges: shapes are intertwined with visibility, whose discontinuous nature introduces severe bias in computed derivatives unless costly precautions are taken. Shape representations like triangle meshes suffer from additional difficulties, since the continuous optimization of mesh parameters cannot introduce topological changes.One common solution to these difficulties entails representing shapes using signed distance functions (SDFs) and gradually adapting their zero level set during optimization. Previous differentiable rendering of SDFs did not fully account for visibility gradients and required the use of mask or silhouette supervision, or discretization into a triangle mesh.In this article, we show how to extend the commonly used sphere tracing algorithm so that it additionally outputs a reparameterization that provides the means to compute accurate shape parameter derivatives. At a high level, this resembles techniques for differentiable mesh rendering, though we show that the SDF representation admits a particularly efficient reparameterization that outperforms prior work. Our experiments demonstrate the reconstruction of (synthetic) objects without complex regularization or priors, using only a per-pixel RGB loss.},
journal = {ACM Trans. Graph.},
articleno = {125},
numpages = {18},
keywords = {signed distance functions, level set method, differentiable rendering, inverse rendering, sphere tracing, gradient-based optimization}
}
@article{DBLP:AutoGAN,
author = {Blerta Lindqvist and
Shridatt Sugrim and
Rauf Izmailov},
title = {AutoGAN: Robust Classifier Against Adversarial Attacks},
journal = {CoRR},
volume = {abs/1812.03405},
year = {2018},
url = {http://arxiv.org/abs/1812.03405},
eprinttype = {arXiv},
eprint = {1812.03405},
timestamp = {Tue, 01 Jan 2019 15:01:25 +0100},
biburl = {https://dblp.org/rec/journals/corr/abs-1812-03405.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@inproceedings{DBLP:OpenDR,
author = {Matthew M. Loper and
Michael J. Black},
editor = {David J. Fleet and
Tom{\'{a}}s Pajdla and
Bernt Schiele and
Tinne Tuytelaars},
title = {OpenDR: An Approximate Differentiable Renderer},
booktitle = {Computer Vision - {ECCV} 2014 - 13th European Conference, Zurich,
Switzerland, September 6-12, 2014, Proceedings, Part {VII}},
series = {Lecture Notes in Computer Science},
volume = {8695},
pages = {154--169},
publisher = {Springer},
year = {2014},
url = {https://doi.org/10.1007/978-3-319-10584-0\_11},
doi = {10.1007/978-3-319-10584-0\_11},
timestamp = {Tue, 14 May 2019 10:00:45 +0200},
biburl = {https://dblp.org/rec/conf/eccv/LoperB14.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@inproceedings{DBLP:Neural3DKatoetal,
author = {Hiroharu Kato and
Yoshitaka Ushiku and
Tatsuya Harada},
title = {Neural 3D Mesh Renderer},
booktitle = {2018 {IEEE} Conference on Computer Vision and Pattern Recognition,
{CVPR} 2018, Salt Lake City, UT, USA, June 18-22, 2018},
pages = {3907--3916},
publisher = {Computer Vision Foundation / {IEEE} Computer Society},
year = {2018},
url = {http://openaccess.thecvf.com/content\_cvpr\_2018/html/Kato\_Neural\_3D\_Mesh\_CVPR\_2018\_paper.html},
doi = {10.1109/CVPR.2018.00411},
timestamp = {Fri, 24 Mar 2023 00:02:56 +0100},
biburl = {https://dblp.org/rec/conf/cvpr/KatoUH18.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}

main .tex file
@@ -0,0 +1,54 @@
\documentclass[en]{sdqbeamer}
\usepackage[english]{babel}
\usepackage{pdfpages}
\usepackage{graphicx}
\usepackage{media9}
\addmediapath{img}
\addmediapath{img/teapot_video}
\usepackage{caption}
\captionsetup[figure]{labelformat=empty}
\usepackage{csquotes}
\titleimage{banner_2020_kit}
\newcommand{\presentationdate}{\emph{set date here}}
%% Group logo
\grouplogo{}
%% Group name and width (default: 50 mm)
\groupname{Proseminar ``differentiable programming''}
%\groupnamewidth{50mm}
% Start of the presentation
\title[Differentiable Monte Carlo Ray Tracing through Edge Sampling]{Differentiable Monte Carlo Ray Tracing through Edge Sampling}
\subtitle{Presentation for the proseminar ``differentiable programming''}
\author[Clemens Dautermann]{Clemens Dautermann}
\date[\presentationdate]{\presentationdate}
% Bibliography
\usepackage[citestyle=authoryear,bibstyle=numeric,hyperref,backend=biber]{biblatex}
\addbibresource{presentation.bib}
\bibhang1em
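% Hedged build note (an assumption, not from the repository): with biber as
% the backend and media9 for the embedded videos, a typical build would be
%   pdflatex <main>.tex && biber <main> && pdflatex <main>.tex && pdflatex <main>.tex
% and the Flash-based VPlayer.swf playback requires Adobe Acrobat/Reader.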
\begin{document}
% Title page
\KITtitleframe
\begin{frame}{Outline}
\tableofcontents
\end{frame}
\include{presentation/modules/basic_terms}
\include{presentation/modules/motivation}
\include{presentation/modules/problems}
\include{presentation/modules/this_method}
\begin{frame}[allowframebreaks=1]{Bibliography}
\printbibliography
\end{frame}
\end{document}