Update on Overleaf.
This commit is contained in:
parent
48998b7380
commit
257108290e
4 changed files with 41 additions and 19 deletions
|
|
@ -33,9 +33,9 @@
|
|||
\underbrace{\epsilon(x,x')}_{\text{emissive light}}+\underbrace{\int_S \rho(x,x',x'')I(x',x'')dx''}_{\text{light scattered towards the point}}
|
||||
\right]
|
||||
\]
|
||||
\begin{itemize}
|
||||
\item Attempts to capture the physical light transport phenomenon in a single equation
|
||||
\end{itemize}
|
||||
%\begin{itemize}
|
||||
%\item Attempts to capture the physical light transport phenomenon in a single equation
|
||||
%\end{itemize}
|
||||
[\cite{ACM:rendering_equation}]
|
||||
\item Problem: This equation is not analytically solvable\\
|
||||
$\rightarrow$ Solve numerically using Monte Carlo integration (i.e.\ raytracing)
|
||||
|
|
@ -51,7 +51,7 @@
|
|||
\item Calculate geometry intersection
|
||||
\item Trace rays from intersection point to all light sources
|
||||
\item Calculate color from emission and the sampled reflected light taking geometry into account (e.g. occlusion)
|
||||
\item Have the ray ``bounce around'' to account for global illumination
|
||||
\item Have the ray ``bounce around'' to account for indirect lighting
|
||||
\end{itemize}
|
||||
\end{block}
|
||||
\pause
|
||||
|
|
|
|||
|
|
@ -23,14 +23,14 @@ with Differentiable Monte Carlo Raytracing [\cite{ACM:inverse_rendering}]\\
|
|||
\begin{frame}{Inverse rendering}
|
||||
\begin{itemize}
|
||||
\item Conventional rendering: Synthesize an Image from a 3D scene
|
||||
\item Inverse rendering is solving the inverse problem: Synthesize a 3D scene from images
|
||||
\item 3D modelling can be hard and time consuming
|
||||
\item Inverse problem: Synthesize a 3D scene from images
|
||||
%\item 3D modelling can be hard and time consuming
|
||||
\item Approach:
|
||||
\begin{itemize}
|
||||
\item Approximate the 3D scene (often very coarse)
|
||||
\item Approximate the 3D scene
|
||||
\item Render the approximation differentiably
|
||||
\item Calculate the error between the render and the images
|
||||
\item Use ADAM or comparable gradient descent algorithm to minimize this error
|
||||
\item Calculate the error
|
||||
\item Use a gradient descent algorithm to minimize this error
|
||||
\end{itemize}
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
|
@ -75,11 +75,11 @@ with Differentiable Monte Carlo Raytracing [\cite{ACM:inverse_rendering}]\\
|
|||
\end{frame}
|
||||
\begin{frame}{Adversarial image generation}
|
||||
\begin{itemize}
|
||||
\item Problem: Labeling training data is tedious and expensive\\
|
||||
\item Problem: Labeling training data is tedious\\
|
||||
$\implies$ We want to automatically generate training data
|
||||
\item One solution: Generative adversarial networks. Let two neural nets ``compete''; a generator and a classifier. (e.g.\ AutoGAN [\cite{DBLP:AutoGAN}])\\
|
||||
$\implies$ Impossible to make semantic changes to the image (e.g.\ lighting) since no knowledge of the 3D scene exists
|
||||
\item Different solution: Generate image using differentiable raytracing, use gradient descent to optimize the result image to fall into a specific class\\
|
||||
\item One solution: Generative adversarial networks. (e.g. AutoGAN [\cite{DBLP:AutoGAN}])\\
|
||||
$\implies$ Impossible to make semantic changes to the image (e.g. lighting)
|
||||
\item Different solution: Use differentiable raytracing\\
|
||||
$\implies$ Scene parameters can be manipulated
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
|
@ -97,7 +97,7 @@ with Differentiable Monte Carlo Raytracing [\cite{ACM:inverse_rendering}]\\
|
|||
\end{minipage}
|
||||
\centering
|
||||
\caption{Left: Original images, features are correctly identified.\\
|
||||
Right: adversarial examples, silver car is not recognized and pedestrians are identified where there are none. Only semantic features (color, position, rotation) have been changed.}
|
||||
Right: adversarial examples, missing/wrong identifications after only semantic changes}
|
||||
\label{fig:adv_img_example}
|
||||
\end{figure}
|
||||
\end{center}
|
||||
|
|
|
|||
|
|
@ -7,9 +7,11 @@
|
|||
\subsection{Why differentiable rendering is hard}
|
||||
\begin{frame}{Why differentiable rendering is hard}
|
||||
\begin{itemize}
|
||||
\item Rendering integral contains the geometry term that is not differentiable
|
||||
\item The gradient of the visibility can lead to Dirac delta terms which have 0 probability of being sampled correctly [\cite{ACM:diracdelta},\cite{ACM:diffable_raytracing}]
|
||||
\item Differentiation with respect to certain scene parameters possible but we need to differentiate with respect to any scene parameter
|
||||
\item Geometry term
|
||||
\item Causes Dirac delta terms\\
|
||||
$\implies$ Have 0 probability of being sampled correctly [\cite{ACM:diracdelta},\cite{ACM:diffable_raytracing}]
|
||||
%\item Differentiation with respect to certain scene parameters possible but we need to differentiate with respect to any scene parameter
|
||||
\item Need to differentiate with respect to any scene parameter
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
\begin{frame}{Primary occlusion}
|
||||
|
|
@ -26,8 +28,8 @@
|
|||
\begin{itemize}
|
||||
\item OpenDR [\cite{DBLP:OpenDR}]
|
||||
\item Neural 3D Mesh Renderer [\cite{DBLP:Neural3DKatoetal}]
|
||||
\item Both rasterization based (first render the image using rasterization, then approximate the gradients using the resulting color buffer)
|
||||
\item Focused on speed rather than precision
|
||||
\item Both rasterization based %(first render the image using rasterization, then approximate the gradients using the resulting color buffer)
|
||||
\item Focused on speed $\rightarrow$ imprecise
|
||||
\end{itemize}
|
||||
\end{block}
|
||||
\end{frame}
|
||||
|
|
|
|||
|
|
@ -6,8 +6,28 @@
|
|||
\end{frame}
|
||||
\subsection{Edge sampling}
|
||||
\begin{frame}{Edge sampling}
|
||||
\begin{block}{Assumptions}
|
||||
\begin{itemize}
|
||||
\item Continuous parameter set
|
||||
\item Triangle meshes
|
||||
\item No interpenetrating triangles
|
||||
\item No point lights, no perfectly specular surfaces
|
||||
\item Ignore time domain
|
||||
\end{itemize}
|
||||
\end{block}
|
||||
\pause
|
||||
\begin{block}{Idea}
|
||||
\begin{itemize}
|
||||
\item Traditional sampling for continuous regions
|
||||
\item Edge sampling the discontinuous part
|
||||
\end{itemize}
|
||||
\end{block}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}{Edge sampling - Illustration}
|
||||
|
||||
\end{frame}
|
||||
|
||||
\subsection{Conclusion -- what can this method do?}
|
||||
% talk about limitations here!
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue