Update on Overleaf.
This commit is contained in:
commit
cd463f4a54
56 changed files with 1974 additions and 0 deletions
104
presentation/modules/motivation.tex
Normal file
104
presentation/modules/motivation.tex
Normal file
|
|
@ -0,0 +1,104 @@
|
|||
\section{Motivation -- why differentiable rendering is important}
|
||||
\begin{frame}
|
||||
\centering
|
||||
\Huge
|
||||
Motivation -- why differentiable rendering is important
|
||||
\end{frame}
|
||||
\begin{frame}{Importance of differentiable rendering}
|
||||
\begin{block}{Examples for Applications}
|
||||
\begin{itemize}
|
||||
\item Learning-based Inverse Rendering of Complex Indoor Scenes
|
||||
with Differentiable Monte Carlo Raytracing [\cite{ACM:inverse_rendering}]\\
|
||||
$\rightarrow$ Inverse rendering
|
||||
\item Generating Semantic Adversarial Examples with Differentiable Rendering [\cite{DBLP:journals/corr/abs-1910-00727}]\\
|
||||
$\rightarrow$ Machine learning
|
||||
\item Real-Time Lighting Estimation for Augmented Reality [\cite{IEEE:AR_lighting_estimation}]\\
|
||||
$\rightarrow$ Realistic real time shading for AR applications
|
||||
\item Acoustic Camera Pose Refinement [\cite{IEEE:Ac_cam_refinment}]\\
|
||||
$\rightarrow$ Optimize six degrees of freedom for acoustic underwater cameras
|
||||
\end{itemize}
|
||||
\end{block}
|
||||
\end{frame}
|
||||
\subsection{Inverse rendering}
|
||||
\begin{frame}{Inverse rendering}
|
||||
\begin{itemize}
|
||||
\item Conventional rendering: Synthesize an image from a 3D scene
|
||||
\item Inverse rendering is solving the inverse problem: Synthesize a 3D scene from images
|
||||
\item 3D modelling can be hard and time-consuming
|
||||
\item Approach:
|
||||
\begin{itemize}
|
||||
\item Approximate the 3D scene (often very coarse)
|
||||
\item Render the approximation differentiably
|
||||
\item Calculate the error between the render and the images
|
||||
\item Use ADAM or a comparable gradient descent algorithm to minimize this error
|
||||
\end{itemize}
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
|
||||
\begin{frame}{Inverse rendering - current example}
|
||||
\centering
|
||||
\includemedia[
|
||||
width=0.62\linewidth,height=0.35\linewidth,
|
||||
activate=onclick,
|
||||
addresource=proseminar_chair.mp4,
|
||||
playbutton=fancy,
|
||||
transparent,
|
||||
passcontext,
|
||||
flashvars={
|
||||
source=proseminar_chair.mp4
|
||||
&autoPlay=true
|
||||
}
|
||||
]{}{VPlayer.swf}
|
||||
\\
|
||||
|
||||
Source: \cite{ACM:inverse_rendering_signed_distance_function}
|
||||
\end{frame}
|
||||
\subsection{Adversarial image generation}
|
||||
\begin{frame}{Adversarial image generation}
|
||||
\begin{center}
|
||||
\begin{minipage}{0.4\linewidth}
|
||||
\begin{itemize}
|
||||
\item Common Problem in machine learning: Classification\\
|
||||
$\implies$ Given a set of labels and a set of data, assign a label to each element in the dataset
|
||||
\item Labeled data is needed to train classifier network
|
||||
\end{itemize}
|
||||
\pause
|
||||
\vspace{15mm}
|
||||
Image source: Auth0, \href{https://auth0.com/blog/captcha-can-ruin-your-ux-here-s-how-to-use-it-right/}{CAPTCHA Can Ruin Your UX. Here’s How to Use it Right}
|
||||
\end{minipage}
|
||||
\begin{minipage}{0.5\linewidth}
|
||||
\centering
|
||||
\includegraphics[width=0.5\linewidth]{presentation/img/recaptcha_example.png}
|
||||
\end{minipage}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
\begin{frame}{Adversarial image generation}
|
||||
\begin{itemize}
|
||||
\item Problem: Labeling training data is tedious and expensive\\
|
||||
$\implies$ We want to automatically generate training data
|
||||
\item One solution: Generative adversarial networks. Let two neural nets ``compete''; a generator and a classifier. (e.g.\ AutoGAN [\cite{DBLP:AutoGAN}])\\
|
||||
$\implies$ Impossible to make semantic changes to the image (e.g.\ lighting) since no knowledge of the 3D scene exists
|
||||
\item Different solution: Generate image using differentiable raytracing, use gradient descent to optimize the result image to fall into a specific class\\
|
||||
$\implies$ Scene parameters can be manipulated
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}{Adversarial image generation - example [\cite{DBLP:journals/corr/abs-1910-00727}]}
|
||||
\begin{center}
|
||||
\begin{figure}
|
||||
\begin{minipage}{0.45\linewidth}
|
||||
\includegraphics[width=\linewidth]{presentation/img/adversarial_rendering_results/correct_car.png}
|
||||
\includegraphics[width=\linewidth]{presentation/img/adversarial_rendering_results/correct_pedestrian.png}
|
||||
\end{minipage}
|
||||
\begin{minipage}{0.45\linewidth}
|
||||
\includegraphics[width=\linewidth]{presentation/img/adversarial_rendering_results/incorrect_car.png}
|
||||
\includegraphics[width=\linewidth]{presentation/img/adversarial_rendering_results/incorrect_pedestrian.png}
|
||||
\end{minipage}
|
||||
\centering
|
||||
\caption{Left: Original images, features are correctly identified.\\
|
||||
Right: Adversarial examples, silver car is not recognized and pedestrians are identified where there are none. Only semantic features (color, position, rotation) have been changed.}
|
||||
\label{fig:adv_img_example}
|
||||
\end{figure}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
Loading…
Add table
Add a link
Reference in a new issue