\section{Motivation~-~Why Differentiable Rendering is Important}
% \begin{frame}
% \centering
% \Huge
% Motivation~-~Why differentiable Rendering is important
% \end{frame}
\begin{frame}{Importance of Differentiable Rendering}
\begin{block}{Examples of Applications}
\begin{itemize}
\item Learning-based Inverse Rendering of Complex Indoor Scenes
with Differentiable Monte Carlo Raytracing [\cite{ACM:inverse_rendering}]\\
$\rightarrow$ Inverse rendering
\item Generating Semantic Adversarial Examples with Differentiable Rendering [\cite{DBLP:journals/corr/abs-1910-00727}]\\
$\rightarrow$ Machine learning
\item Real-Time Lighting Estimation for Augmented Reality [\cite{IEEE:AR_lighting_estimation}]\\
$\rightarrow$ Realistic real-time shading for AR applications
\item Acoustic Camera Pose Refinement [\cite{IEEE:Ac_cam_refinment}]\\
$\rightarrow$ Optimize the six-degree-of-freedom pose of underwater acoustic cameras
\end{itemize}
\end{block}
\end{frame}
\subsection{Inverse Rendering}
\begin{frame}{Inverse Rendering}
\begin{itemize}
\item Conventional rendering: Synthesize an image from a 3D scene
\item Inverse problem: Reconstruct a 3D scene from one or more images
%\item 3D modelling can be hard and time consuming
\item Approach (sketched on the next slide):
\begin{itemize}
\item Start from an initial approximation of the 3D scene
\item Render the approximation with a differentiable renderer
\item Calculate the error between the rendering and the target images
\item Minimize this error with a gradient descent algorithm, updating the scene parameters
\end{itemize}
\end{itemize}
\end{frame}
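
% Illustrative addition: a hedged toy sketch of the optimization loop described
% on the previous slide; not the method of any cited paper.
\begin{frame}[fragile]{Inverse Rendering~-~Optimization Loop (Sketch)}
A minimal sketch, assuming JAX for automatic differentiation and a toy Lambertian ``renderer''; all names (\texttt{render}, \texttt{loss}, \texttt{params}) are illustrative assumptions:
\footnotesize
\begin{verbatim}
import jax, jax.numpy as jnp

def render(params, normals):
    # Toy differentiable "renderer": Lambertian shading with an
    # unknown albedo under a light of unknown direction.
    l = params["light"] / jnp.linalg.norm(params["light"])
    return params["albedo"] * jnp.clip(normals @ l, 0.0, 1.0)

def loss(params, normals, target):
    # Error between the rendered approximation and the target image
    return jnp.mean((render(params, normals) - target) ** 2)

normals = jax.random.normal(jax.random.PRNGKey(0), (256, 3))
true_scene = {"albedo": 0.8, "light": jnp.array([0.0, 0.0, 1.0])}
target = render(true_scene, normals)            # "observed" image

params = {"albedo": 0.2, "light": jnp.array([1.0, 1.0, 1.0])}
grad_fn = jax.jit(jax.grad(loss))               # d(loss)/d(params)
for _ in range(200):                            # gradient descent
    g = grad_fn(params, normals, target)
    params = jax.tree_util.tree_map(lambda p, dp: p - 0.5 * dp, params, g)
\end{verbatim}
\end{frame}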
\begin{frame}{Inverse Rendering~-~Current Example}
\centering
\includemedia[
width=0.62\linewidth,height=0.35\linewidth,
activate=onclick,
addresource=proseminar_chair.mp4,
playbutton=fancy,
transparent,
passcontext,
flashvars={
source=proseminar_chair.mp4
&autoPlay=true
}
]{}{VPlayer.swf}
\\

Source:~\cite{ACM:inverse_rendering_signed_distance_function}
\end{frame}
\subsection{Adversarial Image Generation}
\begin{frame}{Adversarial Image Generation}
\begin{center}
\begin{minipage}{0.4\linewidth}
\begin{itemize}
\item Common problem in machine learning: Classification\\
$\implies$ Given a set of labels and a set of data, assign a label to each element in the dataset
\item Labeled data is needed to train a classifier network
\end{itemize}
\pause{}
\vspace{15mm}
Image source: Auth0, \href{https://auth0.com/blog/captcha-can-ruin-your-ux-here-s-how-to-use-it-right/}{CAPTCHA Can Ruin Your UX. Here’s How to Use it Right}
\end{minipage}
\begin{minipage}{0.5\linewidth}
\centering
\includegraphics[width=0.5\linewidth]{img/recaptcha_example.png}
\end{minipage}
\end{center}
\end{frame}
\begin{frame}{Adversarial Image Generation}
\begin{itemize}
\item Problem: Labeling training data is tedious\\
$\implies$ We want to generate training data automatically
\item One solution: Generative adversarial networks (e.g. AutoGAN [\cite{DBLP:AutoGAN}])\\
$\implies$ No explicit control over semantic changes to the image
\item Different solution: Use differentiable raytracing\\
$\implies$ Scene parameters can be manipulated directly (see the sketch on the next slide)
\end{itemize}
\end{frame}
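
% Illustrative addition: a hedged toy sketch of generating a semantic adversarial
% example through a differentiable renderer; not the method of the cited paper.
\begin{frame}[fragile]{Adversarial Image Generation~-~Sketch}
A minimal sketch, assuming JAX, a toy Lambertian renderer and a fixed toy linear classifier; only the semantic scene parameter (the light direction) is optimized, never the pixels:
\footnotesize
\begin{verbatim}
import jax, jax.numpy as jnp

def render(light, normals, albedo=0.8):
    # Toy differentiable renderer: Lambertian shading
    l = light / jnp.linalg.norm(light)
    return albedo * jnp.clip(normals @ l, 0.0, 1.0)

def score(light, normals, w, b):
    # Fixed toy linear "classifier" on the rendered pixels;
    # the sign of the score is the predicted class.
    return render(light, normals) @ w + b

k1, k2 = jax.random.split(jax.random.PRNGKey(0))
normals = jax.random.normal(k1, (256, 3))
w, b = 0.1 * jax.random.normal(k2, (256,)), 0.0

light = jnp.array([0.0, 0.0, 1.0])          # benign scene parameter
benign_sign = jnp.sign(score(light, normals, w, b))

# Gradient descent on the scene parameter to push the score
# across the decision boundary, i.e. flip the predicted class.
adv_loss = lambda l: benign_sign * score(l, normals, w, b)
grad_fn = jax.jit(jax.grad(adv_loss))
for _ in range(100):
    light = light - 0.1 * grad_fn(light)
# compare the benign score with score(light, normals, w, b)
\end{verbatim}
\end{frame}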
\begin{frame}{Adversarial Image Generation~-~Example [\cite{DBLP:journals/corr/abs-1910-00727}]}
\begin{center}
\begin{figure}
\begin{minipage}{0.45\linewidth}
\includegraphics[width=\linewidth]{img/adversarial_rendering_results/correct_car.png}
\includegraphics[width=\linewidth]{img/adversarial_rendering_results/correct_pedestrian.png}
\end{minipage}
\begin{minipage}{0.45\linewidth}
\includegraphics[width=\linewidth]{img/adversarial_rendering_results/incorrect_car.png}
\includegraphics[width=\linewidth]{img/adversarial_rendering_results/incorrect_pedestrian.png}
\end{minipage}
\centering
\caption{Left: Original images; objects are correctly identified\\
Right: Adversarial examples; identifications are missing or wrong after purely semantic changes}
\label{fig:adv_img_example}
\end{figure}
\end{center}
\end{frame}