Update on Overleaf.
This commit is contained in:
commit
cd463f4a54
56 changed files with 1974 additions and 0 deletions
78
presentation/modules/basic_terms.tex
Normal file
78
presentation/modules/basic_terms.tex
Normal file
|
|
@ -0,0 +1,78 @@
|
|||
\section{Basic terms}
|
||||
\begin{frame}
|
||||
\centering
|
||||
\Huge
|
||||
Basic terms
|
||||
\end{frame}
|
||||
\subsection{Raytracing}
|
||||
\begin{frame}{Raytracing}
|
||||
\begin{center}
|
||||
\begin{minipage}{0.4\linewidth}
|
||||
\flushleft Turn 3D model\dots
|
||||
\includegraphics[width=\linewidth]{presentation/img/proseminar_workbench.png}
|
||||
\end{minipage}
|
||||
\pause
|
||||
$\rightarrow$
|
||||
\hspace{10mm}
|
||||
\begin{minipage}{0.4\linewidth}
|
||||
\flushright \dots into a physically accurate image
|
||||
\includegraphics[width=0.8\linewidth]{presentation/img/proseminar_cycles.png}
|
||||
\end{minipage}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}{Raytracing}
|
||||
\begin{block}{Task}
|
||||
\begin{itemize}
|
||||
\item Determine the color of each pixel in the scene
|
||||
\item Color (light intensity) is given by the rendering equation:\\
|
||||
\[
|
||||
\underbrace{I(x,x')}_{\text{Light transport intensity from }x \text{ to } x'}=
|
||||
\underbrace{g(x,x')}_{\text{geometry term}}
|
||||
\left[
|
||||
\underbrace{\epsilon(x,x')}_{\text{emissive light}}+\underbrace{\int_S \rho(x,x',x'')I(x',x'')dx''}_{\text{light scattered towards the point}}
|
||||
\right]
|
||||
\]
|
||||
\begin{itemize}
|
||||
\item Attempts to capture the physical light transport phenomenon in a single equation
|
||||
\end{itemize}
|
||||
[\cite{ACM:rendering_equation}]
|
||||
\item Problem: This equation is not analytically solvable\\
|
||||
$\rightarrow$ Solve numerically using Monte-Carlo integration (i.e. raytracing)
|
||||
\end{itemize}
|
||||
\end{block}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}{Raytracing}
|
||||
\setbeamercovered{transparent}
|
||||
\begin{block}{Principle (simplified)}
|
||||
\begin{itemize}
|
||||
\item Cast rays from the camera towards the scene
|
||||
\item Calculate geometry intersection
|
||||
\item Trace rays from intersection point to all light sources
|
||||
\item Calculate color from emission and the sampled reflected light taking geometry into account (e.g. occlusion)
|
||||
\item Have the ray ``bounce around'' to account for global illumination
|
||||
\end{itemize}
|
||||
\end{block}
|
||||
\pause
|
||||
\begin{block}{Variables}
|
||||
Scene depends on lots of variables:
|
||||
\begin{itemize}
|
||||
\item Material properties (roughness, emission strength, color, transmissiveness...)
|
||||
\item Vertex positions
|
||||
\end{itemize}
|
||||
\end{block}
|
||||
\end{frame}
|
||||
\begin{frame}{Image synthesis - optical phenomena}
|
||||
\centering
|
||||
\includegraphics[width=0.38\linewidth]{presentation/img/proseminar_cycles_annotated.png}
|
||||
\end{frame}
|
||||
|
||||
\subsection{Differentiable rendering}
|
||||
\begin{frame}{Differentiable rendering}
|
||||
\begin{itemize}
|
||||
\item Given: Function mapping a 3D scene to a real number (e.g. error function)
|
||||
\item Target: Calculate gradient of that function
|
||||
\item Required: Differentiate with respect to any scene parameter
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
104
presentation/modules/motivation.tex
Normal file
104
presentation/modules/motivation.tex
Normal file
|
|
@ -0,0 +1,104 @@
|
|||
\section{Motivation - why differentiable rendering is important}
|
||||
\begin{frame}
|
||||
\centering
|
||||
\Huge
|
||||
Motivation - why differentiable rendering is important
|
||||
\end{frame}
|
||||
\begin{frame}{Importance of differentiable rendering}
|
||||
\begin{block}{Examples for Applications}
|
||||
\begin{itemize}
|
||||
\item Learning-based Inverse Rendering of Complex Indoor Scenes
|
||||
with Differentiable Monte Carlo Raytracing [\cite{ACM:inverse_rendering}]\\
|
||||
$\rightarrow$ Inverse rendering
|
||||
\item Generating Semantic Adversarial Examples with Differentiable Rendering [\cite{DBLP:journals/corr/abs-1910-00727}]\\
|
||||
$\rightarrow$ Machine learning
|
||||
\item Real-Time Lighting Estimation for Augmented Reality [\cite{IEEE:AR_lighting_estimation}]\\
|
||||
$\rightarrow$ Realistic real time shading for AR applications
|
||||
\item Acoustic Camera Pose Refinement [\cite{IEEE:Ac_cam_refinment}]\\
|
||||
$\rightarrow$ Optimize six degrees of freedom for acoustic underwater cameras
|
||||
\end{itemize}
|
||||
\end{block}
|
||||
\end{frame}
|
||||
\subsection{Inverse rendering}
|
||||
\begin{frame}{Inverse rendering}
|
||||
\begin{itemize}
|
||||
\item Conventional rendering: Synthesize an image from a 3D scene
|
||||
\item Inverse rendering is solving the inverse problem: Synthesize a 3D scene from images
|
||||
\item 3D modelling can be hard and time-consuming
|
||||
\item Approach:
|
||||
\begin{itemize}
|
||||
\item Approximate the 3D scene (often very coarse)
|
||||
\item Render the approximation differentiably
|
||||
\item Calculate the error between the render and the images
|
||||
\item Use ADAM or comparable gradient descent algorithm to minimize this error
|
||||
\end{itemize}
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
|
||||
\begin{frame}{Inverse rendering - current example}
|
||||
\centering
|
||||
\includemedia[
|
||||
width=0.62\linewidth,height=0.35\linewidth,
|
||||
activate=onclick,
|
||||
addresource=proseminar_chair.mp4,
|
||||
playbutton=fancy,
|
||||
transparent,
|
||||
passcontext,
|
||||
flashvars={
|
||||
source=proseminar_chair.mp4
|
||||
&autoPlay=true
|
||||
}
|
||||
]{}{VPlayer.swf}
|
||||
\\
|
||||
|
||||
Source: \cite{ACM:inverse_rendering_signed_distance_function}
|
||||
\end{frame}
|
||||
\subsection{Adversarial image generation}
|
||||
\begin{frame}{Adversarial image generation}
|
||||
\begin{center}
|
||||
\begin{minipage}{0.4\linewidth}
|
||||
\begin{itemize}
|
||||
\item Common Problem in machine learning: Classification\\
|
||||
$\implies$ Given a set of labels and a set of data, assign a label to each element in the dataset
|
||||
\item Labeled data is needed to train classifier network
|
||||
\end{itemize}
|
||||
\pause
|
||||
\vspace{15mm}
|
||||
Image source: Auth0, \href{https://auth0.com/blog/captcha-can-ruin-your-ux-here-s-how-to-use-it-right/}{CAPTCHA Can Ruin Your UX. Here’s How to Use it Right}
|
||||
\end{minipage}
|
||||
\begin{minipage}{0.5\linewidth}
|
||||
\centering
|
||||
\includegraphics[width=0.5\linewidth]{presentation/img/recaptcha_example.png}
|
||||
\end{minipage}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
\begin{frame}{Adversarial image generation}
|
||||
\begin{itemize}
|
||||
\item Problem: Labeling training data is tedious and expensive\\
|
||||
$\implies$ We want to automatically generate training data
|
||||
\item One solution: Generative adversarial networks. Let two neural nets ``compete''; a generator and a classifier. (e.g. AutoGAN [\cite{DBLP:AutoGAN}])\\
|
||||
$\implies$ Impossible to make semantic changes to the image (e.g. lighting) since no knowledge of the 3D scene exists
|
||||
\item Different solution: Generate image using differentiable raytracing, use gradient descent to optimize the result image to fall into a specific class\\
|
||||
$\implies$ Scene parameters can be manipulated
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}{Adversarial image generation - example [\cite{DBLP:journals/corr/abs-1910-00727}]}
|
||||
\begin{center}
|
||||
\begin{figure}
|
||||
\begin{minipage}{0.45\linewidth}
|
||||
\includegraphics[width=\linewidth]{presentation/img/adversarial_rendering_results/correct_car.png}
|
||||
\includegraphics[width=\linewidth]{presentation/img/adversarial_rendering_results/correct_pedestrian.png}
|
||||
\end{minipage}
|
||||
\begin{minipage}{0.45\linewidth}
|
||||
\includegraphics[width=\linewidth]{presentation/img/adversarial_rendering_results/incorrect_car.png}
|
||||
\includegraphics[width=\linewidth]{presentation/img/adversarial_rendering_results/incorrect_pedestrian.png}
|
||||
\end{minipage}
|
||||
\centering
|
||||
\caption{Left: Original images, features are correctly identified.\\
|
||||
Right: adversarial examples, silver car is not recognized and pedestrians are identified where there are none. Only semantic features (color, position, rotation) have been changed.}
|
||||
\label{fig:adv_img_example}
|
||||
\end{figure}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
137
presentation/modules/problems.tex
Normal file
137
presentation/modules/problems.tex
Normal file
|
|
@ -0,0 +1,137 @@
|
|||
\section{Problems}
|
||||
\begin{frame}
|
||||
\centering
|
||||
\Huge
|
||||
Problems
|
||||
\end{frame}
|
||||
\subsection{Why differentiable rendering is hard}
|
||||
\begin{frame}{Why differentiable rendering is hard}
|
||||
\begin{itemize}
|
||||
\item Rendering integral contains the geometry term that is not differentiable
|
||||
\item The gradient of the visibility can lead to Dirac delta terms which have 0 probability of being sampled correctly [\cite{ACM:diracdelta},\cite{ACM:diffable_raytracing}]
|
||||
\item Differentiation with respect to certain scene parameters possible but we need to differentiate with respect to any scene parameter
|
||||
\end{itemize}
|
||||
\pause
|
||||
\begin{center}
|
||||
\begin{minipage}{0.4\linewidth}
|
||||
\begin{figure}
|
||||
\centering
|
||||
\begin{minipage}{0.5\linewidth}
|
||||
\begin{tikzpicture}[domain=-0.5:2]
|
||||
\draw[very thin,color=gray] (-0.1,-0.6) grid (2.1,2.1);
|
||||
|
||||
\draw[->] (-0.2,-0.5) -- (2.2,-0.5) node[right] {$\omega$};
|
||||
\draw[->] (0,-0.7) -- (0,2.2) node[above] {$V(x,\omega)$};
|
||||
|
||||
\draw (0,0) -- (1,0) plot coordinates {(0,0) (1,0)}[color=red];
|
||||
\draw (1,1) -- (2,1) plot coordinates {(1,1) (2,1)}[color=red];
|
||||
\draw (0,0) node[left] {$0$};
|
||||
\draw (0,1) node[left] {$1$};
|
||||
\draw (1,-0.7) node[below] {$\omega_0$};
|
||||
\end{tikzpicture}
|
||||
\end{minipage}
|
||||
\hspace{3mm}
|
||||
\begin{minipage}{0.4\linewidth}
|
||||
\caption{Visibility of a point $x$ with respect to $\omega$. Observe the discontinuity at $\omega_0$.}
|
||||
\label{fig:visibility}
|
||||
\end{minipage}
|
||||
\end{figure}
|
||||
\end{minipage}
|
||||
% second diagram
|
||||
\begin{minipage}{0.5\linewidth}
|
||||
\begin{figure}
|
||||
\centering
|
||||
\begin{minipage}{0.5\linewidth}
|
||||
\begin{tikzpicture}[domain=-0.5:2]
|
||||
\draw[very thin,color=gray] (-0.1,-0.6) grid (2.1,2.1);
|
||||
|
||||
\draw[->] (-0.2,-0.5) -- (2.2,-0.5) node[right] {$\omega$};
|
||||
\draw[->] (0,-0.7) -- (0,2.2) node[above] {$\frac{\partial}{\partial\omega}V(x,\omega)$};
|
||||
|
||||
\draw (0,0) -- (2,0) plot coordinates {(0,0) (2,0)}[color=red];
|
||||
\draw (0,0) node[left] {$0$};
|
||||
\draw (0,1) node[left] {$1$};
|
||||
\draw (0,2) node[left] {$\infty$};
|
||||
\draw (1,2) node[color=red] {$\bullet$};
|
||||
\draw (1,-0.7) node[below] {$\omega_0$};
|
||||
\end{tikzpicture}
|
||||
\end{minipage}
|
||||
\begin{minipage}{0.4\linewidth}
|
||||
\caption{Differentiation of the left graph with respect to $\omega$. Observe the discontinuity at $\omega_0$ in the left graph leading to a Dirac delta spike at $\omega_0$ in the differentiation.}
|
||||
\label{fig:dirac-delta-spike}
|
||||
\end{minipage}
|
||||
\end{figure}
|
||||
\end{minipage}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
\begin{frame}{Geometry term}
|
||||
\centering
|
||||
\begin{minipage}{0.4\linewidth}
|
||||
\includegraphics[width=\linewidth]{presentation/img/blockers.png}
|
||||
\end{minipage}
|
||||
\hspace{15mm}
|
||||
\begin{minipage}{0.4\linewidth}
|
||||
\includegraphics[width=\linewidth]{presentation/img/blockers_diff.png}
|
||||
\end{minipage}
|
||||
\end{frame}
|
||||
\subsection{Former methods}
|
||||
\begin{frame}{Former methods}
|
||||
\begin{block}{Previous differentiable renderers considered by this paper}
|
||||
\begin{itemize}
|
||||
\item OpenDR [\cite{DBLP:OpenDR}]
|
||||
\item Neural 3D Mesh Renderer [\cite{DBLP:Neural3DKatoetal}]
|
||||
\item Both rasterization based (first render the image using rasterization, then approximate the gradients using the resulting color buffer)
|
||||
\item Focused on speed rather than precision
|
||||
\end{itemize}
|
||||
\end{block}
|
||||
\end{frame}
|
||||
\begin{frame}{Former methods - visualization}
|
||||
\begin{figure}
|
||||
\begin{minipage}{0.12\linewidth}
|
||||
\begin{figure}
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{presentation/img/comparisons/plane.png}
|
||||
\caption{planar scene}
|
||||
\label{fig:planar-scene}
|
||||
\end{figure}
|
||||
\end{minipage}
|
||||
\hspace{2mm}
|
||||
\begin{minipage}{0.12\linewidth}
|
||||
\begin{figure}
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{presentation/img/comparisons/opendr.png}
|
||||
\caption{OpenDR}
|
||||
\label{fig:grad-OpenDR}
|
||||
\end{figure}
|
||||
\end{minipage}
|
||||
\hspace{2mm}
|
||||
\begin{minipage}{0.12\linewidth}
|
||||
\begin{figure}
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{presentation/img/comparisons/Neural.png}
|
||||
\caption{Neural}
|
||||
\label{fig:grad-Neural3DMesh}
|
||||
\end{figure}
|
||||
\end{minipage}
|
||||
\hspace{2mm}
|
||||
\begin{minipage}{0.12\linewidth}
|
||||
\begin{figure}
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{presentation/img/comparisons/ours.png}
|
||||
\caption{this paper}
|
||||
\label{fig:grad-this}
|
||||
\end{figure}
|
||||
\end{minipage}
|
||||
\hspace{4mm}
|
||||
\begin{minipage}{0.3\linewidth}
|
||||
\caption{
|
||||
%A plane lit by a point light source. Images visualize a gradient with respect to the plane moving right. Since the light source remains static the gradient should only be $\ne 0$ at the boundaries. OpenDR and Neural are not able to correctly calculate the gradients as they are based on color buffer differences.\\
|
||||
Visualizations of gradients calculated by different differentiable renderers.\\
|
||||
Images: \cite{ACM:diffable_raytracing}
|
||||
}
|
||||
\label{fig:grad-explanation}
|
||||
\end{minipage}
|
||||
\end{figure}
|
||||
\pause
|
||||
$\implies$ Problems are caused at the edges and by approximation using color buffers
|
||||
\end{frame}
|
||||
226
presentation/modules/this_method.tex
Normal file
226
presentation/modules/this_method.tex
Normal file
|
|
@ -0,0 +1,226 @@
|
|||
\section{This method}
|
||||
\begin{frame}
|
||||
\centering
|
||||
\Huge
|
||||
This method
|
||||
\end{frame}
|
||||
\subsection{Edge sampling}
|
||||
\subsection{Hierarchical edge sampling}
|
||||
\subsection{Conclusion -- what can this method do?}
|
||||
% talk about limitations here!
|
||||
|
||||
\begin{frame}{Inverse rendering - Results in this paper}
|
||||
\begin{block}{Inverse rendering here}
|
||||
\begin{itemize}
|
||||
\item Fit camera pose, material parameters and light source intensity
|
||||
\item Scene: Strong indirect illumination and non-Lambertian materials
|
||||
\item Initial guess: Assign almost all objects a white color, arbitrary camera pose
|
||||
\item 177 parameters in total
|
||||
\item Absolute difference as loss function and ADAM optimizer
|
||||
\item Start at a resolution of $64\times 64$ and linearly increase to $512\times 512$ in 8 steps\\
|
||||
$\implies$ Avoid getting stuck in local minima of the loss function
|
||||
\end{itemize}
|
||||
\end{block}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}{Inverse rendering - results in this paper}
|
||||
\begin{center}
|
||||
\begin{minipage}{0.25\linewidth}
|
||||
\begin{figure}
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{presentation/img/results/guess.png}
|
||||
\caption{Initial guess}
|
||||
\label{fig:results-guess}
|
||||
\end{figure}
|
||||
\end{minipage}
|
||||
\hspace{2mm}
|
||||
\begin{minipage}{0.25\linewidth}
|
||||
\begin{figure}
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{presentation/img/results/photo.png}
|
||||
\caption{Target (photograph)}
|
||||
\label{fig:results-target}
|
||||
\end{figure}
|
||||
\end{minipage}
|
||||
\hspace{2mm}
|
||||
\begin{minipage}{0.25\linewidth}
|
||||
\begin{figure}
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{presentation/img/results/result.png}
|
||||
\caption{Optimized image}
|
||||
\label{fig:results-optimized}
|
||||
\end{figure}
|
||||
\end{minipage}
|
||||
\end{center}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}
|
||||
\begin{figure}
|
||||
\centering
|
||||
\begin{minipage}{0.14\linewidth}
|
||||
\includegraphics[width=\linewidth]{presentation/img/render_optimization/triangles/img-027.png}
|
||||
\end{minipage}
|
||||
\begin{minipage}{0.14\linewidth}
|
||||
\includegraphics[width=\linewidth]{presentation/img/render_optimization/shade/img-028.png}
|
||||
\end{minipage}
|
||||
\begin{minipage}{0.14\linewidth}
|
||||
\includegraphics[width=\linewidth]{presentation/img/render_optimization/glossy/img-029.png}
|
||||
\end{minipage}
|
||||
\begin{minipage}{0.14\linewidth}
|
||||
\includegraphics[width=\linewidth]{presentation/img/render_optimization/glossy_recv/img-030.png}
|
||||
\end{minipage}
|
||||
\begin{minipage}{0.14\linewidth}
|
||||
\includegraphics[width=\linewidth]{presentation/img/render_optimization/specular/img-031.png}
|
||||
\end{minipage}
|
||||
\begin{minipage}{0.14\linewidth}
|
||||
\includegraphics[width=\linewidth]{presentation/img/render_optimization/global_illumination/img-032.png}
|
||||
\end{minipage}
|
||||
\begin{minipage}{0.1\linewidth}
|
||||
\caption{initial guess}
|
||||
\label{fig:grid_init_guess}
|
||||
\end{minipage}
|
||||
\end{figure}
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%% second row %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\begin{figure}
|
||||
\centering
|
||||
\begin{minipage}{0.14\linewidth}
|
||||
\includegraphics[width=\linewidth]{presentation/img/render_optimization/triangles/img-033.png}
|
||||
\end{minipage}
|
||||
\begin{minipage}{0.14\linewidth}
|
||||
\includegraphics[width=\linewidth]{presentation/img/render_optimization/shade/img-034.png}
|
||||
\end{minipage}
|
||||
\begin{minipage}{0.14\linewidth}
|
||||
\includegraphics[width=\linewidth]{presentation/img/render_optimization/glossy/img-035.png}
|
||||
\end{minipage}
|
||||
\begin{minipage}{0.14\linewidth}
|
||||
\includegraphics[width=\linewidth]{presentation/img/render_optimization/glossy_recv/img-036.png}
|
||||
\end{minipage}
|
||||
\begin{minipage}{0.14\linewidth}
|
||||
\includegraphics[width=\linewidth]{presentation/img/render_optimization/specular/img-037.png}
|
||||
\end{minipage}
|
||||
\begin{minipage}{0.14\linewidth}
|
||||
\includegraphics[width=\linewidth]{presentation/img/render_optimization/global_illumination/img-038.png}
|
||||
\end{minipage}
|
||||
\begin{minipage}{0.1\linewidth}
|
||||
\caption{target images}
|
||||
\label{fig:grid_target}
|
||||
\end{minipage}
|
||||
\end{figure}
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%% third row %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\begin{figure}
|
||||
\centering
|
||||
\begin{minipage}{0.15\linewidth}
|
||||
\begin{figure}
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{presentation/img/render_optimization/triangles/img-039.png}
|
||||
\caption{primary occlusion}
|
||||
\end{figure}
|
||||
\end{minipage}
|
||||
\begin{minipage}{0.14\linewidth}
|
||||
\begin{figure}
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{presentation/img/render_optimization/shade/img-040.png}
|
||||
\caption{shadow}
|
||||
\end{figure}
|
||||
\end{minipage}
|
||||
\begin{minipage}{0.14\linewidth}
|
||||
\begin{figure}
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{presentation/img/render_optimization/glossy/img-041.png}
|
||||
\caption{glossy}
|
||||
\end{figure}
|
||||
\end{minipage}
|
||||
\begin{minipage}{0.14\linewidth}
|
||||
\begin{figure}
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{presentation/img/render_optimization/glossy_recv/img-042.png}
|
||||
\caption{glossy receiver}
|
||||
\end{figure}
|
||||
\end{minipage}
|
||||
\begin{minipage}{0.14\linewidth}
|
||||
\begin{figure}
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{presentation/img/render_optimization/specular/img-043.png}
|
||||
\caption{near-specular}
|
||||
\end{figure}
|
||||
\end{minipage}
|
||||
\begin{minipage}{0.15\linewidth}
|
||||
\begin{figure}
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{presentation/img/render_optimization/global_illumination/img-044.png}
|
||||
\caption{global illumination}
|
||||
\end{figure}
|
||||
\end{minipage}
|
||||
\begin{minipage}{0.08\linewidth}
|
||||
\caption{optimized result}
|
||||
\label{fig:grid_optimized}
|
||||
\end{minipage}
|
||||
\end{figure}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}{Inverse rendering - example from this paper}
|
||||
\centering
|
||||
\begin{minipage}{0.19\linewidth}
|
||||
\begin{figure}
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{presentation/img/teapot_video/teapot_init.png}
|
||||
\vspace{0mm}
|
||||
\caption{initial guess}
|
||||
\label{fig:teapot_init}
|
||||
\end{figure}
|
||||
\end{minipage}
|
||||
\begin{minipage}{0.19\linewidth}
|
||||
\begin{figure}
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{presentation/img/teapot_video/teapot_init_diff.png}
|
||||
\caption{difference\\
|
||||
initial $\leftrightarrow$ target}
|
||||
\label{fig:teapot_init_diff}
|
||||
\end{figure}
|
||||
\end{minipage}
|
||||
\begin{minipage}{0.19\linewidth}
|
||||
\begin{figure}
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{presentation/img/teapot_video/teapot_target.png}
|
||||
\vspace{0mm}
|
||||
\caption{target image}
|
||||
\label{fig:teapot_target}
|
||||
\end{figure}
|
||||
\end{minipage}
|
||||
\begin{minipage}{0.19\linewidth}
|
||||
\begin{figure}
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{presentation/img/teapot_video/teapot_final_diff.png}
|
||||
\caption{difference\\
|
||||
final $\leftrightarrow$ target}
|
||||
\label{fig:teapot_final_diff}
|
||||
\end{figure}
|
||||
\end{minipage}
|
||||
\begin{minipage}{0.19\linewidth}
|
||||
\begin{figure}
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{presentation/img/teapot_video/teapot_final.png}
|
||||
\vspace{0mm}
|
||||
\caption{final image}
|
||||
\label{fig:teapot_final}
|
||||
\end{figure}
|
||||
\end{minipage}
|
||||
\end{frame}
|
||||
|
||||
\begin{frame}{Inverse rendering - example from this paper}
|
||||
\centering
|
||||
\includemedia[
|
||||
width=0.62\linewidth,height=0.35\linewidth,
|
||||
activate=onclick,
|
||||
addresource=teapot.mp4,
|
||||
playbutton=fancy,
|
||||
transparent,
|
||||
passcontext,
|
||||
flashvars={
|
||||
source=teapot.mp4
|
||||
&autoPlay=true
|
||||
}
|
||||
]{}{VPlayer.swf}
|
||||
\\
|
||||
All media in this section taken from \cite{ACM:diffable_raytracing}
|
||||
\end{frame}
|
||||
Loading…
Add table
Add a link
Reference in a new issue