Update on Overleaf.

uxwmp 2023-06-18 19:53:03 +00:00 committed by node
parent 4fe2650218
commit 845d19c72c


@@ -19,7 +19,7 @@
\begin{block}{Idea}
\begin{itemize}
\item Traditional sampling for continuous regions
\item Edge sampling for the discontinuous part
\end{itemize}
\end{block}
\end{frame}
@@ -30,22 +30,73 @@
\end{frame}
\begin{frame}{Edge sampling - half spaces}
\begin{minipage}{0.5\linewidth}
\input{presentation/diagrams/halfspaces}
\end{minipage}
\begin{minipage}{0.45\linewidth}
\begin{block}{Principle}
\begin{itemize}
\item Edge divides the scene into two half-spaces
\item Calculate the color in each half-space
\item Calculate the gradient from the color difference (1D example on the next slide)
\item Occlusion $\implies$ no color difference $\implies \nabla = 0$
\end{itemize}
\end{block}
\end{minipage}
\end{frame}
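% Added illustration, not part of the original slides: a minimal 1D version of
% the half-space argument. The edge position p and the colors c_l, c_r are
% hypothetical symbols introduced only for this sketch.
\begin{frame}{Edge sampling - 1D example (sketch)}
A single edge at position $p\in(0,1)$ with scene function $f(x;p)=c_l$ for $x<p$ (left half-space) and $c_r$ otherwise (right half-space):
\[
I(p) = \int_0^1 f(x;p)\,dx = p\,c_l + (1-p)\,c_r
\qquad\implies\qquad
\frac{\partial I}{\partial p} = c_l - c_r
\]
The gradient is exactly the color difference across the edge; if occlusion makes both half-spaces equally colored ($c_l = c_r$), the gradient vanishes.
\end{frame}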
\begin{frame}[t]{Edge sampling - math background}
Color:
\only<1>{
\[
I = \underbrace{\iint}_{\text{Screen space domain}}
\underbrace{f(x,y;\Phi)}_{\text{Scene function; Parameter set } \Phi}dxdy
\]
}
\only<2-> {
\[
I = \iint f(x,y;\Phi)dxdy
\]
}
\only<2> {
Use half-spaces to rewrite the integral:
\[
I = \sum_i\iint
\underbrace{\theta}_{\text{step function}}(
\underbrace{\alpha_i(x,y)}_{\text{edge equation formed by triangle edge } i})
\underbrace{f_i(x,y)}_{\text{half-space color (may itself contain Heaviside step functions)}}
dxdy
\]
}
\only<3->{
Use half-spaces to rewrite the integral:
\[
I = \sum_i\iint\theta(\alpha_i(x,y))f_i(x,y)dxdy
\]
Calculate the gradient (numerical check on the next slide):
\[
\nabla\iint\theta(\alpha_i(x,y))f_i(x,y)dxdy =
\underbrace{\iint\delta(\alpha_i(x,y))\nabla\alpha_i(x,y)f_i(x,y)dxdy}_{\text{Edges (can be differentiated)}} +
\underbrace{\iint\nabla f_i(x,y)\theta(\alpha_i(x,y))dxdy}_{\text{Original pixel integral}}
\]
}
\end{frame}
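% Added sketch, not from the paper or the original slides: a numerical 1D
% illustration of the two terms above. Plain per-sample differentiation only
% yields the "original pixel integral" term (zero here); the edge (delta) term
% carries the whole gradient. All names (f, I, c_left, c_right, p) are made up
% for this example.
\begin{frame}[fragile]{Edge sampling - numerical check (sketch)}
\begin{verbatim}
c_left, c_right, p, N = 0.2, 0.8, 0.4, 100000

def f(x, p):       # scene function: step at the edge x = p
    return c_left if x < p else c_right

def I(p):          # pixel integral, midpoint rule on [0, 1]
    return sum(f((i + 0.5) / N, p) for i in range(N)) / N

eps = 1e-3         # finite-difference reference
fd = (I(p + eps) - I(p - eps)) / (2 * eps)

interior = 0.0     # df/dp is 0 at (almost) every sample
edge = c_left - c_right   # delta term: color difference
print(fd, interior + edge)   # both approximately -0.6
\end{verbatim}
\end{frame}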
\begin{frame}{Inverse rendering - Results in this paper}
\begin{block}{Inverse rendering setup}
\begin{itemize}
\item Parameters: Camera pose, material parameters, light source intensity
\item Scene: Strong indirect illumination and non-Lambertian materials
\item Initial guess: Almost all objects white, arbitrary camera pose
\item 177 parameters
\item Loss function: Absolute difference
\item ADAM optimizer (fitting loop sketched on the next slide)
\item Start resolution of $64\times 64$, linearly increase to $512\times 512$ in 8 steps\\
$\implies$ Avoid local minima of the loss function
\end{itemize}
\end{block}
\end{frame}
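% Added sketch, not the paper's published code: a minimal PyTorch-style fitting
% loop matching the setup above. ``render'' stands for a hypothetical
% differentiable renderer; the learning rate and iteration counts are
% illustrative placeholders.
\begin{frame}[fragile]{Inverse rendering - fitting loop (sketch)}
\begin{verbatim}
import torch

def fit(render, targets, params, iters_per_res=50):
    # params: dict of tensors (camera pose, materials,
    # light intensity) created with requires_grad=True
    opt = torch.optim.Adam(list(params.values()), lr=1e-2)
    # resolution schedule: 64 -> 512, linearly in 8 steps
    for res in torch.linspace(64, 512, 8).int().tolist():
        target = targets[res]   # reference image at this res
        for _ in range(iters_per_res):
            opt.zero_grad()
            img = render(params, res)   # differentiable render
            loss = (img - target).abs().sum()  # abs. difference
            loss.backward()     # edge-sampling gradients
            opt.step()
    return params
\end{verbatim}
\end{frame}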
@@ -251,3 +302,27 @@
\\
All media in this section taken from \cite{ACM:diffable_raytracing}
\end{frame}
\subsection{Conclusion - What can this method do?}
\begin{frame}{Conclusion}
\setbeamercovered{transparent}
\begin{block}{Possibilities}
\begin{itemize}
\item Differentiate with respect to \emph{any} scene parameter
\item Arbitrary non-Dirac materials (no ideal specular BRDFs)
\item Global illumination, occlusion, shadows, etc.
\item Automatic differentiation (PyTorch) $\rightarrow$ Inverse rendering
\end{itemize}
\end{block}
\pause
\begin{block}{Some Limitations}
\begin{itemize}
\item Performance
\item Time domain (animations)
\item Interpenetrating meshes
\item Non-differentiable shaders
\end{itemize}
\end{block}
\end{frame}