Finished paper roughly
parent 4b0d92af52
commit 3a923b278c
12 changed files with 390 additions and 288 deletions
@@ -24,112 +24,116 @@
\babel@aux{ngerman}{}
\abx@aux@cite{1}
\abx@aux@segm{0}{0}{1}
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {section}{\numberline {1}Was ist maschinelles Lernen?}{3}{section.1}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {1.1}Klassifizierungsprobleme}{3}{subsection.1.1}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {1}{\ignorespaces Bin\IeC {\"a}rklassifizierung\relax }}{4}{figure.caption.2}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {section}{\numberline {1}Was ist maschinelles Lernen?}{2}{section.1}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {1.1}Klassifizierungsprobleme}{2}{subsection.1.1}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {1}{\ignorespaces Bin\IeC {\"a}rklassifizierung\relax }}{3}{figure.caption.2}\protected@file@percent }
\providecommand*\caption@xref[2]{\@setref\relax\@undefined{#1}}
\newlabel{Classification}{{1}{4}{Binärklassifizierung\relax }{figure.caption.2}{}}
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {1.2}Regressionsprobleme}{4}{subsection.1.2}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {2}{\ignorespaces Regression\relax }}{4}{figure.caption.3}\protected@file@percent }
\newlabel{Regression}{{2}{4}{Regression\relax }{figure.caption.3}{}}
\newlabel{Classification}{{1}{3}{Binärklassifizierung\relax }{figure.caption.2}{}}
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {1.2}Regressionsprobleme}{3}{subsection.1.2}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {2}{\ignorespaces Regression\relax }}{3}{figure.caption.3}\protected@file@percent }
\newlabel{Regression}{{2}{3}{Regression\relax }{figure.caption.3}{}}
\abx@aux@cite{4}
\abx@aux@segm{0}{0}{4}
\abx@aux@cite{5}
\abx@aux@segm{0}{0}{5}
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {1.3}Gefahren von maschinellem Lernen}{5}{subsection.1.3}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsubsection}{\numberline {1.3.1}Die Daten}{5}{subsubsection.1.3.1}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsubsection}{\numberline {1.3.2}Overfitting}{6}{subsubsection.1.3.2}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {3}{\ignorespaces Overfitting\relax }}{6}{figure.caption.4}\protected@file@percent }
\newlabel{Overfitting}{{3}{6}{Overfitting\relax }{figure.caption.4}{}}
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {section}{\numberline {2}Verschiedene Techniken maschinellen Lernens}{7}{section.2}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {2.1}\IeC {\"U}berwachtes Lernen}{7}{subsection.2.1}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {2.2}Un\IeC {\"u}berwachtes Lernen}{7}{subsection.2.2}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {2.3}Best\IeC {\"a}rkendes Lernen}{7}{subsection.2.3}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {1.3}Gefahren von maschinellem Lernen}{4}{subsection.1.3}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsubsection}{\numberline {1.3.1}Die Daten}{4}{subsubsection.1.3.1}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsubsection}{\numberline {1.3.2}Overfitting}{5}{subsubsection.1.3.2}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {3}{\ignorespaces Overfitting\relax }}{5}{figure.caption.4}\protected@file@percent }
\newlabel{Overfitting}{{3}{5}{Overfitting\relax }{figure.caption.4}{}}
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {section}{\numberline {2}Verschiedene Techniken maschinellen Lernens}{6}{section.2}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {2.1}\IeC {\"U}berwachtes Lernen}{6}{subsection.2.1}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {2.2}Un\IeC {\"u}berwachtes Lernen}{6}{subsection.2.2}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {2.3}Best\IeC {\"a}rkendes Lernen}{6}{subsection.2.3}\protected@file@percent }
\abx@aux@cite{2}
\abx@aux@segm{0}{0}{2}
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {section}{\numberline {3}Neuronale Netze}{8}{section.3}\protected@file@percent }
\newlabel{sec:neuronale-netze}{{3}{8}{Neuronale Netze}{section.3}{}}
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {3.1}Maschinelles Lernen und menschliches Lernen}{8}{subsection.3.1}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {4}{\ignorespaces Neuron \newline Quelle: simple.wikipedia.org/wiki/File:Neuron.svg\newline Copyright: CC Attribution-Share Alike von Nutzer Dhp1080,\newline bearbeitet}}{8}{figure.caption.5}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {3.2}Der Aufbau eines neuronalen Netzes}{9}{subsection.3.2}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {3.3}Berechnung des Ausgabevektors}{9}{subsection.3.3}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {5}{\ignorespaces Ein einfaches neuronales Netz\relax }}{10}{figure.caption.6}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {6}{\ignorespaces Der Plot der Sigmoid Funktion $\sigma (x)=\frac {e^x}{e^x+1}$\relax }}{11}{figure.caption.7}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {7}{\ignorespaces Formel zur Berechnung eines Ausgabevektors aus einem Eingabevektor durch ein Layer Neuronen. \relax }}{12}{figure.caption.8}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {3.4}Der Lernprozess}{12}{subsection.3.4}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {3.5}Fehlerfunktionen}{12}{subsection.3.5}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {section}{\numberline {3}Neuronale Netze}{7}{section.3}\protected@file@percent }
\newlabel{sec:neuronale-netze}{{3}{7}{Neuronale Netze}{section.3}{}}
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {3.1}Maschinelles Lernen und menschliches Lernen}{7}{subsection.3.1}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {4}{\ignorespaces Neuron \newline Quelle: simple.wikipedia.org/wiki/File:Neuron.svg\newline Copyright: CC Attribution-Share Alike von Nutzer Dhp1080,\newline bearbeitet}}{7}{figure.caption.5}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {3.2}Der Aufbau eines neuronalen Netzes}{8}{subsection.3.2}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {3.3}Berechnung des Ausgabevektors}{8}{subsection.3.3}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {5}{\ignorespaces Ein einfaches neuronales Netz\relax }}{9}{figure.caption.6}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {6}{\ignorespaces Der Plot der Sigmoid Funktion $\sigma (x)=\frac {e^x}{e^x+1}$\relax }}{10}{figure.caption.7}\protected@file@percent }
\newlabel{Sigmoid}{{6}{10}{Der Plot der Sigmoid Funktion $\sigma (x)=\frac {e^x}{e^x+1}$\relax }{figure.caption.7}{}}
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {7}{\ignorespaces Formel zur Berechnung eines Ausgabevektors aus einem Eingabevektor durch ein Layer Neuronen. \relax }}{11}{figure.caption.8}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {3.4}Der Lernprozess}{11}{subsection.3.4}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {3.5}Fehlerfunktionen}{11}{subsection.3.5}\protected@file@percent }
\abx@aux@cite{3}
\abx@aux@segm{0}{0}{3}
\abx@aux@segm{0}{0}{3}
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.5.1}MSE -- Durchschnittlicher quadratischer Fehler}{13}{subsubsection.3.5.1}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {8}{\ignorespaces Die Gleichung f\IeC {\"u}r den durchschnittlichen quadratischen Fehler\relax }}{13}{figure.caption.9}\protected@file@percent }
\newlabel{MSE_equation}{{8}{13}{Die Gleichung für den durchschnittlichen quadratischen Fehler\relax }{figure.caption.9}{}}
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.5.2}MAE -- Durchschnitztlicher absoluter Fehler}{13}{subsubsection.3.5.2}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {9}{\ignorespaces Die Gleichung f\IeC {\"u}r den durchschnittlichen absoluten Fehler\relax }}{13}{figure.caption.10}\protected@file@percent }
\newlabel{MAE_equation}{{9}{13}{Die Gleichung für den durchschnittlichen absoluten Fehler\relax }{figure.caption.10}{}}
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.5.3}Kreuzentropiefehler}{13}{subsubsection.3.5.3}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {10}{\ignorespaces Der Graph der Kreuzentropie Fehlerfunktion wenn das tats\IeC {\"a}chliche Label 1 ist\relax }}{14}{figure.caption.11}\protected@file@percent }
\newlabel{CEL_Graph}{{10}{14}{Der Graph der Kreuzentropie Fehlerfunktion wenn das tatsächliche Label 1 ist\relax }{figure.caption.11}{}}
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {11}{\ignorespaces Die Gleichung f\IeC {\"u}r den Kreuzentropiefehler\relax }}{14}{figure.caption.12}\protected@file@percent }
\newlabel{CEL_Function}{{11}{14}{Die Gleichung für den Kreuzentropiefehler\relax }{figure.caption.12}{}}
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {12}{\ignorespaces Die Gleichung f\IeC {\"u}r den durchschnittlichen absoluten Fehler\relax }}{15}{figure.caption.13}\protected@file@percent }
\newlabel{CEL_Finction_cummulative}{{12}{15}{Die Gleichung für den durchschnittlichen absoluten Fehler\relax }{figure.caption.13}{}}
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {3.6}Gradientenverfahren und Backpropagation}{15}{subsection.3.6}\protected@file@percent }
\newlabel{Gradient_section}{{3.6}{15}{Gradientenverfahren und Backpropagation}{subsection.3.6}{}}
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {13}{\ignorespaces Die Gleichung f\IeC {\"u}r den Gradienten der Fehlerfunktion\relax }}{15}{figure.caption.14}\protected@file@percent }
\newlabel{Gradient_Function}{{13}{15}{Die Gleichung für den Gradienten der Fehlerfunktion\relax }{figure.caption.14}{}}
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.6.1}Lernrate}{15}{subsubsection.3.6.1}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {14}{\ignorespaces Die Gleichung f\IeC {\"u}r die Anpassung eines einzelnen Parameters\relax }}{15}{figure.caption.15}\protected@file@percent }
\newlabel{Learning_Rate_Function}{{14}{15}{Die Gleichung für die Anpassung eines einzelnen Parameters\relax }{figure.caption.15}{}}
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {15}{\ignorespaces $\eta $ ist hier zu gro\IeC {\ss } gew\IeC {\"a}hlt\relax }}{16}{figure.caption.16}\protected@file@percent }
\newlabel{Learning_Rate_Graphic}{{15}{16}{$\eta $ ist hier zu groß gewählt\relax }{figure.caption.16}{}}
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {3.7}Verschiedene Layerarten}{16}{subsection.3.7}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.7.1}Convolutional Layers}{17}{subsubsection.3.7.1}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {16}{\ignorespaces Eine Verbildlichung der Vorg\IeC {\"a}nge in einem convolutional Layer\newline Aus einer Animation von\newline https://github.com/vdumoulin/conv\_arithmetic/blob/master/README.md Vincent Dumoulin, Francesco Visin - A guide to convolution arithmetic for deep learning (BibTeX)}}{17}{figure.caption.17}\protected@file@percent }
\newlabel{Convolution_illustration}{{16}{17}{Eine Verbildlichung der Vorgänge in einem convolutional Layer\newline Aus einer Animation von\newline https://github.com/vdumoulin/conv\_arithmetic/blob/master/README.md\\ Vincent Dumoulin, Francesco Visin - A guide to convolution arithmetic for deep learning (BibTeX)}{figure.caption.17}{}}
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {17}{\ignorespaces Erkennt obere horizontale Kanten\relax }}{18}{figure.caption.18}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {18}{\ignorespaces Erkennt linke vertikale Kanten\relax }}{18}{figure.caption.18}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {19}{\ignorespaces Erkennt untere horizontale Kanten\relax }}{18}{figure.caption.18}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {20}{\ignorespaces Erkennt rechte vertikale Kanten\relax }}{18}{figure.caption.18}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {21}{\ignorespaces Das Beispielbild aus dem Mnist Datensatz\relax }}{18}{figure.caption.19}\protected@file@percent }
\newlabel{Filter_Example_raw}{{21}{18}{Das Beispielbild aus dem Mnist Datensatz\relax }{figure.caption.19}{}}
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {22}{\ignorespaces Die jeweils oben stehenden Filter wurden auf das Beispielbild angewandt.\relax }}{18}{figure.caption.20}\protected@file@percent }
\newlabel{Filter_output dargestellt}{{22}{18}{Die jeweils oben stehenden Filter wurden auf das Beispielbild angewandt.\relax }{figure.caption.20}{}}
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {23}{\ignorespaces Beispiele f\IeC {\"u}r low- mid- und high-level Features in Convolutional Neural Nets\newline Quelle: https://tvirdi.github.io/2017-10-29/cnn/}}{19}{figure.caption.21}\protected@file@percent }
\newlabel{HL_features_conv}{{23}{19}{Beispiele für low- mid- und high-level Features in Convolutional Neural Nets\newline Quelle: https://tvirdi.github.io/2017-10-29/cnn/}{figure.caption.21}{}}
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.7.2}Pooling Layers}{19}{subsubsection.3.7.2}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {24}{\ignorespaces Max Pooling mit $2\times 2$ gro\IeC {\ss }en Submatritzen\newline Quelle: https://computersciencewiki.org/index.php/Max-pooling\_/\_Pooling CC BY NC SA Lizenz}}{20}{figure.caption.22}\protected@file@percent }
\newlabel{Maxpool}{{24}{20}{Max Pooling mit $2\times 2$ großen Submatritzen\newline Quelle: https://computersciencewiki.org/index.php/Max-pooling\_/\_Pooling\\ CC BY NC SA Lizenz}{figure.caption.22}{}}
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {25}{\ignorespaces Average Pooling mit $2\times 2$ gro\IeC {\ss }en Submatritzen\newline Aus: Dominguez-Morales, Juan Pedro. (2018). Neuromorphic audio processing through real-time embedded spiking neural networks. Abbildung 33}}{20}{figure.caption.23}\protected@file@percent }
\newlabel{AvgPool}{{25}{20}{Average Pooling mit $2\times 2$ großen Submatritzen\newline Aus: Dominguez-Morales, Juan Pedro. (2018). Neuromorphic audio processing through real-time embedded spiking neural networks. Abbildung 33}{figure.caption.23}{}}
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {26}{\ignorespaces Gegen\IeC {\"u}berstellung von Max und Average Pooling\relax }}{21}{figure.caption.24}\protected@file@percent }
\newlabel{Pooling_Mnist}{{26}{21}{Gegenüberstellung von Max und Average Pooling\relax }{figure.caption.24}{}}
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {section}{\numberline {4}PyTorch}{21}{section.4}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {4.1}Datenvorbereitung}{22}{subsection.4.1}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {27}{\ignorespaces Der Code zum Laden des MNIST Datensatzes\relax }}{22}{figure.caption.25}\protected@file@percent }
\newlabel{MNIST_Dataloader_Code}{{27}{22}{Der Code zum Laden des MNIST Datensatzes\relax }{figure.caption.25}{}}
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.5.1}MSE -- Durchschnittlicher quadratischer Fehler}{12}{subsubsection.3.5.1}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {8}{\ignorespaces Die Gleichung f\IeC {\"u}r den durchschnittlichen quadratischen Fehler\relax }}{12}{figure.caption.9}\protected@file@percent }
\newlabel{MSE_equation}{{8}{12}{Die Gleichung für den durchschnittlichen quadratischen Fehler\relax }{figure.caption.9}{}}
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.5.2}MAE -- Durchschnitztlicher absoluter Fehler}{12}{subsubsection.3.5.2}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {9}{\ignorespaces Die Gleichung f\IeC {\"u}r den durchschnittlichen absoluten Fehler\relax }}{12}{figure.caption.10}\protected@file@percent }
\newlabel{MAE_equation}{{9}{12}{Die Gleichung für den durchschnittlichen absoluten Fehler\relax }{figure.caption.10}{}}
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.5.3}Kreuzentropiefehler}{12}{subsubsection.3.5.3}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {10}{\ignorespaces Der Graph der Kreuzentropie Fehlerfunktion wenn das tats\IeC {\"a}chliche Label 1 ist\relax }}{13}{figure.caption.11}\protected@file@percent }
\newlabel{CEL_Graph}{{10}{13}{Der Graph der Kreuzentropie Fehlerfunktion wenn das tatsächliche Label 1 ist\relax }{figure.caption.11}{}}
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {11}{\ignorespaces Die Gleichung f\IeC {\"u}r den Kreuzentropiefehler\relax }}{13}{figure.caption.12}\protected@file@percent }
\newlabel{CEL_Function}{{11}{13}{Die Gleichung für den Kreuzentropiefehler\relax }{figure.caption.12}{}}
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {12}{\ignorespaces Die Gleichung f\IeC {\"u}r den durchschnittlichen absoluten Fehler\relax }}{14}{figure.caption.13}\protected@file@percent }
\newlabel{CEL_Finction_cummulative}{{12}{14}{Die Gleichung für den durchschnittlichen absoluten Fehler\relax }{figure.caption.13}{}}
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {3.6}Gradientenverfahren und Backpropagation}{14}{subsection.3.6}\protected@file@percent }
\newlabel{Gradient_section}{{3.6}{14}{Gradientenverfahren und Backpropagation}{subsection.3.6}{}}
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {13}{\ignorespaces Die Gleichung f\IeC {\"u}r den Gradienten der Fehlerfunktion\relax }}{14}{figure.caption.14}\protected@file@percent }
\newlabel{Gradient_Function}{{13}{14}{Die Gleichung für den Gradienten der Fehlerfunktion\relax }{figure.caption.14}{}}
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.6.1}Lernrate}{14}{subsubsection.3.6.1}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {14}{\ignorespaces Die Gleichung f\IeC {\"u}r die Anpassung eines einzelnen Parameters\relax }}{14}{figure.caption.15}\protected@file@percent }
\newlabel{Learning_Rate_Function}{{14}{14}{Die Gleichung für die Anpassung eines einzelnen Parameters\relax }{figure.caption.15}{}}
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {15}{\ignorespaces $\eta $ ist hier zu gro\IeC {\ss } gew\IeC {\"a}hlt\relax }}{15}{figure.caption.16}\protected@file@percent }
\newlabel{Learning_Rate_Graphic}{{15}{15}{$\eta $ ist hier zu groß gewählt\relax }{figure.caption.16}{}}
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {3.7}Verschiedene Layerarten}{15}{subsection.3.7}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.7.1}Convolutional Layers}{16}{subsubsection.3.7.1}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {16}{\ignorespaces Eine Verbildlichung der Vorg\IeC {\"a}nge in einem convolutional Layer\newline Aus einer Animation von\newline https://github.com/vdumoulin/conv\_arithmetic/blob/master/README.md Vincent Dumoulin, Francesco Visin - A guide to convolution arithmetic for deep learning (BibTeX)}}{16}{figure.caption.17}\protected@file@percent }
\newlabel{Convolution_illustration}{{16}{16}{Eine Verbildlichung der Vorgänge in einem convolutional Layer\newline Aus einer Animation von\newline https://github.com/vdumoulin/conv\_arithmetic/blob/master/README.md\\ Vincent Dumoulin, Francesco Visin - A guide to convolution arithmetic for deep learning (BibTeX)}{figure.caption.17}{}}
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {17}{\ignorespaces Erkennt obere horizontale Kanten\relax }}{17}{figure.caption.18}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {18}{\ignorespaces Erkennt linke vertikale Kanten\relax }}{17}{figure.caption.18}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {19}{\ignorespaces Erkennt untere horizontale Kanten\relax }}{17}{figure.caption.18}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {20}{\ignorespaces Erkennt rechte vertikale Kanten\relax }}{17}{figure.caption.18}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {21}{\ignorespaces Das Beispielbild aus dem Mnist Datensatz\relax }}{17}{figure.caption.19}\protected@file@percent }
\newlabel{Filter_Example_raw}{{21}{17}{Das Beispielbild aus dem Mnist Datensatz\relax }{figure.caption.19}{}}
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {22}{\ignorespaces Die jeweils oben stehenden Filter wurden auf das Beispielbild angewandt.\relax }}{17}{figure.caption.20}\protected@file@percent }
\newlabel{Filter_output dargestellt}{{22}{17}{Die jeweils oben stehenden Filter wurden auf das Beispielbild angewandt.\relax }{figure.caption.20}{}}
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {23}{\ignorespaces Beispiele f\IeC {\"u}r low- mid- und high-level Features in Convolutional Neural Nets\newline Quelle: https://tvirdi.github.io/2017-10-29/cnn/}}{18}{figure.caption.21}\protected@file@percent }
\newlabel{HL_features_conv}{{23}{18}{Beispiele für low- mid- und high-level Features in Convolutional Neural Nets\newline Quelle: https://tvirdi.github.io/2017-10-29/cnn/}{figure.caption.21}{}}
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.7.2}Pooling Layers}{18}{subsubsection.3.7.2}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {24}{\ignorespaces Max Pooling mit $2\times 2$ gro\IeC {\ss }en Submatritzen\newline Quelle: https://computersciencewiki.org/index.php/Max-pooling\_/\_Pooling CC BY NC SA Lizenz}}{19}{figure.caption.22}\protected@file@percent }
\newlabel{Maxpool}{{24}{19}{Max Pooling mit $2\times 2$ großen Submatritzen\newline Quelle: https://computersciencewiki.org/index.php/Max-pooling\_/\_Pooling\\ CC BY NC SA Lizenz}{figure.caption.22}{}}
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {25}{\ignorespaces Average Pooling mit $2\times 2$ gro\IeC {\ss }en Submatritzen\newline Aus: Dominguez-Morales, Juan Pedro. (2018). Neuromorphic audio processing through real-time embedded spiking neural networks. Abbildung 33}}{19}{figure.caption.23}\protected@file@percent }
\newlabel{AvgPool}{{25}{19}{Average Pooling mit $2\times 2$ großen Submatritzen\newline Aus: Dominguez-Morales, Juan Pedro. (2018). Neuromorphic audio processing through real-time embedded spiking neural networks. Abbildung 33}{figure.caption.23}{}}
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {26}{\ignorespaces Gegen\IeC {\"u}berstellung von Max und Average Pooling\relax }}{20}{figure.caption.24}\protected@file@percent }
\newlabel{Pooling_Mnist}{{26}{20}{Gegenüberstellung von Max und Average Pooling\relax }{figure.caption.24}{}}
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {section}{\numberline {4}PyTorch}{20}{section.4}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {4.1}Datenvorbereitung}{21}{subsection.4.1}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {27}{\ignorespaces Der Code zum Laden des MNIST Datensatzes\relax }}{21}{figure.caption.25}\protected@file@percent }
\newlabel{MNIST_Dataloader_Code}{{27}{21}{Der Code zum Laden des MNIST Datensatzes\relax }{figure.caption.25}{}}
\abx@aux@cite{6}
\abx@aux@segm{0}{0}{6}
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {4.2}Definieren des Netzes}{23}{subsection.4.2}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {28}{\ignorespaces Code um ein einfaches Netz in Pytorch zu definieren\relax }}{24}{figure.caption.26}\protected@file@percent }
\newlabel{Net_simple_definition}{{28}{24}{Code um ein einfaches Netz in Pytorch zu definieren\relax }{figure.caption.26}{}}
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {4.2}Definieren des Netzes}{22}{subsection.4.2}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {28}{\ignorespaces Code um ein einfaches Netz in Pytorch zu definieren\relax }}{23}{figure.caption.26}\protected@file@percent }
\newlabel{Net_simple_definition}{{28}{23}{Code um ein einfaches Netz in Pytorch zu definieren\relax }{figure.caption.26}{}}
\abx@aux@cite{7}
\abx@aux@segm{0}{0}{7}
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {4.3}Trainieren des Netzes}{25}{subsection.4.3}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {29}{\ignorespaces Code um das Netz auf einem Datensatz zu trainieren\relax }}{25}{figure.caption.27}\protected@file@percent }
\newlabel{Code_train_loop}{{29}{25}{Code um das Netz auf einem Datensatz zu trainieren\relax }{figure.caption.27}{}}
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {4.4}Pytorch und weights and biases}{26}{subsection.4.4}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {section}{\numberline {5}Fallbeispiel I:\newline Ein Klassifizierungsnetzwerk f\IeC {\"u}r handgeschriebene Ziffern}{27}{section.5}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {5.1}Aufgabe}{27}{subsection.5.1}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {5.2}Der MNIST Datensatz}{27}{subsection.5.2}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {5.3}Das Netz}{28}{subsection.5.3}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {4.3}Trainieren des Netzes}{24}{subsection.4.3}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {29}{\ignorespaces Code um das Netz auf einem Datensatz zu trainieren\relax }}{24}{figure.caption.27}\protected@file@percent }
\newlabel{Code_train_loop}{{29}{24}{Code um das Netz auf einem Datensatz zu trainieren\relax }{figure.caption.27}{}}
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {4.4}Pytorch und weights and biases}{25}{subsection.4.4}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {section}{\numberline {5}Ein Klassifizierungsnetzwerk f\IeC {\"u}r handgeschriebene Ziffern}{26}{section.5}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {5.1}Aufgabe}{26}{subsection.5.1}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {5.2}Der MNIST Datensatz}{26}{subsection.5.2}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {5.3}Das Netz}{26}{subsection.5.3}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {30}{\ignorespaces Der Code um das in diesem Projekt genutzte Klassifizierungsnetz zu definieren.\relax }}{27}{figure.caption.28}\protected@file@percent }
\newlabel{net}{{30}{27}{Der Code um das in diesem Projekt genutzte Klassifizierungsnetz zu definieren.\relax }{figure.caption.28}{}}
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {31}{\ignorespaces Der Graph der ReLu Aktivierungsfunktion\relax }}{27}{figure.caption.29}\protected@file@percent }
\newlabel{ReLu}{{31}{27}{Der Graph der ReLu Aktivierungsfunktion\relax }{figure.caption.29}{}}
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {5.4}Ergebnis}{28}{subsection.5.4}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {section}{\numberline {6}Fallbeispiel II:\newline Eine selbsttrainierende KI f\IeC {\"u}r Tic-Tac-Toe}{28}{section.6}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {6.1}Das Prinzip}{28}{subsection.6.1}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {6.2}Chance-Tree Optimierung}{28}{subsection.6.2}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {6.3}L\IeC {\"o}sung mittels eines neuronalen Netzes}{28}{subsection.6.3}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {6.4}Vergleich}{28}{subsection.6.4}\protected@file@percent }
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {section}{\numberline {7}Schlusswort}{28}{section.7}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {32}{\ignorespaces Ein Plot der Trefferquote aufgetragen gegen die Trainingszeit\relax }}{28}{figure.caption.30}\protected@file@percent }
\newlabel{accuracy}{{32}{28}{Ein Plot der Trefferquote aufgetragen gegen die Trainingszeit\relax }{figure.caption.30}{}}
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {section}{\numberline {6}Schlusswort}{28}{section.6}\protected@file@percent }
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {33}{\ignorespaces Ein Plot des Kreuzentropiefehlers aufgetragen gegen die Trainingszeit\relax }}{29}{figure.caption.31}\protected@file@percent }
\newlabel{loss}{{33}{29}{Ein Plot des Kreuzentropiefehlers aufgetragen gegen die Trainingszeit\relax }{figure.caption.31}{}}
\bibcite{1}{1}
\bibcite{2}{2}
\bibcite{3}{3}
@@ -174,4 +178,6 @@
ECDE410C14A506CF9F77A687B26F5474F6C426F58C5CCC27D3C7BD698FEC22DB.pygtex,
30299FD7E2187F6D53ED8A608F3958F2F6C426F58C5CCC27D3C7BD698FEC22DB.pygtex,
458D5F1AFC81AE7291A7AAAD852DC12AF6C426F58C5CCC27D3C7BD698FEC22DB.pygtex,
B51E2DE1996D35173C012720F7A4FDA4F6C426F58C5CCC27D3C7BD698FEC22DB.pygtex}
B51E2DE1996D35173C012720F7A4FDA4F6C426F58C5CCC27D3C7BD698FEC22DB.pygtex,
AF2982B0D9BB868B8B4E6D0A6A863B1C53A0C9FE66949F8EC4BED65B31F6975B.pygtex,
A26181649063E5A564A34B0EBDE29E28F6C426F58C5CCC27D3C7BD698FEC22DB.pygtex}