Finished paper roguely
This commit is contained in:
parent
4b0d92af52
commit
3a923b278c
12 changed files with 390 additions and 288 deletions
|
|
@ -24,112 +24,116 @@
|
|||
\babel@aux{ngerman}{}
|
||||
\abx@aux@cite{1}
|
||||
\abx@aux@segm{0}{0}{1}
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {section}{\numberline {1}Was ist maschinelles Lernen?}{3}{section.1}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {1.1}Klassifizierungsprobleme}{3}{subsection.1.1}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {1}{\ignorespaces Bin\IeC {\"a}rklassifizierung\relax }}{4}{figure.caption.2}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {section}{\numberline {1}Was ist maschinelles Lernen?}{2}{section.1}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {1.1}Klassifizierungsprobleme}{2}{subsection.1.1}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {1}{\ignorespaces Bin\IeC {\"a}rklassifizierung\relax }}{3}{figure.caption.2}\protected@file@percent }
|
||||
\providecommand*\caption@xref[2]{\@setref\relax\@undefined{#1}}
|
||||
\newlabel{Classification}{{1}{4}{Binärklassifizierung\relax }{figure.caption.2}{}}
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {1.2}Regressionsprobleme}{4}{subsection.1.2}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {2}{\ignorespaces Regression\relax }}{4}{figure.caption.3}\protected@file@percent }
|
||||
\newlabel{Regression}{{2}{4}{Regression\relax }{figure.caption.3}{}}
|
||||
\newlabel{Classification}{{1}{3}{Binärklassifizierung\relax }{figure.caption.2}{}}
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {1.2}Regressionsprobleme}{3}{subsection.1.2}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {2}{\ignorespaces Regression\relax }}{3}{figure.caption.3}\protected@file@percent }
|
||||
\newlabel{Regression}{{2}{3}{Regression\relax }{figure.caption.3}{}}
|
||||
\abx@aux@cite{4}
|
||||
\abx@aux@segm{0}{0}{4}
|
||||
\abx@aux@cite{5}
|
||||
\abx@aux@segm{0}{0}{5}
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {1.3}Gefahren von maschinellem Lernen}{5}{subsection.1.3}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsubsection}{\numberline {1.3.1}Die Daten}{5}{subsubsection.1.3.1}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsubsection}{\numberline {1.3.2}Overfitting}{6}{subsubsection.1.3.2}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {3}{\ignorespaces Overfitting\relax }}{6}{figure.caption.4}\protected@file@percent }
|
||||
\newlabel{Overfitting}{{3}{6}{Overfitting\relax }{figure.caption.4}{}}
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {section}{\numberline {2}Verschiedene Techniken maschinellen Lernens}{7}{section.2}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {2.1}\IeC {\"U}berwachtes Lernen}{7}{subsection.2.1}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {2.2}Un\IeC {\"u}berwachtes Lernen}{7}{subsection.2.2}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {2.3}Best\IeC {\"a}rkendes Lernen}{7}{subsection.2.3}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {1.3}Gefahren von maschinellem Lernen}{4}{subsection.1.3}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsubsection}{\numberline {1.3.1}Die Daten}{4}{subsubsection.1.3.1}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsubsection}{\numberline {1.3.2}Overfitting}{5}{subsubsection.1.3.2}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {3}{\ignorespaces Overfitting\relax }}{5}{figure.caption.4}\protected@file@percent }
|
||||
\newlabel{Overfitting}{{3}{5}{Overfitting\relax }{figure.caption.4}{}}
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {section}{\numberline {2}Verschiedene Techniken maschinellen Lernens}{6}{section.2}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {2.1}\IeC {\"U}berwachtes Lernen}{6}{subsection.2.1}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {2.2}Un\IeC {\"u}berwachtes Lernen}{6}{subsection.2.2}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {2.3}Best\IeC {\"a}rkendes Lernen}{6}{subsection.2.3}\protected@file@percent }
|
||||
\abx@aux@cite{2}
|
||||
\abx@aux@segm{0}{0}{2}
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {section}{\numberline {3}Neuronale Netze}{8}{section.3}\protected@file@percent }
|
||||
\newlabel{sec:neuronale-netze}{{3}{8}{Neuronale Netze}{section.3}{}}
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {3.1}Maschinelles Lernen und menschliches Lernen}{8}{subsection.3.1}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {4}{\ignorespaces Neuron \newline Quelle: simple.wikipedia.org/wiki/File:Neuron.svg\newline Copyright: CC Attribution-Share Alike von Nutzer Dhp1080,\newline bearbeitet}}{8}{figure.caption.5}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {3.2}Der Aufbau eines neuronalen Netzes}{9}{subsection.3.2}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {3.3}Berechnung des Ausgabevektors}{9}{subsection.3.3}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {5}{\ignorespaces Ein einfaches neuronales Netz\relax }}{10}{figure.caption.6}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {6}{\ignorespaces Der Plot der Sigmoid Funktion $\sigma (x)=\frac {e^x}{e^x+1}$\relax }}{11}{figure.caption.7}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {7}{\ignorespaces Formel zur Berechnung eines Ausgabevektors aus einem Eingabevektor durch ein Layer Neuronen. \relax }}{12}{figure.caption.8}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {3.4}Der Lernprozess}{12}{subsection.3.4}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {3.5}Fehlerfunktionen}{12}{subsection.3.5}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {section}{\numberline {3}Neuronale Netze}{7}{section.3}\protected@file@percent }
|
||||
\newlabel{sec:neuronale-netze}{{3}{7}{Neuronale Netze}{section.3}{}}
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {3.1}Maschinelles Lernen und menschliches Lernen}{7}{subsection.3.1}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {4}{\ignorespaces Neuron \newline Quelle: simple.wikipedia.org/wiki/File:Neuron.svg\newline Copyright: CC Attribution-Share Alike von Nutzer Dhp1080,\newline bearbeitet}}{7}{figure.caption.5}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {3.2}Der Aufbau eines neuronalen Netzes}{8}{subsection.3.2}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {3.3}Berechnung des Ausgabevektors}{8}{subsection.3.3}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {5}{\ignorespaces Ein einfaches neuronales Netz\relax }}{9}{figure.caption.6}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {6}{\ignorespaces Der Plot der Sigmoid Funktion $\sigma (x)=\frac {e^x}{e^x+1}$\relax }}{10}{figure.caption.7}\protected@file@percent }
|
||||
\newlabel{Sigmoid}{{6}{10}{Der Plot der Sigmoid Funktion $\sigma (x)=\frac {e^x}{e^x+1}$\relax }{figure.caption.7}{}}
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {7}{\ignorespaces Formel zur Berechnung eines Ausgabevektors aus einem Eingabevektor durch ein Layer Neuronen. \relax }}{11}{figure.caption.8}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {3.4}Der Lernprozess}{11}{subsection.3.4}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {3.5}Fehlerfunktionen}{11}{subsection.3.5}\protected@file@percent }
|
||||
\abx@aux@cite{3}
|
||||
\abx@aux@segm{0}{0}{3}
|
||||
\abx@aux@segm{0}{0}{3}
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.5.1}MSE -- Durchschnittlicher quadratischer Fehler}{13}{subsubsection.3.5.1}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {8}{\ignorespaces Die Gleichung f\IeC {\"u}r den durchschnittlichen quadratischen Fehler\relax }}{13}{figure.caption.9}\protected@file@percent }
|
||||
\newlabel{MSE_equation}{{8}{13}{Die Gleichung für den durchschnittlichen quadratischen Fehler\relax }{figure.caption.9}{}}
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.5.2}MAE -- Durchschnitztlicher absoluter Fehler}{13}{subsubsection.3.5.2}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {9}{\ignorespaces Die Gleichung f\IeC {\"u}r den durchschnittlichen absoluten Fehler\relax }}{13}{figure.caption.10}\protected@file@percent }
|
||||
\newlabel{MAE_equation}{{9}{13}{Die Gleichung für den durchschnittlichen absoluten Fehler\relax }{figure.caption.10}{}}
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.5.3}Kreuzentropiefehler}{13}{subsubsection.3.5.3}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {10}{\ignorespaces Der Graph der Kreuzentropie Fehlerfunktion wenn das tats\IeC {\"a}chliche Label 1 ist\relax }}{14}{figure.caption.11}\protected@file@percent }
|
||||
\newlabel{CEL_Graph}{{10}{14}{Der Graph der Kreuzentropie Fehlerfunktion wenn das tatsächliche Label 1 ist\relax }{figure.caption.11}{}}
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {11}{\ignorespaces Die Gleichung f\IeC {\"u}r den Kreuzentropiefehler\relax }}{14}{figure.caption.12}\protected@file@percent }
|
||||
\newlabel{CEL_Function}{{11}{14}{Die Gleichung für den Kreuzentropiefehler\relax }{figure.caption.12}{}}
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {12}{\ignorespaces Die Gleichung f\IeC {\"u}r den durchschnittlichen absoluten Fehler\relax }}{15}{figure.caption.13}\protected@file@percent }
|
||||
\newlabel{CEL_Finction_cummulative}{{12}{15}{Die Gleichung für den durchschnittlichen absoluten Fehler\relax }{figure.caption.13}{}}
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {3.6}Gradientenverfahren und Backpropagation}{15}{subsection.3.6}\protected@file@percent }
|
||||
\newlabel{Gradient_section}{{3.6}{15}{Gradientenverfahren und Backpropagation}{subsection.3.6}{}}
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {13}{\ignorespaces Die Gleichung f\IeC {\"u}r den Gradienten der Fehlerfunktion\relax }}{15}{figure.caption.14}\protected@file@percent }
|
||||
\newlabel{Gradient_Function}{{13}{15}{Die Gleichung für den Gradienten der Fehlerfunktion\relax }{figure.caption.14}{}}
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.6.1}Lernrate}{15}{subsubsection.3.6.1}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {14}{\ignorespaces Die Gleichung f\IeC {\"u}r die Anpassung eines einzelnen Parameters\relax }}{15}{figure.caption.15}\protected@file@percent }
|
||||
\newlabel{Learning_Rate_Function}{{14}{15}{Die Gleichung für die Anpassung eines einzelnen Parameters\relax }{figure.caption.15}{}}
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {15}{\ignorespaces $\eta $ ist hier zu gro\IeC {\ss } gew\IeC {\"a}hlt\relax }}{16}{figure.caption.16}\protected@file@percent }
|
||||
\newlabel{Learning_Rate_Graphic}{{15}{16}{$\eta $ ist hier zu groß gewählt\relax }{figure.caption.16}{}}
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {3.7}Verschiedene Layerarten}{16}{subsection.3.7}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.7.1}Convolutional Layers}{17}{subsubsection.3.7.1}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {16}{\ignorespaces Eine Verbildlichung der Vorg\IeC {\"a}nge in einem convolutional Layer\newline Aus einer Animation von\newline https://github.com/vdumoulin/conv\_arithmetic/blob/master/README.md Vincent Dumoulin, Francesco Visin - A guide to convolution arithmetic for deep learning (BibTeX)}}{17}{figure.caption.17}\protected@file@percent }
|
||||
\newlabel{Convolution_illustration}{{16}{17}{Eine Verbildlichung der Vorgänge in einem convolutional Layer\newline Aus einer Animation von\newline https://github.com/vdumoulin/conv\_arithmetic/blob/master/README.md\\ Vincent Dumoulin, Francesco Visin - A guide to convolution arithmetic for deep learning (BibTeX)}{figure.caption.17}{}}
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {17}{\ignorespaces Erkennt obere horizontale Kanten\relax }}{18}{figure.caption.18}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {18}{\ignorespaces Erkennt linke vertikale Kanten\relax }}{18}{figure.caption.18}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {19}{\ignorespaces Erkennt untere horizontale Kanten\relax }}{18}{figure.caption.18}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {20}{\ignorespaces Erkennt rechte vertikale Kanten\relax }}{18}{figure.caption.18}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {21}{\ignorespaces Das Beispielbild aus dem Mnist Datensatz\relax }}{18}{figure.caption.19}\protected@file@percent }
|
||||
\newlabel{Filter_Example_raw}{{21}{18}{Das Beispielbild aus dem Mnist Datensatz\relax }{figure.caption.19}{}}
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {22}{\ignorespaces Die jeweils oben stehenden Filter wurden auf das Beispielbild angewandt.\relax }}{18}{figure.caption.20}\protected@file@percent }
|
||||
\newlabel{Filter_output dargestellt}{{22}{18}{Die jeweils oben stehenden Filter wurden auf das Beispielbild angewandt.\relax }{figure.caption.20}{}}
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {23}{\ignorespaces Beispiele f\IeC {\"u}r low- mid- und high-level Features in Convolutional Neural Nets\newline Quelle: https://tvirdi.github.io/2017-10-29/cnn/}}{19}{figure.caption.21}\protected@file@percent }
|
||||
\newlabel{HL_features_conv}{{23}{19}{Beispiele für low- mid- und high-level Features in Convolutional Neural Nets\newline Quelle: https://tvirdi.github.io/2017-10-29/cnn/}{figure.caption.21}{}}
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.7.2}Pooling Layers}{19}{subsubsection.3.7.2}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {24}{\ignorespaces Max Pooling mit $2\times 2$ gro\IeC {\ss }en Submatritzen\newline Quelle: https://computersciencewiki.org/index.php/Max-pooling\_/\_Pooling CC BY NC SA Lizenz}}{20}{figure.caption.22}\protected@file@percent }
|
||||
\newlabel{Maxpool}{{24}{20}{Max Pooling mit $2\times 2$ großen Submatritzen\newline Quelle: https://computersciencewiki.org/index.php/Max-pooling\_/\_Pooling\\ CC BY NC SA Lizenz}{figure.caption.22}{}}
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {25}{\ignorespaces Average Pooling mit $2\times 2$ gro\IeC {\ss }en Submatritzen\newline Aus: Dominguez-Morales, Juan Pedro. (2018). Neuromorphic audio processing through real-time embedded spiking neural networks. Abbildung 33}}{20}{figure.caption.23}\protected@file@percent }
|
||||
\newlabel{AvgPool}{{25}{20}{Average Pooling mit $2\times 2$ großen Submatritzen\newline Aus: Dominguez-Morales, Juan Pedro. (2018). Neuromorphic audio processing through real-time embedded spiking neural networks. Abbildung 33}{figure.caption.23}{}}
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {26}{\ignorespaces Gegen\IeC {\"u}berstellung von Max und Average Pooling\relax }}{21}{figure.caption.24}\protected@file@percent }
|
||||
\newlabel{Pooling_Mnist}{{26}{21}{Gegenüberstellung von Max und Average Pooling\relax }{figure.caption.24}{}}
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {section}{\numberline {4}PyTorch}{21}{section.4}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {4.1}Datenvorbereitung}{22}{subsection.4.1}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {27}{\ignorespaces Der Code zum Laden des MNIST Datensatzes\relax }}{22}{figure.caption.25}\protected@file@percent }
|
||||
\newlabel{MNIST_Dataloader_Code}{{27}{22}{Der Code zum Laden des MNIST Datensatzes\relax }{figure.caption.25}{}}
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.5.1}MSE -- Durchschnittlicher quadratischer Fehler}{12}{subsubsection.3.5.1}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {8}{\ignorespaces Die Gleichung f\IeC {\"u}r den durchschnittlichen quadratischen Fehler\relax }}{12}{figure.caption.9}\protected@file@percent }
|
||||
\newlabel{MSE_equation}{{8}{12}{Die Gleichung für den durchschnittlichen quadratischen Fehler\relax }{figure.caption.9}{}}
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.5.2}MAE -- Durchschnitztlicher absoluter Fehler}{12}{subsubsection.3.5.2}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {9}{\ignorespaces Die Gleichung f\IeC {\"u}r den durchschnittlichen absoluten Fehler\relax }}{12}{figure.caption.10}\protected@file@percent }
|
||||
\newlabel{MAE_equation}{{9}{12}{Die Gleichung für den durchschnittlichen absoluten Fehler\relax }{figure.caption.10}{}}
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.5.3}Kreuzentropiefehler}{12}{subsubsection.3.5.3}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {10}{\ignorespaces Der Graph der Kreuzentropie Fehlerfunktion wenn das tats\IeC {\"a}chliche Label 1 ist\relax }}{13}{figure.caption.11}\protected@file@percent }
|
||||
\newlabel{CEL_Graph}{{10}{13}{Der Graph der Kreuzentropie Fehlerfunktion wenn das tatsächliche Label 1 ist\relax }{figure.caption.11}{}}
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {11}{\ignorespaces Die Gleichung f\IeC {\"u}r den Kreuzentropiefehler\relax }}{13}{figure.caption.12}\protected@file@percent }
|
||||
\newlabel{CEL_Function}{{11}{13}{Die Gleichung für den Kreuzentropiefehler\relax }{figure.caption.12}{}}
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {12}{\ignorespaces Die Gleichung f\IeC {\"u}r den durchschnittlichen absoluten Fehler\relax }}{14}{figure.caption.13}\protected@file@percent }
|
||||
\newlabel{CEL_Finction_cummulative}{{12}{14}{Die Gleichung für den durchschnittlichen absoluten Fehler\relax }{figure.caption.13}{}}
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {3.6}Gradientenverfahren und Backpropagation}{14}{subsection.3.6}\protected@file@percent }
|
||||
\newlabel{Gradient_section}{{3.6}{14}{Gradientenverfahren und Backpropagation}{subsection.3.6}{}}
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {13}{\ignorespaces Die Gleichung f\IeC {\"u}r den Gradienten der Fehlerfunktion\relax }}{14}{figure.caption.14}\protected@file@percent }
|
||||
\newlabel{Gradient_Function}{{13}{14}{Die Gleichung für den Gradienten der Fehlerfunktion\relax }{figure.caption.14}{}}
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.6.1}Lernrate}{14}{subsubsection.3.6.1}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {14}{\ignorespaces Die Gleichung f\IeC {\"u}r die Anpassung eines einzelnen Parameters\relax }}{14}{figure.caption.15}\protected@file@percent }
|
||||
\newlabel{Learning_Rate_Function}{{14}{14}{Die Gleichung für die Anpassung eines einzelnen Parameters\relax }{figure.caption.15}{}}
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {15}{\ignorespaces $\eta $ ist hier zu gro\IeC {\ss } gew\IeC {\"a}hlt\relax }}{15}{figure.caption.16}\protected@file@percent }
|
||||
\newlabel{Learning_Rate_Graphic}{{15}{15}{$\eta $ ist hier zu groß gewählt\relax }{figure.caption.16}{}}
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {3.7}Verschiedene Layerarten}{15}{subsection.3.7}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.7.1}Convolutional Layers}{16}{subsubsection.3.7.1}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {16}{\ignorespaces Eine Verbildlichung der Vorg\IeC {\"a}nge in einem convolutional Layer\newline Aus einer Animation von\newline https://github.com/vdumoulin/conv\_arithmetic/blob/master/README.md Vincent Dumoulin, Francesco Visin - A guide to convolution arithmetic for deep learning (BibTeX)}}{16}{figure.caption.17}\protected@file@percent }
|
||||
\newlabel{Convolution_illustration}{{16}{16}{Eine Verbildlichung der Vorgänge in einem convolutional Layer\newline Aus einer Animation von\newline https://github.com/vdumoulin/conv\_arithmetic/blob/master/README.md\\ Vincent Dumoulin, Francesco Visin - A guide to convolution arithmetic for deep learning (BibTeX)}{figure.caption.17}{}}
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {17}{\ignorespaces Erkennt obere horizontale Kanten\relax }}{17}{figure.caption.18}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {18}{\ignorespaces Erkennt linke vertikale Kanten\relax }}{17}{figure.caption.18}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {19}{\ignorespaces Erkennt untere horizontale Kanten\relax }}{17}{figure.caption.18}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {20}{\ignorespaces Erkennt rechte vertikale Kanten\relax }}{17}{figure.caption.18}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {21}{\ignorespaces Das Beispielbild aus dem Mnist Datensatz\relax }}{17}{figure.caption.19}\protected@file@percent }
|
||||
\newlabel{Filter_Example_raw}{{21}{17}{Das Beispielbild aus dem Mnist Datensatz\relax }{figure.caption.19}{}}
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {22}{\ignorespaces Die jeweils oben stehenden Filter wurden auf das Beispielbild angewandt.\relax }}{17}{figure.caption.20}\protected@file@percent }
|
||||
\newlabel{Filter_output dargestellt}{{22}{17}{Die jeweils oben stehenden Filter wurden auf das Beispielbild angewandt.\relax }{figure.caption.20}{}}
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {23}{\ignorespaces Beispiele f\IeC {\"u}r low- mid- und high-level Features in Convolutional Neural Nets\newline Quelle: https://tvirdi.github.io/2017-10-29/cnn/}}{18}{figure.caption.21}\protected@file@percent }
|
||||
\newlabel{HL_features_conv}{{23}{18}{Beispiele für low- mid- und high-level Features in Convolutional Neural Nets\newline Quelle: https://tvirdi.github.io/2017-10-29/cnn/}{figure.caption.21}{}}
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.7.2}Pooling Layers}{18}{subsubsection.3.7.2}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {24}{\ignorespaces Max Pooling mit $2\times 2$ gro\IeC {\ss }en Submatritzen\newline Quelle: https://computersciencewiki.org/index.php/Max-pooling\_/\_Pooling CC BY NC SA Lizenz}}{19}{figure.caption.22}\protected@file@percent }
|
||||
\newlabel{Maxpool}{{24}{19}{Max Pooling mit $2\times 2$ großen Submatritzen\newline Quelle: https://computersciencewiki.org/index.php/Max-pooling\_/\_Pooling\\ CC BY NC SA Lizenz}{figure.caption.22}{}}
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {25}{\ignorespaces Average Pooling mit $2\times 2$ gro\IeC {\ss }en Submatritzen\newline Aus: Dominguez-Morales, Juan Pedro. (2018). Neuromorphic audio processing through real-time embedded spiking neural networks. Abbildung 33}}{19}{figure.caption.23}\protected@file@percent }
|
||||
\newlabel{AvgPool}{{25}{19}{Average Pooling mit $2\times 2$ großen Submatritzen\newline Aus: Dominguez-Morales, Juan Pedro. (2018). Neuromorphic audio processing through real-time embedded spiking neural networks. Abbildung 33}{figure.caption.23}{}}
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {26}{\ignorespaces Gegen\IeC {\"u}berstellung von Max und Average Pooling\relax }}{20}{figure.caption.24}\protected@file@percent }
|
||||
\newlabel{Pooling_Mnist}{{26}{20}{Gegenüberstellung von Max und Average Pooling\relax }{figure.caption.24}{}}
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {section}{\numberline {4}PyTorch}{20}{section.4}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {4.1}Datenvorbereitung}{21}{subsection.4.1}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {27}{\ignorespaces Der Code zum Laden des MNIST Datensatzes\relax }}{21}{figure.caption.25}\protected@file@percent }
|
||||
\newlabel{MNIST_Dataloader_Code}{{27}{21}{Der Code zum Laden des MNIST Datensatzes\relax }{figure.caption.25}{}}
|
||||
\abx@aux@cite{6}
|
||||
\abx@aux@segm{0}{0}{6}
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {4.2}Definieren des Netzes}{23}{subsection.4.2}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {28}{\ignorespaces Code um ein einfaches Netz in Pytorch zu definieren\relax }}{24}{figure.caption.26}\protected@file@percent }
|
||||
\newlabel{Net_simple_definition}{{28}{24}{Code um ein einfaches Netz in Pytorch zu definieren\relax }{figure.caption.26}{}}
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {4.2}Definieren des Netzes}{22}{subsection.4.2}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {28}{\ignorespaces Code um ein einfaches Netz in Pytorch zu definieren\relax }}{23}{figure.caption.26}\protected@file@percent }
|
||||
\newlabel{Net_simple_definition}{{28}{23}{Code um ein einfaches Netz in Pytorch zu definieren\relax }{figure.caption.26}{}}
|
||||
\abx@aux@cite{7}
|
||||
\abx@aux@segm{0}{0}{7}
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {4.3}Trainieren des Netzes}{25}{subsection.4.3}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {29}{\ignorespaces Code um das Netz auf einem Datensatz zu trainieren\relax }}{25}{figure.caption.27}\protected@file@percent }
|
||||
\newlabel{Code_train_loop}{{29}{25}{Code um das Netz auf einem Datensatz zu trainieren\relax }{figure.caption.27}{}}
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {4.4}Pytorch und weights and biases}{26}{subsection.4.4}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {section}{\numberline {5}Fallbeispiel I:\newline Ein Klassifizierungsnetzwerk f\IeC {\"u}r handgeschriebene Ziffern}{27}{section.5}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {5.1}Aufgabe}{27}{subsection.5.1}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {5.2}Der MNIST Datensatz}{27}{subsection.5.2}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {5.3}Das Netz}{28}{subsection.5.3}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {4.3}Trainieren des Netzes}{24}{subsection.4.3}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {29}{\ignorespaces Code um das Netz auf einem Datensatz zu trainieren\relax }}{24}{figure.caption.27}\protected@file@percent }
|
||||
\newlabel{Code_train_loop}{{29}{24}{Code um das Netz auf einem Datensatz zu trainieren\relax }{figure.caption.27}{}}
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {4.4}Pytorch und weights and biases}{25}{subsection.4.4}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {section}{\numberline {5}Ein Klassifizierungsnetzwerk f\IeC {\"u}r handgeschriebene Ziffern}{26}{section.5}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {5.1}Aufgabe}{26}{subsection.5.1}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {5.2}Der MNIST Datensatz}{26}{subsection.5.2}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {5.3}Das Netz}{26}{subsection.5.3}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {30}{\ignorespaces Der Code um das in diesem Projekt genutzte Klassifizierungsnetz zu definieren.\relax }}{27}{figure.caption.28}\protected@file@percent }
|
||||
\newlabel{net}{{30}{27}{Der Code um das in diesem Projekt genutzte Klassifizierungsnetz zu definieren.\relax }{figure.caption.28}{}}
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {31}{\ignorespaces Der Graph der ReLu Aktivierungsfunktion\relax }}{27}{figure.caption.29}\protected@file@percent }
|
||||
\newlabel{ReLu}{{31}{27}{Der Graph der ReLu Aktivierungsfunktion\relax }{figure.caption.29}{}}
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {5.4}Ergebnis}{28}{subsection.5.4}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {section}{\numberline {6}Fallbeispiel II:\newline Eine selbsttrainierende KI f\IeC {\"u}r Tic-Tac-Toe}{28}{section.6}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {6.1}Das Prinzip}{28}{subsection.6.1}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {6.2}Chance-Tree Optimierung}{28}{subsection.6.2}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {6.3}L\IeC {\"o}sung mittels eines neuronalen Netzes}{28}{subsection.6.3}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {subsection}{\numberline {6.4}Vergleich}{28}{subsection.6.4}\protected@file@percent }
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {section}{\numberline {7}Schlusswort}{28}{section.7}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {32}{\ignorespaces Ein Plot der Trefferquote aufgetragen gegen die Trainingszeit\relax }}{28}{figure.caption.30}\protected@file@percent }
|
||||
\newlabel{accuracy}{{32}{28}{Ein Plot der Trefferquote aufgetragen gegen die Trainingszeit\relax }{figure.caption.30}{}}
|
||||
\@writefile{toc}{\defcounter {refsection}{0}\relax }\@writefile{toc}{\contentsline {section}{\numberline {6}Schlusswort}{28}{section.6}\protected@file@percent }
|
||||
\@writefile{lof}{\defcounter {refsection}{0}\relax }\@writefile{lof}{\contentsline {figure}{\numberline {33}{\ignorespaces Ein Plot des Kreuzentropiefehlers aufgetragen gegen die Trainingszeit\relax }}{29}{figure.caption.31}\protected@file@percent }
|
||||
\newlabel{loss}{{33}{29}{Ein Plot des Kreuzentropiefehlers aufgetragen gegen die Trainingszeit\relax }{figure.caption.31}{}}
|
||||
\bibcite{1}{1}
|
||||
\bibcite{2}{2}
|
||||
\bibcite{3}{3}
|
||||
|
|
@ -174,4 +178,6 @@
|
|||
ECDE410C14A506CF9F77A687B26F5474F6C426F58C5CCC27D3C7BD698FEC22DB.pygtex,
|
||||
30299FD7E2187F6D53ED8A608F3958F2F6C426F58C5CCC27D3C7BD698FEC22DB.pygtex,
|
||||
458D5F1AFC81AE7291A7AAAD852DC12AF6C426F58C5CCC27D3C7BD698FEC22DB.pygtex,
|
||||
B51E2DE1996D35173C012720F7A4FDA4F6C426F58C5CCC27D3C7BD698FEC22DB.pygtex}
|
||||
B51E2DE1996D35173C012720F7A4FDA4F6C426F58C5CCC27D3C7BD698FEC22DB.pygtex,
|
||||
AF2982B0D9BB868B8B4E6D0A6A863B1C53A0C9FE66949F8EC4BED65B31F6975B.pygtex,
|
||||
A26181649063E5A564A34B0EBDE29E28F6C426F58C5CCC27D3C7BD698FEC22DB.pygtex}
|
||||
|
|
|
|||
|
|
@ -1,60 +1,68 @@
|
|||
\boolfalse {citerequest}\boolfalse {citetracker}\boolfalse {pagetracker}\boolfalse {backtracker}\relax
|
||||
\babel@toc {ngerman}{}
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {figure}{\numberline {1}{\ignorespaces Bin\IeC {\"a}rklassifizierung\relax }}{4}{figure.caption.2}%
|
||||
\contentsline {figure}{\numberline {1}{\ignorespaces Bin\IeC {\"a}rklassifizierung\relax }}{3}{figure.caption.2}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {figure}{\numberline {2}{\ignorespaces Regression\relax }}{4}{figure.caption.3}%
|
||||
\contentsline {figure}{\numberline {2}{\ignorespaces Regression\relax }}{3}{figure.caption.3}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {figure}{\numberline {3}{\ignorespaces Overfitting\relax }}{6}{figure.caption.4}%
|
||||
\contentsline {figure}{\numberline {3}{\ignorespaces Overfitting\relax }}{5}{figure.caption.4}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {figure}{\numberline {4}{\ignorespaces Neuron \newline Quelle: simple.wikipedia.org/wiki/File:Neuron.svg\newline Copyright: CC Attribution-Share Alike von Nutzer Dhp1080,\newline bearbeitet}}{8}{figure.caption.5}%
|
||||
\contentsline {figure}{\numberline {4}{\ignorespaces Neuron \newline Quelle: simple.wikipedia.org/wiki/File:Neuron.svg\newline Copyright: CC Attribution-Share Alike von Nutzer Dhp1080,\newline bearbeitet}}{7}{figure.caption.5}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {figure}{\numberline {5}{\ignorespaces Ein einfaches neuronales Netz\relax }}{10}{figure.caption.6}%
|
||||
\contentsline {figure}{\numberline {5}{\ignorespaces Ein einfaches neuronales Netz\relax }}{9}{figure.caption.6}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {figure}{\numberline {6}{\ignorespaces Der Plot der Sigmoid Funktion $\sigma (x)=\frac {e^x}{e^x+1}$\relax }}{11}{figure.caption.7}%
|
||||
\contentsline {figure}{\numberline {6}{\ignorespaces Der Plot der Sigmoid Funktion $\sigma (x)=\frac {e^x}{e^x+1}$\relax }}{10}{figure.caption.7}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {figure}{\numberline {7}{\ignorespaces Formel zur Berechnung eines Ausgabevektors aus einem Eingabevektor durch ein Layer Neuronen. \relax }}{12}{figure.caption.8}%
|
||||
\contentsline {figure}{\numberline {7}{\ignorespaces Formel zur Berechnung eines Ausgabevektors aus einem Eingabevektor durch ein Layer Neuronen. \relax }}{11}{figure.caption.8}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {figure}{\numberline {8}{\ignorespaces Die Gleichung f\IeC {\"u}r den durchschnittlichen quadratischen Fehler\relax }}{13}{figure.caption.9}%
|
||||
\contentsline {figure}{\numberline {8}{\ignorespaces Die Gleichung f\IeC {\"u}r den durchschnittlichen quadratischen Fehler\relax }}{12}{figure.caption.9}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {figure}{\numberline {9}{\ignorespaces Die Gleichung f\IeC {\"u}r den durchschnittlichen absoluten Fehler\relax }}{13}{figure.caption.10}%
|
||||
\contentsline {figure}{\numberline {9}{\ignorespaces Die Gleichung f\IeC {\"u}r den durchschnittlichen absoluten Fehler\relax }}{12}{figure.caption.10}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {figure}{\numberline {10}{\ignorespaces Der Graph der Kreuzentropie Fehlerfunktion wenn das tats\IeC {\"a}chliche Label 1 ist\relax }}{14}{figure.caption.11}%
|
||||
\contentsline {figure}{\numberline {10}{\ignorespaces Der Graph der Kreuzentropie Fehlerfunktion wenn das tats\IeC {\"a}chliche Label 1 ist\relax }}{13}{figure.caption.11}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {figure}{\numberline {11}{\ignorespaces Die Gleichung f\IeC {\"u}r den Kreuzentropiefehler\relax }}{14}{figure.caption.12}%
|
||||
\contentsline {figure}{\numberline {11}{\ignorespaces Die Gleichung f\IeC {\"u}r den Kreuzentropiefehler\relax }}{13}{figure.caption.12}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {figure}{\numberline {12}{\ignorespaces Die Gleichung f\IeC {\"u}r den durchschnittlichen absoluten Fehler\relax }}{15}{figure.caption.13}%
|
||||
\contentsline {figure}{\numberline {12}{\ignorespaces Die Gleichung f\IeC {\"u}r den durchschnittlichen absoluten Fehler\relax }}{14}{figure.caption.13}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {figure}{\numberline {13}{\ignorespaces Die Gleichung f\IeC {\"u}r den Gradienten der Fehlerfunktion\relax }}{15}{figure.caption.14}%
|
||||
\contentsline {figure}{\numberline {13}{\ignorespaces Die Gleichung f\IeC {\"u}r den Gradienten der Fehlerfunktion\relax }}{14}{figure.caption.14}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {figure}{\numberline {14}{\ignorespaces Die Gleichung f\IeC {\"u}r die Anpassung eines einzelnen Parameters\relax }}{15}{figure.caption.15}%
|
||||
\contentsline {figure}{\numberline {14}{\ignorespaces Die Gleichung f\IeC {\"u}r die Anpassung eines einzelnen Parameters\relax }}{14}{figure.caption.15}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {figure}{\numberline {15}{\ignorespaces $\eta $ ist hier zu gro\IeC {\ss } gew\IeC {\"a}hlt\relax }}{16}{figure.caption.16}%
|
||||
\contentsline {figure}{\numberline {15}{\ignorespaces $\eta $ ist hier zu gro\IeC {\ss } gew\IeC {\"a}hlt\relax }}{15}{figure.caption.16}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {figure}{\numberline {16}{\ignorespaces Eine Verbildlichung der Vorg\IeC {\"a}nge in einem convolutional Layer\newline Aus einer Animation von\newline https://github.com/vdumoulin/conv\_arithmetic/blob/master/README.md Vincent Dumoulin, Francesco Visin - A guide to convolution arithmetic for deep learning (BibTeX)}}{17}{figure.caption.17}%
|
||||
\contentsline {figure}{\numberline {16}{\ignorespaces Eine Verbildlichung der Vorg\IeC {\"a}nge in einem convolutional Layer\newline Aus einer Animation von\newline https://github.com/vdumoulin/conv\_arithmetic/blob/master/README.md Vincent Dumoulin, Francesco Visin - A guide to convolution arithmetic for deep learning (BibTeX)}}{16}{figure.caption.17}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {figure}{\numberline {17}{\ignorespaces Erkennt obere horizontale Kanten\relax }}{18}{figure.caption.18}%
|
||||
\contentsline {figure}{\numberline {17}{\ignorespaces Erkennt obere horizontale Kanten\relax }}{17}{figure.caption.18}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {figure}{\numberline {18}{\ignorespaces Erkennt linke vertikale Kanten\relax }}{18}{figure.caption.18}%
|
||||
\contentsline {figure}{\numberline {18}{\ignorespaces Erkennt linke vertikale Kanten\relax }}{17}{figure.caption.18}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {figure}{\numberline {19}{\ignorespaces Erkennt untere horizontale Kanten\relax }}{18}{figure.caption.18}%
|
||||
\contentsline {figure}{\numberline {19}{\ignorespaces Erkennt untere horizontale Kanten\relax }}{17}{figure.caption.18}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {figure}{\numberline {20}{\ignorespaces Erkennt rechte vertikale Kanten\relax }}{18}{figure.caption.18}%
|
||||
\contentsline {figure}{\numberline {20}{\ignorespaces Erkennt rechte vertikale Kanten\relax }}{17}{figure.caption.18}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {figure}{\numberline {21}{\ignorespaces Das Beispielbild aus dem Mnist Datensatz\relax }}{18}{figure.caption.19}%
|
||||
\contentsline {figure}{\numberline {21}{\ignorespaces Das Beispielbild aus dem Mnist Datensatz\relax }}{17}{figure.caption.19}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {figure}{\numberline {22}{\ignorespaces Die jeweils oben stehenden Filter wurden auf das Beispielbild angewandt.\relax }}{18}{figure.caption.20}%
|
||||
\contentsline {figure}{\numberline {22}{\ignorespaces Die jeweils oben stehenden Filter wurden auf das Beispielbild angewandt.\relax }}{17}{figure.caption.20}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {figure}{\numberline {23}{\ignorespaces Beispiele f\IeC {\"u}r low- mid- und high-level Features in Convolutional Neural Nets\newline Quelle: https://tvirdi.github.io/2017-10-29/cnn/}}{19}{figure.caption.21}%
|
||||
\contentsline {figure}{\numberline {23}{\ignorespaces Beispiele f\IeC {\"u}r low- mid- und high-level Features in Convolutional Neural Nets\newline Quelle: https://tvirdi.github.io/2017-10-29/cnn/}}{18}{figure.caption.21}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {figure}{\numberline {24}{\ignorespaces Max Pooling mit $2\times 2$ gro\IeC {\ss }en Submatritzen\newline Quelle: https://computersciencewiki.org/index.php/Max-pooling\_/\_Pooling CC BY NC SA Lizenz}}{20}{figure.caption.22}%
|
||||
\contentsline {figure}{\numberline {24}{\ignorespaces Max Pooling mit $2\times 2$ gro\IeC {\ss }en Submatritzen\newline Quelle: https://computersciencewiki.org/index.php/Max-pooling\_/\_Pooling CC BY NC SA Lizenz}}{19}{figure.caption.22}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {figure}{\numberline {25}{\ignorespaces Average Pooling mit $2\times 2$ gro\IeC {\ss }en Submatritzen\newline Aus: Dominguez-Morales, Juan Pedro. (2018). Neuromorphic audio processing through real-time embedded spiking neural networks. Abbildung 33}}{20}{figure.caption.23}%
|
||||
\contentsline {figure}{\numberline {25}{\ignorespaces Average Pooling mit $2\times 2$ gro\IeC {\ss }en Submatritzen\newline Aus: Dominguez-Morales, Juan Pedro. (2018). Neuromorphic audio processing through real-time embedded spiking neural networks. Abbildung 33}}{19}{figure.caption.23}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {figure}{\numberline {26}{\ignorespaces Gegen\IeC {\"u}berstellung von Max und Average Pooling\relax }}{21}{figure.caption.24}%
|
||||
\contentsline {figure}{\numberline {26}{\ignorespaces Gegen\IeC {\"u}berstellung von Max und Average Pooling\relax }}{20}{figure.caption.24}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {figure}{\numberline {27}{\ignorespaces Der Code zum Laden des MNIST Datensatzes\relax }}{22}{figure.caption.25}%
|
||||
\contentsline {figure}{\numberline {27}{\ignorespaces Der Code zum Laden des MNIST Datensatzes\relax }}{21}{figure.caption.25}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {figure}{\numberline {28}{\ignorespaces Code um ein einfaches Netz in Pytorch zu definieren\relax }}{24}{figure.caption.26}%
|
||||
\contentsline {figure}{\numberline {28}{\ignorespaces Code um ein einfaches Netz in Pytorch zu definieren\relax }}{23}{figure.caption.26}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {figure}{\numberline {29}{\ignorespaces Code um das Netz auf einem Datensatz zu trainieren\relax }}{25}{figure.caption.27}%
|
||||
\contentsline {figure}{\numberline {29}{\ignorespaces Code um das Netz auf einem Datensatz zu trainieren\relax }}{24}{figure.caption.27}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {figure}{\numberline {30}{\ignorespaces Der Code um das in diesem Projekt genutzte Klassifizierungsnetz zu definieren.\relax }}{27}{figure.caption.28}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {figure}{\numberline {31}{\ignorespaces Der Graph der ReLu Aktivierungsfunktion\relax }}{27}{figure.caption.29}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {figure}{\numberline {32}{\ignorespaces Ein Plot der Trefferquote aufgetragen gegen die Trainingszeit\relax }}{28}{figure.caption.30}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {figure}{\numberline {33}{\ignorespaces Ein Plot des Kreuzentropiefehlers aufgetragen gegen die Trainingszeit\relax }}{29}{figure.caption.31}%
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
This is pdfTeX, Version 3.14159265-2.6-1.40.20 (TeX Live 2019/Debian) (preloaded format=pdflatex 2020.1.22) 31 JAN 2020 13:00
|
||||
This is pdfTeX, Version 3.14159265-2.6-1.40.20 (TeX Live 2019/Debian) (preloaded format=pdflatex 2020.1.22) 4 FEB 2020 23:18
|
||||
entering extended mode
|
||||
\write18 enabled.
|
||||
%&-line parsing enabled.
|
||||
|
|
@ -1547,36 +1547,35 @@ ailable
|
|||
ut line 4.
|
||||
LaTeX Font Info: Font shape `T1/LinuxBiolinumT-TLF/b/n' will be
|
||||
(Font) scaled to size 10.0pt on input line 4.
|
||||
|
||||
[1])
|
||||
)
|
||||
\tf@toc=\write8
|
||||
\openout8 = `Grundlagen_des_maschinellen_lernens.toc'.
|
||||
|
||||
[2]
|
||||
[1]
|
||||
|
||||
LaTeX Warning: Citation '1' on page 3 undefined on input line 49.
|
||||
LaTeX Warning: Citation '1' on page 2 undefined on input line 49.
|
||||
|
||||
<../graphics/Classification.png, id=230, 467.5869pt x 464.6961pt>
|
||||
<../graphics/Classification.png, id=199, 467.5869pt x 464.6961pt>
|
||||
File: ../graphics/Classification.png Graphic file (type png)
|
||||
<use ../graphics/Classification.png>
|
||||
Package pdftex.def Info: ../graphics/Classification.png used on input line 62.
|
||||
|
||||
(pdftex.def) Requested size: 137.9979pt x 137.14088pt.
|
||||
[3]
|
||||
<../graphics/Regression.png, id=237, 467.5869pt x 464.6961pt>
|
||||
[2]
|
||||
<../graphics/Regression.png, id=206, 467.5869pt x 464.6961pt>
|
||||
File: ../graphics/Regression.png Graphic file (type png)
|
||||
<use ../graphics/Regression.png>
|
||||
Package pdftex.def Info: ../graphics/Regression.png used on input line 72.
|
||||
(pdftex.def) Requested size: 137.9979pt x 137.14088pt.
|
||||
[4 <../graphics/classification.png> <../graphics/regression.png>]
|
||||
[3 <../graphics/classification.png> <../graphics/regression.png>]
|
||||
|
||||
LaTeX Warning: Citation '4' on page 5 undefined on input line 80.
|
||||
LaTeX Warning: Citation '4' on page 4 undefined on input line 80.
|
||||
|
||||
|
||||
LaTeX Warning: Citation '5' on page 5 undefined on input line 84.
|
||||
LaTeX Warning: Citation '5' on page 4 undefined on input line 84.
|
||||
|
||||
[5]
|
||||
<../graphics/overfitting.png, id=254, 467.5869pt x 529.0164pt>
|
||||
[4]
|
||||
<../graphics/overfitting.png, id=223, 467.5869pt x 529.0164pt>
|
||||
File: ../graphics/overfitting.png Graphic file (type png)
|
||||
<use ../graphics/overfitting.png>
|
||||
Package pdftex.def Info: ../graphics/overfitting.png used on input line 91.
|
||||
|
|
@ -1587,18 +1586,18 @@ Overfull \hbox (1.84744pt too wide) in paragraph at lines 88--98
|
|||
lem bei Klas-si-fi-zie-rungs-auf-ga-ben. Die Klas-
|
||||
[]
|
||||
|
||||
[6 <../graphics/overfitting.png>] [7]
|
||||
<../graphics/Neuron.png, id=272, 299.9205pt x 158.994pt>
|
||||
[5 <../graphics/overfitting.png>] [6]
|
||||
<../graphics/Neuron.png, id=241, 299.9205pt x 158.994pt>
|
||||
File: ../graphics/Neuron.png Graphic file (type png)
|
||||
<use ../graphics/Neuron.png>
|
||||
Package pdftex.def Info: ../graphics/Neuron.png used on input line 115.
|
||||
(pdftex.def) Requested size: 299.91975pt x 158.9936pt.
|
||||
|
||||
|
||||
LaTeX Warning: Citation '2' on page 8 undefined on input line 121.
|
||||
LaTeX Warning: Citation '2' on page 7 undefined on input line 121.
|
||||
|
||||
[8 <../graphics/Neuron.png>]
|
||||
<../graphics/Neural_Net.png, id=280, 548.16795pt x 432.8973pt>
|
||||
[7 <../graphics/Neuron.png>]
|
||||
<../graphics/Neural_Net.png, id=249, 548.16795pt x 432.8973pt>
|
||||
File: ../graphics/Neural_Net.png Graphic file (type png)
|
||||
<use ../graphics/Neural_Net.png>
|
||||
Package pdftex.def Info: ../graphics/Neural_Net.png used on input line 126.
|
||||
|
|
@ -1611,7 +1610,7 @@ Underfull \hbox (badness 10000) in paragraph at lines 123--130
|
|||
|
||||
LaTeX Warning: `h' float specifier changed to `ht'.
|
||||
|
||||
[9]
|
||||
[8]
|
||||
Missing character: There is no . in font nullfont!
|
||||
Missing character: There is no 0 in font nullfont!
|
||||
Missing character: There is no 1 in font nullfont!
|
||||
|
|
@ -1621,16 +1620,16 @@ Missing character: There is no t in font nullfont!
|
|||
|
||||
LaTeX Warning: `h' float specifier changed to `ht'.
|
||||
|
||||
[10 <../graphics/Neural_Net.png>]
|
||||
[9 <../graphics/Neural_Net.png>]
|
||||
|
||||
LaTeX Warning: `h' float specifier changed to `ht'.
|
||||
|
||||
[11] [12]
|
||||
[10] [11]
|
||||
|
||||
LaTeX Warning: Citation '3' on page 13 undefined on input line 234.
|
||||
LaTeX Warning: Citation '3' on page 12 undefined on input line 235.
|
||||
|
||||
|
||||
LaTeX Warning: Citation '3' on page 13 undefined on input line 245.
|
||||
LaTeX Warning: Citation '3' on page 12 undefined on input line 246.
|
||||
|
||||
Missing character: There is no . in font nullfont!
|
||||
Missing character: There is no 0 in font nullfont!
|
||||
|
|
@ -1647,181 +1646,181 @@ NOTE: coordinate (2Y1.6669998e0],3Y0.0e0]) has been dropped because it is unbou
|
|||
nded (in y). (see also unbounded coords=jump).
|
||||
NOTE: coordinate (2Y5.559998e-1],3Y0.0e0]) has been dropped because it is unbou
|
||||
nded (in y). (see also unbounded coords=jump).
|
||||
[13]
|
||||
[12]
|
||||
|
||||
LaTeX Warning: `h' float specifier changed to `ht'.
|
||||
|
||||
[14]
|
||||
<../graphics/gdf_big_lr.png, id=335, 484.57034pt x 482.0409pt>
|
||||
[13]
|
||||
<../graphics/gdf_big_lr.png, id=305, 484.57034pt x 482.0409pt>
|
||||
File: ../graphics/gdf_big_lr.png Graphic file (type png)
|
||||
<use ../graphics/gdf_big_lr.png>
|
||||
Package pdftex.def Info: ../graphics/gdf_big_lr.png used on input line 320.
|
||||
Package pdftex.def Info: ../graphics/gdf_big_lr.png used on input line 321.
|
||||
(pdftex.def) Requested size: 172.5pt x 171.60013pt.
|
||||
[15] [16 <../graphics/gdf_big_lr.png>]
|
||||
<../graphics/conv/conv008.png, id=352, 396.48125pt x 450.68375pt>
|
||||
[14] [15 <../graphics/gdf_big_lr.png>]
|
||||
<../graphics/conv/conv008.png, id=322, 396.48125pt x 450.68375pt>
|
||||
File: ../graphics/conv/conv008.png Graphic file (type png)
|
||||
<use ../graphics/conv/conv008.png>
|
||||
Package pdftex.def Info: ../graphics/conv/conv008.png used on input line 338.
|
||||
Package pdftex.def Info: ../graphics/conv/conv008.png used on input line 339.
|
||||
(pdftex.def) Requested size: 68.99895pt x 78.43071pt.
|
||||
[17 <../graphics/conv/conv008.png>]
|
||||
<../graphics/mnist_5/mnist_5_raw.png, id=362, 462.528pt x 346.896pt>
|
||||
[16 <../graphics/conv/conv008.png>]
|
||||
<../graphics/mnist_5/mnist_5_raw.png, id=332, 462.528pt x 346.896pt>
|
||||
File: ../graphics/mnist_5/mnist_5_raw.png Graphic file (type png)
|
||||
<use ../graphics/mnist_5/mnist_5_raw.png>
|
||||
Package pdftex.def Info: ../graphics/mnist_5/mnist_5_raw.png used on input lin
|
||||
e 401.
|
||||
e 402.
|
||||
(pdftex.def) Requested size: 172.5pt x 129.37639pt.
|
||||
<../graphics/mnist_5/conv_only/mnist_5_upper_edges.png, id=363, 462.528pt x 346
|
||||
<../graphics/mnist_5/conv_only/mnist_5_upper_edges.png, id=333, 462.528pt x 346
|
||||
.896pt>
|
||||
File: ../graphics/mnist_5/conv_only/mnist_5_upper_edges.png Graphic file (type
|
||||
png)
|
||||
<use ../graphics/mnist_5/conv_only/mnist_5_upper_edges.png>
|
||||
Package pdftex.def Info: ../graphics/mnist_5/conv_only/mnist_5_upper_edges.png
|
||||
used on input line 410.
|
||||
used on input line 411.
|
||||
(pdftex.def) Requested size: 68.99895pt x 51.7463pt.
|
||||
<../graphics/mnist_5/conv_only/mnist_5_left_edges.png, id=364, 462.528pt x 346.
|
||||
<../graphics/mnist_5/conv_only/mnist_5_left_edges.png, id=334, 462.528pt x 346.
|
||||
896pt>
|
||||
File: ../graphics/mnist_5/conv_only/mnist_5_left_edges.png Graphic file (type p
|
||||
ng)
|
||||
<use ../graphics/mnist_5/conv_only/mnist_5_left_edges.png>
|
||||
Package pdftex.def Info: ../graphics/mnist_5/conv_only/mnist_5_left_edges.png
|
||||
used on input line 415.
|
||||
used on input line 416.
|
||||
(pdftex.def) Requested size: 68.99895pt x 51.7463pt.
|
||||
<../graphics/mnist_5/conv_only/mnist_5_lower_edges.png, id=365, 462.528pt x 346
|
||||
<../graphics/mnist_5/conv_only/mnist_5_lower_edges.png, id=335, 462.528pt x 346
|
||||
.896pt>
|
||||
File: ../graphics/mnist_5/conv_only/mnist_5_lower_edges.png Graphic file (type
|
||||
png)
|
||||
<use ../graphics/mnist_5/conv_only/mnist_5_lower_edges.png>
|
||||
Package pdftex.def Info: ../graphics/mnist_5/conv_only/mnist_5_lower_edges.png
|
||||
used on input line 420.
|
||||
used on input line 421.
|
||||
(pdftex.def) Requested size: 68.99895pt x 51.7463pt.
|
||||
<../graphics/mnist_5/conv_only/mnist_5_right_edges.png, id=366, 462.528pt x 346
|
||||
<../graphics/mnist_5/conv_only/mnist_5_right_edges.png, id=336, 462.528pt x 346
|
||||
.896pt>
|
||||
File: ../graphics/mnist_5/conv_only/mnist_5_right_edges.png Graphic file (type
|
||||
png)
|
||||
<use ../graphics/mnist_5/conv_only/mnist_5_right_edges.png>
|
||||
Package pdftex.def Info: ../graphics/mnist_5/conv_only/mnist_5_right_edges.png
|
||||
used on input line 425.
|
||||
used on input line 426.
|
||||
(pdftex.def) Requested size: 68.99895pt x 51.7463pt.
|
||||
<../graphics/features.png, id=369, 833.1125pt x 388.45125pt>
|
||||
<../graphics/features.png, id=339, 833.1125pt x 388.45125pt>
|
||||
File: ../graphics/features.png Graphic file (type png)
|
||||
<use ../graphics/features.png>
|
||||
Package pdftex.def Info: ../graphics/features.png used on input line 436.
|
||||
Package pdftex.def Info: ../graphics/features.png used on input line 437.
|
||||
(pdftex.def) Requested size: 345.0pt x 160.86047pt.
|
||||
|
||||
Underfull \hbox (badness 10000) in paragraph at lines 347--443
|
||||
Underfull \hbox (badness 10000) in paragraph at lines 348--444
|
||||
|
||||
[]
|
||||
|
||||
[18 <../graphics/mnist_5/mnist_5_raw.png> <../graphics/mnist_5/conv_only/mnist_
|
||||
[17 <../graphics/mnist_5/mnist_5_raw.png> <../graphics/mnist_5/conv_only/mnist_
|
||||
5_upper_edges.png> <../graphics/mnist_5/conv_only/mnist_5_left_edges.png> <../g
|
||||
raphics/mnist_5/conv_only/mnist_5_lower_edges.png> <../graphics/mnist_5/conv_on
|
||||
ly/mnist_5_right_edges.png>] [19 <../graphics/features.png>]
|
||||
<../graphics/MaxpoolSample2.png, id=391, 164.25pt x 68.547pt>
|
||||
ly/mnist_5_right_edges.png>] [18 <../graphics/features.png>]
|
||||
<../graphics/MaxpoolSample2.png, id=361, 164.25pt x 68.547pt>
|
||||
File: ../graphics/MaxpoolSample2.png Graphic file (type png)
|
||||
<use ../graphics/MaxpoolSample2.png>
|
||||
Package pdftex.def Info: ../graphics/MaxpoolSample2.png used on input line 454
|
||||
Package pdftex.def Info: ../graphics/MaxpoolSample2.png used on input line 455
|
||||
.
|
||||
(pdftex.def) Requested size: 241.49895pt x 100.79422pt.
|
||||
<../graphics/Average-Pooling-Example.png, id=392, 746.79pt x 337.26pt>
|
||||
<../graphics/Average-Pooling-Example.png, id=362, 746.79pt x 337.26pt>
|
||||
File: ../graphics/Average-Pooling-Example.png Graphic file (type png)
|
||||
<use ../graphics/Average-Pooling-Example.png>
|
||||
Package pdftex.def Info: ../graphics/Average-Pooling-Example.png used on input
|
||||
line 462.
|
||||
line 463.
|
||||
(pdftex.def) Requested size: 241.49895pt x 109.0627pt.
|
||||
File: ../graphics/mnist_5/conv_only/mnist_5_upper_edges.png Graphic file (type
|
||||
png)
|
||||
<use ../graphics/mnist_5/conv_only/mnist_5_upper_edges.png>
|
||||
Package pdftex.def Info: ../graphics/mnist_5/conv_only/mnist_5_upper_edges.png
|
||||
used on input line 472.
|
||||
used on input line 473.
|
||||
(pdftex.def) Requested size: 68.99895pt x 51.7463pt.
|
||||
File: ../graphics/mnist_5/conv_only/mnist_5_left_edges.png Graphic file (type p
|
||||
ng)
|
||||
<use ../graphics/mnist_5/conv_only/mnist_5_left_edges.png>
|
||||
Package pdftex.def Info: ../graphics/mnist_5/conv_only/mnist_5_left_edges.png
|
||||
used on input line 477.
|
||||
used on input line 478.
|
||||
(pdftex.def) Requested size: 68.99895pt x 51.7463pt.
|
||||
File: ../graphics/mnist_5/conv_only/mnist_5_lower_edges.png Graphic file (type
|
||||
png)
|
||||
<use ../graphics/mnist_5/conv_only/mnist_5_lower_edges.png>
|
||||
Package pdftex.def Info: ../graphics/mnist_5/conv_only/mnist_5_lower_edges.png
|
||||
used on input line 482.
|
||||
used on input line 483.
|
||||
(pdftex.def) Requested size: 68.99895pt x 51.7463pt.
|
||||
File: ../graphics/mnist_5/conv_only/mnist_5_right_edges.png Graphic file (type
|
||||
png)
|
||||
<use ../graphics/mnist_5/conv_only/mnist_5_right_edges.png>
|
||||
Package pdftex.def Info: ../graphics/mnist_5/conv_only/mnist_5_right_edges.png
|
||||
used on input line 487.
|
||||
used on input line 488.
|
||||
(pdftex.def) Requested size: 68.99895pt x 51.7463pt.
|
||||
<../graphics/mnist_5/conv_pool/upper_horiz_pooled.png, id=394, 462.528pt x 346.
|
||||
<../graphics/mnist_5/conv_pool/upper_horiz_pooled.png, id=364, 462.528pt x 346.
|
||||
896pt>
|
||||
File: ../graphics/mnist_5/conv_pool/upper_horiz_pooled.png Graphic file (type p
|
||||
ng)
|
||||
<use ../graphics/mnist_5/conv_pool/upper_horiz_pooled.png>
|
||||
Package pdftex.def Info: ../graphics/mnist_5/conv_pool/upper_horiz_pooled.png
|
||||
used on input line 492.
|
||||
used on input line 493.
|
||||
(pdftex.def) Requested size: 68.99895pt x 51.7463pt.
|
||||
<../graphics/mnist_5/conv_pool/left_vert_pooled.png, id=395, 462.528pt x 346.89
|
||||
<../graphics/mnist_5/conv_pool/left_vert_pooled.png, id=365, 462.528pt x 346.89
|
||||
6pt>
|
||||
File: ../graphics/mnist_5/conv_pool/left_vert_pooled.png Graphic file (type png
|
||||
)
|
||||
<use ../graphics/mnist_5/conv_pool/left_vert_pooled.png>
|
||||
Package pdftex.def Info: ../graphics/mnist_5/conv_pool/left_vert_pooled.png us
|
||||
ed on input line 497.
|
||||
ed on input line 498.
|
||||
(pdftex.def) Requested size: 68.99895pt x 51.7463pt.
|
||||
<../graphics/mnist_5/conv_pool/lower_horiz_pooled.png, id=396, 462.528pt x 346.
|
||||
<../graphics/mnist_5/conv_pool/lower_horiz_pooled.png, id=366, 462.528pt x 346.
|
||||
896pt>
|
||||
File: ../graphics/mnist_5/conv_pool/lower_horiz_pooled.png Graphic file (type p
|
||||
ng)
|
||||
<use ../graphics/mnist_5/conv_pool/lower_horiz_pooled.png>
|
||||
Package pdftex.def Info: ../graphics/mnist_5/conv_pool/lower_horiz_pooled.png
|
||||
used on input line 502.
|
||||
used on input line 503.
|
||||
(pdftex.def) Requested size: 68.99895pt x 51.7463pt.
|
||||
<../graphics/mnist_5/conv_pool/right_vert_pooled.png, id=397, 462.528pt x 346.8
|
||||
<../graphics/mnist_5/conv_pool/right_vert_pooled.png, id=367, 462.528pt x 346.8
|
||||
96pt>
|
||||
File: ../graphics/mnist_5/conv_pool/right_vert_pooled.png Graphic file (type pn
|
||||
g)
|
||||
<use ../graphics/mnist_5/conv_pool/right_vert_pooled.png>
|
||||
Package pdftex.def Info: ../graphics/mnist_5/conv_pool/right_vert_pooled.png u
|
||||
sed on input line 507.
|
||||
sed on input line 508.
|
||||
(pdftex.def) Requested size: 68.99895pt x 51.7463pt.
|
||||
<../graphics/mnist_5/conv_pool/upper_horiz_avgpooled.png, id=398, 462.528pt x 3
|
||||
<../graphics/mnist_5/conv_pool/upper_horiz_avgpooled.png, id=368, 462.528pt x 3
|
||||
46.896pt>
|
||||
File: ../graphics/mnist_5/conv_pool/upper_horiz_avgpooled.png Graphic file (typ
|
||||
e png)
|
||||
<use ../graphics/mnist_5/conv_pool/upper_horiz_avgpooled.png>
|
||||
Package pdftex.def Info: ../graphics/mnist_5/conv_pool/upper_horiz_avgpooled.pn
|
||||
g used on input line 512.
|
||||
g used on input line 513.
|
||||
(pdftex.def) Requested size: 68.99895pt x 51.7463pt.
|
||||
<../graphics/mnist_5/conv_pool/left_vert_avgpooled.png, id=399, 462.528pt x 346
|
||||
<../graphics/mnist_5/conv_pool/left_vert_avgpooled.png, id=369, 462.528pt x 346
|
||||
.896pt>
|
||||
File: ../graphics/mnist_5/conv_pool/left_vert_avgpooled.png Graphic file (type
|
||||
png)
|
||||
<use ../graphics/mnist_5/conv_pool/left_vert_avgpooled.png>
|
||||
Package pdftex.def Info: ../graphics/mnist_5/conv_pool/left_vert_avgpooled.png
|
||||
used on input line 517.
|
||||
used on input line 518.
|
||||
(pdftex.def) Requested size: 68.99895pt x 51.7463pt.
|
||||
<../graphics/mnist_5/conv_pool/lower_horiz_avgpooled.png, id=400, 462.528pt x 3
|
||||
<../graphics/mnist_5/conv_pool/lower_horiz_avgpooled.png, id=370, 462.528pt x 3
|
||||
46.896pt>
|
||||
File: ../graphics/mnist_5/conv_pool/lower_horiz_avgpooled.png Graphic file (typ
|
||||
e png)
|
||||
<use ../graphics/mnist_5/conv_pool/lower_horiz_avgpooled.png>
|
||||
Package pdftex.def Info: ../graphics/mnist_5/conv_pool/lower_horiz_avgpooled.pn
|
||||
g used on input line 522.
|
||||
g used on input line 523.
|
||||
(pdftex.def) Requested size: 68.99895pt x 51.7463pt.
|
||||
<../graphics/mnist_5/conv_pool/right_vert_avgpooled.png, id=401, 462.528pt x 34
|
||||
<../graphics/mnist_5/conv_pool/right_vert_avgpooled.png, id=371, 462.528pt x 34
|
||||
6.896pt>
|
||||
File: ../graphics/mnist_5/conv_pool/right_vert_avgpooled.png Graphic file (type
|
||||
png)
|
||||
<use ../graphics/mnist_5/conv_pool/right_vert_avgpooled.png>
|
||||
Package pdftex.def Info: ../graphics/mnist_5/conv_pool/right_vert_avgpooled.png
|
||||
used on input line 527.
|
||||
used on input line 528.
|
||||
(pdftex.def) Requested size: 68.99895pt x 51.7463pt.
|
||||
|
||||
Underfull \hbox (badness 10000) in paragraph at lines 450--533
|
||||
Underfull \hbox (badness 10000) in paragraph at lines 451--534
|
||||
|
||||
[]
|
||||
|
||||
[20 <../graphics/MaxpoolSample2.png> <../graphics/Average-pooling-example.png>]
|
||||
[19 <../graphics/MaxpoolSample2.png> <../graphics/Average-pooling-example.png>]
|
||||
\openout5 = `Grundlagen_des_maschinellen_lernens.pyg'.
|
||||
|
||||
(./_minted-Grundlagen_des_maschinellen_lernens/default-pyg-prefix.pygstyle)
|
||||
|
|
@ -1842,7 +1841,7 @@ File: ts1txtt.fd 2000/12/15 v3.1
|
|||
|
||||
|
||||
(./_minted-Grundlagen_des_maschinellen_lernens/A4B374C1656F7019947BB217D7D8C34B
|
||||
F6C426F58C5CCC27D3C7BD698FEC22DB.pygtex) [21 <../graphics/mnist_5/conv_pool/upp
|
||||
F6C426F58C5CCC27D3C7BD698FEC22DB.pygtex) [20 <../graphics/mnist_5/conv_pool/upp
|
||||
er_horiz_pooled.png> <../graphics/mnist_5/conv_pool/left_vert_pooled.png> <../g
|
||||
raphics/mnist_5/conv_pool/lower_horiz_pooled.png> <../graphics/mnist_5/conv_poo
|
||||
l/right_vert_pooled.png> <../graphics/mnist_5/conv_pool/upper_horiz_avgpooled.p
|
||||
|
|
@ -1853,7 +1852,7 @@ t_avgpooled.png>]
|
|||
|
||||
|
||||
(./_minted-Grundlagen_des_maschinellen_lernens/0CC230EAEF969F875162D94A43EECC44
|
||||
F6C426F58C5CCC27D3C7BD698FEC22DB.pygtex) [22]
|
||||
F6C426F58C5CCC27D3C7BD698FEC22DB.pygtex) [21]
|
||||
\openout6 = `Grundlagen_des_maschinellen_lernens.pyg'.
|
||||
|
||||
|
||||
|
|
@ -1965,7 +1964,7 @@ F6C426F58C5CCC27D3C7BD698FEC22DB.pygtex)
|
|||
(./_minted-Grundlagen_des_maschinellen_lernens/6361598B18370336863B24D1B6FAE96E
|
||||
F6C426F58C5CCC27D3C7BD698FEC22DB.pygtex)
|
||||
|
||||
LaTeX Warning: Citation '6' on page 23 undefined on input line 613.
|
||||
LaTeX Warning: Citation '6' on page 22 undefined on input line 614.
|
||||
|
||||
\openout6 = `Grundlagen_des_maschinellen_lernens.pyg'.
|
||||
|
||||
|
|
@ -1976,14 +1975,14 @@ F6C426F58C5CCC27D3C7BD698FEC22DB.pygtex)
|
|||
|
||||
|
||||
(./_minted-Grundlagen_des_maschinellen_lernens/5A0B7618927FB8AEA4B96F5552228CD8
|
||||
F6C426F58C5CCC27D3C7BD698FEC22DB.pygtex) [23] [24]
|
||||
F6C426F58C5CCC27D3C7BD698FEC22DB.pygtex) [22] [23]
|
||||
\openout5 = `Grundlagen_des_maschinellen_lernens.pyg'.
|
||||
|
||||
|
||||
(./_minted-Grundlagen_des_maschinellen_lernens/B39808E7C45EEBD5237FE1E82D273879
|
||||
53A0C9FE66949F8EC4BED65B31F6975B.pygtex)
|
||||
|
||||
LaTeX Warning: Citation '7' on page 25 undefined on input line 655.
|
||||
LaTeX Warning: Citation '7' on page 24 undefined on input line 656.
|
||||
|
||||
\openout6 = `Grundlagen_des_maschinellen_lernens.pyg'.
|
||||
|
||||
|
|
@ -1999,7 +1998,7 @@ F6C426F58C5CCC27D3C7BD698FEC22DB.pygtex)
|
|||
|
||||
|
||||
(./_minted-Grundlagen_des_maschinellen_lernens/684559E268B2B047D1C82BB00E985301
|
||||
F6C426F58C5CCC27D3C7BD698FEC22DB.pygtex) [25]
|
||||
F6C426F58C5CCC27D3C7BD698FEC22DB.pygtex) [24]
|
||||
\openout6 = `Grundlagen_des_maschinellen_lernens.pyg'.
|
||||
|
||||
|
||||
|
|
@ -2020,49 +2019,70 @@ F6C426F58C5CCC27D3C7BD698FEC22DB.pygtex)
|
|||
|
||||
(./_minted-Grundlagen_des_maschinellen_lernens/B51E2DE1996D35173C012720F7A4FDA4
|
||||
F6C426F58C5CCC27D3C7BD698FEC22DB.pygtex)
|
||||
Overfull \hbox (1.4092pt too wide) in paragraph at lines 657--660
|
||||
Overfull \hbox (1.4092pt too wide) in paragraph at lines 658--661
|
||||
[] \T1/LinuxBiolinumT-TLF/m/n/10 das Pro-jekt in-itia-li-siert. Da-nach muss
|
||||
[]
|
||||
|
||||
|
||||
Package hyperref Warning: Token not allowed in a PDF string (PDFDocEncoding):
|
||||
(hyperref) removing `\newline' on input line 660.
|
||||
|
||||
[26]
|
||||
Overfull \hbox (3.56537pt too wide) in paragraph at lines 665--666
|
||||
[25]
|
||||
Overfull \hbox (3.56537pt too wide) in paragraph at lines 666--667
|
||||
\T1/LinuxBiolinumT-TLF/m/n/10 nor-ma-li-siert, was die-sen Da-ten-satz be-son-d
|
||||
ers ge-eig-net für Ein-stei-ger macht. Die meiÿ-
|
||||
[]
|
||||
|
||||
\openout5 = `Grundlagen_des_maschinellen_lernens.pyg'.
|
||||
|
||||
Package hyperref Warning: Token not allowed in a PDF string (PDFDocEncoding):
|
||||
(hyperref) removing `\newline' on input line 668.
|
||||
|
||||
(./_minted-Grundlagen_des_maschinellen_lernens/AF2982B0D9BB868B8B4E6D0A6A863B1C
|
||||
53A0C9FE66949F8EC4BED65B31F6975B.pygtex)
|
||||
\openout6 = `Grundlagen_des_maschinellen_lernens.pyg'.
|
||||
|
||||
|
||||
(./_minted-Grundlagen_des_maschinellen_lernens/A26181649063E5A564A34B0EBDE29E28
|
||||
F6C426F58C5CCC27D3C7BD698FEC22DB.pygtex)
|
||||
|
||||
LaTeX Warning: `h' float specifier changed to `ht'.
|
||||
|
||||
|
||||
Package Fancyhdr Warning: \headheight is too small (12.0pt):
|
||||
Make it at least 22.45502pt.
|
||||
Make it at least 24.02502pt.
|
||||
We now make it that large for the rest of the document.
|
||||
This may cause the page layout to be inconsistent, however.
|
||||
|
||||
[27] [28]
|
||||
Underfull \hbox (badness 10000) in paragraph at lines 685--689
|
||||
[26]
|
||||
<../graphics/Wandb_accuracy.png, id=448, 1758.57pt x 867.24pt>
|
||||
File: ../graphics/Wandb_accuracy.png Graphic file (type png)
|
||||
<use ../graphics/Wandb_accuracy.png>
|
||||
Package pdftex.def Info: ../graphics/Wandb_accuracy.png used on input line 721
|
||||
.
|
||||
(pdftex.def) Requested size: 345.0pt x 170.13667pt.
|
||||
<../graphics/Wandb_loss.png, id=450, 1758.57pt x 867.24pt>
|
||||
File: ../graphics/Wandb_loss.png Graphic file (type png)
|
||||
<use ../graphics/Wandb_loss.png>
|
||||
Package pdftex.def Info: ../graphics/Wandb_loss.png used on input line 728.
|
||||
(pdftex.def) Requested size: 345.0pt x 170.13667pt.
|
||||
[27]
|
||||
|
||||
LaTeX Warning: `h' float specifier changed to `ht'.
|
||||
|
||||
[28 <../graphics/Wandb_accuracy.png>] [29 <../graphics/Wandb_loss.png>]
|
||||
Underfull \hbox (badness 10000) in paragraph at lines 747--751
|
||||
\T1/LinuxBiolinumT-TLF/m/n/10 Quelle: https://towardsdatascience.com/common-los
|
||||
s-functions-in-machine-
|
||||
[]
|
||||
|
||||
|
||||
Underfull \hbox (badness 10000) in paragraph at lines 690--694
|
||||
Underfull \hbox (badness 10000) in paragraph at lines 752--756
|
||||
\T1/LinuxBiolinumT-TLF/m/n/10 https://www.bloomberg.com/news/articles/2019-12-1
|
||||
1/face-recognition-tech-
|
||||
[]
|
||||
|
||||
|
||||
Underfull \hbox (badness 10000) in paragraph at lines 695--699
|
||||
Underfull \hbox (badness 10000) in paragraph at lines 757--761
|
||||
\T1/LinuxBiolinumT-TLF/m/n/10 https://www.technologyreview.com/f/614986/ai-face
|
||||
-recognition-racist-us-
|
||||
[]
|
||||
|
||||
(./Grundlagen_des_maschinellen_lernens.lof [29]
|
||||
(./Grundlagen_des_maschinellen_lernens.lof [30]
|
||||
Overfull \hbox (21.8196pt too wide) in paragraph at lines 34--34
|
||||
\T1/LinuxBiolinumT-TLF/m/n/10 https://github.com/vdumoulin/conv_arithmetic/blo
|
||||
b/master/README.md
|
||||
|
|
@ -2084,15 +2104,15 @@ p/Max-pooling_/_Pooling
|
|||
\tf@lof=\write9
|
||||
\openout9 = `Grundlagen_des_maschinellen_lernens.lof'.
|
||||
|
||||
[30]
|
||||
Package atveryend Info: Empty hook `BeforeClearDocument' on input line 710.
|
||||
Package atveryend Info: Empty hook `AfterLastShipout' on input line 710.
|
||||
[31]
|
||||
Package atveryend Info: Empty hook `BeforeClearDocument' on input line 772.
|
||||
Package atveryend Info: Empty hook `AfterLastShipout' on input line 772.
|
||||
(./Grundlagen_des_maschinellen_lernens.aux)
|
||||
Package atveryend Info: Executing hook `AtVeryEndDocument' on input line 710.
|
||||
Package atveryend Info: Executing hook `AtEndAfterFileList' on input line 710.
|
||||
Package atveryend Info: Executing hook `AtVeryEndDocument' on input line 772.
|
||||
Package atveryend Info: Executing hook `AtEndAfterFileList' on input line 772.
|
||||
Package rerunfilecheck Info: File `Grundlagen_des_maschinellen_lernens.out' has
|
||||
not changed.
|
||||
(rerunfilecheck) Checksum: 21CF25714EC87AA39DD08877ED0E5E2E;2942.
|
||||
(rerunfilecheck) Checksum: F3BFAB17E10D5F868AE74D618EB3723D;2546.
|
||||
|
||||
|
||||
LaTeX Warning: There were undefined references.
|
||||
|
|
@ -2110,10 +2130,10 @@ un.xml'.
|
|||
|
||||
)
|
||||
Here is how much of TeX's memory you used:
|
||||
41613 strings out of 492609
|
||||
933901 string characters out of 6131462
|
||||
1497267 words of memory out of 5000000
|
||||
44599 multiletter control sequences out of 15000+600000
|
||||
41658 strings out of 492609
|
||||
935498 string characters out of 6131462
|
||||
1497875 words of memory out of 5000000
|
||||
44635 multiletter control sequences out of 15000+600000
|
||||
98228 words of font info for 130 fonts, out of 8000000 for 9000
|
||||
1143 hyphenation exceptions out of 8191
|
||||
62i,14n,100p,3352b,3452s stack positions out of 5000i,500n,10000p,200000b,80000s
|
||||
|
|
@ -2131,11 +2151,11 @@ ist/fonts/type1/public/txfonts/txsy.pfb></usr/share/texlive/texmf-dist/fonts/ty
|
|||
pe1/urw/times/utmb8a.pfb></usr/share/texlive/texmf-dist/fonts/type1/urw/times/u
|
||||
tmr8a.pfb></usr/share/texlive/texmf-dist/fonts/type1/urw/times/utmr8a.pfb></usr
|
||||
/share/texlive/texmf-dist/fonts/type1/urw/times/utmri8a.pfb>
|
||||
Output written on Grundlagen_des_maschinellen_lernens.pdf (31 pages, 1461523 by
|
||||
Output written on Grundlagen_des_maschinellen_lernens.pdf (32 pages, 1547143 by
|
||||
tes).
|
||||
PDF statistics:
|
||||
617 PDF objects out of 1000 (max. 8388607)
|
||||
519 compressed objects within 6 object streams
|
||||
131 named destinations out of 1000 (max. 500000)
|
||||
448 words of extra memory for PDF output out of 10000 (max. 10000000)
|
||||
614 PDF objects out of 1000 (max. 8388607)
|
||||
511 compressed objects within 6 object streams
|
||||
132 named destinations out of 1000 (max. 500000)
|
||||
418 words of extra memory for PDF output out of 10000 (max. 10000000)
|
||||
|
||||
|
|
|
|||
|
|
@ -27,14 +27,9 @@
|
|||
\BOOKMARK [2][-]{subsection.4.2}{Definieren des Netzes}{section.4}% 27
|
||||
\BOOKMARK [2][-]{subsection.4.3}{Trainieren des Netzes}{section.4}% 28
|
||||
\BOOKMARK [2][-]{subsection.4.4}{Pytorch und weights and biases}{section.4}% 29
|
||||
\BOOKMARK [1][-]{section.5}{Fallbeispiel I:Ein Klassifizierungsnetzwerk f\374r handgeschriebene Ziffern}{}% 30
|
||||
\BOOKMARK [1][-]{section.5}{Ein Klassifizierungsnetzwerk f\374r handgeschriebene Ziffern}{}% 30
|
||||
\BOOKMARK [2][-]{subsection.5.1}{Aufgabe}{section.5}% 31
|
||||
\BOOKMARK [2][-]{subsection.5.2}{Der MNIST Datensatz}{section.5}% 32
|
||||
\BOOKMARK [2][-]{subsection.5.3}{Das Netz}{section.5}% 33
|
||||
\BOOKMARK [2][-]{subsection.5.4}{Ergebnis}{section.5}% 34
|
||||
\BOOKMARK [1][-]{section.6}{Fallbeispiel II:Eine selbsttrainierende KI f\374r Tic-Tac-Toe}{}% 35
|
||||
\BOOKMARK [2][-]{subsection.6.1}{Das Prinzip}{section.6}% 36
|
||||
\BOOKMARK [2][-]{subsection.6.2}{Chance-Tree Optimierung}{section.6}% 37
|
||||
\BOOKMARK [2][-]{subsection.6.3}{L\366sung mittels eines neuronalen Netzes}{section.6}% 38
|
||||
\BOOKMARK [2][-]{subsection.6.4}{Vergleich}{section.6}% 39
|
||||
\BOOKMARK [1][-]{section.7}{Schlusswort}{}% 40
|
||||
\BOOKMARK [1][-]{section.6}{Schlusswort}{}% 35
|
||||
|
|
|
|||
Binary file not shown.
Binary file not shown.
|
|
@ -140,7 +140,7 @@ Der Ausgabevektor wird berechnet, indem:
|
|||
\item Der Bias des Neurons hinzuaddiert wird
|
||||
\item Die Aktivierungsfunktion auf diesen Wert angewandt wird
|
||||
\end{enumerate}
|
||||
Die Aktivierungsfunktion hat dabei die Rolle die Werte zu normieren. Sie sorgt also dafür, dass alle Werte innerhalb des Netzes im Intervall $[0, 1]$ bleiben. Es gibt eine Vielzahl von Aktivierungsfunktionen. Die häufigste ist die sogenannte \glqq Sigmoid'' Funktion:
|
||||
Die Aktivierungsfunktion hat dabei die Rolle, die Werte zu normieren. Sie sorgt also dafür, dass alle Werte innerhalb des Netzes im Intervall $[0, 1]$ bleiben. Es gibt eine Vielzahl von Aktivierungsfunktionen. Die häufigste ist die in Abbildung~\ref{Sigmoid} dargestellte \glqq Sigmoid'' Funktion.
|
||||
\begin{figure}[h]
|
||||
\begin{center}
|
||||
\begin{tikzpicture}
|
||||
|
|
@ -150,6 +150,7 @@ Die Aktivierungsfunktion hat dabei die Rolle die Werte zu normieren. Sie sorgt a
|
|||
\end{tikzpicture}
|
||||
\end{center}
|
||||
\caption{Der Plot der Sigmoid Funktion $\sigma(x)=\frac{e^x}{e^x+1}$}
|
||||
\label{Sigmoid}
|
||||
\end{figure}
|
||||
\newline
|
||||
Im Gegensatz dazu haben Gewichtungen typischerweise etwa den doppelten Wert der Eingaben. Alle Werte werden jedoch automatisch im Lernprozess angepasst.
|
||||
|
|
@ -657,20 +658,81 @@ In Zeile 1 wird dafür zunächst das Netz instanziiert. In Zeile 3 und 4 Wird be
|
|||
In den im Folgenden erläuterten Projekten wurde ein Framework namens weights and biases verwendet. Es erlaubt, während des Trainingsprozesses auf einer Seite Parameter des Netzes und wichtige Charakteristika des Lernprozesses sowie Eigenschaften der Hardware zu tracken. Da es mit Pytorch gut kompatibel ist, sind hierfür nur wenige Zeilen Code notwendig. Zunächst wird mit\\ \mintinline{python}{wandb.init(project='Beispielprojekt')
|
||||
} das Projekt initialisiert. Danach muss angegeben werden, welches Netz betrachtet werden soll. Dafür verwendet man nachdem das Netz initialisiert wurde den Befehl \mintinline{python}{wandb.watch(model)}, wobei \mintinline{python}{model} der Instanz des Netzes entspricht. Danach kann einfach über\\
|
||||
\mintinline{python}{wandb.log({'loss': loss})} zum Beispiel in der Training Loop jedes Mal der Fehler mitgeschrieben werden.
|
||||
\section{Fallbeispiel I:\newline Ein Klassifizierungsnetzwerk für handgeschriebene Ziffern}
|
||||
\section{Ein Klassifizierungsnetzwerk für handgeschriebene Ziffern}
|
||||
Die Klassifizierung handgeschriebener Ziffern aus dem MNIST Datensatz stellt etwa die \glqq Hello world'' Aufgabe des maschinellen Lernens dar. Sie ist gut zum Erlernen der verschiedenen Algorithmen geeignet, extrem gut dokumentiert und daher leicht nachvollziehbar und ein Paradebeispiel eines Klassifizierungsproblems. Wenn man sich mit maschinellem Lernen beschäftigt, ist es also sehr wahrscheinlich, dass man als allererstes ein Klassifizierungssystem für den MNIST Datensatz programmieren wird.
|
||||
\subsection{Aufgabe}
|
||||
Die Aufgabe besteht darin, die handgeschriebenen Ziffern des MNIST Datensatzes klassifizieren zu können. Das Ziel dabei ist es mit möglichst wenig Trainingsaufwand eine Genauigkeit von mindestens 97\% zu erreichen. Um dies zu bewältigen, soll ein neuronales Netz in PyTorch programmiert und trainiert werden.
|
||||
\subsection{Der MNIST Datensatz}
|
||||
Der MNIST Datensatz ist ein Datensatz von $28\times28$ Pixel großen Graustufenbildern von handgeschriebenen Ziffern. Er weist 60000 Trainingsbilder und 10000 Testbilder auf und ist ein Teil des EMNIST Datensatzes vom National Institute for Standards and Technology, U.S. Department of Commerce. Der Datensatz ist frei unter http://yann.lecun.com/exdb/mnist/ verfügbar. Die Bilder sind bereits zentriert und normalisiert, was diesen Datensatz besonders geeignet für Einsteiger macht. Die meißten Bilder des Datensatzes sind auch von Menschen einfach zu erkennen, einige sind jedoch sehr schwierig einzuordnen und teilweise kaum als Zahl erkennbar. Aufgrund der Einfachheit der Daten sind durchaus hohe Erfolgsquoten zu erwarten. Diese liegen im Schnitt bei 98\%.
|
||||
Der MNIST Datensatz ist ein Datensatz aus $28\times28$ Pixel großen Graustufenbildern von handgeschriebenen Ziffern. Er weist 60000 Trainingsbilder und 10000 Testbilder auf und ist ein Teil des EMNIST Datensatzes vom National Institute for Standards and Technology, U.S. Department of Commerce. Der Datensatz ist frei unter \url{http://yann.lecun.com/exdb/mnist/} verfügbar. Die Bilder sind bereits zentriert und normalisiert, was diesen Datensatz besonders geeignet für Einsteiger macht. Die meisten Bilder des Datensatzes sind auch von Menschen einfach zu erkennen, einige sind jedoch sehr schwierig einzuordnen und teilweise kaum als Zahl erkennbar. Aufgrund der Einfachheit der Daten sind durchaus hohe Erfolgsquoten zu erwarten. Diese liegen im Schnitt bei 98\%.
|
||||
\subsection{Das Netz}
|
||||
Das Netz wurde für diese vergleichsweise unkomplizierte Aufgabe einfach gehalten. Es weist drei hidden Layers auf, besteht insgesamt also aus fünf Layers, die alle klassische lineare Layers sind. Die Aktivierungsfunktion ist überall eine ReLu\footnote{ReLu steht für rectified linear unit. Diese Aktivierungsfunktion ist neben den Sigmoid Funktionen ebenfalls eine sehr populäre Aktivierungsfunktion. Sie ist als $f(x)=\max(0,x)$ definiert und ist somit für alle $x\le0$ gleich $0$ und für alle $x>0$ gleich $x$.}, deren Plot in Abbildung~\ref{ReLu} dargestellt ist. Da es sich um eine Klassifizierungsaufgabe handelt, hat das Ausgabelayer 10 Ausgabeneuronen, die die 10 Ziffern repräsentieren. Es ist, wie in Abbildung~\ref{net} dargestellt, definiert.
|
||||
\begin{figure}[h]
|
||||
\begin{minted}[
|
||||
frame=lines,
|
||||
framesep=2mm,
|
||||
baselinestretch=1.2,
|
||||
fontsize=\footnotesize,
|
||||
linenos,
|
||||
autogobble
|
||||
]{python}
|
||||
class Net(nn.Module):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.fc1 = nn.Linear(28 * 28, 64)
|
||||
self.fc2 = nn.Linear(64, 120)
|
||||
self.fc3 = nn.Linear(120, 120)
|
||||
self.fc4 = nn.Linear(120, 64)
|
||||
self.fc5 = nn.Linear(64, 10)
|
||||
|
||||
def forward(self, x):
|
||||
x = F.relu(self.fc1(x))
|
||||
x = F.relu(self.fc2(x))
|
||||
x = F.relu(self.fc3(x))
|
||||
x = F.relu(self.fc4(x))
|
||||
x = self.fc5(x)
|
||||
return F.log_softmax(x, dim=1)
|
||||
\end{minted}
|
||||
\caption{Der Code um das in diesem Projekt genutzte Klassifizierungsnetz zu definieren.}
|
||||
\label{net}
|
||||
\end{figure}
|
||||
\\
|
||||
Das erste Layer nimmt also einen Tensor der Größe $1\times 784$ an, da die Bilder genau so groß sind, und gibt einen $1\times 64$ großen Tensor aus. Dieser wird vom ersten hidden Layer angenommen, das einen $1\times 120$ großen Tensor ausgibt. Im zweiten hidden Layer bleibt die Größe mit $1\times 120$ konstant und im dritten wird sie wieder auf $1\times 64$ reduziert. Schlussendlich wird der Ausgabevektor mit den zehn Klassenwahrscheinlichkeiten berechnet.\\
|
||||
In der \mintinline{python}{forward()} Funktion wird jedem Layer außer dem output Layer noch eine ReLu Aktivierungsfunktion zugewiesen und der SoftMax sowie der Logarithmus werden angewandt.
|
||||
\begin{figure}[h]
|
||||
\begin{center}
|
||||
\begin{tikzpicture}
|
||||
\begin{axis}[width=0.5\linewidth,
|
||||
xmax=5,
|
||||
ymax=5,
|
||||
xmin=-5,
|
||||
samples=5,
|
||||
xlabel={x},
|
||||
ylabel={y}]
|
||||
\addplot[blue]{max(x,0)};
|
||||
\end{axis}
|
||||
\end{tikzpicture}
|
||||
\end{center}
|
||||
\caption{Der Graph der ReLu Aktivierungsfunktion}
|
||||
\label{ReLu}
|
||||
\end{figure}
|
||||
\subsection{Ergebnis}
|
||||
\section{Fallbeispiel II:\newline Eine selbsttrainierende KI für Tic-Tac-Toe}
|
||||
\subsection{Das Prinzip}
|
||||
\subsection{Chance-Tree Optimierung}
|
||||
\subsection{Lösung mittels eines neuronalen Netzes}
|
||||
\subsection{Vergleich}
|
||||
Das Netz wurde innerhalb von 2 Stunden 200 Epochen lang trainiert und erreicht am Ende eine Genauigkeit von $98,13\%$. Das verwendete System bestand aus einer Nvidia GeForce GTX 960, sowie einer CPU mit 4 Kernen, die hier jedoch nicht weiter relevant sind, da das Training auf der GPU stattfand, unter Ubuntu 18.04. Die Batchsize betrug 200. Der Kreuzentropiefehler wurde jedes Mal, nachdem er berechnet wurde, mit Hilfe von weights and biases geloggt. Nach jeder Epoche wurde das Netz außerdem auf dem Testdatensatz getestet und die Trefferquote wurde ebenfalls geloggt. Wie sich diese im Laufe der Zeit entwickelt, ist in Abbildung~\ref{accuracy} dargestellt.
|
||||
\begin{figure}[h]
|
||||
\includegraphics[width=\linewidth]{../graphics/Wandb_accuracy.png}
|
||||
\caption{Ein Plot der Trefferquote aufgetragen gegen die Trainingszeit}
|
||||
\label{accuracy}
|
||||
\end{figure}
|
||||
\\
|
||||
Aus den Daten geht hervor, dass der Anstieg der Trefferquote in den ersten 10 Epochen extrem groß ist. In den nächsten rund 65 Epochen schwankt sie zwischen etwa 97\% und 98\% und stagniert danach knapp über 98\%, wo sie nur noch eine geringe Verbesserung aufweist. Das Netz hat also nach 75 Epochen seinen Höhepunkt erreicht und konnte sich nicht weiter verbessern. Dies korrespondiert auch mit dem in Abbildung~\ref{loss} dargestellten Plot, der die Entwicklung des Kreuzentropiefehlers im Laufe des Trainings darstellt.
|
||||
\begin{figure}[h]
|
||||
\includegraphics[width=\linewidth]{../graphics/Wandb_loss.png}
|
||||
\caption{Ein Plot des Kreuzentropiefehlers aufgetragen gegen die Trainingszeit}
|
||||
\label{loss}
|
||||
\end{figure}
|
||||
\\
|
||||
Auch hier lässt sich ein extremes Abnehmen des Fehlers in den ersten 10 Epochen ablesen, das bis etwa zur 75.~Epoche von einem starken Schwanken gefolgt wird. Von da an stagniert der Fehler bei 0. Das Ziel von 97\% Genauigkeit wurde also um einen Prozentpunkt überschritten und somit lässt sich feststellen, dass das Netz sehr gut in der Lage ist, handgeschriebene Ziffern zu klassifizieren.
|
||||
\section{Schlusswort}
|
||||
Maschinelles Lernen ist ein extrem komplexes Forschungsgebiet, das ein enormes Potential aufweist. Die daraus hervorgehenden Technologien können das Leben revolutionieren und haben dies bereits in vielen Bereichen getan. Neuronale Netze stellen hier eine häufig verwendete Methode maschinellen Lernens dar. Sie sind an das menschliche Lernen angelehnt und können Klassifizierungs-, Regressions- und viele weitere Probleme lösen. Ihnen liegen algebraische Prozesse zugrunde, die aus dem Bereich der Statistik stammen. Um die Netze zu trainieren, müssen große Datensätze vorhanden sein. Dies kann ein großes datenschutztechnisches Problem darstellen. Die Ausgabe neuronaler Netze ist außerdem nie zu 100\% verlässlich. Trotz des großen Potentiales ist maschinelles Lernen jedoch nicht das Allheilmittel und kann zwar viele, aber bei weitem nicht alle Probleme lösen und ist bei einem Großteil der Probleme schlichtweg nicht effizient und verlässlich genug. Maschinelles Lernen hat dennoch einen Einzug in unser Alltagsleben gefunden und wir begegnen ihm am Flughafen, im Supermarkt und am Smartphone (Die Gesichtserkennungssoftware zum Entsperren vieler Geräte nutzt maschinelles Lernen um eine höhere Genauigkeit zu erzielen). Von einer Welt, die von selbstbewussten und intelligenten Maschinen beherrscht wird, sind wir allerdings noch weit entfernt.
|
||||
\newpage
|
||||
\begin{thebibliography}{99}
|
||||
\bibitem{1}
|
||||
|
|
|
|||
|
|
@ -1,82 +1,72 @@
|
|||
\boolfalse {citerequest}\boolfalse {citetracker}\boolfalse {pagetracker}\boolfalse {backtracker}\relax
|
||||
\babel@toc {ngerman}{}
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {section}{\numberline {1}Was ist maschinelles Lernen?}{3}{section.1}%
|
||||
\contentsline {section}{\numberline {1}Was ist maschinelles Lernen?}{2}{section.1}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {subsection}{\numberline {1.1}Klassifizierungsprobleme}{3}{subsection.1.1}%
|
||||
\contentsline {subsection}{\numberline {1.1}Klassifizierungsprobleme}{2}{subsection.1.1}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {subsection}{\numberline {1.2}Regressionsprobleme}{4}{subsection.1.2}%
|
||||
\contentsline {subsection}{\numberline {1.2}Regressionsprobleme}{3}{subsection.1.2}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {subsection}{\numberline {1.3}Gefahren von maschinellem Lernen}{5}{subsection.1.3}%
|
||||
\contentsline {subsection}{\numberline {1.3}Gefahren von maschinellem Lernen}{4}{subsection.1.3}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {subsubsection}{\numberline {1.3.1}Die Daten}{5}{subsubsection.1.3.1}%
|
||||
\contentsline {subsubsection}{\numberline {1.3.1}Die Daten}{4}{subsubsection.1.3.1}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {subsubsection}{\numberline {1.3.2}Overfitting}{6}{subsubsection.1.3.2}%
|
||||
\contentsline {subsubsection}{\numberline {1.3.2}Overfitting}{5}{subsubsection.1.3.2}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {section}{\numberline {2}Verschiedene Techniken maschinellen Lernens}{7}{section.2}%
|
||||
\contentsline {section}{\numberline {2}Verschiedene Techniken maschinellen Lernens}{6}{section.2}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {subsection}{\numberline {2.1}\IeC {\"U}berwachtes Lernen}{7}{subsection.2.1}%
|
||||
\contentsline {subsection}{\numberline {2.1}\IeC {\"U}berwachtes Lernen}{6}{subsection.2.1}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {subsection}{\numberline {2.2}Un\IeC {\"u}berwachtes Lernen}{7}{subsection.2.2}%
|
||||
\contentsline {subsection}{\numberline {2.2}Un\IeC {\"u}berwachtes Lernen}{6}{subsection.2.2}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {subsection}{\numberline {2.3}Best\IeC {\"a}rkendes Lernen}{7}{subsection.2.3}%
|
||||
\contentsline {subsection}{\numberline {2.3}Best\IeC {\"a}rkendes Lernen}{6}{subsection.2.3}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {section}{\numberline {3}Neuronale Netze}{8}{section.3}%
|
||||
\contentsline {section}{\numberline {3}Neuronale Netze}{7}{section.3}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {subsection}{\numberline {3.1}Maschinelles Lernen und menschliches Lernen}{8}{subsection.3.1}%
|
||||
\contentsline {subsection}{\numberline {3.1}Maschinelles Lernen und menschliches Lernen}{7}{subsection.3.1}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {subsection}{\numberline {3.2}Der Aufbau eines neuronalen Netzes}{9}{subsection.3.2}%
|
||||
\contentsline {subsection}{\numberline {3.2}Der Aufbau eines neuronalen Netzes}{8}{subsection.3.2}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {subsection}{\numberline {3.3}Berechnung des Ausgabevektors}{9}{subsection.3.3}%
|
||||
\contentsline {subsection}{\numberline {3.3}Berechnung des Ausgabevektors}{8}{subsection.3.3}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {subsection}{\numberline {3.4}Der Lernprozess}{12}{subsection.3.4}%
|
||||
\contentsline {subsection}{\numberline {3.4}Der Lernprozess}{11}{subsection.3.4}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {subsection}{\numberline {3.5}Fehlerfunktionen}{12}{subsection.3.5}%
|
||||
\contentsline {subsection}{\numberline {3.5}Fehlerfunktionen}{11}{subsection.3.5}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {subsubsection}{\numberline {3.5.1}MSE -- Durchschnittlicher quadratischer Fehler}{13}{subsubsection.3.5.1}%
|
||||
\contentsline {subsubsection}{\numberline {3.5.1}MSE -- Durchschnittlicher quadratischer Fehler}{12}{subsubsection.3.5.1}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {subsubsection}{\numberline {3.5.2}MAE -- Durchschnitztlicher absoluter Fehler}{13}{subsubsection.3.5.2}%
|
||||
\contentsline {subsubsection}{\numberline {3.5.2}MAE -- Durchschnitztlicher absoluter Fehler}{12}{subsubsection.3.5.2}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {subsubsection}{\numberline {3.5.3}Kreuzentropiefehler}{13}{subsubsection.3.5.3}%
|
||||
\contentsline {subsubsection}{\numberline {3.5.3}Kreuzentropiefehler}{12}{subsubsection.3.5.3}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {subsection}{\numberline {3.6}Gradientenverfahren und Backpropagation}{15}{subsection.3.6}%
|
||||
\contentsline {subsection}{\numberline {3.6}Gradientenverfahren und Backpropagation}{14}{subsection.3.6}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {subsubsection}{\numberline {3.6.1}Lernrate}{15}{subsubsection.3.6.1}%
|
||||
\contentsline {subsubsection}{\numberline {3.6.1}Lernrate}{14}{subsubsection.3.6.1}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {subsection}{\numberline {3.7}Verschiedene Layerarten}{16}{subsection.3.7}%
|
||||
\contentsline {subsection}{\numberline {3.7}Verschiedene Layerarten}{15}{subsection.3.7}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {subsubsection}{\numberline {3.7.1}Convolutional Layers}{17}{subsubsection.3.7.1}%
|
||||
\contentsline {subsubsection}{\numberline {3.7.1}Convolutional Layers}{16}{subsubsection.3.7.1}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {subsubsection}{\numberline {3.7.2}Pooling Layers}{19}{subsubsection.3.7.2}%
|
||||
\contentsline {subsubsection}{\numberline {3.7.2}Pooling Layers}{18}{subsubsection.3.7.2}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {section}{\numberline {4}PyTorch}{21}{section.4}%
|
||||
\contentsline {section}{\numberline {4}PyTorch}{20}{section.4}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {subsection}{\numberline {4.1}Datenvorbereitung}{22}{subsection.4.1}%
|
||||
\contentsline {subsection}{\numberline {4.1}Datenvorbereitung}{21}{subsection.4.1}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {subsection}{\numberline {4.2}Definieren des Netzes}{23}{subsection.4.2}%
|
||||
\contentsline {subsection}{\numberline {4.2}Definieren des Netzes}{22}{subsection.4.2}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {subsection}{\numberline {4.3}Trainieren des Netzes}{25}{subsection.4.3}%
|
||||
\contentsline {subsection}{\numberline {4.3}Trainieren des Netzes}{24}{subsection.4.3}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {subsection}{\numberline {4.4}Pytorch und weights and biases}{26}{subsection.4.4}%
|
||||
\contentsline {subsection}{\numberline {4.4}Pytorch und weights and biases}{25}{subsection.4.4}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {section}{\numberline {5}Fallbeispiel I:\newline Ein Klassifizierungsnetzwerk f\IeC {\"u}r handgeschriebene Ziffern}{27}{section.5}%
|
||||
\contentsline {section}{\numberline {5}Ein Klassifizierungsnetzwerk f\IeC {\"u}r handgeschriebene Ziffern}{26}{section.5}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {subsection}{\numberline {5.1}Aufgabe}{27}{subsection.5.1}%
|
||||
\contentsline {subsection}{\numberline {5.1}Aufgabe}{26}{subsection.5.1}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {subsection}{\numberline {5.2}Der MNIST Datensatz}{27}{subsection.5.2}%
|
||||
\contentsline {subsection}{\numberline {5.2}Der MNIST Datensatz}{26}{subsection.5.2}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {subsection}{\numberline {5.3}Das Netz}{28}{subsection.5.3}%
|
||||
\contentsline {subsection}{\numberline {5.3}Das Netz}{26}{subsection.5.3}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {subsection}{\numberline {5.4}Ergebnis}{28}{subsection.5.4}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {section}{\numberline {6}Fallbeispiel II:\newline Eine selbsttrainierende KI f\IeC {\"u}r Tic-Tac-Toe}{28}{section.6}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {subsection}{\numberline {6.1}Das Prinzip}{28}{subsection.6.1}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {subsection}{\numberline {6.2}Chance-Tree Optimierung}{28}{subsection.6.2}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {subsection}{\numberline {6.3}L\IeC {\"o}sung mittels eines neuronalen Netzes}{28}{subsection.6.3}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {subsection}{\numberline {6.4}Vergleich}{28}{subsection.6.4}%
|
||||
\defcounter {refsection}{0}\relax
|
||||
\contentsline {section}{\numberline {7}Schlusswort}{28}{section.7}%
|
||||
\contentsline {section}{\numberline {6}Schlusswort}{28}{section.6}%
|
||||
|
|
|
|||
|
|
@ -0,0 +1,3 @@
|
|||
\begin{Verbatim}[commandchars=\\\{\}]
|
||||
\PYG{n}{forward}\PYG{p}{()}
|
||||
\end{Verbatim}
|
||||
|
|
@ -0,0 +1,18 @@
|
|||
\begin{Verbatim}[commandchars=\\\{\}]
|
||||
\PYG{k}{class} \PYG{n+nc}{Net}\PYG{p}{(}\PYG{n}{nn}\PYG{o}{.}\PYG{n}{Module}\PYG{p}{):}
|
||||
\PYG{k}{def} \PYG{n+nf+fm}{\PYGZus{}\PYGZus{}init\PYGZus{}\PYGZus{}}\PYG{p}{(}\PYG{n+nb+bp}{self}\PYG{p}{):}
|
||||
\PYG{n+nb}{super}\PYG{p}{()}\PYG{o}{.}\PYG{n+nf+fm}{\PYGZus{}\PYGZus{}init\PYGZus{}\PYGZus{}}\PYG{p}{()}
|
||||
\PYG{n+nb+bp}{self}\PYG{o}{.}\PYG{n}{fc1} \PYG{o}{=} \PYG{n}{nn}\PYG{o}{.}\PYG{n}{Linear}\PYG{p}{(}\PYG{l+m+mi}{28} \PYG{o}{*} \PYG{l+m+mi}{28}\PYG{p}{,} \PYG{l+m+mi}{64}\PYG{p}{)}
|
||||
\PYG{n+nb+bp}{self}\PYG{o}{.}\PYG{n}{fc2} \PYG{o}{=} \PYG{n}{nn}\PYG{o}{.}\PYG{n}{Linear}\PYG{p}{(}\PYG{l+m+mi}{64}\PYG{p}{,} \PYG{l+m+mi}{120}\PYG{p}{)}
|
||||
\PYG{n+nb+bp}{self}\PYG{o}{.}\PYG{n}{fc3} \PYG{o}{=} \PYG{n}{nn}\PYG{o}{.}\PYG{n}{Linear}\PYG{p}{(}\PYG{l+m+mi}{120}\PYG{p}{,} \PYG{l+m+mi}{120}\PYG{p}{)}
|
||||
\PYG{n+nb+bp}{self}\PYG{o}{.}\PYG{n}{fc4} \PYG{o}{=} \PYG{n}{nn}\PYG{o}{.}\PYG{n}{Linear}\PYG{p}{(}\PYG{l+m+mi}{120}\PYG{p}{,} \PYG{l+m+mi}{64}\PYG{p}{)}
|
||||
\PYG{n+nb+bp}{self}\PYG{o}{.}\PYG{n}{fc5} \PYG{o}{=} \PYG{n}{nn}\PYG{o}{.}\PYG{n}{Linear}\PYG{p}{(}\PYG{l+m+mi}{64}\PYG{p}{,} \PYG{l+m+mi}{10}\PYG{p}{)}
|
||||
|
||||
\PYG{k}{def} \PYG{n+nf}{forward}\PYG{p}{(}\PYG{n+nb+bp}{self}\PYG{p}{,} \PYG{n}{x}\PYG{p}{):}
|
||||
\PYG{n}{x} \PYG{o}{=} \PYG{n}{F}\PYG{o}{.}\PYG{n}{relu}\PYG{p}{(}\PYG{n+nb+bp}{self}\PYG{o}{.}\PYG{n}{fc1}\PYG{p}{(}\PYG{n}{x}\PYG{p}{))}
|
||||
\PYG{n}{x} \PYG{o}{=} \PYG{n}{F}\PYG{o}{.}\PYG{n}{relu}\PYG{p}{(}\PYG{n+nb+bp}{self}\PYG{o}{.}\PYG{n}{fc2}\PYG{p}{(}\PYG{n}{x}\PYG{p}{))}
|
||||
\PYG{n}{x} \PYG{o}{=} \PYG{n}{F}\PYG{o}{.}\PYG{n}{relu}\PYG{p}{(}\PYG{n+nb+bp}{self}\PYG{o}{.}\PYG{n}{fc3}\PYG{p}{(}\PYG{n}{x}\PYG{p}{))}
|
||||
\PYG{n}{x} \PYG{o}{=} \PYG{n}{F}\PYG{o}{.}\PYG{n}{relu}\PYG{p}{(}\PYG{n+nb+bp}{self}\PYG{o}{.}\PYG{n}{fc4}\PYG{p}{(}\PYG{n}{x}\PYG{p}{))}
|
||||
\PYG{n}{x} \PYG{o}{=} \PYG{n+nb+bp}{self}\PYG{o}{.}\PYG{n}{fc5}\PYG{p}{(}\PYG{n}{x}\PYG{p}{)}
|
||||
\PYG{k}{return} \PYG{n}{F}\PYG{o}{.}\PYG{n}{log\PYGZus{}softmax}\PYG{p}{(}\PYG{n}{x}\PYG{p}{,} \PYG{n}{dim}\PYG{o}{=}\PYG{l+m+mi}{1}\PYG{p}{)}
|
||||
\end{Verbatim}
|
||||
Loading…
Add table
Add a link
Reference in a new issue