added chapter about loss functions
parent a3f984996c
commit 411d967069
9 changed files with 10321 additions and 135 deletions
@@ -1,28 +1,40 @@
 \BOOKMARK [1][-]{section.1}{Was ist maschinelles Lernen?}{}% 1
-\BOOKMARK [2][-]{subsection.1.1}{Einsatzgebiete maschinellen Lernens}{section.1}% 2
-\BOOKMARK [1][-]{section.2}{Neuronale Netze}{}% 3
-\BOOKMARK [2][-]{subsection.2.1}{Maschinelles Lernen und menschliches Lernen}{section.2}% 4
-\BOOKMARK [2][-]{subsection.2.2}{Der Aufbau eines neuronalen Netzes}{section.2}% 5
-\BOOKMARK [2][-]{subsection.2.3}{Berechnung des Ausgabevektors}{section.2}% 6
-\BOOKMARK [2][-]{subsection.2.4}{Der Lernprozess}{section.2}% 7
-\BOOKMARK [3][-]{subsubsection.2.4.1}{Fehlerfunktionen}{subsection.2.4}% 8
-\BOOKMARK [3][-]{subsubsection.2.4.2}{Gradientenverfahren}{subsection.2.4}% 9
-\BOOKMARK [2][-]{subsection.2.5}{Verschiedene Layerarten}{section.2}% 10
-\BOOKMARK [3][-]{subsubsection.2.5.1}{Fully connected Layers}{subsection.2.5}% 11
-\BOOKMARK [3][-]{subsubsection.2.5.2}{Convolutional Layers}{subsection.2.5}% 12
-\BOOKMARK [3][-]{subsubsection.2.5.3}{Pooling Layers}{subsection.2.5}% 13
-\BOOKMARK [1][-]{section.3}{PyTorch}{}% 14
-\BOOKMARK [2][-]{subsection.3.1}{Datenvorbereitung}{section.3}% 15
-\BOOKMARK [2][-]{subsection.3.2}{Definieren des Netzes}{section.3}% 16
-\BOOKMARK [2][-]{subsection.3.3}{Trainieren des Netzes}{section.3}% 17
-\BOOKMARK [1][-]{section.4}{Fallbeispiel I:Ein Klassifizierungsnetzwerk f\374r handgeschriebene Ziffern}{}% 18
-\BOOKMARK [2][-]{subsection.4.1}{Aufgabe}{section.4}% 19
-\BOOKMARK [2][-]{subsection.4.2}{Der MNIST Datensatz}{section.4}% 20
-\BOOKMARK [2][-]{subsection.4.3}{Fragmentbasierte Erkennung}{section.4}% 21
-\BOOKMARK [2][-]{subsection.4.4}{Ergebnis}{section.4}% 22
-\BOOKMARK [1][-]{section.5}{Fallbeispiel II:Eine selbsttrainierende KI f\374r Tic-Tac-Toe}{}% 23
-\BOOKMARK [2][-]{subsection.5.1}{Das Prinzip}{section.5}% 24
-\BOOKMARK [2][-]{subsection.5.2}{Chance-Tree Optimierung}{section.5}% 25
-\BOOKMARK [2][-]{subsection.5.3}{L\366sung mittels eines neuronalen Netzes}{section.5}% 26
-\BOOKMARK [2][-]{subsection.5.4}{Vergleich}{section.5}% 27
-\BOOKMARK [1][-]{section.6}{Schlusswort}{}% 28
+\BOOKMARK [2][-]{subsection.1.1}{Klassifizierungsprobleme}{section.1}% 2
+\BOOKMARK [2][-]{subsection.1.2}{Regressionsprobleme}{section.1}% 3
+\BOOKMARK [2][-]{subsection.1.3}{Gefahren von maschinellem Lernen}{section.1}% 4
+\BOOKMARK [3][-]{subsubsection.1.3.1}{Eignung der Datens\344tze}{subsection.1.3}% 5
+\BOOKMARK [3][-]{subsubsection.1.3.2}{Overfitting}{subsection.1.3}% 6
+\BOOKMARK [3][-]{subsubsection.1.3.3}{Unbewusste Manipulation der Daten}{subsection.1.3}% 7
+\BOOKMARK [1][-]{section.2}{Verschiedene Techniken maschinellen lernens}{}% 8
+\BOOKMARK [2][-]{subsection.2.1}{\334berwachtes Lernen}{section.2}% 9
+\BOOKMARK [2][-]{subsection.2.2}{Un\374berwachtes Lernen}{section.2}% 10
+\BOOKMARK [2][-]{subsection.2.3}{Best\344rkendes Lernen}{section.2}% 11
+\BOOKMARK [1][-]{section.3}{Neuronale Netze}{}% 12
+\BOOKMARK [2][-]{subsection.3.1}{Maschinelles Lernen und menschliches Lernen}{section.3}% 13
+\BOOKMARK [2][-]{subsection.3.2}{Der Aufbau eines neuronalen Netzes}{section.3}% 14
+\BOOKMARK [2][-]{subsection.3.3}{Berechnung des Ausgabevektors}{section.3}% 15
+\BOOKMARK [2][-]{subsection.3.4}{Der Lernprozess}{section.3}% 16
+\BOOKMARK [2][-]{subsection.3.5}{Fehlerfunktionen}{section.3}% 17
+\BOOKMARK [3][-]{subsubsection.3.5.1}{MSE \205 Durchschnittlicher quadratischer Fehler}{subsection.3.5}% 18
+\BOOKMARK [3][-]{subsubsection.3.5.2}{MAE \205 Durchschnitztlicher absoluter Fehler}{subsection.3.5}% 19
+\BOOKMARK [3][-]{subsubsection.3.5.3}{Kreuzentropiefehler}{subsection.3.5}% 20
+\BOOKMARK [2][-]{subsection.3.6}{Gradientenverfahren}{section.3}% 21
+\BOOKMARK [2][-]{subsection.3.7}{Verschiedene Layerarten}{section.3}% 22
+\BOOKMARK [3][-]{subsubsection.3.7.1}{Fully connected Layers}{subsection.3.7}% 23
+\BOOKMARK [3][-]{subsubsection.3.7.2}{Convolutional Layers}{subsection.3.7}% 24
+\BOOKMARK [3][-]{subsubsection.3.7.3}{Pooling Layers}{subsection.3.7}% 25
+\BOOKMARK [1][-]{section.4}{PyTorch}{}% 26
+\BOOKMARK [2][-]{subsection.4.1}{Datenvorbereitung}{section.4}% 27
+\BOOKMARK [2][-]{subsection.4.2}{Definieren des Netzes}{section.4}% 28
+\BOOKMARK [2][-]{subsection.4.3}{Trainieren des Netzes}{section.4}% 29
+\BOOKMARK [1][-]{section.5}{Fallbeispiel I:Ein Klassifizierungsnetzwerk f\374r handgeschriebene Ziffern}{}% 30
+\BOOKMARK [2][-]{subsection.5.1}{Aufgabe}{section.5}% 31
+\BOOKMARK [2][-]{subsection.5.2}{Der MNIST Datensatz}{section.5}% 32
+\BOOKMARK [2][-]{subsection.5.3}{Fragmentbasierte Erkennung}{section.5}% 33
+\BOOKMARK [2][-]{subsection.5.4}{Ergebnis}{section.5}% 34
+\BOOKMARK [1][-]{section.6}{Fallbeispiel II:Eine selbsttrainierende KI f\374r Tic-Tac-Toe}{}% 35
+\BOOKMARK [2][-]{subsection.6.1}{Das Prinzip}{section.6}% 36
+\BOOKMARK [2][-]{subsection.6.2}{Chance-Tree Optimierung}{section.6}% 37
+\BOOKMARK [2][-]{subsection.6.3}{L\366sung mittels eines neuronalen Netzes}{section.6}% 38
+\BOOKMARK [2][-]{subsection.6.4}{Vergleich}{section.6}% 39
+\BOOKMARK [1][-]{section.7}{Schlusswort}{}% 40