From 1ef305861d931a98b410371c6d61b959d8e365b7 Mon Sep 17 00:00:00 2001
From: "Jip J. Dekker"
Date: Fri, 25 May 2018 12:13:57 +1000
Subject: [PATCH 1/2] Add initial abstract

---
 mini_proj/report/waldo.tex | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)

diff --git a/mini_proj/report/waldo.tex b/mini_proj/report/waldo.tex
index 2b101de..b4b1e81 100644
--- a/mini_proj/report/waldo.tex
+++ b/mini_proj/report/waldo.tex
@@ -24,11 +24,23 @@
 \begin{document}
 
 \title{What is Waldo?}
-  \author{Kelvin Davis \and Jip J. Dekker\and Anthony Silvestere}
+  \author{Kelvin Davis \and Jip J. Dekker \and Anthony Silvestere}
 \maketitle
 
 \begin{abstract}
-
+%
+  The famous brand of picture puzzles ``Where's Waldo?'' relates well to
+  many unsolved image classification problems. This offers us the
+  opportunity to test different image classification methods on a data set
+  that is both small enough to compute in a reasonable time span and easy
+  for humans to understand. In this report we compare the well-known
+  machine learning methods Naive Bayes, Support Vector Machines,
+  $k$-Nearest Neighbors, and Random Forest against the neural network
+  architectures LeNet and Fully Convolutional Neural Networks.
+  \todo{I don't like this big summation but I think it is the important
+  information}
+  Our comparison shows that \todo{...}
+%
 \end{abstract}
 
 \section{Introduction}

From e4cf37d25a4d1e54788d0818ea2b7990fcdb3671 Mon Sep 17 00:00:00 2001
From: "Jip J. Dekker"
Date: Fri, 25 May 2018 12:32:39 +1000
Subject: [PATCH 2/2] Naive Bayes description

---
 mini_proj/report/waldo.tex | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/mini_proj/report/waldo.tex b/mini_proj/report/waldo.tex
index 8e21bbe..d101572 100644
--- a/mini_proj/report/waldo.tex
+++ b/mini_proj/report/waldo.tex
@@ -118,7 +118,20 @@
 
   \paragraph{Naive Bayes Classifier}
 
-  \cite{naivebayes}
+  The Naive Bayes classifier~\cite{naivebayes} is a classification method
+  based on Bayes' theorem, shown in \Cref{eq:bayes}. Bayes' theorem allows
+  us to calculate the probability of an event while taking into account
+  prior knowledge of conditions related to that event. In classification
+  this allows us to calculate the probability that a new instance belongs
+  to a certain class based on its features; we then assign the class with
+  the highest probability. The method is \emph{naive} in that it assumes
+  all features to be conditionally independent of one another given the
+  class.
+
+  \begin{equation}
+    \label{eq:bayes}
+    P(A \mid B) = \frac{P(B \mid A)\,P(A)}{P(B)}
+  \end{equation}
 
   \paragraph{$k$-Nearest Neighbors}
 
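The conditional-independence assumption named in the Naive Bayes description of PATCH 2/2 could also be spelled out as a decision rule. The following LaTeX sketch is illustrative and not part of the patches: the label eq:naivebayes, the feature symbols $x_1, \ldots, x_n$, and the class symbol $C_k$ are all assumed names.

% Sketch only: factorized Naive Bayes decision rule under the
% conditional-independence assumption; symbols and label are illustrative.
\begin{equation}
  \label{eq:naivebayes}
  \hat{y} = \arg\max_{C_k} P(C_k) \prod_{i=1}^{n} P(x_i \mid C_k)
\end{equation}

Here $P(C_k)$ is the class prior and each $P(x_i \mid C_k)$ is a per-feature likelihood; factorizing the joint likelihood this way is what makes the classifier cheap to train and evaluate.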