From ed157f6efc9a021af5b1b9fcb5b73528b1e64e42 Mon Sep 17 00:00:00 2001
From: Kelvin Davis <273degreeskelvin@gmail.com>
Date: Wed, 23 May 2018 22:52:58 +1000
Subject: [PATCH] Precision, recall and f_measure.

---
 mini_proj/waldo_model.py | 27 +++++++++++++++++++++------
 1 file changed, 21 insertions(+), 6 deletions(-)

diff --git a/mini_proj/waldo_model.py b/mini_proj/waldo_model.py
index 0d9aa43..ef5c75d 100644
--- a/mini_proj/waldo_model.py
+++ b/mini_proj/waldo_model.py
@@ -7,7 +7,7 @@
 os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
 #from keras.models import Sequential
 from keras.layers import Dense, Dropout, Activation, Flatten, Input
 from keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D
-from keras.models import Model 
+from keras.models import Model
 from sklearn import svm, tree, naive_bayes, ensemble
 from _image_classifier import ImageClassifier
@@ -35,21 +35,37 @@ def FCN():
     conv3 = Conv2D(32, (2, 2), activation='relu', padding='same')(m_pool2)
     drop2 = Dropout(0.2)(conv3) # Drop some portion of features to prevent overfitting
     m_pool2 = MaxPooling2D(pool_size=(2, 2))(drop2)
-    
+
     conv4 = Conv2D(164, (2, 2), activation='relu', padding='same')(m_pool2)
     flat = Flatten()(conv4) # Makes data 1D
     dense = Dense(64, activation='relu')(flat) # Fully connected layer
-    drop3 = Dropout(0.2)(dense) 
+    drop3 = Dropout(0.2)(dense)
     classif = Dense(2, activation='softmax')(drop3) # Final layer to classify
 
     ## Define the model structure
     model = Model(inputs=inputs, outputs=classif)
 
     # Optimizer recommended Adadelta values (lr=0.01)
-    model.compile(optimizer=Adadelta(lr=0.1), loss='sparse_categorical_crossentropy', metrics=['accuracy']) 
+    model.compile(optimizer=Adadelta(lr=0.1), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
 
     return model
 
+def precision(y_true, y_pred):
+    y_pred = np.round(y_pred)
+    num = np.sum(np.logical_and(y_true, y_pred))
+    den = np.sum(y_pred)
+    return np.divide(num, den)
+
+def recall(y_true, y_pred):
+    y_pred = np.round(y_pred)
+    num = np.sum(np.logical_and(y_true, y_pred))
+    den = np.sum(y_true)
+    return np.divide(num, den)
+
+def f_measure(y_true, y_pred):
+    p = precision(y_true, y_pred)
+    r = recall(y_true, y_pred)
+    return 2 * p * r / (p + r)
 
 ## Open data
 im_train = np.load('Waldo_train_data.npy')
@@ -65,7 +81,7 @@
 naive_bayes_iclf = ImageClassifier(naive_bayes.GaussianNBd)
 ensemble_iclf = ImageClassifier(ensemble.RandomForestClassifier)
 ## Define training parameters
-epochs = 20 # an epoch is one forward pass and back propogation of all training data 
+epochs = 20 # an epoch is one forward pass and back propagation of all training data
 batch_size = 5
 #lrate = 0.01
 #decay = lrate/epochs
@@ -131,4 +147,3 @@ pred_lbl = model.predict(im_test, verbose=1, batch_size=batch_size)
 end = t.time()
 print("Images generated in {} seconds".format(end - start))
 np.save('predicted_results.npy', pred_lbl)
-
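
Note: the three new metrics are plain NumPy functions, so they can be
sanity-checked outside of training. A minimal sketch, assuming
import numpy as np and the three functions from this patch are in scope;
the label/probability vectors below are made up for illustration. Be aware
that np.divide returns nan (with a runtime warning) when the denominator
is zero, e.g. precision() on a batch with no predicted positives, or
f_measure() when p + r == 0.

    import numpy as np

    y_true = np.array([1, 0, 1, 1, 0])            # ground-truth labels
    y_pred = np.array([0.9, 0.2, 0.4, 0.8, 0.6])  # predicted probabilities

    # np.round() thresholds the probabilities at 0.5 before counting,
    # so y_pred is treated as [1, 0, 0, 1, 1]:
    print(precision(y_true, y_pred))  # 2 TP / 3 predicted positives ~ 0.667
    print(recall(y_true, y_pred))     # 2 TP / 3 actual positives    ~ 0.667
    print(f_measure(y_true, y_pred))  # harmonic mean of the two     ~ 0.667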
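Note also that these metrics are evaluated on NumPy arrays after
prediction; they cannot be passed to model.compile(metrics=[...]), which
expects functions built from Keras backend tensor ops. If an in-training
metric were wanted, a backend-based sketch (illustrative only, not part of
this patch; assumes the same 0/1 encoding as the NumPy versions) could
look like:

    from keras import backend as K

    def precision_tensor(y_true, y_pred):
        # Symbolic counterpart of precision(): the counts are tensors,
        # and K.epsilon() guards against division by zero on batches
        # with no predicted positives.
        true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        pred_pos = K.sum(K.round(K.clip(y_pred, 0, 1)))
        return true_pos / (pred_pos + K.epsilon())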