Wrote a script to write test results to a text file
commit a0675ce03f

mini_proj/test_nn.py (new file, 15 lines)
@@ -0,0 +1,15 @@
+import numpy as np
+from keras.models import Model
+from keras.utils import to_categorical
+
+pred_y = np.load("predicted_results.npy")
+test_y = np.load("Waldo_test_lbl.npy")
+
+test_y = to_categorical(test_y)
+
+f = open("test_output.txt", 'w')
+
+for i in range(0, len(test_y)):
+    print(pred_y[i], test_y[i], file=f)
+
+f.close()
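The commit does not include the step that writes predicted_results.npy. A minimal sketch of how it could be produced, assuming the trained FCN's weights were saved to disk; the module name nn, the checkpoint name waldo_weights.h5, and the test-data file Waldo_test_data.npy are all assumptions, not shown in this commit:

import numpy as np
from nn import FCN  # assumed module holding the FCN() changed below

model = FCN()
model.load_weights("waldo_weights.h5")    # assumed checkpoint name
test_x = np.load("Waldo_test_data.npy")   # assumed test-image array
np.save("predicted_results.npy", model.predict(test_x))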
mini_proj/test_output.txt (new file, 1344 lines)

File diff suppressed because it is too large.
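The suppressed dump is presumably one pred_y[i] / test_y[i] pair per line, as written by test_nn.py. For a headline number rather than eyeballing the file, the same arrays can be compared directly (a sketch using only the files test_nn.py already loads):

import numpy as np
from keras.utils import to_categorical

pred_y = np.load("predicted_results.npy")
test_y = to_categorical(np.load("Waldo_test_lbl.npy"))
# Compare the predicted class (argmax over the 2-unit output)
# against the class encoded by the one-hot label.
acc = np.mean(np.argmax(pred_y, axis=1) == np.argmax(test_y, axis=1))
print("accuracy:", acc)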
@@ -37,12 +37,12 @@ def FCN():
     m_pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
 
     conv3 = Conv2D(32, (3, 3), activation='relu', padding='same')(m_pool2)
-    # drop2 = Dropout(0.2)(conv3)  # Drop some portion of features to prevent overfitting
-    # m_pool2 = MaxPooling2D(pool_size=(2, 2))(drop2)
+    drop2 = Dropout(0.2)(conv3)  # Drop some portion of features to prevent overfitting
+    m_pool2 = MaxPooling2D(pool_size=(2, 2))(drop2)
 
     # conv4 = Conv2D(64, (2, 2), activation='relu', padding='same')(m_pool2)
 
-    flat = Flatten()(conv3)  # Makes data 1D
+    flat = Flatten()(m_pool2)  # Makes data 1D
     dense = Dense(64, activation='relu')(flat)  # Fully connected layer
     drop3 = Dropout(0.2)(dense)
     classif = Dense(2, activation='sigmoid')(drop3)  # Final layer to classify
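This hunk re-enables the Dropout and second MaxPooling2D after conv3, so Flatten must now consume m_pool2; left on conv3, the two re-enabled layers would be dead branches feeding nothing. A minimal sketch of the resulting block (the 64x64 RGB input and everything above conv2 are assumptions, since they fall outside the hunk):

from keras.layers import Input, Conv2D, MaxPooling2D, Dropout, Flatten

inputs = Input(shape=(64, 64, 3))  # assumed input size, not shown in the diff
conv2 = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)   # 64x64x32
m_pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)                         # 32x32x32
conv3 = Conv2D(32, (3, 3), activation='relu', padding='same')(m_pool2)  # 32x32x32
drop2 = Dropout(0.2)(conv3)                      # re-enabled by this commit
m_pool2 = MaxPooling2D(pool_size=(2, 2))(drop2)  # 16x16x32; rebinds m_pool2
flat = Flatten()(m_pool2)  # 8192 features; Flatten()(conv3) would bypass both new layers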
@@ -50,26 +50,55 @@ def FCN():
     ## Define the model structure
     model = Model(inputs=inputs, outputs=classif)
     # Optimizer recommended Adadelta values (lr=0.01)
-    model.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=[f_measure])
+    model.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=['accuracy', f1])
 
     return model
 
-def precision(y_true, y_pred):
-    y_pred = np.round(y_pred)
-    num = np.sum(np.logical_and(y_true, y_pred))
-    den = np.sum(y_pred)
-    return np.divide(num, den)
-
-def recall(y_true, y_pred):
-    y_pred = np.round(y_pred)
-    num = np.sum(np.logical_and(y_true, y_pred))
-    den = np.sum(y_true)
-    return np.divide(num, den)
-
-def f_measure(y_true, y_pred):
-    p = precision(y_true, y_pred)
-    r = recall(y_true, y_pred)
-    return 2 * p * r / (p + r)
+def f1(y_true, y_pred):
+    def recall(y_true, y_pred):
+        """Recall metric.
+
+        Only computes a batch-wise average of recall.
+
+        Computes the recall, a metric for multi-label classification of
+        how many relevant items are selected.
+        """
+        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
+        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
+        recall = true_positives / (possible_positives + K.epsilon())
+        return recall
+
+    def precision(y_true, y_pred):
+        """Precision metric.
+
+        Only computes a batch-wise average of precision.
+
+        Computes the precision, a metric for multi-label classification of
+        how many selected items are relevant.
+        """
+        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
+        predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
+        precision = true_positives / (predicted_positives + K.epsilon())
+        return precision
+
+    precision = precision(y_true, y_pred)
+    recall = recall(y_true, y_pred)
+    return 2*((precision*recall)/(precision+recall+K.epsilon()))
+
+# def precision(y_true, y_pred):
+#     y_pred = K.round(y_pred)
+#     num = K.sum(tf.logical_and(y_true, y_pred))
+#     den = K.sum(y_pred)
+#     return K.divide(num, den)
+
+# def recall(y_true, y_pred):
+#     y_pred = K.round(y_pred)
+#     num = K.sum(tf.logical_and(y_true, y_pred))
+#     den = K.sum(y_true)
+#     return K.divide(num, den)
+
+# def f_measure(y_true, y_pred):
+#     p = precision(y_true, y_pred)
+#     r = recall(y_true, y_pred)
+#     return 2 * p * r / (p + r)
 
 ## Open data
 im_train = np.load('Waldo_train_data.npy')
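The removed NumPy metrics generally cannot execute inside model.compile, since Keras hands metric functions symbolic backend tensors rather than arrays; that is presumably why the replacement f1 sticks to K.* ops, at the cost of being a batch-wise approximation rather than the exact epoch-level F1. A quick sanity check on a toy batch (a sketch; assumes a TensorFlow-backed Keras and that f1 from the hunk above is in scope):

from keras import backend as K

y_true = K.constant([[0., 1.], [1., 0.], [0., 1.]])
y_pred = K.constant([[0.1, 0.9], [0.4, 0.6], [0.2, 0.8]])
# Rounded predictions are [[0,1], [0,1], [0,1]]: 2 true positives,
# 3 predicted positives, 3 actual positives, so precision = recall = 2/3
# and F1 = 2 * (2/3 * 2/3) / (2/3 + 2/3) = 2/3.
print(K.eval(f1(y_true, y_pred)))  # ~0.6667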