Cleaned a little bit of code
This commit is contained in:
parent fcefa9b88d
commit 3ebdfb7de7
@@ -25,7 +25,7 @@ from keras.utils import to_categorical
'''
Model definition: defines the network structure
'''
def FCN():
def CNN():
    ## List of model layers
    inputs = Input((3, 64, 64))
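    # Note: (3, 64, 64) is a channels-first input shape, so these models assume
    # image_data_format='channels_first' (Theano-style ordering); with the Keras
    # default 'channels_last', the input would instead be shaped (64, 64, 3).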
@@ -33,7 +33,6 @@ def FCN():
    m_pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2D(32, (3, 3), activation='relu', padding='same')(m_pool1)
    #drop1 = Dropout(0.2)(conv2) # Drop some portion of features to prevent overfitting
    m_pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Conv2D(32, (3, 3), activation='relu', padding='same')(m_pool2)
@@ -47,13 +46,81 @@ def FCN():
    drop3 = Dropout(0.2)(dense)
    classif = Dense(2, activation='sigmoid')(drop3) # Final layer to classify

    ## Define the model structure
    ## Define the model start and end
    model = Model(inputs=inputs, outputs=classif)
    # Optimizer recommended Adadelta values (lr=0.01)
    model.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=['accuracy', f1])

    return model
'''
Model definition for a mostly convolutional network structure (only the final classification layer is dense)
'''
def FCN():
    ## List of model layers
    inputs = Input((3, 64, 64))

    conv1 = Conv2D(16, (3, 3), activation='relu', padding='same', input_shape=(64, 64, 3))(inputs)
    m_pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2D(32, (3, 3), activation='relu', padding='same')(m_pool1)
    m_pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Conv2D(32, (3, 3), activation='relu', padding='same')(m_pool2)
    drop2 = Dropout(0.2)(conv3) # Drop some portion of features to prevent overfitting
    m_pool2 = MaxPooling2D(pool_size=(2, 2))(drop2)

    conv4 = Conv2D(64, (2, 2), activation='relu', padding='same')(m_pool2)

    flat = Flatten()(conv4) # Makes data 1D
    drop3 = Dropout(0.2)(flat)
    classif = Dense(2, activation='sigmoid')(drop3) # Final layer to classify

    ## Define the model start and end
    model = Model(inputs=inputs, outputs=classif)
    # Optimizer recommended Adadelta values (lr=0.01)
    model.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=['accuracy', f1])

    return model
'''
Model definition for the network structure of LeNet
Note: LeNet was designed to classify into 10 classes, but we are only performing binary classification
'''
def LeNet():
    ## List of model layers
    inputs = Input((3, 64, 64))

    conv1 = Conv2D(6, (5, 5), activation='relu', padding='valid', input_shape=(64, 64, 3))(inputs)
    m_pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2D(16, (5, 5), activation='relu', padding='valid')(m_pool1)
    m_pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    flat = Flatten()(m_pool2) # Makes data 1D

    dense1 = Dense(120, activation='relu')(flat) # Fully connected layer
    dense2 = Dense(84, activation='relu')(dense1) # Fully connected layer
    drop3 = Dropout(0.2)(dense2)
    classif = Dense(2, activation='sigmoid')(drop3) # Final layer to classify

    ## Define the model start and end
    model = Model(inputs=inputs, outputs=classif)
    model.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=['accuracy', f1])

    return model
'''
AlexNet architecture
'''
def AlexNet():
    inputs = Input(shape=(3, 64, 64))

    return model
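# Note: AlexNet() above is still a stub in this commit (it returns `model` without
# ever defining one). A minimal, downscaled sketch of what the body might look like
# for 64x64 channels-first inputs is given below; the layer sizes and the name
# AlexNet_sketch are illustrative assumptions, not code from this repository.
def AlexNet_sketch():
    inputs = Input(shape=(3, 64, 64))

    conv1 = Conv2D(48, (5, 5), strides=(2, 2), activation='relu', padding='same')(inputs)
    m_pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2D(128, (3, 3), activation='relu', padding='same')(m_pool1)
    m_pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Conv2D(192, (3, 3), activation='relu', padding='same')(m_pool2)
    conv4 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)
    m_pool3 = MaxPooling2D(pool_size=(2, 2))(conv4)

    flat = Flatten()(m_pool3)
    dense1 = Dense(256, activation='relu')(flat)
    drop1 = Dropout(0.5)(dense1)
    dense2 = Dense(256, activation='relu')(drop1)
    drop2 = Dropout(0.5)(dense2)
    classif = Dense(2, activation='sigmoid')(drop2)

    model = Model(inputs=inputs, outputs=classif)
    model.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=['accuracy', f1])
    return model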
def f1(y_true, y_pred):
    def recall(y_true, y_pred):
        """Recall metric.
@@ -110,14 +177,16 @@ lbl_train = to_categorical(lbl_train) # One hot encoding the labels
lbl_test = to_categorical(lbl_test)

## Define model
#model = CNN()
model = FCN()
#model = LeNet()
# svm_iclf = ImageClassifier(svm.SVC)
# tree_iclf = ImageClassifier(tree.DecisionTreeClassifier)
# naive_bayes_iclf = ImageClassifier(naive_bayes.GaussianNB)
# ensemble_iclf = ImageClassifier(ensemble.RandomForestClassifier)

## Define training parameters
epochs = 10 # an epoch is one forward pass and back propagation of all training data
epochs = 25 # an epoch is one forward pass and back propagation of all training data
batch_size = 150 # batch size - number of training examples used in one forward/backward pass
# (higher batch size uses more memory, smaller batch size takes more time)
#lrate = 0.01 # Learning rate of the model - controls magnitude of weight changes in training the NN
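# The fit/evaluate calls fall outside this hunk. A minimal sketch of how these
# parameters would typically be used (the img_train/img_test array names are
# assumptions based on the lbl_train/lbl_test names above):
history = model.fit(img_train, lbl_train, epochs=epochs, batch_size=batch_size,
                    validation_data=(img_test, lbl_test))
score = model.evaluate(img_test, lbl_test, batch_size=batch_size)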