# ResearchMethods/mini_proj/waldo_model.py
import numpy as np
import sys
import time as t
from keras.models import Model
from keras.layers import Input, Dropout, merge
from keras.layers import Convolution2D, MaxPooling2D, UpSampling2D
from keras.optimizers import Adadelta
from keras.callbacks import ModelCheckpoint
from keras import backend as K
K.set_image_dim_ordering('th')
np.random.seed(7)
'''
Model definition
'''
def FCN():
    ## Sample structure from the original sketch; the middle of the network
    ## (conv3-conv6 and pools 3-6, feeding m_pool6/conv16x below) was elided
    # inputs = Input((1, w, h))
    # conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
    # conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
    # m_pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    # conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(m_pool1)
    # drop1 = Dropout(0.2)(conv2)
    # conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(drop1)
    # m_pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    # conv7 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(m_pool6)
    # conv7 = Convolution2D(1, 3, 3, activation='relu', border_mode='same')(conv7)
    # up8x = UpSampling2D(size=(2, 2))(conv16x)
    # merge8x = merge([up8x, m_pool3], mode='concat', concat_axis=1)
    # conv8x = Convolution2D(1, 1, 1, activation='relu', border_mode='same')(merge8x)
    # up4x = UpSampling2D(size=(2, 2))(conv8x)
    # merge4x = merge([up4x, m_pool2], mode='concat', concat_axis=1)
    # conv4x = Convolution2D(1, 1, 1, activation='relu', border_mode='same')(merge4x)
    # up_out = UpSampling2D(size=(4, 4))(conv4x)
    # model = Model(input=inputs, output=up_out)
    # # lr=0.01 is the author's choice; Keras' recommended Adadelta lr is 1.0
    # model.compile(optimizer=Adadelta(lr=0.01), loss='categorical_crossentropy', metrics=['accuracy'])

    # Minimal runnable stand-in (an assumption, NOT the original network):
    # one down/up level following the sample's pattern. The input size is
    # taken from im_train, which is loaded below before FCN() is called;
    # tile dimensions are assumed even so pooling divides them cleanly.
    _, channels, h, w = im_train.shape
    inputs = Input((channels, h, w))
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
    m_pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(m_pool1)
    drop1 = Dropout(0.2)(conv2)
    up1 = UpSampling2D(size=(2, 2))(drop1)
    merge1 = merge([up1, conv1], mode='concat', concat_axis=1)  # skip connection
    out = Convolution2D(1, 1, 1, activation='sigmoid', border_mode='same')(merge1)
    model = Model(input=inputs, output=out)
    # Single-channel heatmap output, so binary_crossentropy stands in for the
    # sketch's categorical_crossentropy; the author's Adadelta(lr=0.01) is kept
    model.compile(optimizer=Adadelta(lr=0.01), loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model
## Open data
im_train = np.load('Waldo_train_data.npy')
lbl_train = np.load('Waldo_train_lbl.npy') # assumed filename; original mistakenly re-loaded the test labels here
im_test = np.load('Waldo_test_data.npy')
lbl_test = np.load('Waldo_test_lbl.npy')
## Define model
model = FCN()
## Define training parameters
epochs = 40
batch_size = 5
#lrate = 0.01
#decay = lrate/epochs
# epoch - one forward pass and one backward pass of all training data
# batch size - number of training example used in one forward/backward pass
# (higher batch size uses more memory)
# learning rate - controls magnitude of weight changes in training the NN
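# For example, if im_train held 200 tiles (a hypothetical count), each epoch
# with batch_size = 5 would perform 200 / 5 = 40 weight updates.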
## Train model
# Purely superficial output
sys.stdout.write("\nFitting model")
sys.stdout.flush()
for _ in range(3):
    t.sleep(0.8)
    sys.stdout.write('.')
    sys.stdout.flush()
print()
# Outputs the model structure
for i, layer in enumerate(model.layers):
    print("Layer {}: {}".format(i, layer.output))
print('-'*30)
filepath = "checkpoint.hdf5" # Defines the model checkpoint file
checkpoint = ModelCheckpoint(filepath, verbose=1, save_best_only=False) # Defines the checkpoint process
callbacks_list = [checkpoint] # Adds the checkpoint process to the list of actions performed during training
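# To keep only the best weights seen so far, ModelCheckpoint can monitor a
# validation metric instead (a sketch; needs validation data during fit):
#   checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1,
#                                save_best_only=True)
#   model.fit(..., validation_split=0.1, ...)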
start = t.time() # Records time before training
# Fits model based on initial parameters
model.fit(im_train, lbl_train, nb_epoch=epochs, batch_size=batch_size,
          verbose=2, shuffle=True, callbacks=callbacks_list)
# If a ValueError is raised here, the network's output shape and the shape
# of lbl_train probably don't match
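# A pre-flight check (sketch) can catch the mismatch before fitting:
#   assert model.output_shape[1:] == lbl_train.shape[1:], \
#       "network output shape does not match the label shape"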
end = t.time() # Records time after training
print('Training Duration: {}'.format(end-start))
print('-'*30)
print("*** Saving FCN model and weights ***")
'''
# *To save model and weights separately:
# save model as json file
model_json = model.to_json()
with open("UNet_model.json", "w") as json_file:
    json_file.write(model_json)
# save weights as h5 file
model.save_weights("UNet_weights.h5")
print("\nModel weights and structure have been saved.\n")
'''
# Save model as one file
model.save('Waldo.h5')
print("\nModel weights and structure have been saved.\n")
## Testing the model
# Test data (im_test, lbl_test) was already loaded alongside the training data
# Show data stats
print('*'*30)
print('Test images: {}'.format(im_test.shape))
print('Test labels: {}'.format(lbl_test.shape))
print('*'*30)
start = t.time()
# Passes the dataset through the model
pred_lbl = model.predict(im_test, verbose=1, batch_size=batch_size)
end = t.time()
print("Images generated in {} seconds".format(end - start))
np.save('Test/predicted_results.npy', pred_lbl)