
Commit

Feat: finish all parts
mvpcom committed Oct 13, 2017
1 parent 36d55bb commit 415492b
Showing 1 changed file with 62 additions and 6 deletions.
68 changes: 62 additions & 6 deletions main.py
@@ -33,7 +33,16 @@ def load_vgg(sess, vgg_path):
    vgg_layer4_out_tensor_name = 'layer4_out:0'
    vgg_layer7_out_tensor_name = 'layer7_out:0'

    return None, None, None, None, None
    tf.saved_model.loader.load(sess, [vgg_tag], vgg_path)

    graph = tf.get_default_graph()
    image_input = graph.get_tensor_by_name(vgg_input_tensor_name)
    keep_prob = graph.get_tensor_by_name(vgg_keep_prob_tensor_name)
    layer3_out = graph.get_tensor_by_name(vgg_layer3_out_tensor_name)
    layer4_out = graph.get_tensor_by_name(vgg_layer4_out_tensor_name)
    layer7_out = graph.get_tensor_by_name(vgg_layer7_out_tensor_name)

    return image_input, keep_prob, layer3_out, layer4_out, layer7_out
tests.test_load_vgg(load_vgg, tf)
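For reference, a minimal way to exercise load_vgg on its own might look like the sketch below. It assumes the pretrained VGG16 SavedModel has already been downloaded to './data/vgg' (the usual starter layout; the path is an assumption) and that load_vgg is the function defined above.

import os
import tensorflow as tf

with tf.Session() as sess:
    vgg_path = os.path.join('./data', 'vgg')  # assumed data_dir from the starter layout
    image_input, keep_prob, layer3, layer4, layer7 = load_vgg(sess, vgg_path)
    # Static shapes; batch and spatial dimensions are typically None (dynamic).
    for name, tensor in [('input', image_input), ('layer3_out', layer3),
                         ('layer4_out', layer4), ('layer7_out', layer7)]:
        print(name, tensor.get_shape().as_list())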


@@ -47,7 +56,31 @@ def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):
    :return: The Tensor for the last layer of output
    """
    # TODO: Implement function
    return None
    conv_1x1 = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, padding='same',
                                kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))  # preserve spatial information

    # let's start upsampling
    output = tf.layers.conv2d_transpose(conv_1x1, 512, 4, strides=(2, 2), padding='same',
                                        kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
    #print(output.get_shape())
    output = tf.add(output, vgg_layer4_out)  # skip connection from VGG layer 4 (512 channels)
    # 2, 2, 8
    output = tf.layers.conv2d_transpose(output, 256, 4, strides=(2, 2), padding='same',
                                        kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
    #print(output.get_shape())

    output = tf.add(output, vgg_layer3_out)  # skip connection from VGG layer 3 (256 channels)
    # final upsample by 8 back to the input resolution, one channel per class
    output = tf.layers.conv2d_transpose(output, num_classes, 8, strides=(8, 8), padding='same',
                                        kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
    #print(output.get_shape())

    #output = tf.add(output, vgg_layer4_out)
    # output = tf.layers.conv2d_transpose(output, num_classes, 4, strides=(8, 8), padding='same',
    #                                     kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
    #print(output.get_shape())
    output = tf.Print(output, [tf.shape(output)])  # debug: log the output shape at run time

    return output
tests.test_layers(layers)
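To see why the two tf.add skip connections line up, here is the shape arithmetic for the (160, 576) image_shape that run() uses below; a sketch assuming the standard VGG16 downsampling factors of 8, 16 and 32 for layer3_out, layer4_out and layer7_out.

# Decoder shape bookkeeping (assumes VGG16 strides of 8/16/32).
h, w = 160, 576
layer3 = (h // 8, w // 8)     # (20, 72), 256 channels
layer4 = (h // 16, w // 16)   # (10, 36), 512 channels
layer7 = (h // 32, w // 32)   # (5, 18)
# conv2d_transpose with padding='same' and stride s scales each spatial dim by s:
up1 = (2 * layer7[0], 2 * layer7[1])   # (10, 36)  -> matches layer4, so tf.add is valid
up2 = (2 * up1[0], 2 * up1[1])         # (20, 72)  -> matches layer3
up3 = (8 * up2[0], 8 * up2[1])         # (160, 576) -> back to the input resolution
print(up1, up2, up3)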


@@ -61,7 +94,11 @@ def optimize(nn_last_layer, correct_label, learning_rate, num_classes):
    :return: Tuple of (logits, train_op, cross_entropy_loss)
    """
    # TODO: Implement function
    return None, None, None
    # flatten to 2-D: one row per pixel, one column per class
    logits = tf.reshape(nn_last_layer, (-1, num_classes))
    correct_label = tf.reshape(correct_label, (-1, num_classes))
    cross_entropy_loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=correct_label))
    train_op = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cross_entropy_loss)
    return logits, train_op, cross_entropy_loss
tests.test_optimize(optimize)
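One thing worth noting about optimize: the kernel_regularizer arguments used in layers only record their L2 terms in tf.GraphKeys.REGULARIZATION_LOSSES; they are not folded into cross_entropy_loss automatically. If the regularization is meant to take effect, a variant of the body above could add them explicitly. A sketch, not what this commit does:

# Variant sketch: include the L2 penalties recorded by kernel_regularizer.
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
total_loss = cross_entropy_loss
if reg_losses:
    total_loss = total_loss + tf.add_n(reg_losses)
train_op = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(total_loss)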


@@ -80,11 +117,21 @@ def train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_l
    :param keep_prob: TF Placeholder for dropout keep probability
    :param learning_rate: TF Placeholder for learning rate
    """
    init = tf.global_variables_initializer()
    sess.run(init)
    # TODO: Implement function
    pass
    for epoch in range(epochs):
        for image, label in get_batches_fn(batch_size):
            #batchImages, batchLabels = get_batches_fn(batch_size)
            _, loss = sess.run([train_op, cross_entropy_loss],
                               feed_dict={correct_label: label,
                                          input_image: image,
                                          learning_rate: 1e-3,
                                          keep_prob: 0.7})
            print(loss)
tests.test_train_nn(train_nn)
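Since print(loss) above emits one line per batch, a common readability tweak is to report the mean loss per epoch instead. A small sketch of the same loop under the same signature, not part of this commit:

# Variant sketch: average the cross-entropy loss over each epoch.
for epoch in range(epochs):
    losses = []
    for image, label in get_batches_fn(batch_size):
        _, loss = sess.run([train_op, cross_entropy_loss],
                           feed_dict={correct_label: label, input_image: image,
                                      learning_rate: 1e-3, keep_prob: 0.7})
        losses.append(loss)
    print("Epoch {}/{}: mean loss {:.4f}".format(epoch + 1, epochs, sum(losses) / len(losses)))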



def run():
    num_classes = 2
    image_shape = (160, 576)
@@ -98,23 +145,32 @@ def run():
    # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    # https://www.cityscapes-dataset.com/

    epochs = 100
    batch_size = 20
    with tf.Session() as sess:
        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)

        correct_label = tf.placeholder(tf.float32, name='correct_label')
        learning_rate = tf.placeholder(tf.float32, name='learning_rate')

        # OPTIONAL: Augment Images for better results
        # https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network

        # TODO: Build NN using load_vgg, layers, and optimize function
        input_image, keep_prob, vgg_layer3_out, vgg_layer4_out, vgg_layer7_out = load_vgg(sess, vgg_path)
        nn_last_layer = layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes)
        logits, train_op, cross_entropy_loss = optimize(nn_last_layer, correct_label, learning_rate, num_classes)

        # TODO: Train NN using the train_nn function
        train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image,
                 correct_label, keep_prob, learning_rate)

        # TODO: Save inference data using helper.save_inference_samples
        # helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)

        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)
        # OPTIONAL: Apply the trained model to a video


