Commit

File: upload Jupyter notebook file
mvpcom committed Oct 13, 2017
1 parent 98fec34 commit 2fc1dd1
Showing 1 changed file with 270 additions and 0 deletions.
270 changes: 270 additions & 0 deletions p2SemanticSegmentation.ipynb
@@ -0,0 +1,270 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"### Design and Programming by Mojtaba Valipour @ Shiraz University, Udacity"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Copyright is important @2017\n"
]
}
],
"source": [
"'''\n",
"References:\n",
" - \n",
" - https://www.tensorflow.org\n",
"'''\n",
"print('Copyright is important @2017')"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[name: \"/cpu:0\"\n",
"device_type: \"CPU\"\n",
"memory_limit: 268435456\n",
"locality {\n",
"}\n",
"incarnation: 16308118712357310879\n",
", name: \"/gpu:0\"\n",
"device_type: \"GPU\"\n",
"memory_limit: 12051077530\n",
"locality {\n",
" bus_id: 1\n",
"}\n",
"incarnation: 16559124723434357649\n",
"physical_device_desc: \"device: 0, name: GeForce GTX TITAN X, pci bus id: 0000:0a:00.0\"\n",
", name: \"/gpu:1\"\n",
"device_type: \"GPU\"\n",
"memory_limit: 12051077530\n",
"locality {\n",
" bus_id: 1\n",
"}\n",
"incarnation: 10025864280914671720\n",
"physical_device_desc: \"device: 1, name: GeForce GTX TITAN X, pci bus id: 0000:09:00.0\"\n",
", name: \"/gpu:2\"\n",
"device_type: \"GPU\"\n",
"memory_limit: 12049085236\n",
"locality {\n",
" bus_id: 1\n",
"}\n",
"incarnation: 10544825921621946364\n",
"physical_device_desc: \"device: 2, name: GeForce GTX TITAN X, pci bus id: 0000:06:00.0\"\n",
", name: \"/gpu:3\"\n",
"device_type: \"GPU\"\n",
"memory_limit: 12047777792\n",
"locality {\n",
" bus_id: 1\n",
"}\n",
"incarnation: 9239224823666292773\n",
"physical_device_desc: \"device: 3, name: GeForce GTX TITAN X, pci bus id: 0000:05:00.0\"\n",
"]\n"
]
}
],
"source": [
"import tensorflow as tf\n",
"import os\n",
"%matplotlib inline\n",
"#os.environ[\"CUDA_VISIBLE_DEVICES\"] = '0,1,2'\n",
"from tensorflow.python.client import device_lib\n",
"print(device_lib.list_local_devices())"
]
},
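{
"cell_type": "markdown",
"metadata": {},
"source": [
"The listing above shows one CPU and four GeForce GTX TITAN X GPUs. The next cell is an added sketch (not part of the original notebook) of how TensorFlow 1.x can be pinned to a subset of those devices, either through the CUDA_VISIBLE_DEVICES variable hinted at by the commented-out line above, or with an explicit tf.device scope; the device indices are assumptions taken from the listing."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Added sketch, not in the original commit: restrict TensorFlow 1.x to selected GPUs.\n",
"# Assumes the devices printed above (/gpu:0 .. /gpu:3); adjust the indices as needed.\n",
"import os\n",
"os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # must be set before TensorFlow initializes the GPUs\n",
"\n",
"import tensorflow as tf\n",
"\n",
"# Alternatively, place individual ops on an explicit device:\n",
"with tf.device('/gpu:0'):\n",
"    a = tf.constant([1.0, 2.0], name='a')\n",
"    b = tf.constant([3.0, 4.0], name='b')\n",
"    c = a + b\n",
"\n",
"# allow_soft_placement lets ops without a GPU kernel fall back to the CPU.\n",
"config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)\n",
"with tf.Session(config=config) as sess:\n",
"    print(sess.run(c))"
]
},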
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"TensorFlow Version: 1.3.0\n",
"Default GPU Device: /gpu:0\n",
"Tests Passed\n",
"Tests Passed\n",
"Tests Passed\n",
"INFO:tensorflow:Restoring parameters from b'./data/vgg/variables/variables'\n"
]
}
],
"source": [
"import os.path\n",
"import tensorflow as tf\n",
"import helper\n",
"import warnings\n",
"from distutils.version import LooseVersion\n",
"import project_tests as tests\n",
"\n",
"\n",
"# Check TensorFlow Version\n",
"assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__)\n",
"print('TensorFlow Version: {}'.format(tf.__version__))\n",
"\n",
"# Check for a GPU\n",
"if not tf.test.gpu_device_name():\n",
" warnings.warn('No GPU found. Please use a GPU to train your neural network.')\n",
"else:\n",
" print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))\n",
"\n",
"\n",
"def run():\n",
" num_classes = 2\n",
" image_shape = (160, 576)\n",
" data_dir = './data'\n",
" runs_dir = './runs'\n",
" tests.test_for_kitti_dataset(data_dir)\n",
"\n",
" # Download pretrained vgg model\n",
" helper.maybe_download_pretrained_vgg(data_dir)\n",
"\n",
" # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.\n",
" # You'll need a GPU with at least 10 teraFLOPS to train on.\n",
" # https://www.cityscapes-dataset.com/\n",
" epochs = 25\n",
" batch_size = 20\n",
" with tf.Session() as sess:\n",
" # Path to vgg model\n",
" vgg_path = os.path.join(data_dir, 'vgg')\n",
" # Create function to get batches\n",
" get_batches_fn = helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)\n",
"\n",
" correct_label = tf.placeholder(tf.float32, name='correct_label')\n",
" learning_rate = tf.placeholder(tf.float32, name='learning_rate')\n",
" \n",
" # OPTIONAL: Augment Images for better results\n",
" # https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network\n",
"\n",
" # TODO: Build NN using load_vgg, layers, and optimize function\n",
" input_image, keep_prob, vgg_layer3_out, vgg_layer4_out, vgg_layer7_out = load_vgg(sess, vgg_path)\n",
" nn_last_layer = layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes)\n",
" logits, train_op, cross_entropy_loss = optimize(nn_last_layer, correct_label, learning_rate, num_classes)\n",
"\n",
" # TODO: Train NN using the train_nn function\n",
" train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image,\n",
" correct_label, keep_prob, learning_rate)\n",
"\n",
" # TODO: Save inference data using helper.save_inference_samples\n",
" # helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)\n",
" helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)\n",
" # OPTIONAL: Apply the trained model to a video\n",
"\n",
"\n",
"if __name__ == '__main__':\n",
" run()"
]
},
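{
"cell_type": "markdown",
"metadata": {},
"source": [
"run() above depends on load_vgg, layers, optimize, and train_nn, none of which are defined in this notebook (they were presumably defined in an earlier kernel session or in a separate module). The next cell is only a hedged sketch of how these functions are typically written for an FCN built on VGG16; the 'vgg16' SavedModel tag, the tensor names ('image_input:0', 'keep_prob:0', 'layer3_out:0', 'layer4_out:0', 'layer7_out:0'), and the keep-probability and learning-rate values are assumptions, not taken from this commit."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Hedged sketch, NOT from the original commit: plausible definitions for the\n",
"# helpers that run() expects. Tensor names and hyperparameters are assumptions.\n",
"import tensorflow as tf\n",
"\n",
"\n",
"def load_vgg(sess, vgg_path):\n",
"    # Load the pretrained VGG16 SavedModel and return the tensors run() needs.\n",
"    tf.saved_model.loader.load(sess, ['vgg16'], vgg_path)\n",
"    graph = tf.get_default_graph()\n",
"    image_input = graph.get_tensor_by_name('image_input:0')\n",
"    keep_prob = graph.get_tensor_by_name('keep_prob:0')\n",
"    layer3_out = graph.get_tensor_by_name('layer3_out:0')\n",
"    layer4_out = graph.get_tensor_by_name('layer4_out:0')\n",
"    layer7_out = graph.get_tensor_by_name('layer7_out:0')\n",
"    return image_input, keep_prob, layer3_out, layer4_out, layer7_out\n",
"\n",
"\n",
"def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):\n",
"    # FCN-8 style decoder: 1x1 convolutions, transposed convolutions, skip connections.\n",
"    l7 = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, padding='same')\n",
"    up1 = tf.layers.conv2d_transpose(l7, num_classes, 4, strides=(2, 2), padding='same')\n",
"    l4 = tf.layers.conv2d(vgg_layer4_out, num_classes, 1, padding='same')\n",
"    up2 = tf.layers.conv2d_transpose(tf.add(up1, l4), num_classes, 4, strides=(2, 2), padding='same')\n",
"    l3 = tf.layers.conv2d(vgg_layer3_out, num_classes, 1, padding='same')\n",
"    return tf.layers.conv2d_transpose(tf.add(up2, l3), num_classes, 16, strides=(8, 8), padding='same')\n",
"\n",
"\n",
"def optimize(nn_last_layer, correct_label, learning_rate, num_classes):\n",
"    # Flatten logits and labels, then minimize pixel-wise cross entropy with Adam.\n",
"    logits = tf.reshape(nn_last_layer, (-1, num_classes))\n",
"    labels = tf.reshape(correct_label, (-1, num_classes))\n",
"    cross_entropy_loss = tf.reduce_mean(\n",
"        tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))\n",
"    train_op = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy_loss)\n",
"    return logits, train_op, cross_entropy_loss\n",
"\n",
"\n",
"def train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss,\n",
"             input_image, correct_label, keep_prob, learning_rate):\n",
"    # Plain epoch/batch loop; keep probability and learning rate are placeholder values.\n",
"    sess.run(tf.global_variables_initializer())\n",
"    for epoch in range(epochs):\n",
"        for images, labels in get_batches_fn(batch_size):\n",
"            _, loss = sess.run([train_op, cross_entropy_loss],\n",
"                               feed_dict={input_image: images,\n",
"                                          correct_label: labels,\n",
"                                          keep_prob: 0.5,\n",
"                                          learning_rate: 1e-4})\n",
"        print('Epoch {}/{}: loss = {:.4f}'.format(epoch + 1, epochs, loss))"
]
},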
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"num_classes = 2\n",
"image_shape = (160, 576)\n",
"data_dir = './data'\n",
"runs_dir = './runs'\n",
"tests.test_for_kitti_dataset(data_dir)\n",
"\n",
"# Download pretrained vgg model\n",
"helper.maybe_download_pretrained_vgg(data_dir)\n",
"\n",
"# OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.\n",
"# You'll need a GPU with at least 10 teraFLOPS to train on.\n",
"# https://www.cityscapes-dataset.com/\n",
"epochs = 25\n",
"batch_size = 20\n",
"with tf.Session() as sess:\n",
" # Path to vgg model\n",
" vgg_path = os.path.join(data_dir, 'vgg')\n",
" # Create function to get batches\n",
" get_batches_fn = helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)\n",
"\n",
" correct_label = tf.placeholder(tf.float32, name='correct_label')\n",
" learning_rate = tf.placeholder(tf.float32, name='learning_rate')\n",
"\n",
" # OPTIONAL: Augment Images for better results\n",
" # https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network\n",
"\n",
" # TODO: Build NN using load_vgg, layers, and optimize function\n",
" input_image, keep_prob, vgg_layer3_out, vgg_layer4_out, vgg_layer7_out = load_vgg(sess, vgg_path)\n",
" nn_last_layer = layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes)\n",
" logits, train_op, cross_entropy_loss = optimize(nn_last_layer, correct_label, learning_rate, num_classes)\n",
"\n",
" # TODO: Train NN using the train_nn function\n",
" train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image,\n",
" correct_label, keep_prob, learning_rate)\n",
"\n",
" # TODO: Save inference data using helper.save_inference_samples\n",
" # helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)\n",
" helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)\n",
" # OPTIONAL: Apply the trained model to a video\n",
" \n",
" saver = tf.train.Saver()\n",
" save_path = saver.save(sess, \"/tmp/model_01_good.ckpt\")"
]
},
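{
"cell_type": "markdown",
"metadata": {},
"source": [
"The previous cell saves the trained variables to /tmp/model_01_good.ckpt. The cell below is an added sketch (not part of the original commit) of how that checkpoint could later be restored for inference; it assumes the same graph has already been rebuilt in the current session, for example via load_vgg, layers, and optimize as above."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Added sketch, not in the original commit: reload the checkpoint saved above.\n",
"# Assumes the graph (load_vgg + layers + optimize) has been rebuilt in this session,\n",
"# so tf.train.Saver() can find the variables to restore.\n",
"saver = tf.train.Saver()\n",
"with tf.Session() as sess:\n",
"    saver.restore(sess, '/tmp/model_01_good.ckpt')\n",
"    print('Model restored from /tmp/model_01_good.ckpt')\n",
"    # The restored logits, keep_prob, and input_image tensors can then be passed to\n",
"    # helper.save_inference_samples exactly as in the training cell."
]
},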
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
}
],
"metadata": {
"anaconda-cloud": {},
"kernelspec": {
"display_name": "Python [conda env:udacityProjects]",
"language": "python",
"name": "conda-env-udacityProjects-py"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.4"
}
},
"nbformat": 4,
"nbformat_minor": 1
}
