Forums - Tensorflow MNIST example

Tensorflow MNIST example
babbage
Join Date: 21 Feb 17
Posts: 9
Posted: Fri, 2017-08-04 04:14

Is there an example with TensorFlow Python code showing how to create a graph that is compatible with the "snpe-tensorflow-to-dlc" tool?

These rules are found in the documentation, but a code example would be easier to learn from:

  1. All nodes belonging to a layer must be defined in a unique TensorFlow scope.
  2. A node can only belong to a single layer.

The snpe-tensorflow-to-dlc converter by default uses a strict layer resolution algorithm which requires all nodes in the TensorFlow graph to be resolved to a layer. If your graph has nodes which are not related to a layer, such as training nodes, you may be required to use the --allow_unconsumed_nodes converter option.
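From my reading, rule 1 would look something like the snippet below: every op that makes up a layer (the convolution, the bias add, and the relu) is created inside one unique scope. This is just my own sketch with made-up names, not something taken from the documentation.

import tensorflow as tf

x_image = tf.placeholder(tf.float32, [None, 28, 28, 1], name="input_node")

with tf.name_scope('conv1'):
  # All ops belonging to the conv1 layer live in this single scope.
  W_conv1 = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1))
  b_conv1 = tf.Variable(tf.constant(0.1, shape=[32]))
  conv = tf.nn.conv2d(x_image, W_conv1, strides=[1, 1, 1, 1], padding='SAME')
  h_conv1 = tf.nn.relu(conv + b_conv1)

Is that the kind of structure the converter expects?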

I saved the mnist_deep.py example from: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples...

as a protobuf file and was unable to convert the .pb file to .dlc.

babbage
Join Date: 21 Feb 17
Posts: 9
Posted: Fri, 2017-08-04 04:21

This is the Python code I am using, with just a few modifications: naming the input and output nodes and saving a .pb file.

# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
 
"""A deep MNIST classifier using convolutional layers.
 
See extensive documentation at
https://www.tensorflow.org/get_started/mnist/pros
"""
# Disable linter warnings to maintain consistency with tutorial.
# pylint: disable=invalid-name
# pylint: disable=g-bad-import-order
 
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
 
import argparse
import sys
import tempfile
 
from tensorflow.examples.tutorials.mnist import input_data
 
import tensorflow as tf
 
FLAGS = None
 
 
def deepnn(x):
  """deepnn builds the graph for a deep net for classifying digits.
 
  Args:
    x: an input tensor with the dimensions (N_examples, 784), where 784 is the
    number of pixels in a standard MNIST image.
 
  Returns:
    A tuple (y, keep_prob). y is a tensor of shape (N_examples, 10), with values
    equal to the logits of classifying the digit into one of 10 classes (the
    digits 0-9). keep_prob is a scalar placeholder for the probability of
    dropout.
  """
  # Reshape to use within a convolutional neural net.
  # Last dimension is for "features" - there is only one here, since images are
  # grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
  with tf.name_scope('reshape'):
    x_image = tf.reshape(x, [-1, 28, 28, 1])
 
  # First convolutional layer - maps one grayscale image to 32 feature maps.
  with tf.name_scope('conv1'):
    W_conv1 = weight_variable([5, 5, 1, 32])
    b_conv1 = bias_variable([32])
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
 
  # Pooling layer - downsamples by 2X.
  with tf.name_scope('pool1'):
    h_pool1 = max_pool_2x2(h_conv1)
 
  # Second convolutional layer -- maps 32 feature maps to 64.
  with tf.name_scope('conv2'):
    W_conv2 = weight_variable([5, 5, 32, 64])
    b_conv2 = bias_variable([64])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
 
  # Second pooling layer.
  with tf.name_scope('pool2'):
    h_pool2 = max_pool_2x2(h_conv2)
 
  # Fully connected layer 1 -- after 2 rounds of downsampling, our 28x28 image
  # is down to 7x7x64 feature maps -- maps this to 1024 features.
  with tf.name_scope('fc1'):
    W_fc1 = weight_variable([7 * 7 * 64, 1024])
    b_fc1 = bias_variable([1024])
 
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
 
  # Dropout - controls the complexity of the model, prevents co-adaptation of
  # features.
  with tf.name_scope('dropout'):
    keep_prob = tf.placeholder(tf.float32)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
 
  # Map the 1024 features to 10 classes, one for each digit
  with tf.name_scope('fc2'):
    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
 
    y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
  return y_conv, keep_prob
 
 
def conv2d(x, W):
  """conv2d returns a 2d convolution layer with full stride."""
  return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
 
 
def max_pool_2x2(x):
  """max_pool_2x2 downsamples a feature map by 2X."""
  return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1], padding='SAME')
 
 
def weight_variable(shape):
  """weight_variable generates a weight variable of a given shape."""
  initial = tf.truncated_normal(shape, stddev=0.1)
  return tf.Variable(initial)
 
 
def bias_variable(shape):
  """bias_variable generates a bias variable of a given shape."""
  initial = tf.constant(0.1, shape=shape)
  return tf.Variable(initial)
 
 
def main(_):
  # Import data
  mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
 
  # Create the model
  x = tf.placeholder(tf.float32, [None, 784], name="input_node")
 
  # Define loss and optimizer
  y_ = tf.placeholder(tf.float32, [None, 10], name="output_node")
 
  # Build the graph for the deep net
  y_conv, keep_prob = deepnn(x)
 
  with tf.name_scope('loss'):
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_,
                                                            logits=y_conv)
  cross_entropy = tf.reduce_mean(cross_entropy)
 
  with tf.name_scope('adam_optimizer'):
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
 
  with tf.name_scope('accuracy'):
    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    correct_prediction = tf.cast(correct_prediction, tf.float32)
  accuracy = tf.reduce_mean(correct_prediction)
 
  graph_location = tempfile.mkdtemp()
  print('Saving graph to: %s' % graph_location)
  train_writer = tf.summary.FileWriter(graph_location)
 
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    train_writer.add_graph(tf.get_default_graph())
    for i in range(1000):
      batch = mnist.train.next_batch(50)
      if i % 100 == 0:
        train_accuracy = accuracy.eval(feed_dict={
            x: batch[0], y_: batch[1], keep_prob: 1.0})
        print('step %d, training accuracy %g' % (i, train_accuracy))
      train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
 
    print('test accuracy %g' % accuracy.eval(feed_dict={
        x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
    tf.train.write_graph(tf.get_default_graph(), '.', 'mnist.pb', False)
if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument('--data_dir', type=str,
                      default='/tmp/tensorflow/mnist/input_data',
                      help='Directory for storing input data')
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
babbage
Join Date: 21 Feb 17
Posts: 9
Posted: Fri, 2017-08-04 04:22

And this is the command I use to convert to DLC:

snpe-tensorflow-to-dlc --graph mnist.pb --input_dim "input_node" 28,28,1 --dlc test.dlc --out_node "output_node" --allow_unconsumed_nodes

I tried with and without the --allow_unconsumed_nodes option.

oferr Moderator
Join Date: 26 Jul 17
Location: San Diego
Posts: 22
Posted: Fri, 2017-08-04 14:30

In the documentation, under Examples Tutorial -> Tutorials Setup, there is an explanation of how to get the Inception v3 example from TensorFlow.

https://storage.googleapis.com/download.tensorflow.org/models/inception_dec_2015.zip

You can use the Inception v3 Python file as an example of a model that can be converted.

Also, it would be helpful if you could provide the error that you receive from the converter.
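
One more thing worth checking: tf.train.write_graph by itself only serializes the graph structure; the trained variable values are not stored in the .pb. The converter generally needs a frozen graph in which variables have been folded into constants. Roughly something like the snippet below (a sketch only, not tested, and "your_output_node" is a placeholder for the actual output op of your network):

from tensorflow.python.framework import graph_util

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # ... run training here ...
    # Fold the trained variables into constants so the .pb is self-contained.
    frozen_graph_def = graph_util.convert_variables_to_constants(
        sess, sess.graph_def, ['your_output_node'])
    tf.train.write_graph(frozen_graph_def, '.', 'mnist_frozen.pb', as_text=False)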

babbage
Join Date: 21 Feb 17
Posts: 9
Posted: Mon, 2017-08-07 06:11

Thanks for your reply. 

Edit: removed stackoverflow question

I am interested in the Python script used to generate the Protobuf file from the archive in the link you have provided.

babbage
Join Date: 21 Feb 17
Posts: 9
Posted: Tue, 2017-08-08 02:14

I've managed to create a Protobuf file with the script below. However, when I run the DLC conversion tool I get the following error:

2017-08-08 12:48:02.866322: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE4.1 instructions, but these are available on your machine and could speed up CPU computations.
2017-08-08 12:48:02.866820: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE4.2 instructions, but these are available on your machine and could speed up CPU computations.
2017-08-08 12:48:02.867320: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use AVX instructions, but these are available on your machine and could speed up CPU computations.
2017-08-08 12:48:02.867320: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use AVX2 instructions, but these are available on your machine and could speed up CPU computations.
2017-08-08 12:48:02.867320: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use FMA instructions, but these are available on your machine and could speed up CPU computations.
2017-08-08 12:48:03,018 - 125 - ERROR - Encountered Error: ERROR_TF_UNABLE_TO_FIND_OUTPUT_OPERATION: Unable to find output operations for op name: "MatMul"
op: "MatMul"
input: "Reshape"
input: "fc1/Variable/read"
attr {
  key: "T"
  value {
    type: DT_FLOAT
  }
}
attr {
  key: "transpose_a"
  value {
    b: false
  }
}
attr {
  key: "transpose_b"
  value {
    b: false
  }
}
 in scope [<tf.Operation 'MatMul' type=MatMul>]
Traceback (most recent call last):
  File "/home/shaurya/snpe-1.2.2/bin/x86_64-linux-clang/snpe-tensorflow-to-dlc", line 119, in main
    converter.convert(args.dlc, args.model_version, converter_command)
  File "/home/shaurya/snpe-1.2.2/lib/python/converters/tensorflow/converter.py", line 324, in convert
    self._convert_layers()
  File "/home/shaurya/snpe-1.2.2/lib/python/converters/tensorflow/converter.py", line 353, in _convert_layers
    self._resolve_layers_from_scope(scope_name, scope_ops)
  File "/home/shaurya/snpe-1.2.2/lib/python/converters/tensorflow/converter.py", line 373, in _resolve_layers_from_scope
    candidate_descriptor = resolver.resolve_layer(scope_name, remaining_ops, graph_helper)
  File "/home/shaurya/snpe-1.2.2/lib/python/converters/tensorflow/layers/fullyconnected.py", line 55, in resolve_layer
    output_ops = GraphHelper.get_op_outputs_in_scope(matmul_op, scope_ops)
  File "/home/shaurya/snpe-1.2.2/lib/python/converters/tensorflow/util.py", line 191, in get_op_outputs_in_scope
    code_to_message.get_message('ERROR_TF_UNABLE_TO_FIND_OUTPUT_OPERATION')(operation, scope_ops))
OperationNotFoundError: ERROR_TF_UNABLE_TO_FIND_OUTPUT_OPERATION: Unable to find output operations for op name: "MatMul"
op: "MatMul"
input: "Reshape"
input: "fc1/Variable/read"
attr {
  key: "T"
  value {
    type: DT_FLOAT
  }
}
attr {
  key: "transpose_a"
  value {
    b: false
  }
}
attr {
  key: "transpose_b"
  value {
    b: false
  }
}
 in scope [<tf.Operation 'MatMul' type=MatMul>]
 
The Python script I used:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
 
import argparse
import sys
import tempfile
 
import os
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.python.tools import freeze_graph
 
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.tools import freeze_graph
from tensorflow.python.training import saver as saver_lib
 
import tensorflow as tf
 
FLAGS = None
 
 
def deepnn(x_image):
    with tf.name_scope('conv1'):
        W_conv1 = weight_variable([5, 5, 1, 32])
        b_conv1 = bias_variable([32])
        h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
 
    # Pooling layer - downsamples by 2X.
    with tf.name_scope('pool1'):
        h_pool1 = max_pool_2x2(h_conv1)
 
    # Second convolutional layer -- maps 32 feature maps to 64.
    with tf.name_scope('conv2'):
        W_conv2 = weight_variable([5, 5, 32, 64])
        b_conv2 = bias_variable([64])
        h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
 
    # Second pooling layer.
    with tf.name_scope('pool2'):
        h_pool2 = max_pool_2x2(h_conv2)
 
    # Fully connected layer 1 -- after 2 rounds of downsampling, our 28x28 image
    # is down to 7x7x64 feature maps -- maps this to 1024 features.
    with tf.name_scope('fc1'):
        W_fc1 = weight_variable([7 * 7 * 64, 1024])
        b_fc1 = bias_variable([1024])
 
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
 
    # Dropout - controls the complexity of the model, prevents co-adaptation of
    # features.
    with tf.name_scope('dropout'):
        keep_prob = tf.placeholder(tf.float32)
        h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
 
    # Map the 1024 features to 10 classes, one for each digit
    with tf.name_scope('fc2'):
        W_fc2 = weight_variable([1024, 10])
        b_fc2 = bias_variable([10])
 
    y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
    return y_conv, keep_prob
 
 
def conv2d(x, W):
    """conv2d returns a 2d convolution layer with full stride."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
 
 
def max_pool_2x2(x):
    """max_pool_2x2 downsamples a feature map by 2X."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1], padding='SAME')
 
 
def weight_variable(shape):
    """weight_variable generates a weight variable of a given shape."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
 
 
def bias_variable(shape):
    """bias_variable generates a bias variable of a given shape."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
 
 
 
def main(_):
    # Import data
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
 
    # Create the model
    x = tf.placeholder(tf.float32, [None, 784])
 
    # with tf.name_scope('reshape'):
    x_image = tf.reshape(x, [-1, 28, 28, 1], name="input_node")
 
    # Define loss and optimizer
    y_ = tf.placeholder(tf.float32, [None, 10])
 
 
    # Build the graph for the deep net
    y_conv, keep_prob = deepnn(x_image)
 
    y_output = tf.nn.softmax(y_conv, name="softmax_output")
 
    with tf.name_scope('loss'):
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_,
                                                            logits=y_conv)
    cross_entropy = tf.reduce_mean(cross_entropy)
 
    with tf.name_scope('adam_optimizer'):
        train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
 
    with tf.name_scope('accuracy'):
        correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
        correct_prediction = tf.cast(correct_prediction, tf.float32)
 
    accuracy = tf.reduce_mean(correct_prediction)
 
 
    temp_dir = "mnist"
    input_graph_name = "mnist.pb"
    output_graph_name = "mnist_frozen.pb"
    checkpoint_prefix = os.path.join(temp_dir, "saved_checkpoint")
    checkpoint_state_name = "checkpoint_state"
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(1):
            batch = mnist.train.next_batch(50)
            train_accuracy = accuracy.eval(feed_dict={
                x: batch[0], y_: batch[1], keep_prob: 1.0}, session=sess)
 
            train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5}, session=sess)
 
            if i % 50 == 0:
                print('step %d, training accuracy %g' % (i, train_accuracy))
 
                print('test accuracy %g' % accuracy.eval(feed_dict={
                    x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}, session=sess))
 
        output = sess.run(y_output, feed_dict={x:mnist.test.images, keep_prob:1.0})
 
        saver = saver_lib.Saver()
        checkpoint_path = saver.save(sess, checkpoint_prefix, global_step=0, latest_filename=checkpoint_state_name)
 
        graph_io.write_graph(sess.graph, temp_dir, input_graph_name)
 
 
    tf.reset_default_graph()
    with session.Session() as sess:
        saver = tf.train.import_meta_graph(os.path.join(temp_dir, "saved_checkpoint-0.meta"))
        saver.restore(sess, os.path.join(temp_dir, "saved_checkpoint-0"))
        output_graph_def = sess.graph_def
        # for node in output_graph_def.node:
            # print(node)
        # output_node = sess.graph.get_tensor_by_name("softmax_output")
        # output = sess.run(output_node)
 
        input_graph_path = os.path.join(temp_dir, input_graph_name)
        input_saver_def_path = ""
        input_binary = False
        output_node_names = "softmax_output"
        restore_op_name = "save/restore_all"
        filename_tensor_name = "save/Const:0"
        output_graph_path = os.path.join(temp_dir, output_graph_name)
        clear_devices = False
        input_meta_graph = False
 
 
        freeze_graph.freeze_graph(
            input_graph_path, input_saver_def_path, input_binary, checkpoint_path,
            output_node_names, restore_op_name, filename_tensor_name,
            output_graph_path, clear_devices, "", input_meta_graph)
 
 
 
 
 
if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument('--data_dir', type=str,
                      default='/tmp/tensorflow/mnist/input_data',
                      help='Directory for storing input data')
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)

The conversion tool command I am using is:

snpe-tensorflow-to-dlc --graph mnist_frozen.pb --input_dim input_node 28,28,1 --dlc wtf.dlc --out_node softmax_output
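
Looking at the op in the error message, the MatMul has inputs "Reshape" and "fc1/Variable/read", so the reshape and matmul themselves are at the top level of the graph rather than inside the fc1 scope. In my script above those lines are indeed indented outside the with tf.name_scope('fc1') block (and the final matmul is outside fc2), so if I understand the strict layer resolution correctly, the converter may have nothing to resolve them against. I suspect the fully connected part should look more like this (untested sketch on my part):

    with tf.name_scope('fc1'):
        W_fc1 = weight_variable([7 * 7 * 64, 1024])
        b_fc1 = bias_variable([1024])
        # Keep the reshape and matmul in the same scope as the layer's weights.
        h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

    with tf.name_scope('dropout'):
        keep_prob = tf.placeholder(tf.float32)
        h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    with tf.name_scope('fc2'):
        W_fc2 = weight_variable([1024, 10])
        b_fc2 = bias_variable([10])
        # The final matmul/add should also stay inside the fc2 scope.
        y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2

Does that sound like the right direction?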

babbage
Join Date: 21 Feb 17
Posts: 9
Posted: Fri, 2017-08-11 00:28

Bumping this post in the hope of getting support :)

