- Mark as New
- Bookmark
- Subscribe
- Mute
- Subscribe to RSS Feed
- Permalink
- Report Inappropriate Content
Hi,
I want to run a custom model implemented in Tensorflow on Cifar 10 dataset. However when I run this command : mvNCCompile -s 12 '100.meta' -o cifar_10_model -in 'data/Input' -on 'output1/output1'
it fails and I get this error: [Error 5] Toolkit Error: Stage Details Not Supported: Equal
My model is :
import tensorflow as tf


def model():
    """Build a CIFAR-10 CNN graph (TensorFlow 1.x, NCSDK-compatible).

    The network is 5 conv layers (with LRN + max-pooling after conv1,
    conv2 and conv5), two fully-connected ReLU layers, and a linear
    output layer named 'output1/output1' so mvNCCompile can locate it.

    Returns:
        x: float32 placeholder 'data/Input', shape [None, 32*32*3].
        y: float32 label placeholder 'data/Output', shape [None, 10].
        softmax_linear: unscaled class logits, shape [None, 10].
        global_step: non-trainable int step counter variable.
        y_pred_cls: argmax over the logits (predicted class index).
    """
    _IMAGE_SIZE = 32
    _IMAGE_CHANNELS = 3
    _NUM_CLASSES = 10
    # FIX: the forum paste rendered 4*4*128 as the numeric literal 4_4_128
    # (which Python 3.6+ reads as 44128, not 2048). After three stride-2
    # 'SAME' max-pools a 32x32 input is 4x4 with 128 channels, so the
    # flattened size feeding fully_connected1 is 4 * 4 * 128 = 2048.
    _RESHAPE_SIZE = 4 * 4 * 128

    def variable_on_cpu(name, shape, initializer):
        # Keep variable storage on the CPU so multi-GPU towers can share it.
        with tf.device('/cpu:0'):
            dtype = tf.float32
            var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
        return var

    def variable_with_weight_decay(name, shape, stddev, wd):
        # Truncated-normal variable; optionally adds an L2 penalty (scaled
        # by wd) to the 'losses' collection. wd=None skips the penalty.
        dtype = tf.float32
        var = variable_on_cpu(name, shape, tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
        if wd is not None:
            weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
            tf.add_to_collection('losses', weight_decay)
        return var

    with tf.name_scope('data'):
        x = tf.placeholder(tf.float32, shape=[None, _IMAGE_SIZE * _IMAGE_SIZE * _IMAGE_CHANNELS], name='Input')
        y = tf.placeholder(tf.float32, shape=[None, _NUM_CLASSES], name='Output')
        x_image = tf.reshape(x, [-1, _IMAGE_SIZE, _IMAGE_SIZE, _IMAGE_CHANNELS], name='images')

    # NOTE: the original graph attached tf.summary.scalar(...,
    # tf.nn.zero_fraction(activation)) after each layer. zero_fraction
    # internally emits an Equal op, which the Movidius NCSDK toolkit does
    # not support — mvNCCompile fails with
    # "[Error 5] Toolkit Error: Stage Details Not Supported: Equal".
    # Those summary ops are removed here; re-add them in a separate
    # training-only graph if sparsity monitoring is needed.

    with tf.variable_scope('conv1') as scope:
        kernel = variable_with_weight_decay('weights', shape=[5, 5, 3, 64], stddev=5e-2, wd=0.0)
        conv = tf.nn.conv2d(x_image, kernel, [1, 1, 1, 1], padding='SAME')
        biases = variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(pre_activation, name=scope.name)
    norm1 = tf.nn.lrn(conv1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')
    pool1 = tf.nn.max_pool(norm1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')

    with tf.variable_scope('conv2') as scope:
        kernel = variable_with_weight_decay('weights', shape=[5, 5, 64, 64], stddev=5e-2, wd=0.0)
        conv = tf.nn.conv2d(pool1, kernel, [1, 1, 1, 1], padding='SAME')
        biases = variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(pre_activation, name=scope.name)
    norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
    pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2')

    with tf.variable_scope('conv3') as scope:
        kernel = variable_with_weight_decay('weights', shape=[3, 3, 64, 128], stddev=5e-2, wd=0.0)
        conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
        biases = variable_on_cpu('biases', [128], tf.constant_initializer(0.0))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv3 = tf.nn.relu(pre_activation, name=scope.name)

    with tf.variable_scope('conv4') as scope:
        kernel = variable_with_weight_decay('weights', shape=[3, 3, 128, 128], stddev=5e-2, wd=0.0)
        conv = tf.nn.conv2d(conv3, kernel, [1, 1, 1, 1], padding='SAME')
        biases = variable_on_cpu('biases', [128], tf.constant_initializer(0.0))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv4 = tf.nn.relu(pre_activation, name=scope.name)

    with tf.variable_scope('conv5') as scope:
        kernel = variable_with_weight_decay('weights', shape=[3, 3, 128, 128], stddev=5e-2, wd=0.0)
        conv = tf.nn.conv2d(conv4, kernel, [1, 1, 1, 1], padding='SAME')
        biases = variable_on_cpu('biases', [128], tf.constant_initializer(0.0))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv5 = tf.nn.relu(pre_activation, name=scope.name)
    norm3 = tf.nn.lrn(conv5, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm3')
    pool3 = tf.nn.max_pool(norm3, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool3')

    with tf.variable_scope('fully_connected1') as scope:
        reshape = tf.reshape(pool3, [-1, _RESHAPE_SIZE])
        dim = reshape.get_shape()[1].value
        weights = variable_with_weight_decay('weights', shape=[dim, 384], stddev=0.04, wd=0.004)
        biases = variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
        local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)

    with tf.variable_scope('fully_connected2') as scope:
        weights = variable_with_weight_decay('weights', shape=[384, 192], stddev=0.04, wd=0.004)
        biases = variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
        local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)

    with tf.variable_scope('output1') as scope:
        weights = variable_with_weight_decay('weights', [192, _NUM_CLASSES], stddev=1 / 192.0, wd=0.0)
        biases = variable_on_cpu('biases', [_NUM_CLASSES], tf.constant_initializer(0.0))
        # Unscaled logits; softmax is applied by the loss / at inference time.
        softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)

    global_step = tf.Variable(initial_value=0, name='global_step', trainable=False)
    y_pred_cls = tf.argmax(softmax_linear, axis=1)

    return x, y, softmax_linear, global_step, y_pred_cls
You can find my meta file here : https://drive.google.com/file/d/19g3nR_CFbqOUYF4rCPk-udhSozx35RJJ/view?usp=sharing
Thank you !
Link Copied
- Mark as New
- Bookmark
- Subscribe
- Mute
- Subscribe to RSS Feed
- Permalink
- Report Inappropriate Content
@AnnaBar I'm happy to help you debug this error. Thank you for providing your meta file, but I am unable to reproduce your issue without the index and weights file also. If you can provide those, that would be of great help. Thanks.
- Mark as New
- Bookmark
- Subscribe
- Mute
- Subscribe to RSS Feed
- Permalink
- Report Inappropriate Content
Oh, of course, you can find them here: https://drive.google.com/open?id=1LzvSbpwRs6zHV7vsG-oyPGVeF2MQ3QUH
- Mark as New
- Bookmark
- Subscribe
- Mute
- Subscribe to RSS Feed
- Permalink
- Report Inappropriate Content
I found what caused the error. It was the operation tf.nn.zero_fraction()
- Mark as New
- Bookmark
- Subscribe
- Mute
- Subscribe to RSS Feed
- Permalink
- Report Inappropriate Content
@AnnaBar Removing all instances of tf.nn.zero_fraction() fixed the error?
- Mark as New
- Bookmark
- Subscribe
- Mute
- Subscribe to RSS Feed
- Permalink
- Report Inappropriate Content
Yes !
- Subscribe to RSS Feed
- Mark Topic as New
- Mark Topic as Read
- Float this Topic for Current User
- Bookmark
- Subscribe
- Printer Friendly Page