serve - "No variables to save" error in TensorFlow
The error here is quite subtle. In In[8] you create a tf.Graph called graph and set it as the default inside the with graph.as_default(): block. This means that all of the variables are created in graph, and if you print graph.get_collection(tf.GraphKeys.VARIABLES) you should see a list of your variables.
However, you exit the with block before creating (i) the tf.Session and (ii) the tf.train.Saver. This means that the session and the saver are created in a different graph (the global default tf.Graph that is used when you don't explicitly create one and set it as default), which doesn't contain any variables, or any nodes at all.
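A minimal sketch of this failure mode, assuming the 0.x-era API used in the question (the variable name v is illustrative):
import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
    v = tf.Variable(tf.zeros([10]), name="v")  # created in `graph`

# `graph` contains the variable, but the global default graph is empty:
print(len(graph.get_collection(tf.GraphKeys.VARIABLES)))  # 1
print(len(tf.all_variables()))                            # 0

# The saver inspects the *default* graph, finds no variables, and raises
# ValueError: No variables to save
saver = tf.train.Saver()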
There are at least two solutions:
1. As Yaroslav suggests, you can write your program without using the with graph.as_default(): block, which avoids the confusion with multiple graphs. However, this can lead to name collisions between different cells in your IPython notebook, which is awkward when using the tf.train.Saver, since it uses the name property of each tf.Variable as the key in the checkpoint file.
2. You can create the saver inside the with graph.as_default(): block, and create the tf.Session with an explicit graph, as follows:
   with graph.as_default():
       # [Variable and model creation goes here.]
       saver = tf.train.Saver()  # Gets all variables in `graph`.

   with tf.Session(graph=graph) as sess:
       saver.restore(sess, "/tmp/model.ckpt")
       # Do some work with the model....
   Alternatively, you can create the tf.Session inside the with graph.as_default(): block, in which case it will use graph for all of its operations; a sketch of that variant follows this list.
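A minimal sketch of that alternative, reusing the checkpoint path from the question:
with graph.as_default():
    # [Variable and model creation goes here.]
    saver = tf.train.Saver()  # Gets all variables in `graph`.
    with tf.Session() as sess:  # inside this block the default graph is `graph`
        saver.restore(sess, "/tmp/model.ckpt")
        # Do some work with the model....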
I am trying to save the model and then reuse it to classify my images, but unfortunately I am getting errors when restoring the model that I have saved.
The code in which the model was created:
# Deep Learning
# =============
#
# Assignment 4
# ------------
# In[25]:
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
from six.moves import range
# In[37]:
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
    save = pickle.load(f)
    train_dataset = save['train_dataset']
    train_labels = save['train_labels']
    valid_dataset = save['valid_dataset']
    valid_labels = save['valid_labels']
    test_dataset = save['test_dataset']
    test_labels = save['test_labels']
    del save  # hint to help gc free up memory
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
print(test_labels)
# Reformat into a TensorFlow-friendly shape:
# - convolutions need the image data formatted as a cube (width by height by #channels)
# - labels as float 1-hot encodings.
# In[38]:
image_size = 28
num_labels = 10
num_channels = 1 # grayscale
import numpy as np
def reformat(dataset, labels):
    dataset = dataset.reshape(
        (-1, image_size, image_size, num_channels)).astype(np.float32)
    #print(np.arange(num_labels))
    labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)
    #print(labels[0,:])
    print(labels[0])
    return dataset, labels
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
#print(labels[0])
# In[39]:
def accuracy(predictions, labels):
    return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
            / predictions.shape[0])
# Let's build a small network with two convolutional layers, followed by one fully connected layer. Convolutional networks are more expensive computationally, so we'll limit its depth and number of fully connected nodes.
# In[47]:
batch_size = 16
patch_size = 5
depth = 16
num_hidden = 64
graph = tf.Graph()
with graph.as_default():
    # Input data.
    tf_train_dataset = tf.placeholder(
        tf.float32, shape=(batch_size, image_size, image_size, num_channels))
    tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
    tf_valid_dataset = tf.constant(valid_dataset)
    tf_test_dataset = tf.constant(test_dataset)
    # Variables. Each variable gets a unique name, which the Saver uses
    # as its key in the checkpoint file.
    layer1_weights = tf.Variable(tf.truncated_normal(
        [patch_size, patch_size, num_channels, depth], stddev=0.1), name="layer1_weights")
    layer1_biases = tf.Variable(tf.zeros([depth]), name="layer1_biases")
    layer2_weights = tf.Variable(tf.truncated_normal(
        [patch_size, patch_size, depth, depth], stddev=0.1), name="layer2_weights")
    layer2_biases = tf.Variable(tf.constant(1.0, shape=[depth]), name="layer2_biases")
    layer3_weights = tf.Variable(tf.truncated_normal(
        [image_size // 4 * image_size // 4 * depth, num_hidden], stddev=0.1), name="layer3_weights")
    layer3_biases = tf.Variable(tf.constant(1.0, shape=[num_hidden]), name="layer3_biases")
    layer4_weights = tf.Variable(tf.truncated_normal(
        [num_hidden, num_labels], stddev=0.1), name="layer4_weights")
    layer4_biases = tf.Variable(tf.constant(1.0, shape=[num_labels]), name="layer4_biases")
    # Model.
    def model(data):
        conv = tf.nn.conv2d(data, layer1_weights, [1, 2, 2, 1], padding='SAME')
        hidden = tf.nn.relu(conv + layer1_biases)
        conv = tf.nn.conv2d(hidden, layer2_weights, [1, 2, 2, 1], padding='SAME')
        hidden = tf.nn.relu(conv + layer2_biases)
        shape = hidden.get_shape().as_list()
        reshape = tf.reshape(hidden, [shape[0], shape[1] * shape[2] * shape[3]])
        hidden = tf.nn.relu(tf.matmul(reshape, layer3_weights) + layer3_biases)
        return tf.matmul(hidden, layer4_weights) + layer4_biases
    # Training computation.
    logits = model(tf_train_dataset)
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
    # Optimizer.
    optimizer = tf.train.GradientDescentOptimizer(0.05).minimize(loss)
    # Predictions for the training, validation, and test data.
    train_prediction = tf.nn.softmax(logits)
    valid_prediction = tf.nn.softmax(model(tf_valid_dataset))
    test_prediction = tf.nn.softmax(model(tf_test_dataset))
# In[48]:
num_steps = 1001
#saver = tf.train.Saver()
with tf.Session(graph=graph) as session:
    tf.initialize_all_variables().run()
    print('Initialized')
    for step in range(num_steps):
        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
        batch_data = train_dataset[offset:(offset + batch_size), :, :, :]
        batch_labels = train_labels[offset:(offset + batch_size), :]
        feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
        _, l, predictions = session.run(
            [optimizer, loss, train_prediction], feed_dict=feed_dict)
        if (step % 50 == 0):
            print('Minibatch loss at step %d: %f' % (step, l))
            print('Minibatch accuracy: %.1f%%' % accuracy(predictions, batch_labels))
            print('Validation accuracy: %.1f%%' % accuracy(
                valid_prediction.eval(), valid_labels))
    print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
    save_path = tf.train.Saver().save(session, "/tmp/model.ckpt")
    print("Model saved in file: %s" % save_path)
Everything works fine and the model is stored in the corresponding folder.
I have created one more Python file in which I have tried to restore the model, but I am getting an error there:
# In[1]:
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
from six.moves import range
# In[3]:
image_size = 28
num_labels = 10
num_channels = 1 # grayscale
import numpy as np
# In[4]:
def accuracy(predictions, labels):
    return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
            / predictions.shape[0])
# In[8]:
batch_size = 16
patch_size = 5
depth = 16
num_hidden = 64
graph = tf.Graph()
with graph.as_default():
    '''# Input data.
    tf_train_dataset = tf.placeholder(
        tf.float32, shape=(batch_size, image_size, image_size, num_channels))
    tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
    tf_valid_dataset = tf.constant(valid_dataset)
    tf_test_dataset = tf.constant(test_dataset)'''
    # Variables. Names must match the checkpoint written by the training script.
    layer1_weights = tf.Variable(tf.truncated_normal(
        [patch_size, patch_size, num_channels, depth], stddev=0.1), name="layer1_weights")
    layer1_biases = tf.Variable(tf.zeros([depth]), name="layer1_biases")
    layer2_weights = tf.Variable(tf.truncated_normal(
        [patch_size, patch_size, depth, depth], stddev=0.1), name="layer2_weights")
    layer2_biases = tf.Variable(tf.constant(1.0, shape=[depth]), name="layer2_biases")
    layer3_weights = tf.Variable(tf.truncated_normal(
        [image_size // 4 * image_size // 4 * depth, num_hidden], stddev=0.1), name="layer3_weights")
    layer3_biases = tf.Variable(tf.constant(1.0, shape=[num_hidden]), name="layer3_biases")
    layer4_weights = tf.Variable(tf.truncated_normal(
        [num_hidden, num_labels], stddev=0.1), name="layer4_weights")
    layer4_biases = tf.Variable(tf.constant(1.0, shape=[num_labels]), name="layer4_biases")
    # Model.
    def model(data):
        conv = tf.nn.conv2d(data, layer1_weights, [1, 2, 2, 1], padding='SAME')
        hidden = tf.nn.relu(conv + layer1_biases)
        conv = tf.nn.conv2d(hidden, layer2_weights, [1, 2, 2, 1], padding='SAME')
        hidden = tf.nn.relu(conv + layer2_biases)
        shape = hidden.get_shape().as_list()
        reshape = tf.reshape(hidden, [shape[0], shape[1] * shape[2] * shape[3]])
        hidden = tf.nn.relu(tf.matmul(reshape, layer3_weights) + layer3_biases)
        return tf.matmul(hidden, layer4_weights) + layer4_biases
    '''# Training computation.
    logits = model(tf_train_dataset)
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
    # Optimizer.
    optimizer = tf.train.GradientDescentOptimizer(0.05).minimize(loss)'''
    # Predictions for the training, validation, and test data.
    #train_prediction = tf.nn.softmax(logits)
    #valid_prediction = tf.nn.softmax(model(tf_valid_dataset))
    #test_prediction = tf.nn.softmax(model(tf_test_dataset))
# In[17]:
#saver = tf.train.Saver()
with tf.Session() as sess:
    # Restore variables from disk.
    tf.train.Saver().restore(sess, "/tmp/model.ckpt")
    print("Model restored.")
    # Do some work with the model
The error I am getting is:
No variables to save
Any help would be appreciated.
You are creating a new session in In[17], which wipes your variables. Also, you don't need to use with blocks if you only have one default graph and one default session; you can instead do something like this:
sess = tf.InteractiveSession()
layer1_weights = tf.Variable(tf.truncated_normal(
    [patch_size, patch_size, num_channels, depth], stddev=0.1), name="layer1_weights")
tf.train.Saver().restore(sess, "/tmp/model.ckpt")
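To confirm the restore worked, the weights can be read back directly; with an InteractiveSession, eval() uses sess implicitly (the shape below assumes the sizes defined earlier in the question):
# Read back the restored variable; InteractiveSession makes eval() use `sess`.
print(layer1_weights.eval().shape)  # (5, 5, 1, 16)
sess.close()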