我定义了一个单输入层和输出层的神经网络。我的数据是csv格式,我已将其转换为tfrecord格式。使用tf.data api,对其进行批处理并按如下方式馈送:
-
功能:32(批量大小)x 24(功能列)
-
标签:32(批量大小)x 4(OneHot编码)
在运行图表时,它会引发ValueError。这是回溯:
文件“dummy.py”,第60行,in
train_summary,u=sess.run([trainstep],feed_dict=ground_truth:label,features:features)
文件“/usr/local/lib/python2.7/dist packages/tensorflow/python/client/session.py”,第895行,运行中
运行元数据)
文件“/usr/local/lib/python2.7/dist packages/tensorflow/python/client/session.py”,第1104行,运行中
%(np_val.shape,subfeed_t.name,str(subfeed_t.get_shape()))
值错误:无法将具有形状“”的张量u'SoftMax_Cross_熵_的形状值(32,4)与“Logits/Remaze_2:0”一起馈送。,)
以下是可以重现错误的最小代码:
import tensorflow as tf
import numpy as np
num_columns=24
num_classes=4
train_steps = 2
def model():
ground_truth_input = tf.placeholder(tf.float32,[None,num_classes]) #onehotencoded with depth 4
bottleneck_input = tf.placeholder(tf.float32,[None,num_columns]) #num_columns=24 keypoint features
#fully connected 1 : 24(num_input_features)x100
initial_value = tf.truncated_normal([num_columns, 100], stddev=0.001)
layer1_weights = tf.Variable(initial_value, name='hidden1_weights')
layer1_biases = tf.Variable(tf.zeros([100]), name='hidden1_biases')
logits_hidden1 = tf.matmul(bottleneck_input, layer1_weights) + layer1_biases
inp_activated=tf.nn.relu(logits_hidden1,name='hidden1_activation')
#fully connected 2 : 100x4(num_classes)
initial_value = tf.truncated_normal([100, num_classes], stddev=0.001)
layer_weights = tf.Variable(initial_value, name='final_weights')
layer_biases = tf.Variable(tf.zeros([num_classes]), name='final_biases')
logits = tf.matmul(inp_activated, layer_weights) + layer_biases
# loss function
loss_mean = tf.nn.softmax_cross_entropy_with_logits_v2(labels=ground_truth_input, logits=logits)
with tf.name_scope('train'):
optimizer = tf.train.MomentumOptimizer(learning_rate=0.1,use_nesterov=True,momentum=0.9)
train_op = optimizer.minimize(loss_mean, global_step=tf.train.get_global_step())
with tf.name_scope('SoftMax_Layer'):
final_tensor = tf.nn.softmax(logits,name='Softmax')
return train_op, ground_truth_input, bottleneck_input, loss_mean
trainStep, cross_entropy, features, ground_truth = model()
with tf.Session() as sess:
for i in range(2):
Label = np.eye(4)[np.random.choice(4,32)]
Features = np.random.rand(32,24)
train_summary, _ = sess.run([trainStep],feed_dict = {ground_truth : Label, features :Features})