Why does TensorFlow return [[nan nan]] instead of the probabilities from my CSV file?

Posted 2024-06-01 11:05:45


Here is the code I'm using. I'm trying to get a 1, a 0, or ideally the probability of the result for a real test set. When I split off a training set and run the model on it, I get about 93% accuracy, but when I train the program and run it on the actual test set (the one without the 1s and 0s filled in in column 1), it only returns nan.

import tensorflow as tf
import numpy as np
from numpy import genfromtxt
import sklearn

# Convert to one hot
def convertOneHot(data):
    y=np.array([int(i[0]) for i in data])
    y_onehot=[0]*len(y)
    for i,j in enumerate(y):
        y_onehot[i]=[0]*(y.max() + 1)
        y_onehot[i][j]=1
    return (y,y_onehot)


data = genfromtxt('cs-training.csv',delimiter=',')  # Training data
test_data = genfromtxt('cs-test-actual.csv',delimiter=',')  # Actual test data

#This part is to get rid of the nan's at the start of the actual test data
g = 0
for i in test_data:
    i[0] = 1
    test_data[g] = i
    g += 1

x_train=np.array([ i[1::] for i in data])
y_train,y_train_onehot = convertOneHot(data)

x_test=np.array([ i[1::] for i in test_data])
y_test,y_test_onehot = convertOneHot(test_data)
A=data.shape[1]-1 # Number of features, Note first is y
B=len(y_train_onehot[0])
tf_in = tf.placeholder("float", [None, A]) # Features
tf_weight = tf.Variable(tf.zeros([A,B]))
tf_bias = tf.Variable(tf.zeros([B]))
tf_softmax = tf.nn.softmax(tf.matmul(tf_in,tf_weight) + tf_bias)

# Training via backpropagation
tf_softmax_correct = tf.placeholder("float", [None,B])
tf_cross_entropy = -tf.reduce_sum(tf_softmax_correct*tf.log(tf_softmax))

# Train using tf.train.GradientDescentOptimizer
tf_train_step = tf.train.GradientDescentOptimizer(0.01).minimize(tf_cross_entropy)

# Add accuracy checking nodes
tf_correct_prediction = tf.equal(tf.argmax(tf_softmax,1), tf.argmax(tf_softmax_correct,1))
tf_accuracy = tf.reduce_mean(tf.cast(tf_correct_prediction, "float"))

saver = tf.train.Saver([tf_weight,tf_bias])

# Initialize and run
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)

print("...")
# Run the training
for i in range(1):
    sess.run(tf_train_step, feed_dict={tf_in: x_train, tf_softmax_correct: y_train_onehot})
    #print y_train_onehot
    saver.save(sess, 'trained_csv_model')

    ans = sess.run(tf_softmax, feed_dict={tf_in: x_test})
    print ans

#Print accuracy
    #result = sess.run(tf_accuracy, feed_dict={tf_in: x_test, tf_softmax_correct: y_test_onehot})
#print result

When I print ans, I get the following output.

[[ nan  nan]
 [ nan  nan]
 [ nan  nan]
 ..., 
 [ nan  nan]
 [ nan  nan]
 [ nan  nan]]

I don't know what I'm doing wrong. I just want ans to yield a 1, a 0, or, ideally, an array of probabilities where each entry in the array has length 2.
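(Aside: once ans contains real probabilities instead of nan, a hard 0/1 label per row can be read off with argmax — a minimal sketch, assuming ans is the N x 2 softmax output from above:)

# Hypothetical post-processing sketch: pick the class with the larger probability.
predictions = np.argmax(ans, axis=1)      # array of 0s and 1s, one per test row
probability_of_1 = ans[:, 1]              # probability the model assigns to class 1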

I don't expect many people will be able to answer this for me, but please at least give it a try. I've been stuck here waiting for a eureka moment that hasn't arrived in two days, so I figured I'd ask. Thank you!

test_data looks like this -

[[  1.00000000e+00   8.85519080e-01   4.30000000e+01 ...,   0.00000000e+00
0.00000000e+00   0.00000000e+00]
 [  1.00000000e+00   4.63295269e-01   5.70000000e+01 ...,   4.00000000e+00
0.00000000e+00   2.00000000e+00]
 [  1.00000000e+00   4.32750360e-02   5.90000000e+01 ...,   1.00000000e+00
0.00000000e+00   2.00000000e+00]
 ..., 
 [  1.00000000e+00   8.15963730e-02   7.00000000e+01 ...,   0.00000000e+00
0.00000000e+00              nan]
 [  1.00000000e+00   3.35456547e-01   5.60000000e+01 ...,   2.00000000e+00
1.00000000e+00   3.00000000e+00]
 [  1.00000000e+00   4.41841663e-01   2.90000000e+01 ...,   0.00000000e+00
0.00000000e+00   0.00000000e+00]]

The only reason the first entry of each row equals 1 is that I set it that way to avoid an error; I replaced the nan that originally filled that position. Note that everything after the first column is a feature. The first column is what I'm trying to predict.
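Incidentally, the printout above still shows a nan inside one of the feature columns of test_data, and a single nan feature is enough to make tf.matmul (and everything downstream of it) produce nan for that row. A minimal sketch of one way to neutralize those, assuming that replacing a missing feature with 0 is acceptable for this data:

# Hedged preprocessing sketch: map remaining nan values in the feature
# matrices to 0.0 (np.nan_to_num also maps +/-inf to large finite numbers).
x_train = np.nan_to_num(x_train)
x_test = np.nan_to_num(x_test)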

Edit:

I changed the code to -

import tensorflow as tf
import numpy as np
from numpy import genfromtxt
import sklearn
from sklearn.cross_validation import train_test_split
from tensorflow import Print

# Convert to one hot
def convertOneHot(data):
    y=np.array([int(i[0]) for i in data])
    y_onehot=[0]*len(y)
    for i,j in enumerate(y):
        y_onehot[i]=[0]*(y.max() + 1)
        y_onehot[i][j]=1
    return (y,y_onehot)


#buildDataFromIris()


data = genfromtxt('cs-training.csv',delimiter=',')  # Training data
test_data = genfromtxt('cs-test-actual.csv',delimiter=',')  # Test data

#for i in test_data[0]:
#    print i
#print test_data

#print test_data
g = 0
for i in test_data:
    i[0] = 1.
    test_data[g] = i
    g += 1

#print 1, test_data

x_train=np.array([ i[1::] for i in data])
y_train,y_train_onehot = convertOneHot(data)
#print len(x_train), len(y_train), len(y_train_onehot)

x_test=np.array([ i[1::] for i in test_data])
y_test,y_test_onehot = convertOneHot(test_data)
#for u in y_test_onehot[0]:
#    print u
#print y_test_onehot
#print len(x_test), len(y_test), len(y_test_onehot)
#print x_test[0]

#print '1'

#  A number of features, 4 in this example
#  B = 3 species of Iris (setosa, virginica and versicolor)
A=data.shape[1]-1 # Number of features, Note first is y
#print A
B=len(y_train_onehot[0])
#print B
#print y_train_onehot
tf_in = tf.placeholder("float", [None, A]) # Features
tf_weight = tf.Variable(tf.zeros([A,B]))
tf_bias = tf.Variable(tf.zeros([B]))
tf_softmax = tf.nn.softmax(tf.matmul(tf_in,tf_weight) + tf_bias)

tf_bias = tf.Print(tf_bias, [tf_bias], "Bias: ")
tf_weight = tf.Print(tf_weight, [tf_weight], "Weight: ")
tf_in = tf.Print(tf_in, [tf_in], "TF_in: ")
matmul_result = tf.matmul(tf_in, tf_weight)
matmul_result = tf.Print(matmul_result, [matmul_result], "Matmul: ")
tf_softmax = tf.nn.softmax(matmul_result + tf_bias)
print tf_bias
print tf_weight
print tf_in
print matmul_result

# Training via backpropagation
tf_softmax_correct = tf.placeholder("float", [None,B])
tf_cross_entropy = -tf.reduce_sum(tf_softmax_correct*tf.log(tf_softmax))

print tf_softmax_correct

# Train using tf.train.GradientDescentOptimizer
tf_train_step = tf.train.GradientDescentOptimizer(0.01).minimize(tf_cross_entropy)

# Add accuracy checking nodes
tf_correct_prediction = tf.equal(tf.argmax(tf_softmax,1), tf.argmax(tf_softmax_correct,1))
tf_accuracy = tf.reduce_mean(tf.cast(tf_correct_prediction, "float"))

print tf_correct_prediction
print tf_accuracy

#saver = tf.train.Saver([tf_weight,tf_bias])

# Initialize and run
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)

print("...")
prediction = []
# Run the training
#probabilities = []
#print y_train_onehot
#print '-----------------------------------------'
for i in range(1):
    sess.run(tf_train_step, feed_dict={tf_in: x_train, tf_softmax_correct: y_train_onehot})
    #print y_train_onehot
    #saver.save(sess, 'trained_csv_model')

    ans = sess.run(tf_softmax, feed_dict={tf_in: x_test})
    print ans

After printing these out, I see that one of the tensors is of type bool. I don't know whether that is the problem, but take a look below and see if anything helps.

Tensor("Print_16:0", shape=TensorShape([Dimension(2)]), dtype=float32)
Tensor("Print_17:0", shape=TensorShape([Dimension(10), Dimension(2)]), dtype=float32)
Tensor("Print_18:0", shape=TensorShape([Dimension(None), Dimension(10)]), dtype=float32)
Tensor("Print_19:0", shape=TensorShape([Dimension(None), Dimension(2)]), dtype=float32)
Tensor("Placeholder_9:0", shape=TensorShape([Dimension(None), Dimension(2)]), dtype=float32)
Tensor("Equal_4:0", shape=TensorShape([Dimension(None)]), dtype=bool)
Tensor("Mean_4:0", shape=TensorShape([]), dtype=float32)
...
[[ nan  nan]
 [ nan  nan]
 [ nan  nan]
 ..., 
 [ nan  nan]
 [ nan  nan]
 [ nan  nan]]

2 Answers

tf_cross_entropy = -tf.reduce_sum(tf_softmax_correct*tf.log(tf_softmax))

This was the problem with a project I was testing. Specifically, it ended up being 0*log(0), which produces nan.

If you replace it with:

tf_cross_entropy = -tf.reduce_sum(tf_softmax_correct*tf.log(tf_softmax + 1e-50))

it should avoid the problem.

I also ended up using reduce_mean rather than reduce_sum. If you double the batch size while using reduce_sum, the cost (and the magnitude of the gradients) doubles as well. In addition, when using tf.Print (which prints to the console TensorFlow was started from), it makes the values more comparable when you vary the batch size.

Specifically, this is what I'm using right now while debugging:

cross_entropy = -tf.reduce_sum(y*tf.log(model + 1e-50)) ## avoid nan due to 0*log(0)
cross_entropy = tf.Print(cross_entropy, [cross_entropy], "cost") #print to the console tensorflow was started from
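A numerically safer alternative, if your TensorFlow version has it, is to let TensorFlow combine the softmax and the cross entropy in one op instead of taking tf.log of the softmax yourself (treat this as a sketch; the argument names/order of softmax_cross_entropy_with_logits differ between TF versions):

logits = tf.matmul(tf_in, tf_weight) + tf_bias
# Combined softmax + cross entropy; handles the log(0) case internally.
tf_cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits, tf_softmax_correct))
tf_softmax = tf.nn.softmax(logits)  # keep this if you still want the probabilities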

I don't know the direct answer, but I do know how I would go about debugging it: tf.Print. It's an op that prints values as TensorFlow is executing and returns the tensor for further computation, so you can sprinkle them right inline in your model.

Try throwing a few of these in. Instead of this line:

tf_softmax = tf.nn.softmax(tf.matmul(tf_in,tf_weight) + tf_bias)

try:

tf_bias = tf.Print(tf_bias, [tf_bias], "Bias: ")
tf_weight = tf.Print(tf_weight, [tf_weight], "Weight: ")
tf_in = tf.Print(tf_in, [tf_in], "TF_in: ")
matmul_result = tf.matmul(tf_in, tf_weight)
matmul_result = tf.Print(matmul_result, [matmul_result], "Matmul: ")
tf_softmax = tf.nn.softmax(matmul_result + tf_bias)

and see what TensorFlow thinks the intermediate values are. If the NaNs are showing up earlier in the pipeline, it should give you a better idea of where the problem lies. Good luck! If you get some data out of this, feel free to follow up and we'll see if we can get you further.

Updated to add: Here's a stripped-down debug version, where I got rid of the input functions and just generate some random data:

import tensorflow as tf
import numpy as np

def dense_to_one_hot(labels_dense, num_classes=10):
  """Convert class labels from scalars to one-hot vectors."""
  num_labels = labels_dense.shape[0]
  index_offset = np.arange(num_labels) * num_classes
  labels_one_hot = np.zeros((num_labels, num_classes))
  labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
  return labels_one_hot

x_train=np.random.normal(0, 1, [50,10])
y_train=np.random.randint(0, 10, [50])
y_train_onehot = dense_to_one_hot(y_train, 10)

x_test=np.random.normal(0, 1, [50,10])
y_test=np.random.randint(0, 10, [50])
y_test_onehot = dense_to_one_hot(y_test, 10)

#  A number of features, 4 in this example
#  B = 3 species of Iris (setosa, virginica and versicolor)

A=10
B=10
tf_in = tf.placeholder("float", [None, A]) # Features
tf_weight = tf.Variable(tf.zeros([A,B]))
tf_bias = tf.Variable(tf.zeros([B]))
tf_softmax = tf.nn.softmax(tf.matmul(tf_in,tf_weight) + tf_bias)

tf_bias = tf.Print(tf_bias, [tf_bias], "Bias: ")
tf_weight = tf.Print(tf_weight, [tf_weight], "Weight: ")
tf_in = tf.Print(tf_in, [tf_in], "TF_in: ")
matmul_result = tf.matmul(tf_in, tf_weight)
matmul_result = tf.Print(matmul_result, [matmul_result], "Matmul: ")
tf_softmax = tf.nn.softmax(matmul_result + tf_bias)

# Training via backpropagation
tf_softmax_correct = tf.placeholder("float", [None,B])
tf_cross_entropy = -tf.reduce_sum(tf_softmax_correct*tf.log(tf_softmax))

# Train using tf.train.GradientDescentOptimizer
tf_train_step = tf.train.GradientDescentOptimizer(0.01).minimize(tf_cross_entropy)

# Add accuracy checking nodes
tf_correct_prediction = tf.equal(tf.argmax(tf_softmax,1), tf.argmax(tf_softmax_correct,1))
tf_accuracy = tf.reduce_mean(tf.cast(tf_correct_prediction, "float"))

print tf_correct_prediction
print tf_accuracy

init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)

for i in range(1):
    print "Running the training step"
    sess.run(tf_train_step, feed_dict={tf_in: x_train, tf_softmax_correct: y_train_onehot})
    #print y_train_onehot
    #saver.save(sess, 'trained_csv_model')

    print "Running the eval step"
    ans = sess.run(tf_softmax, feed_dict={tf_in: x_test})
    print ans

You should see lines starting with "Bias: ", and so on.
