Deep Learning p8 ValueError when printing accuracy.
I'm following the Deep Learning tutorials and they have been great so far! A couple of things have become outdated, but I've found updates for those. One error, however, is not clear to me.
Traceback (most recent call last):
  File "-/TFDeepLearning/saving_and_restoring.py", line 135, in <module>
    test_neural_network()
  File "-/saving_and_restoring.py", line 132, in test_neural_network
    print('Accuracy:',accuracy.eval({x:test_x,y:test_y}))
  File "-\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\framework\ops.py", line 570, in eval
    return _eval_using_default_session(self, feed_dict, self.graph, session)
  File "-\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\framework\ops.py", line 4455, in _eval_using_default_session
    return session.run(tensors, feed_dict)
  File "-\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\client\session.py", line 889, in run
    run_metadata_ptr)
  File "-\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\client\session.py", line 1089, in _run
    np_val = np.asarray(subfeed_val, dtype=subfeed_dtype)
  File "-\AppData\Local\Programs\Python\Python35\lib\site-packages\numpy\core\numeric.py", line 531, in asarray
    return array(a, dtype, copy=False, order=order)
ValueError: setting an array element with a sequence.
I've narrowed the source down to the feed_dict, but after comparing my code to the video tutorials and the code provided on this site, I've been unable to find a solution.
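From what I can tell, this ValueError usually means np.asarray was handed a ragged nested list (rows of different lengths), so it can't build a rectangular numeric array for the placeholder. A minimal sketch of that failure mode, separate from the tutorial code:

import numpy as np

# Rows of equal length convert cleanly into a 2-D numeric array.
good = np.asarray([[0.0, 1.0], [1.0, 0.0]], dtype=np.float32)
print(good.shape, good.dtype)   # (2, 2) float32

# Rows of different lengths cannot be packed into one rectangular array,
# which is exactly what session.run does internally with each feed value:
# np.asarray(subfeed_val, dtype=subfeed_dtype)
ragged = [[0.0, 1.0], [1.0, 0.0, 0.0]]
try:
    np.asarray(ragged, dtype=np.float32)
except ValueError as err:
    print(err)   # setting an array element with a sequence.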
import tensorflow as tf
import pickle
import numpy as np
import nltk
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer

lemmatizer = WordNetLemmatizer()
while epoch <= hm_epochs:
    if epoch != 1:
        saver.restore(sess,"model.ckpt")
    epoch_loss = 1
    with open('lexicon-2500-2638.pickle','rb') as f:
        lexicon = pickle.load(f)
        print('lexicon length:',len(lexicon))
    with open('train_set_shuffled.csv', buffering=20000, encoding='latin-1') as f:
        batch_x = []
        batch_y = []
        batches_run = 0
        for line in f:
            label = line.split(':::')[0]
            tweet = line.split(':::')[1]
            current_words = word_tokenize(tweet.lower())
            current_words = [lemmatizer.lemmatize(i) for i in current_words]

            features = np.zeros(len(lexicon))

            for word in current_words:
                if word.lower() in lexicon:
                    index_value = lexicon.index(word.lower())
                    # OR DO +=1, test both
                    features[index_value] += 1

            line_x = list(features)
            line_y = eval(label)
            batch_x.append(line_x)
            batch_y.append(line_y)
            if len(batch_x) >= batch_size:
                # possible problem source in feed_dict
                _, c = sess.run([optimizer, cost], feed_dict={x: np.array(batch_x),
                                                              y: np.array(batch_y)})
                epoch_loss += c
                batch_x = []
                batch_y = []
                batches_run += 1
                print('Batch run:',batches_run,'/',total_batches,'| Epoch:',epoch,'| Batch Loss:',c,)

    saver.save(sess, "./model.ckpt")
    print('Epoch', epoch, 'completed out of',hm_epochs,'loss:',epoch_loss)
    with open(tf_log,'a') as f:
        f.write(str(epoch)+'\n')
    epoch += 1
train_neural_network(x)
def test_neural_network():
    prediction = neural_network_model(x)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(hm_epochs):
            try:
                saver.restore(sess,"./model.ckpt")
            except Exception as e:
                print(str(e))
            epoch_loss = 0

        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        #accuracy = tf.cast(correct, 'float')
        feature_sets = []
        labels = []
        counter = 0
        with open('processed-test-set.csv', buffering=20000) as f:
            for line in f:
                try:
                    features = list(eval(line.split('::')[0]))
                    label = list(eval(line.split('::')[1]))
                    feature_sets.append(features)
                    labels.append(label)
                    counter += 1
                except:
                    pass
        print('Tested',counter,'samples.')
        # possible problem source
        test_x = np.array(feature_sets)
        test_y = np.array(labels)
        # this line gets a ValueError
        print('Accuracy:',accuracy.eval({x:test_x,y:test_y}))
test_neural_network()
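To confirm whether a ragged feed is what's happening here, this is a diagnostic I could drop in just before the accuracy line; it only uses variables already built inside test_neural_network:

# Diagnostic only: paste immediately before accuracy.eval(...).
# A clean feed should report exactly one feature length, one label length,
# and numeric dtypes (not object) for both arrays.
print('feature lengths:', set(len(f) for f in feature_sets))
print('label lengths:', set(len(l) for l in labels))
print('test_x:', test_x.shape, test_x.dtype)
print('test_y:', test_y.shape, test_y.dtype)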
I had to make some adjustments to get it working as far as it does, most importantly the number of words in the lexicon. Is this a common problem? What process should I go through to find the correct solution?
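Since I changed the lexicon size, one assumption I still need to verify is that processed-test-set.csv was generated with the same lexicon the model was trained on. A rough sketch of that check, reusing my own file names:

import pickle

# Width the training feature vectors were built with.
with open('lexicon-2500-2638.pickle', 'rb') as f:
    lexicon = pickle.load(f)
print('lexicon length:', len(lexicon))

# Width of the first few feature vectors in the test set;
# every row should match len(lexicon).
with open('processed-test-set.csv', buffering=20000) as f:
    for i, line in enumerate(f):
        features = list(eval(line.split('::')[0]))
        print('test row', i, 'width:', len(features))
        if i >= 4:
            break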