import tensorflow as tf
from tensorflow.contrib import rnn
import numpy as np
from tqdm import tqdm
import random
# FILES below are available at
# https://github.com/adeshpande3/LSTM-Sentiment-Analysis/blob/master/training_data.tar.gz
word_list = np.load('wordsList.npy')
word_vector = np.load('wordVectors.npy')
vectors = np.load('idsMatrix.npy')

epoch = 50
batch_size = 24
iteration = int(len(vectors) // batch_size)


def getTrainBatch():
    # Build a balanced training batch: even rows sample positive reviews
    # (rows 0-11498 of the ids matrix), odd rows sample negative reviews
    # (rows 13498-24998). The rows in between are never sampled here.
    labels = []
    arr = np.zeros([batch_size, 250])
    for i in range(batch_size):
        if i % 2 == 0:
            num = random.randint(1, 11499)
            labels.append(1)
        else:
            num = random.randint(13499, 24999)
            labels.append(0)
        arr[i] = vectors[num - 1:num]
    return arr, labels
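Since getTrainBatch never samples rows 11499-13498, those rows can serve as a held-out test set. A minimal test-batch sketch mirroring it; the assumption (from the dataset layout) is that rows up to 12499 are positive and the rest negative:

def getTestBatch():
    # held-out batch from the rows getTrainBatch skips
    labels = []
    arr = np.zeros([batch_size, 250])
    for i in range(batch_size):
        num = random.randint(11499, 13499)
        labels.append(1 if num <= 12499 else 0)
        arr[i] = vectors[num - 1:num]
    return arr, labels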
labels_datr = [1, 0]
labels2index = {j: i for i, j in enumerate(labels_datr)}


class LSTMclassifier():
    def __init__(self, hdim, labels):
        tf.reset_default_graph()
        # placeholders
        input_x = tf.placeholder(tf.int32, shape=[None, None], name='input')
        output_y = tf.placeholder(tf.int32, shape=[None])

        self.placeholder = {'input': input_x, 'output': output_y}

        # word embedding: pre-trained 50-d vectors over a 400000-word
        # vocabulary, kept frozen during training
        word_embedd = tf.get_variable('embedding', shape=[400000, 50],
                                      dtype=tf.float32,
                                      initializer=tf.constant_initializer(np.array(word_vector)),
                                      trainable=False)
        embedding_lookup = tf.nn.embedding_lookup(word_embedd, input_x)

        # sequence length: the ids matrix is zero-padded, so counting the
        # non-zero entries per row recovers the true length
        sequence_le = tf.count_nonzero(input_x, axis=-1)
        # model: bidirectional LSTM encoder. Each direction needs its own
        # cell object; reusing one cell for both makes the two directions
        # share (or clash over) a single set of weights. Note the dropout
        # keep_prob is hard-coded, so dropout also stays on at inference.
        with tf.variable_scope('encoder'):
            cell_fw = rnn.DropoutWrapper(cell=rnn.LSTMCell(num_units=hdim),
                                         output_keep_prob=0.5)
            cell_bw = rnn.DropoutWrapper(cell=rnn.LSTMCell(num_units=hdim),
                                         output_keep_prob=0.5)
            model = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw,
                                                    inputs=embedding_lookup,
                                                    sequence_length=sequence_le,
                                                    dtype=tf.float32)

        # final_output is (outputs_fw, outputs_bw); fs and fc are the final
        # LSTMStateTuples of the forward and backward passes
        final_output, (fs, fc) = model

        # sentence representation: the final hidden state of each direction
        # corresponds to the last valid time step (dynamic_rnn stops at
        # sequence_length). Indexing the forward outputs at time step 0
        # instead would classify from the first word alone.
        final_output_both = tf.concat([fs.h, fc.h], axis=-1)
        # weights and fully connected layer
        weights = tf.get_variable('weights', shape=[2 * hdim, labels],
                                  dtype=tf.float32,
                                  initializer=tf.random_uniform_initializer(-0.01, 0.01))
        bias = tf.get_variable('bias', shape=[labels],
                               dtype=tf.float32,
                               initializer=tf.random_uniform_initializer(-0.01, 0.01))

        # logits
        logits_ = tf.matmul(final_output_both, weights) + bias
        # normalization
        prob = tf.nn.softmax(logits_, name='prob')
        pred = tf.argmax(prob, axis=-1, name='predt')

        # cross entropy
        ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_,
                                                            labels=output_y)
        loss = tf.reduce_mean(ce)

        # accuracy
        accuracy = tf.reduce_mean(
            tf.cast(tf.equal(tf.cast(pred, tf.int32), output_y), tf.float32))

        # training
        training_ = tf.train.AdamOptimizer(0.001).minimize(loss)

        self.out = {'logits': logits_, 'prob': prob, 'pred': pred,
                    'loss': loss, 'accuracy': accuracy, 'train': training_}
def rand_exe(model):
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        out_a = None
        for i in range(epoch):
            for j in tqdm(range(iteration)):
                # getTrainBatch returns (inputs, labels)
                tweets, labels = getTrainBatch()
                out_a = sess.run(model.out,
                                 feed_dict={model.placeholder['input']: tweets,
                                            model.placeholder['output']: labels})
                print(j, out_a['accuracy'], out_a['loss'], i)
        saver.save(sess, '/home/ayodhyankit/sentimnt_aadi/training_data/testing_fix/fix')
        return out_a


if __name__ == '__main__':
    model = LSTMclassifier(250, len(labels_datr))
    out = rand_exe(model)
    print(out['prob'], out['pred'])
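A quick sanity check before moving to free-form input is held-out accuracy; a sketch of a hypothetical evaluate helper, reusing getTestBatch from above inside the training session:

def evaluate(model, sess, n_batches=40):
    # mean batch accuracy over held-out batches
    total = 0.0
    for _ in range(n_batches):
        tweets, labels = getTestBatch()
        total += sess.run(model.out['accuracy'],
                          feed_dict={model.placeholder['input']: tweets,
                                     model.placeholder['output']: labels})
    return total / n_batches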
The inference script:

import numpy as np
import tensorflow as tf

data = np.load('/home/ayodhyankit/sentimnt_aadi/training_data/wordsList.npy')
data_labe = ['positive', 'negative']
word_list = np.array(data).tolist()  # convert once, not once per word

with tf.Session() as sess:
    # restore the graph and weights once, outside the input loop; re-importing
    # the meta graph on every line would keep growing the default graph
    saver = tf.train.import_meta_graph('/home/ayodhyankit/sentimnt_aadi/training_data/testing_fix/fix.meta')
    saver.restore(sess, tf.train.latest_checkpoint('/home/ayodhyankit/sentimnt_aadi/training_data/testing_fix/'))
    graph = tf.get_default_graph()
    input_x = graph.get_tensor_by_name("input:0")
    result = graph.get_tensor_by_name("prob:0")
    result1 = graph.get_tensor_by_name("predt:0")

    while True:
        user_in = input()
        new_data = []
        for word in user_in.split():
            try:
                idx = word_list.index(word)
                print(idx)
                new_data.append(idx)
            except ValueError:
                # out-of-vocabulary words fall back to index 390000
                new_data.append(390000)
        feed_dict = {input_x: [new_data]}
        predictions = result.eval(feed_dict=feed_dict)
        print(predictions)
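One mismatch worth flagging before reading the results: training always feeds 250-token zero-padded rows, while the loop above feeds a raw, unpadded id list. A minimal sketch of padding the ids to the training length and printing a label instead of raw probabilities; the 250 length and the class-1-means-positive convention are read off getTrainBatch above:

        # inside the while loop, in place of the raw feed above
        max_len = 250  # the length the idsMatrix rows are padded to
        padded = new_data[:max_len] + [0] * (max_len - len(new_data))
        feed_dict = {input_x: [padded]}
        pred_idx = int(result1.eval(feed_dict=feed_dict)[0])
        print('positive' if pred_idx == 1 else 'negative')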
input : hello how are you
result : positive
input : i hate you john
result : positive
input : i don't like her
result : positive
input : i am very happy
result : positive
input : i am very sad
result : positive