From ee28ddb2d25a0cb4458ced0fbf1095dfdc0f538c Mon Sep 17 00:00:00 2001
From: lucifer4073 <99070111+lucifer4073@users.noreply.github.com>
Date: Wed, 8 Mar 2023 21:13:42 +0530
Subject: [PATCH] Update LSTM_Regression.py

This is my first open-source contribution. The batch size and the LSTM
cell size have been increased so the model can handle larger amounts of
data, and the number of time steps has been reduced to improve accuracy
on each training sequence.
---
 nlp/LSTM_Regression.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/nlp/LSTM_Regression.py b/nlp/LSTM_Regression.py
index d6daa9bb..ae08d1a2 100644
--- a/nlp/LSTM_Regression.py
+++ b/nlp/LSTM_Regression.py
@@ -4,11 +4,11 @@
 BATCH_START = 0
-TIME_STEPS = 20
-BATCH_SIZE = 50
+TIME_STEPS = 15  # fewer time steps per training sequence
+BATCH_SIZE = 60  # larger batch size
 INPUT_SIZE = 1
 OUTPUT_SIZE = 1
-CELL_SIZE = 10
+CELL_SIZE = 20  # LSTM cell size increased from 10 to 20 hidden units
 LR = 0.006
@@ -74,7 +74,7 @@ def add_output_layer(self):
         with tf.name_scope('Wx_plus_b'):
             self.pred = tf.matmul(l_out_x, Ws_out) + bs_out
 
-    def compute_cost(self):
+    def compute_cost(self):  # reshape predictions and targets, then compute the sequence loss
         losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
             [tf.reshape(self.pred, [-1], name='reshape_pred')],
             [tf.reshape(self.ys, [-1], name='reshape_target')],
@@ -148,4 +148,4 @@ def _bias_variable(self, shape, name='biases'):
         if i % 20 == 0:
             print('cost: ', round(cost, 4))
             result = sess.run(merged, feed_dict)
-            writer.add_summary(result, i)
\ No newline at end of file
+            writer.add_summary(result, i)
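
For reviewers, here is a rough sketch of how the updated constants shape each
training batch. The sine-to-cosine get_batch helper below is an assumption
modelled on the feed shapes the model expects, (BATCH_SIZE, TIME_STEPS,
INPUT_SIZE), and is not a verbatim copy of nlp/LSTM_Regression.py:

import numpy as np

# Hyperparameters as changed in this patch (assumed to mirror LSTM_Regression.py).
BATCH_START = 0
TIME_STEPS = 15   # fewer steps per training sequence
BATCH_SIZE = 60   # more sequences per batch
INPUT_SIZE = 1

def get_batch():
    # Hypothetical sine-to-cosine batch generator illustrating the tensor shapes.
    global BATCH_START
    xs = np.arange(BATCH_START, BATCH_START + TIME_STEPS * BATCH_SIZE)
    xs = xs.reshape((BATCH_SIZE, TIME_STEPS)) / (10 * np.pi)
    seq = np.sin(xs)[:, :, np.newaxis]   # model input:  (BATCH_SIZE, TIME_STEPS, INPUT_SIZE)
    res = np.cos(xs)[:, :, np.newaxis]   # target:       (BATCH_SIZE, TIME_STEPS, 1)
    BATCH_START += TIME_STEPS            # slide the window for the next batch
    return seq, res, xs

seq, res, xs = get_batch()
print(seq.shape, res.shape)  # (60, 15, 1) (60, 15, 1) with the new constants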