diff --git a/longterm_baseline.py b/longterm_baseline.py
index 796a164375c00c501161aa8484a7b0bd37699a49..6433479f638ca97d48f1947ca80db00e66eb7746 100644
--- a/longterm_baseline.py
+++ b/longterm_baseline.py
@@ -22,5 +22,40 @@ realtime_shortterm_dataset_depth_size = 1024*1024
 longterm_dataset = []
 
 
+class LSTM_Shortterm_Predictor(nn.Module):
+    def __init__(self, input_dim, hidden_dim):
+        super(LSTM_Shortterm_Predictor, self).__init__()
+        self.hidden_dim = hidden_dim
+
+        # The input is a tuple of heterogeneous market information.
+        # First, serialize that information into a dense feature vector.
+        self.serializer = nn.Linear(input_dim, hidden_dim)
+
+        # The LSTM runs over the serialized sequence, keeping hidden
+        # states of dimensionality hidden_dim.
+        self.lstm = nn.LSTM(hidden_dim, hidden_dim)
+
+        # The linear layer that maps from hidden state space to a
+        # single scalar trend prediction.
+        self.out = nn.Linear(hidden_dim, 1)
+
+    def forward(self, sample_seq):
+        # Reshape to (seq_len, batch=1, features) as nn.LSTM expects.
+        input_seq = sample_seq.view(len(sample_seq), 1, -1)
+        lstm_in = self.serializer(input_seq)
+        lstm_out, _ = self.lstm(lstm_in)
+        # Predict the short-term trend from the last timestep only.
+        predict_shortterm_trend = self.out(torch.tanh(lstm_out[-1:]))
+        return predict_shortterm_trend
+
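+# A minimal smoke test for the predictor. The dimensions below are
+# illustrative assumptions, not values taken from the live pipeline;
+# input_dim must match the feature width of each timestep.
+def _demo_predictor():
+    model = LSTM_Shortterm_Predictor(input_dim=8, hidden_dim=32)
+    sample_seq = torch.randn(128, 8)   # 128 timesteps, 8 features each
+    trend = model(sample_seq)          # tensor of shape (1, 1, 1)
+    print("DEBUG: predicted trend =", trend.item())
+
+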
+def aggtrade_to_impulsive_score_vector(aggtrade):
+    _, buys, sells = aggtrade
+
+    def get_factors(array_of_pairs):
+        # Each pair is (price, quantity); quantities act as weights.
+        values = numpy.array([pair[0] for pair in array_of_pairs])
+        weights = numpy.array([pair[1] for pair in array_of_pairs])
+        average = numpy.average(values, weights=weights)
+        variance = numpy.average((values - average) ** 2, weights=weights)
+        leader_price, leader_weight = array_of_pairs[0]
+        return (average, math.sqrt(variance), leader_price, leader_weight)
+
+    return get_factors(buys) + get_factors(sells)
+
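+# Hedged usage sketch: the aggtrade layout assumed here is
+# (trade_id, buys, sells), where buys/sells are lists of
+# (price, quantity) pairs; the real feed format is defined by the
+# exchange stream elsewhere in the pipeline.
+if __name__ == "__main__":
+    _demo_predictor()
+    _sample_aggtrade = (0,
+                        [(100.5, 2.0), (100.4, 1.0)],   # buys
+                        [(100.6, 3.0), (100.7, 0.5)])   # sells
+    print(aggtrade_to_impulsive_score_vector(_sample_aggtrade))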
 
 
diff --git a/sample.py b/sample.py
index d14399389d0badc70458bb7ce53a452235174b48..4dbae46ddd4998c2f5c37d55740c8f591b22994a 100644
--- a/sample.py
+++ b/sample.py
@@ -14,7 +14,7 @@ def geni(i):
     else:
         return sin(i/3) - 0.1#, sin(i) - 0.1, sin(i)]
 
-input_seq = [geni(i) for i in range(10240)]
+input_seq = [geni(i) for i in range(1024)]
 
 EMBEDDING_DIM = 7
 HIDDEN_DIM = 7
@@ -40,7 +40,8 @@ class LSTMTagger(nn.Module):
 
 model = LSTMTagger(128, 128)
 loss_function = nn.NLLLoss()
-optimizer = optim.SGD(model.parameters(), lr=0.1)
+# optimizer = optim.SGD(model.parameters(), lr=0.1)
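+# RMSprop with momentum in place of plain SGD (experimental hyperparameters).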
+optimizer = optim.RMSprop(model.parameters(), lr=0.05, alpha=0.99, eps=1e-08, weight_decay=0, momentum=0.75, centered=False)
 
 # See what the scores are before training
 # Note that element i,j of the output is the score for tag j for word i.
@@ -73,12 +74,12 @@ for i in range(len(input_seq)):
     #  calling optimizer.step()
     # loss = loss_function(tag_scores, targets)
     #loss = loss_function(scout, torch.tensor(input_seq[i+129]))
-    loss = torch.abs(scout - input_seq[i+129])
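+    # Squared error penalizes large misses more heavily than absolute error.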
+    loss = torch.square(scout - input_seq[i+129])
     print("DEBUG: LOSS=", loss)
     loss.backward(retain_graph=True)
     optimizer.step()
 
-    if i > 10240-1024:
+    if i > len(input_seq)-1024:
         real_xy.append(((i+129)/3, input_seq[i+129]))
         guess_xy.append(((i+129)/3, scout[0,0,0].tolist()))