Loss is not changing; it remains constant


My loss stays essentially constant while training an LSTM model on the Yahoo Finance GE dataset (https://finance.yahoo.com/quote/GE/history). Can someone help me out?

This is the code I implemented:

import numpy as np
from keras.layers import LSTM, Input, Dropout, Dense, RepeatVector, TimeDistributed
import pandas as pd
import matplotlib.pyplot as plt 
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from keras.models import Model, Sequential
import seaborn as sns
from keras.optimizers import Adam
import warnings
warnings.filterwarnings("ignore")

dataframe = pd.read_csv("DataSets/GE.csv") 

df = dataframe[['Date', 'Close']].copy()  # work on a copy to avoid SettingWithCopyWarning
df['Date'] = pd.to_datetime(df['Date'])
sns.lineplot(x=df['Date'], y=df['Close'])

train = df.loc[df['Date'] <= pd.to_datetime('2017-12-31')].copy()  # train on data up to end of 2017
test = df.loc[df['Date'] > pd.to_datetime('2017-12-31')].copy()    # test on everything after

scaler = MinMaxScaler()
scaler = scaler.fit(train[['Close']])

train['Close'] = scaler.transform(train[['Close']]).ravel()  # scale with statistics fitted on train only
test['Close'] = scaler.transform(test[['Close']]).ravel()

seq_size = 30

def to_sequences(x, y, seq_size_func=1):
    # Build sliding windows: each sample holds seq_size_func consecutive values of x,
    # and the target is the value of y immediately after that window.
    x_values = []
    y_values = []

    for i in range(len(x) - seq_size_func):
        x_values.append(x.iloc[i:(i + seq_size_func)].values)
        y_values.append(y.iloc[i + seq_size_func])

    return np.array(x_values), np.array(y_values)

trainX, trainY = to_sequences(train[['Close']], train['Close'], seq_size)
testX, testY = to_sequences(test[['Close']], test['Close'], seq_size)
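
For reference, a quick shape check of the generated windows (not part of the original script; the exact window counts depend on the CSV, but with seq_size = 30 and a single feature the shapes should look like this):

print(trainX.shape)  # e.g. (number_of_train_windows, 30, 1)
print(trainY.shape)  # e.g. (number_of_train_windows,)
print(testX.shape, testY.shape)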

input_shape = (trainX.shape[1], trainX.shape[2])
def build_model(learning_rate=0.001):
    model_f = Sequential()
    model_f.add(Input(shape=input_shape))
    model_f.add(LSTM(128))
    model_f.add(Dropout(rate=0.2))
    
    model_f.add(RepeatVector(trainX.shape[1]))
    
    model_f.add(LSTM(128, return_sequences=True))
    model_f.add(Dropout(rate=0.2))
    model_f.add(TimeDistributed(Dense(trainX.shape[2])))
    
    optimizer = Adam(learning_rate=learning_rate)  # Pass learning_rate to optimizer
    model_f.compile(optimizer=optimizer, loss='mae')
    return model_f
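
As a sanity check on the architecture (a hypothetical snippet, not part of the original script): because of RepeatVector and TimeDistributed, the network predicts a full 30-step sequence per input window rather than a single next value, which summary() should confirm in the last layer's output shape.

check_model = build_model()  # hypothetical throwaway instance, just for inspection
check_model.summary()        # final TimeDistributed layer should report output shape (None, 30, 1)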

desired_learning_rate = 0.001  # was undefined; use the same value as the build_model default
model = build_model(desired_learning_rate)
history = model.fit(trainX, trainY, epochs=5, batch_size=32, validation_split=0.1, verbose=1)

plt.plot(history.history['loss'], label='Training loss')
plt.plot(history.history['val_loss'], label='Validation loss')
plt.legend()
plt.show()

Epoch 1/5
396/396 ━━━━━━━━━━━━━━━━━━━━ 23s 47ms/step - loss: 0.1633 - val_loss: 0.3750
Epoch 2/5
396/396 ━━━━━━━━━━━━━━━━━━━━ 19s 48ms/step - loss: 0.1661 - val_loss: 0.3752
Epoch 3/5
396/396 ━━━━━━━━━━━━━━━━━━━━ 18s 45ms/step - loss: 0.1681 - val_loss: 0.3578
Epoch 4/5
396/396 ━━━━━━━━━━━━━━━━━━━━ 26s 66ms/step - loss: 0.1644 - val_loss: 0.3724
Epoch 5/5
396/396 ━━━━━━━━━━━━━━━━━━━━ 21s 52ms/step - loss: 0.1663 - val_loss: 0.3817

[Graph: training vs. validation loss over the 5 epochs]
