LSTM integration of user-defined loss function - python

I would like to integrate a custom loss function for my LSTM in Python. The code below shows my approach so far.
How would I best implement the loss function shown in the images, and how would I handle the constraint < 0?
Thanks for any help!
Code
# Importing the libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import gridspec
ep=25 #Epochs
bs=32 #Batch-Size
vs=0.2 #Validation-Split
r=ep+1 #Range
# Importing the training set
dataset_train = pd.read_csv(r'C:\Users\Name\Desktop\Recurrent Neural Networks\JPM_train.csv',delimiter =';')
training_set = dataset_train.iloc[:, 1:2].values
# Feature Scaling
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range = (0, 1))
training_set_scaled = sc.fit_transform(training_set)
# Creating a data structure with 60 timesteps and 1 output
X_train = []
y_train = []
X_val=[]
y_val=[]
for i in range(60, 1516):
    X_train.append(training_set_scaled[i-60:i, 0])
    y_train.append(training_set_scaled[i, 0])
X_train, y_train, X_val, y_val = np.array(X_train), np.array(y_train), np.array(X_val), np.array(y_val)
# Reshaping
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
def custom_loss(y_true, y_pred):
    if (...):  # HERE: how do I express the condition from the paper?
        loss = (predicted_stock_price - real_stock_price)**2
    else:
        loss = 0
    return loss
# Initialising the RNN
model = Sequential()
# Adding the first LSTM layer and some Dropout regularisation
model.add(LSTM(units = 50, return_sequences = True, input_shape = (X_train.shape[1], 1)))
model.add(Dropout(0.2))
# Adding a second LSTM layer and some Dropout regularisation
model.add(LSTM(units = 50, return_sequences = True))
model.add(Dropout(0.2))
# Adding a third LSTM layer and some Dropout regularisation
model.add(LSTM(units = 50, return_sequences = True))
model.add(Dropout(0.2))
# Adding a fourth LSTM layer and some Dropout regularisation
model.add(LSTM(units = 50))
model.add(Dropout(0.2))
# Adding the output layer
model.add(Dense(units = 1))
# Compiling the RNN
model.compile(optimizer = 'adam', loss = custom_loss ,metrics=['accuracy'])
# Fitting the RNN to the Training set
history=model.fit(X_train, y_train, epochs = ep, batch_size = bs, validation_split=vs)
# Getting the real stock price of 2017
dataset_test = pd.read_csv(r'C:\Users\Name\Desktop\Recurrent Neural Networks\JPM_test.csv',delimiter =';')
real_stock_price = dataset_test.iloc[:, 1:2].values
dataset_total = pd.concat((dataset_train['Preis'], dataset_test['Preis']), axis = 0)
inputs = dataset_total[len(dataset_total) - len(dataset_test) - 60:].values
inputs = inputs.reshape(-1,1)
inputs = sc.transform(inputs)
X_test = []
for i in range(60, 80):
X_test.append(inputs[i-60:i, 0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
predicted_stock_price = model.predict(X_test)
predicted_stock_price = sc.inverse_transform(predicted_stock_price)
history_dict = history.history
print(history_dict.keys())
accuracy = history_dict['accuracy']
validation_accuracy = history_dict['val_accuracy']
loss = history_dict['loss']
validation_loss = history_dict['val_loss']
gs = gridspec.GridSpec(2, 2)
#plt.tight_layout()
#plt.subplots_adjust(hspace=1.0)
fig = plt.figure(figsize=(16,16))
# Visualising the results
ax = plt.subplot(gs[1, :]) # row 1, span all columns
plt.plot(real_stock_price, color = 'red', label = 'Real Google Stock Price')
plt.plot(predicted_stock_price, color = 'blue', label = 'Predicted Google Stock Price')
plt.title('Google Stock Price Prediction')
plt.xlabel('Time')
plt.ylabel('Google Stock Price')
plt.legend()
plt.show()
Only the custom loss function:
def custom_loss(y_true, y_pred):
    if (...):  # HERE: how do I express the condition from the paper?
        loss = (predicted_stock_price - real_stock_price)**2
    else:
        loss = 0
    return loss
Pictures of the targeted loss function
Here is the link to the original paper:
https://www.researchgate.net/publication/342094242_Deep_Stock_Predictions
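(The images are not reproduced here. Reconstructed from the answer below, the pictured loss appears to be: loss_t = (y_pred_t - y_t)^2 if (y_t - y_(t-1)) * (y_pred_t - y_(t-1)) < 0, and loss_t = 0 otherwise. In other words, the squared error is charged only when the predicted move and the actual move point in opposite directions, which is where the < 0 constraint comes in.)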

You can use this loss function, which compares the current prediction (at time t) against the previous real_stock_price (at time t-1):
def custom_loss(y_true, y_pred):
    if (y_true[0] - y_true[1]) * (y_pred - y_true[1]) < 0:
        loss = (y_pred - y_true[0])**2
    else:
        loss = 0
    return loss
I think the derivatives in the backpropagation will not be affected by this time shift.
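Note that a plain Python if on tensors will not actually run element-wise inside a Keras loss, since the loss is traced on symbolic batches. Below is a minimal element-wise sketch of the same idea, assuming a TensorFlow backend and assuming (this is not in the original code) that the previous real price is packed into y_true as a second column:
import tensorflow as tf
def custom_loss(y_true, y_pred):
    # assumption: y_true has two columns, [price at t, price at t-1]
    actual_move = y_true[:, 0] - y_true[:, 1]     # real change from t-1 to t
    predicted_move = y_pred[:, 0] - y_true[:, 1]  # predicted change from t-1 to t
    squared_error = tf.square(y_pred[:, 0] - y_true[:, 0])
    # charge the squared error only where the two moves have opposite signs (product < 0)
    return tf.where(actual_move * predicted_move < 0,
                    squared_error,
                    tf.zeros_like(squared_error))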

Related

How can I use a meta-heuristic method in an LSTM to predict the stock market?

I have the code below and I want to add a meta-heuristic method (like PSO, APSO, krill herd, ...) to it to improve the predictions, but I don't know where and how to add it.
Is there anyone who can help me with this? It's essential for me.
I'm new to Python and need help.
Many thanks to anyone who writes the code for me or gives me advice on how to do it.
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pandas_datareader as web
import datetime as dt
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, LSTM
# Load Data
company = 'ETH-USD'
start = dt.datetime(2012,1,1)
end = dt.datetime(2020,1,1)
data = web.DataReader(company, 'yahoo', start, end)
#print(data.last(5))
#print(data.info())
# Prepare Data
scaler = MinMaxScaler(feature_range=(0,1))
scaled_data = scaler.fit_transform(data['Close'].values.reshape(-1,1))
prediction_days = 60
x_train = []
y_train = []
for x in range(prediction_days, len(scaled_data)):
    x_train.append(scaled_data[x - prediction_days:x, 0])
    y_train.append(scaled_data[x, 0])
x_train, y_train = np.array(x_train), np.array(y_train)
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
# Build The Model
model = Sequential()
model.add(LSTM(units=50, return_sequences=True, input_shape=(x_train.shape[1], 1)))
model.add(Dropout(0.2))
model.add(LSTM(units=50, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=50))
model.add(Dropout(0.2))
model.add(Dense(units=1)) # Prediction of the next closing value
model.compile(optimizer='adam', loss= 'mean_squared_error')
model.fit(x_train, y_train, epochs=25, batch_size=32)
''' Test The Model Accuracy on Existing Data '''
# Load Test Data
test_start = dt.datetime(2020,1,1)
test_end = dt.datetime.now()
test_data = web.DataReader(company, 'yahoo', test_start, test_end)
actual_prices = test_data['Close'].values
total_dataset = pd.concat((data['Close'], test_data['Close']), axis=0)
model_inputs = total_dataset[len(total_dataset) - len(test_data) - prediction_days:].values
model_inputs = model_inputs.reshape(-1, 1)
model_inputs = scaler.transform(model_inputs)
# Make Predictions on Test Data
x_test = []
for x in range(prediction_days, len(model_inputs)):
    x_test.append(model_inputs[x-prediction_days:x, 0])
x_test = np.array(x_test)
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
predicted_prices = model.predict(x_test)
predicted_prices = scaler.inverse_transform(predicted_prices)
# Plot The Test Predictions
plt.plot(actual_prices, color="black", label=f"Actual {company} Price")
plt.plot(predicted_prices, color='green', label=f"Predicted {company} Price")
plt.title(f"{company} Share Price")
plt.xlabel('Time')
plt.ylabel(f'{company} Share Price')
plt.legend()
plt.show()
# Predict Next Day
real_data = [model_inputs[len(model_inputs) + 1 - prediction_days:len(model_inputs) + 1, 0]]
real_data = np.array(real_data)
real_data = np.reshape(real_data, (real_data.shape[0], real_data.shape[1],1))
prediction = model.predict(real_data)
prediction = scaler.inverse_transform(prediction)
print(f"Prediction: {prediction}")

How to shape the input of an RNN with multiple features for each target?

I'm trying to learn how to use an RNN for time-series prediction, and in all the examples I'm seeing out there a sequence of prices is used to predict the following price. In the examples each target (y_train[n]) is associated with a sequence of the last 30 prices/steps (X_train[n-1], X_train[n-2], ..., X_train[n-30]).
However, in the real world, to predict accurately you need more than the sequence of the last 30 prices; you also need other... should I say features? Like the last 30 values of volume or the last 30 values of a sentiment index.
So my question is:
How do you shape the input of an RNN with two sequences for each target (the last 30 prices and the last 30 volume values)? This is the example code I'm using, with only one sequence, as a reference:
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
# Dividing Dataset (Test and Train)
train_lim = int(len(df) * 2 / 3)
training_set = df[:train_lim][['Close']]
test_set = df[train_lim:][['Close']]
# Normalizing
sc = MinMaxScaler(feature_range=(0, 1))
training_set_scaled = sc.fit_transform(training_set)
# Shaping Input
X_train = []
y_train = []
X_test = []
for i in range(30, training_set_scaled.size):
    X_train.append(training_set_scaled[i - 30:i, 0])
    y_train.append(training_set_scaled[i, 0])
X_train, y_train = np.array(X_train), np.array(y_train)
for i in range(30, len(test_set)):
    X_test.append(test_set.iloc[i - 30:i, 0])
X_test = np.array(X_test)
# Adding extra dimension ???
X_train = np.reshape(X_train, [X_train.shape[0], X_train.shape[1], 1])
X_test = np.reshape(X_test, [X_test.shape[0], X_test.shape[1], 1])
regressor = Sequential()
# LSTM layer 1
regressor.add(LSTM(units=50, return_sequences=True, input_shape=(X_train.shape[1], 1)))
regressor.add(Dropout(0.2))
# LSTM layer 2,3,4
regressor.add(LSTM(units=50, return_sequences=True))
regressor.add(Dropout(0.2))
regressor.add(LSTM(units=50, return_sequences=True))
regressor.add(Dropout(0.2))
regressor.add(LSTM(units=50, return_sequences=True))
regressor.add(Dropout(0.2))
regressor.add(LSTM(units=50, return_sequences=True))
regressor.add(Dropout(0.2))
# LSTM layer 5
regressor.add(LSTM(units=50))
regressor.add(Dropout(0.2))
# Fully connected layer
regressor.add(Dense(units=1))
# Compiling the RNN
regressor.compile(optimizer='adam', loss='mean_squared_error')
# Fitting the RNN model
regressor.fit(X_train, y_train, epochs=120, batch_size=32)
The dataframe that I'm using is a standard OHLCV with a datetime index so it will look like this:
Datetime     Open    High    Low     Close   Volume
01/01/2021   102.42  103.33  100.57  101.23  1990
02/01/2021   101.23  105.22  99.45   100.11  1970
...          ...     ...     ...     ...     ...
01/12/2021   203.22  210.34  199.22  201.11  2600
You can follow exactly the same process; the only difference is that the last dimension of the input arrays (X_train and X_test) will be greater than one, since it equals the number of external regressors plus one (the plus one comes from the fact that the past values of the target are also used as an input).
import pandas as pd
import numpy as np
import yfinance as yf
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout
pd.options.mode.chained_assignment = None
# define the target and features
target = ['Close']
features = ['Volume', 'High', 'Low']
# download the data
df = yf.download(tickers=['AAPL'], period='1y')
df = df[features + target]
# split the data
split = int(df.shape[0] * 2 / 3)
df_train = df.iloc[:split, :].copy()
df_test = df.iloc[split:, :].copy()
# scale the data
target_scaler = MinMaxScaler().fit(df_train[target])
df_train[target] = target_scaler.transform(df_train[target])
df_test[target] = target_scaler.transform(df_test[target])
features_scaler = MinMaxScaler().fit(df_train[features])
df_train[features] = features_scaler.transform(df_train[features])
df_test[features] = features_scaler.transform(df_test[features])
# extract the input sequences and output values
sequence_length = 30
X_train, y_train = [], []
for i in range(sequence_length, df_train.shape[0]):
    X_train.append(df_train[features + target].iloc[i - sequence_length: i])
    y_train.append(df_train[target].iloc[i])
X_train, y_train = np.array(X_train), np.array(y_train)
X_test, y_test = [], []
for i in range(sequence_length, df_test.shape[0]):
    X_test.append(df_test[features + target].iloc[i - sequence_length: i])
    y_test.append(df_test[target].iloc[i])
X_test, y_test = np.array(X_test), np.array(y_test)
print(X_train.shape)
# (138, 30, 4)
print(X_test.shape)
# (55, 30, 4)
# build and train the model
model = Sequential()
model.add(LSTM(units=50, return_sequences=True, input_shape=X_train.shape[1:]))
model.add(Dropout(0.2))
model.add(LSTM(units=50, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=50, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=50, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=50, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=50))
model.add(Dropout(0.2))
model.add(Dense(units=1))
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(X_train, y_train, epochs=120, batch_size=32)
model.evaluate(X_test, y_test)
# generate the test set predictions
y_pred = model.predict(X_test)
y_pred = target_scaler.inverse_transform(y_pred)
# plot the test set predictions
df['Predicted Close'] = np.nan
df['Predicted Close'].iloc[- y_pred.shape[0]:] = y_pred.flatten()
df[['Close', 'Predicted Close']].plot()

Why do my LSTM predictions have low values?

I need to predict the workload of a datacenter with N virtual machines. The data are structured in this manner:
id,date,hour,dayofweek,cpu,ram,ram_tot,users,id_vm
5fff03b99b56dba65a873e2a,2020-12-14,00:00,1,2,820,8000,10,1
5fff03ba9b56dba65a873e2c,2020-12-14,00:00,1,2,2458,16000,1,2
The fields are: id, date, hour, day of week (1-7), CPU count of the VM, RAM used, total RAM, number of users connected to the related VM, and VM id (1 or 2).
These are imported into a pandas dataframe. In the dataframe I build a column named peak, whose value is 1 if there is a workload peak on a virtual machine (% RAM used very high, > 80%) and 0 otherwise.
I build a time-series dataset and normalize it. Then I build an LSTM network, with train and test phases, to predict whether there will be a workload peak (the predicted variable is peak).
In the validation phase I get very bad results: the forecast values are very low compared to the real values.
I assume that if the network works well, the predicted value should be near 1 when it predicts a peak.
This is my code:
#read data from a mongo db and passed in a pandas dataframe
df = DataFrame(list_cur)
# calc for %mem used
df['pmem'] = (df['ram']/df['ram_tot'])*100
conditions = [(df['pmem'] <= 80), (df['pmem'] > 80)] #80
values = [0, 1]
df['peak'] = np.select(conditions, values)
df['datetime'] = df['data'] + ' ' + df['ora']
# extract hour and minutes to build 2 new columns
df[['hh','mm']] = df.ora.str.split(":", expand=True,)
# dataset with 6 features and 1 label
# every row of the dataset = 1 observation
dataset = df[['hh', 'mm', 'dayofweek', 'users', 'pmem', 'id_app', 'peak']]
# normalization of the dataset
sc = MinMaxScaler(feature_range = (0, 1))
dfn = sc.fit_transform(dataset)
# build temporal series
x = []
y = []
n_steps = 192
for i in range(len(dfn)):
    # find the end of this pattern
    end_ix = i + n_steps
    # check if we are beyond the sequence
    if end_ix > len(dfn)-1:
        break
    # gather input and output parts of the pattern
    seq_x, seq_y = dfn[i:end_ix, 0:5], dfn[end_ix, 6]
    x.append(seq_x)
    y.append(seq_y)
# splitting dataset in train and test
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.33, random_state=42)
# convert in arrays
X_train = np.asarray(X_train, dtype=np.float32)
X_test = np.asarray(X_test, dtype=np.float32)
y_train = np.asarray(y_train, dtype=np.float32)
y_test = np.asarray(y_test, dtype=np.float32)
# LSTM neural network model
model = Sequential()
#Adding the first LSTM layer and some Dropout regularisation
model.add(LSTM(units = 6, return_sequences = True, input_shape = (X_train.shape[1], X_train.shape[2])))
model.add(Dropout(0.2))
# Adding a second LSTM layer and some Dropout regularisation
model.add(LSTM(units = 32, return_sequences = True))
model.add(Dropout(0.2))
# Adding a third LSTM layer and some Dropout regularisation
model.add(LSTM(units = 64, return_sequences = True))
model.add(Dropout(0.2))
# Adding a fourth LSTM layer and some Dropout regularisation
model.add(LSTM(units = 32))
model.add(Dropout(0.2))
# Adding the output layer
model.add(Dense(units = 1))
model.summary()
# Compiling the LSTM
model.compile(loss = 'categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
# Fitting the LSTM to the Training set
history = model.fit(X_train, y_train, epochs = 5, batch_size = 32, validation_data=(X_test, y_test))
model.evaluate(X_test, y_test, verbose=1, return_dict=True)
print("test loss, test acc:", history)
print("Generate predictions for all samples")
yhat = model.predict(X_test, verbose=1)
plt.figure(figsize=(20, 10))
y1 = np.array(y_test)
y2 = np.array(yhat[:, 0])
plt.plot(y1, label = "Test", marker="o", linewidth=0)
plt.plot(y2, label = "Predicted", marker="x")
plt.xlabel('x - axis')
# Set the y axis label of the current axis.
plt.ylabel('y - axis')
# Set a title of the current axes.
plt.title('Two or more lines on same plot with suitable legends ')
# show a legend on the plot
plt.legend()
# Display a figure.
plt.show()
This is my result.
Is there some error?
I am not sure, but try inverse-transforming the MinMaxScaler on your output.
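A minimal sketch of that suggestion, assuming sc was fit on all 7 columns of dataset as in the question (the zero-padding array here is an illustration, not part of the original code):
import numpy as np
# yhat has shape (n_samples, 1); sc was fit on 7 columns, so pad the
# predictions back into a 7-column array before inverting the scaling
padded = np.zeros((len(yhat), 7))
padded[:, 6] = yhat[:, 0]  # 'peak' is the 7th column (index 6)
peak_pred = sc.inverse_transform(padded)[:, 6]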

AttributeError: 'KerasRegressor' object has no attribute 'model'

I have this piece of code, but when I try to run the prediction code there's an error.
# Creating a data structure with n timesteps
X_test = []
for i in range(5, 25):
    X_test.append(inputs[i-5:i, 0])
X_test = np.array(X_test)
# Reshape to a new dimension
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
# Identify the predicted values
predicted_number = regressor.predict(X_test)
# Inverse the scaling to put them back to the normal values
predicted_number = sc.inverse_transform(predicted_number)
The error looks like this:
AttributeError Traceback (most recent call last)
<ipython-input-364-17fa061596c6> in <module>()
1 # Identify the predicted values
----> 2 predicted_number = regressor.predict(X_test)
3 # Inverse the scaling to put them back to the normal values
4 predicted_number = sc.inverse_transform(predicted_number)
5 KerasRegressor.model
~\Anaconda3\lib\site-packages\keras\wrappers\scikit_learn.py in predict(self, x, **kwargs)
320 """
321 kwargs = self.filter_sk_params(Sequential.predict, kwargs)
--> 322 preds = np.array(self.model.predict(x, **kwargs))
323 if preds.shape[-1] == 1:
324 return np.squeeze(preds, axis=-1)
AttributeError: 'KerasRegressor' object has no attribute 'model'
In case it's needed, below is the full script:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
dataset_train = pd.read_csv('Datatraining.csv')
training_set = dataset_train.iloc[:, 1:2].values
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range = (0, 1))
training_set_scaled = sc.fit_transform(training_set)
X_train = []
y_train = []
for i in range(60, 72):
    X_train.append(training_set_scaled[i-60:i, 0])
    y_train.append(training_set_scaled[i, 0])
X_train, y_train = np.array(X_train), np.array(y_train)
X_train = np.reshape(X_train, newshape = (X_train.shape[0], X_train.shape[1], 1))
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout
regressor = Sequential()
# Adding the first LSTM layer and some Dropout regularisation
regressor.add(LSTM( units = 50, return_sequences = True, input_shape = (X_train.shape[1], 1) ))
regressor.add(Dropout(0.2))
# Adding the second LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = 50, return_sequences = True))
regressor.add(Dropout(0.2))
# Adding the third LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = 50, return_sequences = True))
regressor.add(Dropout(0.2))
# Adding the fourth LSTM layer and some Dropout regularisation
regressor.add(LSTM(units = 50))
regressor.add(Dropout(0.2))
regressor.add(Dense(units = 1))
regressor.compile(optimizer = 'adam', loss = 'mean_squared_error')
regressor.fit(X_train, y_train, epochs = 300, batch_size = 32)
dataset_test = pd.read_csv('Datatesting.csv')
real_number_arrivals = dataset_test.iloc[:, 1:2].values
dataset_total = pd.concat( (downloads['China'], dataset_test['China']), axis = 0 )
inputs = dataset_total[len(dataset_total) - len(dataset_test) - 72:].values
inputs = inputs.reshape(-1, 1)
inputs = sc.transform(inputs)
# Creating a data structure with n timesteps
X_test = []
for i in range(5, 25):
    X_test.append(inputs[i-5:i, 0])
X_test = np.array(X_test)
# Reshape to a new dimension
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
# Identify the predicted values
predicted_number = regressor.predict(X_test)
# Inverse the scaling to put them back to the normal values
predicted_number = sc.inverse_transform(predicted_number)
Any solution would be really helpful. Thanks in advance!
You have forgotten to fit the model first.
You did not share the whole code, but I believe you have some X_train and Y_train somewhere in your code, so try this line:
regressor.fit(X_train, Y_train)
And then you can run the prediction.
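For context, the traceback points at the scikit-learn wrapper, whose underlying .model attribute only exists after fit has been called. A minimal sketch of that pattern, where build_model is a simplified stand-in for the four-layer LSTM above rather than the asker's exact model:
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout
from keras.wrappers.scikit_learn import KerasRegressor
def build_model():
    # simplified stand-in for the LSTM stack defined in the question
    model = Sequential()
    model.add(LSTM(units=50, input_shape=(60, 1)))
    model.add(Dropout(0.2))
    model.add(Dense(units=1))
    model.compile(optimizer='adam', loss='mean_squared_error')
    return model
regressor = KerasRegressor(build_fn=build_model, epochs=300, batch_size=32)
regressor.fit(X_train, y_train)               # fit creates the underlying .model
predicted_number = regressor.predict(X_test)  # now predict works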

Evaluating Regression Neural Network model's accuracy

I am new to machine learning and created a neural network for regression. I have ~95,000 training examples and ~24,000 test examples. I want to know how I can evaluate my model and get the train and test errors. How do I know the accuracy of this regression model? My y variable's values range between 100 and 200, and X has 9 input features in the dataset.
Here is my code:
import pandas as pd
from keras.layers import Dense, Activation,Dropout
from keras.models import Sequential
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from matplotlib import pyplot
# Importing the dataset
dataset = pd.read_csv('data2csv.csv')
X = dataset.iloc[:,1:10].values
y = dataset.iloc[:, :1].values
# Splitting the dataset into the Training set and Test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size = 0.2, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_val = sc.transform(X_val)
X_test = sc.transform(X_test)
# Initialising the ANN
model = Sequential()
# Adding the input layer and the first hidden layer
model.add(Dense(10, activation = 'relu', input_dim = 9))
# Adding the second hidden layer
model.add(Dense(units = 5, activation = 'sigmoid'))
model.add(Dropout(0.2))
# Adding the third hidden layer
model.add(Dense(units = 5, activation = 'relu'))
model.add(Dropout(0.2))
model.add(Dense(units = 5, activation = 'relu'))
model.add(Dense(units = 5, activation = 'relu'))
# Adding the output layer
model.add(Dense(units = 1))
#model.add(Dense(1))
# Compiling the ANN
model.compile(optimizer = 'adam', loss = 'mean_squared_error',metrics=['mae','mse','mape','cosine'])
# Fitting the ANN to the Training set
history=model.fit(X_train, y_train,validation_data=(X_val, y_val) ,batch_size = 1000, epochs = 100)
test_loss = model.evaluate(X_test,y_test)
loss = history.history['loss']
acc = history.history['mean_absolute_error']
val_loss = history.history['val_loss']
val_acc = history.history['val_mean_absolute_error']
mape_loss=history.history['mean_absolute_percentage_error']
cosine_los=history.history['cosine_proximity']
pyplot.plot(history.history['mean_squared_error'])
pyplot.plot(history.history['mean_absolute_error'])
pyplot.plot(history.history['mean_absolute_percentage_error'])
pyplot.plot(history.history['cosine_proximity'])
pyplot.show()
epochs = range(1, len(loss)+1)
plt.plot(epochs, loss, 'ro', label='Training loss')
plt.legend()
plt.show()
y_pred = model.predict(X_test)
plt.plot(y_test, color = 'red', label = 'Real data')
plt.plot(y_pred, color = 'blue', label = 'Predicted data')
plt.title('Prediction')
plt.legend()
plt.show()
My test loss and metrics after model.evaluate. Note that there are five values here: the loss plus the four metrics specified in the code.
1) 84.69654303799824
2) 7.030169963975834
3) 84.69654303799824
4) 5.241855282313331
5) -0.9999999996023872
To evaluate your model you can use evaluate method:
test_loss = model.evaluate(X_test, y_test)
It returns the loss on the given test data computed using the same loss function you used during training (i.e. mean_squared_error).
Further, If you want to get training loss at the end of each epoch you can use History object which is returned by fit method:
history = model.fit(...)
loss = history.history['loss']
The loss is a list containing the loss values of training at the end of each epoch. If you have used validation data when training the model (i.e. model.fit(..., validation_data=(X_val, y_val)) or have used any other metric like mean_absolute_error (i.e. model.compile(..., metrics=['mae'])), you can also access their values:
acc = history.history['mae']
val_loss = history.history['val_loss']
val_acc = history.history['val_mae']
Bonus: To plot the training loss curve:
epochs = range(1, len(loss)+1)
plt.plot(epochs, loss, 'ro', label='Training loss')
plt.legend()
plt.show()
To show validation loss while training:
model.fit(X_train, y_train, batch_size = 1000, epochs = 100, validation_data = (X_val, y_val))
I don't think you can easily assess accuracy by plotting, since your input is 9-dimensional. You could plot the predicted y against each feature; just turn off the lines that join the dots, i.e. plt.plot(x, y, 'k.') (note the 'k.', so no line), but I'm not sure that will be useful.
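A minimal sketch of that idea, using X_test and y_pred from the question's code (feature index 0 is an arbitrary choice for illustration):
import matplotlib.pyplot as plt
# predicted vs. actual target against a single input feature, dots only (no joining lines)
plt.plot(X_test[:, 0], y_test, 'k.', label='Actual')
plt.plot(X_test[:, 0], y_pred, 'r.', label='Predicted')
plt.xlabel('Feature 0 (scaled)')
plt.ylabel('y')
plt.legend()
plt.show()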
