I have a problem with the prediction step. The program is designed to predict stock market prices, in this case EUR/USD.
I trained the model and tested it on the test data, and the results look good. At this point I can't get any further.
How can I make a prediction?
I tested the program with data from yesterday.
How can I predict today's value?
What should I feed in, and in what format?
import math
import matplotlib.pyplot as plt
import keras
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
from keras.layers import *
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from keras.callbacks import EarlyStopping
df = pd.read_csv("C:")  # path truncated in the original post; pd.read_csv takes the filename directly
df
training_set = df.iloc[436065:438073, 4:5]
test_set = df.iloc[438074:438170, 4:5]
sc = MinMaxScaler(feature_range = (0, 1))
training_set_scaled = sc.fit_transform(training_set)
X_train = []
y_train = []
for i in range(70, 2000):
    X_train.append(training_set_scaled[i-70:i, 0])
    y_train.append(training_set_scaled[i, 0])
X_train, y_train = np.array(X_train), np.array(y_train)
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
n=50
dop=0.1
model = Sequential()#1
model.add(LSTM(units = n, return_sequences = True, input_shape = (X_train.shape[1], 1)))
model.add(Dropout(dop))#2
model.add(LSTM(units = n, return_sequences = True))
model.add(Dropout(dop))#3
model.add(LSTM(units = n, return_sequences = True))
model.add(Dropout(dop))#4
model.add(LSTM(units = n, return_sequences = True))
model.add(Dropout(dop))#5
model.add(LSTM(units = n, return_sequences = True))
model.add(Dropout(dop))#6
model.add(LSTM(units = n, return_sequences = True))
model.add(Dropout(dop))#7
model.add(LSTM(units = n))
model.add(Dropout(dop))
model.add(Dense(units = 1))
model.compile(optimizer = 'adam', loss = 'mean_squared_error')
model.fit(X_train, y_train, epochs = 100, batch_size =32)
dataset_total = pd.concat((training_set, test_set), axis = 0)
inputs = dataset_total[len(dataset_total) - len(test_set) - 70:].values
inputs = inputs.reshape(-1,1)
inputs = sc.transform(inputs)
type(inputs.shape)
X_test = []
for i in range(70, 167):
    X_test.append(inputs[i-70:i, 0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
print(X_test.shape)
predicted_stock_price = model.predict(X_test)
predicted_stock_price = sc.inverse_transform(predicted_stock_price)
predicted_stock_price.shape
plt.plot(df.loc[438074:438170, "Close"])
plt.grid(True)
plt.plot(predicted_stock_price)
plt.grid(True)
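To predict an unseen value ("today"), the model needs exactly what it saw in training: the 70 most recent closing prices, scaled with the same fitted scaler and shaped (samples, timesteps, features) = (1, 70, 1). A minimal sketch, assuming df already contains data up to yesterday and sc and model are the fitted scaler and network from above:
# take the 70 most recent closing prices (the model's lookback window)
last_window = df.iloc[-70:, 4:5].values         # shape (70, 1)
# scale with the SAME scaler that was fitted on the training data
last_window_scaled = sc.transform(last_window)
# reshape to (samples, timesteps, features) = (1, 70, 1)
X_next = last_window_scaled.reshape(1, 70, 1)
# predict the next scaled close and map it back to the price scale
next_scaled = model.predict(X_next)
next_price = sc.inverse_transform(next_scaled)
print(next_price[0, 0])                         # the model's estimate for "today"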
I want to use predictions in real time, but first I want to verify that the results are the same as when all the data is given as input at once. This is the code:
import numpy as np
from keras.layers import Input, Dense, SimpleRNN
from keras.models import Model
#MODEL 1, for training
x = Input(shape=(None, 1))
h = SimpleRNN(30, return_sequences=True)(x)
out = Dense(1)(h)
model = Model(x, out)
model.compile(loss='mse', optimizer='adam')
X_train = np.random.rand(100, 50, 1)
y_train = np.random.rand(100, 50, 1)
model.fit(X_train, y_train, verbose = False)
#MODEL 1, for predictions in real time
x = Input(batch_shape=(1, None, 1))
h = SimpleRNN(30, stateful=True, return_sequences=True)(x)
out = Dense(1)(h)
predict_model = Model(x, out)
predict_model.set_weights(model.get_weights())
X = np.random.rand(2, 2, 1)
predictions = model.predict(X, verbose = False)
for sim in range(len(predictions)):
    for i in range(len(predictions[0])):
        pred = predict_model.predict(X[sim:(sim+1), i:(i + 1), :], verbose = False)
        print(pred[0][0]) #Predictions in real time
        print(predictions[sim][i]) #Predictions with MODEL 1
        print()
    predict_model.reset_states()
It prints this:
[0.09156141]
[0.09156139]
[-0.38076958]
[-0.38076955]
[0.12214336]
[0.12214339]
[-0.52013564]
[-0.5201356]
The results should be exactly the same, because both models have the same weights. What is happening?
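For reference, the printed pairs differ only around the seventh significant digit, which is the edge of float32 precision: running the recurrence one step at a time rounds differently than running it over a whole batch, even with identical weights. A minimal sketch of checking agreement up to that tolerance, assuming the X, predictions, and predict_model defined above:
import numpy as np

# collect the step-by-step (stateful) predictions
stepwise = []
for sim in range(X.shape[0]):
    sim_preds = []
    for i in range(X.shape[1]):
        p = predict_model.predict(X[sim:sim + 1, i:i + 1, :], verbose=False)
        sim_preds.append(p[0, 0, 0])
    predict_model.reset_states()
    stepwise.append(sim_preds)

# passes with a float32-level tolerance; an exact comparison would fail
np.testing.assert_allclose(np.array(stepwise), predictions[:, :, 0],
                           rtol=1e-5, atol=1e-6)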
I am trying to forecast crypto prices with TensorFlow, but it's not working.
What is the problem in this code? Why isn't it working?
This is my dataset:
And this is my code:
x_train = df['o'].values
y_train = df['c'].values
x_train = x_train[::resolution]
y_train = y_train[::resolution]
# Normalize the data
from sklearn.preprocessing import minmax_scale
x_train = minmax_scale(x_train, feature_range=(0, 1))
x_train = x_train.reshape(-1, 1)
# Split test and train
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x_train, y_train, test_size=0.1)
# TensorFlow
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
regressor = Sequential()
regressor.add(LSTM(units = 50, return_sequences = True, input_shape=x_train.shape))
regressor.add(Dropout(0.2))
regressor.add(LSTM(units = 50, return_sequences = True))
regressor.add(Dropout(0.2))
regressor.add(LSTM(units = 50, return_sequences = True))
regressor.add(Dropout(0.2))
regressor.add(LSTM(units = 50))
regressor.add(Dropout(0.2))
regressor.add(Dense(units = 1))
regressor.compile(optimizer = 'adam', loss = 'mean_squared_error')
regressor.fit(x_train, y_train, epochs = 100, batch_size = 32)
pred = regressor.predict(x_test)
import matplotlib.pyplot as plt
plt.plot(x_test, label='real data')
plt.plot(pred, label='predicted data')
plt.xlabel('Time')
plt.ylabel('Stock Price')
plt.legend()
plt.show()
This is the problem:
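A likely cause: input_shape=x_train.shape hands the LSTM the shape of the entire 2-D training array, but Keras LSTMs expect each sample shaped (timesteps, features), and the series was never windowed into sequences at all (note also that train_test_split shuffles rows, which scrambles a time series). A minimal sketch of the windowing step, with a hypothetical lookback of 60 over the scaled series before any split:
import numpy as np

lookback = 60  # hypothetical window length

# each sample must be (timesteps, features) = (lookback, 1)
X_seq, y_seq = [], []
for i in range(lookback, len(x_train)):
    X_seq.append(x_train[i - lookback:i, 0])
    y_seq.append(y_train[i])
X_seq = np.array(X_seq).reshape(-1, lookback, 1)
y_seq = np.array(y_seq)

regressor = Sequential()
regressor.add(LSTM(units=50, return_sequences=True,
                   input_shape=(lookback, 1)))  # per-sample shape, not x_train.shape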
I was following a Python project to loosely predict the price of stocks when I encountered an index error with an LSTM model. This is the guide I was following, and my code roughly matches: Prediction Tutorial. But for ease of access, this is my code exactly:
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import LSTM, Dropout, Dense
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
from matplotlib.pylab import rcParams
rcParams['figure.figsize'] = 20, 10
df = pd.read_csv('HistoricalData_Apple.csv')
df = df[['Date', 'Close/Last']]
df = df.replace({r'\$': ''}, regex=True)
df = df.astype({"Close/Last": float})
df["Date"] = pd.to_datetime(df.Date, format="%m/%d/%Y")
df.index = df['Date']
df = df.sort_index(ascending=True, axis=0)
data = pd.DataFrame(index=range(0, len(df)), columns=['Date', 'Close/Last'])
for i in range(0, len(data)):
    data["Date"][i] = df['Date'][i]
    data["Close/Last"][i] = df["Close/Last"][i]
scaler = MinMaxScaler(feature_range=(0, 1))
data.index = data.Date
data.drop("Date", axis=1, inplace=True)
final_data = data.values
train_data = final_data[0:200, :]
valid_data = final_data[200:, :]
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(final_data)
x_train_data, y_train_data = [], []
for i in range(60, len(train_data)):
    x_train_data.append(scaled_data[i-60:i, 0])
    y_train_data.append(scaled_data[i, 0])
lstm_model = Sequential()
lstm_model.add(LSTM(units=50, return_sequences=True, input_shape=
(np.shape(x_train_data)[1], 1)))
lstm_model.add(LSTM(units=50))
lstm_model.add(Dense(1))
model_data = data[len(data)-len(valid_data)-60:].values
model_data = model_data.reshape(-1, 1)
model_data = scaler.transform(model_data)
lstm_model.compile(loss='mean_squared_error', optimizer='adam')
lstm_model.fit(x_train_data, y_train_data, epochs=1, batch_size=1, verbose=2)
X_test=[]
for i in range(60, model_data.shape[0]):
    X_test.append(model_data[i-60:i, 0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
predicted_stock_price = lstm_model.predict(X_test)
predicted_stock_price = scaler.inverse_transform(predicted_stock_price)
train_data = data[:200]
valid_data = data[200:]
valid_data['Predictions'] = predicted_stock_price
plt.plot(train_data["Close"])
plt.plot(valid_data[['Close', "Predictions"]])
plt.show()
This code should be working according to the tutorial I was following, but every time I run the code I receive this error message:
2021-11-08 14:57:34.659018: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX AVX2. To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
Traceback (most recent call last):
  File "C:/Users/ME/AppData/Roaming/JetBrains/PyCharmCE2021.2/scratches/scratch_1.py", line 42, in <module>
    lstm_model.add(LSTM(units=50, return_sequences=True, input_shape=(np.shape(x_train_data)[1], 1)))
IndexError: tuple index out of range
I do not know what this means exactly or how to fix it.
The expected input shape of the LSTM layer is (batch, timesteps, features). As you have only one feature, you can reshape your training sequences as follows
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], 1)
as you already did for the validation sequences.
import pandas as pd
import numpy as np
import yfinance as yf
from keras.models import Sequential
from keras.layers import LSTM, Dense
from sklearn.preprocessing import MinMaxScaler
pd.options.mode.chained_assignment = None
# download the data
df = yf.download(tickers=['AAPL'], period='10y')
# split the data
train_data = df[['Close']].iloc[:-200, :]
valid_data = df[['Close']].iloc[-200:, :]
# scale the data
scaler = MinMaxScaler(feature_range=(0, 1))
scaler.fit(train_data)
train_data = scaler.transform(train_data)
valid_data = scaler.transform(valid_data)
# extract the training sequences
x_train, y_train = [], []
for i in range(60, train_data.shape[0]):
x_train.append(train_data[i - 60: i, 0])
y_train.append(train_data[i, 0])
x_train = np.array(x_train)
y_train = np.array(y_train)
# extract the validation sequences
x_valid = []
for i in range(60, valid_data.shape[0]):
x_valid.append(valid_data[i - 60: i, 0])
x_valid = np.array(x_valid)
# reshape the sequences
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], 1)
x_valid = x_valid.reshape(x_valid.shape[0], x_valid.shape[1], 1)
# train the model
model = Sequential()
model.add(LSTM(units=50, return_sequences=True, input_shape=x_train.shape[1:]))
model.add(LSTM(units=50))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(x_train, y_train, epochs=50, batch_size=128, verbose=1)
# generate the model predictions
y_pred = model.predict(x_valid)
y_pred = scaler.inverse_transform(y_pred)
y_pred = y_pred.flatten()
# plot the model predictions
df.rename(columns={'Close': 'Actual'}, inplace=True)
df['Predicted'] = np.nan
df['Predicted'].iloc[- y_pred.shape[0]:] = y_pred
df[['Actual', 'Predicted']].plot(title='AAPL')
My data contains just 30 rows. I have attached screenshots of my data. I need to predict the 31st value from the previous 30 values in the pow column shown in the screenshot.
Code:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
# from keras.models import Sequential
# from keras.layers import Dense
# from keras.layers import LSTM
# from keras.layers import Dropout
from sklearn.model_selection import train_test_split
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, LSTM, Dropout
data_training = pd.read_csv(r'G:\Agrima\Obj_Rec\power.csv')
data_training = data_training.iloc[:, 2:3].values
scaler = MinMaxScaler()
data_training = scaler.fit_transform(data_training)
print(data_training[0:10])
X_train = []
y_train = []
for i in range(10, data_training.shape[0]):
    X_train.append(data_training[i-10:i])
    y_train.append(data_training[i, 0])
X_train, y_train = np.array(X_train), np.array(y_train)
print(X_train.shape)
print(y_train.shape)
regressior = Sequential()
regressior.add(LSTM(units = 60, activation = 'relu', return_sequences = True, input_shape = (X_train.shape[0], 5)))
regressior.add(Dropout(0.2))
regressior.add(LSTM(units = 60, activation = 'relu', return_sequences = True))
regressior.add(Dropout(0.2))
regressior.add(LSTM(units = 80, activation = 'relu', return_sequences = True))
regressior.add(Dropout(0.2))
regressior.add(LSTM(units = 120, activation = 'relu'))
regressior.add(Dropout(0.2))
regressior.add(Dense(units = 1))
print(regressior.summary())
regressior.compile(optimizer='adam', loss = 'mean_squared_error')
regressior.fit(X_train, y_train, epochs=50, batch_size=32)
regressior.save_weights('my_model_weights.h5')  # save the weights so the model does not have to be retrained each time; load them later instead
# input new values to get new predictions
new = 62  # this should be the last day's data, e.g. new = data_training[-1]
inputs = scaler.transform(new)
y_pred = regressior.predict(inputs)
print(y_pred)
# Visualising the results
plt.figure(figsize=(14,5))
plt.plot(X_train, color = 'red', label = 'Real Data')
plt.plot(y_pred, color = 'blue', label = 'Predicted Data')
plt.title('Power')
plt.xlabel('Time')
plt.ylabel('Power')
plt.legend()
plt.show()
Error log:
ValueError: Error when checking input: expected lstm_input to have shape (20, 5) but got array with shape (10, 1)
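The error matches the model definition: input_shape=(X_train.shape[0], 5) passes the number of samples (20) and a feature count of 5, while each actual sample has shape (10, 1), i.e. 10 timesteps of 1 feature. A minimal sketch of the fix, under that reading:
# input_shape must describe ONE sample: (timesteps, features), not (samples, ...)
# here X_train has shape (20, 10, 1): 20 windows of 10 timesteps with 1 feature
regressior = Sequential()
regressior.add(LSTM(units=60, activation='relu', return_sequences=True,
                    input_shape=(X_train.shape[1], X_train.shape[2])))  # (10, 1)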
I am attempting a CNN for classification of biological data (EEG data). However, after importing and splitting the data into train/test/dev sets and building the CNN, I cannot get the input array shape to match the expected array shape.
Note: the data contains 5 trials/samples for each participant (IDs) in the study, so GroupShuffleSplit was used to ensure data from each participant was not mixed between the train and test sets.
Code and error as follows:
#Load Data
def load_all_data(filename):
    import numpy as np
    a = np.load(filename)
    d = dict(zip(("data1{}".format(k) for k in a), (a[k] for k in a)))
    return d
filename = ("dataname.npz")
X = load_all_data(filename)['array_0']
y = load_all_data(filename)['array_1']
IDs = load_all_data(filename)['array_2']
#Split Test Data with Groupshuffle Split
from sklearn.model_selection import GroupShuffleSplit
import numpy as np
test_size = 0.2
gss = GroupShuffleSplit(n_splits = 1, test_size = 0.2)
for train, test in gss.split(X, y, IDs):
    X_train = X[train]
    y_train = y[train]
    IDs_train = IDs[train]
    X_test = X[test]
    y_test = y[test]
    IDs_test = IDs[test]
fileoutname = 'train_test_data'
np.savez(fileoutname,X_train, y_train, X_test, y_test,IDs_train,IDs_test)
#Split Train, Test Data
gss = GroupShuffleSplit(1, test_size)
for train, test in gss.split(X, y, IDs):
    X_train2 = X[train]
    y_train2 = y[train]
    IDs_train = IDs[train]
    X_dev = X[test]
    y_dev = y[test]
    IDs_test = IDs[test]
#Add dimension to X and Convert y to Categorical
X_train2 = np.expand_dims(X_train2,axis=0)
y_train2 = keras.utils.to_categorical(y_train2,num_classes=2)
X_dev = np.expand_dims(X_test,axis=0)
y_dev = keras.utils.to_categorical(y_test,num_classes=2)
X = np.expand_dims(X,axis=0)
#Build the CNN
def simpleCNN(self, units = 10):
    import keras
    from keras.layers import Dense
    from keras.layers import Conv2D
    from keras.layers import Flatten
    from keras.models import Model, Input
    inp = Input(shape = self.shape[1:], name='inp')
    # layer 1
    x = Conv2D(units, kernel_size=(1,1), strides = (1,1), activation='relu', data_format='channels_last')(inp)
    # layer 2
    x = Conv2D(units, kernel_size=(2,2), strides = (1,1), activation='relu', data_format='channels_last')(x)
    # layer 3
    x = Flatten()(x)
    # layer 4
    out = Dense(2, activation='softmax', name='out')(x)
    model = Model(inputs = inp, outputs = out)
    return model
#Fit the Data
model = simpleCNN(X)
from keras.optimizers import Adamax
adamax = Adamax(lr=3e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0); #3e-4; 2e-3 is a default.
model.compile(optimizer=adamax, loss='categorical_crossentropy', metrics=['acc'])
model.fit(X_train2, y_train2, epochs=20, batch_size=32, verbose = 1, validation_data = (X_dev, y_dev))
ValueError: Error when checking input: expected inp to have shape (11459, 26, 60) but got array with shape (9065, 26, 60)
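The shapes in the error are telling: X holds 11459 samples in total and X_train2 holds 9065. np.expand_dims(..., axis=0) prepends a batch dimension of size 1, so Input(shape=self.shape[1:]) ends up treating all 11459 samples as one giant input. For a channels_last Conv2D, the channel axis belongs at the end, and the model should be built from the training array. A minimal sketch, under those assumptions:
# add a trailing channel axis: (samples, 26, 60) -> (samples, 26, 60, 1)
X_train2 = np.expand_dims(X_train2, axis=-1)
X_dev = np.expand_dims(X_dev, axis=-1)   # note: the original expands X_test here, not X_dev

# build the model from the training array so Input gets shape (26, 60, 1)
model = simpleCNN(X_train2)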