Why is the neural network not learning? - python

I am training a neural network on a simple dataset. I have tried different combinations of parameters, optimizers, and learning rates, but even after 20 epochs the network is still not learning anything.
Where in the following code does the problem lie?
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Input, Dense, Flatten
from tensorflow import keras
from livelossplot import PlotLossesKeras
from tensorflow.keras.models import Model  # keep imports on tensorflow.keras to avoid mixing keras packages
from sklearn.datasets import make_classification
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import pandas as pd
seed = 42
X, y = make_classification(n_samples=100000, n_features=2, n_redundant=0,
n_informative=2, random_state=seed)
print(f"Number of features: {X.shape[1]}")
print(f"Number of samples: {X.shape[0]}")
df = pd.DataFrame(np.concatenate((X, y.reshape(-1,1)), axis=1))
df = df.set_axis([*df.columns[:-1], 'Class'], axis=1)  # inplace=True was removed in pandas 2.0
df['Class'] = df['Class'].astype('int')
X = df.drop('Class', axis=1)
y = df['Class']
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)
print(f"Train set: {X_train.shape}")
print(f"Validation set: {X_val.shape}")
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train.astype(np.float64))
X_val_scaled = scaler.transform(X_val.astype(np.float64))
inputs = Input(shape=X_train_scaled.shape[1:])
h0 = Dense(5, activation='relu')(inputs)
h1 = Dense(5, activation='relu')(h0)
preds = Dense(1, activation='sigmoid')(h1)
model = Model(inputs=inputs, outputs=preds)
opt = keras.optimizers.Adam(learning_rate=0.0001)  # 'lr' is deprecated in favour of 'learning_rate'
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(X_train_scaled, y_train, batch_size=128, epochs=20, verbose=0,
validation_data=(X_val_scaled, y_val),
callbacks=[PlotLossesKeras()])
score_train = model.evaluate(X_train_scaled, y_train, verbose=0)
score_test = model.evaluate(X_val_scaled, y_val, verbose=0)
print('Train score:', score_train[0])
print('Train accuracy:', score_train[1])
print('Test score:', score_test[0])
print('Test accuracy:', score_test[1])
The code produces the following kind of output:
[live loss plot from PlotLossesKeras omitted]

You have used the wrong loss function. Change this line
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
to, for example,
model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])
Categorical cross-entropy expects a one-hot encoded y, i.e. a 0 or a 1 for every class. Your y is a single 0/1 column feeding one sigmoid output unit, which is exactly what binary cross-entropy expects. Plain MSE would also train here, and you can experiment with other losses as well.
your y:
[1,0,1]
one-hot encoded y:
[[0,1], [1,0], [0,1]]
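As a minimal sketch of the two options, reusing the names from the question's code (nothing below changes the data pipeline):
# Option 1: keep the single sigmoid unit and the 0/1 labels, just switch the loss
model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])
# Option 2: one-hot encode y and use a two-unit softmax head with categorical cross-entropy
from tensorflow.keras.utils import to_categorical
y_train_oh = to_categorical(y_train)  # shape (n_samples, 2)
y_val_oh = to_categorical(y_val)
preds = Dense(2, activation='softmax')(h1)
model = Model(inputs=inputs, outputs=preds)
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])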

Related

How to fine-tune the last layers of a neural network in the target model for transfer learning?

I am learning how transfer learning works using this data: https://www.kaggle.com/competitions/santander-customer-satisfaction/data. This is my simple source model code in TensorFlow, which I then save:
import pandas as pd
pd.set_option('display.max_rows', None)
import numpy as np
from tensorflow import keras
import matplotlib.pyplot as plt
import tensorflow as tf
""" # Read in the csv data using pandas
train = pd.read_csv('Z:\ADwork2\python\PM/train.csv',index_col=0)
test = pd.read_csv('Z:\ADwork2\python\PM/test.csv', index_col=0)
sample = pd.read_csv('Z:\ADwork2\python\PM/sample_submission.csv')
"""
# Read in the csv data using pandas
train = pd.read_csv('train.csv',index_col=0)
test = pd.read_csv('test.csv', index_col=0)
sample = pd.read_csv('sample_submission.csv')
train.dtypes.value_counts()
train.select_dtypes(include=['int64']).nunique()
features_to_drop = train.nunique()
features_to_drop = features_to_drop.loc[features_to_drop.values==1].index
# now drop these columns from both the training and the test datasets
train = train.drop(features_to_drop,axis=1)
test = test.drop(features_to_drop,axis=1)
train.isnull().values.any()
X = train.iloc[:,:-1]
y = train['TARGET']
y.value_counts().to_frame().T
from imblearn.over_sampling import SMOTE
X_resampled, y_resampled = SMOTE().fit_resample(X, y)
y_resampled.value_counts().to_frame().T
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(X_resampled, y_resampled,
train_size=0.5,
test_size=0.2,
random_state=42,
shuffle=True)
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_val = scaler.transform(X_val)
test = scaler.transform(test)
model = keras.Sequential(
[
keras.layers.Dense(units=9, activation="relu", input_shape=(X_train.shape[-1],) ),
# randomly delete 30% of the input units below
keras.layers.Dropout(0.3),
keras.layers.Dense(units=9, activation="relu"),
# the output layer, with a single neuron
keras.layers.Dense(units=1, activation="sigmoid"),
]
)
# save the initial weights for later
initial_weights = model.get_weights()
model.summary()
#keras.utils.plot_model(model, show_shapes=True)
learning_rate = 0.001
model.compile(optimizer=keras.optimizers.Adam(learning_rate=learning_rate),
loss="binary_crossentropy",
metrics=[keras.metrics.AUC()]
)
history = model.fit(X_train, y_train,
epochs=500,
batch_size=1000,
validation_data=(X_val, y_val),
verbose=0)
from tensorflow.keras.callbacks import EarlyStopping
early_stopping = EarlyStopping(
min_delta = 0.0002, # minimum amount of change to count as an improvement
patience = 20, # how many epochs to wait before stopping
restore_best_weights=True,
)
model.set_weights(initial_weights)
history = model.fit(X_train, y_train,
epochs=500,
batch_size=1000,
validation_data=(X_val, y_val),
verbose=0,
# add in our early stopping callback
callbacks=[early_stopping]
)
sample['TARGET'] = model.predict(test)
sample.to_csv('submission.csv',index=False)
#tf.keras.models.save_model()
model.save('modelcentral.h5')
I save this model and then load it in a new Python file to build the target model:
import pandas as pd
pd.set_option('display.max_rows', None)
import numpy as np
from tensorflow import keras
import matplotlib.pyplot as plt
import tensorflow as tf
# Read in the csv data using pandas
train = pd.read_csv('train.csv',index_col=0)
test = pd.read_csv('test.csv', index_col=0)
sample = pd.read_csv('sample_submission.csv')
train.dtypes.value_counts()
train.select_dtypes(include=['int64']).nunique()
features_to_drop = train.nunique()
features_to_drop = features_to_drop.loc[features_to_drop.values==1].index
# now drop these columns from both the training and the test datasets
train = train.drop(features_to_drop,axis=1)
test = test.drop(features_to_drop,axis=1)
train.isnull().values.any()
X = train.iloc[:,:-1]
y = train['TARGET']
y.value_counts().to_frame().T
from imblearn.over_sampling import SMOTE
X_resampled, y_resampled = SMOTE().fit_resample(X, y)
y_resampled.value_counts().to_frame().T
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(X_resampled, y_resampled,
train_size=0.5,
test_size=0.2,
random_state=42,
shuffle=True)
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_val = scaler.transform(X_val)
test = scaler.transform(test)
# tf.keras.models.load_model() reconstructs the saved model identically
model = keras.models.load_model("modelcentral.h5")
model.trainable = False
learning_rate = 0.001
model.compile(optimizer=keras.optimizers.Adam(learning_rate=learning_rate),
loss="binary_crossentropy",
metrics=[keras.metrics.AUC()]
)
history = model.fit(X_train, y_train,
epochs=500,
batch_size=1000,
validation_data=(X_val, y_val),
verbose=0)
model.summary()
For now I am just freezing all model layers, but what if I need to fine-tune the last layers? For example, I have binary classification in the source model; what if the target model is a multi-class classification problem? How can I fine-tune the last layers? I am following this repo https://github.com/rasbt/stat453-deep-learning-ss21/blob/main/L14/5-transfer-learning-vgg16_small.ipynb to learn fine-tuning of final layers for transfer learning, but that code is in PyTorch and works on image data, so I am confused:
model.classifier[1].requires_grad = True
model.classifier[3].requires_grad = True
# For the last layer, because the number of class labels differs compared to ImageNet, we replace the output layer with our own output layer:
model.classifier[6] = torch.nn.Linear(4096, 10)
Please help, and if there is any mistake in my current code then guide me.
Given your source model:
import tensorflow as tf
model = tf.keras.Sequential(
[
tf.keras.layers.Dense(units=9, activation="relu", input_shape=(10,) ),
tf.keras.layers.Dropout(0.3),
tf.keras.layers.Dense(units=9, activation="relu"),
tf.keras.layers.Dense(units=1, activation="sigmoid"),
])
model.save('model.h5')
You can do something like this to replace your last layer with some other layer:
model = tf.keras.models.load_model("model.h5")
transfer_model = tf.keras.Sequential()
for idx, l in enumerate(model.layers):
    if idx == len(model.layers) - 1:
        # replace the old head with an output layer for 10 different classes
        transfer_model.add(tf.keras.layers.Dense(units=10, activation="softmax"))
    else:
        transfer_model.add(l)
print(transfer_model.summary())
You can then decide which layers to freeze or keep trainable via l.trainable = True / False. You can also do all of this without the for loop if you prefer:
model.layers[0].trainable = True
model.layers[2].trainable = True
outputs = tf.keras.layers.Dense(units=10, activation="softmax")(model.layers[-2].output)
transfer_model = tf.keras.Model(inputs=model.input, outputs=outputs)
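A hedged follow-up sketch (not part of the original answer): freeze everything except the new head and compile for the multi-class target. Here y_multiclass is a hypothetical array of integer class labels in 0..9:
# freeze all transferred layers; only the new 10-class softmax head stays trainable
for l in transfer_model.layers[:-1]:
    l.trainable = False
transfer_model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
    loss="sparse_categorical_crossentropy",  # integer labels, no one-hot encoding needed
    metrics=["accuracy"],
)
# transfer_model.fit(X_train, y_multiclass, epochs=10)  # y_multiclass is hypothetical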

How to calculate loss with KerasClassifier

I'm using KerasClassifier to wrap my Keras model as a scikit-learn estimator in order to perform K-fold cross-validation.
model = KerasClassifier(build_fn=create_model, epochs=20, batch_size=8, verbose = 1)
kfold = KFold(n_splits=10)
scoring = ['accuracy', 'precision', 'recall', 'f1']
results = cross_validate(estimator=model,
X=x_train,
y=y_train,
cv=kfold,
scoring=scoring,
return_train_score=True,
return_estimator=True)
Then I choose the best model among the 10 estimators returned by the function, according to the metrics:
best_model = results['estimator'][2]  # for example, the third model
Now I want to run predict on x_test and get the accuracy and loss metrics. How can I do that? I tried model.evaluate(x_test, y_test), but the model is a KerasClassifier, so I get an error.
The point is that your KerasClassifier instance mimics standard scikit-learn classifiers. In other words, it is a scikit-learn estimator and, as such, does not provide an .evaluate() method.
Therefore, you can just call best_model.score(X_test, y_test), which returns the accuracy, as standard sklearn classifiers do. The loss values recorded during training are accessible via the history_ attribute of your KerasClassifier instance.
Here's an example:
!pip install scikeras
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split, cross_validate, KFold
import tensorflow as tf
import tensorflow.keras
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from scikeras.wrappers import KerasClassifier
X, y = make_classification(n_samples=100, n_features=20, n_informative=5, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
def build_nn():
    ann = Sequential()
    ann.add(Dense(20, input_dim=X_train.shape[1], activation='relu', name="Hidden_Layer_1"))
    ann.add(Dense(1, activation='sigmoid', name='Output_Layer'))
    ann.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return ann
keras_clf = KerasClassifier(model = build_nn, optimizer="adam", optimizer__learning_rate=0.001, epochs=100, verbose=0)
kfold = KFold(n_splits=10)
scoring = ['accuracy', 'precision', 'recall', 'f1']
results = cross_validate(estimator=keras_clf, X=X_train, y=y_train, scoring=scoring, cv=kfold, return_train_score=True, return_estimator=True)
best_model = results['estimator'][2]
# accuracy
best_model.score(X_test, y_test)
# loss values
best_model.history_['loss']
Finally, note that when in doubt you can call dir(object) to get the list of all attributes and methods of the specified object (dir(best_model) in your case).
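If you also want a test-set loss to go with the accuracy, one option (a sketch, not part of the original answer) is to compute it from the predicted probabilities with sklearn's log_loss, since the model was trained with binary cross-entropy:
from sklearn.metrics import log_loss
# scikeras's KerasClassifier exposes predict_proba like any sklearn classifier
y_proba = best_model.predict_proba(X_test)
test_loss = log_loss(y_test, y_proba)  # binary cross-entropy on the held-out data
print(f"Test loss: {test_loss:.4f}")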

Tensorflow Keras loss is NaN

As you can see below, I am trying to create an MLP with tensorflow/keras, but unfortunately the loss is always NaN when fitting. Do you have any advice?
As a second error I get the message "'Functional' object has no attribute 'score'" when trying to measure accuracy with model.score, but I think this is triggered by the first problem.
Thanks to all.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from mpl_toolkits import mplot3d
from sklearn import datasets
from various import printShapes, printNumpy, print_Model_Accuracy, printLARGE, checkFormat
from sklearn.datasets import make_blobs
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
np.random.seed(1234)
#%matplotlib qt
#%matplotlib inline
plt.rcParams["figure.figsize"] = [4*2, 4*2]
if 0:
    iris = datasets.load_iris()
    X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.80, random_state=1234)
if 1:
    X, y = make_blobs(n_features=4, centers=3, n_samples=1000, cluster_std=5.0, random_state=1234)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=1234)
print ("Target Label Example: y_train[0]:")
print (y_train[0])
print (type(y_train[0]))
printLARGE("MLP classifier TENSORFLOW")
tf.random.set_seed(1234)
Epochs = 10
inputs = keras.Input(shape=(4,), name="digits")
x = layers.Dense(100, activation="tanh", name="dense_1")(inputs)
x = layers.Dense(4, activation="tanh", name="dense_2")(x)
outputs = layers.Dense(3, activation="softmax", name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(
optimizer=keras.optimizers.RMSprop(), # Optimizer
loss=keras.losses.SparseCategoricalCrossentropy(), # Loss function to minimize
metrics=[keras.metrics.SparseCategoricalAccuracy()], # List of metrics to monitor
)
printShapes(X_train, "X_train", y_train, "y_train")
# TRAINING
model.fit(X_train, y_train, batch_size=64, epochs=Epochs)
printShapes(X_test, "X_test", y_test, "y_test")
# INFERENCE
y_test_predproba = model.predict(X_test)
print(y_test_predproba)
y_test_pred = np.argmax(y_test_predproba, axis = 1)
print(y_test_pred)
print_Model_Accuracy(model, X_test, y_test, y_test_pred)
Using the tanh activation function in the hidden layers does not make much sense here; ReLU is the better choice.
Adding one more hidden layer will work better than adding more units to the first layer for your task. However, more hidden layers also make the model more vulnerable to overfitting; adding Dropout layers addresses that.
Finally, your model should be,
inputs = keras.Input(shape=(4,), name="digits")
x = layers.Dense(32, activation="relu", name="dense_1")(inputs)
x = layers.Dropout(0.2)(x)
x = layers.Dense(24, activation="relu", name="dense_2")(x)
x = layers.Dropout(0.2)(x)
x = layers.Dense(16, activation="relu", name="dense_3")(x)
outputs = layers.Dense(3, activation="softmax", name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
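For completeness, a short sketch of compiling and fitting the revised model, reusing the compile settings from the question (the epoch count is the question's own Epochs = 10):
model.compile(
    optimizer=keras.optimizers.RMSprop(),
    loss=keras.losses.SparseCategoricalCrossentropy(),
    metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
model.fit(X_train, y_train, batch_size=64, epochs=Epochs)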

Tensorflow model accuracy low

My main goal is to use data from 2018 to predict data for 2019. I'm using a GRU model with the following code. I have a few issues: I'm not sure whether the code is actually correct or whether I am missing something, and for model.fit I don't know whether to use validation_split=0.1 or validation_data=(X_test, y_test), since I'm using a different dataframe for testing.
Also, the accuracy is very small and doesn't make any sense, and I have no idea why.
import pandas as pd
import tensorflow as tf
from keras.layers.core import Dense
from keras.layers.recurrent import GRU
from keras.models import Sequential
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from tensorboardcolab import TensorBoardColab, TensorBoardColabCallback
df = pd.read_csv('IF 10 PERCENT.csv',index_col=None)
#Loading Second Dataframe
df2 = pd.read_csv('2019 10minutes IF 10 PERCENT.csv',index_col=None)
tbc=TensorBoardColab() # Tensorboard
X_train= df[['WindSpeed_mps','AmbTemp_DegC','RotorSpeed_rpm','RotorSpeedAve','NacelleOrientation_Deg','MeasuredYawError','Pitch_Deg','WindSpeed1','WindSpeed2','WindSpeed3','GeneratorTemperature_DegC','GearBoxTemperature_DegC']]
X_train=X_train.values
y_train= df['Power_kW']
y_train=y_train.values
X_test= df2[['WindSpeed_mps','AmbTemp_DegC','RotorSpeed_rpm','RotorSpeedAve','NacelleOrientation_Deg','MeasuredYawError','Pitch_Deg','WindSpeed1','WindSpeed2','WindSpeed3','GeneratorTemperature_DegC','GearBoxTemperature_DegC']]
X_test=X_test.values
y_test= df2['Power_kW']
y_test=y_test.values
# conversion to numpy array
# scaling values for model
x_scale = MinMaxScaler()
y_scale = MinMaxScaler()
X_train= x_scale.fit_transform(X_train)
y_train= y_scale.fit_transform(y_train.reshape(-1,1))
X_test = x_scale.transform(X_test)  # transform only; the scalers were already fitted on the training data
y_test = y_scale.transform(y_test.reshape(-1,1))
X_train = X_train.reshape((-1,1,12))
X_test = X_test.reshape((-1,1,12))
# splitting train and test
# creating model using Keras
model = Sequential()
model.add(GRU(units=512, return_sequences=True, input_shape=(1,12)))
model.add(GRU(units=256, return_sequences=True))
model.add(GRU(units=256))
model.add(Dense(units=1, activation='sigmoid'))
model.compile(loss=['mse'], optimizer='adam',metrics=['accuracy'])
model.summary()
#model.fit(X_train, y_train, batch_size=250, epochs=10, validation_split=0.1, verbose=1, callbacks=[TensorBoardColabCallback(tbc)])
model.fit(X_train, y_train, batch_size=250, epochs=10, validation_data=(X_test,y_test), verbose=1, callbacks=[TensorBoardColabCallback(tbc)])
score = model.evaluate(X_test, y_test)
print('Score: {}'.format(score[0]))
print('Accuracy: {}'.format(score[1]))
y_predicted = model.predict(X_test)
y_predicted = y_scale.inverse_transform(y_predicted)
y_test = y_scale.inverse_transform(y_test)
plt.plot(y_predicted, label='Predicted')
plt.plot(y_test, label='Measurements')
plt.legend()
plt.show()
Thank you
It sounds like you are trying to solve a regression problem here. If so, it does not make sense to measure accuracy as a metric, since accuracy measures exact label matching. MSE should work well for regression.
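A minimal sketch of the suggested change (metric strings as accepted by Keras; 'mae' is mean absolute error):
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
model.fit(X_train, y_train, batch_size=250, epochs=10, validation_data=(X_test, y_test), verbose=1)
loss, mae = model.evaluate(X_test, y_test)
print('Test MSE (loss): {}'.format(loss))
print('Test MAE: {}'.format(mae))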

How can I implement the input of multiple regression in LSTM using keras?

Here is my code:
def create_dataset(signal_data, look_back=1):
    dataX, dataY = [], []
    for i in range(len(signal_data) - look_back):
        dataX.append(signal_data[i:(i + look_back), 0])
        dataY.append(signal_data[i + look_back, 0])
    return np.array(dataX), np.array(dataY)
df = pd.read_csv('time_series.csv')
signal_data = df.Close.values.astype('float32')
signal_data = signal_data.reshape(len(df), 1)
scaler = MinMaxScaler(feature_range=(0, 1))
signal_data = scaler.fit_transform(signal_data)
train_size = int(len(signal_data) * 0.80)
test_size = len(signal_data) - train_size
# val_size = len(signal_data) - train_size - test_size
train = signal_data[0:train_size]
# val = signal_data[train_size:train_size+val_size]
test = signal_data[train_size:len(signal_data)]
x_train, y_train = create_dataset(train, look_back)
# x_val, y_val = create_dataset(val, look_back)
x_test, y_test = create_dataset(test, look_back)
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
# x_val = np.reshape(x_val, (x_val.shape[0], x_val.shape[1], 1))
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
Now I want to add df.Open, df.High, df.Low, and df.Volume.
How can I implement this? Should I just add them to the signal data? I'm wondering how to add the data so that I can train on multiple features.
I don't know where or how to implement it; I need your help. Your valuable opinions and thoughts will be very much appreciated.
I made several modifications to your code. This should work. In summary:
I fixed the lines of code where you were hardcoding the selection of variable 0. Now the target variable sits in the last position and the other features in the previous ones.
I fixed the reshapes: some of them were not needed, and the rest were adjusted to keep all the dimensions.
I fixed the model input shape; now you have 5 variables instead of 1.
My general recommendations:
I would not use MinMaxScaler; it is risky because a single outlier can distort your whole distribution. Instead, use StandardScaler. More info here: http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html
I would scale the data after splitting, once train_x, test_x and their respective y counterparts are built. As written, you compute the scaling statistics from the train and test sets together, i.e. from future information, which is different from what you'll face in a real situation where you have to scale new data with past statistics. It is better to build a test set that is as close to reality as possible (see the sketch after the full listing below).
How do you know that your model is big enough to model your data? I would remove the dropouts and run the model to see whether it can overfit the training data. If it can, the model is big enough, and you can then start regularising it to improve generalisation. More info in this book: https://www.deeplearning.ai/machine-learning-yearning/
For the model metrics you chose accuracy, which is a classification metric. I would use one that matches the problem type (regression), for example mean absolute error.
I hope I managed to help you :D
import numpy as np
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout, Conv2D, Reshape, TimeDistributed, Flatten, Conv1D,ConvLSTM2D, MaxPooling1D
from keras.layers.core import Dense, Activation, Dropout
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
import tensorflow as tf
import matplotlib.pyplot as plt
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
sess = tf.Session(config=config)
def create_dataset(signal_data, look_back=1):
    dataX, dataY = [], []
    for i in range(len(signal_data) - look_back):
        dataX.append(signal_data[i:(i + look_back), :])
        dataY.append(signal_data[i + look_back, -1])
    return np.array(dataX), np.array(dataY)
look_back = 20
df = pd.read_csv('kospi.csv')
signal_data = df[["Open", "Low", "High", "Volume", "Close"]].values.astype('float32')
scaler = MinMaxScaler(feature_range=(0, 1))
signal_data = scaler.fit_transform(signal_data)
train_size = int(len(signal_data) * 0.80)
test_size = len(signal_data) - train_size - int(len(signal_data) * 0.05)
val_size = len(signal_data) - train_size - test_size
train = signal_data[0:train_size]
val = signal_data[train_size:train_size+val_size]
test = signal_data[train_size+val_size:len(signal_data)]
x_train, y_train = create_dataset(train, look_back)
x_val, y_val = create_dataset(val, look_back)
x_test, y_test = create_dataset(test, look_back)
model = Sequential()
model.add(LSTM(128, input_shape=(None, 5),return_sequences=True))
model.add(Dropout(0.3))
model.add(LSTM(128, input_shape=(None, 5)))
model.add(Dropout(0.3))
model.add(Dense(128))
model.add(Dropout(0.3))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mae'])  # mean absolute error, per the recommendation above
model.summary()
hist = model.fit(x_train, y_train, epochs=20, batch_size=32, verbose=2, validation_data=(x_val, y_val))
trainScore = model.evaluate(x_train, y_train, verbose=0)
model.reset_states()
print('Train Score: ', trainScore)
valScore = model.evaluate(x_val, y_val, verbose=0)
model.reset_states()
print('Validation Score: ', valScore)
testScore = model.evaluate(x_test, y_test, verbose=0)
model.reset_states()
print('Test Score: ', testScore)
p = model.predict(x_test)
print(mean_squared_error(y_test, p))
import matplotlib.pyplot as pplt
pplt.plot(y_test)
pplt.plot(p)
pplt.legend(['testY', 'p'], loc='upper right')
pplt.show()
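As referenced in the recommendations above, a minimal sketch of scaling after the split so the test set never leaks into the scaling statistics (using StandardScaler per the first recommendation; variable names follow the listing, and this would replace the scaler.fit_transform call on the full signal_data):
from sklearn.preprocessing import StandardScaler
# fit the scaler on the training slice only, then apply the same
# transformation to the validation and test slices
scaler = StandardScaler()
train = scaler.fit_transform(signal_data[0:train_size])
val = scaler.transform(signal_data[train_size:train_size + val_size])
test = scaler.transform(signal_data[train_size + val_size:len(signal_data)])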
