ValueError when fitting a machine learning model - Python

I got a ValueError when using TensorFlow to build a model. I have tried debugging and checked the shapes throughout my model. I don't understand what this error means, but I've narrowed the problem down to the Conv2D layer. I also tried changing hyperparameters (i.e., batch size, microbatches, etc.).
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
!pip install tensorflow-privacy
import numpy as np
import tensorflow as tf
from tensorflow_privacy import *
import tensorflow_privacy
from matplotlib import pyplot as plt
import pylab as pl
import pandas as pd
from tensorflow.keras.models import Model
from tensorflow.keras import datasets, layers, models, losses
from tensorflow.keras import backend as bke
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l1, l2, l1_l2 #meaning of norm
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
batch_size = 8
epochs = 4
microbatches = 8
inChannel = 1
kr = 0  # 1e-5
num_kernels=8
drop_perc=0.25
dim = 1
l2_norm_clip = 1.5
noise_multiplier = 1.3
learning_rate = 0.25
latent_dim = 0
def print_datashape():
    print('genotype data: ', genotype_data.shape)
    print('phenotype data: ', single_pheno.shape)
genotype_data = tf.random.uniform([4276, 28220],0,255,)
phenotype_data = tf.random.uniform([4276, 20],0,255,)
genotype_data = genotype_data.numpy()
phenotype_data = phenotype_data.numpy()
small_geno = genotype_data
single_pheno = phenotype_data[:, 1]
print_datashape()
df = small_geno
min_max_scaler = preprocessing.MinMaxScaler()
df = min_max_scaler.fit_transform(df)
scaled_pheno = min_max_scaler.fit_transform(single_pheno.reshape(-1,1)).reshape(-1)
feature_size= df.shape[1]
df = df.reshape(-1, feature_size, 1, 1)
print("df: ", df.shape)
print("scaled: ", scaled_pheno.shape)
# split train to train and valid
train_data,test_data,train_Y,test_Y = train_test_split(df, scaled_pheno, test_size=0.2, random_state=13)
train_X,valid_X,train_Y,valid_Y = train_test_split(train_data, train_Y, test_size=0.2, random_state=13)
def print_shapes():
    print('train_X: {}'.format(train_X.shape))
    print('train_Y: {}'.format(train_Y.shape))
    print('valid_X: {}'.format(valid_X.shape))
    print('valid_Y: {}'.format(valid_Y.shape))
input_shape= (feature_size, dim, inChannel)
predictor = tf.keras.Sequential()
predictor.add(layers.Conv2D(num_kernels, (5,1), padding='same', strides=(12, 1), activation='relu',input_shape= input_shape))
predictor.add(layers.AveragePooling2D(pool_size=(2,1)))
predictor.add(layers.Dropout(drop_perc))
predictor.add(layers.Flatten())
predictor.add(layers.Dense(int(feature_size / 4), activation='relu'))
predictor.add(layers.Dropout(drop_perc))
predictor.add(layers.Dense(int(feature_size / 10), activation='relu'))
predictor.add(layers.Dropout(drop_perc))
predictor.add(layers.Dense(1))
optimizer = DPKerasAdamOptimizer(learning_rate=learning_rate, l2_norm_clip=l2_norm_clip, noise_multiplier=noise_multiplier, num_microbatches=microbatches)
# compile
predictor.compile(loss='mse', optimizer=optimizer, metrics=['mse'])
#summary
predictor.summary()
print_shapes()
predictor.fit(train_X, train_Y,batch_size=batch_size,epochs=epochs,verbose=1, validation_data=(valid_X, valid_Y))
ValueError: Dimension size must be evenly divisible by 8 but is 1 for '{{node Reshape}} = Reshape[T=DT_FLOAT, Tshape=DT_INT32](mean_squared_error/weighted_loss/value, Reshape/shape)' with input shapes: [], [2] and with input tensors computed as partial shapes: input[1] = [8,?].
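One likely cause, going by the error message (a hedged suggestion, since I can't run this exact setup): the DP optimizers in tensorflow-privacy clip and noise gradients per microbatch, so they need a vector of per-example losses that they can reshape to [num_microbatches, ...]. The string loss 'mse' makes Keras reduce the batch loss to a single scalar, and reshaping that scalar into 8 microbatches fails exactly as the traceback shows. A minimal sketch of the change:

# Sketch: give the DP optimizer an unreduced, per-example loss vector.
# batch_size must also stay divisible by num_microbatches (8 and 8 here,
# which is fine).
loss = tf.keras.losses.MeanSquaredError(
    reduction=tf.keras.losses.Reduction.NONE)
predictor.compile(loss=loss, optimizer=optimizer, metrics=['mse'])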


The prediction for an image classification algorithm is not printing

I have trained an image classification model in a Jupyter Notebook and have gotten to the point of testing an image to see whether it is "Pasta = 0" or "Pizza = 1". The model fits at an accuracy between 70 and 80 percent on average for most epochs.
However, the predicted value is not printing. All that happens when the test image is processed is that the run is shown as "Completed" with the time it took per step. No "Pasta" or "Pizza" is printed.
Here is the code I have so far:
#Import all libraries needed.
import tensorflow as tf
from tensorflow import keras
from keras.models import Sequential
from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Dropout
from tensorflow.keras import layers
from keras.utils import to_categorical
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing import image
from tensorflow.keras.optimizers import RMSprop
import cv2
import os
plt.style.use('fivethirtyeight')
img = image.load_img("D:/ML Datasets/FoodImageClassification/Training Data/Pizza/Pizza - Wikipedia.jpg")
plt.imshow(img)
cv2.imread("D:/ML Datasets/FoodImageClassification/Training Data/Pizza/Pizza - Wikipedia.jpg")
cv2.imread("D:/ML Datasets/FoodImageClassification/Training Data/Pizza/Pizza - Wikipedia.jpg").shape
#Training and Validation
train = ImageDataGenerator(rescale = 1/255)
validation = ImageDataGenerator(rescale = 1/255)
train_dataset = train.flow_from_directory('D:/ML Datasets/FoodImageClassification/Training Data/',
                                          target_size=(200, 200),
                                          batch_size=4,
                                          class_mode='binary')
validation_dataset = validation.flow_from_directory('D:/ML Datasets/FoodImageClassification/Testing Data/',
                                                    target_size=(200, 200),
                                                    batch_size=4,
                                                    class_mode='binary')
train_dataset.class_indices #This returns {'Pasta': 0, 'Pizza': 1}
train_dataset.classes
model = tf.keras.models.Sequential([tf.keras.layers.Conv2D(16, (3,3), activation='relu', input_shape=(200,200,3)),
                                    tf.keras.layers.MaxPool2D(2,2),
                                    #
                                    tf.keras.layers.Conv2D(32, (3,3), activation='relu'),
                                    tf.keras.layers.MaxPool2D(2,2),
                                    #
                                    tf.keras.layers.Conv2D(32, (3,3), activation='relu'),
                                    tf.keras.layers.MaxPool2D(2,2),
                                    ##
                                    tf.keras.layers.Flatten(),
                                    ##
                                    tf.keras.layers.Flatten(),
                                    ##
                                    tf.keras.layers.Dense(512, activation='relu'),
                                    ##
                                    tf.keras.layers.Dense(1, activation='sigmoid'),
                                    ##
                                    tf.keras.layers.Dense(1, activation='softmax'),
                                    ##
                                    tf.keras.layers.Dense(1, activation='sigmoid')
                                    ])
model.compile(loss='binary_crossentropy',
              optimizer=RMSprop(learning_rate=0.30),
              metrics=['accuracy'])
model_fit = model.fit(train_dataset,
                      steps_per_epoch=5,
                      epochs=50,
                      validation_data=validation_dataset)
from skimage.transform import resize
img = image.load_img('D://ML Datasets/FoodImageClassification/Testing Data/Pasta/Garlic Butter Parmesan Pasta Recipe.jpg')
newsize = (200, 200)
img = img.resize(newsize)
# Shows the image in image viewer
img.show()
X = image.img_to_array(img)
X = np.expand_dims(X,axis = 0)
images = np.vstack([X])
val = model.predict(images)
if val == 0:
    print("Pasta")
elif val == 1:
    print("Pizza")
model.predict() returns a float in [0, 1] when the model's output layer uses a sigmoid activation. It will almost never return exactly 0 or 1, so you need to round your prediction with something like np.rint() and check that value. This assumes a balanced training set of classes; otherwise the decision threshold may need adjusting.
val = model.predict(images)
val = np.rint(val)
if val == 0:
    print("Pasta")
elif val == 1:
    print("Pizza")

How to forecast a univariate time series 20/30 days ahead using tensorflow LSTM?

I have used the code below for training and validation. It gives decent results, but I don't know how to forecast n periods ahead (like 30/50 days) using the trained model.
The GitHub link for the code with data output is here.
Import the libraries:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
from keras.models import Sequential
from keras.layers import Dense,Dropout,Conv1D,Bidirectional
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
import warnings
warnings.filterwarnings('ignore')
import tensorflow
np.random.seed(1)
tensorflow.random.set_seed(1)
Load the univariate time series data and normalize the values:
dataframe=pd.read_sas('train.sas7bdat')
dataframe['Datetime']=pd.to_datetime(dataframe['Datetime'],format='%d%b%Y:%H:%M:%S')
dataframe.set_index('Datetime',inplace=True)
data=dataframe
# filter input data according to datetime, i.e. 1st May 2021
dataset = data[data.index >= '2021-05-01 00:00:00']
# Replace null values with the previous 15-minute value
dataset.ffill(axis='rows', inplace=True)
dataset.shape
def normalize_cols(df, cols):
    """Scale the values of each feature
    according to the column's max value"""
    data = df.loc[:, cols]
    for col in cols:
        scaler = lambda x: x / data[col].max()
        data[col] = data[col].apply(scaler)
    print(data[cols].head())
    return data[cols].values

features = dataset.columns.values  # columns to train the model on
X = normalize_cols(dataset, features)
Turn each signal into a labeled dataset:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
window_size = 15     # num. days per training sample
batch_size = 64      # num. of samples per epoch
buffer_size = 1000   # num. of samples in memory for random selection
split_time = 400     # where to split the data for training/validation
forecast_length = 10*24*4
def window_dataset(series, window_size, batch_size, shuffle_buffer):
    """Function to turn time series data into a set of sequences,
    where each window is split into inputs w[:-1] and targets w[1:]"""
    ser = tf.expand_dims(series, axis=-1)  # note: unused as written
    data = tf.data.Dataset.from_tensor_slices(series)
    data = data.window(window_size + 1, shift=1, drop_remainder=True)
    data = data.flat_map(lambda w: w.batch(window_size + 1))
    data = data.shuffle(shuffle_buffer)
    data = data.map(lambda w: (w[:-1], w[1:]))
    return data.batch(batch_size).prefetch(1)
x_train = X[:split_time]
x_test = X[split_time:]
print(f"Training data shape: {x_train.shape}")
print(f"Validation data shape: {x_test.shape}")
train_set = window_dataset(x_train,window_size,batch_size,buffer_size)
keras.backend.clear_session()
Choose and connect the model components:
# 1D convolution layers
conv1 = layers.Conv1D(filters=60, kernel_size=15, strides=1,
                      padding="causal", activation="relu",
                      input_shape=[None, len(features)])
conv2 = layers.Conv1D(filters=60, kernel_size=5, strides=1,
                      padding="causal", activation="tanh")
# Bidirectional LSTM layers
lstm1 = layers.Bidirectional(layers.LSTM(50,return_sequences=True))
lstm2 = layers.Bidirectional(layers.LSTM(20,return_sequences=True))
# Model construction
inputs = layers.Input(shape=(None,len(features)))
x = conv1(inputs)
x = lstm1(x)
x = lstm2(x)
x = conv2(x)
x = layers.Dense(60,activation='relu')(x)
x = layers.Dropout(.1)(x)
x = layers.Dense(1,activation='tanh')(x)
outputs = layers.Lambda(lambda x: 25*abs(x))(x)
#outputs = layers.Lambda(lambda x: 1*abs(x))(x)
# SGD optimizer and Huber loss
optimizer = keras.optimizers.SGD(lr=1e-5, momentum=0.9)
loss = keras.losses.Huber()
model = keras.Model(inputs=inputs,outputs=outputs)
model.compile(optimizer, loss, metrics=["mae"])
model.summary()
"""
### Train model
"""
epochs = 100
history = model.fit(train_set, epochs=epochs, verbose=1)
print(f"Model trained for {epochs} epochs")
Inspect training results:
def model_forecast(model, X, window_size):
    """Takes in a numpy array, creates a windowed tensor
    and predicts the following value on each window"""
    data = tf.data.Dataset.from_tensor_slices(X)
    data = data.window(window_size, shift=1, drop_remainder=True)
    data = data.flat_map(lambda w: w.batch(window_size))
    data = data.batch(32).prefetch(1)
    forecast = model.predict(data)
    return forecast
train_window = [i for i in range(split_time-window_size)]
forecast = model_forecast(model,x_train,window_size)
import seaborn as sns
plt.figure(figsize=(8,5),dpi=120)
sns.lineplot(train_window, forecast[:-1,1,0].reshape(-1), label='Forecast')
sns.lineplot(train_window,X[:split_time-window_size].reshape(-1),label='actual_load')
Make predictions on test data:
val_window = [i for i in range(split_time, len(dataset) - window_size)]
forecast = model_forecast(model,x_test,window_size)
plt.figure(figsize=(8,5),dpi=120)
sns.lineplot(val_window,forecast[:-1,1,0].reshape(-1),label='Forecast')
sns.lineplot(val_window,X[split_time:-window_size].reshape(-1),label='actual_load')
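One common way to forecast n periods ahead with a model like this is recursive one-step forecasting: predict one step, append the prediction to the input window, and slide forward. A minimal sketch, assuming a univariate series and the sequence-output model above (recursive_forecast and n_ahead are illustrative names, and prediction errors compound as the horizon grows):

def recursive_forecast(model, series, window_size, n_ahead):
    """Forecast n_ahead steps by feeding each prediction back as input."""
    window = list(series[-window_size:].reshape(-1))   # last observed window
    preds = []
    for _ in range(n_ahead):
        x = np.array(window[-window_size:]).reshape(1, window_size, 1)
        y = model.predict(x, verbose=0)   # model outputs a sequence
        next_val = float(y[0, -1, 0])     # keep only the last time step
        preds.append(next_val)
        window.append(next_val)
    return np.array(preds)

# e.g. 30 days ahead at 15-minute resolution:
future = recursive_forecast(model, x_test, window_size, n_ahead=30*24*4)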

Python TypeError: op needs to be operation [0 0 0]

I am trying to run a neural network with TensorFlow in Visual Studio Code, but when I run the program I get a TypeError in my conv_x_to_var function. I have looked for solutions to this specific problem on Stack Overflow, but so far I have found none, besides another question I posted in December of last year. I have also tried changing my code to just return the function without the tensor, but that just gives me another error.
Here is the TypeError I am receiving when I run my program:
Exception has occurred: TypeError
op needs to be an Operation: [0 0 0]
File "C:\Users\trevo\OneDrive\Desktop\vegaai-master\tensor_net\neuralnet.py", line 65, in conv_x_to_var
return Variable(tf.Tensor(np.array(x), (len(x), len(x)), np.float64))
File "C:\Users\trevo\OneDrive\Desktop\vegaai-master\tensor_net\neuralnet.py", line 85, in InitializeNeuralNet
origin = self.conv_x_to_var([0,0,0])
File "C:\Users\trevo\OneDrive\Desktop\vegaai-master\tensor_net\neuralnet.py", line 63, in __init__
self.InitializeNeuralNet()
File "C:\Users\trevo\OneDrive\Desktop\vegaai-master\tensor_net\neuralnet.py", line 132, in <module>
net = NetMain()
Here is my Python code in my neuralnet.py script:
from pickletools import optimize
from re import X
from turtle import shape
from sklearn import metrics
import tensorflow as tf
import numpy as np
from tensorflow.python.keras import layers
from tensorflow.python.keras.layers import Activation, Dense, Dropout
from tensorflow.python.ops.variables import Variable
from keras.models import Sequential
import matplotlib
from sklearn.model_selection import train_test_split
from tensorflow.python import keras
import matplotlib.pyplot as plt
from tensorflow.python.ops.linalg_ops import norm
import wolframalpha as wolf
from os import path, read
import autograd
import sys
import pandas as pd
from tqdm import tqdm, trange
class NetMain():
    def __init__(self):
        self.InitializeNeuralNet()

    def conv_x_to_var(self, x):
        return Variable(tf.Tensor(np.array(x), (len(x), len(x)), np.float64))

    def conv_y_to_var(self, y):
        return Variable(tf.Tensor(np.array(y), (len(y), len(y)), np.float64))

    def conv_to_numpy(self, n):
        return n.data.cpu().numpy()
    def InitializeNeuralNet(self, activation='relu', loss='mean_sqaured_error', optimizer='sgd', metrics=['accuracy'], epochs=300, Nsamples=20000, max_Nfeatures=100):
        #vegaMain = VegaAiMain()
        xs = []
        ys = []
        for i in trange(Nsamples):
            Nfeatures = np.random.randint(max_Nfeatures-40) + 40
            interX = np.random.rand(Nfeatures, 3) * 50 - 50 * np.random.rand(3)
            X = np.zeros((Nfeatures, Nfeatures, 6))
            X[:,:,:3] = np.expand_dims(interX, 1)
            X[:,:,3:] = np.expand_dims(interX, 0)
            X = X.reshape(-1, 6)
            X = np.mgrid[1:2:len(X)]
            origin = self.conv_x_to_var([0,0,0])
            x1 = self.conv_x_to_var(X[:,:3]) - origin
            x2 = self.conv_x_to_var(X[:,3:]) - origin
            r1 = norm(x1)
            r2 = norm(x2)
            z = 1 / r1 / r2
            z = np.sum(z) * 1e4
            y = X.conv_to_numpy(z)
            xs.append(np.mean(X, 0))
            ys.append(y / (len(X)))
        dataset = train_test_split(xs, ys, train_size=0.8)
        (x_train, y_train), (x_test, y_test) = X.dataFrameRead
        model = Sequential()
        model.add(Dense(units=400, input_dim=6))
        model.add(Activation(activation))
        model.add(Dense(units=200))
        model.add(Activation(activation))
        model.add(Dense(units=100))
        model.add(Dropout(0.2))
        model.add(Dense(units=50))
        model.add(Activation(activation))
        model.add(Dropout(0.2))
        model.add(Dense(units=25))
        model.add(Activation(activation))
        model.add(Dense(units=3))
        sgd = tf.keras.optimizers.SGD(learning_rate=0.001, momentum=0.9, nesterov=True)
        print(model.get_weights())
        model.summary()
        model.compile(loss=loss, optimizer=optimizer, metrics=metrics)
        model.fit(np.array(x_train), np.array(y_train), epochs=100, batch_size=50, nesterov=True)
        loss_data = model.evaluate(np.array(x_test), np.array(y_test), batch_size=100)
        classes = model.predict(np.array(x_test), batch_size=1)
        print([classes[:10]])
        print(y_test[:10])

net = NetMain()
What exactly can I do to fix this code? Thank you!
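A likely starting point (a sketch, not a verified end-to-end fix): tf.Tensor is not meant to be constructed directly; its constructor expects an Operation, which is exactly what the "op needs to be an Operation: [0 0 0]" message is complaining about. Building the variable straight from the array sidesteps that:

    # Sketch: let tf.Variable wrap the array directly and infer shape/dtype.
    # (The (len(x), len(x)) shape in the original would also be wrong for a
    # 3-element vector like [0, 0, 0].)
    def conv_x_to_var(self, x):
        return tf.Variable(np.array(x, dtype=np.float64))

Note too that loss='mean_sqaured_error' is misspelled and would fail later; Keras expects 'mean_squared_error'.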

I want to implement a multi-layer neural network, but I'm getting the error "only size-1 arrays can be converted to Python scalars"

I want to implement a multi-layer neural network, but I'm getting an error in the Keras input layer that only size-1 arrays can be converted to Python scalars.
Here is the code :
from keras.models import Sequential
from keras.layers import Activation
from keras import backend as K
from keras.layers.core import Dense
from keras.optimizers import SGD
from keras.metrics import categorical_crossentropy
import numpy as np
import cv2
import os
from random import randint
import matplotlib.pyplot as plt
#Loading the images
DataDir= r"E:\FYP\images_datasets\Training Data"
Categories=["Badshahi-Mosque"]
for category in Categories:
    path=os.path.join(DataDir,category)
    for img in os.listdir(path):
        img_arr=cv2.imread(os.path.join(path,img),cv2.IMREAD_GRAYSCALE)
        plt.imshow(img_arr,cmap="gray")
        break
    break
#Resizing the image
IMG_SIZE=(124,124)
new_array=cv2.resize(img_arr,(IMG_SIZE))
plt.imshow(new_array,cmap="gray")
plt.show()
print(new_array.shape)
training_data=[]
class_num1=[]
#Training the data
def create_training_data():
    for category in Categories:
        path=os.path.join(DataDir,category)
        class_num=Categories.index(category)
        for img in os.listdir(path):
            try:
                img_arr=cv2.imread(os.path.join(path,img))
                new_array=cv2.resize(img_arr,(IMG_SIZE))
                training_data.append([new_array,class_num])
                class_num1.append([class_num])
            except Exception as e:
                pass
create_training_data()
print("Length of the training data is:",len(training_data))
classes = np.unique(class_num1)
nClasses = len(classes)
print('Total number of outputs : ', nClasses)
print('Output classes being able to be classified: ', classes)
import random
random.shuffle(training_data)
for i in training_data[:5]:
print("Labeling values before on hot enc are:",i[1])
import numpy as np
X=[]
train_labels=[]#One hot encoding values
train_data=[]#Floating values
trained_data=[]#Scalar and floating values
for features,lab in training_data:
    X.append(features)
    train_labels.append(lab)
for i in X:
    train_data = i.astype('float32')
    # print("Train data",train_data)
training_data1=[]
for i in train_data:
    trained_data= (i - np.min(i)) / (np.max(i) - np.min(i))
    training_data1=np.array(trained_data).ravel()
# print("Trained data",training_data1)
from tensorflow.keras import utils as np_utils
train_labels = np_utils.to_categorical(train_labels)
# print(train_labels)
model = Sequential()
model.add(Dense(units=15376,input_shape=(training_data1,),activation='relu'))
The error occurs on the last layer above, at input_shape. Why is this error occurring? The shape of the picture after resizing is 124*124.
As the error suggests, the problem is with the input shape.
To fix the error, you can replace the code
model.add(Dense(units=15376,input_shape=(training_data1,),activation='relu'))
with
model.add(Dense(units=15376,input_shape=(X.shape[0], X.shape[1], X.shape[2], X.shape[3]),activation='relu'))
In addition to that, you can make your code more efficient by replacing
for i in X:
    train_data = i.astype('float32')
    # print("Train data",train_data)
training_data1=[]
for i in train_data:
    trained_data= (i - np.min(i)) / (np.max(i) - np.min(i))
    training_data1=np.array(trained_data).ravel()
with
X = np.array(X, dtype = 'float32')
X = (X - np.min(X))/(np.max(X) - np.min(X))
Complete working code is shown below:
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Activation
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.metrics import categorical_crossentropy
import numpy as np
import cv2
import os
from random import randint
import matplotlib.pyplot as plt
#Loading the images
DataDir= r"E:\FYP\images_datasets\Training Data"
Categories=["Badshahi-Mosque"]
for category in Categories:
    path=os.path.join(DataDir,category)
    for img in os.listdir(path):
        img_arr=cv2.imread(os.path.join(path,img),cv2.IMREAD_GRAYSCALE)
        plt.imshow(img_arr,cmap="gray")
        break
    break
#Resizing the image
IMG_SIZE=(124,124)
new_array=cv2.resize(img_arr,(IMG_SIZE))
plt.imshow(new_array,cmap="gray")
plt.show()
print(new_array.shape)
training_data=[]
class_num1=[]
#Training the data
def create_training_data():
    for category in Categories:
        path=os.path.join(DataDir,category)
        class_num=Categories.index(category)
        for img in os.listdir(path):
            try:
                img_arr=cv2.imread(os.path.join(path,img))
                new_array=cv2.resize(img_arr,(IMG_SIZE))
                training_data.append([new_array,class_num])
                class_num1.append([class_num])
            except Exception as e:
                pass
create_training_data()
print("Length of the training data is:",len(training_data))
classes = np.unique(class_num1)
nClasses = len(classes)
print('Total number of outputs : ', nClasses)
print('Output classes being able to be classified: ', classes)
print(training_data[0][1])
import random
random.shuffle(training_data)
for i in training_data[:5]:
print("Labeling values before on hot enc are:",i[1])
import numpy as np
X=[]
train_labels=[]#One hot encoding values
train_data=[]#Floating values
trained_data=[]#Scalar and floating values
for features,lab in training_data:
    X.append(features)
    train_labels.append(lab)
temp = 0
X = np.array(X, dtype = 'float32')
X = (X - np.min(X))/(np.max(X) - np.min(X))
from tensorflow.keras import utils as np_utils
train_labels = np_utils.to_categorical(train_labels)
# print(train_labels)
model = Sequential()
model.add(Dense(units=15376,input_shape=(X.shape[0], X.shape[1], X.shape[2], X.shape[3]),activation='relu'))
Hope this helps. Happy Learning!
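A caveat on the input_shape fix above (hedged, since it goes beyond the original answer): in Keras, input_shape describes a single sample and excludes the batch axis, so passing X.shape[0] (the number of images) bakes the dataset size into the model. For data of shape (num_samples, 124, 124, 3), a common pattern is to flatten each sample before the Dense layer:

from tensorflow.keras.layers import Flatten

# Sketch, assuming X has shape (num_samples, 124, 124, 3):
model = Sequential()
model.add(Flatten(input_shape=X.shape[1:]))   # per-sample shape, batch axis omitted
model.add(Dense(units=15376, activation='relu'))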

LSTM Keras confusion

@enumaris, thank you for your answer. I'll try to explain my approach a bit:
I pushed the video frames through a ResNet model and got feature shapes of (k, 2048). I have split the data into train/validation and test folders. Then I was writing this script:
from keras.models import Sequential
from keras.layers import LSTM
from keras.layers import Activation, Dropout, Dense
import tensorflow as tf
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import argparse
import random
import cv2
import os
dataTrain = []
labelsTrain = []
# Prepare the Training Data. The .txt files contain the name of the
# file and the label, which is 0, 1, or 2 based on which class the video
# belongs to (nameVideo.npy 0)
with open('D:...\Data\/train_files.txt') as f:
    trainingList = f.readlines()
for line in trainingList:
    npyFiles = line.split( )
    loadTrainingData = np.load(npyFiles[0])
    dataTrain.append(loadTrainingData)
    labelsTrain.append(npyFiles[1])
dataNp = np.array(dataTrain, dtype=object)
labelsNp = np.array(labelsTrain, dtype=object)
f.close()
dataVal = []
labelsVal = []
# Prepare the Validation Data
with open('D:\...\Data\/val_files.txt') as f:
    valList = f.readlines()
for line in valList:
    npyValFiles = line.split( )
    loadValData = np.load(npyValFiles[0])
    dataVal.append(loadValData)
    labelsVal.append(npyValFiles[1])
f.close()
print(len(dataVal))
model = Sequential()
model.add(LSTM(32,
               batch_input_shape=(None, None, 1),
               return_sequences=True))
model.add(LSTM(32, return_sequences=True))
model.add(LSTM(32))
model.add(Dense(10, activation='softmax'))
model.compile(loss='mean_absolute_error',
              optimizer='adam',
              metrics=['accuracy'])
model.summary()
history = model.fit(dataTrain, labelsTrain,
                    epochs=10,
                    validation_data=(dataVal, labelsVal))
Which results in the following error:
ValueError: Error when checking model input: the list of Numpy arrays that you are passing to your model is not the size the model expected. Expected to see 1 array(s), but instead got the following list of 3521 arrays.
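A sketch of a likely fix (an assumption, since the thread ends here): the error means dataTrain is a Python list of 3,521 variable-length arrays rather than one 3-D array, which Keras cannot batch. Padding the sequences to a common length produces a single (num_samples, max_len, 2048) array; note the model's batch_input_shape would also need its feature dimension changed from 1 to 2048 to match the ResNet features:

# Sketch: pad the variable-length (k, 2048) feature sequences so Keras sees
# one 3-D array. padding='post' appends zeros after each sequence.
from keras.preprocessing.sequence import pad_sequences

trainArray = pad_sequences(dataTrain, dtype='float32', padding='post')
valArray = pad_sequences(dataVal, dtype='float32', padding='post')
trainLabels = np.array(labelsTrain, dtype='float32')
valLabels = np.array(labelsVal, dtype='float32')

history = model.fit(trainArray, trainLabels,
                    epochs=10,
                    validation_data=(valArray, valLabels))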
