Keras/TensorFlow training on GCP with TPU - Python

I am trying to train a model on GCP with Keras and TensorFlow 1.15.
For now my code is similar to what I could run on Colab, namely:
# TPUs
import tensorflow as tf
print(tf.__version__)
cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver("tpu-name")
tf.config.experimental_connect_to_cluster(cluster_resolver)
tf.tpu.experimental.initialize_tpu_system(cluster_resolver)
tpu_strategy = tf.distribute.experimental.TPUStrategy(cluster_resolver)
print("Number of accelerators: ", tpu_strategy.num_replicas_in_sync)
import numpy as np
np.random.seed(123) # for reproducibility
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Convolution2D, MaxPooling2D, Input
from tensorflow.keras import utils
from tensorflow.keras.datasets import mnist, cifar10
from tensorflow.keras.models import Model
# 4. Load data into train and test sets
(X_train, y_train) = load_data(sets="gs://BUCKETS/dogscats/train/",target_size=img_size)
(X_test, y_test) = load_data(sets="gs://BUCKETS/dogscats/valid/",target_size=img_size)
print(X_train.shape, X_test.shape)
# 5. Preprocess input data
#X_train = X_train.reshape(X_train.shape[0], 28, 28, 1)
#X_test = X_test.reshape(X_test.shape[0], 28, 28,1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255.0
X_test /= 255.0
print(y_train.shape, y_test.shape)
# 6. Preprocess class labels One hot encoding
Y_train = utils.to_categorical(y_train, 2)
Y_test = utils.to_categorical(y_test, 2)
print(Y_train.shape, Y_test.shape)
with tpu_strategy.scope():
    model = make_model((img_size, img_size, 3))
    # 8. Compile model
    model.compile(loss='categorical_crossentropy',
                  optimizer="sgd",
                  metrics=['accuracy'])
model.summary()
batch_size = 1250 * tpu_strategy.num_replicas_in_sync
# 9. Fit model on training data
model.fit(X_train, Y_train, steps_per_epoch=len(X_train)//batch_size,
          epochs=5, verbose=1)
But my data is in the bucket and my code is on a VM, so what do I have to do? I tried to load my data using "gs://BUCKETS", but it does not work. What should I do?
EDIT: I have added my code to load the data; I forgot it, sorry.
# imports assumed by this helper (they were not shown in the original snippet)
from os import listdir
from numpy import asarray
from sklearn.utils import shuffle
from tensorflow.keras.preprocessing.image import load_img, img_to_array

def load_data(sets="dogcats/train/", k=5000, target_size=250):
    # define location of dataset
    folder = sets
    photos, labels = list(), list()
    # determine class
    output = 0.0
    for i, dog in enumerate(listdir(folder + "dogs/")):
        if i >= k:
            break
        # load image
        photo = load_img(folder + "dogs/" + dog, target_size=(target_size, target_size))
        # convert to numpy array
        photo = img_to_array(photo)
        # store
        photos.append(photo)
        labels.append(output)
    output = 1.0
    for i, cat in enumerate(listdir(folder + "cats/")):
        if i >= k:
            break
        # load image
        photo = load_img(folder + "cats/" + cat, target_size=(target_size, target_size))
        # convert to numpy array
        photo = img_to_array(photo)
        # store
        photos.append(photo)
        labels.append(output)
    # convert to numpy arrays
    photos = asarray(photos)
    labels = asarray(labels)
    print(photos.shape, labels.shape)
    photos, labels = shuffle(photos, labels, random_state=0)
    return photos, labels
EDIT 2: To complete the answer of @daudnadeem, in case other people are in the same situation.
My goal was to get images from a bucket, and the code works well: it returns a bytes object. To turn it into an image, you just need to use the PIL library:
from PIL import Image
from io import BytesIO
import numpy as np
from google.cloud import storage
client = storage.Client()
bucket = client.get_bucket("BUCKETS")
blob = bucket.get_blob('dogscats/train/<you-will-need-to-point-to-a-file-and-not-a-directory>')
data = blob.download_as_string()
img = Image.open(BytesIO(data))
img = np.array(img)
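To go from a single blob to a whole "folder" of images, a hedged sketch like the one below can list every object under a prefix and stack the results; note that the prefix, the k limit and target_size are illustrative assumptions, not values from the original post:
from io import BytesIO
import numpy as np
from PIL import Image
from google.cloud import storage

def load_blobs(bucket_name="BUCKETS", prefix="dogscats/train/dogs/", k=5000, target_size=250):
    # List every object under the prefix and load it as a resized numpy array
    client = storage.Client()
    bucket = client.get_bucket(bucket_name)
    photos = []
    for i, blob in enumerate(bucket.list_blobs(prefix=prefix)):
        if i >= k:
            break
        if blob.name.endswith("/"):  # skip "directory" placeholder objects
            continue
        data = blob.download_as_string()
        img = Image.open(BytesIO(data)).resize((target_size, target_size))
        photos.append(np.asarray(img))
    return np.asarray(photos)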

(X_train, y_train) = load_data(sets="gs://BUCKETS/dogscats/train/",target_size=img_size)
(X_test, y_test) = load_data(sets="gs://BUCKETS/dogscats/valid/",target_size=img_size)
This obviously won't work since essentially all you've done is given sets a string. What you need to do is download this data as a string, and then use that.
First install the package: pip install google-cloud-storage or pip3 install google-cloud-storage (pip for Python, pip3 for Python 3).
You will need a service account to interact with GCP from your code, for authentication purposes; have a look at the GCP service account documentation.
When you get your service account as a json, you need to do one of two things:
Set it as an env variable:
export GOOGLE_APPLICATION_CREDENTIALS="/home/user/Downloads/[FILE_NAME].json"
or, my preferable workaround:
gcloud auth activate-service-account \
    <replace-with-email-from-json-file> \
    --key-file=<path/to/your/json/file> --project=<name-of-your-gcp-project>
Now let's look at how you can use the google-cloud-storage library to download your file as a string:
from google.cloud import storage
client = storage.Client()
bucket = client.get_bucket("BUCKETS")
blob = bucket.get_blob('dogscats/train/<you-will-need-to-point-to-a-file-and-not-a-directory>')
data = blob.download_as_string()
Now that you have your data as a string, you can simply pass data into load_data like so: (X_train, y_train) = load_data(sets=data, target_size=img_size)
It sounds complex, but here's a quick pseudo layout:
Install google-cloud-storage
Go to Google Cloud Platform Console -> IAM & Admin -> Service Accounts
Create a service account with the relevant permissions (google-cloud-storage)
Download the (JSON) key file, and remember its location
Activate the service account
Download the file as a string and pass that string to your load_data(data)
Hope that helps!

Related

How to put skimage imread_collection through tensorflow

I'm trying to put a collection of images through a neural network, but I can't figure out how to get a large collection of images to go into a tensorflow model, as trying to convert the collection into a numpy array causes a memory error.
I should note that I am very new to tensorflow.
import numpy as np
from skimage.io import imread_collection
from tensorflow import keras
from tensorflow.keras import layers
def gen(arr):return(i.reshape(400*600*3) for i in arr) # Only used in Attempt2.
labelFile=open("lables_text_file.txt","r")
labels=labelFile.read()
labelFile.close()
labels = getTrain(labels)  # Converts to a tuple containing the labels in order.
data = imread_collection("path_to_images/*.jpg", conserve_memory=True)
train=data[:-len(data)//4]
trainLabels=labels[:-len(data)//4]
test=data[-len(data)//4:]
testLabels=labels[-len(data)//4:]
#train = train.reshape(-1, 400*600*3) # Attempt1
#test = test.reshape(-1, 400*600*3) # Attempt1
#train = gen(train) # Attempt2
#test = gen(test) # Attempt2
trainLabels = keras.utils.to_categorical(trainLabels, 23)
testLabels = keras.utils.to_categorical(testLabels, 23)
model=keras.Sequential([keras.Input(shape=(400*600*3,)),
layers.Dense(600, name='hidden1', activation='relu'),
layers.Dense(400, name='hidden2', activation='relu'),
layers.Dense(46, name='hidden3', activation='relu'),
layers.Dense(23, activation="softmax")])
optimizer = keras.optimizers.Adam(learning_rate=0.0015)
model.compile(loss=keras.losses.CategoricalCrossentropy(), optimizer=optimizer, metrics=[keras.metrics.CategoricalAccuracy()])
model.fit(train,trainLabels,batch_size=128,epochs=8,validation_data=(test,testLabels), shuffle=True)
When I run the code as is, this is the result:
ValueError: Failed to find data adapter that can handle input: <class 'skimage.io.collection.ImageCollection'>, <class 'numpy.ndarray'>
When I try to use Attempt1, this is the result:
AttributeError: 'ImageCollection' object has no attribute 'reshape'
When I try to use Attempt2, this is the result:
ValueError: `y` argument is not supported when using python generator as input.
How can I put the data into `model.fit`, such that it will successfully train the neural network?
I think I may have solved the problems.
Working code:
import numpy as np
from skimage.io import imread_collection
from tensorflow import keras
from tensorflow.keras import layers
def gen(arr,labels):return((arr[i].reshape(-1,400*600*3),labels[i].reshape(-1,23)) for i in range(len(arr)))
labelFile=open("lables_text_file.txt","r")
labels=labelFile.read()
labelFile.close()
labels = getTrain(labels)  # Converts to a tuple containing the labels in order.
data = imread_collection("path_to_images/*.jpg", conserve_memory=True)
train=data[:-len(data)//4]
trainLabels=labels[:-len(data)//4]
test=data[-len(data)//4:]
testLabels=labels[-len(data)//4:]
#train = train.reshape(-1, 400*600*3) # Attempt1
#test = test.reshape(-1, 400*600*3) # Attempt1
trainLabels = keras.utils.to_categorical(trainLabels, 23)
testLabels = keras.utils.to_categorical(testLabels, 23)
train = gen(train,trainLabels) # Attempt2
test = gen(test,testLabels) # Attempt2
model=keras.Sequential([keras.Input(shape=(400*600*3,)),
layers.Dense(600, name='hidden1', activation='relu'),
layers.Dense(400, name='hidden2', activation='relu'),
layers.Dense(46, name='hidden3', activation='relu'),
layers.Dense(23, activation="softmax")])
optimizer = keras.optimizers.Adam(learning_rate=0.0015)
model.compile(loss=keras.losses.CategoricalCrossentropy(), optimizer=optimizer, metrics=[keras.metrics.CategoricalAccuracy()])
model.fit(train,None,batch_size=128,epochs=8,validation_data=(test,testLabels), shuffle=True)
The solution was to pass in a generator that returns two-tuples containing the input and label (instead of passing the labels in directly), but there were other problems that I may include in this answer if I get the time.
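As an additional hedged sketch (not part of the fix above), the same streaming idea can also be expressed with tf.data.Dataset.from_generator, which lets Keras handle batching; this assumes TF 2.4+ for output_signature, and that train still refers to the ImageCollection slice and trainLabels to the one-hot labels:
import tensorflow as tf

def pair_gen(collection, onehot_labels):
    # Yield one (flattened image, one-hot label) pair at a time so the whole
    # collection never has to sit in memory at once.
    for img, lab in zip(collection, onehot_labels):
        yield img.reshape(400 * 600 * 3).astype("float32"), lab

train_ds = tf.data.Dataset.from_generator(
    lambda: pair_gen(train, trainLabels),
    output_signature=(
        tf.TensorSpec(shape=(400 * 600 * 3,), dtype=tf.float32),
        tf.TensorSpec(shape=(23,), dtype=tf.float32),
    ),
).batch(128)

# model.fit(train_ds, epochs=8)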

The accuracy problem of hand sign gesture recognition using a CNN in Python

I'm working on my senior project at my university and I have only 2 days to fix this problem. I created a hand gesture recognition model using a CNN in Python, trained on 78,000 images of 50x50 px. But I got stuck in the last part of my model: I cannot improve my accuracy. When I start to train with 100 epochs, the first 15 epochs show 0.039 accuracy, which is horrible, so I don't wait for the training to finish. Maybe it happens because of the values of Conv2D or pooling, because I don't know how to choose the correct values for Conv2D, pooling, etc.
I'm new and I could not fix the problem. If you help me, I will be grateful.
The code I wrote is given below:
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import Dropout
from keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
import pickle
import cv2
import os
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from PIL import Image
from numpy import asarray
DATADIR = "asl_alphabet_train"
CATEGORIES = ["A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Y","Z"]
X_train = []
y_train = []
X_test=[]
y_test=[]
IMG_SIZE=50
def create_training_data():
    for category in CATEGORIES:
        path = os.path.join(DATADIR, category)  # create path to each category folder
        class_num = CATEGORIES.index(category)  # get the classification index
        for img in tqdm(os.listdir(path)):  # iterate over each image per category
            try:
                img_array = cv2.imread(os.path.join(path, img))  # convert to array
                #new_array = cv2.resize(img_array, (28, 50))  # resize to normalize data size
                X_train.append(img_array)  # add this to our training data
                y_train.append(class_num)  # add the label to y_train
            except Exception as e:  # in the interest of keeping the output clean...
                pass
create_training_data()
X_train = asarray(X_train)
y_train = asarray(y_train)
"""
nsamples, nx, ny = X_train.shape
X_train = X_train.reshape((nsamples,nx*ny))
"""
X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size=0.2,random_state=0)
N = y_train.size
M = y_train.max()+1
resultArray = np.zeros((N,M),int)
idx = (np.arange(N)*M) + y_train
resultArray.ravel()[idx] = 1
y_train=resultArray
classifier=Sequential()
#convolution step
classifier.add(Convolution2D(filters=96, input_shape=(50,50,3), kernel_size=(11,11), padding='valid',activation="relu"))
#pooling step
classifier.add(MaxPooling2D(pool_size=(2,2)))
#convolution step
classifier.add(Convolution2D(filters=256,kernel_size=(11,11),padding="valid",activation="relu"))
#pooling step
classifier.add(MaxPooling2D(pool_size=(2,2)))
classifier.add(Convolution2D(filters=384,kernel_size=(3,3),padding="valid",activation="relu"))
classifier.add(MaxPooling2D(pool_size=(2,2)))
#flatten step
classifier.add(Flatten())
#Dense(Fully connected step)
classifier.add(Dense(output_dim=128,activation="relu"))
#Dropout to decrease the possibility of overfitting
classifier.add(Dropout(0.5))
#Dense to determine the output
classifier.add(Dense(output_dim=26,activation="softmax"))
#compile step
classifier.compile(optimizer="adam",loss="categorical_crossentropy",metrics=["accuracy"])
classifier.fit(X_train,y_train,epochs=100,batch_size=32)
filename="CNN_TEST.sav"
pickle.dump(classifier, open(filename, 'wb'))
y_pred=classifier.predict(X_test)
print(y_pred)
I would recommend the following (a sketch applying points 1) to 3) follows below):
1) Reduce the kernel size in the first two convolutional layers of your model.
2) I believe the MaxPooling layer is not necessary after every convolutional layer; do verify this.
3) A Dropout of 0.5 could drop a large number of essential neurons; you might want to lower it.
4) Vary the number of epochs and see how your model performs each time.
Plot "train accuracy vs val accuracy" and "train loss vs val loss" for each attempt and see whether your model overfits or underfits.

Model gives great training and testing accuracy but does not work well when doing prediction

Hi there, I am doing transfer learning with the VGG16 pre-trained model. I want to extract features from VGG16 to build my own model, as I only have access to a CPU. Here is my build and train setup.
import os
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import confusion_matrix
from keras.applications import Xception, VGG16, ResNet50
conv_base = VGG16(weights='imagenet',include_top=False,input_shape=(224, 224, 3))
conv_base.summary()
base_dir = 'NewDCDatatset'
train_dir = os.path.join(base_dir, 'Train')
validation_dir = os.path.join(base_dir, 'Validation')
test_dir = os.path.join(base_dir, 'Test')
datagen = ImageDataGenerator(rescale=1./255)
batch_size = 5
def extract_features(directory, sample_count):
    features = np.zeros(shape=(sample_count, 7, 7, 512))
    labels = np.zeros(shape=(sample_count, 2))
    generator = datagen.flow_from_directory(directory, target_size=(224, 224), batch_size=batch_size, class_mode='categorical')
    i = 0
    for inputs_batch, labels_batch in generator:
        features_batch = conv_base.predict(inputs_batch)
        features[i * batch_size : (i + 1) * batch_size] = features_batch
        labels[i * batch_size : (i + 1) * batch_size] = labels_batch
        i += 1
        if i * batch_size >= sample_count:
            break
    return features, labels
train_features, train_labels = extract_features(train_dir, 2000)
validation_features, validation_labels = extract_features(validation_dir,420 )
test_features, test_labels = extract_features(test_dir, 420)
train_features = np.reshape(train_features, (2000, 7 * 7 * 512))
validation_features = np.reshape(validation_features, (420, 7 * 7 * 512))
test_features = np.reshape(test_features, (420, 7 * 7 * 512))
from keras import models
from keras import layers
from keras import optimizers
model = models.Sequential()
model.add(layers.Dense(64, activation='relu', input_dim=7 * 7 * 512))
model.add(layers.Dense(2, activation='softmax'))
model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['acc'])
history = model.fit(train_features, train_labels,epochs=2,batch_size=5,shuffle=True)
print(model.evaluate(test_features,test_labels))
model.save('TLFACE.h5')
#predictions = model.predict_generator(test_features,steps = 5)
#print(predictions)
And here is the setup for how I am predicting new images with my model to classify cat vs dog, but I am not getting accurate results; I am seldom able to correctly classify any image. I don't know what mistake I am making; is it a matter of how the image is resized when predicting?
from keras.models import load_model
deep = load_model('TLFACE.h5')
from PIL import Image
import numpy as np
import cv2
file_nam = '4705.jpg'
img = cv2.imread(file_nam)
img = cv2.imshow('frame',img)
cv2.waitKey(1000)
img = Image.open(file_nam).convert("L")
img = img.resize((256,98))
im2arr = np.array(img)
im2arr = im2arr.reshape(1,25088)
# Predicting the Test set results
y_pred = deep.predict(im2arr)
print(y_pred)
print(y_pred[0][0])
According to your code, your model receives a feature vector of size 1 x 7*7*512 = 1 x 25088. These features are the encoding of an image by the conv_base model (implemented in your extract_features method).
However, in your example at prediction time you just take an image, reshape it to (256, 98), then flatten it to (1, 25088). You completely skip the extract_features phase. In other words, you try to predict on totally different data than you used during training (and for the automated evaluation after fitting). You need to use the exact same preprocessing for prediction: first rescale the image to size 224x224x3, then extract the features using the conv_base model, and finally use your new model to perform the prediction over the extracted features.
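A hedged sketch of that prediction path, assuming conv_base and the trained dense model (loaded as deep) are both available in the same script, and using the question's file name as a placeholder:
import numpy as np
from keras.preprocessing import image

img = image.load_img("4705.jpg", target_size=(224, 224))  # same size as in training
x = image.img_to_array(img) / 255.0                        # same rescaling as the ImageDataGenerator
x = np.expand_dims(x, axis=0)                              # shape (1, 224, 224, 3)
features = conv_base.predict(x)                            # shape (1, 7, 7, 512)
features = features.reshape(1, 7 * 7 * 512)
y_pred = deep.predict(features)                            # class probabilities for the 2 classes
print(y_pred)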
Having said that, I want to point out that this is a very "dirty" implementation; it is not organized properly and it is easy to get confused that way. You actually want to build a single model that performs classification on an image, rather than doing all the heavy coding of extracting features, etc. To do so you can just use something like:
pretrained_vgg16 = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
pretrained_vgg16.trainable = False # This is important if you don't want to modify your base model weights during training.
new_model = models.Sequential([pretrained_vgg16,
                               layers.Flatten(),
                               layers.Dense(64, activation='relu'),
                               layers.Dense(2, activation='softmax')])
In addition, please note that it is very important to decide whether or not you want to modify the pre-trained model weights during training; if not, you need to invoke the pretrained_vgg16.trainable = False line.
Now you can use new_model.fit with a generator that loads the images and rescales them to 224x224x3, without explicitly extracting the features; a hedged sketch follows below. Another important comment is that the original VGG16 model was probably trained on normalized data (meaning pixel values were scaled to the range [0, 1]), so you also need to apply the same preprocessing to your images for ideal results.
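A hedged sketch of that training setup, reusing the directory generators from the question (the epoch count and batch size are simply the values used above, and fit_generator is assumed for a standalone Keras 2.x install):
from keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(rescale=1./255)
train_gen = datagen.flow_from_directory(train_dir, target_size=(224, 224), batch_size=5, class_mode="categorical")
val_gen = datagen.flow_from_directory(validation_dir, target_size=(224, 224), batch_size=5, class_mode="categorical")

new_model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["acc"])
new_model.fit_generator(train_gen, epochs=2, validation_data=val_gen)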

How to use tensorflow_io's IODataset?

I'm trying to write a program that uses malicious pcap files as datasets and predicts whether other pcap files have malicious packets in them.
After some digging through the TensorFlow documentation, I found TensorFlow I/O (tensorflow_io), but I can't figure out how to use the dataset to create a model and predict with it.
Here's my code:
%tensorflow_version 2.x
import tensorflow as tf
import numpy as np
from tensorflow import keras
try:
    import tensorflow_io as tfio
    import tensorflow_datasets as tfds
except:
    !pip install tensorflow-io
    !pip install tensorflow-datasets
    import tensorflow_io as tfio
    import tensorflow_datasets as tfds
# print(tf.__version__)
dataset = tfio.IODataset.from_pcap("dataset.pcap")
print(dataset) # <PcapIODataset shapes: ((), ()), types: (tf.float64, tf.string)>
(Using Google Colab)
I've tried looking for answers online, but couldn't find any.
I have downloaded two pcap files and concatenated them, then extracted the packet_timestamp and packet_data. You will need to preprocess the packet_data as per your requirements. If you have any labels to add, you can add them to the training dataset (in the model example below I have created dummy labels of all zeros and added them as a column); if the labels are in a file, you can zip them together with the pcap records. Passing a dataset of (feature, label) pairs is all that's needed for Model.fit and Model.evaluate:
Below is an example of packet_data preprocessing; you could, for example, modify it so that valid packet_data gets a "valid" label and everything else a "malicious" one.
%tensorflow_version 2.x
import tensorflow as tf
import tensorflow_io as tfio
import numpy as np
# Create an IODataset from a pcap file
first_file = tfio.IODataset.from_pcap('/content/fuzz-2006-06-26-2594.pcap')
second_file = tfio.IODataset.from_pcap(['/content/fuzz-2006-08-27-19853.pcap'])
# Concatenate the Read Files
feature = first_file.concatenate(second_file)
# List for pcap
packet_timestamp_list = []
packet_data_list = []
# some dummy labels
labels = []
packets_total = 0
for v in feature:
    (packet_timestamp, packet_data) = v
    packet_timestamp_list.append(packet_timestamp.numpy())
    packet_data_list.append(packet_data.numpy())
    labels.append(0)
    if packets_total == 0:
        assert np.isclose(
            packet_timestamp.numpy()[0], 1084443427.311224, rtol=1e-15
        )  # we know this is the correct value in the test pcap file
        assert (
            len(packet_data.numpy()[0]) == 62
        )  # we know this is the correct packet data buffer length in the test pcap file
    packets_total += 1
assert (
    packets_total == 43
)  # we know this is the correct number of packets in the test pcap file
Below is an example of using it in a model. The model won't work as-is because I have not handled packet_data, which is of string type; do the preprocessing as explained, per your requirements, and then use it in the model.
%tensorflow_version 2.x
import tensorflow as tf
import tensorflow_io as tfio
import numpy as np
# Create an IODataset from a pcap file
first_file = tfio.IODataset.from_pcap('/content/fuzz-2006-06-26-2594.pcap')
second_file = tfio.IODataset.from_pcap(['/content/fuzz-2006-08-27-19853.pcap'])
# Concatenate the Read Files
feature = first_file.concatenate(second_file)
# List for pcap
packet_timestamp = []
packet_data = []
# some dummy labels
labels = []
# add 0 as label. You can use your actual labels here
for v in feature:
    (timestamp, data) = v
    packet_timestamp.append(timestamp.numpy())
    packet_data.append(data.numpy())
    labels.append(0)
## Do the preprocessing of packet_data here
# Add labels to the training data
# Preprocess the packet_data to convert string to meaningful value and use here
train_ds = tf.data.Dataset.from_tensor_slices(((packet_timestamp,packet_data), labels))
# Set the batch size
train_ds = train_ds.shuffle(5000).batch(32)
##### PROGRAM WILL RUN SUCCESSFULLY TILL HERE. TO USE IN THE MODEL DO THE PREPROCESSING OF PACKET DATA AS EXPLAINED ###
# Have defined some simple model
model = tf.keras.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(100),
    tf.keras.layers.Dense(10)
])
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
model.fit(train_ds, epochs=2)
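As a hedged illustration of the preprocessing step left open above, one possible sketch turns each packet's raw bytes into a fixed-length numeric vector; the 128-byte length and zero padding are assumptions, not requirements:
import numpy as np

MAX_LEN = 128  # assumed fixed packet length; adjust to your data

def packet_bytes_to_vector(raw_bytes, max_len=MAX_LEN):
    # Interpret the raw packet bytes as unsigned integers in [0, 255]
    arr = np.frombuffer(raw_bytes, dtype=np.uint8).astype(np.float32)
    # Truncate or zero-pad to a fixed length so every sample has the same shape
    if len(arr) >= max_len:
        arr = arr[:max_len]
    else:
        arr = np.pad(arr, (0, max_len - len(arr)))
    return arr / 255.0  # scale to [0, 1]

# Replace the raw byte strings collected above with numeric vectors, e.g.:
# packet_data = [packet_bytes_to_vector(p) for p in packet_data]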
Hope this answers your question. Happy Learning.

How to use an image set in an RNN model

Hello, I am trying to build my first RNN using Keras and TensorFlow, but I am getting stuck on an issue of reshaping my images to fit into the model.
I have looked at this post but could not figure out the reshaping:
Keras - Input a 3 channel image into LSTM
What I have is a bunch of images that are taken at every frame of a video. I saved all the frames outside of Python, so I have a very large folder of images. I separated the frames into segments of 21 frames, so 21 images per motion that I want to capture, and I want to read in these 21 images as one sequence. I have the same sequence captured from multiple cameras/angles, which I want to use in this model. What I want to try is to model a movement and see whether a person is doing this movement or not, so it is basically a binary yes/no model. Not the most sophisticated, but it's a learning process for using this model and Keras.
I need help figuring out how to use these images inside the Keras model. I have looked at a few tutorials on the MNIST data set, but they didn't help me figure this out.
Any help will be appreciated.
This is the error that is given to me when I try to train the model
ValueError: Error when checking input: expected lstm_1_input to have 3 dimensions, but got array with shape (2026, 200, 200, 1)
My code is this:
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import LSTM
from tqdm import tqdm
import cv2
import os
import numpy as np
imageSize = 200
#create labels for each image
def labelImage(img):
    wordLabel = img.split('.')[-3]
    #Conversion to one hot array [lat,not]
    if wordLabel == "FWAC":
        return [1,0]
    else:
        return [0,1]

#Process images and add labels
#Convert data into an array and add its label
def makeTrainingData():
    print("Creating Training Data")
    trainingData = []
    for img in tqdm(os.listdir(trainDir)):
        label = labelImage(img)
        path = os.path.join(trainDir, img)
        img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
        img = cv2.resize(img, (imageSize, imageSize))
        trainingData.append([np.array(img), np.array(label)])
    #Save the array file to load it into other models if needed
    np.save("trainingData.npy", trainingData)
    print("Training Data Saved")
    return trainingData

#process the testing data in the same manner
def processTestData():
    print("Creating Testing Data")
    testData = []
    for img in tqdm(os.listdir(testDri)):
        print("image", img)
        path = os.path.join(testDri, img)
        imgNum = img.split(".")[0]
        img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
        img = cv2.resize(img, (imageSize, imageSize))
        testData.append([np.array(img), imgNum])
    np.save("testingData.npy", testData)
    print("Testing Data Saved")
    return testData
rnnSize = 512
model = Sequential()
model.add(LSTM(rnnSize, input_shape=(imageSize, imageSize)))
model.add(Dense(1024))
model.add(Activation('relu'))
model.add(Dense(50))
model.add(Activation('sigmoid'))
model.add(Dense(3))
model.add(Activation('softmax'))
model.compile(loss='mean_squared_error', optimizer='adam',metrics=['accuracy'])
#Data
trainDir = "D:/TrainingDataSets/TrainingSet/"
testDri = "D:/TrainingDataSets/TestingSet/"
#trainData = makeTrainingData()
#testData = processTestData()
trainData = np.load('trainingData.npy')
testData = np.load("testingData.npy")
#resize the image to this See above
train = trainData[:-500]
test = trainData[-200:]
x = []
y = []
for xi in trainData:
    x.append(xi[0].reshape((-1, imageSize, imageSize)))
    y.append(xi[1])
x_train = np.array([i[0] for i in train]).reshape(-1,imageSize, imageSize,1)
y_train = [i[1] for i in train]
test_x = np.array([i[0] for i in test]).reshape(-1,imageSize , imageSize,1)
test_y = [i[1] for i in test]
epoch = 5
batchSize = 100
model.fit(x_train, y_train, epochs=epoch, batch_size= batchSize, verbose=1, shuffle=False)
For the error, add this line before the dense layers:
model.add(Flatten())
Beforehand, you should import:
from keras.layers import Flatten
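Separately from the answer above, a hedged sketch of how the question's 21-frame segments could be grouped into sequences for the LSTM; this assumes the frames in x_train are already ordered by segment and that each segment shares one label:
import numpy as np

frames_per_segment = 21
features = imageSize * imageSize  # 200*200 grayscale pixels per frame

num_segments = len(x_train) // frames_per_segment
x_seq = x_train[:num_segments * frames_per_segment].reshape(num_segments, frames_per_segment, features)
# One label per segment (here simply the label of the segment's first frame)
y_seq = np.array(y_train)[::frames_per_segment][:num_segments]

# The LSTM would then expect input_shape=(frames_per_segment, features):
# model.add(LSTM(rnnSize, input_shape=(frames_per_segment, features)))
# model.fit(x_seq, y_seq, epochs=epoch, batch_size=batchSize)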
