Train image multi-label classification model using TensorFlow or any suitable model - Python

The dataframe below contains image paths and the output columns (A0 to A6).
The output columns show which categories an image belongs to. There are
seven different categories (A0 ... A6), and an image can belong to multiple
categories at once. The total image count is 30,000.
I want to train a model on this data using TensorFlow. I need help with the
data_generator for this data, as my resources are getting exhausted:
even for a batch size of 2 it raises 'ResourceExhaustedError'.
My CPU has 13 GB of RAM and my GPU has 15 GB.
DataFrame:

Image Path    A0  A1  A2  A3  A4  A5  A6
Img Path1      1   1   0   0   0   0   0
Img Path2      1   1   0   0   0   0   0
Img Path3      0   1   1   0   0   0   0
...
Img Pathn      0   0   0   0   0   0   1
My Code for model building:
def data_generator():
    for i, study_instance in enumerate(meta_seg.StudyInstanceUID.unique()):
        for dcm in os.listdir(DATA_DIR + f"/train_images/{study_instance}"):
            train_labels = []
            path = DATA_DIR + f"/train_images/{study_instance}/{dcm}"
            #print(path)
            img = load_dicom(path)
            img = np.resize(img, (512, 512))
            # normalize image
            img = img / 255.0
            img = tf.expand_dims(img, axis=-1)
            img = tf.image.grayscale_to_rgb(img)
            train_labels.extend([
                meta_seg.loc[i, "A0"],
                meta_seg.loc[i, "A1"],
                meta_seg.loc[i, "A2"],
                meta_seg.loc[i, "A3"],
                meta_seg.loc[i, "A4"],
                meta_seg.loc[i, "A5"],
                meta_seg.loc[i, "A6"]])
            yield img, train_labels

train_data = tf.data.Dataset.from_generator(data_generator, (tf.float32, tf.int8))

def configure_for_performance(data):
    data = data.cache()
    data = data.batch(2)
    data = data.prefetch(buffer_size=tf.data.AUTOTUNE)
    return data

train_data = configure_for_performance(train_data)
val_data = configure_for_performance(val_data)

def cnn_model():
    model = Sequential()
    # Layer 1 ...
    # Layer 2 ...

I have sample code that may help with the database working-memory problem. Where the sample uses a single label number, you can replace it with the label data [ A0, A1, A2, A3, A4, A5, A6 ], and try changing the loss and optimizer functions accordingly.
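As a minimal sketch of that idea for your multi-label case (assuming the DataFrame is named meta_seg, has an "Image Path" column plus A0 to A6, and that load_dicom returns a 2-D array, as in your question), you can stream the images lazily instead of caching them, which is usually what avoids ResourceExhaustedError:
import numpy as np
import tensorflow as tf

LABEL_COLS = ["A0", "A1", "A2", "A3", "A4", "A5", "A6"]

def data_generator():
    # Iterate over DataFrame rows lazily; only the image currently
    # being yielded is held in memory.
    for _, row in meta_seg.iterrows():
        img = load_dicom(row["Image Path"])        # helper assumed from the question
        img = tf.image.resize(img[..., np.newaxis], (512, 512))
        img = tf.image.grayscale_to_rgb(img) / 255.0
        labels = row[LABEL_COLS].to_numpy().astype("float32")
        yield img, labels

train_data = tf.data.Dataset.from_generator(
    data_generator,
    output_signature=(
        tf.TensorSpec(shape=(512, 512, 3), dtype=tf.float32),
        tf.TensorSpec(shape=(7,), dtype=tf.float32),
    ),
)

# No .cache() here: caching 30,000 decoded 512x512x3 float images is what exhausts memory.
train_data = train_data.batch(8).prefetch(tf.data.AUTOTUNE)

# Multi-label head: one sigmoid unit per category with binary cross-entropy loss.
# model.add(tf.keras.layers.Dense(7, activation="sigmoid"))
# model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])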
The sample below is one I am using with the "Street Fighters" game, with discrete outputs.
dataset = tf.data.Dataset.from_tensor_slices((tf.constant(np.reshape(output_picture[np.argmax(result)], (1, 1, 1, 60, 78, 3)) , dtype=tf.float32), tf.constant(np.reshape(action, (1, 1, 2, 3, 2, 1)))))
Sample code: use database buffers (HDF5).
import os
from os.path import exists
import tensorflow as tf
import h5py
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
None
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
config = tf.config.experimental.set_memory_growth(physical_devices[0], True)
print(physical_devices)
print(config)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
filters = 32
kernel_size = (3, 3)
strides = 1
database_buffer = "F:\\models\\buffer\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
database_buffer_dir = os.path.dirname(database_buffer)
checkpoint_path = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
checkpoint_dir = os.path.dirname(checkpoint_path)
if not exists(checkpoint_dir):
    os.mkdir(checkpoint_dir)
    print("Create directory: " + checkpoint_dir)

if not exists(database_buffer_dir):
    os.mkdir(database_buffer_dir)
    print("Create directory: " + database_buffer_dir)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Functions
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
def conv_batchnorm_relu(filters, kernel_size, strides=1):
    model = tf.keras.models.Sequential([
        tf.keras.layers.InputLayer(input_shape=(32, 32, 3)),
        tf.keras.layers.Conv2D(filters=filters, kernel_size=kernel_size, strides=strides, padding='same'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.ReLU(),
    ])
    model.add(tf.keras.layers.Flatten())
    model.add(tf.keras.layers.Dense(64))
    model.add(tf.keras.layers.Dense(10))
    model.summary()
    return model
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: DataSet
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.cifar10.load_data()
# Create hdf5 file
hdf5_file = h5py.File(database_buffer, mode='w')
# Train images
hdf5_file['x_train'] = train_images
hdf5_file['y_train'] = train_labels
# Test images
hdf5_file['x_test'] = test_images
hdf5_file['y_test'] = test_labels
hdf5_file.close()
# Visualize dataset train sample
hdf5_file = h5py.File(database_buffer, mode='r')
x_train = hdf5_file['x_train'][0: 10000]
x_test = hdf5_file['x_test'][0: 100]
y_train = hdf5_file['y_train'][0: 10000]
y_test = hdf5_file['y_test'][0: 100]
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Optimizer
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
optimizer = tf.keras.optimizers.Nadam( learning_rate=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, name='Nadam' )
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Loss Fn
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
lossfn = tf.keras.losses.MeanSquaredLogarithmicError(reduction=tf.keras.losses.Reduction.AUTO, name='mean_squared_logarithmic_error')
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Summary
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model = conv_batchnorm_relu(filters, kernel_size, strides=1)
model.compile(optimizer=optimizer, loss=lossfn, metrics=['accuracy'])
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: FileWriter
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
if exists(checkpoint_path):
    model.load_weights(checkpoint_path)
    print("model load: " + checkpoint_path)
    input("Press Any Key!")
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
history = model.fit(x_train, y_train, epochs=1 ,validation_data=(x_train, y_train))
model.save_weights(checkpoint_path)
input('...')
Playable with the famous retro game; I like his hurricane kick actions.

Related

I am having trouble displaying images for prediction data. ValueError: Expected image array to have rank 3 (single image). Got array with shape: ()

Here I am trying to display the prediction data and the image along with it, but I am struggling quite a lot to solve this. I expect this to show a random image from the prediction set and indicate whether it is a normal X-ray or the other class.
I have tried a lot to show this data accurately. Sometimes I have succeeded in showing the image data (but only from the test data), and I have mainly struggled to show the outcome of the prediction. Furthermore, I came across the code shown above, but I am having trouble fixing it and am very unsure what to do.
You can create a dataset and use dataset.shuffle() for random input; please follow the templates, which are easy to adapt. You can do more experiments by adding:
Random sequences
Image augmentations (see the sketch after the shuffle sample below)
Multiple datasets, larger inputs, different image types
Masking or other techniques
Sample: dataset input shuffles
dataset = tf.data.Dataset.range(n)
dataset = dataset.shuffle(3, reshuffle_each_iteration=True)
dataset = dataset.shuffle(10, reshuffle_each_iteration=True)
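As a minimal sketch of the image-augmentation point above (a hypothetical pipeline, assuming a dataset of (image, label) pairs with float images in [0, 1]), augmentations can be applied on the fly inside Dataset.map:
import tensorflow as tf

def augment(image, label):
    # Cheap, label-preserving augmentations applied per sample.
    image = tf.image.random_flip_left_right(image)
    image = tf.image.random_brightness(image, max_delta=0.1)
    image = tf.clip_by_value(image, 0.0, 1.0)
    return image, label

dataset = dataset.shuffle(1024, reshuffle_each_iteration=True)
dataset = dataset.map(augment, num_parallel_calls=tf.data.AUTOTUNE)
dataset = dataset.batch(32).prefetch(tf.data.AUTOTUNE)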
Sample: << Random input ordering creates a more reliable training process, but not always; with a larger dataset the model needs to do the same work >>
import os
from os.path import exists
import tensorflow as tf
import tensorflow_io as tfio
import matplotlib.pyplot as plt
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
None
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
config = tf.config.experimental.set_memory_growth(physical_devices[0], True)
print(physical_devices)
print(config)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
PATH = os.path.join('F:\\datasets\\downloads\\Actors\\train\\Pikaploy', '*.tif')
PATH_2 = os.path.join('F:\\datasets\\downloads\\Actors\\train\\Candidt Kibt', '*.tif')
files = tf.data.Dataset.list_files(PATH)
files_2 = tf.data.Dataset.list_files(PATH_2)
list_file = []
list_file_actual = []
list_label = []
list_label_actual = [ 'Pikaploy', 'Pikaploy', 'Pikaploy', 'Pikaploy', 'Pikaploy', 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt' ]
for file in files.take(5):
    image = tf.io.read_file(file)
    image = tfio.experimental.image.decode_tiff(image, index=0)
    list_file_actual.append(image)
    image = tf.image.resize(image, [32, 32], method='nearest')
    list_file.append(image)
    list_label.append(1)

for file in files_2.take(5):
    image = tf.io.read_file(file)
    image = tfio.experimental.image.decode_tiff(image, index=0)
    list_file_actual.append(image)
    image = tf.image.resize(image, [32, 32], method='nearest')
    list_file.append(image)
    list_label.append(9)
checkpoint_path = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
checkpoint_dir = os.path.dirname(checkpoint_path)
loggings = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\loggings.log"
if not exists(checkpoint_dir):
    os.mkdir(checkpoint_dir)
    print("Create directory: " + checkpoint_dir)
log_dir = checkpoint_dir
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
DataSet
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
dataset = tf.data.Dataset.from_tensor_slices((tf.constant(tf.cast(list_file, dtype=tf.int64), shape=(10, 1, 32, 32, 4), dtype=tf.int64),tf.constant(list_label, shape=(10, 1, 1), dtype=tf.int64)))
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Initialize
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model = tf.keras.models.Sequential([
tf.keras.layers.InputLayer(input_shape=( 32, 32, 4 )),
tf.keras.layers.Normalization(mean=3., variance=2.),
tf.keras.layers.Normalization(mean=4., variance=6.),
tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Reshape((128, 225)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96, return_sequences=True, return_state=False)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(192, activation='relu'),
tf.keras.layers.Dense(10),
])
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Callback
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
class custom_callback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        if logs['accuracy'] >= 0.95:
            self.model.stop_training = True

custom_callback = custom_callback()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Optimizer
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
optimizer = tf.keras.optimizers.Nadam(
learning_rate=0.00001, beta_1=0.9, beta_2=0.999, epsilon=1e-07,
name='Nadam'
)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Loss Fn
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
lossfn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=False,
reduction=tf.keras.losses.Reduction.AUTO,
name='sparse_categorical_crossentropy'
)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Summary
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model.compile(optimizer=optimizer, loss=lossfn, metrics=['accuracy'])
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: FileWriter
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
if exists(checkpoint_path):
    model.load_weights(checkpoint_path)
    print("model load: " + checkpoint_path)
    input("Press Any Key!")
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
history = model.fit( dataset, batch_size=100, epochs=10000, callbacks=[custom_callback] )
model.save_weights(checkpoint_path)
plt.figure(figsize=(5,2))
plt.title("Actors recognitions")
for i in range(len(list_file)):
    img = tf.keras.preprocessing.image.array_to_img(
        list_file[i],
        data_format=None,
        scale=True
    )
    img_array = tf.keras.preprocessing.image.img_to_array(img)
    img_array = tf.expand_dims(img_array, 0)
    predictions = model.predict(img_array)
    score = tf.nn.softmax(predictions[0])
    plt.subplot(5, 2, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(list_file_actual[i])
    plt.xlabel(str(round(score[tf.math.argmax(score).numpy()].numpy(), 2)) + ":" + str(list_label_actual[tf.math.argmax(score)]))
plt.show()
input('...')
Output:

CNN - LSTM always predicts the same class

I am trying to predict a sequence of images. Previously I was using only a CNN that took these concatenated images as input, but it didn't give me very good results with some databases.
I am using two types of databases: one takes a single image and classifies it, the other takes a sequence of images and classifies it. So I use total_x_test_indexes=tf.expand_dims(total_x_test_indexes, axis=1) to generalize the model when it only classifies one image.
I read that I could do better by using a CNN and then applying an LSTM, and I saw how to do it here.
But I'm only getting confusion matrices like this, classifying almost everything into one class.
My code is this:
inp = Input((None,size_image,size_image,1), ragged=True)
x = TimeDistributed(cnn)(inp)
x = LSTM(25)(x)
size_predictions=len(dicTiposNumbers)
print("tamaƱo ",size_predictions)
out = Dense(size_predictions)(x)
model = Model(inp, out)
print(model.summary())
opt = keras.optimizers.Adam(learning_rate=0.05)
opt = keras.optimizers.SGD(learning_rate=0.15)
# Compile the model
model.compile(optimizer=opt,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
metrics=['accuracy'])
print('------------------------------------------------------------------------')
print(f'Training for fold {fold_no} ...')
total_x_train_indexes=tf.gather(total_x,indices=train)
total_y_train_indexes=tf.gather(total_y,indices=train)
total_x_train_indexes=tf.expand_dims(total_x_train_indexes, axis=1)
print("shape after gather",np.shape(total_x_train_indexes[0]))
history = model.fit(total_x_train_indexes, total_y_train_indexes,
batch_size=512,
epochs=5)
But I'm getting this and similar with other databases with more classes:
From your question, first determine the network's purpose and how responsive it is to the input data. I created a simple custom layer to show that a processing layer is nothing more than a simple calculation: at each layer, the processed output comes from the convolution layers and the dense layers.
Sample: improvement is done by comparing input and output and trying to expand the effects.
The confusion matrix is there to see the effects of the overall model training when predicting on the input data.
import os
from os.path import exists
import tensorflow as tf
import matplotlib.pyplot as plt
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
None
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
config = tf.config.experimental.set_memory_growth(physical_devices[0], True)
print(physical_devices)
print(config)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
BATCH_SIZE = 1
IMAGE_SIZE = ( 21, 16 )
objects_classes = [ 'plane', 'helicopter', 'truck' ]
checkpoint_path = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
checkpoint_dir = os.path.dirname(checkpoint_path)
if not exists(checkpoint_dir):
    os.mkdir(checkpoint_dir)
    print("Create directory: " + checkpoint_dir)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Class / Definition
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
class MyLSTMLayer(tf.keras.layers.LSTM):
    def __init__(self, units, return_sequences, return_state):
        super(MyLSTMLayer, self).__init__(units, return_sequences=True, return_state=False)
        self.num_units = units

    def build(self, input_shape):
        self.kernel = self.add_weight("kernel",
                                      shape=[int(input_shape[-1]), self.num_units])

    def call(self, inputs):
        return tf.matmul(inputs, self.kernel)
def gen():
    train_generator = tf.keras.preprocessing.image.ImageDataGenerator(
        # rescale=1./255,
        # shear_range=0.2,
        # zoom_range=0.2,
        # horizontal_flip=True
    )
    train_generator = train_generator.flow_from_directory(
        'F:\\temp\\image_catagorize',
        classes=['plane', 'helicopter', 'truck'],
        target_size=IMAGE_SIZE,
        batch_size=BATCH_SIZE,
        color_mode='grayscale',
        class_mode='sparse',  # None # categorical # binary # sparse
        subset='training')
    return train_generator
train_generator = gen()
val_generator = train_generator
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Callback
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
class custom_callback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        if logs['accuracy'] >= 0.97:
            self.model.stop_training = True

custom_callback = custom_callback()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Initialize
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
mycustomlayer = MyLSTMLayer( 64, True, False )
model = tf.keras.models.Sequential([
tf.keras.layers.InputLayer(input_shape=( IMAGE_SIZE[0], IMAGE_SIZE[1], 1 ), name="Input_01Layer"),
tf.keras.layers.UpSampling2D(size=(4, 4), name="UpSampling2DLayer_01"),
tf.keras.layers.Normalization(mean=3., variance=2., name="NormalizationLayer_01"),
tf.keras.layers.Normalization(mean=4., variance=6., name="NormalizationLayer_02"),
tf.keras.layers.Conv2D(32, (3, 3), activation='relu', name="Conv2DLayer_01"),
tf.keras.layers.MaxPooling2D((3, 3), name="MaxPooling2DLayer_01"),
tf.keras.layers.Conv2D(32, (2, 2), activation='relu', name="Conv2DLayer_02"),
tf.keras.layers.MaxPooling2D((2, 2), name="MaxPooling2DLayer_02"),
tf.keras.layers.Conv2D(64, (3, 3), activation='relu', name="Conv2DLayer_03"),
tf.keras.layers.Reshape(( 7 * 11, 64 )),
###
mycustomlayer,
###
tf.keras.layers.Dense(512, activation='relu', name="DenseLayer_01"),
tf.keras.layers.Flatten(name="FlattenLayer_01"),
tf.keras.layers.Dense(192, activation='relu', name="DenseLayer_02"),
tf.keras.layers.Dense(3, name="DenseLayer_03"),
], name="MyModelClassification")
model.summary()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Optimizer
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
optimizer = tf.keras.optimizers.SGD(
learning_rate=0.000001,
momentum=0.5,
nesterov=True,
name='SGD',
)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Loss Fn
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
lossfn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True,
reduction=tf.keras.losses.Reduction.AUTO,
name='sparse_categorical_crossentropy'
)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Summary
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model.compile(optimizer=optimizer, loss=lossfn, metrics=['accuracy'])
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: FileWriter
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
if exists(checkpoint_path):
    model.load_weights(checkpoint_path)
    print("model load: " + checkpoint_path)
    input("Press Any Key!")
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
history = model.fit(train_generator, validation_data=val_generator, batch_size=100, epochs=3, callbacks=[custom_callback] )
model.save_weights(checkpoint_path)
PATH = os.path.join('F:\\temp\\image_catagorize\\helicopter', '*.png')
files = tf.data.Dataset.list_files(PATH)
list_file = []
for file in files.take(20):
    image = tf.io.read_file(file)
    image = tf.io.decode_png(image, channels=1, dtype=tf.dtypes.uint8, name='decode_png')
    image = tf.keras.preprocessing.image.img_to_array(image)
    image = tf.image.resize(image, IMAGE_SIZE, method='nearest')
    list_file.append(image)

PATH = os.path.join('F:\\temp\\image_catagorize\\plane', '*.png')
files = tf.data.Dataset.list_files(PATH)
for file in files.take(8):
    image = tf.io.read_file(file)
    image = tf.io.decode_png(image, channels=1, dtype=tf.dtypes.uint8, name='decode_png')
    image = tf.keras.preprocessing.image.img_to_array(image)
    image = tf.image.resize(image, IMAGE_SIZE, method='nearest')
    list_file.append(image)

PATH = os.path.join('F:\\temp\\image_catagorize\\Truck', '*.png')
files = tf.data.Dataset.list_files(PATH)
for file in files.take(8):
    image = tf.io.read_file(file)
    image = tf.io.decode_png(image, channels=1, dtype=tf.dtypes.uint8, name='decode_png')
    image = tf.keras.preprocessing.image.img_to_array(image)
    image = tf.image.resize(image, IMAGE_SIZE, method='nearest')
    list_file.append(image)
plt.figure(figsize=(6, 6))
plt.title("Actors recognitions")
for i in range(len(list_file)):
    img = tf.keras.preprocessing.image.array_to_img(
        list_file[i],
        data_format=None,
        scale=True
    )
    img_array = tf.keras.preprocessing.image.img_to_array(img)
    img_array = tf.expand_dims(img_array, 0)
    predictions = model.predict(img_array)
    score = tf.nn.softmax(predictions[0])
    plt.subplot(6, 6, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(list_file[i])
    plt.xlabel(str(round(score[tf.math.argmax(score).numpy()].numpy(), 2)) + ":" + str(objects_classes[tf.math.argmax(score)]))
plt.xlabel(str(round(score[tf.math.argmax(score).numpy()].numpy(), 2)) + ":" + str(objects_classes[tf.math.argmax(score)]))
plt.show()

Why does Tensorflow Classification Algorithm make same prediction all the time

import os
os.add_dll_directory("C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.2/bin")
import tensorflow as tf
from keras import models
from keras import Sequential
from keras import layers
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
import os
print(len(os.listdir('COVID-19_Radiography_Dataset/COVID/images')))
print(len(os.listdir('COVID-19_Radiography_Dataset/Normal/images')))
import cv2
img = cv2.imread('COVID-19_Radiography_Dataset/Normal/images/Normal-10005.png')
import matplotlib.pyplot as plt
#plt.imshow(img)
#plt.show()
print(img.shape)
import pandas as pd
import numpy as np
df = pd.read_excel('COVID-19_Radiography_Dataset/COVID.metadata.xlsx')
print(df.head())
urls = os.listdir('COVID-19_Radiography_Dataset/COVID/images')
path = "COVID-19_Radiography_Dataset/COVID/images/" + urls[0]
print(path)
def loadImages(path, urls, target):
    images = []
    labels = []
    for i in range(len(urls)):
        img_path = path + "/" + urls[i]
        img = cv2.imread(img_path)
        img = img / 255.0
        img = cv2.resize(img, (100, 100))
        images.append(img)
        labels.append(target)
    images = np.asarray(images)
    return images, labels
covid_path = "COVID-19_Radiography_Dataset/COVID/images"
covidUrl = os.listdir(covid_path)
covidImages, covidTargets = loadImages(covid_path, covidUrl, 1)
print(len(covidUrl))
print(len(covidImages))
normal_path = "COVID-19_Radiography_Dataset/Normal/images"
normal_urls = os.listdir(normal_path)
normalImages, normalTargets = loadImages(normal_path, normal_urls, 0)
viral_path = "COVID-19_Radiography_Dataset/Viral Pneumonia/images"
viral_urls = os.listdir(viral_path)
viralImages, viralTargets = loadImages(viral_path, viral_urls, 0)
print(covidImages.shape)
print(normalImages.shape)
print(viralImages.shape)
data = np.r_[covidImages, normalImages, viralImages]
print(data.shape)
targets = np.r_[covidTargets, normalTargets, viralTargets]
print(targets.shape)
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(data, targets, test_size=0.25)
#import tensorflow as tf
#from tensorflow import keras
#from keras import models
#from keras import Sequential
#from keras import layers
#from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
##
model = Sequential([
Conv2D(32, 3, input_shape=(100,100,3), activation='relu'),
MaxPooling2D(),
Conv2D(16, 3, activation='relu'),
MaxPooling2D(),
Conv2D(16, 3, activation='relu'),
MaxPooling2D(),
Flatten(),
Dense(512, activation='relu'),
Dense(256, activation='relu'),
Dense(1, activation='sigmoid')
])
print(model.summary())
model.compile(optimizer='adam', loss=tf.keras.losses.BinaryCrossentropy(),metrics=['accuracy'])
model.fit(x_train, y_train,batch_size=32,epochs=15,validation_data=(x_test, y_test))
plt.plot(model.history.history['accuracy'], label = 'train accuracy')
plt.plot(model.history.history['val_accuracy'],label = 'test_accuracy')
plt.legend()
plt.show()
plt.plot(model.history.history['loss'], label = 'train loss')
plt.plot(model.history.history['val_loss'],label = 'test_loss')
plt.legend()
plt.show()
url = "https://radiologyassistant.nl/assets/_1-old-1.jpg"
testimage = tf.keras.utils.get_file('X-ray', origin=url)
img = tf.keras.utils.load_img(
testimage, target_size=(100, 100)
)
img_array = tf.keras.utils.img_to_array(img)
img_array = tf.expand_dims(img_array, 0)
predictions = model.predict(img_array)
score = tf.nn.softmax(predictions[0])
print(predictions)
print(score)
It's supposed to classify the image as COVID, Normal, or Viral; however the prediction always returns 1. I want it to return one of the three classes, and I am not quite sure why it keeps returning 1 when printing the predictions. Please help. I am using a dataset of COVID-related pneumonia chest X-rays, normal chest X-rays, and viral pneumonia chest X-rays.
It is a BinaryCrossentropy classification because medical instruments require the most accurate output between 0 and 1. I skipped the model path and a full sample since there are no data examples, but a repeating answer may occur for these reasons:
Model training and learning rates: the model needs to learn in an appropriate manner and stop or re-train; that is why image classification often uses a custom callback class during testing (early stopping may work well for in-scope data but not always for new data, since a wide range and variety of data must be learned).
Folding and variety: I used to train number recognition from Thailand's public automatic telephone exchange, which is a large dataset. Create datasets as folds, working with sample data and new data, and you will see the results: long-trained dense layers make sense, but the longer-trained convolution layers hold the critical parameters; those input voices differ by personality and regional culture.
Feature extraction and enhancement methods: sometimes the input images are huge and repetitive; mathematical functions and wave properties work well with natural logarithms (and not only waves; bones, body and response do as well).
Mapping and cross functions: image masking, and feature mapping from related data rather than plain feature extraction, such as option selection, dictionaries, and command and language models; for image classification, image masking and side information (geographic data, sex, age and related).
Model transformation and database learning: you will sometimes find that, for convenience after training, the knowledge is transferred into a simple dense-layer model from which we can remove or add layers and parameters for better results.
The code is essentially correct; experimenting is the task.
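One concrete thing worth checking in the posted code (my observation, not part of the template below): the model ends in a single sigmoid unit, and tf.nn.softmax over a one-element vector is always 1.0, so the printed score will be 1 no matter what the input is. A minimal sketch for reading a single-sigmoid prediction directly:
predictions = model.predict(img_array)   # shape (1, 1) for a single sigmoid output unit
prob = float(predictions[0][0])          # already a probability in [0, 1]

# Do NOT softmax a single logit: tf.nn.softmax([x]) == [1.0] for any x.
label = "COVID" if prob >= 0.5 else "Normal / Viral"
print(prob, label)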
Sample: ( My template ) << There are many tasks to create a pair of sandals than you know to wear and feel comfortable to see grandmother >>
import os
from os.path import exists
import tensorflow as tf
import tensorflow_io as tfio
import matplotlib.pyplot as plt
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
None
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
config = tf.config.experimental.set_memory_growth(physical_devices[0], True)
print(physical_devices)
print(config)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
PATH = os.path.join('F:\\datasets\\downloads\\Actors\\train\\Pikaploy', '*.tif')
PATH_2 = os.path.join('F:\\datasets\\downloads\\Actors\\train\\Candidt Kibt', '*.tif')
files = tf.data.Dataset.list_files(PATH)
files_2 = tf.data.Dataset.list_files(PATH_2)
list_file = []
list_file_actual = []
list_label = []
list_label_actual = [ 'Pikaploy', 'Pikaploy', 'Pikaploy', 'Pikaploy', 'Pikaploy', 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt' ]
for file in files.take(5):
    image = tf.io.read_file(file)
    image = tfio.experimental.image.decode_tiff(image, index=0)
    list_file_actual.append(image)
    image = tf.image.resize(image, [32, 32], method='nearest')
    list_file.append(image)
    list_label.append(1)

for file in files_2.take(5):
    image = tf.io.read_file(file)
    image = tfio.experimental.image.decode_tiff(image, index=0)
    list_file_actual.append(image)
    image = tf.image.resize(image, [32, 32], method='nearest')
    list_file.append(image)
    list_label.append(9)

checkpoint_path = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
checkpoint_dir = os.path.dirname(checkpoint_path)
loggings = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\loggings.log"
if not exists(checkpoint_dir):
    os.mkdir(checkpoint_dir)
    print("Create directory: " + checkpoint_dir)
log_dir = checkpoint_dir
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
DataSet
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
dataset = tf.data.Dataset.from_tensor_slices((tf.constant(tf.cast(list_file, dtype=tf.int64), shape=(10, 1, 32, 32, 4), dtype=tf.int64),tf.constant(list_label, shape=(10, 1, 1), dtype=tf.int64)))
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Initialize
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model = tf.keras.models.Sequential([
tf.keras.layers.InputLayer(input_shape=( 32, 32, 4 )),
tf.keras.layers.Normalization(mean=3., variance=2.),
tf.keras.layers.Normalization(mean=4., variance=6.),
tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Reshape((128, 225)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96, return_sequences=True, return_state=False)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(192, activation='relu'),
tf.keras.layers.Dense(10),
])
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Callback
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
class custom_callback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        # if logs['loss'] <= 0.2:
        #     self.model.stop_training = True
        if logs['accuracy'] >= 0.60:
            self.model.stop_training = True

custom_callback = custom_callback()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Optimizer
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
optimizer = tf.keras.optimizers.Nadam(
learning_rate=0.00001, beta_1=0.9, beta_2=0.999, epsilon=1e-07,
name='Nadam'
)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Loss Fn
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
lossfn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=False,
reduction=tf.keras.losses.Reduction.AUTO,
name='sparse_categorical_crossentropy'
)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Summary
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model.compile(optimizer=optimizer, loss=lossfn, metrics=['accuracy'])
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: FileWriter
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
if exists(checkpoint_path):
    model.load_weights(checkpoint_path)
    print("model load: " + checkpoint_path)
    input("Press Any Key!")
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
history = model.fit( dataset, batch_size=100, epochs=10000, callbacks=[custom_callback] )
model.save_weights(checkpoint_path)
plt.figure(figsize=(5,2))
plt.title("Actors recognitions")
for i in range(len(list_file)):
    img = tf.keras.preprocessing.image.array_to_img(
        list_file[i],
        data_format=None,
        scale=True
    )
    img_array = tf.keras.preprocessing.image.img_to_array(img)
    img_array = tf.expand_dims(img_array, 0)
    predictions = model.predict(img_array)
    score = tf.nn.softmax(predictions[0])
    plt.subplot(5, 2, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(list_file_actual[i])
    plt.xlabel(str(round(score[tf.math.argmax(score).numpy()].numpy(), 2)) + ":" + str(list_label_actual[tf.math.argmax(score)]))
plt.show()
input('...')

Unable to download CIFAR-10 dataset

I am attempting to run a GoogLeNet code, but when I run it, for some reason it says
[INFO] loading CIFAR-10 data...
[INFO] compiling model..
but when my friend runs the same code, his shows
[INFO] loading CIFAR-10 data...
Downloading data from https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
[INFO] compiling model..
Is there a reason why mine does not show that? I have posted the full code below, which is exactly the same code my friend is running. The only difference between our systems is that his laptop does not have a GPU, while my desktop runs a GeForce RTX 3080.
# Python: 3.6
# keras: 2.2.4 for GoogLeNet on CIFAR-10
# Tensorflow :1.13.0
# cuda toolkit: 10.0
# cuDNN: 7.4.2
# scikit-learn 0.20.2
# Imutils
# NumPy
# set the matplotlib backend so figures can be saved in the background
import matplotlib
matplotlib.use("Agg")
# import packages
from sklearn.metrics import classification_report
from sklearn.preprocessing import LabelBinarizer
from pipeline.nn.conv import MiniGoogLeNet
from pipeline.callbacks import TrainingMonitor
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import LearningRateScheduler
from keras.optimizers import SGD
from keras.datasets import cifar10
import numpy as np
import argparse
import os
# define the total number of epochs to train for along with initial learning rate
NUM_EPOCHS =70
INIT_LR = 5e-3
def poly_decay(epoch):
    # initialize the maximum number of epochs, base learning rate,
    # and power of the polynomial
    maxEpochs = NUM_EPOCHS
    baseLR = INIT_LR
    power = 1.0
    # compute the new learning rate based on polynomial decay
    alpha = baseLR * (1 - (epoch / float(maxEpochs))) ** power
    # return the new learning rate
    return alpha
# construct the argument parser
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", required = True, help = "path to output model")
ap.add_argument("-o", "--output", required = True,
help = "path to output directory (logs, plots, etc.)")
args = vars(ap.parse_args())
# load the training and testing data, converting the image from integers to floats
print("[INFO] loading CIFAR-10 data...")
((trainX, trainY), (testX, testY)) = cifar10.load_data()
trainX = trainX.astype("float")
testX = testX.astype("float")
# apply mean subtraction to the data
mean = np.mean(trainX, axis = 0)
trainX -= mean
testX -= mean
# convert the labels from integers to vectors
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)
# initialize the label name for CIFAR-10 dataset
labelNames = ["airplane", "automobile", "bird", "cat", "deer",
"dog", "frog", "horse", "ship", "truck"]
# construct the image generator for data augmentation
aug = ImageDataGenerator(width_shift_range = 0.1, height_shift_range = 0.1,
horizontal_flip = True, fill_mode = "nearest")
# construct the set of callbacks
figPath = os.path.sep.join([args["output"], "{}.png".format(os.getpid())])
jsonPath = os.path.sep.join([args["output"], "{}.json".format(os.getpid())])
callbacks = [TrainingMonitor(figPath, jsonPath = jsonPath),
LearningRateScheduler(poly_decay)]
# initialize the optimizer and model
print("[INFO] compiling model...")
opt = SGD(lr = INIT_LR, momentum = 0.9)
model = MiniGoogLeNet.build(width = 32, height = 32, depth = 3, classes = 10)
model.compile(loss = "categorical_crossentropy", optimizer = opt, metrics = ["accuracy"])
#model.compile(optimizer = tf.keras.optimizers.Adam(learning_rate=0.001), loss = 'categorical_crossentropy', metrics = ["accuracy"])
# train the network
print("[INFO] training network...")
model.fit(aug.flow(trainX, trainY, batch_size = 64),
validation_data = (testX, testY), steps_per_epoch = len(trainX) // 64,
epochs = NUM_EPOCHS, callbacks = callbacks, verbose = 1)
# evaluate network
print("[INFO] evaluating network...")
predictions = model.predict(testX, batch_size = 64)
print(classification_report(testY.argmax(axis = 1),
predictions.argmax(axis = 1), target_names = labelNames))
# save the network to disk
print("[INFO] serializing network...")
model.save(args["model"])
#Run Command: python.exe googlenet_cifar10.py --model output/minigooglenet_cifar10.hdf5 --output output
Have you looked at the local datasets? Keras caches downloaded datasets locally, so if the archive is already present it will not download it again and no download message is printed.
[ Local datasets ]:
C:\Users\Jirayu Kaewprateep\.keras\datasets
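A quick way to confirm what is already cached locally (a small sketch, assuming the default Keras cache location) is to list that directory:
import os

cache_dir = os.path.join(os.path.expanduser("~"), ".keras", "datasets")
for name in sorted(os.listdir(cache_dir)):
    print(name)   # e.g. cifar-10-batches-py.tar.gz if CIFAR-10 was downloaded before
Deleting the cached archive should make Keras download it again and print the download message.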
[ Sample ]:
import os
from os.path import exists
import tensorflow as tf
import tensorflow_datasets as tfds
import matplotlib.pyplot as plt
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
None
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
config = tf.config.experimental.set_memory_growth(physical_devices[0], True)
print(physical_devices)
print(config)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
class_10_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
checkpoint_path = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
checkpoint_dir = os.path.dirname(checkpoint_path)
if not exists(checkpoint_dir):
    os.mkdir(checkpoint_dir)
    print("Create directory: " + checkpoint_dir)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
DataSet
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.cifar10.load_data()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Initialize
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model = tf.keras.models.Sequential([
tf.keras.layers.InputLayer(input_shape=( 32, 32, 3 )),
tf.keras.layers.Normalization(mean=3., variance=2.),
tf.keras.layers.Normalization(mean=4., variance=6.),
tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Reshape((128, 225)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96, return_sequences=True, return_state=False)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(192, activation='relu'),
tf.keras.layers.Dense(10),
])
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Callback
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
class custom_callback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        if logs['accuracy'] >= 0.97:
            self.model.stop_training = True

custom_callback = custom_callback()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Optimizer
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
optimizer = tf.keras.optimizers.Nadam(
learning_rate=0.00001, beta_1=0.9, beta_2=0.999, epsilon=1e-07,
name='Nadam'
)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Loss Fn
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
lossfn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=False,
reduction=tf.keras.losses.Reduction.AUTO,
name='sparse_categorical_crossentropy'
)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Summary
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model.compile(optimizer=optimizer, loss=lossfn, metrics=['accuracy'])
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: FileWriter
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
if exists(checkpoint_path):
    model.load_weights(checkpoint_path)
    print("model load: " + checkpoint_path)
    input("Press Any Key!")
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
history = model.fit( train_images, train_labels, batch_size=100, epochs=50, callbacks=[custom_callback] )
model.save_weights(checkpoint_path)
plt.figure(figsize=(6, 6))
plt.title("Actors recognitions")
for i in range(36):
    img = tf.keras.preprocessing.image.array_to_img(
        test_images[i],
        data_format=None,
        scale=True
    )
    img_array = tf.keras.preprocessing.image.img_to_array(img)
    img_array = tf.expand_dims(img_array, 0)
    predictions = model.predict(img_array)
    score = tf.nn.softmax(predictions[0])
    plt.subplot(6, 6, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(test_images[i])
    plt.xlabel(str(round(score[tf.math.argmax(score).numpy()].numpy(), 2)) + ":" + str(class_10_names[tf.math.argmax(score)]))
plt.show()
input('...')

Cannot save TensorFlow model when it contains a BatchNormalization layer

I am trying to save a custom TensorFlow model after training for one epoch. When the model contains a BatchNormalization layer it cannot be saved; I can see that "fused_batch_norm" cannot be serialized. How can I use another BatchNormalization layer which can be serialized and saved in both ".h5" and ".pb" format? I am using TensorFlow 2.8 with tensorflow-metal on macOS.
def conv_batchnorm_relu(x, filters, kernel_size, strides=1):
    x = tf.keras.layers.Conv2D(filters=filters, kernel_size=kernel_size, strides=strides, padding='same')(x)
    x = tf.keras.layers.BatchNormalization()(x)
    x = tf.keras.layers.ReLU()(x)
    return x
TypeError: Layer tf.compat.v1.nn.fused_batch_norm was passed non-JSON-serializable arguments. Arguments had types: {'scale': <class 'tensorflow.python.ops.resource_variable_ops.ResourceVariable'>, 'offset': <class 'tensorflow.python.ops.resource_variable_ops.ResourceVariable'>, 'mean': <class 'tensorflow.python.ops.resource_variable_ops.ResourceVariable'>, 'variance': <class 'tensorflow.python.ops.resource_variable_ops.ResourceVariable'>, 'epsilon': <class 'float'>, 'is_training': <class 'bool'>, 'data_format': <class 'str'>}. They cannot be serialized out when saving the model.
Edit: I used the tf.keras.layers.experimental.SyncBatchNormalization() function instead of tf.keras.layers.BatchNormalization() and the problem is solved for now.
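For reference, a minimal sketch of that substitution applied to the helper above (assuming TF 2.8, where SyncBatchNormalization is still under tf.keras.layers.experimental):
import tensorflow as tf

def conv_batchnorm_relu(x, filters, kernel_size, strides=1):
    x = tf.keras.layers.Conv2D(filters=filters, kernel_size=kernel_size,
                               strides=strides, padding='same')(x)
    # Swapped in per the edit above; this variant avoided the fused_batch_norm serialization error.
    x = tf.keras.layers.experimental.SyncBatchNormalization()(x)
    x = tf.keras.layers.ReLU()(x)
    return x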
What I understood from your given template is that you cannot use a support function this way for adding custom layers to your model. Typically you inherit from keras.Model when you need the model methods like Model.fit, Model.evaluate, and Model.save.
Here is an example for better understanding:
class ExampleBlock(tf.keras.Model):
    def __init__(self, kernel_size, filters1, filters2):
        super(ExampleBlock, self).__init__(name='')
        self.conv2a = tf.keras.layers.Conv2D(filters1, (1, 1))
        self.bn2a = tf.keras.layers.BatchNormalization()
        self.conv2b = tf.keras.layers.Conv2D(filters2, kernel_size, padding='same')
        self.bn2b = tf.keras.layers.BatchNormalization()
        # You can add as many layers as you like

    def call(self, input_tensor, training=False):
        x = self.conv2a(input_tensor)
        x = self.bn2a(x, training=training)
        x = tf.nn.relu(x)
        x = self.conv2b(x)
        x = self.bn2b(x, training=training)
        x = tf.nn.relu(x)
        return tf.nn.relu(x)
After defining your model you can create a checkpoint after each epoch by defining a callback function.
import keras
model_save_path = "/content/drive/MyDrive/Project/" #path for saving checkpoint
checkpoint = keras.callbacks.ModelCheckpoint(model_save_path+'checkpoint_{epoch:02d}', save_freq='epoch')
You can use this checkpoint in the callback function:
H = model.fit(
    aug.flow(trainX, trainY, batch_size=BS),
    validation_data=(validX, validY),
    steps_per_epoch=len(trainX) // BS,
    epochs=EPOCHS,
    class_weight=classWeight,
    verbose=1,
    callbacks=[checkpoint])
You can apply matrix properties or you can create a model with a support function.
Sample:
import os
from os.path import exists
import tensorflow as tf
import h5py
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
filters = 32
kernel_size = (3, 3)
strides = 1
database_buffer = "F:\\models\\buffer\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
database_buffer_dir = os.path.dirname(database_buffer)
checkpoint_path = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
checkpoint_dir = os.path.dirname(checkpoint_path)
loggings = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\loggings.log"
if not exists(checkpoint_dir):
    os.mkdir(checkpoint_dir)
    print("Create directory: " + checkpoint_dir)

if not exists(database_buffer_dir):
    os.mkdir(database_buffer_dir)
    print("Create directory: " + database_buffer_dir)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Functions
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
def conv_batchnorm_relu(filters, kernel_size, strides=1):
    model = tf.keras.models.Sequential([
        tf.keras.layers.InputLayer(input_shape=(32, 32, 3)),
        tf.keras.layers.Conv2D(filters=filters, kernel_size=kernel_size, strides=strides, padding='same'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.ReLU(),
    ])
    model.add(tf.keras.layers.Flatten())
    model.add(tf.keras.layers.Dense(64))
    model.add(tf.keras.layers.Dense(10))
    model.summary()
    return model
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
DataSet
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.cifar10.load_data()
# Create hdf5 file
hdf5_file = h5py.File(database_buffer, mode='w')
# Train images
hdf5_file['x_train'] = train_images
hdf5_file['y_train'] = train_labels
# Test images
hdf5_file['x_test'] = test_images
hdf5_file['y_test'] = test_labels
hdf5_file.close()
# Visualize dataset train sample
hdf5_file = h5py.File(database_buffer, mode='r')
x_train = hdf5_file['x_train'][0: 10000]
x_test = hdf5_file['x_test'][0: 100]
y_train = hdf5_file['y_train'][0: 10000]
y_test = hdf5_file['y_test'][0: 100]
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Optimizer
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
optimizer = tf.keras.optimizers.Nadam( learning_rate=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, name='Nadam' )
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Loss Fn
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
lossfn = tf.keras.losses.MeanSquaredLogarithmicError(reduction=tf.keras.losses.Reduction.AUTO, name='mean_squared_logarithmic_error')
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Summary
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model = conv_batchnorm_relu(filters, kernel_size, strides=1)
model.compile(optimizer=optimizer, loss=lossfn, metrics=['accuracy'])
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: FileWriter
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
if exists(checkpoint_path):
    model.load_weights(checkpoint_path)
    print("model load: " + checkpoint_path)
    input("Press Any Key!")
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
history = model.fit(x_train, y_train, epochs=1 ,validation_data=(x_train, y_train))
model.save_weights(checkpoint_path)
input('...')
