I'm just getting started with machine learning and want to predict values/sales in a time series. I found these two blog posts, which basically match what I'm looking for.
Basics of Time Series Prediction - Setup of the Timeseries and Datasets found in here
Techniques for Time Series Prediction - NN Setup in here
Instead of predicting the value for the next timestep I would like to predict the value 4 timesteps ahead. Originally I have weekly data, so I want to predict the value 4 weeks / 1 month ahead.
As I understand it, I therefore need to change the "label" the model is trained with, which is done within the function windowed_dataset() (Source 2).
def windowed_dataset(series, window_size, batch_size, shuffle_buffer):
    dataset = tf.data.Dataset.from_tensor_slices(series)
    dataset = dataset.window(window_size + 1, shift=1, drop_remainder=True)
    dataset = dataset.flat_map(lambda window: window.batch(window_size + 1))
    dataset = dataset.shuffle(shuffle_buffer).map(lambda window: (window[:-1], window[-1]))  # <-- change will be in here
    dataset = dataset.batch(batch_size).prefetch(1)
    return dataset
If I change dataset = dataset.shuffle(shuffle_buffer).map(lambda window: (window[:-1], window[-1])) to dataset = dataset.shuffle(shuffle_buffer).map(lambda window: (window[:-4], window[-1])), the labels are, in my opinion, correctly adjusted to my goal.
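To sanity-check this mapping, one can peek at a single (features, label) pair on a toy series (a minimal sketch, not from the blog posts; the range series is only for illustration):
toy = tf.data.Dataset.from_tensor_slices(tf.range(10.0))
toy = toy.window(4 + 1, shift=1, drop_remainder=True)
toy = toy.flat_map(lambda w: w.batch(4 + 1))
toy = toy.map(lambda w: (w[:-4], w[-1]))
for features, label in toy.take(1):
    print(features.numpy(), label.numpy())  # [0.] 4.0 -> the label is 4 steps after the last input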
But running the next step
dataset = windowed_dataset(x_train, window_size, batch_size, shuffle_buffer_size)
print(dataset)
l0 = tf.keras.layers.Dense(1, input_shape=[window_size])
model = tf.keras.models.Sequential([l0])
model.compile(loss="mse", optimizer=tf.keras.optimizers.SGD(learning_rate=1e-6, momentum=0.9))
model.fit(dataset,epochs=100,verbose=0)
throws an error:
runcell('Build model', 'C:/Users/USER/Desktop/Local/Prediction/untitled0.py')
<PrefetchDataset element_spec=(TensorSpec(shape=(None, None), dtype=tf.float32, name=None), TensorSpec(shape=(None,), dtype=tf.float32, name=None))>
Traceback (most recent call last):
File "C:\Users\USER\Desktop\Local\Prediction\untitled0.py", line 102, in <module>
model.fit(dataset,epochs=100,verbose=0)
File "C:\Users\USER\Anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 67, in error_handler
raise e.with_traceback(filtered_tb) from None
File "C:\Users\USER\Anaconda3\lib\site-packages\tensorflow\python\eager\execute.py", line 54, in quick_execute
tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
InvalidArgumentError: Graph execution error:
Detected at node 'sequential_9/dense_10/BiasAdd' defined at (most recent call last):
File "C:\Users\USER\Anaconda3\lib\runpy.py", line 197, in _run_module_as_main
return _run_code(code, main_globals, None,
File "C:\Users\USER\Anaconda3\lib\runpy.py", line 87, in _run_code
exec(code, run_globals)
File "C:\Users\USER\Anaconda3\lib\site-packages\spyder_kernels\console\__main__.py", line 23, in <module>
start.main()
File "C:\Users\USER\Anaconda3\lib\site-packages\spyder_kernels\console\start.py", line 328, in main
kernel.start()
File "C:\Users\USER\Anaconda3\lib\site-packages\ipykernel\kernelapp.py", line 677, in start
self.io_loop.start()
File "C:\Users\USER\Anaconda3\lib\site-packages\tornado\platform\asyncio.py", line 199, in start
self.asyncio_loop.run_forever()
File "C:\Users\USER\Anaconda3\lib\asyncio\base_events.py", line 596, in run_forever
self._run_once()
File "C:\Users\USER\Anaconda3\lib\asyncio\base_events.py", line 1890, in _run_once
handle._run()
File "C:\Users\USER\Anaconda3\lib\asyncio\events.py", line 80, in _run
self._context.run(self._callback, *self._args)
File "C:\Users\USER\Anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 457, in dispatch_queue
await self.process_one()
File "C:\Users\USER\Anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 446, in process_one
await dispatch(*args)
File "C:\Users\USER\Anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 353, in dispatch_shell
await result
File "C:\Users\USER\Anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 648, in execute_request
reply_content = await reply_content
File "C:\Users\USER\Anaconda3\lib\site-packages\ipykernel\ipkernel.py", line 353, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "C:\Users\USER\Anaconda3\lib\site-packages\ipykernel\zmqshell.py", line 533, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "C:\Users\USER\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 2901, in run_cell
result = self._run_cell(
File "C:\Users\USER\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 2947, in _run_cell
return runner(coro)
File "C:\Users\USER\Anaconda3\lib\site-packages\IPython\core\async_helpers.py", line 68, in _pseudo_sync_runner
coro.send(None)
File "C:\Users\USER\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 3172, in run_cell_async
has_raised = await self.run_ast_nodes(code_ast.body, cell_name,
File "C:\Users\USER\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 3364, in run_ast_nodes
if (await self.run_code(code, result, async_=asy)):
File "C:\Users\USER\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 3444, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "C:\Users\USER\AppData\Local\Temp/ipykernel_15784/4252985979.py", line 1, in <module>
runcell('Build model', 'C:/Users/USER/Desktop/Local/Prediction/untitled0.py')
File "C:\Users\USER\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 673, in runcell
exec_code(cell_code, filename, ns_globals, ns_locals,
File "C:\Users\USER\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 465, in exec_code
exec(compiled, ns_globals, ns_locals)
File "C:\Users\USER\Desktop\Local\Prediction\untitled0.py", line 102, in <module>
model.fit(dataset,epochs=100,verbose=0)
File "C:\Users\USER\Anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 64, in error_handler
return fn(*args, **kwargs)
File "C:\Users\USER\Anaconda3\lib\site-packages\keras\engine\training.py", line 1409, in fit
tmp_logs = self.train_function(iterator)
File "C:\Users\USER\Anaconda3\lib\site-packages\keras\engine\training.py", line 1051, in train_function
return step_function(self, iterator)
File "C:\Users\USER\Anaconda3\lib\site-packages\keras\engine\training.py", line 1040, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "C:\Users\USER\Anaconda3\lib\site-packages\keras\engine\training.py", line 1030, in run_step
outputs = model.train_step(data)
File "C:\Users\USER\Anaconda3\lib\site-packages\keras\engine\training.py", line 889, in train_step
y_pred = self(x, training=True)
File "C:\Users\USER\Anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 64, in error_handler
return fn(*args, **kwargs)
File "C:\Users\USER\Anaconda3\lib\site-packages\keras\engine\training.py", line 490, in __call__
return super().__call__(*args, **kwargs)
File "C:\Users\USER\Anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 64, in error_handler
return fn(*args, **kwargs)
File "C:\Users\USER\Anaconda3\lib\site-packages\keras\engine\base_layer.py", line 1014, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File "C:\Users\USER\Anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 92, in error_handler
return fn(*args, **kwargs)
File "C:\Users\USER\Anaconda3\lib\site-packages\keras\engine\sequential.py", line 374, in call
return super(Sequential, self).call(inputs, training=training, mask=mask)
File "C:\Users\USER\Anaconda3\lib\site-packages\keras\engine\functional.py", line 458, in call
return self._run_internal_graph(
File "C:\Users\USER\Anaconda3\lib\site-packages\keras\engine\functional.py", line 596, in _run_internal_graph
outputs = node.layer(*args, **kwargs)
File "C:\Users\USER\Anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 64, in error_handler
return fn(*args, **kwargs)
File "C:\Users\USER\Anaconda3\lib\site-packages\keras\engine\base_layer.py", line 1014, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File "C:\Users\USER\Anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 92, in error_handler
return fn(*args, **kwargs)
File "C:\Users\USER\Anaconda3\lib\site-packages\keras\layers\core\dense.py", line 232, in call
outputs = tf.nn.bias_add(outputs, self.bias)
Node: 'sequential_9/dense_10/BiasAdd'
Matrix size-incompatible: In[0]: [24,9], In[1]: [12,1]
[[{{node sequential_9/dense_10/BiasAdd}}]] [Op:__inference_train_function_231055]
What am I missing here? Is there another, better approach to model the time series?
Note: At some point in the future I would also like to add more parameters/indicators to the model to test whether this increases the accuracy.
Edit:
Creation of Data and Series:
#%% Setup
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow import keras
#%% Creating Timeseries
def plot_series(time, series, format="-", start=0, end=None):
    plt.plot(time[start:end], series[start:end], format)
    plt.xlabel("Time")
    plt.ylabel("Value")
    plt.grid(True)

def trend(time, slope=0):
    return slope * time

def seasonal_pattern(season_time):
    """Just an arbitrary pattern, you can change it if you wish"""
    return np.where(season_time < 0.4,
                    np.cos(season_time * 2 * np.pi),
                    1 / np.exp(3 * season_time))

def seasonality(time, period, amplitude=1, phase=0):
    """Repeats the same pattern at each period"""
    season_time = ((time + phase) % period) / period
    return amplitude * seasonal_pattern(season_time)

def noise(time, noise_level=1, seed=None):
    rnd = np.random.RandomState(seed)
    return rnd.randn(len(time)) * noise_level
time = np.arange(4 * 365 + 1, dtype="float32")
baseline = 10
amplitude = 40
slope = 0.05
noise_level = 5
# Create the series
series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)
# Update with noise
series += noise(time, noise_level, seed=42)
plt.figure(figsize=(10, 6))
plot_series(time, series)
plt.show()
#%% Create Data Sets
split_time = 1000
time_train = time[:split_time]
x_train = series[:split_time]
time_valid = time[split_time:]
x_valid = series[split_time:]
plt.figure(figsize=(10, 6))
plot_series(time_train, x_train)
plt.show()
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plt.show()
Parameters:
#%% Set Parameters
window_size = 4
batch_size = 4
shuffle_buffer_size = 10
Changing dataset = dataset.shuffle(shuffle_buffer).map(lambda window: (window[:-1], window[-1])) to dataset = dataset.shuffle(shuffle_buffer).map(lambda window: (window[:-4], window[-1])) actually influences the window, as one can see from the outputs of the following steps:
# Previous steps within function
dataset = tf.data.Dataset.from_tensor_slices(series)
dataset = dataset.window(12 + 1, shift=1, drop_remainder=True)
dataset = dataset.flat_map(lambda window: window.batch(12 + 1))
# Regular approach predicting the next value
dataset_1 = dataset.shuffle(3).map(lambda window: (window[:-1], window[-1]))
counter = 0
for n in dataset_1:
    counter += 1
    if counter == 1:
        print(n)
Output:
> (<tf.Tensor: shape=(12,), dtype=float32, numpy=
array([52.48357 , 49.35275 , 53.314735, 57.711823, 48.934444, 48.931244,
57.982895, 53.897125, 47.67393 , 52.68371 , 47.591717, 47.506374],
dtype=float32)>, <tf.Tensor: shape=(), dtype=float32, numpy=50.959415>)
# Approach predicting a value 4 timesteps ahead
dataset_4 = dataset.shuffle(3).map(lambda window: (window[:-4], window[-1]))
counter = 0
for n in dataset_4:
    counter += 1
    if counter == 1:
        print(n)
Output:
> (<tf.Tensor: shape=(9,), dtype=float32, numpy=
array([52.48357 , 49.35275 , 53.314735, 57.711823, 48.934444, 48.931244,
57.982895, 53.897125, 47.67393 ], dtype=float32)>, <tf.Tensor: shape=(), dtype=float32, numpy=50.959415>)
In the first (regular) case the tensor has shape (12,) while in the second case the tensor has shape (9,).
It is therefore necessary to adjust the window_size in the following steps to reflect the actual input shape and its downstream effects, e.g. on the length of the result array.
window_size_adjusted = window_size - 3
dataset = windowed_dataset(x_train, window_size, batch_size, shuffle_buffer_size)
#%% Build model
l0 = tf.keras.layers.Dense(1, input_shape=[window_size_adjusted])
model = tf.keras.models.Sequential([l0])
model.compile(loss="mse", optimizer=tf.keras.optimizers.SGD(learning_rate=1e-6, momentum=0.9))
model.fit(dataset,epochs=100,verbose=0)
#%% Forecast
forecast = []
for time in range(len(series) - window_size_adjusted):
    forecast.append(model.predict(series[time:time + window_size_adjusted][np.newaxis]))
forecast = forecast[split_time - window_size_adjusted:]
results = np.array(forecast)[:, 0, 0]
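An alternative that avoids the adjusted window size altogether is to widen each window by the forecast horizon and keep the input length fixed. This is a minimal sketch (the horizon parameter is my own addition, 4 for a 4-weeks-ahead forecast; it is not from the blog posts):
def windowed_dataset_ahead(series, window_size, horizon, batch_size, shuffle_buffer):
    # Each window holds `window_size` inputs plus `horizon` future steps;
    # the label is the value `horizon` steps after the last input.
    dataset = tf.data.Dataset.from_tensor_slices(series)
    dataset = dataset.window(window_size + horizon, shift=1, drop_remainder=True)
    dataset = dataset.flat_map(lambda window: window.batch(window_size + horizon))
    dataset = dataset.shuffle(shuffle_buffer).map(lambda window: (window[:window_size], window[-1]))
    dataset = dataset.batch(batch_size).prefetch(1)
    return dataset
With this variant the Dense layer keeps input_shape=[window_size] and no further adjustment is needed.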
Related
This is the code I used to create a CNN. I imported the images from Google Drive.
TRAIN_PATH = "drive/MyDrive/CXR/Dataset/Train/"
VAL_PATH = "drive/MyDrive/CXR/Dataset/Test/"
import numpy as np
import matplotlib.pyplot as plt
import keras
from keras.layers import *
from keras.models import *
from keras.preprocessing import image
#CNN based model in Keras
model = Sequential()
model.add(Conv2D(32,kernel_size=(3,3),activation = 'relu',input_shape=(224,224,3)))
model.add(Conv2D(64,(3,3),activation = 'relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(64,(3,3),activation = 'relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(128,(3,3),activation = 'relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(64,activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation = 'sigmoid'))
model.compile(loss=keras.losses.binary_crossentropy,optimizer='adam',metrics=['accuracy'])
model.summary()
#Train from scratch
train_datagen = image.ImageDataGenerator(
rescale = 1./255,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True,
)
test_dataset = image.ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'drive/MyDrive/CXR/Dataset/Train/',
target_size = (244,244),
batch_size = 32,
class_mode ='binary'
)
train_generator.class_indices
validation_generator = test_dataset.flow_from_directory(
'drive/MyDrive/CXR/Dataset/Test/',
target_size = (244,244),
batch_size = 32,
class_mode ='binary'
)
hist = model.fit(
train_generator,
steps_per_epoch=8,
epochs = 10,
validation_data = validation_generator,
validation_steps=2
)
When I call the last block of code (the hist = model.fit(...) call above), the rest of the script runs fine, but I get an InvalidArgumentError, as given below:
Epoch 1/10
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
<ipython-input-19-f0dfa9c816cb> in <module>
4 epochs = 10,
5 validation_data = validation_generator,
----> 6 validation_steps=2
7
8 )
1 frames
/usr/local/lib/python3.7/dist-packages/tensorflow/python/eager/execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
53 ctx.ensure_initialized()
54 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
---> 55 inputs, attrs, num_outputs)
56 except core._NotOkStatusException as e:
57 if name is not None:
InvalidArgumentError: Graph execution error:
Detected at node 'sequential/flatten/Reshape' defined at (most recent call last):
File "/usr/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py", line 16, in <module>
app.launch_new_instance()
File "/usr/local/lib/python3.7/dist-packages/traitlets/config/application.py", line 846, in launch_instance
app.start()
File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelapp.py", line 612, in start
self.io_loop.start()
File "/usr/local/lib/python3.7/dist-packages/tornado/platform/asyncio.py", line 149, in start
self.asyncio_loop.run_forever()
File "/usr/lib/python3.7/asyncio/base_events.py", line 541, in run_forever
self._run_once()
File "/usr/lib/python3.7/asyncio/base_events.py", line 1786, in _run_once
handle._run()
File "/usr/lib/python3.7/asyncio/events.py", line 88, in _run
self._context.run(self._callback, *self._args)
File "/usr/local/lib/python3.7/dist-packages/tornado/ioloop.py", line 690, in <lambda>
lambda f: self._run_callback(functools.partial(callback, future))
File "/usr/local/lib/python3.7/dist-packages/tornado/ioloop.py", line 743, in _run_callback
ret = callback()
File "/usr/local/lib/python3.7/dist-packages/tornado/gen.py", line 787, in inner
self.run()
File "/usr/local/lib/python3.7/dist-packages/tornado/gen.py", line 748, in run
yielded = self.gen.send(value)
File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelbase.py", line 365, in process_one
yield gen.maybe_future(dispatch(*args))
File "/usr/local/lib/python3.7/dist-packages/tornado/gen.py", line 209, in wrapper
yielded = next(result)
File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelbase.py", line 268, in dispatch_shell
yield gen.maybe_future(handler(stream, idents, msg))
File "/usr/local/lib/python3.7/dist-packages/tornado/gen.py", line 209, in wrapper
yielded = next(result)
File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelbase.py", line 545, in execute_request
user_expressions, allow_stdin,
File "/usr/local/lib/python3.7/dist-packages/tornado/gen.py", line 209, in wrapper
yielded = next(result)
File "/usr/local/lib/python3.7/dist-packages/ipykernel/ipkernel.py", line 306, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "/usr/local/lib/python3.7/dist-packages/ipykernel/zmqshell.py", line 536, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py", line 2855, in run_cell
raw_cell, store_history, silent, shell_futures)
File "/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py", line 2881, in _run_cell
return runner(coro)
File "/usr/local/lib/python3.7/dist-packages/IPython/core/async_helpers.py", line 68, in _pseudo_sync_runner
coro.send(None)
File "/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py", line 3058, in run_cell_async
interactivity=interactivity, compiler=compiler, result=result)
File "/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py", line 3249, in run_ast_nodes
if (await self.run_code(code, result, async_=asy)):
File "/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py", line 3326, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-14-f0dfa9c816cb>", line 6, in <module>
validation_steps=2
File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 64, in error_handler
return fn(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1409, in fit
tmp_logs = self.train_function(iterator)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1051, in train_function
return step_function(self, iterator)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1040, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1030, in run_step
outputs = model.train_step(data)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 889, in train_step
y_pred = self(x, training=True)
File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 64, in error_handler
return fn(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 490, in __call__
return super().__call__(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 64, in error_handler
return fn(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/base_layer.py", line 1014, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 92, in error_handler
return fn(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/sequential.py", line 374, in call
return super(Sequential, self).call(inputs, training=training, mask=mask)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/functional.py", line 459, in call
inputs, training=training, mask=mask)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/functional.py", line 596, in _run_internal_graph
outputs = node.layer(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 64, in error_handler
return fn(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/base_layer.py", line 1014, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 92, in error_handler
return fn(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/keras/layers/reshaping/flatten.py", line 98, in call
return tf.reshape(inputs, flattened_shape)
Node: 'sequential/flatten/Reshape'
Input to reshape is a tensor with 3211264 values, but the requested shape requires a multiple of 86528
[[{{node sequential/flatten/Reshape}}]] [Op:__inference_train_function_1202]
Currently, I am using Google Colab for better performance. Can you please help me solve this problem? Why isn't my model training on the data?
I am running a 4-class semantic segmentation problem using DeepLabV3+, and I get the graph execution error as soon as the training starts.
I have identified the problem after searching the web for solutions. The problem is with the labels: my labels are 2, 4, 6, 8 instead of 0, 1, 2, 3.
Currently, the model trains after adjusting num_classes to 9 so as to cater for all the labels. This feels weird, since I should instead encode the labels as 0, 1, 2, 3. I have failed to encode the labels in my main code so as to get rid of the invalid ones.
Can someone help me with the script and where to fix it in my code? Thank you :)
This is the code that I generated, and I will be glad if anyone goes through it:
import tensorflow as tf
from tensorflow import keras
from sklearn.model_selection import train_test_split
import albumentations as A
from glob import glob
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
path = "D:/K/deeplabv3plus/datasets"
os.chdir(path)
os.listdir(os.path.join('128_patches', 'masks'))[:924]
os.listdir(os.path.join('128_patches', 'images'))[:924]
config = {
'IMG_PATH': os.path.join('128_patches', 'images'),
'LABEL_PATH': os.path.join('128_patches', 'masks'),
'NUM_CLASSES': 4,
'BATCH_SIZE': 2,
'IMAGE_SIZE': 128
}
##### Building Dataset
image_paths = glob(os.path.join(config['IMG_PATH'], '*'), recursive=True)
mask_paths = glob(os.path.join(config['LABEL_PATH'], '*'), recursive=True)
#image_paths_train, image_paths_test, mask_paths_train, mask_paths_test = train_test_split(image_paths, mask_paths, shuffle=True)
image_paths_train1, image_paths_test1, mask_paths_train1, mask_paths_test1 = train_test_split(image_paths, mask_paths, test_size=0.15)
image_paths_train, image_paths_test, mask_paths_train, mask_paths_test = train_test_split(image_paths_train1, mask_paths_train1, test_size=0.15)
config['DATASET_LENGTH'] = len(image_paths_train)
def preprocess(image_path, mask_path):
    img = tf.io.read_file(image_path)
    img = tf.image.decode_jpeg(img, channels=3)
    img = tf.image.resize(img, size=[config['IMAGE_SIZE'], config['IMAGE_SIZE']])
    img = tf.cast(img, tf.float32) / 255.0
    mask = tf.io.read_file(mask_path)
    # Only one channel for masks, denoting the class and NOT image colors
    mask = tf.image.decode_png(mask, channels=1)
    mask = tf.image.resize(mask, size=[config['IMAGE_SIZE'], config['IMAGE_SIZE']])
    mask = tf.cast(mask, tf.float32)
    return img, mask
def augment_dataset_tf(img, mask):
    # Augmentations should always be performed on both an input image and a mask if applied at all
    if tf.random.uniform(()) > 0.5:
        img = tf.image.flip_left_right(img)
        mask = tf.image.flip_left_right(mask)
    if tf.random.uniform(()) > 0.5:
        img = tf.image.flip_up_down(img)
        mask = tf.image.flip_up_down(mask)
    if tf.random.uniform(()) > 0.5:
        img = tf.image.rot90(img)
        mask = tf.image.rot90(mask)
    return img, mask
def albumentations(img, mask):
    # Augmentation pipeline - each of these has an adjustable probability
    # of being applied, regardless of other transforms
    transform = A.Compose([
        A.HorizontalFlip(p=0.5),
        A.RandomBrightnessContrast(p=0.3),
        A.Transpose(p=0.5),
        A.VerticalFlip(p=0.5),
        A.HorizontalFlip(p=0.5),
        A.Rotate(limit=70),
        # CoarseDropout is the new Cutout implementation
        A.CoarseDropout(p=0.5, max_holes=12, max_height=24, max_width=24)
    ])
    # Apply transforms and extract image and mask
    transformed = transform(image=img, mask=mask)
    transformed_image = transformed['image']
    transformed_mask = transformed['mask']
    # Cast to TF Floats and return
    transformed_image = tf.cast(transformed_image, tf.float32)
    transformed_mask = tf.cast(transformed_mask, tf.float32)
    return transformed_image, transformed_mask
def create_dataset_tf(images, masks, augment):
    dataset = tf.data.Dataset.from_tensor_slices((images, masks)).shuffle(len(images))
    dataset = dataset.map(preprocess, num_parallel_calls=tf.data.AUTOTUNE)
    if augment:
        dataset = dataset.map(apply_albumentations, num_parallel_calls=tf.data.AUTOTUNE)
        dataset = dataset.batch(config['BATCH_SIZE'], drop_remainder=True).prefetch(tf.data.AUTOTUNE).repeat()
    else:
        dataset = dataset.batch(config['BATCH_SIZE'], drop_remainder=True).prefetch(tf.data.AUTOTUNE)
    return dataset

def apply_albumentations(img, mask):
    aug_img, aug_mask = tf.numpy_function(func=albumentations, inp=[img, mask], Tout=[tf.float32, tf.float32])
    aug_img = tf.ensure_shape(aug_img, shape=[config['IMAGE_SIZE'], config['IMAGE_SIZE'], 3])
    aug_mask = tf.ensure_shape(aug_mask, shape=[config['IMAGE_SIZE'], config['IMAGE_SIZE'], 1])
    return aug_img, aug_mask
train_set = create_dataset_tf(image_paths_train, mask_paths_train, augment=False)
test_set = create_dataset_tf(image_paths_test, mask_paths_test, augment=False)
for img_batch, mask_batch in train_set.take(2):
    for i in range(len(img_batch)):
        fig, ax = plt.subplots(1, 2)
        ax[0].imshow(img_batch[i].numpy())
        ax[1].imshow(mask_batch[i].numpy())
# Turns into atrous_block with dilation_rate > 1
def conv_block(block_input, num_filters=256, kernel_size=(3, 3), dilation_rate=1, padding="same"):
    x = keras.layers.Conv2D(num_filters, kernel_size=kernel_size, dilation_rate=dilation_rate, padding="same")(block_input)
    x = keras.layers.BatchNormalization()(x)
    x = keras.layers.Activation('relu')(x)
    return x
# Atrous Spatial Pyramid Pooling
def ASPP(inputs):
    # 4 conv blocks with dilation rates at `[1, 6, 12, 18]`
    conv_1 = conv_block(inputs, kernel_size=(1, 1), dilation_rate=1)
    conv_6 = conv_block(inputs, kernel_size=(3, 3), dilation_rate=6)
    conv_12 = conv_block(inputs, kernel_size=(3, 3), dilation_rate=12)
    conv_18 = conv_block(inputs, kernel_size=(3, 3), dilation_rate=18)
    dims = inputs.shape
    # Image Pooling -> (256, 256, 3) -> (1, 1, filter_num) -> (32, 32, 256)
    x = keras.layers.AveragePooling2D(pool_size=(dims[-3], dims[-2]))(inputs)
    x = conv_block(x, kernel_size=1)
    out_pool = keras.layers.UpSampling2D(size=(dims[-3] // x.shape[1], dims[-2] // x.shape[2]))(x)
    x = keras.layers.Concatenate()([conv_1, conv_6, conv_12, conv_18, out_pool])
    return conv_block(x, kernel_size=1)
def define_deeplabv3_plus(image_size, num_classes, backbone):
    model_input = keras.Input(shape=(image_size, image_size, 3))
    if backbone == 'resnet':
        resnet101 = keras.applications.ResNet152(
            weights="imagenet",
            include_top=False,
            input_tensor=model_input)
        x = resnet101.get_layer("conv4_block6_2_relu").output
        low_level = resnet101.get_layer("conv2_block3_2_relu").output
    elif backbone == 'effnet':
        effnet = keras.applications.EfficientNetV2B1(
            weights="imagenet",
            include_top=False,
            input_tensor=model_input)
        x = effnet.get_layer("block5e_activation").output
        low_level = effnet.get_layer("block2a_expand_activation").output
    aspp_result = ASPP(x)
    upsampled_aspp = keras.layers.UpSampling2D(size=(4, 4))(aspp_result)
    low_level = conv_block(low_level, num_filters=48, kernel_size=1)
    x = keras.layers.Concatenate()([upsampled_aspp, low_level])
    x = conv_block(x)
    x = keras.layers.UpSampling2D(size=(4, 4))(x)
    model_output = keras.layers.Conv2D(num_classes, kernel_size=(1, 1), padding="same", activation='softmax')(x)
    return keras.Model(inputs=model_input, outputs=model_output)
#model = define_deeplabv3_plus(config['IMAGE_SIZE'], config['NUM_CLASSES'], 'resnet')
model = define_deeplabv3_plus(config['IMAGE_SIZE'], config['NUM_CLASSES'], 'effnet')
model.summary()
from keras import backend as K
def dice_coef(y_true, y_pred, smooth=1e-7):
    y_true_f = K.flatten(K.one_hot(K.cast(y_true, 'int32'), num_classes=config['NUM_CLASSES'])[..., 1:])
    y_pred_f = K.flatten(y_pred[..., 1:])
    intersect = K.sum(y_true_f * y_pred_f, axis=-1)
    denom = K.sum(y_true_f + y_pred_f, axis=-1)
    return K.mean((2. * intersect / (denom + smooth)))

def dice_loss(y_true, y_pred):
    return 1 - dice_coef(y_true, y_pred)
class MeanIoU(tf.keras.metrics.MeanIoU):
    def __init__(self,
                 y_true=None,
                 y_pred=None,
                 num_classes=None,
                 name=None,
                 dtype=None):
        super(MeanIoU, self).__init__(num_classes=num_classes, name=name, dtype=dtype)

    def update_state(self, y_true, y_pred, sample_weight=None):
        y_pred = tf.math.argmax(y_pred, axis=-1)
        return super().update_state(y_true, y_pred, sample_weight)
reduceLr = keras.callbacks.ReduceLROnPlateau(patience=5, factor=0.3, monitor='val_sparse_categorical_accuracy')
early_stopping = keras.callbacks.EarlyStopping(patience=10, monitor='val_sparse_categorical_accuracy', restore_best_weights=True)
model.compile(
    optimizer=keras.optimizers.Adam(learning_rate=1e-3),
    #loss=soft_dice_loss,
    loss="sparse_categorical_crossentropy",
    metrics=["sparse_categorical_accuracy",
             MeanIoU(num_classes=config['NUM_CLASSES']),
             dice_coef])

history = model.fit(train_set,
                    epochs=100,
                    steps_per_epoch=int(config['DATASET_LENGTH'] / config['BATCH_SIZE']),
                    validation_data=test_set,
                    callbacks=[reduceLr, early_stopping])
Now, below is the error that comes up the moment I begin to train the model:
Epoch 1/100
2022-10-17 09:57:16.605349: I tensorflow/stream_executor/cuda/cuda_dnn.cc:384] Loaded cuDNN version 8500
Traceback (most recent call last):
File "C:\Users\Windows\AppData\Local\Temp\ipykernel_10008\204393382.py", line 12, in <module>
history = model.fit(train_set,
File "C:\Users\Windows\anaconda3\envs\Kal\lib\site-packages\keras\utils\traceback_utils.py", line 67, in error_handler
raise e.with_traceback(filtered_tb) from None
File "C:\Users\Windows\anaconda3\envs\Kal\lib\site-packages\tensorflow\python\eager\execute.py", line 54, in quick_execute
tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
InvalidArgumentError: Graph execution error:
Detected at node 'confusion_matrix/assert_less/Assert/AssertGuard/Assert' defined at (most recent call last):
File "C:\Users\Windows\anaconda3\envs\Kal\lib\runpy.py", line 196, in _run_module_as_main
return _run_code(code, main_globals, None,
File "C:\Users\Windows\anaconda3\envs\Kal\lib\runpy.py", line 86, in _run_code
exec(code, run_globals)
File "C:\Users\Windows\anaconda3\envs\Kal\lib\site-packages\spyder_kernels\console\__main__.py", line 24, in <module>
start.main()
File "C:\Users\Windows\anaconda3\envs\Kal\lib\site-packages\spyder_kernels\console\start.py", line 332, in main
kernel.start()
File "C:\Users\Windows\anaconda3\envs\Kal\lib\site-packages\ipykernel\kernelapp.py", line 677, in start
self.io_loop.start()
File "C:\Users\Windows\anaconda3\envs\Kal\lib\site-packages\tornado\platform\asyncio.py", line 199, in start
self.asyncio_loop.run_forever()
File "C:\Users\Windows\anaconda3\envs\Kal\lib\asyncio\base_events.py", line 600, in run_forever
self._run_once()
File "C:\Users\Windows\anaconda3\envs\Kal\lib\asyncio\base_events.py", line 1896, in _run_once
handle._run()
File "C:\Users\Windows\anaconda3\envs\Kal\lib\asyncio\events.py", line 80, in _run
self._context.run(self._callback, *self._args)
File "C:\Users\Windows\anaconda3\envs\Kal\lib\site-packages\ipykernel\kernelbase.py", line 471, in dispatch_queue
await self.process_one()
File "C:\Users\Windows\anaconda3\envs\Kal\lib\site-packages\ipykernel\kernelbase.py", line 460, in process_one
await dispatch(*args)
File "C:\Users\Windows\anaconda3\envs\Kal\lib\site-packages\ipykernel\kernelbase.py", line 367, in dispatch_shell
await result
File "C:\Users\Windows\anaconda3\envs\Kal\lib\site-packages\ipykernel\kernelbase.py", line 662, in execute_request
reply_content = await reply_content
File "C:\Users\Windows\anaconda3\envs\Kal\lib\site-packages\ipykernel\ipkernel.py", line 360, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "C:\Users\Windows\anaconda3\envs\Kal\lib\site-packages\ipykernel\zmqshell.py", line 532, in run_cell
return super().run_cell(*args, **kwargs)
File "C:\Users\Windows\anaconda3\envs\Kal\lib\site-packages\IPython\core\interactiveshell.py", line 2914, in run_cell
result = self._run_cell(
File "C:\Users\Windows\anaconda3\envs\Kal\lib\site-packages\IPython\core\interactiveshell.py", line 2960, in _run_cell
return runner(coro)
File "C:\Users\Windows\anaconda3\envs\Kal\lib\site-packages\IPython\core\async_helpers.py", line 78, in _pseudo_sync_runner
coro.send(None)
File "C:\Users\Windows\anaconda3\envs\Kal\lib\site-packages\IPython\core\interactiveshell.py", line 3185, in run_cell_async
has_raised = await self.run_ast_nodes(code_ast.body, cell_name,
File "C:\Users\Windows\anaconda3\envs\Kal\lib\site-packages\IPython\core\interactiveshell.py", line 3377, in run_ast_nodes
if (await self.run_code(code, result, async_=asy)):
File "C:\Users\Windows\anaconda3\envs\Kal\lib\site-packages\IPython\core\interactiveshell.py", line 3457, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "C:\Users\Windows\AppData\Local\Temp\ipykernel_10008\204393382.py", line 12, in <module>
history = model.fit(train_set,
File "C:\Users\Windows\anaconda3\envs\Kal\lib\site-packages\keras\utils\traceback_utils.py", line 64, in error_handler
return fn(*args, **kwargs)
File "C:\Users\Windows\anaconda3\envs\Kal\lib\site-packages\keras\engine\training.py", line 1409, in fit
tmp_logs = self.train_function(iterator)
File "C:\Users\Windows\anaconda3\envs\Kal\lib\site-packages\keras\engine\training.py", line 1051, in train_function
return step_function(self, iterator)
File "C:\Users\Windows\anaconda3\envs\Kal\lib\site-packages\keras\engine\training.py", line 1040, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "C:\Users\Windows\anaconda3\envs\Kal\lib\site-packages\keras\engine\training.py", line 1030, in run_step
outputs = model.train_step(data)
File "C:\Users\Windows\anaconda3\envs\Kal\lib\site-packages\keras\engine\training.py", line 894, in train_step
return self.compute_metrics(x, y, y_pred, sample_weight)
File "C:\Users\Windows\anaconda3\envs\Kal\lib\site-packages\keras\engine\training.py", line 987, in compute_metrics
self.compiled_metrics.update_state(y, y_pred, sample_weight)
File "C:\Users\Windows\anaconda3\envs\Kal\lib\site-packages\keras\engine\compile_utils.py", line 501, in update_state
metric_obj.update_state(y_t, y_p, sample_weight=mask)
File "C:\Users\Windows\anaconda3\envs\Kal\lib\site-packages\keras\utils\metrics_utils.py", line 70, in decorated
update_op = update_state_fn(*args, **kwargs)
File "C:\Users\Windows\anaconda3\envs\Kal\lib\site-packages\keras\metrics\base_metric.py", line 140, in update_state_fn
return ag_update_state(*args, **kwargs)
File "C:\Users\Windows\AppData\Local\Temp\ipykernel_10008\1378111743.py", line 12, in update_state
return super().update_state(y_true, y_pred, sample_weight)
File "C:\Users\Windows\anaconda3\envs\Kal\lib\site-packages\keras\metrics\metrics.py", line 2494, in update_state
current_cm = tf.math.confusion_matrix(
Node: 'confusion_matrix/assert_less/Assert/AssertGuard/Assert'
2 root error(s) found.
(0) INVALID_ARGUMENT: assertion failed: [`labels` out of bound] [Condition x < y did not hold element-wise:] [x (confusion_matrix/control_dependency:0) = ] [6 6 6...] [y (confusion_matrix/Cast_2:0) = ] [4]
[[{{node confusion_matrix/assert_less/Assert/AssertGuard/Assert}}]]
[[confusion_matrix/assert_less_1/Assert/AssertGuard/pivot_f/_31/_61]]
(1) INVALID_ARGUMENT: assertion failed: [`labels` out of bound] [Condition x < y did not hold element-wise:] [x (confusion_matrix/control_dependency:0) = ] [6 6 6...] [y (confusion_matrix/Cast_2:0) = ] [4]
[[{{node confusion_matrix/assert_less/Assert/AssertGuard/Assert}}]]
0 successful operations.
0 derived errors ignored. [Op:__inference_train_function_17948]
I will be very glad if anyone can help with debugging this.
You can change your labels by using
dataset = dataset.map(lambda x, y: (x, y/2-1), num_parallel_calls=tf.data.AUTOTUNE)
inside the function create_dataset_tf. It will map the labels from 2, 4, 6, 8 to 0, 1, 2, 3.
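For example, placed right after the preprocess map (a sketch based on the create_dataset_tf from the question, with only the remapping line added):
def create_dataset_tf(images, masks, augment):
    dataset = tf.data.Dataset.from_tensor_slices((images, masks)).shuffle(len(images))
    dataset = dataset.map(preprocess, num_parallel_calls=tf.data.AUTOTUNE)
    # Remap mask values 2, 4, 6, 8 to valid class indices 0, 1, 2, 3
    dataset = dataset.map(lambda x, y: (x, y / 2 - 1), num_parallel_calls=tf.data.AUTOTUNE)
    if augment:
        dataset = dataset.map(apply_albumentations, num_parallel_calls=tf.data.AUTOTUNE)
        dataset = dataset.batch(config['BATCH_SIZE'], drop_remainder=True).prefetch(tf.data.AUTOTUNE).repeat()
    else:
        dataset = dataset.batch(config['BATCH_SIZE'], drop_remainder=True).prefetch(tf.data.AUTOTUNE)
    return dataset
One caveat worth checking: preprocess resizes the masks with the default bilinear interpolation, which can blend neighbouring labels into in-between values; resizing masks with method='nearest' in tf.image.resize avoids that.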
I am doing a personal project where I use computer vision and a backtracking algorithm to solve Sudoku puzzles. This error suddenly popped up when I tried to set up the project on a new computer. This is the file where I train my model for the CV part.
from tabnanny import verbose
from turtle import pu
import numpy
import cv2
import matplotlib.pyplot as plot
from keras.models import model_from_json
json_file =open('model/model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loadedModel = model_from_json(loaded_model_json)
loadedModel.load_weights('model/model.h5')
print("Loaded saved model from disk.")
def predictNumber(image):
    imageResize = cv2.resize(image, (28, 28))
    imageResizeCopy = imageResize.reshape(1, 1, 28, 28)
    #loadedModelPred = loadedModel.predict_classes(imageResizeCopy, verbose=0)
    loadedModelPred = numpy.argmax(loadedModel.predict(imageResizeCopy), axis=1)
    return loadedModelPred[0]
def extract(puzzle):
    puzzle = cv2.resize(puzzle, (450, 450))
    grid = numpy.zeros([9, 9])
    for i in range(9):
        for j in range(9):
            image = puzzle[i*50:(i+1)*50, j*50:(j+1)*50]
            if image.sum() > 25000:
                grid[i][j] = predictNumber(image)
            else:
                grid[i][j] = 0
    return grid.astype(int)
The code block above is the part of the code that apparently triggers the following error.
2022-09-17 21:29:46.532 Uncaught app exception
Traceback (most recent call last):
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\streamlit\runtime\scriptrunner\script_runner.py", line 556, in _run_script
exec(code, module.__dict__)
File "C:\Users\kvnka\OneDrive - Trinity College Dublin\GitHub\sudoku-solver\app.py", line 27, in <module>
grid = numberExtract.extract(image)
File "C:\Users\kvnka\OneDrive - Trinity College Dublin\GitHub\sudoku-solver\cv\numberExtract.py", line 31, in extract
grid[i][j] = predictNumber(image)
File "C:\Users\kvnka\OneDrive - Trinity College Dublin\GitHub\sudoku-solver\cv\numberExtract.py", line 20, in predictNumber
loadedModelPred = numpy.argmax(loadedModel.predict(imageResizeCopy), axis=1)
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\keras\utils\traceback_utils.py", line 70, in error_handler
raise e.with_traceback(filtered_tb) from None
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\tensorflow\python\eager\execute.py", line 54, in quick_execute
tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
tensorflow.python.framework.errors_impl.InvalidArgumentError: Graph execution error:
Detected at node 'sequential_1/max_pooling2d_1/MaxPool' defined at (most recent call last):
File "C:\Program Files\Python310\lib\threading.py", line 973, in _bootstrap
self._bootstrap_inner()
File "C:\Program Files\Python310\lib\threading.py", line 1016, in _bootstrap_inner
self.run()
File "C:\Program Files\Python310\lib\threading.py", line 953, in run
self._target(*self._args, **self._kwargs)
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\streamlit\runtime\scriptrunner\script_runner.py", line 295, in _run_script_thread
self._run_script(request.rerun_data)
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\streamlit\runtime\scriptrunner\script_runner.py", line 556, in _run_script
exec(code, module.__dict__)
File "C:\Users\kvnka\OneDrive - Trinity College Dublin\GitHub\sudoku-solver\app.py", line 27, in <module>
grid = numberExtract.extract(image)
File "C:\Users\kvnka\OneDrive - Trinity College Dublin\GitHub\sudoku-solver\cv\numberExtract.py", line 31, in extract
grid[i][j] = predictNumber(image)
File "C:\Users\kvnka\OneDrive - Trinity College Dublin\GitHub\sudoku-solver\cv\numberExtract.py", line 20, in predictNumber
loadedModelPred = numpy.argmax(loadedModel.predict(imageResizeCopy), axis=1)
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\keras\utils\traceback_utils.py", line 65, in error_handler
return fn(*args, **kwargs)
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\keras\engine\training.py", line 2344, in predict
tmp_batch_outputs = self.predict_function(iterator)
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\keras\engine\training.py", line 2131, in predict_function
return step_function(self, iterator)
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\keras\engine\training.py", line 2117, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\keras\engine\training.py", line 2105, in run_step
ras\engine\sequential.py", line 412, in call
return super().call(inputs, training=training, mask=mask)
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\keras\engine\functional.py", line 510, in call
return self._run_internal_graph(inputs, training=training, mask=mask)
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\keras\engine\functional.py", line 667, in _run_interras\engine\sequential.py", line 412, in call
return super().call(inputs, training=training, mask=mask)
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\keras\engine\functional.py", line 510, in call return self._run_internal_graph(inputs, training=training, mask=mask)
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\keras\engine\functional.py", line 667, in _run_internal_graph
outputs = node.layer(*args, **kwargs)
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\keras\utils\traceback_utils.py", line 65, in error_handler
return fn(*args, **kwargs)
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\keras\engine\base_layer.py", line 1107, in __call__ outputs = call_fn(inputs, *args, **kwargs)
File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\keras\utils\traceback_utils.py", line 96, in error_handler
return fn(*args, **kwargs) File "C:\Users\kvnka\AppData\Roaming\Python\Python310\site-packages\keras\layers\pooling\base_pooling2d.py", line 84, in call
outputs = self.pool_function(
Node: 'sequential_1/max_pooling2d_1/MaxPool'
Default MaxPoolingOp only supports NHWC on device type CPU
[[{{node sequential_1/max_pooling2d_1/MaxPool}}]] [Op:__inference_predict_function_290]
I'm a bit clueless about what's causing this error.
The problem is at the line:
imageResizeCopy = imageResize.reshape(1, 1, 28, 28)
NHWC stands for (n_samples, height, width, channels), but you are reshaping your image into a channels-first format, (n_samples, channels, height, width).
Channels-first is typically used by PyTorch, while TensorFlow's default format is channels-last. You only have to reshape your image accordingly.
You want to obtain a shape like this: (1, 28, 28, 1).
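A minimal sketch of the corrected function (assuming the model was trained on single-channel, channels-last 28x28 inputs):
def predictNumber(image):
    imageResize = cv2.resize(image, (28, 28))
    # NHWC / channels-last: (n_samples, height, width, channels)
    imageResizeCopy = imageResize.reshape(1, 28, 28, 1)
    loadedModelPred = numpy.argmax(loadedModel.predict(imageResizeCopy), axis=1)
    return loadedModelPred[0]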
I'm trying to create a graph for the outputs:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import wrapt
from sklearn.model_selection import train_test_split
from keras import models
from keras import layers
from tensorflow.keras.utils import to_categorical
from sklearn.metrics import confusion_matrix
# read data
data=pd.read_csv('adult-stretch.data', header=None)
# convert to arrays
x=data.iloc[:, :4].to_numpy()
t=data[4].replace(['T','F'],[0,1])
t=t.to_numpy()
# split the dataset
xTrain, xTest, tTrain, tTest = train_test_split(x, t, test_size=0.2, random_state=3)
# convert to categorical (hot encoding)
tTrainHot = to_categorical(tTrain)
tTestHot = to_categorical(tTest)
# create network
net=models.Sequential()
#net.add(layers.Dense(5,activation="relu", input_shape=(4,)))
net.add(layers.Dense(5,activation="relu", input_shape=(np.size(x,1),)))
#net.add(layers.Dense(3,activation="sigmoid"))
net.add(layers.Dense(np.size(tTrainHot, 1),activation="sigmoid"))
print(net.summary()) #see the network structure
# set network train parameters
net.compile(loss='categorical_crossentropy', metrics='accuracy')
#train the network
history = net.fit(xTrain, tTrainHot, epochs=300, validation_split=0.2)
plt.close('all')
plt.plot(history.history['loss'],label='Training Loss',linewidth=3)
plt.plot(history.history['val_loss'],label='Validation Loss',linewidth=3)
plt.legend()
plt.grid('on')
plt.title('Loss')
plt.figure()
plt.plot(history.history['accuracy'],label='Training Accuracy',linewidth=3)
plt.plot(history.history['val_accuracy'],label='Validation Accuracy',linewidth=3)
plt.legend()
plt.grid('on')
plt.title('Accuracy')
lossTest, accTest = net.evaluate(xTest, tTestHot)
print('accTest=', accTest)
# real output (hot-encoded)
yTestHot = net.predict(xTest)
# convert yTest to label vector
yTest = np.argmax(yTestHot, axis=1)
print(confusion_matrix(tTest, yTest))
and I just keep getting this graph execution error:
Detected at node 'sequential_23/Cast' defined at (most recent call last):
File "/Users/ispasdinu-ioan/opt/anaconda3/lib/python3.9/runpy.py", line 197, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/Users/ispasdinu-ioan/opt/anaconda3/lib/python3.9/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/Users/ispasdinu-ioan/opt/anaconda3/lib/python3.9/site-packages/spyder_kernels/console/__main__.py", line 23, in <module>
start.main()
File "/Users/ispasdinu-ioan/opt/anaconda3/lib/python3.9/site-packages/spyder_kernels/console/start.py", line 328, in main
kernel.start()
File "/Users/ispasdinu-ioan/opt/anaconda3/lib/python3.9/site-packages/ipykernel/kernelapp.py", line 677, in start
self.io_loop.start()
File "/Users/ispasdinu-ioan/opt/anaconda3/lib/python3.9/site-packages/tornado/platform/asyncio.py", line 199, in start
self.asyncio_loop.run_forever()
File "/Users/ispasdinu-ioan/opt/anaconda3/lib/python3.9/asyncio/base_events.py", line 596, in run_forever
self._run_once()
File "/Users/ispasdinu-ioan/opt/anaconda3/lib/python3.9/asyncio/base_events.py", line 1890, in _run_once
handle._run()
File "/Users/ispasdinu-ioan/opt/anaconda3/lib/python3.9/asyncio/events.py", line 80, in _run
self._context.run(self._callback, *self._args)
File "/Users/ispasdinu-ioan/opt/anaconda3/lib/python3.9/site-packages/ipykernel/kernelbase.py", line 457, in dispatch_queue
await self.process_one()
File "/Users/ispasdinu-ioan/opt/anaconda3/lib/python3.9/site-packages/ipykernel/kernelbase.py", line 446, in process_one
await dispatch(*args)
File "/Users/ispasdinu-ioan/opt/anaconda3/lib/python3.9/site-packages/ipykernel/kernelbase.py", line 353, in dispatch_shell
await result
File "/Users/ispasdinu-ioan/opt/anaconda3/lib/python3.9/site-packages/ipykernel/kernelbase.py", line 648, in execute_request
reply_content = await reply_content
File "/Users/ispasdinu-ioan/opt/anaconda3/lib/python3.9/site-packages/ipykernel/ipkernel.py", line 353, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "/Users/ispasdinu-ioan/opt/anaconda3/lib/python3.9/site-packages/ipykernel/zmqshell.py", line 533, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "/Users/ispasdinu-ioan/opt/anaconda3/lib/python3.9/site-packages/IPython/core/interactiveshell.py", line 2901, in run_cell
result = self._run_cell(
File "/Users/ispasdinu-ioan/opt/anaconda3/lib/python3.9/site-packages/IPython/core/interactiveshell.py", line 2947, in _run_cell
return runner(coro)
File "/Users/ispasdinu-ioan/opt/anaconda3/lib/python3.9/site-packages/IPython/core/async_helpers.py", line 68, in _pseudo_sync_runner
coro.send(None)
File "/Users/ispasdinu-ioan/opt/anaconda3/lib/python3.9/site-packages/IPython/core/interactiveshell.py", line 3172, in run_cell_async
has_raised = await self.run_ast_nodes(code_ast.body, cell_name,
File "/Users/ispasdinu-ioan/opt/anaconda3/lib/python3.9/site-packages/IPython/core/interactiveshell.py", line 3364, in run_ast_nodes
if (await self.run_code(code, result, async_=asy)):
File "/Users/ispasdinu-ioan/opt/anaconda3/lib/python3.9/site-packages/IPython/core/interactiveshell.py", line 3444, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "/var/folders/v6/jqcvfqns1gn7zy2m3pcvzrqm0000gn/T/ipykernel_702/4012300658.py", line 1, in <module>
runfile('/Users/ispasdinu-ioan/Downloads/Lab/Lab3/nnga3_ex1.py', wdir='/Users/ispasdinu-ioan/Downloads/Lab/Lab3')
File "/Users/ispasdinu-ioan/opt/anaconda3/lib/python3.9/site-packages/spyder_kernels/customize/spydercustomize.py", line 577, in runfile
exec_code(file_code, filename, ns_globals, ns_locals,
File "/Users/ispasdinu-ioan/opt/anaconda3/lib/python3.9/site-packages/spyder_kernels/customize/spydercustomize.py", line 465, in exec_code
exec(compiled, ns_globals, ns_locals)
File "/Users/ispasdinu-ioan/Downloads/Lab/Lab3/nnga3_ex1.py", line 40, in <module>
history = net.fit(xTrain, tTrainHot, epochs=300, validation_split=0.2)
File "/Users/ispasdinu-ioan/.local/lib/python3.9/site-packages/keras/utils/traceback_utils.py", line 64, in error_handler
return fn(*args, **kwargs)
File "/Users/ispasdinu-ioan/.local/lib/python3.9/site-packages/keras/engine/training.py", line 1384, in fit
tmp_logs = self.train_function(iterator)
File "/Users/ispasdinu-ioan/.local/lib/python3.9/site-packages/keras/engine/training.py", line 1021, in train_function
return step_function(self, iterator)
File "/Users/ispasdinu-ioan/.local/lib/python3.9/site-packages/keras/engine/training.py", line 1010, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "/Users/ispasdinu-ioan/.local/lib/python3.9/site-packages/keras/engine/training.py", line 1000, in run_step
outputs = model.train_step(data)
File "/Users/ispasdinu-ioan/.local/lib/python3.9/site-packages/keras/engine/training.py", line 859, in train_step
y_pred = self(x, training=True)
File "/Users/ispasdinu-ioan/.local/lib/python3.9/site-packages/keras/utils/traceback_utils.py", line 64, in error_handler
return fn(*args, **kwargs)
File "/Users/ispasdinu-ioan/.local/lib/python3.9/site-packages/keras/engine/base_layer.py", line 1096, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File "/Users/ispasdinu-ioan/.local/lib/python3.9/site-packages/keras/utils/traceback_utils.py", line 92, in error_handler
return fn(*args, **kwargs)
File "/Users/ispasdinu-ioan/.local/lib/python3.9/site-packages/keras/engine/sequential.py", line 374, in call
return super(Sequential, self).call(inputs, training=training, mask=mask)
File "/Users/ispasdinu-ioan/.local/lib/python3.9/site-packages/keras/engine/functional.py", line 451, in call
return self._run_internal_graph(
File "/Users/ispasdinu-ioan/.local/lib/python3.9/site-packages/keras/engine/functional.py", line 571, in _run_internal_graph
y = self._conform_to_reference_input(y, ref_input=x)
File "/Users/ispasdinu-ioan/.local/lib/python3.9/site-packages/keras/engine/functional.py", line 671, in _conform_to_reference_input
tensor = tf.cast(tensor, dtype=ref_input.dtype)
Node: 'sequential_23/Cast'
Cast string to float is not supported
[[{{node sequential_23/Cast}}]] [Op:__inference_train_function_10601]
ML models train on numeric tensors, but you are passing input of dtype object (strings) to the neural network; that is exactly what "Cast string to float is not supported" means. Converting the input to numerics will fix it. You can use pd.get_dummies() to one-hot encode the categorical features as follows:
data1 = pd.get_dummies(data[0])
data2 = pd.get_dummies(data[1])
data3 = pd.get_dummies(data[2])
data4 = pd.get_dummies(data[3])
# concatenate the one-hot encoded columns and the remaining numeric column with pd.concat
data = pd.concat([data1, data2, data3, data4, data[4]], axis='columns')
After one-hot encoding, every categorical value becomes its own 0/1 indicator column, so the whole dataset is numeric. Please find the working code here. Thank you.
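For reference, here is a minimal, self-contained version of the same idea (the toy frame and its values are made up for illustration; only the pattern of calls mirrors the snippet above):
import pandas as pd

# Hypothetical toy frame with integer column labels: four categorical
# columns (0-3) plus one numeric column (4), as in the snippet above.
data = pd.DataFrame({
    0: ['red', 'green', 'red'],
    1: ['small', 'large', 'small'],
    2: ['A', 'B', 'C'],
    3: ['x', 'y', 'x'],
    4: [1.0, 0.0, 1.0],  # already numeric, passed through unchanged
})

# One-hot encode each categorical column, then stitch everything back together.
encoded = pd.concat([pd.get_dummies(data[c]) for c in (0, 1, 2, 3)] + [data[4]],
                    axis='columns')
print(encoded.dtypes)  # every column is now numeric, ready for the network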
I have a simple 2-layer TensorFlow model that I am trying to train on a dataset of equal-sized stereo audio files, to tell me whether the sound is coming more from the left side or from the right side. The input is therefore an array of 3072-by-2 arrays, and the output is an array of 1s and 0s representing left and right.
The problem is that when I run the program, it fails at model.fit() with an InvalidArgumentError.
Code:
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 18 15:51:56 2022
@author: andre
"""
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint
from sklearn.model_selection import train_test_split
from datetime import datetime
from sklearn import metrics
from scipy.io import wavfile
import os
import glob
# Load in Right Side .WAV Data.
X1 = []
count1 = 0
database_path = "C:\\Users\\andre\\OneDrive\\Documents\\ESI2022\\MLDatabases\\Right\\"
for filename in glob.glob(os.path.join(database_path, '*.wav')):
    X1.append(wavfile.read(filename)[1])
    count1 = count1 + 1
# Load in Left Side .WAV Data.
X2 = []
count2 = 0
database_path2 = "C:\\Users\\andre\\OneDrive\\Documents\\ESI2022\\MLDatabases\\Left\\"
for filename2 in glob.glob(os.path.join(database_path2, '*.wav')):
    X2.append(wavfile.read(filename2)[1])
    count2 = count2 + 1
# Get the smallest size audio file (this will be the sample-size input to the model).
sample_size = len(X1[0])
for data in X1:
    if len(data) < sample_size:
        sample_size = len(data)
# Cut the audio data into equal-sized chunks.
X1e = []
for i in X1:
    num_chunks = len(i) // sample_size
    for j in range(num_chunks):
        X1e.append(i[j * sample_size:(j + 1) * sample_size])
X1 = X1e
X2e = []
for i in X2:
    num_chunks = len(i) // sample_size
    for j in range(num_chunks):
        X2e.append(i[j * sample_size:(j + 1) * sample_size])
X2 = X2e
del X1e
del X2e
# Create output data that is the same length as the input data.
Y1 = np.ones(len(X1), dtype='float32').tolist()
Y2 = np.zeros(len(X2), dtype='float32').tolist()
# Concatenate left and right .WAV data and output data as numpy arrays.
X1.extend(X2)
X = np.asarray(X1)
Y = np.asarray(Y1 + Y2).astype(np.int16)
# Split data into training and test data.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0, shuffle=True)
'''
print(X[1])
time = np.linspace(0.,33792, 33792)
plt.plot(time, X[1][:,1], label="Left channel")
plt.plot(time, X[1][:,0], label="Right channel")
plt.legend()
plt.xlabel("Time [s]")
plt.ylabel("Amplitude")
plt.show()
'''
# Create the model.
model = Sequential()
# Add LSTM layers with 1 output each; the input shape is (batch of 1, sample_size timesteps, 2 channels).
model.add(layers.LSTM(1, batch_input_shape=(1, sample_size, 2), return_sequences=True))
model.add(layers.LSTM(1, return_sequences=False))
# Compile the model.
#history = model.compile(loss='mean_absolute_error', metrics=['accuracy'], optimizer='adam', output='sparse_categorical_crossentropy')
optimizer = Adam(learning_rate=2e-4)  # created but unused: compile() below is passed the string "adam"
'''
history = model.compile(optimizer=optimizer,
                        loss={'output': 'sparse_categorical_crossentropy'},
                        metrics={'output': 'sparse_categorical_accuracy'},
                        sample_weight_mode='temporal')
'''
# Note: model.compile() returns None, so there is nothing useful to assign to history.
model.compile(
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    optimizer="adam",
    metrics=["accuracy"],
)
model.summary()
model.summary()
# Define training parameters.
num_epochs = 200
num_batch_size = 1
# Save the most accurate model to file (verbose=1 gives more information).
checkpointer = ModelCheckpoint(filepath="SavedModels/checkpointModel.hdf5", verbose=1, save_best_only=True)
# Start the timer.
start = datetime.now()
# Train the model.
model.fit(X_train, Y_train, batch_size=num_batch_size, epochs=num_epochs, validation_data=(X_test, Y_test), callbacks=[checkpointer], verbose=1)
# Get and print model validation accuracy.
test_accuracy = model.evaluate(X_test, Y_test, verbose=0)
print(test_accuracy[1])
Output & error:
Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
lstm_2 (LSTM) (1, 3072, 1) 16
lstm_3 (LSTM) (1, 1) 12
=================================================================
Total params: 28
Trainable params: 28
Non-trainable params: 0
_________________________________________________________________
Epoch 1/200
2022-02-07 09:40:36.348127: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'cudart64_110.dll'; dlerror: cudart64_110.dll not found
2022-02-07 09:40:36.348459: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.
2022-02-07 09:40:43.978976: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'nvcuda.dll'; dlerror: nvcuda.dll not found
2022-02-07 09:40:43.979029: W tensorflow/stream_executor/cuda/cuda_driver.cc:269] failed call to cuInit: UNKNOWN ERROR (303)
2022-02-07 09:40:43.985710: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:169] retrieving CUDA diagnostic information for host: DESKTOP-0FFTIDB
2022-02-07 09:40:43.986092: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:176] hostname: DESKTOP-0FFTIDB
2022-02-07 09:40:43.990164: I tensorflow/core/platform/cpu_feature_guard.cc:151] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX AVX2
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2022-02-07 09:40:48.470415: W tensorflow/core/framework/op_kernel.cc:1745] OP_REQUIRES failed at sparse_xent_op.cc:103 : INVALID_ARGUMENT: Received a label value of 1 which is outside the valid range of [0, 1). Label values: 1
2022-02-07 09:58:29.070767: W tensorflow/core/framework/op_kernel.cc:1745] OP_REQUIRES failed at sparse_xent_op.cc:103 : INVALID_ARGUMENT: Received a label value of 1 which is outside the valid range of [0, 1). Label values: 1
Traceback (most recent call last):
File "C:\Users\andre\OneDrive\Documents\ESI2022\PythonScripts\BeltML\testML.py", line 127, in <module>
model.fit(X_train,Y_train,batch_size=num_batch_size, epochs=num_epochs, validation_data=(X_test,Y_test), callbacks=[checkpointer],verbose=1)
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 67, in error_handler
raise e.with_traceback(filtered_tb) from None
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\eager\execute.py", line 58, in quick_execute
tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
InvalidArgumentError: Received a label value of 1 which is outside the valid range of [0, 1). Label values: 1
[[node sparse_categorical_crossentropy/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits
(defined at C:\ProgramData\Anaconda3\lib\site-packages\keras\backend.py:5113)
]] [Op:__inference_train_function_9025]
Errors may have originated from an input operation.
Input Source operations connected to node sparse_categorical_crossentropy/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits:
In[0] sparse_categorical_crossentropy/Reshape_1 (defined at C:\ProgramData\Anaconda3\lib\site-packages\keras\backend.py:5109)
In[1] sparse_categorical_crossentropy/Reshape (defined at C:\ProgramData\Anaconda3\lib\site-packages\keras\backend.py:3561)
Operation defined at: (most recent call last)
File "C:\ProgramData\Anaconda3\lib\runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "C:\ProgramData\Anaconda3\lib\runpy.py", line 87, in _run_code
exec(code, run_globals)
File "C:\ProgramData\Anaconda3\lib\site-packages\spyder_kernels\console\__main__.py", line 23, in <module>
start.main()
File "C:\ProgramData\Anaconda3\lib\site-packages\spyder_kernels\console\start.py", line 328, in main
kernel.start()
File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\kernelapp.py", line 677, in start
self.io_loop.start()
File "C:\ProgramData\Anaconda3\lib\site-packages\tornado\platform\asyncio.py", line 199, in start
self.asyncio_loop.run_forever()
File "C:\ProgramData\Anaconda3\lib\asyncio\base_events.py", line 570, in run_forever
self._run_once()
File "C:\ProgramData\Anaconda3\lib\asyncio\base_events.py", line 1859, in _run_once
handle._run()
File "C:\ProgramData\Anaconda3\lib\asyncio\events.py", line 81, in _run
self._context.run(self._callback, *self._args)
File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 457, in dispatch_queue
await self.process_one()
File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 446, in process_one
await dispatch(*args)
File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 353, in dispatch_shell
await result
File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 648, in execute_request
reply_content = await reply_content
File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\ipkernel.py", line 353, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\zmqshell.py", line 533, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 2901, in run_cell
result = self._run_cell(
File "C:\ProgramData\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 2947, in _run_cell
return runner(coro)
File "C:\ProgramData\Anaconda3\lib\site-packages\IPython\core\async_helpers.py", line 68, in _pseudo_sync_runner
coro.send(None)
File "C:\ProgramData\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 3172, in run_cell_async
has_raised = await self.run_ast_nodes(code_ast.body, cell_name,
File "C:\ProgramData\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 3364, in run_ast_nodes
if (await self.run_code(code, result, async_=asy)):
File "C:\ProgramData\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 3444, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "C:\Users\andre\AppData\Local\Temp/ipykernel_3604/1229251547.py", line 1, in <module>
runfile('C:/Users/andre/OneDrive/Documents/ESI2022/PythonScripts/BeltML/testML.py', wdir='C:/Users/andre/OneDrive/Documents/ESI2022/PythonScripts/BeltML')
File "C:\ProgramData\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 577, in runfile
exec_code(file_code, filename, ns_globals, ns_locals,
File "C:\ProgramData\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 465, in exec_code
exec(compiled, ns_globals, ns_locals)
File "C:\Users\andre\OneDrive\Documents\ESI2022\PythonScripts\BeltML\testML.py", line 127, in <module>
model.fit(X_train,Y_train,batch_size=num_batch_size, epochs=num_epochs, validation_data=(X_test,Y_test), callbacks=[checkpointer],verbose=1)
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 64, in error_handler
return fn(*args, **kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\training.py", line 1216, in fit
tmp_logs = self.train_function(iterator)
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\training.py", line 878, in train_function
return step_function(self, iterator)
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\training.py", line 867, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\training.py", line 860, in run_step
outputs = model.train_step(data)
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\training.py", line 809, in train_step
loss = self.compiled_loss(
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\compile_utils.py", line 201, in __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\losses.py", line 141, in __call__
losses = call_fn(y_true, y_pred)
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\losses.py", line 245, in call
return ag_fn(y_true, y_pred, **self._fn_kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\losses.py", line 1737, in sparse_categorical_crossentropy
return backend.sparse_categorical_crossentropy(
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\backend.py", line 5113, in sparse_categorical_crossentropy
res = tf.nn.sparse_softmax_cross_entropy_with_logits(
According to the documentation, the labels argument must be a batch_size-length vector with values in [0, num_classes).
From your logs:
Received a label value of 1 which is outside the valid range of [0, 1)
The framework thinks you have only one class, because your neural network has just 1 output and num_classes is inferred from the size of the logits. To apply the SparseSoftmaxCrossEntropyWithLogits loss to labels of 0 and 1, you need 2 outputs (one logit per class), and your labels must stay either 0 or 1.
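A minimal sketch of that fix, reusing the layer sizes and sample_size = 3072 from the question (the Dense heads and the binary-cross-entropy alternative below are illustrative suggestions, not code from the original post):
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers

sample_size = 3072  # taken from the model summary in the question

# Fix A: add a 2-logit head so num_classes == 2; labels stay 0 or 1.
model = Sequential()
model.add(layers.LSTM(1, batch_input_shape=(1, sample_size, 2), return_sequences=True))
model.add(layers.LSTM(1, return_sequences=False))
model.add(layers.Dense(2))  # one logit per class (left / right)
model.compile(
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    optimizer="adam",
    metrics=["accuracy"],
)

# Fix B (an alternative): keep a single output and treat the task as binary classification.
model_b = Sequential()
model_b.add(layers.LSTM(1, batch_input_shape=(1, sample_size, 2), return_sequences=True))
model_b.add(layers.LSTM(1, return_sequences=False))
model_b.add(layers.Dense(1))  # single logit: left vs. right
model_b.compile(
    loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
    optimizer="adam",
    metrics=["accuracy"],
)
With Fix A the final output has shape (1, 2), so the 0/1 labels fall inside the valid range [0, 2) and the original error disappears.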