Using Keras Without GPU

I want to train a neural network using Keras, but when I build the model I get the following error:
2022-03-14 09:38:10.526372: E tensorflow/stream_executor/cuda/cuda_driver.cc:271] failed call to cuInit: CUDA_ERROR_NO_DEVICE: no CUDA-capable device is detected
2022-03-14 09:38:10.526465: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:156] kernel driver does not appear to be running on this host (HSKP02): /proc/driver/nvidia/version does not exist
2022-03-14 09:38:10.527391: I tensorflow/core/platform/cpu_feature_guard.cc:151] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
I tried to solve this error by writing
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES'] = "-1"
before importing Keras, but I still get the error. After that, my code goes on to fit the data with the validation set via model.fit(), and I get another error:
Traceback (most recent call last):
File "shallownet_ex.py", line 44, in <module>
H = model.fit(trainX, trainY, validation_data=(testX, testY), batch_size=32, epochs=100, verbose=1)
File ".../venv/lib/python3.8/site-packages/keras/utils/traceback_utils.py", line 67, in error_handler
raise e.with_traceback(filtered_tb) from None
File ".../venv/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py", line 1147, in autograph_handler
raise e.ag_error_metadata.to_exception(e)
ValueError: in user code:
File ".../venv/lib/python3.8/site-packages/keras/engine/training.py", line 1021, in train_function *
return step_function(self, iterator)
File ".../venv/lib/python3.8/site-packages/keras/engine/training.py", line 1010, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File ".../venv/lib/python3.8/site-packages/keras/engine/training.py", line 1000, in run_step **
outputs = model.train_step(data)
File ".../venv/lib/python3.8/site-packages/keras/engine/training.py", line 860, in train_step
loss = self.compute_loss(x, y, y_pred, sample_weight)
File ".../venv/lib/python3.8/site-packages/keras/engine/training.py", line 918, in compute_loss
return self.compiled_loss(
File ".../venv/lib/python3.8/site-packages/keras/engine/compile_utils.py", line 201, in __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
File ".../venv/lib/python3.8/site-packages/keras/losses.py", line 141, in __call__
losses = call_fn(y_true, y_pred)
File ".../venv/lib/python3.8/site-packages/keras/losses.py", line 245, in call **
return ag_fn(y_true, y_pred, **self._fn_kwargs)
File ".../venv/lib/python3.8/site-packages/keras/losses.py", line 1789, in categorical_crossentropy
return backend.categorical_crossentropy(
File ".../venv/lib/python3.8/site-packages/keras/backend.py", line 5083, in categorical_crossentropy
target.shape.assert_is_compatible_with(output.shape)
ValueError: Shapes (None, 4) and (None, 3) are incompatible
The code I'm using looks like this:
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES'] = "-1"
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from preprocesing import ImageToArrayPreprocessor, SimplePreprocesssor
from datasets import SimpleDatasetLoader
from neuralnetworks.conv import ShallowNet
from keras.optimizers import gradient_descent_v2
from imutils import paths
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True, help="path to input dataset")
args = vars(ap.parse_args())
imagePaths = list(paths.list_images(args["dataset"]))
sp = SimplePreprocesssor(32, 32)
iap = ImageToArrayPreprocessor()
sdl = SimpleDatasetLoader(preprocessors=[sp, iap])
(data, labels) = sdl.load(imagePaths, verbose=500)
data = data.astype("float") / 255.0
(trainX, testX, trainY, testY) = train_test_split(data, labels, test_size=0.25, random_state=42)
trainY = LabelBinarizer().fit_transform(trainY)
testY = LabelBinarizer().fit_transform(testY)
opt = gradient_descent_v2.SGD(learning_rate=0.005)
model = ShallowNet.build(width=32, height=32, depth=3, classes=3)
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=['acc'])
H = model.fit(trainX, trainY, validation_data=(testX, testY), batch_size=32, epochs=100, verbose=1)
The SimpleDatasetLoader just loads the images and the SimplePreprocesssor just resizes them; I think the error is inside shallownet.py, which looks like this:
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
from keras.models import Sequential
from keras.layers.convolutional import Conv2D
from keras.layers.core import Activation, Flatten, Dense
from keras import backend as K
class ShallowNet:
    @staticmethod
    def build(width, height, depth, classes):
        model = Sequential()
        inputShape = (height, width, depth)
        if K.image_data_format() == "channels_first":
            inputShape = (depth, height, width)
        model.add(Conv2D(32, (3, 3), padding="same", input_shape=inputShape))
        model.add(Activation("relu"))
        model.add(Flatten())
        model.add(Dense(classes))
        model.add(Activation("softmax"))
        return model
What I deduce is that, since my computer doesn't have a GPU, I can't perform the training and therefore can't fit the model. Is there a way to perform this training?
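A note on the two errors (my addition, not part of the original question): the CUDA lines are informational and TensorFlow simply falls back to the CPU, which is why model.fit does start; the ValueError then says the one-hot labels have 4 columns while the network outputs 3. A minimal sketch of a fix, reusing the question's variables and assuming the dataset really contains four label classes:

import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"  # hide the (absent) GPU; set before TensorFlow is imported

from sklearn.preprocessing import LabelBinarizer

# Fit one binarizer on the training labels and reuse it for the test labels,
# so both splits share the same class-to-column mapping.
lb = LabelBinarizer().fit(trainY)
trainY = lb.transform(trainY)
testY = lb.transform(testY)

# Size the output layer from the data instead of hard-coding classes=3;
# the traceback suggests len(lb.classes_) is 4 here.
num_classes = len(lb.classes_)
model = ShallowNet.build(width=32, height=32, depth=3, classes=num_classes)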

Related

Weird Error in Regression Model using Artificial Neural Networks in Tensorflow

I am running this code.
import math
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras import Model
from tensorflow.keras import Sequential
from tensorflow.keras.optimizers import Adam
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.layers import Dense, Dropout
from sklearn.model_selection import train_test_split
from tensorflow.keras.losses import MeanSquaredLogarithmicError
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
df_new = df_deduped.select_dtypes(include=numerics)
X = df_new[['Duration',
            'Customers_Affected',
            'Customers_Served',
            'Total_Minutes',
            'CostOfOutage',
            'PercentAffected']]
y = df_new[['HT_Outages']]
print(X.head(1))
print(y.head(1))
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, test_size=0.2)
scaler = StandardScaler()
# standardization
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)  # transform only: reuse the training statistics
hidden_units1 = 160
hidden_units2 = 480
hidden_units3 = 256
learning_rate = 0.01
# Creating model using the Sequential in tensorflow
def build_model_using_sequential():
    model = Sequential([
        Dense(hidden_units1, kernel_initializer='normal', activation='relu'),
        Dropout(0.2),
        Dense(hidden_units2, kernel_initializer='normal', activation='relu'),
        Dropout(0.2),
        Dense(hidden_units3, kernel_initializer='normal', activation='relu'),
        Dense(1, kernel_initializer='normal', activation='linear')
    ])
    return model
# build the model
model = build_model_using_sequential()
# loss function
msle = MeanSquaredLogarithmicError()
model.compile(
    loss=msle,
    optimizer=Adam(learning_rate=learning_rate),
    metrics=[msle]
)
# train the model
history = model.fit(
    X_train_scaled,
    y_train,
    epochs=25,
    batch_size=64,
    validation_split=0.2
)
def plot_history(history, key):
    plt.plot(history.history[key])
    plt.plot(history.history['val_'+key])
    plt.xlabel("Epochs")
    plt.ylabel(key)
    plt.legend([key, 'val_'+key])
    plt.show()
# Plot the history
plot_history(history, 'mean_squared_logarithmic_error')
Everything is fine up to this point. When I run the following line of code...
x_test['prediction'] = model.predict(x_test_scaled)
I get this error...
ValueError Traceback (most recent call last)
~\AppData\Local\Temp\1\ipykernel_23656\555877386.py in <module>
6 #df.head()
7
----> 8 x_test['prediction'] = model.predict(x_test_scaled)
~\Anaconda3\lib\site-packages\keras\utils\traceback_utils.py in error_handler(*args, **kwargs)
68 # To get the full stack trace, call:
69 # `tf.debugging.disable_traceback_filtering()`
---> 70 raise e.with_traceback(filtered_tb) from None
71 finally:
72 del filtered_tb
~\Anaconda3\lib\site-packages\keras\engine\training.py in tf__predict_function(iterator)
13 try:
14 do_return = True
---> 15 retval_ = ag__.converted_call(ag__.ld(step_function), (ag__.ld(self), ag__.ld(iterator)), None, fscope)
16 except:
17 do_return = False
ValueError: in user code:
File "C:\Users\RS\Anaconda3\lib\site-packages\keras\engine\training.py", line 2137, in predict_function *
return step_function(self, iterator)
File "C:\Users\RS\Anaconda3\lib\site-packages\keras\engine\training.py", line 2123, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "C:\Users\RS\Anaconda3\lib\site-packages\keras\engine\training.py", line 2111, in run_step **
outputs = model.predict_step(data)
File "C:\Users\RS\Anaconda3\lib\site-packages\keras\engine\training.py", line 2079, in predict_step
return self(x, training=False)
File "C:\Users\RS\Anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 70, in error_handler
raise e.with_traceback(filtered_tb) from None
File "C:\Users\RS\Anaconda3\lib\site-packages\keras\engine\input_spec.py", line 277, in assert_input_compatibility
raise ValueError(
ValueError: Exception encountered when calling layer 'sequential_23' (type Sequential).
Input 0 of layer "dense_44" is incompatible with the layer: expected axis -1 of input shape to have value 18, but received input with shape (None, 1)
Call arguments received by layer 'sequential_23' (type Sequential):
• inputs=tf.Tensor(shape=(None, 1), dtype=float32)
• training=False
• mask=None
I am trying to follow the example from this link.
https://www.analyticsvidhya.com/blog/2021/08/a-walk-through-of-regression-analysis-using-artificial-neural-networks-in-tensorflow/?
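A note that is not part of the original post: the traceback says dense_44 was built for 18 input features but received shape (None, 1), so whatever x_test_scaled holds at predict time is not the matrix the model was trained on (note also the lowercase x_test, while train_test_split produced X_test). A minimal sketch, reusing the question's variables:

# Rebuild the scaled matrices from the same split the model was trained on,
# then predict on the full feature matrix.
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)   # fit on the training rows only
X_test_scaled = scaler.transform(X_test)         # same columns, same statistics

predictions = model.predict(X_test_scaled)       # shape (n_samples, 1)

# Attach the predictions to the test frame (X_test, not an undefined x_test).
X_test = X_test.copy()
X_test['prediction'] = predictions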

A few errors occurred when training the facial expression recognition model

I am working on a project to recognise facial expressions, training a facial expression recognition model with a convolutional neural network (CNN). I am using TensorFlow 2.4 and Python 3.8.8.
The output:
Found 18282 images belonging to 5 classes.
Found 7178 images belonging to 7 classes.
Below is the error that I got:
2023-01-11 00:09:29.625187: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX AVX2
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
c:/Users/Documents/Bachelor of Computer Science/FYP/Code/Program/Backup Test/TrainEmotionDetector.py:53: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.
emotion_model_info = emotion_model.fit_generator(
Epoch 1/20
2023-01-11 00:09:31.756943: W tensorflow/tsl/framework/cpu_allocator_impl.cc:82] Allocation of 31719424 exceeds 10% of free system memory.
Traceback (most recent call last):
File "c:/Users/Documents/Bachelor of Computer Science/FYP/Code/Program/Backup Test/TrainEmotionDetector.py", line 53, in <module>
emotion_model_info = emotion_model.fit_generator(
File "C:\Users\anaconda3\lib\site-packages\keras\engine\training.py", line 2604, in fit_generator
return self.fit(
File "C:\Users\anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 70, in error_handler
raise e.with_traceback(filtered_tb) from None
File "C:\Users\anaconda3\lib\site-packages\tensorflow\python\eager\execute.py", line 52, in quick_execute
tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
tensorflow.python.framework.errors_impl.InvalidArgumentError: Graph execution error:
Detected at node 'categorical_crossentropy/softmax_cross_entropy_with_logits' defined at (most recent call last):
File "c:/Users/Documents/Code/Program/Backup Test/TrainEmotionDetector.py", line 53, in <module>
emotion_model_info = emotion_model.fit_generator(
File "C:\Users\anaconda3\lib\site-packages\keras\engine\training.py", line 2604, in fit_generator
return self.fit(
File "C:\Users\anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 65, in error_handler
return fn(*args, **kwargs)
File "C:\Users\anaconda3\lib\site-packages\keras\engine\training.py", line 1650, in fit
tmp_logs = self.train_function(iterator)
File "C:\Users\anaconda3\lib\site-packages\keras\engine\training.py", line 1249, in train_function
return step_function(self, iterator)
File "C:\Users\anaconda3\lib\site-packages\keras\engine\training.py", line 1233, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "C:\Users\anaconda3\lib\site-packages\keras\engine\training.py", line 1222, in run_step
outputs = model.train_step(data)
File "C:\Users\anaconda3\lib\site-packages\keras\engine\training.py", line 1024, in train_step
loss = self.compute_loss(x, y, y_pred, sample_weight)
File "C:\Users\anaconda3\lib\site-packages\keras\engine\training.py", line 1082, in compute_loss
return self.compiled_loss(
File "C:\Users\anaconda3\lib\site-packages\keras\engine\compile_utils.py", line 265, in __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
File "C:\Users\anaconda3\lib\site-packages\keras\losses.py", line 152, in __call__
losses = call_fn(y_true, y_pred)
File "C:\Users\anaconda3\lib\site-packages\keras\losses.py", line 284, in call
return ag_fn(y_true, y_pred, **self._fn_kwargs)
File "C:\Users\anaconda3\lib\site-packages\keras\losses.py", line 2004, in categorical_crossentropy
return backend.categorical_crossentropy(
File "C:\Users\anaconda3\lib\site-packages\keras\backend.py", line 5538, in categorical_crossentropy
return tf.nn.softmax_cross_entropy_with_logits(
Node: 'categorical_crossentropy/softmax_cross_entropy_with_logits'
logits and labels must be broadcastable: logits_size=[64,7] labels_size=[64,5]
[[{{node categorical_crossentropy/softmax_cross_entropy_with_logits}}]] [Op:__inference_train_function_1181]
2023-01-11 00:09:32.976764: W tensorflow/core/kernels/data/generator_dataset_op.cc:108] Error occurred when finalizing GeneratorDataset iterator: FAILED_PRECONDITION: Python interpreter state is not initialized. The process may be terminated.
[[{{node PyFunc}}]]
Below is the full code:
# import required packages
import cv2
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Flatten
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
# Initialize image data generator with rescaling
train_data_gen = ImageDataGenerator(rescale=1./255)
validation_data_gen = ImageDataGenerator(rescale=1./255)
# Preprocess all training images
train_generator = train_data_gen.flow_from_directory(
    'data/train',
    target_size=(48, 48),
    batch_size=64,
    color_mode="grayscale",
    class_mode='categorical')
# Preprocess all validation images
validation_generator = validation_data_gen.flow_from_directory(
    'data/test',
    target_size=(48, 48),
    batch_size=64,
    color_mode="grayscale",
    class_mode='categorical')
# create model structure
emotion_model = Sequential()
emotion_model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(48, 48, 1)))
emotion_model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
emotion_model.add(MaxPooling2D(pool_size=(2, 2)))
emotion_model.add(Dropout(0.25))
emotion_model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
emotion_model.add(MaxPooling2D(pool_size=(2, 2)))
emotion_model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
emotion_model.add(MaxPooling2D(pool_size=(2, 2)))
emotion_model.add(Dropout(0.25))
emotion_model.add(Flatten())
emotion_model.add(Dense(1024, activation='relu'))
emotion_model.add(Dropout(0.5))
emotion_model.add(Dense(7, activation='softmax'))
cv2.ocl.setUseOpenCL(False)
emotion_model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=0.0001, decay=1e-6), metrics=['accuracy'])
# Train the neural network/model
emotion_model_info = emotion_model.fit_generator(
    train_generator,
    steps_per_epoch=28709 // 64,
    epochs=20,
    validation_data=validation_generator,
    validation_steps=7178 // 64)
# save model structure in json file
model_json = emotion_model.to_json()
with open("model/emotion_model.json", "w") as json_file:
    json_file.write(model_json)
# save trained model weights in .h5 file
emotion_model.save_weights('model/emotion_model.h5')
I have upgraded TensorFlow to the latest version with pip install --upgrade tensorflow, but nothing works. The script is expected to write the trained model to emotion_model.json and emotion_model.h5. Please help me solve this problem.
logits and labels must be broadcastable: logits_size=[64,7] labels_size=[64,5]
Well, you kind of gave it away when you mentioned that the first set of images had just 5 distinct labels while the next set had 7. Prune two of those label classes and you'll be back in business.
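A sketch of that fix (my addition, not from the answer above): pin both generators to one explicit class list via the classes argument of flow_from_directory, and size the output layer from the data instead of hard-coding 7. This assumes one subdirectory per class under data/train:

import os

class_names = sorted(os.listdir('data/train'))   # the canonical 5-class label set

train_generator = train_data_gen.flow_from_directory(
    'data/train', target_size=(48, 48), batch_size=64,
    color_mode='grayscale', class_mode='categorical', classes=class_names)
validation_generator = validation_data_gen.flow_from_directory(
    'data/test', target_size=(48, 48), batch_size=64,
    color_mode='grayscale', class_mode='categorical', classes=class_names)

# ...same convolutional stack as in the question, then:
emotion_model.add(Dense(train_generator.num_classes, activation='softmax'))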

How to fine tune VGG16 with ImageNet dataset without error?

I am new to computer vision, and I want to fine-tune VGG16 on the imagenet64 dataset. But after downloading the dataset with the code below and then fine-tuning, I got an error. Please help me solve it. Thank you very much.
import os
import shutil
from keras.preprocessing.image import ImageDataGenerator
from keras.applications import VGG16
from keras import models, layers, optimizers
import matplotlib.image as mpimg
import math
checkpoints = '/content/drive/MyDrive/colab_files/imagenet64/'
if not os.path.exists('imagenet64'):
    if not os.path.exists(checkpoints + 'imagenet64.tar'):
        print("Downloading archive...")
        os.chdir(checkpoints)
        !wget https://pjreddie.com/media/files/imagenet64.tar
        os.chdir('/content/')
    print("Copying to local runtime...")
    shutil.copy(checkpoints + 'imagenet64.tar', './imagenet64.tar')
    print("Uncompressing...")
    !tar -xf imagenet64.tar
    print("Data ready!")
train_dir='/content/imagenet64/train'
val_dir='/content/imagenet64/val'
batch_size=40
epochs=30
train_datagen = ImageDataGenerator()
val_datagen = ImageDataGenerator()
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(224, 224),
    batch_size=batch_size,
    class_mode='categorical',
    subset='training'
)
validation_generator = val_datagen.flow_from_directory(
    val_dir,
    target_size=(224, 224),
    batch_size=batch_size,
    class_mode='categorical',
    subset='training'
)
model_fine = model_n
model_fine.trainable = True
set_trainable = False
for layer in model_fine.layers:
    if layer.name == 'block5_conv1':
        set_trainable = True
    if set_trainable == True:
        layer.trainable = True
    else:
        layer.trainable = False
model_fine.compile(loss='categorical_crossentropy', optimizer=optimizers.Nadam(lr=1e-5), metrics=['acc'])
h = model_fine.fit(
    train_generator,
    steps_per_epoch=train_generator.samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=validation_generator.samples // batch_size,
)
I tried to fine-tune VGG16 on the imagenet64 dataset and expected the fine-tuning to run, but I got this error:
Found 1281166 images belonging to 1000 classes.
Found 50000 images belonging to 1000 classes.
/usr/local/lib/python3.8/dist-packages/keras/optimizers/optimizer_v2/nadam.py:78: UserWarning: The lr argument is deprecated, use learning_rate instead.
super(Nadam, self).__init__(name, **kwargs)
Epoch 1/30
StagingError Traceback (most recent call last)
in
34
35 model_fine.compile(loss='categorical_crossentropy', optimizer=optimizers.Nadam(lr=1e-5), metrics=['acc'])
---> 36 h = model_fine.fit(
37 train_generator,
38 steps_per_epoch = train_generator.samples // batch_size,
1 frames
/usr/local/lib/python3.8/dist-packages/tensorflow/python/framework/func_graph.py in autograph_handler(*args, **kwargs)
1125 except Exception as e: # pylint:disable=broad-except
1126 if hasattr(e, "ag_error_metadata"):
-> 1127 raise e.ag_error_metadata.to_exception(e)
1128 else:
1129 raise
StagingError: in user code:
File "/usr/local/lib/python3.8/dist-packages/keras/engine/training.py", line 1051, in train_function *
return step_function(self, iterator)
File "/usr/local/lib/python3.8/dist-packages/keras/engine/training.py", line 1040, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "/usr/local/lib/python3.8/dist-packages/keras/engine/training.py", line 1030, in run_step **
outputs = model.train_step(data)
File "/usr/local/lib/python3.8/dist-packages/keras/engine/training.py", line 893, in train_step
self.optimizer.minimize(loss, self.trainable_variables, tape=tape)
File "/usr/local/lib/python3.8/dist-packages/keras/optimizers/optimizer_v2/optimizer_v2.py", line 539, in minimize
return self.apply_gradients(grads_and_vars, name=name)
File "/usr/local/lib/python3.8/dist-packages/keras/optimizers/optimizer_v2/optimizer_v2.py", line 646, in apply_gradients
self._create_all_weights(var_list)
File "/usr/local/lib/python3.8/dist-packages/keras/optimizers/optimizer_v2/optimizer_v2.py", line 860, in _create_all_weights
self._create_slots(var_list)
File "/usr/local/lib/python3.8/dist-packages/keras/optimizers/optimizer_v2/nadam.py", line 87, in _create_slots
var_dtype = var_list[0].dtype.base_dtype
IndexError: list index out of range
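A note that is not part of the original post: model_n is never defined in the snippet, so it is hard to say what actually reached the optimizer. For reference, a minimal sketch of the usual VGG16 fine-tuning setup (hypothetical names), unfreezing from block5_conv1 onwards and passing learning_rate instead of the deprecated lr flagged in the warning above:

from keras.applications import VGG16
from keras import models, layers, optimizers

conv_base = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))

# Freeze everything up to block5_conv1, train the rest.
set_trainable = False
for layer in conv_base.layers:
    if layer.name == 'block5_conv1':
        set_trainable = True
    layer.trainable = set_trainable

model_fine = models.Sequential([
    conv_base,
    layers.Flatten(),
    layers.Dense(1000, activation='softmax'),   # 1000 ImageNet classes
])
model_fine.compile(loss='categorical_crossentropy',
                   optimizer=optimizers.Nadam(learning_rate=1e-5),
                   metrics=['acc'])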

Resolving "InvalidArgumentError: No OpKernel was registered to support Op 'CudnnRNNV2'" for LSTM

I'm trying to build an LSTM neural network for reinforcement learning and price prediction, but I keep getting the error seen below.
I updated TensorFlow to 2.4 and installed tensorflow-gpu, and I am using an Nvidia GPU; CUDA and the cuDNN toolkit are installed. My only thought is that perhaps it's not targeting, or can't gain access to, my GPU: in the error below it lists the registered devices as [CPU] only, not including the GPU.
I also tried adding
os.environ['CUDA_VISIBLE_DEVICES'] = "-1"
in order to have TensorFlow run on my CPU, but I had no luck with that either: I get the same error.
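A quick check that is not in the original post: it is worth confirming what TensorFlow can actually see, since "Registered devices: [CPU]" usually means the installed build has no GPU kernels at all.

import tensorflow as tf
print(tf.test.is_built_with_cuda())            # False -> a CPU-only TensorFlow is installed
print(tf.config.list_physical_devices('GPU'))  # [] -> no GPU visible to this process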
Python 3.7.4 (default, Aug 9 2019, 18:34:13) [MSC v.1915 64 bit (AMD64)]
Type "copyright", "credits" or "license" for more information.
IPython 7.8.0 -- An enhanced Interactive Python.
runfile('C:/Users/james/Data Science/LSTM/LSTM 1.py', wdir='C:/Users/james/Data Science/LSTM')
Train on 227 samples, validate on 26 samples
Epoch 1/50
64/227 [=======>......................] - ETA: 4sTraceback (most recent call last):
File "<ipython-input-1-95a3ed21b0c0>", line 1, in <module>
runfile('C:/Users/james/Data Science/LSTM/LSTM 1.py', wdir='C:/Users/james/Data Science/LSTM')
File "C:\ProgramData\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 827, in runfile
execfile(filename, namespace)
File "C:\ProgramData\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 110, in execfile
exec(compile(f.read(), filename, 'exec'), namespace)
File "C:/Users/james/Data Science/LSTM/LSTM 1.py", line 140, in <module>
validation_split=0.1
File "C:\Users\james\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\keras\engine\training.py", line 819, in fit
use_multiprocessing=use_multiprocessing)
File "C:\Users\james\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\keras\engine\training_v2.py", line 342, in fit
total_epochs=epochs)
File "C:\Users\james\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\keras\engine\training_v2.py", line 128, in run_one_epoch
batch_outs = execution_function(iterator)
File "C:\Users\james\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\keras\engine\training_v2_utils.py", line 98, in execution_function
distributed_function(input_fn))
File "C:\Users\james\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\eager\def_function.py", line 568, in __call__
result = self._call(*args, **kwds)
File "C:\Users\james\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\eager\def_function.py", line 632, in _call
return self._stateless_fn(*args, **kwds)
File "C:\Users\james\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\eager\function.py", line 2363, in __call__
return graph_function._filtered_call(args, kwargs) # pylint: disable=protected-access
File "C:\Users\james\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\eager\function.py", line 1611, in _filtered_call
self.captured_inputs)
File "C:\Users\james\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\eager\function.py", line 1692, in _call_flat
ctx, args, cancellation_manager=cancellation_manager))
File "C:\Users\james\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\eager\function.py", line 545, in call
ctx=ctx)
File "C:\Users\james\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\eager\execute.py", line 67, in quick_execute
six.raise_from(core._status_to_exception(e.code, message), None)
File "<string>", line 3, in raise_from
InvalidArgumentError: No OpKernel was registered to support Op 'CudnnRNNV2' used by {{node sequential/bidirectional/forward_cu_dnnlstm/CudnnRNNV2}}with these attrs: [seed=0, dropout=0, T=DT_FLOAT, input_mode="linear_input", direction="unidirectional", rnn_mode="lstm", is_training=true, seed2=0]
Registered devices: [CPU]
Registered kernels:
device='GPU'; T in [DT_HALF]
device='GPU'; T in [DT_FLOAT]
device='GPU'; T in [DT_DOUBLE]
[[sequential/bidirectional/forward_cu_dnnlstm/CudnnRNNV2]] [Op:__inference_distributed_function_5441]
The code is as follows:
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 11 19:41:02 2021
@author: james
"""
import os
import numpy as np
import tensorflow as tf
from tensorflow import keras
import pandas as pd
import seaborn as sns
from pylab import rcParams
import matplotlib.pyplot as plt
from matplotlib import rc
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import Bidirectional, Dropout, Activation, Dense, LSTM
from tensorflow.python.keras.layers import CuDNNLSTM
from tensorflow.keras.models import Sequential
sns.set(style='whitegrid', palette='muted', font_scale=1.5)
rcParams['figure.figsize'] = 14, 8
RANDOM_SEED = 42
np.random.seed(RANDOM_SEED)
#import data
csv_path = r"C:\Users\james\Data Science\LSTM\ICX-USD.csv"
df = pd.read_csv(csv_path, parse_dates=['Date'])
df = df.sort_values('Date')
df.head()
df.shape
ax = df.plot(x='Date', y='Close');
ax.set_xlabel("Date")
ax.set_ylabel("Close Price (USD)")
# Normalization
scaler = MinMaxScaler()
close_price = df.Close.values.reshape(-1, 1)
scaled_close = scaler.fit_transform(close_price)
scaled_close.shape
np.isnan(scaled_close).any()
scaled_close = scaled_close[~np.isnan(scaled_close)]
scaled_close = scaled_close.reshape(-1, 1)
np.isnan(scaled_close).any()
#Preprocessing
SEQ_LEN = 100
def to_sequences(data, seq_len):
    d = []
    for index in range(len(data) - seq_len):
        d.append(data[index: index + seq_len])
    return np.array(d)
def preprocess(data_raw, seq_len, train_split):
    data = to_sequences(data_raw, seq_len)
    num_train = int(train_split * data.shape[0])
    X_train = data[:num_train, :-1, :]
    y_train = data[:num_train, -1, :]
    X_test = data[num_train:, :-1, :]
    y_test = data[num_train:, -1, :]
    return X_train, y_train, X_test, y_test
X_train, y_train, X_test, y_test = preprocess(scaled_close, SEQ_LEN, train_split = 0.95)
X_train.shape
X_test.shape
#Model
DROPOUT = 0.2
WINDOW_SIZE = SEQ_LEN - 1
model = keras.Sequential()
model.add(Bidirectional(CuDNNLSTM(WINDOW_SIZE, return_sequences=True),
                        input_shape=(WINDOW_SIZE, X_train.shape[-1])))
model.add(Dropout(rate=DROPOUT))
model.add(Bidirectional(CuDNNLSTM((WINDOW_SIZE * 2), return_sequences=True)))
model.add(Dropout(rate=DROPOUT))
model.add(Bidirectional(CuDNNLSTM(WINDOW_SIZE, return_sequences=False)))
model.add(Dense(units=1))
model.add(Activation('linear'))
#Training
model.compile(
    loss='mean_squared_error',
    optimizer='adam'
)
BATCH_SIZE = 64
history = model.fit(
    X_train,
    y_train,
    epochs=50,
    batch_size=BATCH_SIZE,
    shuffle=False,
    validation_split=0.1
)
model.evaluate(X_test, y_test)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
#Prediction
y_hat = model.predict(X_test)
y_test_inverse = scaler.inverse_transform(y_test)
y_hat_inverse = scaler.inverse_transform(y_hat)
plt.plot(y_test_inverse, label="Actual Price", color='green')
plt.plot(y_hat_inverse, label="Predicted Price", color='red')
plt.title('Bitcoin price prediction')
plt.xlabel('Time [days]')
plt.ylabel('Price')
plt.legend(loc='best')
plt.show();
Any assistance would be greatly appreciated.
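A suggestion that is not part of the original post: in TF 2.x the plain tf.keras.layers.LSTM layer dispatches to the cuDNN kernel automatically when a compatible GPU is visible and falls back to a generic kernel on the CPU, so the GPU-only CuDNNLSTM layer can simply be swapped out. A minimal sketch of the same architecture:

from tensorflow import keras
from tensorflow.keras.layers import Activation, Bidirectional, Dense, Dropout, LSTM

model = keras.Sequential([
    Bidirectional(LSTM(WINDOW_SIZE, return_sequences=True),
                  input_shape=(WINDOW_SIZE, X_train.shape[-1])),
    Dropout(rate=DROPOUT),
    Bidirectional(LSTM(WINDOW_SIZE * 2, return_sequences=True)),
    Dropout(rate=DROPOUT),
    Bidirectional(LSTM(WINDOW_SIZE, return_sequences=False)),
    Dense(units=1),
    Activation('linear'),
])
model.compile(loss='mean_squared_error', optimizer='adam')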

How to convert keras float predictions to int?

I'm getting the error
File "/anaconda/envs/tf3/lib/python3.6/site-packages/keras/engine/training.py", line 830, in compile
sample_weight, mask)
File "/anaconda/envs/tf3/lib/python3.6/site-packages/keras/engine/training.py", line 445, in weighted
score_array *= weights
File "/anaconda/envs/tf3/lib/python3.6/site-packages/tensorflow/python/ops/math_ops.py", line 898, in binary_op_wrapper
y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name="y")
File "/anaconda/envs/tf3/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 932, in convert_to_tensor
as_ref=False)
File "/anaconda/envs/tf3/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 1022, in internal_convert_to_tensor
ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
File "/anaconda/envs/tf3/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 866, in _TensorTensorConversionFunction
(dtype.name, t.dtype.name, str(t)))
ValueError: Tensor conversion requested dtype int32 for Tensor with dtype float32: 'Tensor("global_average_pooling2d_1_sample_weights:0", shape=(?,), dtype=float32)'
during the training phase. I'm running the latest Keras (2.1.3) and TensorFlow (1.5) through Conda. Here is a minimal example that reproduces the error:
from keras.layers import Input, Conv2D, GlobalAveragePooling2D
from keras.models import Model
import keras.backend as K
import numpy as np
def test_loss(y_input, x_input):
    x1 = K.cast(x_input, dtype='int32')
    y1 = K.cast(y_input, dtype='int32')
    loss = K.square(x1 - y1)
    reduced_loss = K.cumsum(loss)
    return reduced_loss
train_data = 10*np.random.rand(1600, 18,18,512)
validation_data = 10*np.random.rand(200, 18,18,512)
Y_train = np.random.rand(1600, 803)
Y_test = np.random.rand(200, 803)
#model
inputs = Input(shape=train_data.shape[1:])
x = Conv2D(803, (1,1), activation='sigmoid')(inputs)
predictions = GlobalAveragePooling2D(input_shape=train_data.shape[1:])(x)
model = Model(inputs=inputs, outputs=predictions)
model.summary()
model.compile(optimizer='adam', loss=test_loss, metrics=['accuracy'])
model.fit(train_data, Y_train,
          epochs=200,
          batch_size=1,
          validation_data=(validation_data, Y_test))
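A note that is not part of the original post: the int32 casts inside test_loss make the loss tensor integer-typed, and Keras then tries to convert its float32 sample weights to int32 when multiplying them in, which is exactly the conversion the traceback complains about. A minimal sketch, assuming the goal (per the title) is integer predictions: keep the loss in float space and round only after predicting.

import numpy as np
import keras.backend as K

def float_loss(y_true, y_pred):
    # Plain float32 loss: gradients flow and the sample-weight
    # multiplication stays in a single dtype.
    return K.cumsum(K.square(y_pred - y_true))

model.compile(optimizer='adam', loss=float_loss, metrics=['accuracy'])

# Convert to int only after prediction:
preds = model.predict(validation_data)
int_preds = np.rint(preds).astype('int32')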
