cannot import name 'TimeSeriesGenerator' from 'keras.preprocessing.sequence' - python

I'm new to Keras and trying to work with it; however, I have a problem with the imports.
I can import all the following packages:
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Nadam
from tensorflow.keras.layers import Input, LSTM, Dense
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, TerminateOnNaN
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator
but when I try to import the time series generator I get an error:
from keras.preprocessing.sequence import TimeSeriesGenerator
ImportError: cannot import name 'TimeSeriesGenerator' from 'keras.preprocessing.sequence' (C:\path\myuser\anaconda3\envs\keras1\lib\site-packages\keras\preprocessing\sequence.py)
This happens even after I created a new environment and installed TensorFlow first; nothing changes and I keep getting this error.
What am I missing? How can I solve it and use the time series generator?

You misspelled the import; it should be TimeseriesGenerator (lowercase s).
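A minimal sketch of the corrected import plus a basic call (the array contents and the length/batch_size values below are just placeholders, not from the question):
import numpy as np
from keras.preprocessing.sequence import TimeseriesGenerator  # note the lowercase "s"

data = np.arange(100).reshape(-1, 1)   # placeholder input series
targets = np.arange(100)               # placeholder targets
gen = TimeseriesGenerator(data, targets, length=10, batch_size=8)
x_batch, y_batch = gen[0]              # first batch of windows and their targets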

Related

module 'keras.layers.normalization' has no attribute 'BatchNormalizationBase'

When I run this code:
import os
from autokeras import StructuredDataClassifier
import stellargraph as sg
from stellargraph.mapper import FullBatchNodeGenerator
from tensorflow.keras import layers, optimizers, losses, metrics, Model
from sklearn import preprocessing, model_selection
from IPython.display import display, HTML
import matplotlib.pyplot as plt
%matplotlib inline
I get this error:
AttributeError: module 'keras.layers.normalization' has no attribute 'BatchNormalizationBase'
Note that this code has run many times before without any problems.
In my case, which had a structure like this:
I added these two lines to the __init__.py file:
from keras.layers.normalization.layer_normalization import *
from keras.layers.normalization.batch_normalization import *
And the problem was fixed.
Restart the runtime, then reinstall the Keras library.

Issue with opening h5 file with Python code in MATLAB environment

I have an issue with calling Python code from MATLAB. My Python code predicts the battery state of charge using an LSTM-with-attention ANN, based on the inputs sent from MATLAB. The prediction is then sent back to MATLAB. I already have previously trained weights and biases saved in an h5 file, which is loaded and used in the Python code. Below is the Python code:
import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from tensorflow import keras
from tensorflow.keras import Sequential
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import Dense
from tensorflow.keras import optimizers
import matplotlib.pyplot as plt
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
from tensorflow.keras import backend as K
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from sklearn.model_selection import train_test_split
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.models import load_model
from tensorflow.keras.layers import Dropout, InputLayer
import h5py
#to create sequential data
def create_inout_sequences(input_data, tw):
    inout_seq = []
    L = len(input_data)
    for i in range(L - tw):
        train_seq = input_data[i:i+tw]
        #train_out = output_data[i:i+tw]
        inout_seq.append(train_seq)
    return inout_seq

def search(inputs):
    class attention(Layer):
        def __init__(self, return_sequences=True, **kwargs):
            self.return_sequences = return_sequences
            super(attention, self).__init__()
        def build(self, input_shape):
            self.W = self.add_weight(name="att_weight", shape=(input_shape[-1], 1),
                                     initializer="normal")
            self.b = self.add_weight(name="att_bias", shape=(input_shape[1], 1),
                                     initializer="zeros")
            super(attention, self).build(input_shape)
        def call(self, x):
            e = (K.dot(x, self.W) + self.b)
            a = K.softmax(e, axis=1)
            output = x * a
            if self.return_sequences:
                return output
            return K.sum(output, axis=1)
        def get_config(self):
            # For serialization with 'custom_objects'
            config = super().get_config()
            config['return_sequences'] = self.return_sequences
            return config

    #convert test data to sequential form
    inputs = np.array(inputs)
    inputs = np.tile(inputs, (36, 1))
    inputs_new = create_inout_sequences(inputs, 35)
    inputs_new = np.array(inputs_new)

    model1 = Sequential()
    model1.add(InputLayer(input_shape=(35, 5)))
    model1.add(LSTM(22, return_sequences=True))
    model1.add(attention(return_sequences=False))
    model1.add(Dense(104, activation="relu"))
    model1.add(Dropout(0.2))
    model1.add(Dense(1, activation="sigmoid"))
    lr_schedule = keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate=0.01,
        decay_steps=10000,
        decay_rate=0.99)
    model1.compile(optimizer=tf.keras.optimizers.Adam(epsilon=1e-08, learning_rate=lr_schedule),
                   loss='mse')
    #call previously trained weights
    model1.load_weights('SOC_weights.h5')
    x = float(model1.predict(inputs_new, batch_size=100, verbose=0))
    return x  # send prediction to Matlab
Note: I am using Python 3.6, TensorFlow 2.5, Keras 2.4.3, h5py 3.1.0, and Cython 0.28.
I am able to run this code without any error in Python, but I have issues when it is called from MATLAB 2020a. Below is my MATLAB code:
pyenv('Version','3.6');
py.importlib.import_module('tensorflow');
py.importlib.import_module('testingSOC'); % file containing the Python codes
inputs=[0.555555556,0.435139205,0.68313128,0.499987472,0.241225578];% test inputs
SOC_output=py.testingSOC.search(inputs)
Below is the error received in MATLAB:
Error using training>load_weights (line 2312)
Python Error: ImportError: `load_weights` requires h5py when loading weights from HDF5.
Error in testingSOC>search (line 87)
The error looks like h5py is not recognized by MATLAB, so I have tried reinstalling h5py from the command prompt (I am using Windows 10):
pip uninstall h5py
pip install h5py
but no changes...
I have also tried TensorFlow 2.2, Keras 2.4.3, h5py 2.10, and Cython 0.29, but I still get the same error.
I would really appreciate it if you could provide some insight into solving this issue, and point out any fundamental parts that I may have missed. I would be glad to share more details if required.
Thanks!
Thanks to #TimRoberts for pointing out that I should include py.importlib.import_module('h5py'), which helped me resolve this issue. Below is my solution, for those who would like to refer to it:
When I included py.importlib.import_module('h5py') in my MATLAB code, I received the following error:
Error using h5>init h5py.h5 (line 1)
Python Error: ImportError: DLL load failed: The specified procedure could not be found.
It looks like the Python environment was picking up MATLAB's own HDF5 library in my case, which does not have the same features as Python's h5py library. I found that there is an option to run Python code as a separate process, which seems to be working for me (as described in this link):
https://www.mathworks.com/help/matlab/matlab_external/out-of-process-execution-of-python-functionality.html?searchHighlight=out%20of%20process%20python&s_tid=srchtitle
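As a sketch of that out-of-process approach (the 'Version' value is just my local Python install; 'ExecutionMode','OutOfProcess' is the pyenv option described on that page), the MATLAB side becomes:
pyenv('Version','3.6','ExecutionMode','OutOfProcess');  % run Python in its own process
py.importlib.import_module('h5py');
py.importlib.import_module('testingSOC');
This way MATLAB starts a separate Python process that loads its own HDF5 DLLs instead of sharing MATLAB's.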

Unable to import SGD and Adam from 'keras.optimizers'

Trying to run---
from keras.optimizers import SGD, Adam,
I get this error---
Traceback (most recent call last):
  File "C:\Users\usn\Downloads\CNN-Image-Denoising-master ------after the stopping\CNN-Image-Denoising-master\CNN_Image_Denoising.py", line 15, in <module>
    from keras.optimizers import SGD, Adam
ImportError: cannot import name 'SGD' from 'keras.optimizers'
as well as this error, if I remove the SGD from import statement---
ImportError: cannot import name 'Adam' from 'keras.optimizers'
I can't find a single solution for this.
I have Keras and TensorFlow installed. I tried running the program in a virtualenv (no idea how that would help, but a guide similar to what I want mentioned it) but it still doesn't work. If anything, virtualenv makes it worse because it doesn't recognize any of the installed modules. I am using Python 3.9. Running the program in cmd because all the IDEs just create more trouble.
I am stumped. My knowledge of Python is extremely basic; I just found this thing on GitHub. Any help would be greatly appreciated.
This simple modification fixed my problem:
from tensorflow.keras.optimizers import SGD
Instead of:
from keras.optimizers import SGD
write:
from keras.optimizers import gradient_descent_v2
and then use it like this:
sgd = gradient_descent_v2.SGD(...)
--
To the people suggesting using
from tensorflow.keras.optimizers import SGD
it only works if you use TensorFlow throughout your whole program. If you want to use keras specifically, importing tensorflow.keras.optimizers won't work, as it will conflict with other parts of your program. In this case, use my solution instead.
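Along the same lines, if it's the Adam import that fails, a sketch under the assumption that your Keras version also ships the analogous adam_v2 module:
from keras.optimizers import adam_v2

adam = adam_v2.Adam(learning_rate=0.001)  # stands in for the old keras.optimizers.Adam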
Have a look at https://github.com/tensorflow/tensorflow/issues/23728:
from tensorflow.keras.optimizers import RMSprop
instead of:
from keras.optimizers import RMSprop
It worked for me.
Write:
from keras.optimizers import gradient_descent_v2
instead of:
from keras.optimizers import SGD
The same kind of fix works for to_categorical as well:
from tensorflow.keras.utils import to_categorical

Unhealthy deployment

I deployed a Keras model (in Python) to Azure ML. The deployment ends in the Unhealthy state. What does that mean?
I deploy my model with this code :
image_config = ContainerImage.image_configuration(execution_script='script.py',
                                                  runtime='python',
                                                  conda_file='config_conda.yml')
aciconfig = AciWebservice.deploy_configuration(cpu_cores=1,
                                               memory_gb=1,
                                               description='')
service = Webservice.deploy_from_model(workspace=ws,
                                       name=model_name,
                                       deployment_config=aciconfig,
                                       models=[model],
                                       image_config=image_config)
service.wait_for_deployment(show_output=True)
In the config_conda.yml file, what is the difference between the "pip" section and the "dependencies" section?
I use the following packages in my script.py:
import pandas as pd
import numpy as np
import string
#scikit-learn
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import LabelEncoder
import nltk
from nltk.corpus import stopwords
from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer
from nltk.stem.porter import PorterStemmer
# Word2vec
import gensim
# Keras
from tensorflow import keras
from keras import metrics
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Embedding
from keras.layers import LSTM
from keras.layers import GlobalMaxPool1D
from keras import utils
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import multiprocessing
See https://learn.microsoft.com/en-us/azure/machine-learning/how-to-deploy-and-where?tabs=azcli#understanding-service-state for understanding service state.
Also see the troubleshooting steps.
"dependencies" will be installed with conda install x whereas things listed under "pip" will be installed with pip install x. Try to use the conda version whenever possible as it uses precompiled binaries that are less likely to cause issues.
In my experience, the endpoint ends up in Unhealthy state when there is something wrong with either the scoring script or the .yml file (script.py and config_conda.yml, in your case). You can use this command to see the logs and this normally tells you the issue:
print(service.get_logs())
Another debugging method is to try to deploy the service as a LocalService first:
myenv = Environment.from_conda_specification(name = "myenv", file_path = "config_conda.yml")
inference_config = InferenceConfig(entry_script = "script.py", environment = myenv)
deployment_config = LocalWebservice.deploy_configuration(port=6789)
local_service = Model.deploy(ws, "local-test", [model], inference_config, deployment_config)
local_service.wait_for_deployment(show_output = True)
In this way, all steps of the Docker container build process (including package installation) are printed out. You can then delete the local service (local_service.delete()) when you're done.
By the way, you can also deploy ACI web services using Model.deploy instead of Webservice.deploy_from_model (see https://learn.microsoft.com/en-us/python/api/azureml-core/azureml.core.webservice(class)?view=azure-ml-py). This is usually faster, as far as I've seen.
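A rough sketch of what that could look like for ACI, reusing the names from your snippet (treat this as an outline of the azureml-core calls rather than a drop-in script):
from azureml.core import Environment
from azureml.core.model import InferenceConfig, Model
from azureml.core.webservice import AciWebservice

myenv = Environment.from_conda_specification(name="myenv", file_path="config_conda.yml")
inference_config = InferenceConfig(entry_script="script.py", environment=myenv)
aci_config = AciWebservice.deploy_configuration(cpu_cores=1, memory_gb=1)

# deploy the registered model as an ACI web service
service = Model.deploy(ws, model_name, [model], inference_config, aci_config)
service.wait_for_deployment(show_output=True)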

Keras: Running Inference with Multiple Models at Once

I'm training models with a genetic algorithm, and the fitness function first builds the model with certain parameters, and then runs inference on the dataset with that model.
Obviously, genetic algorithms are very parallelizable, but I am running into problems loading and running multiple models at once. I am using Python's multiprocessing library and running the function that loads the model and runs inference using the Pool method. I get an error when doing this:
Blas GEMM launch failed : a.shape=(32, 50), b.shape=(50, 200), m=32, n=200, k=50
[[{{node lstm_1/while/body/_1/MatMul_1}}]] [Op:__inference_keras_scratch_graph_494]
Function call stack:
keras_scratch_graph
Not sure what's happening here, but the error isn't thrown when the models are not parallelized.
Any help is super appreciated.
Here is the code:
import tensorflow as tf
from keras import regularizers
from keras.optimizers import SGD
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten, ELU, LSTM, PReLU, GRU, CuDNNGRU, CuDNNLSTM
from keras.callbacks import EarlyStopping, ModelCheckpoint
import numpy as np
from sklearn import preprocessing
from sklearn.preprocessing import minmax_scale
import matplotlib.pyplot as plt
import math, random, copy, pickle, time, statistics
import pandas as pd
import multiprocessing as mp

def buildModel(windowSize):
    #model to use
    sample_model = Sequential()
    sample_model.add(CuDNNLSTM(50, input_shape=(windowSize, 5), return_sequences=False))
    sample_model.add(Dense(50, activation="relu"))
    sample_model.add(Dense(3, activation="softmax"))
    sample_model.compile(optimizer="adam", loss="categorical_crossentropy")
    sample_model.build()
    #record weight and bias shape
    modelShape = []
    for i in range(len(sample_model.layers)):
        layerShape = []
        for x in range(len(sample_model.layers[i].get_weights())):
            layerShape.append(sample_model.layers[i].get_weights()[x].shape)
        modelShape.append(layerShape)
    return (sample_model, modelShape)

model, modelShape = buildModel(120)
pool = mp.Pool(mp.cpu_count())
# 'features' is loaded elsewhere (see the note below); each entry is one batch of inputs
results = [pool.apply(model.predict,
                      args=(np.array(features[x]),),
                      kwds={'batch_size': len(features[x])})
           for x in range(len(features))]
pool.close()
The features I'm using aren't important, they could all be lists of 120 random numbers or something. I didn't actually add the features I use because they are huge and from a really big file.
I just want to be able to run model.predict inside the pool.apply() multiprocessing call so that I can run multiple predictions concurrently.
