'Sequential' object has no attribute '_compile_metrics'

My TensorFlow version is 2.4.1, and I imported modules like this:
### import modules
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten, Dense, Conv2D, MaxPool2D, BatchNormalization, Dropout
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import pandas as pd
import scipy
%matplotlib inline
Then I try to create a simple compile function like this:
def compile_model(model):
    # YOUR CODE HERE
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
My test code is like this:
test_model = Sequential([Dense(100),
                         Dense(2, activation='softmax')])
compile_model(test_model)
assert isinstance(test_model.optimizer, tf.keras.optimizers.Adam)
assert hasattr(test_model, 'loss')
assert test_model.loss == 'sparse_categorical_crossentropy'
assert ['accuracy'] == test_model._compile_metrics
del test_model
After I ran the code blocks above, I got this error:
AttributeError: 'Sequential' object has no attribute '_compile_metrics'
But I can't seem to find any actual documentation about _compile_metrics.
Am I missing something, or is it about the TensorFlow version?
Please help. Thanks!

Basically, it is about the version: the sample I got was supposed to run on TensorFlow 2.0.0, but I ran it on 2.4.0. If I run the code on 2.0.0, it works fine.

Update
The answer by the OP will only work in TF 2.0 and 2.1. From TF 2.2 to 2.5 it won't work, because the model no longer has a _compile_metrics attribute.
To get the metric names, like accuracy, you have to run the model for at least one epoch or on a single batch.
def compile_model(model):
    # YOUR CODE HERE
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

test_model = Sequential([Dense(256),
                         Dense(2, activation='softmax')])
compile_model(test_model)
assert isinstance(test_model.optimizer, tf.keras.optimizers.Adam)
assert hasattr(test_model, 'loss')
assert test_model.loss == 'sparse_categorical_crossentropy'
Run a single epoch with a dummy dataset:
test_model.fit(x=np.random.uniform(0, 1, (37432, 512)),
               y=np.random.randint(0, 2, (37432, 1)))
test_model.loss # sparse_categorical_crossentropy
test_model.metrics_names # ['loss', 'accuracy']
assert 'loss' == test_model.metrics_names[0]
assert 'accuracy' == test_model.metrics_names[1]
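As noted above, a single batch is also enough to build the compiled metrics. A minimal sketch using train_on_batch (the batch size of 32 here is an arbitrary assumption):
# one batch is enough for Keras to build the compiled metrics
test_model.train_on_batch(np.random.uniform(0, 1, (32, 512)),
                          np.random.randint(0, 2, (32, 1)))
assert 'accuracy' in test_model.metrics_names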

Related

Data preparation for neural network in Python

I want to learn how to prepare data for training a neural network in Python. I found a simple example of a neural network that predicts a stock price. At the moment I am not interested in the accuracy of the trained network; I am interested in how to take arbitrary data and prepare it for feeding to the neural network.
As an example, I took a stock's prices over the past 5 years. As planned, the neural network accepts data for the last 50 days as input and predicts the price for the next 5 days. To do this, I read the .csv file and processed the data so that after the transformation I got two dataframes: the first is responsible for the input data and the second for the output.
The problem is that no matter what I do, I keep getting errors, so I cannot complete the training. What am I doing wrong? The code is shown below:
import matplotlib.pylab as plt
import torch
import random
import numpy as np
import pandas as pd
import sklearn
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import normalize
import pandas_profiling as pprf
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Dense, BatchNormalization, LeakyReLU
from tensorflow.keras.layers import Activation, Input, MaxPooling1D, Dropout
from tensorflow.keras.layers import AveragePooling1D, Conv1D, Flatten
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.optimizers import Adam, RMSprop, SGD
from tensorflow.keras.utils import plot_model
from IPython.display import display, Image
random.seed(0)
np.random.seed(0)
torch.manual_seed(0)
torch.backends.cudnn.deterministic = True
data = pd.read_csv('F:\\YNDX_ME.csv')[::]
data = data.drop('Date',axis=1)
data = data.drop('Adj Close',axis=1)
data = data.drop(np.where(data['Volume'] == 0)[0])
data = data.reset_index(drop=True)
#profiler = pprf.ProfileReport(data)
#profiler.to_file(r'F:\profiling.html')
days_edu = 50
days_pred = 5
df_edu_list = []
for i in range(len(data.index) - days_edu - days_pred + 1):
    df_temp = []
    for j in range(days_edu):
        df_temp.extend(data.loc[i + j, :].tolist())
    df_edu_list.append(df_temp)

df_edu_out_list = []
for i in range(len(data.index) - days_edu - days_pred + 1):
    df_temp = []
    for j in range(days_pred):
        df_temp.extend(data.loc[i + j + days_edu, :].tolist())
    df_edu_out_list.append(df_temp)
df_edu_train = pd.DataFrame(df_edu_list[:int(len(df_edu_list)*0.8)])
df_edu_val = pd.DataFrame(df_edu_list[int(len(df_edu_list)*0.8):])
df_edu_train_out = pd.DataFrame(df_edu_out_list[:int(len(df_edu_out_list)*0.8)])
df_edu_val_out = pd.DataFrame(df_edu_out_list[int(len(df_edu_out_list)*0.8):])
df_edu_train = normalize(df_edu_train.values)
df_edu_val = normalize(df_edu_val.values)
df_edu_train_out = normalize(df_edu_train_out.values)
df_edu_val_out = normalize(df_edu_val_out.values)
df_edu_train = np.expand_dims(df_edu_train,axis=0)
df_edu_train_out = np.expand_dims(df_edu_train_out,axis=0)
model = Sequential()
model.add(Conv1D(filters=32, kernel_size=5, padding="same", strides=1, input_shape= (959,250),data_format='channels_first'))
model.add(Conv1D(32, 5))
model.add(Dropout(0.3))
model.add(Conv1D(16, 5))
model.add(Dropout(0.3))
model.add(Flatten())
model.add(Dense(250, activation='relu'))
model.add(Dense(25, activation=None))
optimizer = Adam(learning_rate=0.0001, beta_1=0.9, beta_2=0.999, amsgrad=False)
model.compile(optimizer=optimizer, loss='mae', metrics=['accuracy'])
EPOCHS = 1000
model.fit(df_edu_train, df_edu_train_out, epochs=EPOCHS)
Error:
InvalidArgumentError: Conv2DCustomBackpropFilterOp only supports NHWC.
[[node gradient_tape/sequential/conv1d/Conv1D/Conv2DBackpropFilter
(defined at C:\Users\nick0\anaconda3\lib\site-packages\keras\optimizer_v2\optimizer_v2.py:464)
]] [Op:__inference_train_function_1046]
Errors may have originated from an input operation.
Input Source operations connected to node gradient_tape/sequential/conv1d/Conv1D/Conv2DBackpropFilter:
In[0] sequential/conv1d/Conv1D/ExpandDims (defined at C:\Users\nick0\anaconda3\lib\site-packages\keras\layers\convolutional.py:231)
In[1] gradient_tape/sequential/conv1d/Conv1D/ShapeN:
In[2] gradient_tape/sequential/conv1d/Conv1D/Reshape:
Update:
I changed data_format='channels_first' to data_format='channels_last'. Training began, but as I understand it, the training treated the entire training set as a single example, i.e. the neural network thought there was only one example and trained on it specifically. How do I make the neural network take each row in turn? Is each row essentially a separate example?
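For illustration: the single-example behaviour comes from the np.expand_dims(..., axis=0) calls, which wrap the whole (n_windows, 250) matrix in a batch dimension of size 1. A hedged sketch of how each 50-day window could become its own sample (assuming 5 feature columns remain after dropping Date and Adj Close):
import numpy as np

n_features = 5   # Open, High, Low, Close, Volume -- an assumption about the CSV
days_edu = 50

# instead of np.expand_dims(..., axis=0): make axis 0 one example per window
x_train = np.asarray(df_edu_train).reshape(-1, days_edu, n_features)
y_train = np.asarray(df_edu_train_out)

# Conv1D would then take input_shape=(days_edu, n_features) with the default
# data_format='channels_last'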

AttributeError: 'Node' object has no attribute 'input_masks'

I created a network, but got the error:
AttributeError: in user code:
C:\Users\LocalAdmin\.conda\envs\newenvt\lib\site-packages\keras_contrib\metrics\crf_accuracies.py:23 crf_viterbi_accuracy *
mask = crf._inbound_nodes[idx].input_masks[0]
AttributeError: 'Node' object has no attribute 'input_masks'
My Code:
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
from tqdm import tqdm
from tensorflow.keras import Input,Model
from tensorflow.keras.layers import Dense, TimeDistributed, SpatialDropout1D, Bidirectional, LSTM, Lambda
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.compat.v1.keras import backend as K
from keras_contrib.layers import CRF
np.random.seed(1234567890)
# Neural Network
input_text = Input(shape=(max_len,), dtype=tf.string)
embedding = Lambda(ElmoEmbedding, output_shape=(max_len, 961), trainable=False)(input_text)
x = Bidirectional(LSTM(units=496, return_sequences=True, recurrent_dropout=0.1, dropout=0.1))(embedding)
model = TimeDistributed(Dense(50, activation="relu"))(x)
crf = CRF(n_tags, sparse_target=True) # CRF layer, n_tags+1(PAD)
out = crf(model) # output
model = Model(input_text, out)
model.compile(optimizer="rmsprop", loss=crf.loss_function, metrics=[crf.accuracy])
model.summary()
history = model.fit(np.array(x_train), y_train, batch_size=batch_size, epochs=1, verbose=1)
Does anyone know how to fix it? My Python version is 3.8 and my TensorFlow version is 2.4.0.
Thank you in advance for your help!
keras-contrib is deprecated and unmaintained, and incompatible with TF2:
Keras-contrib is deprecated. Use TensorFlow Addons.
Use TensorFlow Addons (tfa) instead; in your case, tfa.layers.CRF.
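A minimal sketch of the substitution, hedged: tfa.layers.CRF has a different interface than the keras_contrib layer (it returns the decoded sequence and potentials rather than exposing loss_function/accuracy attributes), so the loss has to be built separately:
import tensorflow as tf
import tensorflow_addons as tfa

# `model` (the TimeDistributed output) and n_tags come from the question
crf = tfa.layers.CRF(n_tags)
decoded_sequence, potentials, sequence_length, chain_kernel = crf(model)

# training would then use the CRF log-likelihood as a negative loss, e.g.
# log_likelihood, _ = tfa.text.crf_log_likelihood(
#     potentials, y_true, sequence_length, chain_kernel)
# loss = -tf.reduce_mean(log_likelihood)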
I have encountered the same problem as you. I still have problems after replacing the layer with the new version of CRF; I got this error:
TypeError: Value passed to parameter 'x' has DataType bool not in list of allowed values: float32, float64, int32, uint8, int16, int8, int64, bfloat16, uint16, float16, uint32, uint64

AttributeError: module 'tensorflow.keras.backend' has no attribute 'tf' in Custom Regularizer

I am using a custom regularizer that I will apply to the weights of the first hidden layer in my model.
Data
from keras.datasets import mnist  # import needed for mnist.load_data below

(XTrain, YTrain), (XTest, YTest) = mnist.load_data()
XTrain = XTrain.reshape(XTrain.shape[0], XTrain.shape[1] * XTrain.shape[2])
XTest = XTest.reshape(XTest.shape[0], XTest.shape[1] * XTest.shape[2])
YTrain = to_categorical(YTrain)
YTest = to_categorical(YTest)
Custom Regularizer
def layer1_reg(weight_matrix):
    return 0.01 * K.sum(K.sqrt(K.tf.reduce_sum(K.square(weight_matrix), axis=1)))
Model
from keras.layers import Dense,Dropout
from keras.utils import to_categorical
from keras.models import Sequential
from keras.optimizers import Adam
import tensorflow as tf
import tensorflow.keras.backend as K
model=Sequential()
model.add(Dense(500,activation='relu',input_shape=(784,),kernel_regularizer=layer1_reg))
model.add(Dropout(0.1))
model.add(Dense(100,activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(10,activation='softmax'))
model.compile(loss="categorical_crossentropy",optimizer=Adam(lr=0.0001))
model.fit(x=XTrain,y=YTrain,batch_size=32,epochs=10)
Error Message
AttributeError Traceback (most recent call last)
<ipython-input-125-db27f84e12fe> in <module>()
22
23 model=Sequential()
---> 24 model.add(Dense(500,activation='relu',input_shape=(784,),kernel_regularizer=layer1_reg))
25 model.add(Dropout(0.1))
26 model.add(Dense(100,activation='relu'))
5 frames
<ipython-input-124-dc68394d8544> in layer1_reg(weight_matrix)
1 def layer1_reg( weight_matrix):
----> 2 return 0.01*K.sum(K.sqrt(K.tf.reduce_sum(K.square(weight_matrix), axis=1)))
AttributeError: module 'tensorflow.keras.backend' has no attribute 'tf'
How do I fix this error on the latest TensorFlow and Keras versions? Some people have suggested using a previous version of TensorFlow, but I cannot do that, given my newer TensorFlow and Keras requirements.
In the function:
def layer1_reg(weight_matrix, l1=0.01):
    return l1 * K.sum(K.sqrt(K.tf.reduce_sum(K.square(weight_matrix), axis=1)))
I'm guessing K is
import keras.backend as K
K has no attribute tf (TensorFlow); you should use tf.reduce_sum() instead of K.tf.reduce_sum(). Most importantly, stick to a single module, either tensorflow.keras or keras; the two are different.
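Putting that advice together, a minimal sketch of the corrected regularizer (assuming the tensorflow.keras imports from the question):
import tensorflow as tf
import tensorflow.keras.backend as K

def layer1_reg(weight_matrix, l1=0.01):
    # call tf.reduce_sum directly; the K.tf alias no longer exists in tf.keras
    return l1 * K.sum(K.sqrt(tf.reduce_sum(K.square(weight_matrix), axis=1)))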

Tensorboard callback not writing the training metrics

When the model takes sufficiently long to run (i.e. it has enough parameters and the data is big enough) and profile_batch is on, the TensorBoard callback fails to write the training metrics to the event logs (at least they are not visible in TensorBoard).
Here is the code used to get that failure:
import os.path as op
import time
import numpy as np
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.layers import Conv2D, Input
from tensorflow.keras.models import Model
size = 512
im = Input((size, size, 1))
im_conv = Conv2D(512, 3, padding='same', activation='relu')(im)
im_conv = Conv2D(1, 3, padding='same', activation='linear')(im_conv)
model = Model(im, im_conv)
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
data = np.random.rand(1, size, size, 1)
run_id = f'{int(time.time())}'
log_dir = op.join('logs', run_id)
tboard_cback = TensorBoard(
    log_dir=log_dir,
    histogram_freq=0,
    write_graph=False,
    write_images=False,
    profile_batch=2,
)
model.fit(
    x=data,
    y=data,
    validation_data=[data, data],
    callbacks=[tboard_cback],
    epochs=100,
    verbose=0,
)
Here is the TensorBoard view I get (screenshot not reproduced).
Is there something wrong with the way I am using this callback?
I use Python 3.6.8, tensorflow 2.0.0 on GPU (but the behaviour is the same on CPU).
So apparently, this is due to the profiling done in the callback. We can disable it via profile_batch=0. The issue is ongoing and can be followed here: https://github.com/tensorflow/tensorboard/issues/2084
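Concretely, the workaround is just the callback from the question with profiling disabled:
tboard_cback = TensorBoard(
    log_dir=log_dir,
    histogram_freq=0,
    write_graph=False,
    write_images=False,
    profile_batch=0,  # 0 disables profiling, so training metrics are written
)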

theano error from keras

I am running a keras script (no direct call to theano in my script) and I get the following error:
TypeError: ('An update must have the same type as the original shared
variable (shared_var=<TensorType(float32, matrix)>,
shared_var.type=TensorType(float32, matrix),
update_val=Elemwise{add,no_inplace}.0,
update_val.type=TensorType(float64, matrix)).',
'If the difference is related to the broadcast pattern,
you can call the tensor.unbroadcast(var, axis_to_unbroadcast[, ...])
function to remove broadcastable dimensions.')
I have seen this error from folks running Theano directly, but not through Keras. I am not sure what I should do, since I am not dealing with tensors directly.
The problem was a change between Keras versions (I am currently using Keras 0.3.2 with Theano 0.8.0): what used to be fine does not work with the new Keras version.
The following was the original code; see the fix below.
from keras.models import Sequential
import keras.optimizers
from keras.layers.core import Dense, Dropout
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import PReLU
from keras.layers.core import Activation
from keras.optimizers import SGD, Adam
from sklearn.preprocessing import StandardScaler
from sklearn.base import BaseEstimator, RegressorMixin
class NnRegression(BaseEstimator, RegressorMixin):
    def __init__(self, apply_standart_scaling=True,
                 dropx=[0.2, 0.5, 0.5], nb_neuronx=[50, 30], nb_epoch=105,
                 validation_split=0., verbose=1):
        self.apply_standart_scaling = apply_standart_scaling
        self.dropx = dropx
        self.nb_neuronx = nb_neuronx
        self.nb_epoch = nb_epoch
        self.validation_split = validation_split
        self.verbose = verbose

    def fit(self, X, y):
        nb_features = X.shape[1]
        self.standart_scaling = StandardScaler() if self.apply_standart_scaling else None
        if self.standart_scaling:
            X = self.standart_scaling.fit_transform(X)
        model = Sequential()
        model.add(Dropout(input_shape=(nb_features,), p=self.dropx[0]))
        model.add(Dense(output_dim=self.nb_neuronx[0], init='glorot_uniform'))
        model.add(PReLU())
        model.add(BatchNormalization((self.nb_neuronx[0],)))
        model.add(Dropout(self.dropx[1]))
        model.add(Dense(self.nb_neuronx[1], init='glorot_uniform'))
        model.add(PReLU())
        model.add(BatchNormalization((self.nb_neuronx[0],)))
        model.add(Dropout(self.dropx[2]))
        model.add(Dense(1, init='glorot_uniform'))
        nn_verbose = 1 if self.verbose > 0 else 0
        # note: optz is defined but unused; compile() below creates a fresh Adam()
        optz = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
        model.compile(optimizer=Adam(), loss='mse')
        model.fit(X, y, batch_size=16,
                  nb_epoch=self.nb_epoch, validation_split=self.validation_split,
                  verbose=nn_verbose)
        self.model = model

    def predict(self, X):
        if self.standart_scaling:
            X = self.standart_scaling.transform(X)
        return self.model.predict_proba(X, verbose=0)
Well, it turns out that the problem was this single line of code:
model.add(BatchNormalization((self.nb_neuronx[0],)))
It should actually be:
model.add(BatchNormalization())
because the number of neurons has no business in the normalization layer (although this did not cause a problem in a previous Keras version).
This apparently causes theano to generate new weights that are not float32 but float64, and that triggers the message above.
