I get the following error when I run the code below:
TypeError: Cannot iterate over a Tensor with unknown first dimension.
How do I solve this? The error is raised on the line output_gcn = gcn(input_layer). I tried reshaping input_layer, but that didn't work. What is the problem, and how can I fix it? A quick answer would be appreciated, as I'm working against a deadline.
import numpy as np
import tensorflow as tf
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from stellargraph.data import UniformRandomWalk
from stellargraph import StellarGraph
from stellargraph.mapper import FullBatchNodeGenerator
from stellargraph.layer import GCN, node2vec
from tensorflow.keras import layers, Model, optimizers

pro_tweets = pprocess[0:10000]
labels = df_encoded[['label_mild', 'label_moderate', 'label_non-depressed',
                     'label_severe']]
labels = np.array(labels)

vectorizer = TfidfVectorizer()
vectors = vectorizer.fit_transform(pro_tweets)
print(vectors.shape)

similarity_matrix = cosine_similarity(vectors)
adj_matrix = np.zeros(similarity_matrix.shape)
adj_matrix[similarity_matrix > 0] = similarity_matrix[similarity_matrix > 0]

graph = StellarGraph(adj_matrix, node_features=vectors)
rw = UniformRandomWalk(graph)
walks = rw.run(nodes=list(range(adj_matrix.shape[0])), length=5, n=1)

gcn = GCN(layer_sizes=[32, 16], activations=["relu", "relu"],
          generator=FullBatchNodeGenerator(graph, method="gcn"))

input_layer = layers.Input(shape=(vectors.shape[1],), dtype="float32", name="input")
print(input_layer.shape)
print(input_layer)
#reshaped_input_layer = tf.reshape(input_layer, [vectors.shape[1],])

output_gcn = gcn(input_layer)  # <-- the TypeError is raised here

output_embedding = node2vec(output_dim=16)(output_gcn)
dense_layer = layers.Dense(16, activation="relu")(output_embedding)
output_layer = layers.Dense(4, activation="softmax")(dense_layer)

model = Model(inputs=input_layer, outputs=output_layer)
model.compile(optimizer=optimizers.Adam(lr=0.01), loss="binary_crossentropy",
              metrics=["acc"])

X_train, X_test, y_train, y_test = train_test_split(walks, labels, test_size=0.2,
                                                    random_state=42)
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=50)

test_predictions = np.round(model.predict(X_test))
accuracy = (test_predictions == y_test).mean()
print("Test accuracy: {:.4f}".format(accuracy))

train_predictions = np.round(model.predict(X_train))
accuracy = (train_predictions == y_train).mean()
print("Train accuracy: {:.4f}".format(accuracy))
Hi guys, I'm trying to do some AI text classification with Keras and it's giving me this error. Probably my layers are bad or something like that, but I don't really understand the "Unimplemented" error.
This is my code:
history = model.fit(X_train, y_train,
                    epochs=100,
                    verbose=True,
                    validation_data=(X_test, y_test),
                    batch_size=10)
The error is:
UnimplementedError: Graph execution error:
Detected at node 'binary_crossentropy/Cast' defined at (most recent call last)
Don't know why this is happening.
Rest of the code:
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import RandomizedSearchCV
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
import os
# print(os.listdir("../input"))
plt.style.use('ggplot')
filepath_dict = {'films': 'reviews_filmaffinity.csv'}
df_list = []
for source, filepath in filepath_dict.items():
    df = pd.read_table('reviews_filmaffinity.csv', sep='\|\|', header=0, engine='python')
    df['source'] = source
    df_list.append(df)
df = pd.concat(df_list)
df_films = df[df['source'] == 'films']
df_films['texto'] = df_films['review_title'] + ' ' + df_films['review_text']
sentences = df_films['texto'].values
df_films['polaridad'] = df['review_rate'].apply(
    lambda x: 'positivo' if x > 6 else ('negativo' if x < 4 else 'neutro'))
y = df_films['polaridad'].values
sentences_train, sentences_test, y_train, y_test = train_test_split(sentences, y, test_size=0.2, random_state=0)
vectorizer = CountVectorizer()
vectorizer.fit(sentences_train)
X_train = vectorizer.transform(sentences_train)
X_test = vectorizer.transform(sentences_test)
X_train
classifier = LogisticRegression()
classifier.fit(X_train, y_train)
score = classifier.score(X_test, y_test)
print("Accuracy:", score)
input_dim = X_train.shape[1] # Number of features
model = Sequential()
model.add(layers.Dense(10, input_dim=input_dim, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
I searched online but I haven't figured out how to fix it... it's driving me crazy.
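A plausible reading of the 'binary_crossentropy/Cast' failure, for what it's worth: the targets y_train here are the strings 'positivo' / 'negativo' / 'neutro', which TensorFlow cannot cast to the floats that binary_crossentropy needs, and three classes do not fit a single sigmoid output anyway. A minimal sketch of encoding the labels first (this reuses the variables from the question and is an assumption about the cause, not a confirmed fix):

from sklearn.preprocessing import LabelEncoder

encoder = LabelEncoder()
y_train_enc = encoder.fit_transform(y_train)  # 'negativo'/'neutro'/'positivo' -> 0/1/2
y_test_enc = encoder.transform(y_test)

model = Sequential()
model.add(layers.Dense(10, input_dim=input_dim, activation='relu'))
model.add(layers.Dense(3, activation='softmax'))  # one output unit per class
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# (if fitting on the scipy sparse matrix fails, X_train.toarray() is one workaround)
history = model.fit(X_train, y_train_enc,
                    epochs=100,
                    verbose=True,
                    validation_data=(X_test, y_test_enc),
                    batch_size=10)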
I am trying to deploy a classification model for fall / no-fall detection using accelerometer data on a Raspberry Pi 4. However, something seems wrong with either TensorFlow or NumPy when reshaping the arrays. The exact error I am getting is:
ValueError: The target structure is of type <class 'NoneType'>
None
However the input structure is a sequence (<class 'list'>) of length 0.
[]
nest cannot guarantee that it is safe to map one to the other.
The deployment code is below.
import warnings
warnings.filterwarnings("ignore")

import numpy as np
import pandas as pd
from tensorflow.keras.models import load_model
import board
import scipy.stats as stats
import adafruit_mpu6050
from math import atan2, degrees, pow

print("Initiating")
model_trained = load_model('model_cnn.h5')
print('\n', model_trained.summary())

i2c = board.I2C()
mpu = adafruit_mpu6050.MPU6050(i2c)
mpu_accelerometer_range = adafruit_mpu6050.Range.RANGE_4_G

data1 = []
i = 0
while i < 2000:
    i += 1
    ax, ay, az = mpu.acceleration
    data1.append([ax, ay, az])

    df = pd.DataFrame(data1)
    df.columns = ['x', 'y', 'z']

    Fs = 50
    frame_size = Fs * 4
    hop_size = Fs * 2
    frames = []
    N_FEATURES = 3
    for i in range(0, len(df) - frame_size, hop_size):
        x = df['x'].values[i:i + frame_size]
        y = df['y'].values[i:i + frame_size]
        z = df['z'].values[i:i + frame_size]
        frames.append([x, y, z])

    # converting frames to a numpy array
    frames = np.asarray(frames).reshape(-1, frame_size, N_FEATURES)
    k = int((len(df) - frame_size) / hop_size) + 1
    print('type is', type(k))
    X_test = frames.reshape(k, 200, 3, 1)
    y_pred = model_trained.predict_classes(X_test)
    print(y_pred)
I am using a hopping-window approach on the CNN data, which has 3 columns: x-axis, y-axis and z-axis acceleration. When I run the code below outside of the while loop, the model gives output, but that cannot be used in real time.
X_test = frames.reshape(k, 200, 3, 1)
y_pred = model_trained.predict_classes(X_test)
print(y_pred)
Training code
# Importing
import numpy as np
import pandas as pd
#import seaborn as sns
import tensorflow as tf
#import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, LabelEncoder
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Flatten, Dense, Dropout, BatchNormalization,MaxPooling2D
from tensorflow.keras.layers import Conv2D,MaxPool2D
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import load_model
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = "2"
df = pd.read_csv('fall_nofall_10_with_labels.csv')
df.columns = ['a','activity','time','realtime','x','y','z','gyro-x','gyro-y','gyro-z']
data = df.drop(['a','realtime','gyro-x','gyro-y','gyro-z'],axis=1)
print(data['activity'].value_counts())
# sampling rate
Fs = 50
activities = data['activity'].value_counts().index
balanced_data = data.drop(['time'], axis = 1).copy()
balanced_data['activity'].value_counts()
label = LabelEncoder()
balanced_data['label'] = label.fit_transform(df['activity'])
### Frame Preparation
import scipy.stats as stats
Fs = 50
frame_size = Fs*4  # 4 seconds -> 200 samples; frames of shape 200x3 will be fed in
hop_size = Fs*2    # overlap: advance by 100 samples each step
def get_frames(df, frame_size, hop_size):
    N_FEATURES = 3  # the input features are x, y and z
    frames = []
    labels = []
    for i in range(0, len(df) - frame_size, hop_size):
        x = df['x'].values[i:i + frame_size]  # 0-4 s, then 2-6 s, and so on
        y = df['y'].values[i:i + frame_size]
        z = df['z'].values[i:i + frame_size]
        # the activity that occurs most often in the window becomes its label
        label = stats.mode(df['label'][i:i + frame_size])[0][0]
        frames.append([x, y, z])
        labels.append(label)
    # convert into numpy arrays
    frames = np.asarray(frames).reshape(-1, frame_size, N_FEATURES)
    labels = np.asarray(labels)
    return frames, labels
X,y = get_frames(balanced_data,frame_size,hop_size)
print(X.shape,y.shape)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0, stratify = y)
X_train.shape, X_test.shape
X_train[0].shape, X_test[0].shape
X_train = X_train.reshape(39,200,3,1)
X_test = X_test.reshape(10,200,3,1)
X_train[0].shape,X_test[0].shape
## Creating the CNN model
model = Sequential()
model.add(Conv2D(16,(2,2),activation='relu',input_shape=X_train[0].shape))
# model.add(MaxPooling2D(pool_size=(1,1)))
model.add(Dropout(0.1))
model.add(Conv2D(64,(2,2),activation='relu'))
# model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(64,activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(2,activation='sigmoid'))
model.summary()
model.compile(optimizer=Adam(learning_rate = 0.001), loss = 'sparse_categorical_crossentropy', metrics = ['accuracy'])
history = model.fit(X_train, y_train, epochs = 10, validation_data= (X_test, y_test), verbose=1)
model.save("model_cnn.h5")
# load the model
model_trained = load_model('model_cnn.h5')
# summary of the model
print('\n',model_trained.summary())
from sklearn.metrics import confusion_matrix,classification_report
y_pred = model_trained.predict_classes(X_test)
print(y_pred)
print(classification_report(y_pred,y_test))
print(confusion_matrix(y_pred,y_test))
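As an aside, Sequential.predict_classes only exists in older TensorFlow releases (the tensorflow_core paths in the traceback below point to such a version); it was removed in later ones, where the usual equivalent for a classifier like this is:

import numpy as np
# Equivalent of predict_classes on TF versions that removed it:
y_pred = np.argmax(model_trained.predict(X_test), axis=1)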
Complete traceback:
Traceback (most recent call last):
  File "cnn_nofall_live.py", line 68, in <module>
    y_pred = model_trained.predict_classes(X_test)
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/sequential.py", line 338, in predict_classes
    proba = self.predict(x, batch_size=batch_size, verbose=verbose)
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/training.py", line 1013, in predict
    use_multiprocessing=use_multiprocessing)
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/training_v2.py", line 498, in predict
    workers=workers, use_multiprocessing=use_multiprocessing, **kwargs)
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/training_v2.py", line 475, in _model_iteration
    total_epochs=1)
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/training_v2.py", line 187, in run_one_epoch
    aggregator.finalize()
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/training_utils.py", line 353, in finalize
    self.results = nest.pack_sequence_as(self._structure, self.results)
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/util/nest.py", line 504, in pack_sequence_as
    return _pack_sequence_as(structure, flat_sequence, expand_composites)
  File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/util/nest.py", line 453, in _pack_sequence_as
    len(flat_sequence), truncate(flat_sequence, 100)))
ValueError: The target structure is of type `<class 'NoneType'>`
  None
However the input structure is a sequence (<class 'list'>) of length 0.
  []
nest cannot guarantee that it is safe to map one to the other.
I think you need to post the full traceback; it is difficult to debug or help without seeing it.
But OTOH, one possible issue is that you're using i for both the while loop and the for loop. Maybe change the for loop index to j or something.
Try running this code snippet to see what I mean
i = 0
while i < 10:
    i += 1
    for i in range(100):
        print(f"for loop i is {i}")
    print(f"while loop i is {i}")
The while loop exits after a single pass, because the for loop leaves i at 99 and the while condition fails on the next check.
By the way, were you going for something like this?
data1 = []
i = 0
while i < 2000:
    i += 1
    ax, ay, az = mpu.acceleration
    data1.append([ax, ay, az])

df = pd.DataFrame(data1, columns=['x', 'y', 'z'])

frame_size = 200
hop_size = 100
frames = []
N_FEATURES = 3
for i in range(0, len(df) - frame_size, hop_size):
    x = df['x'].values[i:i + frame_size]
    y = df['y'].values[i:i + frame_size]
    z = df['z'].values[i:i + frame_size]
    frames.append([x, y, z])

frames = np.array(frames)
k = int((len(df) - frame_size) / hop_size)
X_test = frames.reshape(k, 200, 3, 1)
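A quick sanity check on the frame count, using the values assumed above: range(0, len(df) - frame_size, hop_size) with 2000 samples, a frame of 200 and a hop of 100 is range(0, 1800, 100), which yields 18 windows, so the original + 1 made k one larger than the number of frames actually collected:

# Assumed values from the snippet above.
n_samples, frame_size, hop_size = 2000, 200, 100
n_frames = len(range(0, n_samples - frame_size, hop_size))
print(n_frames)                                      # 18
print(int((n_samples - frame_size) / hop_size))      # 18 -> matches len(frames)
print(int((n_samples - frame_size) / hop_size) + 1)  # 19 -> reshape would fail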
I'm trying to implement English-to-Hindi translation using a deep learning LSTM, but when I train the model it shows 'nan' for both the training and validation loss.
Link to the text file containing the translation pairs: http://www.manythings.org/anki/
Below is my Jupyter notebook code:
import string
import re
from numpy import array, argmax, random, take, delete
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, LSTM, Embedding, Bidirectional, RepeatVector, TimeDistributed
from keras.preprocessing.text import Tokenizer
from keras.callbacks import ModelCheckpoint
from keras.preprocessing.sequence import pad_sequences
from keras.models import load_model
from keras import optimizers
import matplotlib.pyplot as plt
# function to read a raw text file
def read_text(filename):
    # open the file
    file = open(filename, mode='rt', encoding='utf-8')
    # read all text
    text = file.read()
    file.close()
    return text

# split a text into sentences
def to_lines(text):
    sents = text.strip().split('\n')
    sents = [i.split('\t') for i in sents]
    return sents
data = read_text("/content/drive/My Drive/Colab Notebooks/Language Translator New/hin.txt")
eng_hin = to_lines(data)
eng_hin = array(eng_hin)
eng_hin = delete(eng_hin, 2, axis=1)
# Remove punctuation
eng_hin[:,0] = [s.translate(str.maketrans('', '', string.punctuation)) for s in eng_hin[:,0]]
eng_hin[:,1] = [s.translate(str.maketrans('', '', string.punctuation)) for s in eng_hin[:,1]]
# convert to lowercase
for i in range(len(eng_hin)):
    eng_hin[i,0] = eng_hin[i,0].lower()
    eng_hin[i,1] = eng_hin[i,1].lower()
# empty lists
eng_l = []
hin_l = []
# populate the lists with sentence lengths
for i in eng_hin[:,0]:
    eng_l.append(len(i.split()))
for i in eng_hin[:,1]:
    hin_l.append(len(i.split()))
print(max(eng_l))
print(max(hin_l))
# function to build a tokenizer
def tokenization(lines):
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(lines)
    return tokenizer
# prepare english tokenizer
eng_tokenizer = tokenization(eng_hin[:, 0])
eng_vocab_size = len(eng_tokenizer.word_index) + 1
eng_length = 25
print('English Vocabulary Size: %d' % eng_vocab_size)
# prepare Hindi tokenizer
hin_tokenizer = tokenization(eng_hin[:, 1])
hin_vocab_size = len(hin_tokenizer.word_index) + 1
hin_length = 25
print('Hindi Vocabulary Size: %d' % hin_vocab_size)
# encode and pad sequences
def encode_sequences(tokenizer, length, lines):
    # integer encode sequences
    seq = tokenizer.texts_to_sequences(lines)
    # pad sequences with 0 values
    seq = pad_sequences(seq, maxlen=length, padding='post')
    return seq
# Model Building
from sklearn.model_selection import train_test_split
train, test = train_test_split(eng_hin, test_size=0.2, random_state = 12)
# prepare training data
trainX = encode_sequences(eng_tokenizer, eng_length, train[:, 0])
trainY = encode_sequences(hin_tokenizer, hin_length, train[:, 1])
# prepare validation data
testX = encode_sequences(eng_tokenizer, eng_length, test[:, 0])
testY = encode_sequences(hin_tokenizer, hin_length, test[:, 1])
# build NMT model
def build_model(in_vocab, out_vocab, in_timesteps, out_timesteps, units):
    model = Sequential()
    model.add(Embedding(in_vocab, units, input_length=in_timesteps, mask_zero=True))
    model.add(LSTM(units))
    model.add(RepeatVector(out_timesteps))
    model.add(LSTM(units, return_sequences=True))
    model.add(Dense(out_vocab, activation='softmax'))
    return model
model = build_model(hin_vocab_size, eng_vocab_size, hin_length, eng_length, 512)
rms = optimizers.RMSprop(lr=0.001)
model.compile(optimizer=rms, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
filename = '/content/drive/My Drive/Colab Notebooks/Language Translator New/Englis_Hindi_Checkpoints/model.h1.31_dec_19'
checkpoint = ModelCheckpoint(filename, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
history = model.fit(trainX, trainY.reshape(trainY.shape[0], trainY.shape[1], 1),
                    epochs=100, batch_size=64,
                    validation_split=0.2,
                    callbacks=[checkpoint], verbose=1)
model.save('/content/drive/My Drive/Colab Notebooks/Language Translator New/Englis_Hindi_Checkpoints/eng2hin.h5')
When I try to fit the model, it runs, but the loss is 'nan'. Please help me resolve this issue.
In simple words, this usually happens because the loss function or optimizer doesn't suit the network's outputs. I recently used this kind of network to create a calculator. Try loss='categorical_crossentropy' and optimizer='adam' and see if it works.
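For concreteness, a minimal sketch of that suggestion applied to the model above; note that categorical_crossentropy expects one-hot targets, so the padded integer sequences would need converting first (this reuses the question's variables and is only a sketch of the suggested change):

from keras.utils import to_categorical

# One-hot encode the integer targets: (batch, timesteps) -> (batch, timesteps, classes),
# sizing the last axis to match the model's softmax width.
trainY_onehot = to_categorical(trainY, num_classes=model.output_shape[-1])

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(trainX, trainY_onehot, epochs=100, batch_size=64,
                    validation_split=0.2, callbacks=[checkpoint], verbose=1)

Alternatively, keeping the integer targets with sparse_categorical_crossentropy and switching only the optimizer to 'adam' avoids the memory cost of one-hot encoding.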
This is the code I used for fault detection. I need to design a CNN for fault detection with a non-image dataset, and I am unable to do so. Do I need to reshape my input into 4D? I am getting the above error. I also have different training and testing samples: the training files are 480x52 and the testing files are 960x52, so if I use them both I get another error saying the target destination has a different dimension.
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from keras.models import Sequential,Input,Model
from keras.layers import Dense,Dropout,Flatten
from keras.layers import Conv1D,MaxPooling1D
from keras.layers import LeakyReLU
# importing Dataset
from lib_read import read_data
folderk = 'TE_process/'
train, test = read_data( folderk )
# training datasets:
# I have 10 files of training data, each with 480 rows and 52 columns;
# in column 53 I assigned the labels manually
X_train = train.iloc[:,:-1].values
y_train = train.iloc[:,52].values
# testing datasets:
# I have 10 files for testing, each with 960 rows and 52 columns;
# in column 53 I assigned the labels using the code from lib_read
#X_test = test.iloc[:,:-1].values
#y_test = test.iloc[:,52].values
# for the 4-faults dataset: f1_small, f8_medium, f13_incipient, f17_big + no-faults
# Encoding the training dataset (one LabelEncoder pass per column)
labelencoder_X = LabelEncoder()
for col in range(52):
    X_train[:, col] = labelencoder_X.fit_transform(X_train[:, col])
labelencoder_yt = LabelEncoder()
y_train = labelencoder_yt.fit_transform(y_train)
yt_encoded = OneHotEncoder(categorical_features=[0])
y_train = yt_encoded.fit_transform(y_train.reshape(-1,1)).toarray()
# Splitting the datasets
X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size= 0.2, random_state=0)
#num_train,height,width,depth=X_train.shape
#num_test=X_test.shape[0]
#Standardize
ss_X=StandardScaler()
X_train = ss_X.fit_transform(X_train)
X_test = ss_X.transform(X_test)
# trying to reshape the datasets from 2D to 4D
import numpy as np
X_train=np.array(X_train)
X_train=X_train.reshape(3856,52)
#X_train = X_train.reshape(X_train.shape[0], 1, 20, 52)
#X_test = X_test.reshape(X_test.shape[0], 1, 480, 52)
#X_train = X_train.astype('float32')
#X_test = X_test.astype('float32')
#initializing CNN
fault_classifier = Sequential()
# Adding the input layer
fault_classifier.add(Conv1D(64, kernel_size=(3), activation="relu",input_shape=(3856,52)))
fault_classifier.add(LeakyReLU(0.1))
fault_classifier.add(Conv1D(64, kernel_size=(3), activation="relu"))
fault_classifier.add(LeakyReLU(0.1))
fault_classifier.add(MaxPooling1D((2)))
#fault_classifier.add(Conv2D(128, kernel_size=(3,3), activation="relu",input_shape=(50,20,1)))
#fault_classifier.add(LeakyReLU(0.1))
#fault_classifier.add(MaxPooling2D((2,1)))
#fully connected layer
fault_classifier.add(Flatten())
fault_classifier.add(Dense(300, activation="relu"))
fault_classifier.add(LeakyReLU(0.1))
fault_classifier.add(Dense(10, activation='softmax'))
# sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
fault_classifier.compile(optimizer = 'adam', loss = 'mean_squared_error', metrics = ['accuracy'])
#Fit
history = fault_classifier.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=100, batch_size=10)
# Predicting the Test set results
y_pred = fault_classifier.predict(X_test)
y_pred = (y_pred > 0.5)
pred_acc = accuracy_score(y_test, y_pred)
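For reference, a minimal sketch of the Conv1D shape convention for tabular data like this, assuming each row of 52 features is treated as a length-52 sequence with one channel (all data here is synthetic, standing in for the 480x52 training matrix):

import numpy as np
from keras.models import Sequential
from keras.layers import Conv1D, MaxPooling1D, Flatten, Dense

X = np.random.rand(480, 52).astype('float32')  # stand-in for the real features
y = np.random.randint(0, 10, size=(480,))      # stand-in integer class labels

# Conv1D expects (samples, timesteps, channels); input_shape excludes the batch dim.
X = X.reshape(-1, 52, 1)

model = Sequential([
    Conv1D(64, kernel_size=3, activation='relu', input_shape=(52, 1)),
    MaxPooling1D(2),
    Flatten(),
    Dense(10, activation='softmax'),
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(X, y, epochs=1, batch_size=10)

Only a 4D shape (samples, height, width, channels) is needed for Conv2D; for a 1D convolution over per-sample feature vectors, 3D is enough.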