import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import os
dataset = pd.read_csv(r'emails.csv')
dataset.columns #Index(['text', 'spam'], dtype='object')
dataset.shape #(5728, 2)
#Checking for duplicates and removing them
dataset.drop_duplicates(inplace = True)
dataset.shape #(5695, 2)
#Checking for any null entries in the dataset
print (pd.DataFrame(dataset.isnull().sum()))
'''
text 0
spam 0
'''
#Using Natural Language Processing to clean the text and build one corpus
# Cleaning the texts
import re
import nltk
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
#Every mail starts with 'Subject :'; remove this from each text
dataset['text']=dataset['text'].map(lambda text: text[1:])
dataset['text'] = dataset['text'].map(lambda text:re.sub('[^a-zA-Z0-9]+', ' ',text)).apply(lambda x: (x.lower()).split())
ps = PorterStemmer()
stop_words = set(stopwords.words('english'))
corpus = dataset['text'].apply(
    lambda tokens: ' '.join(ps.stem(word) for word in tokens if word not in stop_words))
# Creating the Bag of Words model
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer()
X = cv.fit_transform(corpus.values).toarray()
y = dataset.iloc[:, 1].values
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20)
# Fitting Naive Bayes classifier to the Training set
from sklearn.naive_bayes import MultinomialNB
classifier = MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
classifier.fit(X_train , y_train)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
I had practiced only on numeric data before; just today I switched to text, so I have this model ready with about 0.98 accuracy, but when I try to predict on a new text input I get errors.
subject = "hello this is a test"
classifier.predict([[subject]])
The error I got is:
FutureWarning: Beginning in version 0.22, arrays of bytes/strings will be converted to decimal numbers if dtype='numeric'. It is recommended that you convert the array to a float dtype before using it in scikit-learn, for example by using your_array = your_array.astype(np.float64).
return f(**kwargs)
ValueError: matmul: Input operand 1 has a mismatch in its core dimension 0, with gufunc signature (n?,k),(k,m?)->(n?,m?) (size 29223 is different from 1)
Any suggestions or possible solutions I can try?
I also tried converting the sentence first:
from sklearn.feature_extraction.text import TfidfTransformer
vectorizer = CountVectorizer()
tfidfconverter = TfidfTransformer()
text = "Hello world!"
text = vectorizer.transform([text]).toarray()
text = tfidfconverter.transform(text).toarray()
label = classifier.predict(text)[0]
but got a NotFittedError: Vocabulary not fitted or provided.
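For reference, the usual way around both errors is to reuse the CountVectorizer object cv that was already fitted on the training corpus, rather than creating or fitting a new one. A minimal sketch along those lines (assuming the new subject line would ideally also get the same cleaning and stemming as the training text):
new_subject = "hello this is a test"
new_features = cv.transform([new_subject]).toarray()  # same columns/vocabulary the classifier was trained on
print(classifier.predict(new_features))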
I am trying to classify emotion from tweets with a dataset of 4401 tweets. When I use a smaller sample of the data (around 15 tweets) everything works fine, but when I use the full dataset it raises the error:
Found input variables with inconsistent numbers of samples: [7, 3520]
The error happens when I try to oversample the data using SMOTE, after transforming the data using CountVectorizer.
This is the code where the error is raised:
# N-gram Feature and Term Frequency
vectorizer = CountVectorizer(ngram_range=(1,3))
x_train_tf = vectorizer.fit_transform(str(x_train).split('\n')).toarray()
x_test_tf = vectorizer.transform(str(x_test).split('\n')).toarray()
df_output = pd.DataFrame(data =x_train_tf, columns = vectorizer.get_feature_names_out())
display(df_output)
# the print shape is (7 rows × 250 columns)
smote = SMOTE(random_state=42, k_neighbors=5)
x_smote, y_smote = smote.fit_resample(x_train_tf, y_train)
print("Total Train Data SMOTE : ",x_smote.shape), print("Total Train Label SMOTE : ",y_smote)
I do not understand why this is happening, so some explanation would really help.
I have already tried to solve it using answers from other similar questions, but nothing has worked.
This is the full code:
import nltk
import re
#nltk.download()
import string
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from nltk import everygrams
from collections import Counter
from sklearn import preprocessing
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from imblearn.over_sampling import SMOTE
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from Sastrawi.Stemmer.StemmerFactory import StemmerFactory
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score, confusion_matrix
dataset = pd.read_csv("G:/TA/Program/dataset/Twitter_Emotion_Dataset.csv", encoding='latin-1')
# Preprocessing
dataset['case_folding_tweet'] = dataset['tweet'].str.casefold()
dataset['only_alphabet_tweet'] = [re.sub('[^a-zA-Z]+\s*', ' ', s) for s in dataset['case_folding_tweet']]
dataset['data_cleaning_tweet'] = dataset['only_alphabet_tweet'].str.replace(r'\b\w{1}\b','').str.replace(r'\s+', ' ')
slangword_dictionary = ("G:/TA/Program/dataset/kamus_singkatan.csv")
deslang = {}
list_slangword = open(slangword_dictionary).readlines()
for line in list_slangword:
    slang, unslang = line.strip().split(';')
    deslang[slang] = unslang
deslang[slang] = {r"\b{}\b".format(k): v for k, v in deslang.items()}
dataset['data_cleaning_tweet'] = dataset['data_cleaning_tweet'].replace(deslang[slang], regex=True)
dataset['convert_slang_tweet'] = dataset['data_cleaning_tweet']
replace_dictionary = {'tidak ': 'tidak', 'bukan ': 'bukan', 'jangan ': 'jangan', 'belum ': 'belum'}
dataset['convert_negation_tweet'] = dataset['convert_slang_tweet'].replace(replace_dictionary, regex=True)
dataset['tokenization_tweet'] = dataset['convert_negation_tweet'].apply(word_tokenize)
list_stopwords = set(stopwords.words("indonesian"))
list_stopwords.add('username')
list_stopwords.add('url')
dataset['stopword_removal_tweet'] = dataset['tokenization_tweet'].apply(lambda x: [item for item in x if item not in list_stopwords])
factory = StemmerFactory()
stemmer = factory.create_stemmer()
dataset['stemmed_tweet'] = dataset['stopword_removal_tweet'].apply(lambda x: [stemmer.stem(y) for y in x])
# Split data
x = dataset["stemmed_tweet"].values
y = dataset["label"].values
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.8, random_state= 42)
# Get N-gram and TF
vectorizer = CountVectorizer(ngram_range=(1,3))
x_train_tf = vectorizer.fit_transform(str(x_train).split('\n')).toarray()
x_test_tf = vectorizer.transform(str(x_test).split('\n')).toarray()
# Oversampling
smote = SMOTE(random_state=42, k_neighbors=5)
x_smote, y_smote = smote.fit_resample(x_train_tf, y_train)
print("Total Train Data SMOTE : ",x_smote.shape), print("Total Train Label SMOTE : ",y_smote)
gnb_classifier = GaussianNB()
gnb_classifier.fit(x_smote, y_smote)
print(gnb_classifier)
y_pred = gnb_classifier.predict(x_test_tf)
print("Emotion Predicted :", y_pred)
Link to the dataset
I cannot solve it precisely because I don't have your data, but here are a few observations which should help:
Apparently x_train_tf has only 7 rows? That is not enough for training a model, and it is not 80% of 4401, which is what you are supposed to obtain from train_test_split.
Note that y_train has 3520 rows = 4401 * 80%, the correct number of rows.
I suspect that the line x_train_tf = vectorizer.fit_transform(str(x_train).split('\n')).toarray() is not doing what you think it does. Try to decompose the str(x_train).split('\n') part.
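To see this concretely, a quick check along those lines (illustrative only, using the variable names from the question):
pieces = str(x_train).split('\n')
print(type(str(x_train)))  # str(x_train) is one big string: numpy's printed (and truncated) repr of the array
print(len(pieces))         # only a handful of display lines, not 3520 documents
print(pieces[0][:100])     # each "document" is just a slice of that repr, which is why x_train_tf has 7 rows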
I fixed the problem using the answer from this post,
by joining all the tokens in the train data column before vectorizing.
df_train = pd.DataFrame(data=x_train)
df_test = pd.DataFrame(data=x_test)
series = pd.Series(df_train['stemmed_tweet'])
corpus = series.apply(lambda series: ' '.join(series))
vectorizer = CountVectorizer(ngram_range=(1,3), lowercase=False)
x_train_tf = vectorizer.fit_transform(corpus).toarray()
x_test_tf = vectorizer.transform(str(df_test.values).split("\n")).toarray()
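One caveat: the test transform above still goes through str(df_test.values).split("\n"), which has the same problem as before; presumably the test column should be joined the same way as the train column so that each tweet becomes one string. A sketch of that (not tested against the original data):
corpus_test = df_test['stemmed_tweet'].apply(lambda tokens: ' '.join(tokens))
x_test_tf = vectorizer.transform(corpus_test).toarray()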
Dataset: I created a very simple dataset with "Supplier" and "Item Description" columns. It has a list of item descriptions and the preferred supplier for each item.
Requirement: I would like to write a program that takes an "Item Description" and predicts the "Supplier". To keep it very simple, I have only 5 unique supplier/item-description combinations among the 950 rows in the .txt file.
Issue: The accuracy shows up as 1 and the confusion matrix shows no false positives. But when I give it new data, the prediction is wrong.
Steps Done
Read .txt for "Supplier" and "Item Description"
Label encoder applied on the "Supplier" (the target)
Train/test split:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.10)
Created a Pipeline for applying the TfidfVectorizer and MultinomialNB
pipeline = Pipeline([('vect', vectorizer),
('clf', MultinomialNB())
])
model = pipeline.fit(X_train, y_train)
Fit the model and predict:
y_pred=model.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
acc= accuracy_score(y_test,y_pred)
# acc is 1.0 and the cm shows no false positives/negatives
So far, things look ok
dumped the pickle
pickle.dump(model, open(r'supplier_predictions.pkl','wb'))
Tried a prediction on an item description = 'Lego, Barbie and other Toy Items'; I was expecting "Toys R Us".
The prediction was wrong; it came up as "Office Depot".
loadedModel = pickle.load(open("supplier_predictions.pkl","rb"))
new_items = {'ITEM_DESCRIPTION': ['Lego, Barbie and other Toy Items']}
new_X = pd.DataFrame(new_items, columns = ['ITEM_DESCRIPTION'])
new_y_pred=loadedModel.predict(new_X)
Can you please let me know what I am doing wrong here that leads to the wrong prediction (new_y_pred) for the new item description passed in (new_X)?
This is my first ML code. I have tried debugging it by looking at various articles, but no luck.
Thanks
== Complete Code, if it is helpful ==
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import train_test_split
import re # library for cleaning data
import nltk # library for NLP
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
import pickle
df=pd.read_csv('git_suppliers.txt', sep='\t')
# Prep the data - Item Description
from sklearn.feature_extraction.text import TfidfVectorizer
stemmer = PorterStemmer()
words = stopwords.words("english")
df['ITEM_DESCRIPTION'] = df['ITEM_DESCRIPTION'].apply(lambda x: " ".join([stemmer.stem(i) for i in re.sub("[^a-zA-Z0-9]", " ", x).split() if i not in words]).lower())
# Feature Generation using the TF-IDF
vectorizer = TfidfVectorizer(min_df= 3, stop_words="english", sublinear_tf=True, norm='l2', ngram_range=(1, 2))
final_features = vectorizer.fit_transform(df['ITEM_DESCRIPTION']).toarray()
final_features.shape
# final_features shows only 43 features - not going to use SelectKBest for such a small feature count
#
# Split into training and test data
#
X = df['ITEM_DESCRIPTION']
y = df['SUPPLIER']
from sklearn.preprocessing import LabelEncoder
labelObj = LabelEncoder()
y=labelObj.fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.10)
y_test_decoded=labelObj.inverse_transform(y_test)
#
# Create a pipeline, fit the model, predict for test data and save in pickle
#
pipeline = Pipeline([('vect', vectorizer),
('clf', MultinomialNB())
])
model = pipeline.fit(X_train, y_train)
# Predict for test data
y_pred=model.predict(X_test)
# Accuracy shows up as 1.0 and the confusion matrix shows no false positives/negatives
from sklearn.metrics import confusion_matrix,accuracy_score
cm = confusion_matrix(y_test, y_pred)
print(cm)
acc= accuracy_score(y_test,y_pred)
print(acc)
# Dump the model and let's predict for one item description,
# for which I expect Toys R Us as the supplier/seller
pickle.dump(model, open(r'supplier_predictions.pkl','wb'))
loadedModel = pickle.load(open("supplier_predictions.pkl","rb"))
new_items = {'ITEM_DESCRIPTION': ['Lego, Barbie and other Toy Items']}
new_X = pd.DataFrame(new_items, columns = ['ITEM_DESCRIPTION'])
new_y_pred=loadedModel.predict(new_X)
labelObj.inverse_transform(new_y_pred)
### Shows Office Depot
My bad: the input to predict was the wrong type. Passing in a Series instead of a DataFrame made it work fine.
new_items = pd.Series(new_items)
new_y_pred=loadedModel.predict(new_items)
labelObj.inverse_transform(new_y_pred)
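For anyone hitting the same issue: iterating over a DataFrame yields its column names, so the pipeline's vectorizer most likely saw the literal string 'ITEM_DESCRIPTION' rather than the item text, which is why the prediction fell back to a wrong class. Passing the raw strings (or the column itself) avoids that; a sketch with the objects from the question:
new_y_pred = loadedModel.predict(['Lego, Barbie and other Toy Items'])  # a list of raw strings works
# or, keeping the DataFrame, pass the column so the vectorizer sees the text values:
new_y_pred = loadedModel.predict(new_X['ITEM_DESCRIPTION'])
print(labelObj.inverse_transform(new_y_pred))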
I was looking through the SHAP package for Python and found no examples using KernelExplainer to explain textual data predictions, so I decided to test it out using the dataset I found on https://www.superdatascience.com/machine-learning/.
I ran into a problem in the KernelExplainer part at the very end; I believe the problem is in the way I pass the data and model into the explainer.
AttributeError: 'numpy.ndarray' object has no attribute 'lower'
Can anyone advise me on what I should revise so as to make the explainer work? I spent hours on this last bit but to no avail. Any help or advice is greatly appreciated. With much thanks!
Dataset: https://drive.google.com/file/d/1-pzY7IQVyB_GmT5dT0yRx3hYzOFGrZSr/view?usp=sharing
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import re
import nltk
#Load the data
os.chdir('C:\\Users\\Win\\Desktop\\MyLearning\\Explainability\\SHAP')
review = pd.read_csv('Restaurant_Reviews.tsv', sep='\t')
#Clean the data
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
def clean_text(df_text_column, data):
    corpus = []
    for i in range(0, len(data)):
        text = re.sub('[^a-zA-Z]', ' ', df_text_column[i])
        text = text.lower()
        text = text.split()
        ps = PorterStemmer()
        text = [ps.stem(word) for word in text if not word in set(stopwords.words('english'))]
        text = ' '.join(text)
        corpus.append(text)
    return corpus
X = pd.DataFrame({'Review':clean_text(review['Review'],review)})['Review']
y = review['Liked']
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
# Creating the pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
vect = TfidfVectorizer()
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier()
from sklearn.pipeline import make_pipeline
np.random.seed(0)
rf_pipe = make_pipeline(vect, rf)
rf_pipe.steps
rf_pipe.fit(X_train, y_train)
y_pred = rf_pipe.predict(X_test)
y_prob = rf_pipe.predict_proba(X_test)
#Performance Metrics
from sklearn import metrics
metrics.accuracy_score(y_test, y_pred) #Accuracy
metrics.roc_auc_score(y_test, y_prob[:, 1]) #ROC-AUC score
# use Kernel SHAP to explain test set predictions
import shap
explainer = shap.KernelExplainer(rf_pipe.predict_proba, X_train, link="logit")
shap_values = explainer.shap_values(X_test, nsamples=100)
# plot the SHAP values
shap.force_plot(explainer.expected_value[0], shap_values[0][0,:], X_test.iloc[0,:], link="logit")
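The AttributeError: 'numpy.ndarray' object has no attribute 'lower' most likely occurs because KernelExplainer perturbs its input as a numeric array, so the TfidfVectorizer inside the pipeline receives array rows instead of raw strings. One workaround (a sketch, not verified against this exact setup) is to explain the classifier on the already-vectorized features and pass only its predict_proba to the explainer:
X_train_vec = vect.transform(X_train).toarray()  # vect and rf are already fitted via rf_pipe.fit
X_test_vec = vect.transform(X_test).toarray()
background = shap.sample(X_train_vec, 50)  # subsample the background data to keep KernelExplainer tractable
explainer = shap.KernelExplainer(rf.predict_proba, background, link="logit")
shap_values = explainer.shap_values(X_test_vec[:5], nsamples=100)  # explain a few test rows; feature names come from vect.get_feature_names_out() in recent scikit-learn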
Getting straight to the point:
1) My goal was to apply NLP and a machine learning algorithm to classify a dataset of sentences into 5 different (numeric) categories, e.g. "I want to know details of my order" -> 1.
Code:
import numpy as np
import pandas as pd
dataset = pd.read_csv('Ecom.tsv', delimiter = '\t', quoting = 3)
import re
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
corpus = []
for i in range(0, len(dataset)):
    review = re.sub('[^a-zA-Z]', ' ', dataset['User'][i])
    review = review.lower()
    review = review.split()
    ps = PorterStemmer()
    review = [ps.stem(word) for word in review if not word in set(stopwords.words('english'))]
    review = ' '.join(review)
    corpus.append(review)
# # Creating the Bag of Words model
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer()
X = cv.fit_transform(corpus).toarray()
y = dataset.iloc[:, 1].values
# Splitting the dataset into the Training set and Test set
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)
# Fitting Naive Bayes to the Training set
from sklearn.naive_bayes import GaussianNB
classifier = GaussianNB()
classifier.fit(X_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
Everything works fine here, the model is trained well and predicts correct results for test data.
2) Now I wanted to use this trained model to predict a category for a new sentence, so I pre-processed the text in the same way I did for my dataset.
Code:
#Pre processing the new input
new_text = "Please tell me the details of this order"
new_text = new_text.split()
ps = PorterStemmer()
processed_text = [ps.stem(word) for word in new_text if not word in set(stopwords.words('english'))]
vect = CountVectorizer()
Z = vect.fit_transform(processed_text).toarray()
classifier.predict(Z)
ValueError: operands could not be broadcast together with shapes (4,4) (33,)
The only thing I am able to understand is that when I transformed my corpus the first time to train the model, the shape of the numpy array was (18, 33). The second time, when I transformed my processed_text using fit_transform() to predict for the new input, the array shape was (4, 4).
I am not able to figure out which step I applied incorrectly. What could the resolution be? Thanks in advance! :)
You got the problem right!
Say you have a corpus made of 33 different words; then your bag-of-words matrix at training time will have 33 columns. Now you are using another corpus which has only 4 different words, so you end up with a matrix with 4 columns, and the model won't like that. Hence you need to fit the second corpus into the same bag-of-words representation you had at the beginning, with 33 columns. There are different ways to do this, well explained here.
For example, one way is to save the transform object you fitted at training time with fit() and then apply it at test time (only transform())!
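In terms of the code from the question, that means reusing the cv object fitted on the training corpus and calling transform (not fit_transform) on the new sentence, after joining the processed tokens back into a single document. A minimal sketch along those lines:
processed_sentence = ' '.join(processed_text)     # the stemmed, stopword-filtered tokens as one document
Z = cv.transform([processed_sentence]).toarray()  # same 33 columns as the training matrix
print(classifier.predict(Z))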
The code below is used to generate word2vec embeddings and use them to train a naive Bayes classifier.
I am able to generate the word2vec model and use the similarity functions successfully. As a next step I want to use the word2vec output to train the naive Bayes classifier. Currently the code gives an error when I try to split the data into training and test sets. How do I convert the word2vec model into an array so that it can be used as training data?
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import gensim
# Importing the dataset
dataset = pd.read_csv('Restaurant_Reviews.tsv', delimiter = '\t', quoting = 3)
# Cleaning the texts
import re
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
corpus = []
for i in range(0, 1000):
    review = re.sub('[^a-zA-Z]', ' ', dataset['Review'][i])
    review = review.lower()
    review = review.split()
    ps = PorterStemmer()
    review = [ps.stem(word) for word in review if not word in set(stopwords.words('english'))]
    # for word2vec we want an array of vectors
    corpus.append(review)
#print(corpus)
X = gensim.models.Word2Vec(corpus, min_count=1,size=1000)
#print (X.most_similar("love"))
#embedding_matrix = np.zeros(len(X.wv.vocab), dtype='float32')
#for i in range(len(X.wv.vocab)):
# embedding_vector = X.wv[X.wv.index2word[i]]
# if embedding_vector is not None:
# embedding_matrix[i] = embedding_vector
# Creating the Bag of Words model
#from sklearn.feature_extraction.text import CountVectorizer
#cv = CountVectorizer(max_features = 1500)
#X = cv.fit_transform(corpus).toarray()
y = dataset.iloc[:, 1].values
# Splitting the dataset into the Training set and Test set
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)
# Fitting Naive Bayes to the Training set
from sklearn.naive_bayes import GaussianNB
classifier = GaussianNB()
classifier.fit(X_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
It gives an error on the line:
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)
TypeError: Expected sequence or array-like, got <class 'gensim.models.word2vec.Word2Vec'>
Word2Vec provides word embeddings only. If you want to characterize documents by embeddings, you'll need to perform an averaging/summing/max operation on the embeddings of all the words in each document, so that you get one D-dimensional vector per document that can be used for classification. See here and there for further information on this.
Otherwise, you can use Doc2Vec model to directly produce document embeddings, for which gensim also gives a very good provider.
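As a rough illustration of the averaging approach using the variable names from the question (assuming the gensim 3.x-style API used above, where min_count=1 so every training token has a vector):
import numpy as np
def document_vector(model, tokens):
    # average the vectors of all tokens that are in the model's vocabulary
    vectors = [model.wv[word] for word in tokens if word in model.wv]
    if not vectors:
        return np.zeros(model.vector_size)
    return np.mean(vectors, axis=0)
X_vectors = np.array([document_vector(X, review) for review in corpus])
# X_vectors has shape (len(corpus), 1000) and can now be passed to train_test_split and GaussianNB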
You have vectors for each word; now you have two approaches to move forward. One is to simply take the average of all the word vectors in a sentence to get the sentence vector; the other is to weight them using tf-idf.
I implemented the average approach in one of my ongoing projects and I am sharing the GitHub link; please go to the heading "text vectorization(word2vec)" and you will find the code there.
https://github.com/abhibhargav29/SentimentAnalysis/blob/master/SentimentAnalysis.ipynb. I would, however, suggest you read the data cleaning part before it as well, to understand it better.
One important piece of advice: do not split the data into train, cv, and test sets after vectorization; do it before vectorization, or you will overfit the model.
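A sketch of that ordering for the bag-of-words case (illustrative; the same principle applies when training the word2vec model itself):
docs = [' '.join(review) for review in corpus]  # join tokens back into one string per document for the vectorizer
docs_train, docs_test, y_train, y_test = train_test_split(docs, y, test_size=0.20, random_state=0)
cv = CountVectorizer()
X_train = cv.fit_transform(docs_train)  # fit the vectorizer on the training documents only
X_test = cv.transform(docs_test)        # the test documents are only transformed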