Python Bayes heart prediction, results are not accurate - python

I'm trying to make a heart disease prediction program using Naive Bayes. When I finished the classifier, cross validation showed a mean accuracy of 80%. However, when I try to make a prediction on a given sample, the prediction is all wrong! The dataset is the heart disease dataset from the UCI repository; it contains 303 samples. There are two classes, 0: healthy and 1: ill. When I try making a prediction on a sample from the dataset, it doesn't predict the true value, except for very few samples. Here is the code:
import pandas as pd
import numpy as np
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.preprocessing import Imputer, StandardScaler
class Predict:
    def Read_Clean(self, dataset):
        header_row = ['Age', 'Gender', 'Chest_Pain', 'Resting_Blood_Pressure', 'Serum_Cholestrol',
                      'Fasting_Blood_Sugar', 'Resting_ECG', 'Max_Heart_Rate',
                      'Exercise_Induced_Angina', 'OldPeak',
                      'Slope', 'CA', 'Thal', 'Num']
        df = pd.read_csv(dataset, names=header_row)
        df = df.replace('[?]', np.nan, regex=True)
        df = pd.DataFrame(Imputer(missing_values='NaN', strategy='mean', axis=0)
                          .fit_transform(df), columns=header_row)
        df = df.astype(float)
        return df

    def Train_Test_Split_data(self, dataset):
        Y = dataset['Num'].apply(lambda x: 1 if x > 0 else 0)
        X = dataset.drop('Num', axis=1)
        validation_size = 0.20
        seed = 42
        X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=validation_size, random_state=seed)
        return X_train, X_test, Y_train, Y_test

    def Scaler(self, X_train, X_test):
        scaler = StandardScaler()
        X_train = scaler.fit_transform(X_train)
        X_test = scaler.transform(X_test)
        return X_train, X_test

    def Cross_Validate(self, clf, X_train, Y_train, cv=5):
        scores = cross_val_score(clf, X_train, Y_train, cv=cv, scoring='f1')
        score = scores.mean()
        print("CV scores mean: %.4f" % score)
        return score, scores

    def Fit_Score(self, clf, X_train, Y_train, X_test, Y_test, label='x'):
        clf.fit(X_train, Y_train)
        fit_score = clf.score(X_train, Y_train)
        pred_score = clf.score(X_test, Y_test)
        print("%s: fit score %.5f, predict score %.5f" % (label, fit_score, pred_score))
        return pred_score

    def ReturnPredictionValue(self, clf, sample):
        y = clf.predict([sample])
        return y[0]

    def PredictionMain(self, sample, dataset_path='dataset/processed.cleveland.data'):
        data = self.Read_Clean(dataset_path)
        X_train, X_test, Y_train, Y_test = self.Train_Test_Split_data(data)
        X_train, X_test = self.Scaler(X_train, X_test)
        self.NB = GaussianNB()
        self.Fit_Score(self.NB, X_train, Y_train, X_test, Y_test, label='NB')
        self.Cross_Validate(self.NB, X_train, Y_train, 10)
        return self.ReturnPredictionValue(self.NB, sample)
When I run:
if __name__ == '__main__':
    sample = [41.0, 0.0, 2.0, 130.0, 204.0, 0.0, 2.0, 172.0, 0.0, 1.4, 1.0, 0.0, 3.0]
    p = Predict()
    print "Prediction value: {}".format(p.PredictionMain(sample))
The result is:
NB: fit score 0.84711, predict score 0.83607
CV scores mean: 0.8000
Prediction value: 1
I get 1 instead of 0 (this sample is taken straight from the dataset).
I did this for more than one sample from the dataset and I get a wrong result most of the time; it's as if the accuracy is not really 80%!
Any help would be appreciated.
Thanks in advance.
Edit:
Problem solved using a Pipeline. The final code is:
import pandas as pd
import numpy as np
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.preprocessing import Imputer, StandardScaler, OneHotEncoder
from sklearn.pipeline import Pipeline
class Predict:
    def __init__(self):
        self.X = []
        self.Y = []

    def Read_Clean(self, dataset):
        header_row = ['Age', 'Gender', 'Chest_Pain', 'Resting_Blood_Pressure', 'Serum_Cholestrol',
                      'Fasting_Blood_Sugar', 'Resting_ECG', 'Max_Heart_Rate',
                      'Exercise_Induced_Angina', 'OldPeak',
                      'Slope', 'CA', 'Thal', 'Num']
        df = pd.read_csv(dataset, names=header_row)
        df = df.replace('[?]', np.nan, regex=True)
        df = pd.DataFrame(Imputer(missing_values='NaN', strategy='mean', axis=0)
                          .fit_transform(df), columns=header_row)
        df = df.astype(float)
        return df

    def Split_Dataset(self, df):
        self.Y = df['Num'].apply(lambda x: 1 if x > 0 else 0)
        self.X = df.drop('Num', axis=1)

    def Create_Pipeline(self):
        estimators = []
        estimators.append(('standardize', StandardScaler()))
        estimators.append(('bayes', GaussianNB()))
        model = Pipeline(estimators)
        return model

    def Cross_Validate(self, clf, cv=5):
        scores = cross_val_score(clf, self.X, self.Y, cv=cv, scoring='f1')
        score = scores.mean()
        print("CV scores mean: %.4f" % score)

    def Fit_Score(self, clf, label='x'):
        clf.fit(self.X, self.Y)
        fit_score = clf.score(self.X, self.Y)
        print("%s: fit score %.5f" % (label, fit_score))

    def ReturnPredictionValue(self, clf, sample):
        y = clf.predict([sample])
        return y[0]

    def PredictionMain(self, sample, dataset_path='dataset/processed.cleveland.data'):
        print "dataset: " + dataset_path
        data = self.Read_Clean(dataset_path)
        self.Split_Dataset(data)
        self.model = self.Create_Pipeline()
        self.Fit_Score(self.model, label='NB')
        self.Cross_Validate(self.model, 10)
        return self.ReturnPredictionValue(self.model, sample)
Now making a prediction on the same sample from the question returns [0], which is the true value. In fact, by running the following method:
def CheckTrue(self):
    # needs: from sklearn.model_selection import cross_val_predict
    clf = self.Create_Pipeline()
    out = cross_val_predict(clf, self.X, self.Y)
    p = [out == self.Y]
    c = 0
    for i in range(303):
        if p[0][i] == True:
            c += 1
    print "Samples with true values: {}".format(c)
I get 249 correctly predicted samples with the pipeline code, whereas I got only 150 before.

You're not applying the StandardScaler to the sample. The classifier expects scaled data, because it was trained on the output of StandardScaler.transform, but the sample is not scaled the same way as the training data was.
It is easy to make mistakes like this when combining multiple steps (scaling, preprocessing, classification) manually. To avoid such issues, it is a good idea to use a scikit-learn Pipeline.
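For illustration, a minimal sketch of that fix (reusing X_train, Y_train, and sample from the question; make_pipeline is just Pipeline with auto-generated step names):

from sklearn.naive_bayes import GaussianNB
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

# The pipeline stores the scaler fitted during fit(), so predict()
# applies the very same scaling to the incoming sample.
model = make_pipeline(StandardScaler(), GaussianNB())
model.fit(X_train, Y_train)      # scaler parameters are learned here, on X_train only
print(model.predict([sample]))   # the sample is scaled automatically before prediction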

Related

How to use t-SNE inside the pipeline

How could I use t-SNE inside my pipeline?
I have managed, without pipelining, to successfully run t-SNE and then a classification algorithm on its output.
Do I need to write a custom method that can be called in the pipeline and returns a dataframe, or how does it work?
# How I used t-SNE
%%time
from sklearn.manifold import TSNE

X_std = StandardScaler().fit_transform(dfListingsFeature_classification)
ts = TSNE()
X_tsne = ts.fit_transform(X_std)
print(X_tsne.shape)

feature_list = []
for i in range(1, X_tsne.shape[1] + 1):
    feature_list.append("TSNE" + str(i))

df_new = pd.DataFrame(X_tsne, columns=feature_list)
df_new['label'] = y
#df_new.head()

X = df_new.drop(columns=['label'])
y = df_new['label']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1, stratify=y)
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)

rfc = RandomForestClassifier()
# Train the Random Forest classifier
rfc = rfc.fit(X_train, y_train)
# Predict the response for the test dataset
y_pred = rfc.predict(X_test)
How I want to use it:
# How could I use TSNE() inside the pipeline?
%%time
steps = [('standardscaler', StandardScaler()),
         ('tsne', TSNE()),
         ('rfc', RandomForestClassifier())]
pipeline = Pipeline(steps)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=30)

parameteres = {'rfc__max_depth': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
               'rfc__criterion': ['gini', 'entropy']}
grid = GridSearchCV(pipeline, param_grid=parameteres, cv=5)
grid.fit(X_train, y_train)

print("score = %3.2f" % (grid.score(X_test, y_test)))
print('Training set score: ' + str(grid.score(X_train, y_train)))
print('Test set score: ' + str(grid.score(X_test, y_test)))
print(grid.best_params_)

y_pred = grid.predict(X_test)
print("Accuracy:", metrics.accuracy_score(y_test, y_pred))
print("Precision:", metrics.precision_score(y_test, y_pred))
print("Recall:", metrics.recall_score(y_test, y_pred))
[OUT] TypeError: All intermediate steps should be transformers and implement fit and transform or be the string 'passthrough' 'TSNE()' (type <class 'sklearn.manifold._t_sne.TSNE'>) doesn't
Should I build a custom method? If so, what should it look like?
class TestTSNE(BaseEstimator, TransformerMixin):
    def __init__(self):
        # don't know

    def fit(self, X, y=None):
        X_std = StandardScaler().fit_transform(dfListingsFeature_classification)
        ts = TSNE()
        X_tsne = ts.fit_transform(X_std)
        return self

    def transform(self, X, y=None):
        feature_list = []
        for i in range(1, self.X_tsne.shape[1] + 1):
            feature_list.append("TSNE" + str(i))
        df_new = pd.DataFrame(X_tsne, columns=feature_list)
        df_new['label'] = y
        #df_new.head()
        X = df_new.drop(columns=['label'])
        y = df_new['label']
        return X, y

...

steps = [('standardscaler', StandardScaler()),
         ('testTSNE', TestTSNE()),
         ('rfc', RandomForestClassifier())]
pipeline = Pipeline(steps)
I think you misunderstood the use of Pipeline. From the help page:
Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator. Intermediate steps of the pipeline must be ‘transforms’, that is, they must implement fit and transform methods. The final estimator only needs to implement fit.
So this means if your pipeline is:
steps = [('standardscaler', StandardScaler()),
         ('tsne', TSNE()),
         ('rfc', RandomForestClassifier())]
You are going to apply StandardScaler to your features first, then transform the result with t-SNE, before passing it to the classifier. I don't think it makes much sense to train on the t-SNE output.
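(A quick illustration of why the pipeline rejects it, added for clarity: TSNE implements fit_transform but no standalone transform, because t-SNE has no way to embed unseen points.)

from sklearn.manifold import TSNE
from sklearn.preprocessing import StandardScaler

# Pipeline intermediate steps must expose transform().
print(hasattr(StandardScaler(), 'transform'))  # True  -> valid intermediate step
print(hasattr(TSNE(), 'transform'))            # False -> rejected by Pipeline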
If you really want to latch onto the pipeline, you will need to store the result of t-SNE as an attribute and return the features unchanged, so that the classifier can still work on them.
Something like
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.manifold import TSNE
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

class TestTSNE(BaseEstimator, TransformerMixin):
    def __init__(self, n_components, random_state=None, method='exact'):
        self.n_components = n_components
        self.method = method
        self.random_state = random_state

    def fit(self, X, y=None):
        ts = TSNE(n_components=self.n_components,
                  method=self.method, random_state=self.random_state)
        self.X_tsne = ts.fit_transform(X)
        return self

    def transform(self, X, y=None):
        return X
Then:
steps = [('standardscaler', StandardScaler()),
         ('testTSNE', TestTSNE(2)),
         ('rfc', RandomForestClassifier())]
pipeline = Pipeline(steps)

X, y = make_classification()
pipeline.fit(X, y)
You can retrieve your tsne like this:
pd.DataFrame(pipeline.steps[1][1].X_tsne)
            0          1
0  -38.756626  -4.693253
1   46.516308  53.633842
2   49.107910  16.482645
3   18.306377   9.432504
4   33.551056 -27.441383
..        ...        ...
95 -31.337574 -16.913471
96 -57.918224 -39.959976
97  55.282658  37.582535
98  66.425125  19.717241
99 -50.692646  11.545088

Linear regression print predicted value

I have a mall dataset and I ran k-means with k = 5. After running linear regression, I wanted to print the predicted Y values to compare them with the actual Y values. Printing the actual values was very easy, but I keep getting an error when I try to print the predicted Y. To print the predicted values I used df = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred}), but I get the error ValueError: array length 35 does not match index length 18.
code:
df = pd.read_csv('D:\Mall_Customers.csv', usecols=['Spending Score (1-100)', 'Annual Income (k$)'])
x = StandardScaler().fit_transform(df)
kmeans = KMeans(n_clusters=5, max_iter=100, random_state=0)
y_kmeans = kmeans.fit_predict(x)

df0 = df[df.index.isin(mydict[0].tolist())]  # mydict (cluster -> row indices) comes from code not shown
Y = df0['Spending Score (1-100)']
X = df0[['Annual Income (k$)', 'Age']]

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.5, random_state=0)

from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)
r_sq = regressor.score(X, Y)
print('coefficient of determination:', r_sq)
print('intercept:', regressor.intercept_)
print('slope:', regressor.coef_)

y_pred = regressor.predict(X)
df = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
print(df)
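The mismatch comes from predicting on all of X (35 rows of cluster 0) while y_test holds only the 18 held-out rows. A hedged sketch of the likely fix, reusing regressor, X_test, and y_test from the code above:

# Predict only on the held-out rows so the length matches y_test.
y_pred = regressor.predict(X_test)
comparison = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
print(comparison)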

Got ValueError when calling cross_val_score

I am working on a Machine Learning project and I wanted to perform an accuracy evaluation of multiple algorithms. I am using this CSV and loading only the Date, Time and CO columns (I manually renamed them in the CSV). After preparing my training data, I try to perform the evaluations, but I get:
ValueError: Supported target types are: ('binary', 'multiclass'). Got 'unknown' instead.
The shapes of the vectors used for the evaluations (X_train and Y_train) are:
(9357, 2)
(9357,)
The class:
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, StratifiedKFold, cross_val_score
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
class Models:
    test_size: float
    random_state: int

    def __init__(self, test_size: float = 0.20, random_state: int = 1) -> None:
        super().__init__()
        self.test_size = test_size
        self.random_state = random_state

    @staticmethod
    def init_models() -> []:
        return [
            ('LR', LogisticRegression(solver='liblinear', multi_class='ovr')),
            ('LDA', LinearDiscriminantAnalysis()),
            ('KNN', KNeighborsClassifier()),
            ('CART', DecisionTreeClassifier()),
            ('NB', GaussianNB()),
            ('SVM', SVC(gamma='auto'))
        ]

    def train(self, x: [], y: []):
        x_train, x_validation, y_train, y_validation = train_test_split(
            x, y, test_size=self.test_size, random_state=self.random_state)
        return x_train, x_validation, y_train, y_validation

    def evaluate(self, x_train: [], y_train: [], splits: int = 10, random_state: int = 1):
        results = []
        names = []
        models = self.init_models()
        for name, model in models:
            kfold = StratifiedKFold(n_splits=splits, random_state=random_state)
            cv_results = cross_val_score(estimator=model, X=x_train, y=y_train, cv=kfold, scoring='accuracy')
            results.append(cv_results)
            names.append(name)
            print('%s: %f (%f)' % (name, cv_results.mean(), cv_results.std()))
And I am calling my class as:
models_helper = Models()
array = dataset.values
X = array[:, 1:3]
Y = array[:, 2]
prepared = models_helper.train(X, Y)
classification = models_helper.evaluate(prepared[0], prepared[2])
I avoided this problem by first calculating predicted values with cross_val_predict and then comparing the predicted values against y_train to get a score with metrics.accuracy_score.
# Function that runs the requested algorithm and returns the accuracy metrics.
# Passing the sklearn model as an argument along with cv values and training data.
def fit_ml_algo(algo, X_train, y_train, cv):
    # One pass
    model = algo.fit(X_train, y_train)
    acc = round(model.score(X_train, y_train) * 100, 2)
    # Cross validation
    train_pred = model_selection.cross_val_predict(algo,
                                                   X_train,
                                                   y_train,
                                                   cv=cv,
                                                   n_jobs=-1)
    # Cross-validation accuracy metric
    acc_cv = round(metrics.accuracy_score(y_train, train_pred) * 100, 2)
    return train_pred, acc, acc_cv
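As an added diagnostic sketch (an assumption on my part, not from the original answer): the 'unknown' target type usually means y is an object-dtype array, which StratifiedKFold refuses to stratify. scikit-learn's type_of_target shows what the splitter sees, reusing Y from the question:

from sklearn.utils.multiclass import type_of_target

# An object-dtype slice of dataset.values reports 'unknown'.
print(type_of_target(Y))

# Casting gives it a real dtype; continuous CO readings would still need
# to be binned or encoded into discrete classes before classification.
print(type_of_target(Y.astype(float)))   # 'continuous'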

UndefinedMetricWarning in Sklearn

I'm not able to see my resulting accuracy score in my final graph, and I get warnings that precision/recall are ill-defined, even though I don't see any 0's.
I'm using this yeast data: https://archive.ics.uci.edu/ml/machine-learning-databases/yeast/yeast.data
I've tried making the whole set my training set by making train_frac=1.
import pandas as pd
import numpy as np
%matplotlib inline
from matplotlib import pyplot as plt
from sklearn.naive_bayes import GaussianNB
df = pd.read_csv("<my_dir>",names = ['sample','mcg', 'gvh', 'alm', 'mit', 'erl', 'pox', 'vac', 'nuc','site'])
df=df.drop(columns=['sample'])
model_type = GaussianNB()
target = 'site'
train_frac = 0.5
Y = df[target]
df2 = df.drop(columns=[target])
X = df[df2.columns[:]]  # every column except 'site'
def naive_split(X, Y, n):
    # Take the first n lines of X and Y for training and the rest for testing
    X_train = X[:n]
    X_test = X[n:]
    Y_train = Y[:n]
    Y_test = Y[n:]
    return (X_train, X_test, Y_train, Y_test)

def train_model(n=int(train_frac * df.shape[0])):
    X_train, X_test, Y_train, Y_test = naive_split(X, Y, n)
    clf = model_type
    clf = clf.fit(X_train, Y_train)
    return (X_test, Y_test, clf)

X_test, Y_test, clf = train_model()
import sklearn.metrics as metrics
from sklearn import model_selection

sizes = np.arange(0.98, 0.01, -0.02)
result = {}
for size in sizes:
    X_train, X_test, Y_train, Y_test = model_selection.train_test_split(
        X, Y, test_size=size, random_state=200)
    clf = model_type
    clf = clf.fit(X_train, Y_train)
    score = clf.score(X_test, Y_test)
    precision = metrics.precision_score(Y_test, clf.predict(X_test), average='weighted')
    recall = metrics.recall_score(Y_test, clf.predict(X_test), average='weighted')
    result[len(Y_train)] = (score, precision, recall)

result = pd.DataFrame(result).transpose()
result.columns = ['Accuracy', 'Precision', 'Recall']
result.plot(marker='*', figsize=(15, 5))
plt.title('Metrics measures using random train/test splitting')
plt.xlabel('Size of training set')
plt.ylabel('Value');
I get the following warnings when I expect it to run without error:
C:\Users\<user>\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\metrics\classification.py:1135: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples.
  'precision', 'predicted', average, warn_for)
C:\Users\<user>\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\metrics\classification.py:1137: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples.
  'recall', 'true', average, warn_for)
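As a hedged diagnostic (an addition, not part of the question): with small training fractions, rare yeast classes can be absent from the predictions or from the test split entirely, which is exactly when weighted precision/recall become ill-defined. Comparing the label sets, using clf, X_test, and Y_test from the code above, makes the warning concrete:

# Labels never predicted make precision ill-defined for that class;
# labels absent from the test split make recall ill-defined.
y_pred = clf.predict(X_test)
print(set(Y_test) - set(y_pred))   # present in truth, never predicted
print(set(y_pred) - set(Y_test))   # predicted, but absent from truth

# In scikit-learn 0.22+ the fallback can be made explicit:
# metrics.precision_score(Y_test, y_pred, average='weighted', zero_division=0)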

XGBOOST feature name error - Python

Probably this question has been asked many times in different forms. However, my problem arises when I use XGBClassifier() with production-like data: I get a feature name mismatch error. I am hoping someone can tell me what I am doing wrong. Here is my code. BTW, the data is completely made up:
import pandas as pd
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import train_test_split, KFold, cross_val_score
from sklearn.metrics import accuracy_score
import xgboost as xgb
data = {"Age": [44, 27, 30, 38, 40, 35, 70, 48, 50, 37],
        "BMI": ["25-29", "35-39", "30-35", "40-45", "45-49", "20-25", "<19", ">70", "50-55", "55-59"],
        "BP": ["<140/90", ">140/90", ">140/90", ">140/90", "<140/90", "<140/90", "<140/90", ">140/90", ">140/90", "<140/90"],
        "Risk": ["No", "Yes", "Yes", "Yes", "No", "No", "No", "Yes", "Yes", "No"]}
df = pd.DataFrame(data)

X = df.iloc[:, :-1]
y = df.iloc[:, -1]

labelencoder = LabelEncoder()

def encoder_X(columns):
    for i in columns:
        X.iloc[:, i] = labelencoder.fit_transform(X.iloc[:, i])

encoder_X([1, 2])
y = labelencoder.fit_transform(y)

onehotencdoer = OneHotEncoder(categorical_features=[[1, 2]])
X = onehotencdoer.fit_transform(X).toarray()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 13)
model = xgb.XGBClassifier()
model.fit(X_train, y_train, verbose = True)
y_pred = model.predict(X_test)
predictions = [round(value) for value in y_pred]
accuracy = accuracy_score(y_test, predictions)
print("Accuracy: {0}%".format(accuracy*100))
So far so good, no error. The accuracy score is 100%, but that's because it is a made-up dataset, so I am not worried about that.
When I try to classify a new dataset with the model, I get a "feature name mismatch" error:
proddata = {"Age": [65, 50, 37],
            "BMI": ["25-29", "35-39", "30-35"],
            "BP": ["<140/90", ">140/90", ">140/90"]}
prod_df = pd.DataFrame(proddata)

def encoder_prod(columns):
    for i in columns:
        prod_df.iloc[:, i] = labelencoder.fit_transform(prod_df.iloc[:, i])

encoder_prod([1, 2])
onehotencdoer = OneHotEncoder(categorical_features=[[1, 2]])
prod_df = onehotencdoer.fit_transform(prod_df).toarray()
predictions = model.predict(prod_df)
After this I get the error below:
predictions = model.predict(prod_df)
Traceback (most recent call last):
  File "<ipython-input-24-456b5626e711>", line 1, in <module>
    predictions = model.predict(prod_df)
  File "c:\users\sozdemir\appdata\local\programs\python\python35\lib\site-packages\xgboost\sklearn.py", line 526, in predict
    ntree_limit=ntree_limit)
  File "c:\users\sozdemir\appdata\local\programs\python\python35\lib\site-packages\xgboost\core.py", line 1044, in predict
    self._validate_features(data)
  File "c:\users\sozdemir\appdata\local\programs\python\python35\lib\site-packages\xgboost\core.py", line 1288, in _validate_features
    data.feature_names))
ValueError: feature_names mismatch: ['f0', 'f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9', 'f10', 'f11', 'f12'] ['f0', 'f1', 'f2', 'f3', 'f4', 'f5']
expected f6, f11, f12, f9, f7, f8, f10 in input data
I know this is happening as a result of the OneHotEncoding, when it is fit and transformed to an array. I might be wrong though.
If this is a result of the OneHotEncoding, can I just skip it, since LabelEncoder() already encodes the categorical values?
Thank you so much for any help and feedback.
PS: The version of XGBOOST is 0.7
xgboost.__version__
Out[37]: '0.7'
It seems like the encoder needs to be saved after it is fitted. I used joblib from sklearn. Jason from https://machinelearningmastery.com/ gave me the idea of saving the encoder. Below is an edited version:
import pandas as pd
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import train_test_split, KFold, cross_val_score
from sklearn.metrics import accuracy_score
from sklearn.externals import joblib
import xgboost as xgb
data = {"Age": [44, 27, 30, 38, 40, 35, 70, 48, 50, 37],
        "BMI": ["25-29", "35-39", "30-35", "40-45", "45-49", "20-25", "<19", ">70", "50-55", "55-59"],
        "BP": ["<140/90", ">140/90", ">140/90", ">140/90", "<140/90", "<140/90", "<140/90", ">140/90", ">140/90", "<140/90"],
        "Risk": ["No", "Yes", "Yes", "Yes", "No", "No", "No", "Yes", "Yes", "No"]}
df = pd.DataFrame(data)

X = df.iloc[:, :-1]
y = df.iloc[:, -1]

labelencoder = LabelEncoder()

def encoder_X(columns):
    for i in columns:
        X.iloc[:, i] = labelencoder.fit_transform(X.iloc[:, i])

encoder_X([1, 2])
y = labelencoder.fit_transform(y)

onehotencdoer = OneHotEncoder(categorical_features=[[1, 2]])
onehotencdoer.fit(X)
enc = joblib.dump(onehotencdoer, "encoder.pkl")  # save the fitted encoder
X = onehotencdoer.transform(X).toarray()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 13)
model = xgb.XGBClassifier()
model.fit(X_train, y_train, verbose = True)
y_pred = model.predict(X_test)
predictions = [round(value) for value in y_pred]
accuracy = accuracy_score(y_test, predictions)
print("Accuracy: {0}%".format(accuracy*100))
And now, we can use the fitted encoder to transform the prod data:
proddata = {"Age": [65, 50, 37],
            "BMI": ["25-29", "35-39", "30-35"],
            "BP": ["<140/90", ">140/90", ">140/90"]}
prod_df = pd.DataFrame(proddata)

def encoder_prod(columns):
    for i in columns:
        prod_df.iloc[:, i] = labelencoder.fit_transform(prod_df.iloc[:, i])

encoder_prod([1, 2])
enc = joblib.load("encoder.pkl")
prod_df = enc.transform(prod_df).toarray()
predictions = model.predict(prod_df)
results = [round(val) for val in predictions]
It seems to be working for this example, and I'll try this method at work on a larger dataset.
Please let me know what you think.
Thanks
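One possible follow-up, sketched under the assumption of a newer scikit-learn (0.20+), where OneHotEncoder accepts string columns directly and ColumnTransformer replaces categorical_features: putting the encoder and the model into a single Pipeline means one joblib artifact carries both, so the production data can never be encoded differently from the training data.

import joblib
import xgboost as xgb
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder

# One-hot encode the string columns; Age passes through unchanged.
preprocess = ColumnTransformer(
    [("onehot", OneHotEncoder(handle_unknown="ignore"), ["BMI", "BP"])],
    remainder="passthrough")

pipe = Pipeline([("prep", preprocess), ("model", xgb.XGBClassifier())])
pipe.fit(df[["Age", "BMI", "BP"]], y)       # df and y as prepared above

joblib.dump(pipe, "pipeline.pkl")            # encoder and model in one artifact
predictions = joblib.load("pipeline.pkl").predict(prod_df)  # prod_df with raw string columns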
