Make prediction from Pandas DataFrame - python

I am very new to data science/Pandas in general. I mainly followed this and could get it to work using different classifiers:
import pandas as pd
import src.helper as helper
import time
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import GradientBoostingClassifier
# Headings
headings = ['class', 'cap-shape', 'cap-surface', 'cap-color', 'bruises', 'odor', 'gill-attachment', 'gill-spacing',
            'gill-size', 'gill-color', 'stalk-shape', 'stalk-root', 'stalk-surface-above-ring',
            'stalk-surface-below-ring', 'stalk-color-above-ring', 'stalk-color-below-ring', 'veil-type',
            'veil-color', 'ring-number', 'ring-type', 'spore-print-color', 'population', 'habitat']
# Load the data
shrooms = pd.read_csv('data/shrooms_no_header.csv', names=headings, converters={"header": float})
# Replace the ? in 'stalk-root' with 0
shrooms.loc[shrooms['stalk-root'] == '?', 'stalk-root'] = np.nan
shrooms.fillna(0, inplace=True)
# Remove columns with only one unique value
for col in shrooms.columns.values:
    if len(shrooms[col].unique()) <= 1:
        print("Removing column {}, which only contains the value: {}".format(col, shrooms[col].unique()[0]))
        shrooms.drop(col, axis=1, inplace=True)
# Col to predict later
col_predict = 'class'
# Binary Encoding
all_cols = list(shrooms.columns.values)
all_cols.remove(col_predict)
helper.encode(shrooms, [col_predict])
# Expand Shrooms DataFrame to Binary Values
helper.expand(shrooms, all_cols)
# Remove the class we want to predict
x_all = list(shrooms.columns.values)
x_all.remove(col_predict)
# Set Train/Test ratio
ratio = 0.7
# Split the DF
df_train, df_test, X_train, Y_train, X_test, Y_test = helper.split_df(shrooms, col_predict, x_all, ratio)
# Try different classifier
# TODO: Batch Use to compare
classifier = GradientBoostingClassifier(n_estimators=1000)
# TODO: Optimize Hyperparamter (where applicable)
# Time the training
timer_start = time.process_time()
classifier.fit(X_train, Y_train)
timer_stop = time.process_time()
time_diff = timer_stop - timer_start
# Get the score
score_train = classifier.score(X_train, Y_train)
score_test = classifier.score(X_test, Y_test)
print('Train Score {}, Test Score {}, Time {}'.format(score_train, score_test, time_diff))
# TODO: Test a manual DataFrame
The "helpers" are functions I don't quite understand fully, but they work:
import numpy as np
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
def split_df(df, y_col, x_cols, ratio):
    """
    This method transforms a dataframe into a train and test set, for this you need to specify:
    1. the ratio train : test (usually 0.7)
    2. the column with the Y_values
    """
    mask = np.random.rand(len(df)) < ratio
    train = df[mask]
    test = df[~mask]
    y_train = train[y_col].values
    y_test = test[y_col].values
    x_train = train[x_cols].values
    x_test = test[x_cols].values
    return train, test, x_train, y_train, x_test, y_test
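# Aside: split_df above is a hand-rolled random split. sklearn's
# train_test_split covers the same need, e.g.:
#   from sklearn.model_selection import train_test_split
#   train, test = train_test_split(df, train_size=ratio)
# (train_size is the train fraction, so it matches `ratio` here.)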
def encode(df, columns):
    for col in columns:
        le = LabelEncoder()
        col_values_unique = list(df[col].unique())
        le_fitted = le.fit(col_values_unique)
        col_values = list(df[col].values)
        col_values_transformed = le.transform(col_values)
        df[col] = col_values_transformed
def expand(df, list_columns):
    for col in list_columns:
        colvalues = df[col].unique()
        for colvalue in colvalues:
            newcol_name = "{}_is_{}".format(col, colvalue)
            df.loc[df[col] == colvalue, newcol_name] = 1
            df.loc[df[col] != colvalue, newcol_name] = 0
    df.drop(list_columns, inplace=True, axis=1)
def correlation_to(df, col):
    correlation_matrix = df.corr()
    correlation_type = correlation_matrix[col].copy()
    abs_correlation_type = correlation_type.apply(lambda x: abs(x))
    desc_corr_values = abs_correlation_type.sort_values(ascending=False)
    y_values = list(desc_corr_values.values)[1:]
    x_values = range(0, len(y_values))
    xlabels = list(desc_corr_values.keys())[1:]
    fig, ax = plt.subplots(figsize=(8, 8))
    ax.bar(x_values, y_values)
    ax.set_title('The correlation of all features with {}'.format(col), fontsize=20)
    ax.set_ylabel('Pearson correlation coefficient [abs value]', fontsize=16)
    plt.xticks(x_values, xlabels, rotation='vertical')
    plt.show()
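As an aside, the expand helper is a hand-rolled one-hot encoding; pandas provides the same transformation as pd.get_dummies. A minimal sketch, assuming the shrooms, all_cols, col_predict and manual objects from this question (note the generated column names are "col_value" rather than "col_is_value"):
import pandas as pd
# One-hot encode the categorical feature columns into 0/1 ints, like expand()
shrooms_expanded = pd.get_dummies(shrooms, columns=all_cols, dtype=int)
feature_cols = [c for c in shrooms_expanded.columns if c != col_predict]
# A new manual row can be expanded the same way and reindexed to the
# training columns, so any dummy column it lacks is filled with 0
manual_expanded = pd.get_dummies(manual, columns=list(manual.columns), dtype=int)
manual_expanded = manual_expanded.reindex(columns=feature_cols, fill_value=0)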
I would like to run a "manual" test: enter the x attributes by hand and get a prediction based on them. So, for example, I hardcode a DataFrame like the following:
manual = pd.DataFrame({
    "cap-shape": ["x"],
    "cap-surface": ["s"],
    "cap-color": ["n"],
    "bruises": ["f"],
    "odor": ["n"],
    "gill-attachment": ["a"],
    "gill-spacing": ["c"],
    "gill-size": ["b"],
    "gill-color": ["y"],
    "stalk-shape": ["e"],
    "stalk-root": ["?"],
    "stalk-surface-above-ring": ["s"],
    "stalk-surface-below-ring": ["s"],
    "stalk-color-above-ring": ["o"],
    "stalk-color-below-ring": ["o"],
    "veil-type": ["p"],
    "veil-color": ["o"],
    "ring-number": ["o"],
    "ring-type": ["p"],
    "spore-print-color": ["o"],
    "population": ["c"],
    "habitat": ["l"]
})
How would I apply the same encoding? My code calls helper.encode(manual, [col_predict]), but manual of course does not have a col_predict column.
Please bear in mind I am a complete beginner; I have searched the web a lot, but I cannot find a proper source/tutorial that lets me test a single set.
The full code can be found here.

Try this:
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import accuracy_score
data = pd.read_csv('agaricus-lepiota.data.txt', header=None)  # read data
data.rename(columns={0: 'y'}, inplace=True)  # rename the column to predict (edible or not)
le = LabelEncoder()  # label encoder
data = data.apply(lambda x: le.fit_transform(x))  # apply the LabelEncoder to all columns
X = data.drop('y', axis=1)  # X without the predict column
y = data['y']  # predict column
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
clf = GradientBoostingClassifier()  # you can pass arguments
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)  # predictions for the objects in the test set
print(accuracy_score(y_test, y_pred))  # check accuracy
You can read more about this on the sklearn site.
Is this example what you want?
To check your manual data:
manual = manual.apply(lambda x: le.fit_transform(x))
clf.predict(manual)
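One caveat: le.fit_transform here refits the encoder on whatever values happen to be in manual, so the integer codes will generally not match the training encoding. A safer sketch, assuming data_raw is the raw string-valued training DataFrame (a name introduced here for illustration) with the same columns as manual:
from sklearn.preprocessing import LabelEncoder
# Fit one encoder per column on the training data and keep them around
encoders = {col: LabelEncoder().fit(data_raw[col]) for col in data_raw.columns}
data = data_raw.apply(lambda col: encoders[col.name].transform(col))
# Encode the manual row with the same fitted encoders (no refitting)
manual_encoded = manual.apply(lambda col: encoders[col.name].transform(col))
clf.predict(manual_encoded)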

Related

How to use numpy mutual information correctly

I want to use principal component analysis-mutual information (PCA-MI) to build a data representation from a source with source relevance (values from a Smartinsole) and an output variable (values from a force plate). PCA is used to determine the principal components Ni, provided that the cumulative variance is greater than 98% of the source information measured from the 89 insole sensors. MI is generally used for selecting the input variables of predictive models because it is a good indicator of the relationship between input and output variables. Here I want to get results like the flowchart below.
Then I tried to write the code below, but I can't reproduce what the flowchart shows.
import pandas as pd
import numpy as np
from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import mutual_info_classif
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import MinMaxScaler
from sklearn.decomposition import PCA
# load the dataset
def load_dataset(filename):
    # load the dataset as a pandas DataFrame
    data = read_csv(filename, header=None)
    # retrieve numpy array
    dataset = data.values
    y = dataset
    return y
def load_dataset2(filename):
    # load the dataset as a pandas DataFrame
    data2 = read_csv(filename, header=None)
    # retrieve numpy array
    dataset2 = data2.values
    X = dataset2
    return X
# feature selection
def select_features(X_train, y_train, X_test):
    # configure to select a subset of features
    fs = SelectKBest(score_func=mutual_info_classif, k=4)
    # learn relationship from training data
    fs.fit(X_train, y_train)
    # transform train input data
    X_train_fs = fs.transform(X_train)
    # transform test input data
    X_test_fs = fs.transform(X_test)
    return X_train_fs, X_test_fs, fs
# load the dataset
Insole = pd.read_csv('1119_Rwalk40s1_list.txt', header=None, low_memory=False)
SIData = np.asarray(Insole)
df = pd.read_csv('1119_Rwalk40s1.csv', low_memory=False)
columns = ['Fx','Fy','Fz','Mx','My','Mz']
selected_df = df[columns]
FCDatas = selected_df
SmartInsole = np.array(SIData)
FCData = np.array(FCDatas)
scaler_x = MinMaxScaler(feature_range=(0, 1))
scaler_x.fit(SmartInsole)
xscale = scaler_x.transform(SmartInsole)
scaler_y = MinMaxScaler(feature_range=(0, 1))
scaler_y.fit(FCData)
yscale = scaler_y.transform(FCData)
SIDataPCA = xscale
pca = PCA(n_components=89)
pca.fit(SIDataPCA)
SIdata_pca = pca.transform(SIDataPCA)
X = SIdata_pca
y = yscale
# split into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=1)
# feature selection
X_train_fs, X_test_fs, fs = select_features(X_train, y_train, X_test)
# fit the model
model = LogisticRegression(solver='liblinear')
model.fit(X_train_fs, y_train)
# evaluate the model
yhat = model.predict(X_test_fs)
# evaluate predictions
accuracy = accuracy_score(y_test, yhat)
print('Accuracy: %.2f' % (accuracy*100))
How can I get the correct PCA-MI result data?
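A minimal sketch of the PCA-MI pipeline described above, under two assumptions: xscale is the scaled insole matrix from the code, and the target is a single continuous force-plate channel, so mutual_info_regression (rather than mutual_info_classif, which expects class labels) is the appropriate scorer:
import numpy as np
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest, mutual_info_regression
# Keep enough principal components to explain 98% of the variance
pca = PCA(n_components=0.98, svd_solver='full')
X_pca = pca.fit_transform(xscale)  # xscale: scaled 89-sensor insole data
print(X_pca.shape[1], 'components retained')
# Rank the retained components by mutual information with one
# continuous force-plate channel, here Fz (yscale column index 2)
fs = SelectKBest(score_func=mutual_info_regression, k=4)
X_fs = fs.fit_transform(X_pca, yscale[:, 2])
print('MI scores:', fs.scores_)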

How to get SHAP values for each class on a multiclass classification problem in python

I have the following dataframe:
import pandas as pd
import random
import xgboost
import shap
from sklearn.model_selection import train_test_split
foo = pd.DataFrame({'id': [1,2,3,4,5,6,7,8,9,10],
                    'var1': random.sample(range(1, 100), 10),
                    'var2': random.sample(range(1, 100), 10),
                    'var3': random.sample(range(1, 100), 10),
                    'class': ['a','a','a','a','a','b','b','c','c','c']})
I want to run a classification algorithm on it to predict the 3 classes, so I split my dataset into train and test and run an XGBoost model:
cl_cols = foo.filter(regex='var').columns
X_train, X_test, y_train, y_test = train_test_split(foo[cl_cols],
                                                    foo[['class']],
                                                    test_size=0.33, random_state=42)
model = xgboost.XGBClassifier(objective="binary:logistic")
model.fit(X_train, y_train)
Now I would like to get the mean SHAP values for each class, instead of the mean from the absolute SHAP values generated from this code:
shap_values = shap.TreeExplainer(model).shap_values(X_test)
shap.summary_plot(shap_values, X_test)
Also, the plot labels the classes as 0, 1, 2. How can I know which of the original classes 0, 1 and 2 correspond to?
Because this code:
shap.summary_plot(shap_values, X_test,
                  class_names=['a', 'b', 'c'])
gives [summary plot with legend a, b, c]
and this code:
shap.summary_plot(shap_values, X_test,
                  class_names=['b', 'c', 'a'])
gives [the same summary plot with legend b, c, a]
So I am not sure about the legend anymore.
Any ideas ?
By doing some research and with the help of this post and @Alessandro Nesti's answer, here is my solution:
import numpy as np
foo = pd.DataFrame({'id': [1,2,3,4,5,6,7,8,9,10],
                    'var1': random.sample(range(1, 100), 10),
                    'var2': random.sample(range(1, 100), 10),
                    'var3': random.sample(range(1, 100), 10),
                    'class': ['a','a','a','a','a','b','b','c','c','c']})
cl_cols = foo.filter(regex='var').columns
X_train, X_test, y_train, y_test = train_test_split(foo[cl_cols],
                                                    foo[['class']],
                                                    test_size=0.33, random_state=42)
model = xgboost.XGBClassifier(objective="multi:softmax")
model.fit(X_train, y_train)
def get_ABS_SHAP(df_shap, df):
    # Make a copy of the input data
    shap_v = pd.DataFrame(df_shap)
    feature_list = df.columns
    shap_v.columns = feature_list
    df_v = df.copy().reset_index().drop('index', axis=1)
    # Determine the correlation in order to plot with different colors
    corr_list = list()
    for i in feature_list:
        b = np.corrcoef(shap_v[i], df_v[i])[1][0]
        corr_list.append(b)
    corr_df = pd.concat([pd.Series(feature_list), pd.Series(corr_list)], axis=1).fillna(0)
    # Make a data frame. Column 1 is the feature, and Column 2 is the correlation coefficient
    corr_df.columns = ['Variable', 'Corr']
    corr_df['Sign'] = np.where(corr_df['Corr'] > 0, 'red', 'blue')
    shap_abs = np.abs(shap_v)
    k = pd.DataFrame(shap_abs.mean()).reset_index()
    k.columns = ['Variable', 'SHAP_abs']
    k2 = k.merge(corr_df, left_on='Variable', right_on='Variable', how='inner')
    k2 = k2.sort_values(by='SHAP_abs', ascending=True)
    k2_f = k2[['Variable', 'SHAP_abs', 'Corr']]
    k2_f['SHAP_abs'] = k2_f['SHAP_abs'] * np.sign(k2_f['Corr'])
    k2_f.drop(columns='Corr', inplace=True)
    k2_f.rename(columns={'SHAP_abs': 'SHAP'}, inplace=True)
    return k2_f
shap_values = shap.TreeExplainer(model).shap_values(X_test)  # recompute for the retrained multi:softmax model
foo_all = pd.DataFrame()
for k, v in list(enumerate(model.classes_)):
    foo = get_ABS_SHAP(shap_values[k], X_test)
    foo['class'] = v
    foo_all = pd.concat([foo_all, foo])
import plotly_express as px
px.bar(foo_all, x='SHAP', y='Variable', color='class')
which results in [a bar chart of the signed mean SHAP value per variable, colored by class]
I had the same question, perhaps this issue can help: https://github.com/slundberg/shap/issues/764
I haven't tested it yet, but it seems the order should be the same as the order you would have when calling model.predict_proba(). In the link above it is suggested to use the class_names=model.classes_ option of the summary plot.
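In code, that suggestion looks like this (assuming the shap_values and X_test from the question):
shap.summary_plot(shap_values, X_test, class_names=model.classes_)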
This is an updated version of @quant's code:
import pandas as pd
import random
import numpy as np
import xgboost
import shap
from sklearn.model_selection import train_test_split
import plotly_express as px
foo = pd.DataFrame({'id': [1,2,3,4,5,6,7,8,9,10],
                    'var1': random.sample(range(1, 100), 10),
                    'var2': random.sample(range(1, 100), 10),
                    'var3': random.sample(range(1, 100), 10),
                    'class': ['a','a','a','a','a','b','b','c','c','c']})
foo['class'], _ = pd.factorize(foo['class'], sort = True)
cl_cols = foo.filter(regex='var').columns
X_train, X_test, y_train, y_test = train_test_split(foo[cl_cols],
                                                    foo[['class']],
                                                    test_size=0.33, random_state=42)
model = xgboost.XGBClassifier(objective="multi:softmax")
model.fit(X_train, y_train)
shap_values = shap.TreeExplainer(model).shap_values(X_test)
def get_ABS_SHAP(df_shap, df):
    # Make a copy of the input data
    shap_v = pd.DataFrame(df_shap)
    feature_list = df.columns
    shap_v.columns = feature_list
    df_v = df.copy().reset_index().drop('index', axis=1)
    # Determine the correlation in order to plot with different colors
    corr_list = list()
    for i in feature_list:
        b = np.corrcoef(shap_v[i], df_v[i])[1][0]
        corr_list.append(b)
    corr_df = pd.concat([pd.Series(feature_list), pd.Series(corr_list)], axis=1).fillna(0)
    # Make a data frame. Column 1 is the feature, and Column 2 is the correlation coefficient
    corr_df.columns = ['Variable', 'Corr']
    corr_df['Sign'] = np.where(corr_df['Corr'] > 0, 'red', 'blue')
    shap_abs = np.abs(shap_v)
    k = pd.DataFrame(shap_abs.mean()).reset_index()
    k.columns = ['Variable', 'SHAP_abs']
    k2 = k.merge(corr_df, left_on='Variable', right_on='Variable', how='inner')
    k2 = k2.sort_values(by='SHAP_abs', ascending=True)
    k2_f = k2[['Variable', 'SHAP_abs', 'Corr']]
    k2_f['SHAP_abs'] = k2_f['SHAP_abs'] * np.sign(k2_f['Corr'])
    k2_f.drop(columns='Corr', inplace=True)
    k2_f.rename(columns={'SHAP_abs': 'SHAP'}, inplace=True)
    return k2_f
foo_all = pd.DataFrame()
for k, v in list(enumerate(model.classes_)):
    foo = get_ABS_SHAP(shap_values[k], X_test)
    foo['class'] = v
    foo_all = pd.concat([foo_all, foo])
px.bar(foo_all, x='SHAP', y='Variable', color='class')
SHAP values are returned as a list. You can access the SHAP values for each class via its index in that list.
For the summary plot of your Class 0, the code would be
shap.summary_plot(shap_values[0], X_test)
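For reference, a minimal sketch (assuming the shap_values, X_test and model from above) that computes the signed mean SHAP value per feature for each class, rather than the mean absolute value:
import pandas as pd
# One row per feature, one column per class; positive values push
# predictions toward that class on average
mean_shap = pd.DataFrame(
    {c: shap_values[i].mean(axis=0) for i, c in enumerate(model.classes_)},
    index=X_test.columns,
)
print(mean_shap)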

Input data cannot be a list XGBoost

Here is my code.
import pandas as pd
import numpy as np
import json
from xgboost import XGBRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.preprocessing import StandardScaler
training_data = pd.read_csv('/Users/aus10/Desktop/MLB_Data/Test_Training_Data/MLB_Training_Data.csv')
df_model = training_data.copy()
scaler = StandardScaler()
features = [['OBS', 'Runs']]
for feature in features:
    df_model[feature] = scaler.fit_transform(df_model[feature])
test_data = pd.read_csv('/Users/aus10/Desktop/MLB_Data/Test_Training_Data/Test_Data.csv')
X = training_data.iloc[:,1] #independent columns
y = training_data.iloc[:,-1] #target column
X = X.values.reshape(-1,1)
results = []
# fit final model
model = XGBRegressor(objective="reg:squarederror", random_state=42)
model.fit(X, y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=4)
y_train_pred = model.predict(X_train)
y_test_pred = model.predict(X_test)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print('MSE train: %.3f, test: %.3f' % (
    round(mean_squared_error(y_train, y_train_pred), 2),
    round(mean_squared_error(y_test, y_test_pred), 2)
))
print('R^2 train: %.3f, test: %.3f' % (r2_score(y_train, y_train_pred), r2_score(y_test, y_test_pred)))
# define one new data instance
index = 0
count = 0
while count < len(test_data):
    team = test_data.loc[index].at['Team']
    OBS = test_data.loc[index].at['OBS']
    Xnew = [[OBS]]
    # make a prediction
    ynew = model.predict(Xnew)
    # show the inputs and predicted outputs
    results.append(
        {
            'Team': team,
            'Runs': (round(ynew[0], 2))
        })
    index += 1
    count += 1
sorted_results = sorted(results, key=lambda k: k['Runs'], reverse=True)
df = pd.DataFrame(sorted_results, columns=['Team', 'Runs'])
writer = pd.ExcelWriter('/Users/aus10/Desktop/MLB_Data/ML/Results/Projected_Runs_XGBoost.xlsx', engine='xlsxwriter') # pylint: disable=abstract-class-instantiated
df.to_excel(writer, sheet_name='Sheet1', index=False)
df.style.set_properties(**{'text-align': 'center'})
pd.set_option('display.max_colwidth', 100)
pd.set_option('display.width', 1000)
writer.save()
and the error I'm getting is TypeError: Input data can not be a list.
The data coming from test_data is a CSV with a team name and an OBS value, which is a float, like this: NYY 0.324.
Every way to solve it I've seen is to put it in a 2D array like I did (Xnew = [[ OBS ]]), but I'm still getting the error.
Is there something else I need to do to the test_data coming in? I tried using values.reshape, but that didn't fix it either.
You need to transform your Xnew:
Xnew = np.array(Xnew).reshape((1,-1))
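In the question's loop, that change would look like this (a sketch; np is already imported in the question):
# Inside the while loop: build a 2-D numpy array rather than a nested
# Python list; shape (1, 1) means one sample with one feature
OBS = test_data.loc[index].at['OBS']
Xnew = np.array([[OBS]]).reshape((1, -1))
ynew = model.predict(Xnew)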

Logistic regression gives me precision of 0.55. What's wrong with my code?

I copied columns from data frame z, made dummies, and am trying to predict click, a 0/1 variable, with balanced train and test sets. Where did I go wrong?
df = z[['user_state', 'device_maker', 'day_of_week', 'device_area_zscore', 'Age_zscore', 'consumption_zscore', 'click']].copy()
day_dummy = pd.get_dummies(df["day_of_week"])
state_dummy = pd.get_dummies(df["user_state"])
maker_dummy = pd.get_dummies(df["device_maker"])
combined_df = pd.concat([df, day_dummy, state_dummy, maker_dummy], axis=1)
click_rows = combined_df[combined_df.click == 1]
no_click_rows = combined_df[combined_df.click == 0]
no_click_rows = no_click_rows.sample(frac=1, replace=False, random_state=1)
final_df = pd.concat([click_rows, no_click_rows], axis = 0)
final_df = final_df.reset_index(drop=True)
from sklearn.model_selection import train_test_split
final_df = final_df.drop(['user_state', 'device_maker', 'day_of_week'], axis = 1)
x_train, x_test, y_train, y_test = train_test_split(final_df.drop(['click'], axis = 1), final_df['click'], test_size=0.2, random_state=2)
from sklearn.linear_model import LogisticRegression
logmodel = LogisticRegression()
logmodel.fit(x_train,y_train)
predictions = logmodel.predict(x_test)
from sklearn.metrics import classification_report
print(classification_report(y_test,predictions))
Following are my suggestions:
You are missing the stratify argument in the train_test_split function; it ensures the target distribution is similar across the train/val/test data.
Logistic regression doesn't do well at detecting non-linear patterns in data. Try a tree-based model like RandomForestClassifier (see the sketch below).
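A minimal sketch combining both suggestions, assuming the final_df from the question:
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
X = final_df.drop(['click'], axis=1)
y = final_df['click']
# stratify=y keeps the click/no-click ratio the same in train and test
x_train, x_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=2, stratify=y)
# A tree-based model can capture non-linear feature interactions
rf = RandomForestClassifier(n_estimators=200, random_state=2)
rf.fit(x_train, y_train)
print(classification_report(y_test, rf.predict(x_test)))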

TypeError: Expected binary or unicode string, got 0.0

I am new to Python and the entire ecosystem. I have written a LinearClassifier script for the Lending Club data.
I get the error TypeError: Expected binary or unicode string, got 0.0 whenever the script is run.
The error seems to be raised from dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels)).
Here is the partial code:
import os
import tarfile
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from six.moves import urllib
import tensorflow as tf
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import Imputer
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
HOME_PATH = os.getcwd()
""" load the csv file with the lending data and convert to tensors """
def convert_duration(s):
    try:
        if pd.isnull(s):
            return s
        elif s[0] == '<':
            return 0.0
        elif s[:2] == '10':
            return 10.0
        else:
            return np.float(s[0])
    except TypeError:
        return np.float64(s)
def load_data(file_name):
    csv_path = os.path.join(HOME_PATH, file_name)
    csv_data = pd.read_csv(csv_path, encoding="ISO-8859-1", dtype={'desc': np.str, 'verification_status_joint': np.str, 'loan_status': np.str}, low_memory=False, na_values=[])
    # Keep only Fully Paid (paid) and Charged Off (default) loans
    loans = csv_data.loc[csv_data['loan_status'].isin(['Fully Paid', 'Charged Off'])]
    loans['loan_status'] = loans['loan_status'].apply(lambda s: np.int(s == 'Fully Paid'))  # Convert to boolean integer
    # Drop columns with only one distinct value
    for col in loans.columns:
        if loans[col].nunique() == 1:
            del loans[col]
    # Drop columns that are less than 10% populated
    for col in loans.columns:
        if (loans[col].notnull().sum() / len(loans.index)) < 0.1:
            del loans[col]
    # Remove all irrelevant columns & high-prediction columns, based on pure discretion
    loans.drop(labels=['id', 'member_id', 'grade', 'sub_grade', 'last_credit_pull_d', 'emp_title', 'url', 'desc', 'title', 'issue_d', 'earliest_cr_line', 'last_pymnt_d', 'addr_state'], axis=1, inplace=True)
    # Process the text-based variables
    # Term
    loans['term'] = loans['term'].apply(lambda s: np.float(s[1:3]))
    loans['emp_length'] = loans['emp_length'].apply(lambda s: convert_duration(s))
    # Change zip code to just the first 3 significant digits
    loans['zip_code'] = loans['zip_code'].apply(lambda s: np.float(s[:3]))
    loans.fillna('', inplace=True)
    loan_data = shuffle(loans)
    X = loan_data.drop(labels=['loan_status'], axis=1)
    Y = loan_data['loan_status']
    ## consider processing tensorflow feature columns here and returning one response, standardised in one place
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=42)
    # scaler = StandardScaler()
    # X_train = scaler.fit_transform(X_train)
    # X_test = scaler.fit_transform(X_test)
    return (X_train, Y_train), (X_test, Y_test)
def my_input_fn(features, labels, batch_size, shuffle=True):
    # consider changing categorical columns and all
    dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
    dataset = dataset.shuffle(buffer_size=100000).repeat(count=None).batch(batch_size)
    return dataset.make_one_shot_iterator().get_next()
def my_eval_fn(features, labels, batch_size, shuffle=True):
    # consider changing categorical columns and all
    dataset = tf.data.Dataset.from_tensor_slices((features, labels))
    dataset = dataset.batch(batch_size)
    return dataset.make_one_shot_iterator().get_next()
#Start on calls to make data available
(X_train, Y_train), (X_test, Y_test) = load_data("loan_data.csv")
print(dict(X_train))
my_feature_columns = []
numerical_columns = ['loan_amnt', 'funded_amnt', 'funded_amnt_inv', 'int_rate', 'installment',
                     'annual_inc', 'dti', 'delinq_2yrs', 'inq_last_6mths', 'mths_since_last_delinq',
                     'mths_since_last_record', 'open_acc', 'pub_rec', 'revol_bal', 'revol_util',
                     'total_acc', 'total_pymnt', 'total_pymnt_inv', 'total_rec_prncp', 'total_rec_int',
                     'total_rec_late_fee', 'recoveries', 'collection_recovery_fee', 'last_pymnt_amnt',
                     'collections_12_mths_ex_med', 'mths_since_last_major_derog', 'acc_now_delinq',
                     'tot_coll_amt', 'tot_cur_bal', 'total_rev_hi_lim']
categorical_columns = ['home_ownership', 'verification_status', 'pymnt_plan', 'purpose',
                       'initial_list_status', 'application_type']
for key in numerical_columns:
    my_feature_columns.append(tf.feature_column.numeric_column(key=key))
for key in categorical_columns:
    my_feature_columns.append(tf.feature_column.categorical_column_with_hash_bucket(key=key, hash_bucket_size=10))
classifier = tf.estimator.LinearClassifier(
    feature_columns=my_feature_columns
)
classifier.train(
    input_fn=lambda: my_input_fn(X_train, Y_train, 100),
    steps=100
)
eval_result = classifier.evaluate(
    input_fn=lambda: my_eval_fn(X_test, Y_test, 100)
)
print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))
Any help is appreciated.
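One plausible cause, offered as an assumption rather than a confirmed diagnosis: loans.fillna('', inplace=True) in load_data writes empty strings into numeric columns too, leaving object-dtype columns that mix floats with strings; from_tensor_slices then tries to build string tensors and fails on float values such as 0.0. A sketch that fills missing values per dtype instead:
import numpy as np
# Fill missing values per dtype so every column keeps a single type:
# numeric columns get 0.0, everything else gets an empty string
for col in loans.columns:
    if np.issubdtype(loans[col].dtype, np.number):
        loans[col] = loans[col].fillna(0.0)
    else:
        loans[col] = loans[col].fillna('')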
