How to calculate regression with dates as x axis? - python

Is it good practice, for a polynomial regression with dates on the x axis, to convert the datetime values to consecutive integers from 1 to len(dataframe)? Can the predicted values be considered accurate?
data['numbered'] = ''
for i in range(1, len(data) + 1):
    data.loc[i - 1, ['numbered']] = i
X = data[['numbered']].values
y = data[['ozone']].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
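One hedged sketch of an alternative: instead of numbering the rows 1..n, convert each date to elapsed days since the first observation, so unevenly spaced dates keep their spacing in the regression (the 'date' column name is an assumption):

import pandas as pd
from sklearn.model_selection import train_test_split

# 'date' is an assumed column name holding the datetimes
data['date'] = pd.to_datetime(data['date'])
# elapsed days since the first observation; plain row numbering would lose these gaps
data['elapsed'] = (data['date'] - data['date'].min()).dt.days

X = data[['elapsed']].values
y = data[['ozone']].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)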

Related

How can I make a table for Shapley values with Python?

I made a random forest regression with a filter for the y and x variables, and I also wanted to report Shapley values by creating a graph plus a table with one column for the variable and one column for its Shapley value. The code plots the graph, but the table is not showing.
So far my code looks like this:
x = widgets.SelectMultiple(
    options=list(dataset.select_dtypes('number').columns),
    disabled=False,
    value=("NUMBER_SPOTS",)
)

def randomforest(y, x):
    x = dataset[list(x)]
    y = dataset[y]
    X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)
    shap.initjs()
    model = RandomForestRegressor(random_state=0).fit(X_train, y_train)
    y_predict = model.predict(X_test)
    # square root of the MSE, i.e. the RMSE
    print('Root Mean Squared Error:', mean_squared_error(y_test, y_predict) ** 0.5)
    explainer = shap.TreeExplainer(model)
    shap_values = explainer.shap_values(X_train)
    shap.summary_plot(shap_values, features=X_train, feature_names=X_train.columns, plot_size=[15, 8])
    shap_vals = shap_values[0, :]  # SHAP values for the first training sample only
    feature_importance = pd.DataFrame(list(zip(X_train.columns, shap_vals)), columns=['X_train', 'shap_vals'])
    feature_importance.sort_values(by=['shap_vals'], ascending=False, inplace=True)
    feature_importance
interact(randomforest, y = list(dataset.select_dtypes('number').columns), x = x)
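A hedged guess at why the table is not showing: a bare feature_importance expression only renders when it is the last line of a notebook cell, not inside a function, and shap_values[0, :] is the SHAP row for a single sample rather than a per-feature summary. A minimal sketch of both fixes, assuming the same variables as above:

import numpy as np
from IPython.display import display

# mean absolute SHAP value per feature is a common single-number summary
mean_abs_shap = np.abs(shap_values).mean(axis=0)
feature_importance = pd.DataFrame({'feature': X_train.columns, 'mean_abs_shap': mean_abs_shap})
feature_importance.sort_values(by='mean_abs_shap', ascending=False, inplace=True)
display(feature_importance)  # renders the table even from inside a function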

Error: "Found input variables with inconsistent numbers of samples: [5114, 3409]"

I wish to follow the below steps:
Load data
Divide into label & feature sets
Normalize data
Divide into test & training sets
Implement oversampling (SMOTE)
Is this the correct order of steps or am I doing anything wrong? I keep getting an error saying "Found input variables with inconsistent numbers of samples: [5114, 3409]".
This error occurs on the line: X_train, Y_train = smote.fit_sample(X_train, Y_train)
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import SMOTE

# data loading
dataset = pd.read_csv('data.csv')
#view data and check for null values
print(dataset.isnull().values.any())
print(dataset.shape)
# Dividing dataset into label and feature sets
X = dataset.drop('Bankrupt?', axis = 1) # Features
Y = dataset['Bankrupt?'] # Labels
print(type(X))
print(type(Y))
print(X.shape)
print(Y.shape)
# Normalizing numerical features so that each feature has mean 0 and variance 1
feature_scaler = StandardScaler()
X_scaled = feature_scaler.fit_transform(X)
# Dividing dataset into training and test sets
X_train, X_test, Y_train, Y_test = train_test_split( X_scaled, Y, test_size = 0.5, random_state = 100)
print(X_train.shape)
print(X_test.shape)
# note: this second split redefines X and y and overwrites the first split's variables
X = dataset.iloc[:, 1:].values
y = dataset.iloc[:, 0].values.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# Implementing oversampling to balance the dataset
print("Number of observations in each class before oversampling (training data): \n", pd.Series(Y_train).value_counts())
smote = SMOTE(random_state = 101)
X_train,Y_train = smote.fit_sample(X_train,Y_train)
print("Number of observations in each class after oversampling (training data): \n", pd.Series(Y_train).value_counts())

How to solve "ValueError: y should be a 1d array, got an array of shape (3, 5) instead." for naive Bayes?

from sklearn.model_selection import train_test_split
X = data.drop('Vickers Hardness\n(HV0.5)', axis=1)
y = data['Vickers Hardness\n(HV0.5)']
X_train, y_train, X_test, y_test = train_test_split(X, y, test_size = 0.3)
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
gnb.fit(X_train, y_train)
y_pred = gnb.predict(X_test)
ValueError: y should be a 1d array, got an array of shape (3, 5) instead.
Used data: (table omitted)
How can I rectify this error for Naive Bayes? How can I get y as a 1-D array?
The return values of train_test_split are not unpacked in the right order: it returns X_train, X_test, y_train, y_test, so with the original unpacking y_train was actually X_test (hence the 2-D shape in the error). Use:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
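A hedged follow-up: GaussianNB is a classifier and expects discrete class labels in y; if 'Vickers Hardness (HV0.5)' is a continuous measurement, every distinct value becomes its own class. Under that assumption, a regressor may be the better fit, e.g.:

from sklearn.ensemble import RandomForestRegressor

reg = RandomForestRegressor(random_state=0)
reg.fit(X_train, y_train)   # y_train is the 1-D hardness target after the corrected split
y_pred = reg.predict(X_test)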

fill missing values (nan) by regression of other columns

I've got a dataset containing a lot of missing values (NaN). I want to use linear or multilinear regression in Python to fill all the missing values. You can find the dataset here: Dataset
I have used f_regression(X_train, Y_train) to select which features I should use.
First of all, I converted df['country'] to dummy variables, then used the important features in a regression, but the results are not good.
I have defined the following function to select features:
def select_features(target, df):
    '''Take the dataset and target and print which features are important.'''
    df_dummies = pd.get_dummies(df, prefix='', prefix_sep='', drop_first=True)
    df_nonan = df_dummies.dropna()
    X = df_nonan.drop([target], axis=1)
    Y = df_nonan[target]
    X = pd.get_dummies(X)
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.30, random_state=40)
    f, pval = f_regression(X_train, Y_train)
    inds = np.argsort(pval)  # ascending p-values: most significant features first
    results = pd.DataFrame(np.vstack((f[inds], pval[inds])), columns=X_train.columns[inds], index=['f_values', 'p_values']).iloc[:, :15]
    print(results)
And I have defined the following function to predict the missing values in the target column.
def train(target, features, df, deg=1):
    '''Take the dataset, target, and features, and predict the NaN entries in the target column.'''
    df_dummies = pd.get_dummies(df, prefix='', prefix_sep='', drop_first=True)
    df_nonan = df_dummies[[*features, target]].dropna()
    X = df_nonan.drop([target], axis=1)
    Y = df_nonan[target]
    pol = PolynomialFeatures(degree=deg)
    X = X[features]
    X = pd.get_dummies(X)
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.40, random_state=40)
    X_test, X_val, Y_test, Y_val = train_test_split(X_test, Y_test, test_size=0.50, random_state=40)
    X_train_n = pol.fit_transform(X_train)
    reg = linear_model.Lasso()
    reg.fit(X_train_n, Y_train)
    X_test_n = pol.transform(X_test)  # reuse the transformer fitted on the training data
    Y_predtrain = reg.predict(X_train_n)
    print('train', r2_score(Y_train, Y_predtrain))
    Y_pred = reg.predict(X_test_n)
    print('test', r2_score(Y_test, Y_pred))
    # validation
    X_val_n = pol.transform(X_val)
    Y_valpred = reg.predict(X_val_n)
    print('val', r2_score(Y_val, Y_valpred))
    # rows where the target is missing but the chosen features are present
    X_names = X.columns.values
    X_new = df_dummies[X_names].dropna()
    X_new = X_new[df_dummies[target].isna()]
    X_new_n = pol.transform(X_new)
    Y_new = reg.predict(X_new_n)
    Y_new = pd.Series(Y_new, index=X_new.index)
    return Y_new, X_names, X_new.index
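For reference, a hypothetical call that writes the predictions back into the dataframe (the column names here are made up for illustration):

# 'gdp', 'country' and 'year' are hypothetical column names
Y_new, X_names, idx = train('gdp', ['country', 'year'], df, deg=2)
df.loc[idx, 'gdp'] = Y_new   # fill the NaNs that the model could predict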
Then I use these functions to fill the NaNs for features with p_values < 0.05.
But I am not sure whether this is a good approach.
This way, many missing values remain unpredicted.
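As a hedged alternative that fills every column at once, scikit-learn's IterativeImputer regresses each feature with missing values on the other features in round-robin fashion; a minimal sketch, assuming df is the dataframe from the question:

import pandas as pd
from sklearn.experimental import enable_iterative_imputer  # noqa: F401 (required to enable the import below)
from sklearn.impute import IterativeImputer

df_dummies = pd.get_dummies(df, drop_first=True)
imputer = IterativeImputer(max_iter=10, random_state=40)
filled = pd.DataFrame(imputer.fit_transform(df_dummies), columns=df_dummies.columns, index=df_dummies.index)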

Get values from k-means cluster after clustering

I have a dataset that I have run a K-means algorithm on (scikit-learn), and I want to build a decision tree on each cluster. I can recover the values from each cluster, but not the "class" values (I'm doing supervised learning; each element can belong to one of two classes, and I need the label associated with each row to build my trees).
Ex: unfiltered data set:
[val1 val2 class]
X_train=[val1 val2]
y_train=[class]
The clustering code is this:
X = clusterDF[clusterDF.columns[clusterDF.columns.str.contains('\'AB\'')]]
y = clusterDF['Class']
(X_train, X_test, y_train, y_test) = train_test_split(X, y, test_size=0.30)
kmeans = KMeans(n_clusters=3, n_init=5, max_iter=3000, random_state=1)
kmeans.fit(X_train, y_train)
y_pred = kmeans.predict(X_test)
And this is my (unbelievably clunky!) code for extracting the values to build the tree. The issue is the Y values: they aren't consistent with the X values.
cl = {i: np.where(kmeans.labels_ == i)[0] for i in range(kmeans.n_clusters)}
for j in range(0, len(k_means_labels_unique)):
    Xc = None
    Y = None
    #for i in range(0, len(k_means_labels_unique)):
    indexes = cl.get(j, 0)
    for i, row in X.iterrows():
        if i in indexes:
            if Xc is not None:
                Xc = np.vstack([Xc, [row['first occurrence of \'AB\''], row['similarity to \'AB\'']]])
            else:
                Xc = np.array([row['first occurrence of \'AB\''], row['similarity to \'AB\'']])
            if Y is not None:
                Y = np.vstack([Y, y[i]])
            else:
                Y = np.array(y[i])
    Xc = pd.DataFrame(data=Xc, index=range(0, len(X)), columns=['first occurrence of \'AB\'', 'similarity to \'AB\''])  # 1st row as the column names
    Y = pd.DataFrame(data=Y, index=range(0, len(Y)), columns=['Class'])
    print("\n\t-----Classifier ", j + 1, "----")
    (X_train, X_test, y_train, y_test) = train_test_split(X, Y, test_size=0.30)
    classifier = DecisionTreeClassifier(criterion='entropy', max_depth=2)
    classifier = getResults(X_train, y_train, X_test, y_test, classifier, filename='classif' + str(3 + i))
Any ideas (or downright more efficient ways) for turning the clustered data into decision trees?
I did not read all the code, but my guess is that passing an index vector into the train_test_split function would help you keep track of the samples.
X = clusterDF[clusterDF.columns[clusterDF.columns.str.contains('\'AB\'')]]
y = clusterDF['Class']
indices = clusterDF.index
X_train, X_test, y_train, y_test, indices_train, indices_test = train_test_split(X, y, indices)
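Building on that, a hedged sketch of the per-cluster loop: selecting each cluster's rows with a boolean mask keeps X and y aligned without any manual index bookkeeping (names reused from the question):

from sklearn.cluster import KMeans
from sklearn.tree import DecisionTreeClassifier

kmeans = KMeans(n_clusters=3, n_init=5, max_iter=3000, random_state=1)
labels = kmeans.fit_predict(X_train)          # cluster label for each training row

for j in range(kmeans.n_clusters):
    mask = labels == j
    X_c, y_c = X_train[mask], y_train[mask]   # rows of cluster j, still aligned
    tree = DecisionTreeClassifier(criterion='entropy', max_depth=2)
    tree.fit(X_c, y_c)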
