Tensorflow, probability of predicted value? - python

Can I get the probability of a predicted value?
I can get the accuracy of my model, but I would also like to get the probability of each individual predicted value.
My code:
training_data = np.vstack(training_data)
training_target = np.vstack(training_target)
test_data = np.vstack(test_data)
test_target = np.vstack(test_target)

feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(training_data)

classifier = tf.contrib.learn.DNNClassifier(
    feature_columns=feature_columns,
    hidden_units=[10, 20, 10],
    n_classes=3,
    model_dir="/tmp/basic09",
    optimizer=tf.train.ProximalAdagradOptimizer(
        learning_rate=0.1,
        l1_regularization_strength=0.001
    )
)

def input_fn_train():
    x = tf.constant(training_data)
    y = tf.constant(training_target)
    return x, y

def input_fn_test():
    x = tf.constant(test_data)
    y = tf.constant(test_target)
    return x, y

def new_tests():
    return np.array(
        [
            [33.33, 44.44],
        ], dtype=np.float32)

classifier.fit(input_fn=input_fn_train, steps=200)

score = classifier.evaluate(input_fn=input_fn_test, steps=1)
score_accuracy = score["accuracy"]
score_loss = score["loss"]

print("Score: ", score)
print("Accuracy: ", score_accuracy)
print("Loss: ", score_loss)

predictions = list(classifier.predict_classes(input_fn=new_tests))
print("Predictions: {}".format(predictions))
So, I would like to see the probability of each individual prediction as a percentage; I am not sure whether that is possible.
Any other comments or advice are more than welcome, as I am new to TensorFlow and ML.
Thank you.

I will answer my own question; maybe someone will find it useful.
I need to use the predict_proba function, which returns an array of per-class probabilities.
training_data = np.vstack(training_data)
training_target = np.vstack(training_target)
test_data = np.vstack(test_data)
test_target = np.vstack(test_target)

feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(training_data)

classifier = tf.contrib.learn.DNNClassifier(
    feature_columns=feature_columns,
    hidden_units=[10, 20, 10],
    n_classes=3,
    model_dir="/tmp/basic09",
    optimizer=tf.train.ProximalAdagradOptimizer(
        learning_rate=0.1,
        l1_regularization_strength=0.001
    )
)

def input_fn_train():
    x = tf.constant(training_data)
    y = tf.constant(training_target)
    return x, y

def input_fn_test():
    x = tf.constant(test_data)
    y = tf.constant(test_target)
    return x, y

def new_tests():
    return np.array(
        [
            [33.33, 44.44],
        ], dtype=np.float32)

classifier.fit(input_fn=input_fn_train, steps=200)

score = classifier.evaluate(input_fn=input_fn_test, steps=1)
score_accuracy = score["accuracy"]
score_loss = score["loss"]

print("Score: ", score)
print("Accuracy: ", score_accuracy)
print("Loss: ", score_loss)

predictions = list(classifier.predict_proba(input_fn=new_tests))
print("Predictions probability: ", predictions)

Related

ValueError: Found input variables with inconsistent numbers of samples: [28332, 24]

I am currently working on multilabel text classification in Arabic using binary relevance and label powerset. After doing all the preprocessing I need, when I try to combine the chi-squared and mutual-information feature selections based on their weights, I get this error:
Found input variables with inconsistent numbers of samples: [28332, 24]
My dataset has one column with the text and 24 columns as the target, as shown in the attached image.
Here is the code I am writing:
class Classifier:
    def __init__(self):
        self.merged_df = pd.read_csv(r"D:\project\Ymal.csv", encoding='utf-8')
        self.train_df, self.test_df = train_test_split(self.merged_df, test_size=0.2, random_state=42)
        self.vectorizer = CountVectorizer()
        self.ModelsPerformance = {}

    def train(self):
        self.train_text = self.train_df['text']
        self.test_text = self.test_df['text']
        self.train_labels = self.train_df.drop(columns=['text'])
        self.test_labels = self.test_df.drop(columns=['text'])
        self.mlb = MultiLabelBinarizer()
        self.train_labels = self.mlb.fit_transform(self.train_labels)
        self.test_labels = self.mlb.transform(self.test_labels)
        self.train_text_bow = self.vectorizer.fit_transform(self.train_text)
        self.test_text_bow = self.vectorizer.transform(self.test_text)
        self.chi2_selector = SelectKBest(chi2, k='all')
        self.mi_selector = SelectKBest(mutual_info_classif, k='all')
        self.chi2_features = self.chi2_selector.fit_transform(self.train_text_bow, self.train_labels)
        self.mi_features = self.mi_selector.fit_transform(self.train_text_bow, self.train_labels)
        self.weights_chi2 = self.chi2_selector.scores_
        self.weights_mi = self.mi_selector.scores_
        self.weights = (self.weights_chi2 + self.weights_mi) / 2
        self.top_features = np.argsort(self.weights)[-4000:]  # [::-1]
        self.train_combined_features = self.train_text_bow[:, self.top_features]
        self.test_text_bow = self.vectorizer.transform(self.test_text)
        self.test_combined_features = self.test_text_bow[:, self.top_features]

    def metricsReport(self, modelName, test_labels, predictions):
        hamLoss = hamming_loss(test_labels, predictions)
        print("------" + modelName + " Model Metrics-----")
        accuracy = accuracy_score(test_labels, predictions)
        macroPrecision = precision_score(test_labels, predictions, average='macro')
        macroRecall = recall_score(test_labels, predictions, average='macro')
        macroF1 = f1_score(test_labels, predictions, average='macro')
        microPrecision = precision_score(test_labels, predictions, average='micro')
        microRecall = recall_score(test_labels, predictions, average='micro')
        microF1 = f1_score(test_labels, predictions, average='micro')
        weightedF1 = f1_score(test_labels, predictions, average='weighted')
        # print metrics
        print("Hamming Loss: {:.4f}".format(hamLoss))
        print('Accuracy: {0:.4f}'.format(accuracy))
        print('Macro Precision: {0:.4f}'.format(macroPrecision))
        print('Macro Recall: {0:.4f}'.format(macroRecall))
        print('Macro F1-measure: {0:.4f}'.format(macroF1))
        print('Micro Precision: {0:.4f}'.format(microPrecision))
        print('Micro Recall: {0:.4f}'.format(microRecall))
        print('Micro F1-measure: {0:.4f}\n'.format(microF1))
        print('Weighted F1-measure: {0:.4f}\n'.format(weightedF1))

    def fitAlgorithms(self):
        algorithms = [{'name': 'LinearSVC', 'model': LinearSVC(max_iter=12000, dual=False),
                       'params': {'C': [0.1, 1, 10]}},
                      {'name': 'KNN', 'model': KNeighborsClassifier(),
                       'params': {'n_neighbors': [5, 10, 15]}},
                      {'name': 'RandomForest', 'model': RandomForestClassifier(),
                       'params': {'n_estimators': [100, 300, 500]}},
                      {'name': 'LogisticRegression', 'model': LogisticRegression(),
                       'params': {'C': [0.1, 1, 10]}},
                      {'name': 'DecisionTree', 'model': DecisionTreeClassifier(),
                       'params': {'max_depth': [5, 10, 15]}},
                      {'name': 'MultinomialNB', 'model': MultinomialNB(),
                       'params': {'alpha': [0.1, 1, 10]}}
                      ]
        for algorithm in algorithms:
            model = algorithm['model']
            name = algorithm['name']
            params = algorithm['params']
            # Fit the binary relevance and label powerset classifiers before the grid search
            binary_relevance_classifier = BinaryRelevance(model)
            binary_relevance_classifier.fit(self.train_combined_features, self.train_labels)
            labelPowerSet_classifier = LabelPowerset(model)
            labelPowerSet_classifier.fit(self.train_combined_features, self.train_labels)
            print(f"Performing GridSearchCV for {name}...")
            clf = GridSearchCV(model, params, scoring='accuracy', cv=5)
            clf.fit(self.train_combined_features, self.train_labels)
            best_params = clf.best_params_
            print(f"Best parameters for {name}: {best_params}")
            model.set_params(**best_params)
            binary_relevance_preds = binary_relevance_classifier.predict(self.test_combined_features)
            self.metricsReport(f"Binary Relevance with {name}", self.test_labels, binary_relevance_preds)
            labelPowerSet_preds = labelPowerSet_classifier.predict(self.test_combined_features)
            self.metricsReport(f"Label Powerset with {name}", self.test_labels, labelPowerSet_preds)
            self.ModelsPerformance[name] = clf.best_score_
        return self.ModelsPerformance

# Create an instance of the Classifier
classifier = Classifier()
# Invoke the training method
classifier.train()
# Invoke the fitAlgorithms() method
classifier.fitAlgorithms()
but the basic problem is the error I referred to above.
Can anyone help me, and can anyone optimize this?
I believe the error message is clear, but I can't avoid it. I also tried checking the shapes to make sure, and they look fine:

print("train_text_bow shape:", train_text_bow.shape)
print("train_labels shape:", train_labels.shape)

train_text_bow shape: (28332, 121714)
train_labels shape: (28332, 24)

I just need to avoid this error.
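No answer was posted here, but one likely culprit (my assumption from the code above, not something confirmed by the poster) is the MultiLabelBinarizer step: iterating over a pandas DataFrame yields its 24 column names, so mlb.fit_transform(self.test_labels) produces a matrix with only 24 rows, which later clashes with the 28332 documents and gives exactly [28332, 24]. Since the 24 target columns already hold 0/1 indicators, a minimal sketch of a fix inside train() would be:

# Sketch (assumes the 24 target columns already contain 0/1 indicators).
# Use the label matrix directly instead of running it through MultiLabelBinarizer.
self.train_labels = self.train_df.drop(columns=['text']).values  # shape (n_train_docs, 24)
self.test_labels = self.test_df.drop(columns=['text']).values    # shape (n_test_docs, 24)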

Using ray tune `tune.run` with pytorch returns different optimal hyperparameters combination

I've initialized two identical ANNs with PyTorch (same structure and initial parameters), and I've noticed that hyperparameter tuning with Ray Tune returns different results for the two ANNs, even though I didn't have any random initialization.
Could someone explain what I'm doing wrong? I'll attach the code:
ANN Initialization:
class Featrues_model(nn.Module):
    def __init__(self, n_inputs, dim_hidden, n_outputs):
        super().__init__()
        self.fc1 = nn.Linear(n_inputs, dim_hidden)
        self.fc2 = nn.Linear(dim_hidden, n_outputs)

    def forward(self, X):
        X = self.fc1(X)
        X = self.fc2(X)
        return X

features_model_v1 = Featrues_model(len(list_input_variables), 5, 6)
features_model_v2 = Featrues_model(len(list_input_variables), 5, 6)
features_model_v2.load_state_dict(features_model_v1.state_dict())
Hyperparameter setting
config = {
    "lr": tune.choice([1e-2, 1e-5]),
    "weight_decay": tune.choice([1e-2, 1e-5]),
    "batch_size": tune.choice([16, 64]),
    "epochs": tune.choice([10, 50])
}
Train & Validation Dataframe
trainset = df_final.copy()
test_abs = int(len(trainset) * 0.8)
train_subset, val_subset = random_split(
    trainset, [test_abs, len(trainset) - test_abs]
)
df_train = df_final.iloc[train_subset.indices]
df_val = df_final.iloc[val_subset.indices]
Train function design
def setting_model(config, df_train, df_val, model):
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=config["lr"], weight_decay=config["weight_decay"])
    BATCH_SIZE = config["batch_size"]

    for epoch in range(config["epochs"]):
        train_epoch_loss = 0
        train_epoch_acc = 0
        step = 0

        for i in tqdm(range(0, df_train.shape[0], BATCH_SIZE)):
            batch_X = np.array(
                df_train[list_input_variables].iloc[i:i+BATCH_SIZE]
            )
            batch_X = torch.Tensor([x for x in batch_X])

            batch_Y = np.array(
                df_train[list_output_variables].iloc[i:i+BATCH_SIZE]
            )
            batch_Y = torch.Tensor([int(y) for y in batch_Y])
            batch_Y = batch_Y.type(torch.int64)

            optimizer.zero_grad()
            outputs = model.forward(batch_X)

            train_loss = criterion(outputs, batch_Y)
            train_acc = multi_acc(outputs, batch_Y)

            train_loss.backward()
            optimizer.step()

            train_epoch_loss += train_loss.item()
            train_epoch_acc += train_acc.item()
            step += 1

        # print statistics
        print(f"Epochs: {epoch}")
        print(f"Train Loss: {train_epoch_loss/len(df_train)}")
        print(f"Train Acc: {train_epoch_acc/step}")
        print("\n")

        # Validation loss
        with torch.no_grad():
            X_val = np.array(
                df_val[list_input_variables]
            )
            X_val = torch.Tensor([x for x in X_val])

            Y_val = np.array(
                df_val[list_output_variables]
            )
            Y_val = torch.Tensor([int(y) for y in Y_val])
            Y_val = Y_val.type(torch.int64)

            outputs = model.forward(X_val)
            _, predicted = torch.max(outputs.data, 1)

            total = Y_val.size(0)
            correct = (predicted == Y_val).sum().item()
            loss = criterion(outputs, Y_val)

            tune.report(loss=(loss.numpy()), accuracy=correct / total)

        print(f"Validation Loss: {loss.numpy()/len(df_val)}")
        print(f"Validation Acc: {correct / total:.3f}")

    print("Finished Training")
Hyperparameter tuning
result_v1 = tune.run(
    partial(setting_model, df_train=df_train, df_val=df_val, model=features_model_v1),
    config=config,
    fail_fast="raise",
)

result_v2 = tune.run(
    partial(setting_model, df_train=df_train, df_val=df_val, model=features_model_v2),
    config=config,
    fail_fast="raise"
)
Output
result_v1.get_best_config()
{'lr': 1e-05, 'weight_decay': 1e-05, 'epochs': 1}
result_v2.get_best_config()
{'lr': 0.01, 'weight_decay': 1e-05, 'epochs': 1}
The issue is the use of torch.random under the hood. Since you are not directly providing a weight matrix for your layers, PyTorch initializes it for you. Luckily, you can have a reproducible experiment by setting

torch.manual_seed(x)  # where x is an integer

One should use only a few random seeds, otherwise you might overfit on the random seed. See the lottery ticket hypothesis at https://arxiv.org/abs/1803.03635
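As a minimal sketch (assuming the training script above), seeding the relevant random number generators before building the models and before each tune.run call should make the two runs comparable:

import random
import numpy as np
import torch

def set_seed(seed: int = 42) -> None:
    # Seed every RNG that PyTorch training typically touches.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)

set_seed(42)
features_model_v1 = Featrues_model(len(list_input_variables), 5, 6)  # now deterministic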

Accuracy doesn't work in PyTorch (stays at 0)

The accuracy in my code (accu) doesn't work: it stays at 0, even though it should get higher.
The loss function works perfectly fine, but accu doesn't, and I don't know why it doesn't go up.
It doesn't even show an error; it just stays at 0 the whole time.
And I need the accu variable to evaluate the trained model with matplotlib.
What did I do wrong in this code?
import torch
import os
from torchvision import transforms
from PIL import Image
from os import listdir
import random
import torch.optim as optim
from torch.autograd import Variable
import torch.nn.functional as F
import torch.nn as nn
import matplotlib.pyplot as plt
from tqdm import tqdm
from torchsummary import summary

normalize = transforms.Normalize(
    mean=[0.485, 0.456, 0.406],
    std=[0.229, 0.224, 0.225])

transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(256),
    transforms.ToTensor(), normalize])

train_data_list = []
target_list = []
train_data = []
waited = False
files = listdir('catsanddogs/train/')
for i in range(len(listdir('catsanddogs/train/'))):
    if len(train_data) == 58 and not waited:
        waited = True
        continue
    f = random.choice(files)
    files.remove(f)
    img = Image.open("catsanddogs/train/" + f)
    img_tensor = transform(img)
    train_data_list.append(img_tensor)
    isSomething = 0
    isCat = 1 if 'cat' in f else 0
    isDog = 1 if 'dog' in f else 0
    if isDog == 0 and isCat == 0:
        isSomething = 2
    target = [isCat, isDog, isSomething]  # , isSomthing
    target_list.append(target)
    if len(train_data_list) >= 256:
        train_data.append((torch.stack(train_data_list), target_list))
        train_data_list = []
        target_list = []
        print('Loaded batch ', len(train_data), 'of ', int(len(listdir('catsanddogs/train/')) / 64))
        print('Percentage Done: ', 100 * len(train_data) / int(len(listdir('catsanddogs/train/')) / 64), '%')
        if len(train_data) > 2:
            break

class Netz(nn.Module):
    def __init__(self):
        super(Netz, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, kernel_size=5)
        self.conv2 = nn.Conv2d(6, 12, kernel_size=5)
        self.conv3 = nn.Conv2d(12, 18, kernel_size=5)
        self.conv4 = nn.Conv2d(18, 24, kernel_size=5)
        self.fc1 = nn.Linear(3456, 1000)
        self.fc2 = nn.Linear(1000, 3)

    def forward(self, x):
        x = self.conv1(x)
        x = F.max_pool2d(x, 2)
        x = F.relu(x)
        x = self.conv2(x)
        x = F.max_pool2d(x, 2)
        x = F.relu(x)
        x = self.conv3(x)
        x = F.max_pool2d(x, 2)
        x = F.relu(x)
        x = self.conv4(x)
        x = F.max_pool2d(x, 2)
        x = F.relu(x)
        x = x.view(-1, 3456)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return torch.sigmoid(x)

model = Netz()
if os.path.isfile('catdognetz.pt'):
    model = torch.load('catdognetz.pt')

optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

def count_parameters(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

count_parameters(model)

train_losses = []
train_accu = []

def train(epoch):
    print('\nEpoch : %d' % epoch)
    model.train()
    running_loss = 0
    correct = 0
    total = 0
    for data, target in tqdm(train_data):
        target = torch.Tensor(target)
        data = Variable(data)
        target = Variable(target)
        inputs, labels = data[0], data[1]
        optimizer.zero_grad()
        out = model(data)
        criterion = F.binary_cross_entropy
        loss = criterion(out, target)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        _, predicted = out.max(1)
        total += target.size(0)
        correct += predicted.eq(labels).sum().item()
    train_loss = running_loss / len(train_data)
    accu = 100. * correct / total
    train_accu.append(accu)
    train_losses.append(train_loss)
    print('Train Loss: %.3f | Accuracy: %.3f' % (train_loss, accu))
You should delete this line, because I do not think it is correct:

inputs, labels = data[0], data[1]

You do not need it; where do you even use inputs? Your ground-truth classes are in target, so you should compare that same variable to the predicted classes. Replace labels with target in this line:

correct += predicted.eq(target).sum().item()

Besides these steps, you should print your targets to make sure they are configured correctly.
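Building on that answer, here is a minimal sketch of the accuracy bookkeeping, under the assumption that each target row is a one-hot-style list like [isCat, isDog, isSomething] and therefore has to be reduced to a class index before comparing it with the argmax of the model output:

# Inside the training loop, after out = model(data):
_, predicted = out.max(1)           # predicted class index per image
true_class = target.argmax(dim=1)   # reduce the one-hot-style rows to class indices
total += target.size(0)
correct += predicted.eq(true_class).sum().item()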

get MAE and RMSE

I want to compute MAE and RMSE. I have tried the following:
movies = pd.read_csv('ml-20m/movies.csv')
ratings = pd.read_csv('ml-20m/ratings.csv')
df = pd.merge(movies, ratings, on='movieId', how='inner')

reader = Reader(rating_scale=(0.5, 5))
data = Dataset.load_from_df(df[['userId', 'title', 'rating']], reader)

trainSet, testSet = train_test_split(data, test_size=.25, random_state=0)

algo = SVD(random_state=0)
algo.fit(trainSet)
predictions = algo.test(testSet)

def MAE(predictions):
    return accuracy.mae(predictions, verbose=False)

def RMSE(predictions):
    return accuracy.rmse(predictions, verbose=False)

print("RMSE: ", RMSE(predictions))
print("MAE: ", MAE(predictions))
and I get this error:
"Singleton array array(<surprise.dataset.DatasetAutoFolds object at 0x000001BD67B62490>,
dtype=object) cannot be considered a valid collection."
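No answer was posted, but this particular error is what scikit-learn's train_test_split raises when handed a surprise Dataset object, so one likely fix (an assumption, not confirmed by the poster) is to use surprise's own splitter instead. A minimal sketch, assuming df has been built as above:

from surprise import SVD, Dataset, Reader, accuracy
from surprise.model_selection import train_test_split  # surprise's splitter, not sklearn's

reader = Reader(rating_scale=(0.5, 5))
data = Dataset.load_from_df(df[['userId', 'title', 'rating']], reader)

# surprise knows how to split its own Dataset objects.
trainSet, testSet = train_test_split(data, test_size=0.25, random_state=0)

algo = SVD(random_state=0)
algo.fit(trainSet)
predictions = algo.test(testSet)

print("RMSE: ", accuracy.rmse(predictions, verbose=False))
print("MAE: ", accuracy.mae(predictions, verbose=False))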

Predicting with the TensorFlow model

I am new to machine learning. I am studying the Iris dataset and used sepal length, sepal width, and petal length to predict petal width with a neural network: 3 input nodes into A1 with bias b1, 10 hidden nodes into A2 with bias b2, and 1 output node.
The variables x_val_train, x_val_test, y_val_train, and y_val_test are used for training and testing.
The main code is below.
x_val = np.array([x[0:3] for x in iris.data])
y_val = np.array([x[3] for x in iris.data])
hidden_layer_size = 10

# Generate a 1D array of random numbers in the range round(len(x_val)*0.8)
train_indices = np.random.choice(len(x_val), round(len(x_val)*0.8), replace=False)
# Create a set which does not contain the numbers in train_indices and turn it into an array
test_indices = np.array(list(set(range(len(x_val))) - set(train_indices)))
# print("Train Indexes\n", train_indices, test_indices)

x_val_train = x_val[train_indices]
x_val_test = x_val[test_indices]
y_val_train = y_val[train_indices]
y_val_test = y_val[test_indices]

x_data = tf.placeholder(shape=[None, 3], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)  # Figure out usage of None

# Create Layers for NN
A1 = tf.Variable(tf.random_normal(shape=[3, hidden_layer_size]))  # Input -> Hidden
b1 = tf.Variable(tf.random_normal(shape=[hidden_layer_size]))     # bias in Input for hidden
A2 = tf.Variable(tf.random_normal(shape=[hidden_layer_size, 1]))  # Hidden -> Output
b2 = tf.Variable(tf.random_normal(shape=[1]))                     # Hidden Layer Bias

# Generation of Model
hidden_output = tf.nn.relu(tf.add(tf.matmul(x_data, A1), b1))
final_output = tf.nn.relu(tf.add(tf.matmul(hidden_output, A2), b2))

cost = tf.reduce_mean(tf.square(y_target - final_output))
learning_rate = 0.01
model = tf.train.AdamOptimizer(learning_rate).minimize(cost)

init = tf.global_variables_initializer()
sess.run(init)

# Training Loop
loss_vec = []
test_loss = []
epoch = 500
for i in range(epoch):
    # generates len(x_val_train) random numbers
    rand_index = np.random.choice(len(x_val_train), size=batch_size)
    # Get len(x_val_train) data rows with their 3 input nodes
    rand_x = x_val_train[rand_index]
    # print(rand_index, rand_x)
    rand_y = np.transpose([y_val_train[rand_index]])

    sess.run(model, feed_dict={x_data: rand_x, y_target: rand_y})

    temp_loss = sess.run(cost, feed_dict={x_data: rand_x, y_target: rand_y})
    loss_vec.append(np.sqrt(temp_loss))

    test_temp_loss = sess.run(cost, feed_dict={x_data: x_val_test, y_target: np.transpose([y_val_test])})
    test_loss.append(np.sqrt(test_temp_loss))

    if (i+1) % 50 != 0:
        print('Generation: ' + str(i+1) + '.loss = ' + str(temp_loss))

predict = tf.argmax(tf.add(tf.matmul(hidden_output, A2), b2), 1)
test = np.matrix('2 3 4')
pred = predict.eval(session=sess, feed_dict={x_data: test})
print("pred: ", pred)

plt.plot(loss_vec, 'k-', label='Train Loss')
plt.plot(test_loss, 'r--', label='Test Loss')
plt.show()
Also, in this code:

hidden_output = tf.nn.relu(tf.add(tf.matmul(x_data, A1), b1))

I have successfully trained my model after normalizing my data, but I need to predict the output for user-supplied input. Here,

test = np.matrix('2 3 4')
pred = predict.eval(session=sess, feed_dict={x_data: test})
print("pred: ", pred)

I wrote this code to predict the result, but pred always returns 0. I also tried it on more than 100 samples and it still returns 0. Can you please tell me where I am going wrong?
Summary
Let's take a look at
predict = tf.argmax(tf.add(tf.matmul(hidden_output,A2),b2), 1)
This is (almost) equal to
predict = tf.argmax(final_output)
The argmax is the main issue. If final_output were a one-hot encoding then argmax would make sense, but final_output is just an array of scalars.
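To see why this always returns 0: final_output has shape [batch, 1], so the argmax along axis 1 is taken over a single element per row. A minimal NumPy illustration (framework-agnostic, just for intuition):

import numpy as np

# The regression head produces one scalar per example, shape (batch, 1).
final_output = np.array([[1.45], [1.93], [0.37]])

# argmax over axis 1 picks the index of the largest entry in each row,
# but each row has only one entry, so the answer is always index 0.
print(np.argmax(final_output, axis=1))  # -> [0 0 0]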
Full Working Code
Here is the full working code:
import numpy as np
import tensorflow as tf
import os
import urllib

# Data sets
IRIS_TRAINING = "iris_training.csv"
IRIS_TRAINING_URL = "http://download.tensorflow.org/data/iris_training.csv"
IRIS_TEST = "iris_test.csv"
IRIS_TEST_URL = "http://download.tensorflow.org/data/iris_test.csv"

# If the training and test sets aren't stored locally, download them.
if not os.path.exists(IRIS_TRAINING):
    raw = urllib.urlopen(IRIS_TRAINING_URL).read()
    with open(IRIS_TRAINING, "w") as f:
        f.write(raw)

if not os.path.exists(IRIS_TEST):
    raw = urllib.urlopen(IRIS_TEST_URL).read()
    with open(IRIS_TEST, "w") as f:
        f.write(raw)

training_set = tf.contrib.learn.datasets.base.load_csv_with_header(
    filename=IRIS_TRAINING, target_dtype=np.int, features_dtype=np.float32)
test_set = tf.contrib.learn.datasets.base.load_csv_with_header(
    filename=IRIS_TEST, target_dtype=np.int, features_dtype=np.float32)

x_val_train = training_set.data[:, :3]
x_val_test = test_set.data[:, :3]
y_val_train = training_set.data[:, 3].reshape([-1, 1])
y_val_test = test_set.data[:, 3].reshape([-1, 1])

x_data = tf.placeholder(shape=[None, 3], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)  # Figure out usage of None

# Create Layers for NN
hidden_layer_size = 20
A1 = tf.Variable(tf.random_normal(shape=[3, hidden_layer_size]))  # Input -> Hidden
b1 = tf.Variable(tf.random_normal(shape=[hidden_layer_size]))     # bias in Input for hidden
A2 = tf.Variable(tf.random_normal(shape=[hidden_layer_size, 1]))  # Hidden -> Output
b2 = tf.Variable(tf.random_normal(shape=[1]))                     # Hidden Layer Bias

# Generation of model
hidden_output = tf.nn.relu(tf.add(tf.matmul(x_data, A1), b1))
final_output = tf.add(tf.matmul(hidden_output, A2), b2)

loss = tf.reduce_mean(tf.square(y_target - final_output))
learning_rate = 0.01
train = tf.train.AdamOptimizer(learning_rate).minimize(loss)

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

# Training Loop
loss_vec = []
test_loss = []
epoch = 2000
batch_size = 100

def oneTrainingSession(epoch, loss_vec, test_loss, batch_size):
    rand_index = np.random.choice(len(x_val_train), size=batch_size)
    rand_x = x_val_train  # [rand_index, :]
    rand_y = y_val_train  # [rand_index, :]

    temp_loss, _ = sess.run([loss, train], feed_dict={x_data: rand_x, y_target: rand_y})
    loss_vec.append(np.sqrt(temp_loss))

    test_temp_loss = sess.run(loss, feed_dict={x_data: x_val_test, y_target: y_val_test})
    test_loss.append(np.sqrt(test_temp_loss))

    if (i+1) % 500 == 0:
        print('Generation: ' + str(i+1) + '.loss = ' + str(temp_loss))

for i in range(epoch):
    oneTrainingSession(epoch, loss_vec, test_loss, batch_size)

test = x_val_test[:3, :]
print "The test values are"
print test
print ""

pred = sess.run(final_output, feed_dict={x_data: test})
print("pred: ", pred)
Output
Generation: 500.loss = 0.12768
Generation: 1000.loss = 0.0389756
Generation: 1500.loss = 0.0370268
Generation: 2000.loss = 0.0361797
The test values are
[[ 5.9000001 3. 4.19999981]
[ 6.9000001 3.0999999 5.4000001 ]
[ 5.0999999 3.29999995 1.70000005]]
('pred: ', array([[ 1.45187187],
[ 1.92516518],
[ 0.36887735]], dtype=float32))
