Building a network using Lasagne and Theano - Python

I am trying to build a network, but I am getting the following error.
This is the building and training code.
The network was built following the code from this video.
The data was created using this GitHub repository.
The training data contains images for 24 classes and the test data contains labels for 24 classes.
import os
import gzip  # needed by the loaders below; missing from the original paste
import numpy as np
import lasagne
import theano
import theano.tensor as T

def load_mnist_images(filename):
    with gzip.open(filename, 'rb') as f:
        data = np.frombuffer(f.read(), np.uint8, offset=16)
    data = data.reshape(-1, 1, 28, 28)
    print(type(data))
    return data / np.float32(256)

def load_mnist_labels(filename):
    with gzip.open(filename, 'rb') as f:
        data = np.frombuffer(f.read(), np.uint8, offset=8)
    return data

def load_dataset():
    x_train = load_mnist_images('train-images-idx3-ubyte.gz')
    y_train = load_mnist_labels('train-labels-idx1-ubyte.gz')
    x_test = load_mnist_images('test-images-idx3-ubyte.gz')
    y_test = load_mnist_labels('test-labels-idx1-ubyte.gz')
    return x_train, y_train, x_test, y_test

def build_nn(input_var=None):
    l_in = lasagne.layers.InputLayer(shape=(None, 1, 28, 28), input_var=input_var)
    l_in_drop = lasagne.layers.DropoutLayer(l_in, p=0.2)
    l_hid1 = lasagne.layers.DenseLayer(l_in_drop, num_units=800,
                                       nonlinearity=lasagne.nonlinearities.rectify,
                                       W=lasagne.init.GlorotUniform())
    l_hid1_drop = lasagne.layers.DropoutLayer(l_hid1, p=0.5)
    l_hid2 = lasagne.layers.DenseLayer(l_hid1_drop, num_units=800,
                                       nonlinearity=lasagne.nonlinearities.rectify,
                                       W=lasagne.init.GlorotUniform())
    l_hid2_drop = lasagne.layers.DropoutLayer(l_hid2, p=0.5)
    l_out = lasagne.layers.DenseLayer(l_hid2_drop, num_units=24,
                                      nonlinearity=lasagne.nonlinearities.softmax)
    return l_out

if __name__ == "__main__":
    x_train, y_train, x_test, y_test = load_dataset()
    input_var = T.tensor4('inputs')
    target_var = T.tensor4('targets')  # this line turns out to be the mistake; see the fix below
    print(target_var)
    network = build_nn(input_var)
    prediction = lasagne.layers.get_output(network)
    loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
    loss = loss.mean()
    params = lasagne.layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=0.01, momentum=0.9)
    train_fn = theano.function([input_var, target_var], loss, updates=updates)
    num_training_steps = 10
    for steps in range(num_training_steps):
        train_err = train_fn(x_train, y_train)
        print("current step is " + str(steps))
And this is the error I get:
Traceback (most recent call last):
File "/home/hassan/JPG-PNG-to-MNIST-NN-Format/mnist_test.py", line 64, in <module>
loss = lasagne.objectives.categorical_crossentropy(prediction,target_var)
File "/home/hassan/anaconda3/envs/object-detection/lib/python3.7/site-packages/lasagne/objectives.py", line 181, in categorical_crossentropy
return theano.tensor.nnet.categorical_crossentropy(predictions, targets)
File "/home/hassan/anaconda3/envs/object-detection/lib/python3.7/site-packages/theano/tensor/nnet/nnet.py", line 2101, in categorical_crossentropy
raise TypeError('rank mismatch between coding and true distributions')
TypeError: rank mismatch between coding and true distributions

I solved it: there was a mistake in writing the code.
Instead of writing this
target_var = T.ivector('targets')
I wrote this
target_var = T.tensor4('targets')
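For context, Theano's categorical_crossentropy accepts the true distribution either as a 2-D one-hot matrix or as a 1-D vector of integer class indices alongside the 2-D softmax output, which is why a rank-4 target raises the rank mismatch. A minimal sketch of the corrected setup (assuming loss and updates are rebuilt exactly as above; note that load_mnist_labels returns uint8 labels, so an explicit cast to int32 is the safe way to feed a T.ivector):
target_var = T.ivector('targets')   # rank-1 integer targets match the rank-2 predictions
y_train = y_train.astype(np.int32)  # uint8 -> int32 so the ivector input accepts it
train_fn = theano.function([input_var, target_var], loss, updates=updates)
train_err = train_fn(x_train, y_train)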

Related

Getting Error in loss function while training a model in Pytorch?

I am new to machine learning and trying my hand at a categorization problem. While training the model in PyTorch I am getting the following error. Any help on the topic is much appreciated.
Error: 0D or 1D target tensor expected, multi-target not supported
# -*- coding: utf-8 -*-
import numpy as np
import torch
import torch.nn as nn
import matplotlib.pyplot as plt

def makeData(A, B, C, numOfSamples, blur):
    a = [A[0] + np.random.randn(numOfSamples)*blur, A[1] + np.random.randn(numOfSamples)*blur]
    b = [B[0] + np.random.randn(numOfSamples)*blur, B[1] + np.random.randn(numOfSamples)*blur]
    c = [C[0] + np.random.randn(numOfSamples)*blur, C[1] + np.random.randn(numOfSamples)*blur]
    data_np = np.hstack((a, b, c)).T
    processed = np.array([np.sin(data_np[:, 0]), np.cos(data_np[:, 1])])
    # data_np = np.append(data_np, processed.T, axis=1)
    # print(data_np.shape)
    labels_np = np.vstack((np.zeros((numOfSamples, 1)), np.ones((numOfSamples, 1)), 1 + np.ones((numOfSamples, 1))))
    data = torch.tensor(data_np).float()
    labels = torch.tensor(labels_np).float()
    return (data, labels)

def buildAndTrain(data, labels):
    model = nn.Sequential(
        nn.Linear(2, 4),
        nn.ReLU(),
        nn.Linear(4, 3),
        # nn.ReLU(),
        # nn.Linear(64, 3),
        nn.Softmax(dim=1)
    )
    lossfun = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    epochs = 1000
    ongoingAccuracy = np.zeros(epochs)
    for i in range(epochs):
        yHat = model(data)
        loss = lossfun(yHat, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    accuracy = torch.mean((torch.argmax(yHat, axis=1) == labels).float())*100
    print(accuracy)

A = [1, 1]
B = [5, 7]
C = [-2, 3]
numOfSamples = 100
blur = 1
data, labels = makeData(A, B, C, numOfSamples, blur)
buildAndTrain(data, labels)
plt.plot(data[np.where(labels == 0)[0], 0], data[np.where(labels == 0)[0], 1], 'rs')
plt.plot(data[np.where(labels == 1)[0], 0], data[np.where(labels == 1)[0], 1], 'bs')
plt.plot(data[np.where(labels == 2)[0], 0], data[np.where(labels == 2)[0], 1], 'ks')
Please let me know if any more details are required.
Full stack trace:
Traceback (most recent call last):
File "C:\Users\prakh\Documents\tutorials\python\pytorch\ann6.py", line 77, in
buildAndTrain(data,labels)
File "C:\Users\prakh\Documents\tutorials\python\pytorch\ann6.py", line 56, in buildAndTrain
loss = lossfun(yHat,labels)
File "C:\Users\prakh\anaconda3\lib\site-packages\torch\nn\modules\module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "C:\Users\prakh\anaconda3\lib\site-packages\torch\nn\modules\loss.py", line 1150, in forward
return F.cross_entropy(input, target, weight=self.weight,
File "C:\Users\prakh\anaconda3\lib\site-packages\torch\nn\functional.py", line 2846, in cross_entropy
return torch._C._nn.cross_entropy_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index, label_smoothing)
RuntimeError: 0D or 1D target tensor expected, multi-target not supported
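For what it's worth, the traceback points at the shape and dtype of the targets: nn.CrossEntropyLoss expects class indices of shape (N,) with dtype long, while makeData stacks labels_np as shape (N, 1) and converts it to float. A minimal sketch of a target tensor the loss would accept (a hypothetical fix, not part of the original post):
labels = torch.tensor(labels_np).squeeze(1).long()  # (N, 1) float -> (N,) long class indices
# Note: CrossEntropyLoss applies log-softmax internally, so the trailing
# nn.Softmax(dim=1) layer would normally be removed from the model as well.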

timm: AssertionError: Batch size should be even when using this

I am implementing a simple example of creating a model from scratch using timm. My batch size is even, but I am still getting an error. I am following the tutorial below:
https://gist.github.com/Chris-hughes10/a9e5ec2cd7e7736c651bf89b5484b4a9
import argparse
from pathlib import Path

import timm
import timm.data
import timm.loss
import timm.optim
import timm.utils
import torch
import torchmetrics
from timm.scheduler import CosineLRScheduler
from pytorch_accelerated.callbacks import SaveBestModelCallback
from pytorch_accelerated.trainer import Trainer, DEFAULT_CALLBACKS

def create_datasets(image_size, data_mean, data_std, train_path, val_path):
    train_transforms = timm.data.create_transform(
        input_size=image_size,
        is_training=True,
        mean=data_mean,
        std=data_std,
        auto_augment="rand-m7-mstd0.5-inc1",
    )
    eval_transforms = timm.data.create_transform(
        input_size=image_size, mean=data_mean, std=data_std
    )
    train_dataset = timm.data.dataset.ImageDataset(
        train_path, transform=train_transforms
    )
    eval_dataset = timm.data.dataset.ImageDataset(val_path, transform=eval_transforms)
    return train_dataset, eval_dataset

class TimmMixupTrainer(Trainer):
    def __init__(self, eval_loss_fn, mixup_args, num_classes, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_loss_fn = eval_loss_fn
        self.num_updates = None
        self.mixup_fn = timm.data.Mixup(**mixup_args)
        self.accuracy = torchmetrics.Accuracy(num_classes=num_classes)
        self.ema_accuracy = torchmetrics.Accuracy(num_classes=num_classes)
        self.ema_model = None

    def create_scheduler(self):
        return timm.scheduler.CosineLRScheduler(
            self.optimizer,
            t_initial=self.run_config.num_epochs,
            cycle_decay=0.5,
            lr_min=1e-6,
            t_in_epochs=True,
            warmup_t=3,
            warmup_lr_init=1e-4,
            cycle_limit=1,
        )

    def training_run_start(self):
        # Model EMA requires the model without a DDP wrapper and before sync batchnorm conversion
        self.ema_model = timm.utils.ModelEmaV2(
            self._accelerator.unwrap_model(self.model), decay=0.9
        )
        if self.run_config.is_distributed:
            self.model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.model)

    def train_epoch_start(self):
        super().train_epoch_start()
        self.num_updates = self.run_history.current_epoch * len(self._train_dataloader)

    def calculate_train_batch_loss(self, batch):
        xb, yb = batch
        mixup_xb, mixup_yb = self.mixup_fn(xb, yb)
        return super().calculate_train_batch_loss((mixup_xb, mixup_yb))

    def train_epoch_end(self):
        self.ema_model.update(self.model)
        self.ema_model.eval()
        if hasattr(self.optimizer, "sync_lookahead"):
            self.optimizer.sync_lookahead()

    def scheduler_step(self):
        self.num_updates += 1
        if self.scheduler is not None:
            self.scheduler.step_update(num_updates=self.num_updates)

    def calculate_eval_batch_loss(self, batch):
        with torch.no_grad():
            xb, yb = batch
            outputs = self.model(xb)
            val_loss = self.eval_loss_fn(outputs, yb)
            self.accuracy.update(outputs.argmax(-1), yb)
            ema_model_preds = self.ema_model.module(xb).argmax(-1)
            self.ema_accuracy.update(ema_model_preds, yb)
        return {"loss": val_loss, "model_outputs": outputs, "batch_size": xb.size(0)}

    def eval_epoch_end(self):
        super().eval_epoch_end()
        if self.scheduler is not None:
            self.scheduler.step(self.run_history.current_epoch + 1)
        self.run_history.update_metric("accuracy", self.accuracy.compute().cpu())
        self.run_history.update_metric(
            "ema_model_accuracy", self.ema_accuracy.compute().cpu()
        )
        self.accuracy.reset()
        self.ema_accuracy.reset()

def main(data_path):
    # Set training arguments, hardcoded here for clarity
    image_size = (224, 224)
    lr = 5e-3
    smoothing = 0.1
    mixup = 0.2
    cutmix = 1.0
    batch_size = 32
    bce_target_thresh = 0.2
    num_epochs = 40

    data_path = Path(data_path)
    train_path = data_path / "train"
    val_path = data_path / "val"
    num_classes = len(list(train_path.iterdir()))

    mixup_args = dict(
        mixup_alpha=mixup,
        cutmix_alpha=cutmix,
        label_smoothing=smoothing,
        num_classes=num_classes,
    )

    # Create model using timm
    model = timm.create_model(
        "resnet50d", pretrained=False, num_classes=num_classes, drop_path_rate=0.05
    )

    # Load data config associated with the model to use in data augmentation pipeline
    data_config = timm.data.resolve_data_config({}, model=model, verbose=True)
    data_mean = data_config["mean"]
    data_std = data_config["std"]

    # Create training and validation datasets
    train_dataset, eval_dataset = create_datasets(
        train_path=train_path,
        val_path=val_path,
        image_size=image_size,
        data_mean=data_mean,
        data_std=data_std,
    )

    # Create optimizer
    optimizer = timm.optim.create_optimizer_v2(
        model, opt="lookahead_AdamW", lr=lr, weight_decay=0.01
    )

    # As we are using Mixup, we can use BCE during training and CE for evaluation
    train_loss_fn = timm.loss.BinaryCrossEntropy(
        target_threshold=bce_target_thresh, smoothing=smoothing
    )
    validate_loss_fn = torch.nn.CrossEntropyLoss()

    # Create trainer and start training
    trainer = TimmMixupTrainer(
        model=model,
        optimizer=optimizer,
        loss_func=train_loss_fn,
        eval_loss_fn=validate_loss_fn,
        mixup_args=mixup_args,
        num_classes=num_classes,
        callbacks=[
            *DEFAULT_CALLBACKS,
            SaveBestModelCallback(watch_metric="accuracy", greater_is_better=True),
        ],
    )

    trainer.train(
        per_device_batch_size=batch_size,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        num_epochs=num_epochs,
        create_scheduler_fn=trainer.create_scheduler,
    )

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Simple example of training script using timm.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    args = parser.parse_args()
    main(args.data_dir)
Traceback
100%|█████████▉| 333/334 [00:37<00:00, 9.09it/s]Traceback (most recent call last):
File "/home/cvpr/PycharmProjects/timm_tutorials/scratch_model.py", line 201, in <module>
main(args.data_dir)
File "/home/cvpr/PycharmProjects/timm_tutorials/scratch_model.py", line 188, in main
trainer.train(
File "/home/cvpr/anaconda3/envs/timm_tutorials/lib/python3.8/site-packages/pytorch_accelerated/trainer.py", line 437, in train
self._run_training()
File "/home/cvpr/anaconda3/envs/timm_tutorials/lib/python3.8/site-packages/pytorch_accelerated/trainer.py", line 641, in _run_training
self._run_train_epoch(self._train_dataloader)
File "/home/cvpr/anaconda3/envs/timm_tutorials/lib/python3.8/site-packages/pytorch_accelerated/trainer.py", line 704, in _run_train_epoch
batch_output = self.calculate_train_batch_loss(batch)
File "/home/cvpr/PycharmProjects/timm_tutorials/scratch_model.py", line 78, in calculate_train_batch_loss
mixup_xb, mixup_yb = self.mixup_fn(xb, yb)
File "/home/cvpr/anaconda3/envs/timm_tutorials/lib/python3.8/site-packages/timm/data/mixup.py", line 210, in __call__
assert len(x) % 2 == 0, 'Batch size should be even when using this'
AssertionError: Batch size should be even when using this
100%|█████████▉| 333/334 [00:37<00:00, 8.93it/s]
As you can see, the model trains fine until the last batch of the epoch. This is because the loader gathers the remaining images into the final batch, and unfortunately that final batch happens to have an odd size.
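For reference, timm's Mixup mixes pairs of samples within a batch, hence the even-size assertion. In plain PyTorch the usual remedy is to drop the incomplete final batch with drop_last=True; a minimal sketch (shown with a bare torch.utils.data.DataLoader for illustration; pytorch_accelerated constructs its own dataloaders, so the equivalent option would have to be routed through its configuration):
from torch.utils.data import DataLoader
# drop_last=True discards the final incomplete batch, so every batch that
# reaches the Mixup function has exactly batch_size (an even number) samples.
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True, drop_last=True)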

How to create Albert model?

I want to create a model for hate speech detection. The idea is to have extra layers on top of an AlbertModel. My issue is in creating the ALBERT model itself. I was basically following this guide (https://www.kaggle.com/dhruv1234/huggingface-tfbertmodel). I tried many different methods and all of them fail.
Below are my code and the exceptions; lines marked with '*' are where the exceptions are thrown. Everything I tried is in the code, with each method throwing a different error.
If you can help me fix any of the methods I tried, that would be great; any other working approach would also be welcome.
This is the link for the full repo:
https://github.com/eliesid123/NLP-HateSpeechDetection/tree/main/HateSpeechDetection
Source code:
import torch
from transformers import AlbertConfig, AlbertModel
from tensorflow.keras.optimizers import Adam
import tensorflow as tf
import numpy as np

class ModelBuilder:
    def __init__(self,
                 tokenizer,
                 dropout=0.5,
                 learningRate=12,
                 batchSize=9,
                 epochs=20,
                 validationSplit=0.2,
                 inputSize=64
                 ):
        self.Dropout = dropout
        self.LearningRate = learningRate
        self.Epochs = epochs
        self.BatchSize = batchSize
        self.InputSize = inputSize
        self.ValidationSplit = validationSplit
        self.AlbertModel = AlbertModel.from_pretrained('albert-xxlarge-v2')
        self.AlbertConfig = AlbertConfig()
        self.Tokenizer = tokenizer
        self.IsCompiled = False
        self.Model = None
        self.ModelSummary = None

    def Create(self, PRINT=False):
        # INITIAL KAGGLE GUIDE
        # input_ids = tf.keras.Input(shape=(self.InputSize,), dtype='int64')
        # attention_masks = tf.keras.Input(shape=(self.InputSize,), dtype='float')
        *# output, layer = self.AlbertModel(input_ids, attention_masks)*
        # THIS IS BASED ON HUGGINGFACE DOCUMENTATION
        # tensor = np.ndarray(shape=(self.InputSize,), dtype='long')
        # input_ids = torch.tensor(tensor)
        # tensor = np.ndarray(shape=(self.InputSize,), dtype='float')
        # attention_masks = torch.tensor(tensor)
        *# output = self.AlbertModel(input_ids, attention_masks)*
        # THIS IS FROM HUGGINGFACE DOCUMENTATION
        # sampleText = "some line............................................."
        # encoded = self.Tokenizer.EncodeSentece(sampleText)
        # input_ids = encoded['input_ids']
        # attention_masks = encoded['attention_mask']
        *# output = self.AlbertModel(torch.tensor(encoded).unsqueeze(self.BatchSize))*
        output = self.AlbertModel(self.AlbertConfig)
        output = output[1]
        output = tf.keras.layers.Dense(128, activation='relu')(output)
        output = tf.keras.layers.Dropout(self.Dropout)(output)
        output = tf.keras.layers.Dense(64, activation='relu')(output)
        output = tf.keras.layers.Dropout(self.Dropout)(output)
        output = tf.keras.layers.Dense(32, activation='relu')(output)
        output = tf.keras.layers.Dropout(self.Dropout)(output)
        output = tf.keras.layers.Dense(1, activation='sigmoid')(output)
        model = tf.keras.models.Model(inputs=[input_ids, attention_masks], outputs=output)
        self.Model = model
        self.ModelSummary = model.summary()
        if PRINT:
            print(self.ModelSummary)
Exceptions:
Using tf.keras.Input:
Exception has occurred: AttributeError
'KerasTensor' object has no attribute 'size'
File "/home/elie/Desktop/NLP/HateSpeechDetection/src/modelBuilder.py", line 44, in Create
    output, layer = self.AlbertModel(input_ids, attention_masks)
File "/home/elie/Desktop/NLP/HateSpeechDetection/src/main.py", line 16, in main
    model.Create(PRINT=True)
File "/home/elie/Desktop/NLP/HateSpeechDetection/src/main.py", line 21, in <module>
    main()
Using torch.tensor:
Exception has occurred: ValueError
not enough values to unpack (expected 2, got 1)
File "/home/elie/Desktop/NLP/HateSpeechDetection/src/modelBuilder.py", line 50, in Create
    output = self.AlbertModel(input_ids, attention_masks)
File "/home/elie/Desktop/NLP/HateSpeechDetection/src/main.py", line 16, in main
    model.Create(PRINT=True)
File "/home/elie/Desktop/NLP/HateSpeechDetection/src/main.py", line 21, in <module>
    main()
Using AlbertTokenizer.encode_plus():
Exception has occurred: ValueError
could not determine the shape of object type 'BatchEncoding'
File "/home/elie/Desktop/NLP/HateSpeechDetection/src/modelBuilder.py", line 54, in Create
    output = self.AlbertModel(torch.tensor(encoded).unsqueeze(self.BatchSize))
File "/home/elie/Desktop/NLP/HateSpeechDetection/src/main.py", line 16, in main
    model.Create(PRINT=True)
File "/home/elie/Desktop/NLP/HateSpeechDetection/src/main.py", line 21, in <module>
    main()
Using AlbertConfig:
Exception has occurred: AttributeError
'AlbertConfig' object has no attribute 'size'
File "/home/elie/Desktop/NLP/HateSpeechDetection/src/modelBuilder.py", line 59, in Create
    output = self.AlbertModel(self.AlbertConfig)
File "/home/elie/Desktop/NLP/HateSpeechDetection/src/main.py", line 16, in main
    model.Create(PRINT=True)
File "/home/elie/Desktop/NLP/HateSpeechDetection/src/main.py", line 21, in <module>
    main()
Tokenizer class
import numpy as np
from transformers import AlbertTokenizer

class Tokenizor():
    def __init__(self, maxLen=64) -> None:
        self.Tokenizer = AlbertTokenizer.from_pretrained('albert-xxlarge-v2')
        self.MaxLen = maxLen

    def EncodeAll(self, data):
        input_ids = []
        attention_masks = []
        for i in range(len(data)):
            encoded = self.EncodeSentece(data[i])
            input_ids.append(encoded['input_ids'])
            attention_masks.append(encoded['attention_mask'])
        return np.array(input_ids), np.array(attention_masks)

    def EncodeSentece(self, line):
        encoded = self.Tokenizer.encode_plus(
            line,
            add_special_tokens=True,
            max_length=self.MaxLen,
            pad_to_max_length=True,
            return_attention_mask=True,
        )
        return encoded
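One observation that may explain the tf.keras.Input attempt, offered as a sketch rather than a confirmed fix: AlbertModel is the PyTorch class, so it cannot consume KerasTensor inputs, whereas transformers also ships a TensorFlow counterpart, TFAlbertModel, that can sit inside a Keras functional model. A minimal sketch of that route (assuming TF weights are available for albert-xxlarge-v2):
import tensorflow as tf
from transformers import TFAlbertModel

albert = TFAlbertModel.from_pretrained('albert-xxlarge-v2')
input_ids = tf.keras.Input(shape=(64,), dtype='int32', name='input_ids')
attention_masks = tf.keras.Input(shape=(64,), dtype='int32', name='attention_mask')
pooled = albert(input_ids, attention_mask=attention_masks)[1]  # index 1 is the pooled output, as in output[1] above
output = tf.keras.layers.Dense(1, activation='sigmoid')(pooled)
model = tf.keras.models.Model(inputs=[input_ids, attention_masks], outputs=output)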

TypeError('No suitable SharedVariable constructor could be found.') Using lasagne and theano

Hi, I'm currently learning to code with Python and have been following a tutorial series which helped me write the code I will show below. Apologies for the length, but I cannot pinpoint the line of code which is causing this error. I have removed a lot of the comments to reduce the amount of code posted.
import numpy as np
import urllib.request
import os
import gzip
import lasagne
import theano
import theano.tensor as T

def load_dataset():
    def download(filename, source="http://yann.lecun.com/exdb/mnist/"):
        print("downloading:", filename)
        urllib.request.urlretrieve(source + filename, filename)

    def load_mnist_images(filename):
        if not os.path.exists(filename):
            download(filename)
        with gzip.open(filename, "rb") as f:
            data = np.frombuffer(f.read(), np.uint8, offset=16)
        data = data.reshape(-1, 1, 28, 28)
        return data / np.float32(256)

    def load_mnist_labels(filename):
        if not os.path.exists(filename):
            download(filename)
        with gzip.open(filename, "rb") as f:
            data = np.frombuffer(f.read(), np.uint8, offset=8)
        return data

    x_train = load_mnist_images("train-images-idx3-ubyte.gz")
    y_train = load_mnist_labels("train-labels-idx1-ubyte.gz")
    x_test = load_mnist_images("t10k-images-idx3-ubyte.gz")
    y_test = load_mnist_labels("t10k-labels-idx1-ubyte.gz")
    return x_train, y_train, x_test, y_test

x_train, y_train, x_test, y_test = load_dataset()

###### creating the handwriting digit recognition code ######
def build_nn(input_var=None):
    l_in = lasagne.layers.InputLayer(shape=(None, 1, 28, 28), input_var=input_var)
    l_in_drop = lasagne.layers.DropoutLayer(l_in, p=0.2)
    l_hid1 = lasagne.layers.DenseLayer(l_in_drop, num_units=800,
                                       nonlinearity=lasagne.nonlinearities.rectify,
                                       W=lasagne.init.GlorotUniform())
    l_hid1_drop = lasagne.layers.DropoutLayer(l_hid1, p=0.5)
    l_hid2 = lasagne.layers.DenseLayer(l_hid1_drop, num_units=800,
                                       nonlinearity=lasagne.nonlinearities.rectify,
                                       W=lasagne.init.GlorotUniform())
    l_hid2_drop = lasagne.layers.DropoutLayer(l_hid2, p=0.5)
    l_out = lasagne.layers.DenseLayer(l_hid2_drop, num_units=10,
                                      nonlinearity=lasagne.nonlinearities.softmax)
    return l_out

input_var = T.tensor4("inputs")    # an empty 4d array
target_var = T.ivector("targets")  # an empty 1d int array to represent the labels
network = build_nn(input_var)      # call the func that initializes the neural network
prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
loss = loss.mean()
params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=0.01, momentum=0.9)
train_fn = theano.function([input_var, target_var], loss, updates=updates)

num_training_steps = 10
for step in range(num_training_steps):
    train_err = train_fn(x_train, y_train)
    print("current training step is " + str(step))
The error that's stopping this code is this:
Traceback (most recent call last):
File "C:\Users\Admin\.vscode\Practice codes\machine learning\deep learning\deep learning.py", line 125, in <module>
network = build_nn(input_var) # call the func that initializes the neural network
File "C:\Users\Admin\.vscode\Practice codes\machine learning\deep learning\deep learning.py", line 95, in build_nn
l_hid1 = lasagne.layers.DenseLayer(l_in_drop, num_units= 800,
File "C:\Users\Admin\AppData\Roaming\Python\Python38\site-packages\lasagne\layers\dense.py", line 103, in __init__
self.W = self.add_param(W, (num_inputs, num_units), name="W")
File "C:\Users\Admin\AppData\Roaming\Python\Python38\site-packages\lasagne\layers\base.py", line 234, in add_param
param = utils.create_param(spec, shape, name)
File "C:\Users\Admin\AppData\Roaming\Python\Python38\site-packages\lasagne\utils.py", line 393, in create_param
spec = theano.shared(spec, broadcastable=bcast)
File "C:\Users\Admin\AppData\Roaming\Python\Python38\site-packages\theano\compile\sharedvalue.py", line 284, in shared
raise TypeError('No suitable SharedVariable constructor could be found.'
TypeError: No suitable SharedVariable constructor could be found. Are you sure all kwargs are supported? We do not support the parameter dtype or type. value="[[ 0.04638761 -0.02959769 0.02330909 ... 0.01545383 0.04763002
0.05265676]
[ 0.02095251 -0.05393376 -0.04289599 ... -0.02409102 0.02824548
-0.00327342]
[ 0.02908951 -0.02853872 -0.05450716 ... -0.02296509 0.02495853
0.02486875]
...
[-0.03704383 0.0286258 0.01158947 ... -0.02583007 -0.04925423
-0.0470493 ]
[ 0.03230407 -0.00246115 -0.05074456 ... 0.00299953 0.01883504
0.01312843]
[-0.05762409 -0.05119916 -0.02820581 ... -0.05675326 0.00458562
0.04403118]]". parameters="{'broadcastable': (False, False)}"
If it helps, I'm using Python 3.8, Lasagne 0.2.dev1 and Theano 1.0.5.
Any help would be greatly appreciated; feel free to ask if you have any questions.
Thanks in advance
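An aside, offered purely as a diagnostic sketch since the root cause is not confirmed here: Theano 1.0.5 is unmaintained and is known to clash with newer NumPy releases, and the failing call in the traceback is a plain theano.shared on a NumPy array. Reproducing that one call in isolation, together with the installed versions, narrows down whether the problem is the theano/numpy pairing rather than the network code:
import numpy as np
import theano

print(np.__version__, theano.__version__)
# The same call lasagne makes internally; if this minimal version raises the
# identical TypeError, the environment (not the network code) is at fault.
w = theano.shared(np.zeros((3, 3), dtype=theano.config.floatX), broadcastable=(False, False))
print(w.get_value().shape)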

tf.train.import_meta_graph(): unable to load some variable values

I'm using tensorflow 1.10.0. I've been following the tutorial for saving and loading a simple trained MLP model. Saving works perfectly and creates the following files:
train.ckpt.data-00000-of-00001
train.ckpt.index
train.ckpt.meta
When I try to load the train_opt or accmetric variable using:
import tensorflow as tf

with tf.Session() as sess:
    load_mod = tf.train.import_meta_graph('/home/akshay/train.ckpt.meta')
    load_mod.restore(sess, tf.train.latest_checkpoint('/home/akshay/'))
    print(tf.get_default_graph().get_tensor_by_name('train_opt:0'))
I get the following error:
Traceback (most recent call last):
File "recover_tftrain.py", line 6, in <module>
print (tf.get_default_graph().get_tensor_by_name('accmetric:0'))
File "/home/arpita/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 3515, in get_tensor_by_name
return self.as_graph_element(name, allow_tensor=True, allow_operation=False)
File "/home/arpita/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 3339, in as_graph_element
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
File "/home/arpita/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 3381, in _as_graph_element_locked
"graph." % (repr(name), repr(op_name)))
KeyError: "The name 'accmetric:0' refers to a Tensor which does not exist. The operation, 'accmetric', does not exist in the graph."
However, the loss variable loads perfectly:
Tensor("loss:0", shape=(), dtype=float32)
Are there only some specific variables that can be loaded? Or is there any issue of scope?
Complete code:
from create_batches import Batch
import extractData
import tensorflow as tf

# prepare input data and output labels for neural network
datafile = '/home/akshay/Desktop/datafile.csv'
labelfile = '/home/akshay/Desktop/labelfile.csv'
num_input = 2000
num_hidden1 = 200
num_hidden2 = 200
num_hidden3 = 200
num_output = 25
batch_size = 200
epochs = 25
batch = Batch(extractData.create_data(datafile), extractData.create_labels(labelfile), batch_size)

# create tensorflow networks
vowel_inp = tf.placeholder(dtype=tf.float32, shape=[None, 40000], name="text_inp")
label_oup = tf.placeholder(dtype=tf.int32, shape=[None], name="label_oup")
vowel_flat = tf.contrib.layers.flatten(vowel_inp)

# fully connected layers
hidden_1 = tf.layers.dense(inputs=vowel_flat, units=num_hidden1, name="hidden1", activation=tf.nn.sigmoid)
hidden_2 = tf.layers.dense(inputs=hidden_1, units=num_hidden2, name="hidden2", activation=tf.nn.sigmoid)
hidden_3 = tf.layers.dense(inputs=hidden_2, units=num_hidden3, name="hidden3", activation=tf.nn.sigmoid)
train_oup = tf.layers.dense(inputs=hidden_3, units=num_output, name="output")

# define a cost function
xentropy = tf.losses.sparse_softmax_cross_entropy(labels=label_oup, logits=train_oup)
# define a loss function
loss = tf.reduce_mean(xentropy, name="loss")
# define an optimizer
train_opt = tf.train.AdagradOptimizer(learning_rate=0.001).minimize(loss, name="train_opt")
# define accuracy metric
acc, acc_metric_update = tf.metrics.accuracy(label_oup, tf.argmax(train_oup, 1), name="accmetric")

loss_val, acc_val = 0, 0
sess = tf.Session()
sess.run(tf.local_variables_initializer())
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
for j in range(epochs):
    batch.reset()
    for i in range(int(2000/batch_size)):
        x, y = batch.getBatch()
        y = y.reshape(batch_size)
        feed_dict = {vowel_inp: x, label_oup: y}
        loss_val, _, acc_val = sess.run([loss, train_opt, acc_metric_update], feed_dict=feed_dict)
    if j % 25 == 0:
        print('Epoch:', j, 'Accuracy Val:', acc_val)
print("Final score:", sess.run(acc))

# save the model
print('Model saved in: ', saver.save(sess, '/home/akshay/train.ckpt'))
sess.close()
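A closing note on where the names likely went, hedged because the restored graph is not inspected here: minimize(..., name="train_opt") creates an operation rather than a standalone tensor, and the name passed to tf.metrics.accuracy becomes a scope, so the metric tensors typically end up as 'accmetric/value:0' and 'accmetric/update_op:0' rather than 'accmetric:0'. A quick sketch for listing the actual names after restoring the graph:
import tensorflow as tf

with tf.Session() as sess:
    load_mod = tf.train.import_meta_graph('/home/akshay/train.ckpt.meta')
    load_mod.restore(sess, tf.train.latest_checkpoint('/home/akshay/'))
    graph = tf.get_default_graph()
    # Operations are fetched by name without the ':0' output suffix
    print(graph.get_operation_by_name('train_opt'))
    # List every op whose name mentions the metric scope
    for op in graph.get_operations():
        if 'accmetric' in op.name:
            print(op.name)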
