I am trying to use the following code to train the Keras I3D model from this link:
https://github.com/srijandas07/i3d
The imported modules are:
import os
os.environ['KERAS_BACKEND'] = 'tensorflow'
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="3"
from keras.layers import Dense, Flatten, Dropout, Reshape
from keras import regularizers
from keras.preprocessing import image
from keras.models import Model, load_model
from keras.applications.vgg16 import preprocess_input
from keras.utils import to_categorical
from keras.optimizers import SGD
from i3d_inception import Inception_Inflated3d, conv3d_bn
from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, CSVLogger, Callback
from keras.utils import Sequence, multi_gpu_model
import random
import sys
from multiprocessing import cpu_count
import numpy as np
import glob
from skimage.io import imread
import cv2
Some definitions:
epochs = str(sys.argv[0])
#epochs = 17
model_name = sys.argv[0]
#model_name = model_name
version = sys.argv[0]
num_classes = 35
batch_size = 16
stack_size = 64
DataLoader_video_train = DataLoader_video_train
DataLoader_video_test = DataLoader_video_test
class CustomModelCheckpoint(Callback):
    def __init__(self, model_parallel, path):
        super(CustomModelCheckpoint, self).__init__()
        self.save_model = model_parallel
        self.path = path
        self.nb_epoch = 0

    def on_epoch_end(self, epoch, logs=None):
        self.nb_epoch += 1
        self.save_model.save(self.path + str(self.nb_epoch) + '.hdf5')
i3d = i3d_modified(weights = 'rgb_imagenet_and_kinetics')
model = i3d.i3d_flattened(num_classes = num_classes)
optim = SGD(lr = 0.01, momentum = 0.9)
There is an issue here with the CSVLogger:
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor = 0.1, patience = 10)
csvlogger = CSVLogger('i3d_'+model_name+'.csv')
model.compile(loss = 'categorical_crossentropy', optimizer = optim, metrics = ['accuracy'])
model_checkpoint = CustomModelCheckpoint(model, './weights_'+model_name+'/epoch_')
train_generator = DataLoader_video_train('/train_CS.txt',version, batch_size = batch_size)
test_generator = DataLoader_video_test('/test_CS.txt', version, batch_size = batch_size)
Fit the model with the generator:
model.fit_generator(
    generator=train_generator,
    #validation_data=val_generator,
    epochs=epochs,
    steps_per_epoch=17,
    callbacks=[csvlogger, reduce_lr, model_checkpoint],
    max_queue_size=48,
    workers=cpu_count() - 2,
    use_multiprocessing=True,
)
print(model.evaluate_generator(generator = test_generator))
I get the following error
runfile('D:/Clones/i3d-master/i3d_train.py', wdir='D:/Clones/i3d-master')
Reloaded modules: i3d_inception
C:\Users\sancy\Anaconda3\lib\site-packages\keras\engine\training_generator.py:47: UserWarning: Using a generator with `use_multiprocessing=True` and multiple workers may duplicate your data. Please consider using the `keras.utils.Sequence` class.
UserWarning('Using a generator with `use_multiprocessing=True`'
Traceback (most recent call last):
  File "<ipython-input-30-8f7b9cc152d8>", line 1, in <module>
    runfile('D:/Clones/i3d-master/i3d_train.py', wdir='D:/Clones/i3d-master')
  File "C:\Users\sancy\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 827, in runfile
    execfile(filename, namespace)
  File "C:\Users\sancy\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 110, in execfile
    exec(compile(f.read(), filename, 'exec'), namespace)
  File "D:/Clones/i3d-master/i3d_train.py", line 109, in <module>
    use_multiprocessing = True,
  File "C:\Users\sancy\Anaconda3\lib\site-packages\keras\legacy\interfaces.py", line 91, in wrapper
    return func(*args, **kwargs)
  File "C:\Users\sancy\Anaconda3\lib\site-packages\keras\engine\training.py", line 1418, in fit_generator
    initial_epoch=initial_epoch)
  File "C:\Users\sancy\Anaconda3\lib\site-packages\keras\engine\training_generator.py", line 102, in fit_generator
    callbacks.on_train_begin()
  File "C:\Users\sancy\Anaconda3\lib\site-packages\keras\callbacks.py", line 132, in on_train_begin
    callback.on_train_begin(logs)
  File "C:\Users\sancy\Anaconda3\lib\site-packages\keras\callbacks.py", line 1183, in on_train_begin
    **self._open_args)
OSError: [Errno 22] Invalid argument: 'i3d_D:/Clones/i3d-master/i3d_train.py.csv'
What am I doing wrong? How do you correctly use str(sys.argv[0]) and CSVLogger?
It looks like the 'i3d_' prefix is what makes the filename invalid in csvlogger = CSVLogger('i3d_'+model_name+'.csv'): since model_name = sys.argv[0] is the full script path ('D:/Clones/i3d-master/i3d_train.py'), prefixing it yields 'i3d_D:/Clones/i3d-master/i3d_train.py.csv', where the colon no longer acts as a drive separator and is illegal in a Windows filename. Try removing the i3d_ prefix, or better, stop using sys.argv[0] as a name: it is always the script path itself, and the real command-line arguments start at sys.argv[1].
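A minimal sketch of the fix, assuming you invoke the script with the epochs, model name, and version as actual command-line arguments (the argument order here is an assumption):

import os
import sys
from keras.callbacks import CSVLogger

# sys.argv[0] is the script path itself; real arguments start at sys.argv[1].
# Assumed invocation: python i3d_train.py 17 mymodel v1
epochs = int(sys.argv[1])    # fit_generator expects an int, not a str
model_name = sys.argv[2]
version = sys.argv[3]

# Alternatively, derive a filesystem-safe name from the script path itself:
# model_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]

csvlogger = CSVLogger('i3d_' + model_name + '.csv')  # now a valid filename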
Related
I trained my model in Colab and saved it with torch.save(model5, 'model.pth'), and then when I wanted to load it in PyCharm I got this error:
File "C:\Users\Amin\AppData\Local\Programs\Python\Python310\lib\zipfile.py", line 1334, in _RealGetContents
raise BadZipFile("File is not a zip file")
zipfile.BadZipFile: File is not a zip file`
Can anyone help me fix this error, please? I could not find any solution for it on the internet. I used TensorFlow to train my model, with these imports:
from tensorflow.keras.preprocessing.text import text_to_word_sequence
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation
from tensorflow.keras.layers import Embedding
from tensorflow.keras.layers import Conv1D, GlobalMaxPooling1D
My program can load the tokenizer that I built, but it won't load the model. This is my model:
max_features = 1000
maxlen = 650
embedding_dims = 50
filters = 250
kernel_size = 3
hidden_dims = 250
model5 = Sequential()
model5.add(Embedding(max_features, embedding_dims ))
model5.add(Dropout(0.2))
model5.add(Conv1D(filters, kernel_size, padding='valid', activation='relu', strides=1))
model5.add(GlobalMaxPooling1D())
model5.add(Dense(hidden_dims))
model5.add(Dropout(0.2))
model5.add(Activation('relu'))
model5.add(Dense(5))
model5.add(Activation('softmax'))
model5.compile(loss='categorical_crossentropy',
               optimizer='rmsprop',
               metrics=['accuracy'])
model5.fit(X_train, y_train,
           batch_size=32,
           epochs=14,
           validation_data=(X_test, y_test))
torch.save(model5,'model.pth')
I loaded my model in Colab and it was fine, but it didn't work in PyCharm:
relative_model_path = "model.pth"
full_model_path = os.path.join(absolute_path, relative_model_path)
model = torch.load(full_model_path)
Traceback (most recent call last):
  File "C:\Users\Amin\PycharmProjects\src\model\categorizer.py", line 25, in <module>
    model = torch.load(full_model_path)
  File "C:\Users\Amin\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\serialization.py", line 789, in load
    return _load(opened_zipfile, map_location, pickle_module, **pickle_load_args)
  File "C:\Users\Amin\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\serialization.py", line 1131, in _load
    result = unpickler.load()
  File "C:\Users\Amin\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\saving\pickle_utils.py", line 48, in deserialize_model_from_bytecode
    raise e
  File "C:\Users\Amin\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\saving\pickle_utils.py", line 46, in deserialize_model_from_bytecode
    model = saving_lib.load_model(filepath)
  File "C:\Users\Amin\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\saving\experimental\saving_lib.py", line 196, in load_model
    raise e
  File "C:\Users\Amin\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\saving\experimental\saving_lib.py", line 173, in load_model
    with zipfile.ZipFile(filepath, "r") as zipfile_to_load:
  File "C:\Users\Amin\AppData\Local\Programs\Python\Python310\lib\zipfile.py", line 1267, in __init__
    self._RealGetContents()
  File "C:\Users\Amin\AppData\Local\Programs\Python\Python310\lib\zipfile.py", line 1334, in _RealGetContents
    raise BadZipFile("File is not a zip file")
zipfile.BadZipFile: File is not a zip file
I just needed to save the model with the Keras API, model5.save('model'), not torch.save, because the model was built with tensorflow.keras.
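For completeness, a minimal sketch of the save/load round trip with the Keras API (the 'model.h5' filename is an assumption):

from tensorflow import keras

# Save with Keras, not torch.save: a Keras model is not a PyTorch
# checkpoint, so torch.load cannot read it back.
model5.save('model.h5')

# Later, e.g. in PyCharm, load it back with Keras as well:
model = keras.models.load_model('model.h5')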
I want to train a neural network using Keras, but when I build the model I get the following error:
2022-03-14 09:38:10.526372: E tensorflow/stream_executor/cuda/cuda_driver.cc:271] failed call to cuInit: CUDA_ERROR_NO_DEVICE: no CUDA-capable device is detected
2022-03-14 09:38:10.526465: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:156] kernel driver does not appear to be running on this host (HSKP02): /proc/driver/nvidia/version does not exist
2022-03-14 09:38:10.527391: I tensorflow/core/platform/cpu_feature_guard.cc:151] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
I tried to solve this error by writing
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES'] = "-1"
before importing Keras, but I still get this message. After it, my code fits the data with the validation set via model.fit(), but then I get another error:
Traceback (most recent call last):
  File "shallownet_ex.py", line 44, in <module>
    H = model.fit(trainX, trainY, validation_data=(testX, testY), batch_size=32, epochs=100, verbose=1)
  File ".../venv/lib/python3.8/site-packages/keras/utils/traceback_utils.py", line 67, in error_handler
    raise e.with_traceback(filtered_tb) from None
  File ".../venv/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py", line 1147, in autograph_handler
    raise e.ag_error_metadata.to_exception(e)
ValueError: in user code:

    File ".../venv/lib/python3.8/site-packages/keras/engine/training.py", line 1021, in train_function *
        return step_function(self, iterator)
    File ".../venv/lib/python3.8/site-packages/keras/engine/training.py", line 1010, in step_function **
        outputs = model.distribute_strategy.run(run_step, args=(data,))
    File ".../venv/lib/python3.8/site-packages/keras/engine/training.py", line 1000, in run_step **
        outputs = model.train_step(data)
    File ".../venv/lib/python3.8/site-packages/keras/engine/training.py", line 860, in train_step
        loss = self.compute_loss(x, y, y_pred, sample_weight)
    File ".../venv/lib/python3.8/site-packages/keras/engine/training.py", line 918, in compute_loss
        return self.compiled_loss(
    File ".../venv/lib/python3.8/site-packages/keras/engine/compile_utils.py", line 201, in __call__
        loss_value = loss_obj(y_t, y_p, sample_weight=sw)
    File ".../venv/lib/python3.8/site-packages/keras/losses.py", line 141, in __call__
        losses = call_fn(y_true, y_pred)
    File ".../venv/lib/python3.8/site-packages/keras/losses.py", line 245, in call **
        return ag_fn(y_true, y_pred, **self._fn_kwargs)
    File ".../venv/lib/python3.8/site-packages/keras/losses.py", line 1789, in categorical_crossentropy
        return backend.categorical_crossentropy(
    File ".../venv/lib/python3.8/site-packages/keras/backend.py", line 5083, in categorical_crossentropy
        target.shape.assert_is_compatible_with(output.shape)

    ValueError: Shapes (None, 4) and (None, 3) are incompatible
The code I'm using looks like this:
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES'] = "-1"
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from preprocesing import ImageToArrayPreprocessor, SimplePreprocesssor
from datasets import SimpleDatasetLoader
from neuralnetworks.conv import ShallowNet
from keras.optimizers import gradient_descent_v2
from imutils import paths
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True, help="path to input dataset")
args = vars(ap.parse_args())
imagePaths = list(paths.list_images(args["dataset"]))
sp = SimplePreprocesssor(32, 32)
iap = ImageToArrayPreprocessor()
sdl = SimpleDatasetLoader(preprocessors=[sp, iap])
(data, labels) = sdl.load(imagePaths, verbose=500)
data = data.astype("float") / 255.0
(trainX, testX, trainY, testY) = train_test_split(data, labels, test_size=0.25, random_state=42)
trainY = LabelBinarizer().fit_transform(trainY)
testY = LabelBinarizer().fit_transform(testY)
opt = gradient_descent_v2.SGD(learning_rate=0.005)
model = ShallowNet.build(width=32, height=32, depth=3, classes=3)
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=['acc'])
H = model.fit(trainX, trainY, validation_data=(testX, testY), batch_size=32, epochs=100, verbose=1)
The SimpleDatasetLoader just loads the images and the SimplePreprocesssor just resizes them; I think the error is inside shallownet.py, which looks like this:
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
from keras.models import Sequential
from keras.layers.convolutional import Conv2D
from keras.layers.core import Activation, Flatten, Dense
from keras import backend as K
class ShallowNet():
    @staticmethod
    def build(width, height, depth, classes):
        model = Sequential()
        inputShape = (height, width, depth)
        if K.image_data_format() == "channels_first":
            inputShape = (depth, height, width)
        model.add(Conv2D(32, (3, 3), padding="same", input_shape=inputShape))
        model.add(Activation("relu"))
        model.add(Flatten())
        model.add(Dense(classes))
        model.add(Activation("softmax"))
        return model
What I deduce is that, because my computer doesn't have a GPU, I can't perform the training and therefore can't fit the model. Is there a way to perform this training?
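For what it's worth, the CUDA lines are only informational: without a GPU, TensorFlow falls back to the CPU and training still runs. The actual failure is the ValueError at the end of the traceback: LabelBinarizer produced 4 label columns while ShallowNet was built with classes=3. A minimal sketch of the likely fix, assuming the dataset really does contain 4 classes:

from sklearn.preprocessing import LabelBinarizer

# Derive the class count from the data instead of hard-coding it; the
# traceback ("Shapes (None, 4) and (None, 3) are incompatible") suggests
# the labels have 4 classes while the model outputs only 3.
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)  # reuse the fitted binarizer on the test split

model = ShallowNet.build(width=32, height=32, depth=3, classes=len(lb.classes_))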
Now, I am trying to run the CNN code for "Deep Learning Coordinated Beamforming."
At first, the directory "DLCB_code_output/Results_mmWave_ML0" did not exist, and I got an error saying "there is no directory of ...". So I created the directories "DLCB_code_output/Results_mmWave_ML0" myself.
Now, as you can see from the title, there is a permission error on those directories.
from __future__ import division
import os, keras
os.environ["KERAS_BACKEND"] = "theano"
os.environ["THEANO_FLAGS"] = "device=gpu%d"%(1)
import numpy as np
import theano as th
import theano.tensor as T
from keras.utils import np_utils
import keras.models as models
from keras.layers.core import Reshape,Dense,Dropout,Activation
from keras.optimizers import adam
from scipy.io import loadmat, savemat
import os.path
from keras import backend as K
#from tensorflow.python.keras.layers import Input,Dense
#from tensorflow.python.keras.models import Sequential
# Model training function
def train(In_train, Out_train, In_test, Out_test,
          nb_epoch, batch_size, dr,
          num_hidden_layers, nodes_per_layer,
          loss_fn, n_BS, n_beams):
    in_shp = list(In_train.shape[1:])
    AP_models = []
    for idx in range(0, n_BS*n_beams-2, n_beams):
        idx_str = str(idx / n_beams + 1)
        model = models.Sequential()
        model.add(Dense(nodes_per_layer, activation='relu', init='he_normal',
                        name="dense" + idx_str + "1", input_shape=in_shp))
        model.add(Dropout(dr))
        for h in range(num_hidden_layers):
            model.add(Dense(nodes_per_layer, activation='relu',
                            init='he_normal', name="dense" + idx_str + "h" + str(h)))
            model.add(Dropout(dr))
        model.add(Dense(n_beams, activation='relu', init='he_normal',
                        name="dense" + idx_str + "o"))
        model.compile(loss=loss_fn, optimizer='adam')
        model.summary()

        # perform training ...
        earlyStoppingCallback = \
            keras.callbacks.EarlyStopping(monitor='val_loss',
                                          patience=5,
                                          verbose=0,
                                          mode='auto')
        filepath = 'DLCB_code_output/Results_mmWave_ML' + str(idx)
        #filepath = 'DLCB_code_output/DL_Result'
        #filepath = 'DLCB_code_output'
        history = model.fit(In_train,
                            Out_train[:, idx:idx + n_beams],
                            batch_size=batch_size,
                            nb_epoch=nb_epoch,
                            verbose=2,
                            validation_data=(In_test, Out_test[:, idx:idx + n_beams]),
                            callbacks=[
                                keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=0, save_best_only=True, mode='auto'),
                                keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, verbose=0, mode='auto')
                            ])
        # we re-load the best weights once training is finished
        model.load_weights(filepath)
        AP_models.append(model)
    return AP_models
# Reading input and output sets generated from MATLAB
In_set_file=loadmat('DLCB_dataset/DLCB_input.mat')
Out_set_file=loadmat('DLCB_dataset/DLCB_output.mat')
In_set=In_set_file['DL_input']
Out_set=Out_set_file['DL_output']
# Parameter initialization
num_user_tot=In_set.shape[0]
n_DL_size=[.001,.05,.1,.15,.2,.25,.3,.35,.4,.45,.5,.55,.6,.65,.7,.75,.8]
count=0
num_tot_TX=4
num_beams=128
for DL_size_ratio in n_DL_size:
    print(DL_size_ratio)
    count = count + 1
    DL_size = int(num_user_tot*DL_size_ratio)

    np.random.seed(2016)
    n_examples = DL_size
    num_train = int(DL_size * 0.8)
    num_test = int(num_user_tot*.2)

    train_index = np.random.choice(range(0, num_user_tot), size=num_train, replace=False)
    rem_index = set(range(0, num_user_tot)) - set(train_index)
    test_index = list(set(np.random.choice(list(rem_index), size=num_test, replace=False)))

    In_train = In_set[train_index]
    In_test = In_set[test_index]
    Out_train = Out_set[train_index]
    Out_test = Out_set[test_index]

    # Learning model parameters
    nb_epoch = 10
    batch_size = 100
    dr = 0.05  # dropout rate
    num_hidden_layers = 4
    nodes_per_layer = In_train.shape[1]
    loss_fn = 'mean_squared_error'

    # Model training
    AP_models = train(In_train, Out_train, In_test, Out_test,
                      nb_epoch, batch_size, dr,
                      num_hidden_layers, nodes_per_layer,
                      loss_fn, num_tot_TX, num_beams)

    # Model running/testing
    DL_Result = {}
    for id in range(0, num_tot_TX, 1):
        beams_predicted = AP_models[id].predict(In_test, batch_size=10, verbose=0)
        DL_Result['TX'+str(id+1)+'Pred_Beams'] = beams_predicted
        DL_Result['TX'+str(id+1)+'Opt_Beams'] = Out_test[:, id*num_beams:(id+1)*num_beams]

    DL_Result['user_index'] = test_index
    savemat('DLCB_code_output/DL_Result'+str(count), DL_Result)
I can't figure out how to deal with this problem; please give me some advice.
I am using Windows, Python 3.6.8, Keras 2.3.1 and TensorFlow 2.0.0. The traceback is:
"C:\Users\kohei\AppData\Local\Programs\Python\Python36\lib\site-packages\h5py_hl\files.py", line 173, in make_fid fid = h5f.open(name, flags, fapl=fapl) File "h5py_objects.pyx", line 54,
in h5py._objects.with_phil.wrapper File "h5py_objects.pyx", line 55,
in h5py._objects.with_phil.wrapper File "h5py\h5f.pyx", line 88,
in h5py.h5f.open OSError: Unable to open file (unable to open file:
name = 'DLCB_code_output/Results_mmWave_ML0',
errno = 13, error message = 'Permission denied', flags = 0, o_flags = 0)```
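One thing worth noting: the ModelCheckpoint filepath, 'DLCB_code_output/Results_mmWave_ML0', is exactly the path that was created by hand as a directory, and h5py cannot open a directory as a weights file, which on Windows surfaces as errno 13, 'Permission denied'. A minimal sketch of a likely fix, assuming only the parent directory needs to exist and the checkpoint gets a proper file name (the '.hdf5' extension is an assumption):

import os

# Create only the parent directory; the checkpoint target itself must be
# a file, not a pre-created directory.
os.makedirs('DLCB_code_output', exist_ok=True)

# Inside train(), give each checkpoint a file path rather than a bare
# directory-like name:
filepath = 'DLCB_code_output/Results_mmWave_ML' + str(idx) + '.hdf5'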
I'm trying to train a simple movie recommendation system using the MovieLens latest-small dataset, but I keep getting an error saying:
Traceback (most recent call last):
  File "D:\AI\Python projects\anotherone.py", line 48, in <module>
    history = model.fit([train.userId,train.movieId], train.rating, epochs=10, verbose=1)
  File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py", line 780, in fit
    steps_name='steps_per_epoch')
  File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\training_arrays.py", line 363, in model_iteration
    batch_outs = f(ins_batch)
  File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\keras\backend.py", line 3292, in __call__
    run_metadata=self.run_metadata)
  File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1458, in __call__
    run_metadata_ptr)
tensorflow.python.framework.errors_impl.InvalidArgumentError: indices[4,0] = 179819 is not in [0, 8984)
    [[{{node Movie-Embedding/embedding_lookup}}]]
Code:
# importing libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import warnings
import tensorflow as tf
from tensorflow.keras.layers import Input, Embedding, Flatten, Dot, Dense, concatenate
from tensorflow.keras.models import Model
from tensorflow.keras.models import load_model
from sklearn.model_selection import train_test_split
# ignoring warnings
warnings.filterwarnings('ignore')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# importing our data
df = pd.read_csv('D:/AI/Data sets/ml-latest-small/ratings.csv')
train, test = train_test_split(df, test_size=0.2, random_state=42)
# extracting the unique number of users and movies
n_users = len(df.userId.unique())
n_movies = len(df.movieId.unique())
# creating the embedding
movie_input = Input(shape=[1], name="Movie-Input")
movie_embedding = Embedding(n_movies+1, 5, name="Movie-Embedding")(movie_input)
movie_vec = Flatten(name="Flatten-Movie")(movie_embedding)
user_input = Input(shape=[1], name = "User-Input")
user_embedding = Embedding(n_users+1, 5, name="User-Embedding")(user_input)
user_vec = Flatten(name="Flatten-user")(user_embedding)
# concatenating the features
concat = concatenate([movie_vec,user_vec])
# creating our model
layer1 = Dense(128,activation="relu")(concat)
layer2 = Dense(32,activation="relu")(layer1)
outputLayer = Dense(1)(layer2)
model = Model([user_input,movie_input],outputLayer)
model.compile('adam','mean_squared_error')
# training the model
if os.path.exists('multiParam.h5'):
    model = load_model('multiParam.h5')
else:
    history = model.fit([train.userId, train.movieId], train.rating, epochs=10, verbose=1)
    model.save('multiParam.h5')
plt.plot(history.history['loss'])
plt.xlabel("Epochs")
plt.ylabel("Training Error")
# testing the model
print(model.evaluate([test.userId, test.movieId], test.rating))
# running some predictions
predictions = model.predict([test.userId.head(10), test.movieId.head(10)])
[print(predictions[i], test.rating.iloc[i]) for i in range(0,10)]
I'm still new to machine learning, but from my research I understood that I need to provide the number of unique values + 1 to the embedding layer, which I'm doing, but it's still not working. Any help is appreciated, thank you :)
You should try label encoding both movieId and userId in order to make them sequential integers starting from zero:
from sklearn.preprocessing import LabelEncoder
user_enc = LabelEncoder()
df['user'] = user_enc.fit_transform(df['userId'].values)
n_users = df['user'].nunique()
item_enc = LabelEncoder()
df['movie'] = item_enc.fit_transform(df['movieId'].values)
n_movies = df['movie'].nunique()
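With the encoded columns, the rest of the pipeline would then use df['user'] and df['movie'] instead of the raw IDs; a minimal sketch of the change, reusing the question's variable names:

# Split after encoding so both halves share the same contiguous IDs.
train, test = train_test_split(df, test_size=0.2, random_state=42)

# Embedding(n_users + 1, ...) / Embedding(n_movies + 1, ...) are now safe,
# because every encoded ID lies in [0, n_users) / [0, n_movies).
history = model.fit([train.user, train.movie], train.rating, epochs=10, verbose=1)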
I am trying to reproduce the Long-term Recurrent Convolutional Networks paper.
I have a pretrained Caffe model that I'd like to use in Theano.
I have the .caffemodel file and the prototxt.
I used the Lasagne example to load the Caffe weights into the Lasagne model.
This is the code I used, but the weights are not loaded into the Lasagne model.
I check it with the lasagne.layers.get_all_param_values(net) command, which throws this error:
Traceback (most recent call last):
  File "/home/anilil/projects/pycharm-community-5.0.4/helpers/pydev/pydevd.py", line 2411, in <module>
    globals = debugger.run(setup['file'], None, None, is_module)
  File "/home/anilil/projects/pycharm-community-5.0.4/helpers/pydev/pydevd.py", line 1802, in run
    launch(file, globals, locals)  # execute the script
  File "/media/anilil/Data/charm/mv_clean/Vgg_las.py", line 218, in <module>
    x=lasagne.layers.get_all_param_values(net)
  File "/usr/local/lib/python2.7/dist-packages/lasagne/layers/helper.py", line 439, in get_all_param_values
    params = get_all_params(layer, **tags)
  File "/usr/local/lib/python2.7/dist-packages/lasagne/layers/helper.py", line 353, in get_all_params
    return utils.unique(params)
  File "/usr/local/lib/python2.7/dist-packages/lasagne/utils.py", line 157, in unique
    for el in l:
  File "/usr/local/lib/python2.7/dist-packages/lasagne/layers/helper.py", line 352, in <genexpr>
    params = chain.from_iterable(l.get_params(**tags) for l in layers)
AttributeError: 'str' object has no attribute 'get_params'
Trial/test code:
# -*- coding: utf-8 -*-
import os
import sys
import lasagne
from lasagne.layers import InputLayer
from lasagne.layers import DenseLayer
from lasagne.layers import NonlinearityLayer
from lasagne.nonlinearities import rectify
from lasagne.layers import DropoutLayer
from lasagne.layers import Pool2DLayer as PoolLayer
from lasagne.layers.dnn import Conv2DDNNLayer as ConvLayer
from lasagne.nonlinearities import softmax
import theano as T
from lasagne.layers import LocalResponseNormalization2DLayer as LRN
sys.path.append('/home/anilil/projects/lstm/lisa-caffe-public/python/')
import caffe
from lasagne.utils import floatX
import numpy as np
def build_model():
    net = {}
    # Input layer
    net['input'] = InputLayer((None, 3, 227, 227))
    # First Conv Layer
    net['conv1'] = ConvLayer(net['input'], num_filters=96, filter_size=7, pad=0, flip_filters=False, stride=2, nonlinearity=rectify)
    net['pool1'] = PoolLayer(net['conv1'], pool_size=3, stride=2, mode='max')
    net['norm1'] = LRN(net['pool1'], alpha=0.0001, beta=0.75, n=5)
    # 2nd Conv Layer
    net['conv2'] = ConvLayer(net['norm1'], num_filters=384, filter_size=5, pad=0, flip_filters=False, stride=2, nonlinearity=rectify)
    net['pool2'] = PoolLayer(net['conv2'], pool_size=3, stride=2, mode='max')
    net['norm2'] = LRN(net['pool2'], alpha=0.0001, beta=0.75, n=5)
    # 3rd Conv Layer
    net['conv3'] = ConvLayer(net['norm2'], num_filters=512, filter_size=3, pad=1, flip_filters=False, nonlinearity=rectify)
    net['conv4'] = ConvLayer(net['conv3'], num_filters=512, filter_size=3, pad=1, flip_filters=False, nonlinearity=rectify)
    net['conv5'] = ConvLayer(net['conv4'], num_filters=384, filter_size=3, pad=1, flip_filters=False, nonlinearity=rectify)
    net['pool5'] = PoolLayer(net['conv5'], pool_size=3, stride=2, mode='max')
    net['fc6'] = DenseLayer(net['pool5'], num_units=4096, nonlinearity=rectify)
    net['fc6_dropout'] = DropoutLayer(net['fc6'], p=0.5)
    net['fc7'] = DenseLayer(net['fc6_dropout'], num_units=4096)
    net['fc7_dropout'] = DropoutLayer(net['fc7'], p=0.5)
    net['fc8-ucf'] = DenseLayer(net['fc7_dropout'], num_units=101, nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc8-ucf'], softmax)
    return net
if __name__ == "__main__":
    net = build_model()
    #net = load_caffe_weights(net, '/home/anilil/projects/lstm/lisa-caffe-public/examples/LRCN_activity_recognition/deploy_singleFrame.prototxt', '/home/anilil/projects/lstm/lisa-caffe-public/examples/LRCN_activity_recognition/singleframe_flow/snaps/snapshots_singleFrame_flow_v2_iter_50000.caffemodel')
    caffe.set_device(0)
    caffe.set_mode_gpu()
    net_caffe = caffe.Net('/home/anilil/projects/lstm/lisa-caffe-public/examples/LRCN_activity_recognition/deploy_singleFrame.prototxt', '/home/anilil/projects/lstm/lisa-caffe-public/examples/LRCN_activity_recognition/singleframe_flow/snaps/snapshots_singleFrame_flow_v2_iter_50000.caffemodel', caffe.TEST)
    layers_caffe = dict(zip(list(net_caffe._layer_names), net_caffe.layers))
    for name, layer in net.items():
        try:
            layer.W.set_value(layers_caffe[name].blobs[0].data, borrow=True)
            layer.b.set_value(layers_caffe[name].blobs[1].data, borrow=True)
        except AttributeError:
            continue
    print("Loaded the files without issues !!!!!!!!!!")
    x = lasagne.layers.get_all_param_values(net)
    print("Saved Weights to the file without issues !!!!!!!!!!")
Because net is a dict, lasagne.layers.get_all_param_values(net) iterates over the dict's keys, which are strings, hence the AttributeError: 'str' object has no attribute 'get_params'. Pass the final layer instead, and Lasagne will walk the graph back from it:

x = lasagne.layers.get_all_param_values(net['prob'])

or build your net in this way:
def build_model():
    # Input layer
    net = InputLayer((None, 3, 227, 227))
    # First Conv Layer
    net = ConvLayer(net, num_filters=96, filter_size=7, pad=0, flip_filters=False, stride=2, nonlinearity=rectify)
    net = PoolLayer(net, pool_size=3, stride=2, mode='max')
    ....
    net = NonlinearityLayer(net, softmax)
    return net
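With this flat version, net is itself the final layer rather than a dict, so lasagne.layers.get_all_param_values(net) can traverse the network directly.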