I am running code in a Jupyter notebook on a virtual machine (Ubuntu). Normally, when there is an error, the traceback points to the file where the error occurred and gives its path, but this time I got an error with a path that does not exist on my machine:
ValueError Traceback (most recent call last)
/tmp/ipykernel_84/10827517.py in <module>
----> 1 vae_cl.train(max_epochs=500, plan_kwargs=dict(weight_decay=0.0))
/tmp/ipykernel_84/1274271952.py in train(self, max_epochs, n_samples_per_label, check_val_every_n_epoch, train_size, validation_size, batch_size, use_gpu, plan_kwargs, **trainer_kwargs)
187 use_gpu=use_gpu,
188 check_val_every_n_epoch=check_val_every_n_epoch,
--> 189 **trainer_kwargs,
190 )
191 return runner()
/tmp/ipykernel_84/2912366039.py in __init__(self, model, training_plan, data_splitter, max_epochs, use_gpu, **trainer_kwargs)
59 self.data_splitter = data_splitter
60 self.model = model
---> 61 gpus, device = parse_use_gpu_arg(use_gpu)
62 self.gpus = gpus
63 self.device = device
ValueError: too many values to unpack (expected 2)
What I noticed is that the line it points to (---> 61) is actually in the file I am running, but I do not understand why the path looks like '/tmp/ipykernel_84/1274271952.py'.
Any idea?
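For reference, the ValueError at the bottom is ordinary tuple unpacking: it is raised whenever a call returns more values than the assignment expects. A minimal sketch with a hypothetical function (not the real code I am running) reproduces it:

def parse_use_gpu_example(use_gpu):
    # hypothetical: returns three values where the caller expects two
    return [0], "cuda:0", True

gpus, device = parse_use_gpu_example(True)
# ValueError: too many values to unpack (expected 2)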
I signed up for some pay-as-you-go credit and, to my dismay, now receive this error on both Disco and Stable Diffusion:
FileNotFoundError Traceback (most recent call last)
<ipython-input-4-191981527364> in <module>
38 import py3d_tools as p3d
39
---> 40 from helpers import DepthModel, sampler_fn
41 from k_diffusion.external import CompVisDenoiser
42 from ldm.util import instantiate_from_config
4 frames
/content/MiDaS/midas/backbones/next_vit.py in <module>
6 from .utils import activations, forward_default, get_activation
7
----> 8 file = open("./externals/Next_ViT/classification/nextvit.py", "r")
9 source_code = file.read().replace(" utils", " externals.Next_ViT.classification.utils")
10 exec(source_code)
FileNotFoundError: [Errno 2] No such file or directory: './externals/Next_ViT/classification/nextvit.py'
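For reference, the open() call in that last frame uses a relative path, so it is resolved against whatever the notebook's current working directory happens to be. A minimal check (the path is copied from the traceback, everything else is illustrative):

import os

print(os.getcwd())  # directory the relative path is resolved against
print(os.path.exists("./externals/Next_ViT/classification/nextvit.py"))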
I also receive this error on Stable Diffusion:
NameError Traceback (most recent call last)
<ipython-input-5-d64464a7a6a5> in <module>
154 if load_on_run_all and ckpt_valid:
155 local_config = OmegaConf.load(f"{ckpt_config_path}")
--> 156 model = load_model_from_config(local_config, f"{ckpt_path}", half_precision=half_precision)
157 device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
158 model = model.to(device)
<ipython-input-5-d64464a7a6a5> in load_model_from_config(config, ckpt, verbose, device, half_precision)
136 print(f"Global Step: {pl_sd['global_step']}")
137 sd = pl_sd["state_dict"]
--> 138 model = instantiate_from_config(config.model)
139 m, u = model.load_state_dict(sd, strict=False)
140 if len(m) > 0 and verbose:
Despite clearing my Google Drive and reloading Disco and Stable Diffusion (including the .ckpt file, placed correctly in the models folder as before), the same errors occur.
I am new to deep learning.
I am trying to use the deepface library on my local machine. I installed it with pip install deepface and tried Python 3.7.13, 3.8.13 and 3.9.13, all created as conda virtual environments.
However, when running the code snippet below on my local machine, I keep getting the same error. Do I need a GPU to run the library? If so, how do I set it up? None of the online guides or articles mention needing to install or set up a GPU.
I have a GeForce MX450 in my local PC.
Code:
import cv2
from deepface import DeepFace
import numpy as np

def analyse_face():
    imagepath = "happy_face_woman.png"
    image = cv2.imread(imagepath)
    face_analysis = DeepFace.analyze(image)
    print(face_analysis)

print(analyse_face())
Error:
ResourceExhaustedError Traceback (most recent call last)
~\AppData\Local\Temp\ipykernel_14196\3829791526.py in <module>
12 print(face_analysis)
13
---> 14 analyse_face()
~\AppData\Local\Temp\ipykernel_14196\3829791526.py in analyse_face()
9 imagepath = "happy_face_woman.png"
10 image = cv2.imread(imagepath)
---> 11 face_analysis = DeepFace.analyze(image)
12 print(face_analysis)
13
c:\Users\user_name\anaconda3\envs\deepFacepy37\lib\site-packages\deepface\DeepFace.py in analyze(img_path, actions, models, enforce_detection, detector_backend, prog_bar)
352
353 if 'age' in actions and 'age' not in built_models:
--> 354 models['age'] = build_model('Age')
355
356 if 'gender' in actions and 'gender' not in built_models:
c:\Users\user_name\anaconda3\envs\deepFacepy37\lib\site-packages\deepface\DeepFace.py in build_model(model_name)
61 model = models.get(model_name)
62 if model:
---> 63 model = model()
...
-> 1922 seed=self.make_legacy_seed())
1923
1924 def truncated_normal(self, shape, mean=0., stddev=1., dtype=None):
ResourceExhaustedError: failed to allocate memory [Op:AddV2]
A different error output:
ResourceExhaustedError Traceback (most recent call last)
~\AppData\Local\Temp\ipykernel_14196\3829791526.py in <module>
12 print(face_analysis)
13
---> 14 analyse_face()
~\AppData\Local\Temp\ipykernel_14196\3829791526.py in analyse_face()
9 imagepath = "happy_face_woman.png"
10 image = cv2.imread(imagepath)
---> 11 face_analysis = DeepFace.analyze(image)
12 print(face_analysis)
13
c:\Users\user_name\anaconda3\envs\deepFacepy37\lib\site-packages\deepface\DeepFace.py in analyze(img_path, actions, models, enforce_detection, detector_backend, prog_bar)
352
353 if 'age' in actions and 'age' not in built_models:
--> 354 models['age'] = build_model('Age')
355
356 if 'gender' in actions and 'gender' not in built_models:
c:\Users\user_name\anaconda3\envs\deepFacepy37\lib\site-packages\deepface\DeepFace.py in build_model(model_name)
61 model = models.get(model_name)
62 if model:
---> 63 model = model()
...
-> 1922 seed=self.make_legacy_seed())
1923
1924 def truncated_normal(self, shape, mean=0., stddev=1., dtype=None):
ResourceExhaustedError: OOM when allocating tensor with shape[7,7,512,4096] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc [Op:RandomUniform]
Additional Info
I've run the command to check my GPU usage and the details are as follows:
!nvidia-smi
Why don't you disable the GPU?
import os
os.environ["CUDA_VISIBLE_DEVICES"] = ""
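Note that it is safest to set the variable before TensorFlow is imported (deepface imports it for you), so it should be the very first thing in the notebook. A minimal sketch, reusing the image name from the question:

import os
os.environ["CUDA_VISIBLE_DEVICES"] = ""  # set before TensorFlow is loaded

from deepface import DeepFace  # deepface pulls in TensorFlow here

face_analysis = DeepFace.analyze("happy_face_woman.png")
print(face_analysis)

If you want to keep using the GPU instead, letting TensorFlow grow its memory allocation (tf.config.experimental.set_memory_growth) is another commonly suggested mitigation for OOM errors, though the MX450's small amount of VRAM may still be too little for some of the deepface models.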
I'm facing this error when running this code in Google Colab.
The error is:
InvalidArgumentError Traceback (most recent call last)
<ipython-input-14-67951c7b20d5> in <module>()
22
23 model3 = get_model3()
---> 24 model3.fit(wrap_generator3(train_generator3), steps_per_epoch=train_generator3.samples/train_generator3.batch_size, epochs=20)
25 #model3.fit_generator(generator=train_generator3, steps_per_epoch=train_generator3.samples/train_generator3.batch_size, epochs=20)
7 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/client/session.py in __init__(self, session, callable_options)
1468 try:
1469 self._handle = tf_session.TF_SessionMakeCallable(
-> 1470 session._session, options_ptr)
1471 finally:
1472 tf_session.TF_DeleteBuffer(options_ptr)
InvalidArgumentError: Requested tensor connection from unknown node: "dense_8_target:0".
How do I solve this problem?
Can anyone help me?
Thank you.
I'm trying to train on my custom dataset (a yolov3.cfg file and yolov3.weights file with labelled annotations and images) using darkflow. However, when I run tfnet = TFNet(history), it throws an error saying 'exit' is not defined.
I installed darkflow with the following steps:
In Anaconda Prompt:
git clone https://github.com/thtrieu/darkflow.git
cd darkflow
python3 setup.py build_ext --inplace
pip install
then:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import cv2
from darkflow.net.build import TFNet
history = {"model": "C:/Users/Business Intelli/Desktop/Object-Detection/Dataset/yolov3.cfg",
"load": "C:/Users/Business Intelli/Desktop/Object-Detection/Dataset/yolov3.weights",
"batch": 8,
"epoch": 50,
"gpu": 1.0,
"train": True,
"annotation": "C:/Users/Business Intelli/Desktop/Object-Detection/Dataser/Stumps",
"dataset": "C:/Users/Business Intelli/Desktop/Object-Detection/Dataser/Stumps"}
tfnet = TFNet(history)
Parsing C:/Users/Business Intelli/Desktop/Object-Detection/Dataset/yolov3.cfg
---------------------------------------------------------------------------
NameError Traceback (most recent call last)
<ipython-input-10-6f6b945047c5> in <module>
----> 1 tfnet = TFNet(history)
~\Anaconda3\lib\site-packages\darkflow\net\build.py in __init__(self, FLAGS, darknet)
56
57 if darknet is None:
---> 58 darknet = Darknet(FLAGS)
59 self.ntrain = len(darknet.layers)
60
~\Anaconda3\lib\site-packages\darkflow\dark\darknet.py in __init__(self, FLAGS)
15
16 print('Parsing {}'.format(self.src_cfg))
---> 17 src_parsed = self.parse_cfg(self.src_cfg, FLAGS)
18 self.src_meta, self.src_layers = src_parsed
19
~\Anaconda3\lib\site-packages\darkflow\dark\darknet.py in parse_cfg(self, model, FLAGS)
66 cfg_layers = cfg_yielder(*args)
67 meta = dict(); layers = list()
---> 68 for i, info in enumerate(cfg_layers):
69 if i == 0: meta = info; continue
70 else: new = create_darkop(*info)
~\Anaconda3\lib\site-packages\darkflow\utils\process.py in cfg_yielder(model, binary)
314 #-----------------------------------------------------
315 else:
--> 316 exit('Layer {} not implemented'.format(d['type']))
317
318 d['_size'] = list([h, w, c, l, flat])
NameError: name 'exit' is not defined
So I faced the same problem, and the issue is with the process.py file in the darkflow/utils folder.
Apparently exit() is not available as a built-in there, so you have to add this line to process.py:
from sys import exit
Note: if your code is reaching this point, it means the model can't read the layers. The weights file I downloaded for yolov3 gave me the same trouble, and I couldn't find a proper yolov3 weights file that works in darkflow, so I had to stick with yolo.cfg and yolo.weights.
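For context, exit is an interactive helper that the site module adds at startup rather than a true language built-in, and the Python docs recommend sys.exit() in programs; that is why importing it explicitly fixes the NameError. A sketch of the patched top of darkflow/utils/process.py (only the added import is shown, the rest of the file is unchanged):

from sys import exit  # added so the existing exit('Layer {} not implemented'...) call resolves

Calling sys.exit(...) directly at that call site would work just as well.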
I used a pretrained ImageNet VGG16 model in Keras and added my own Dense layers on top.
I'm trying to save and load the weights of the model I have trained.
The code I'm using to save the model is:
import time
start = time.time()
history = model.fit_generator(generator=train_batches,
                              epochs=epochs,
                              steps_per_epoch=steps_train,
                              #callbacks=callbacks_list,
                              validation_data=valid_batches,
                              validation_steps=steps_valid,
                              shuffle=True)
end = time.time()
model.save("modelvgg.npy")
Let me know if this is an incorrect way to do it, or if there is a better way.
But when I try to load the weights using this:
def __init__(self, vgg16_npy_path=None, trainable=True):
    if vgg16_npy_path is None:
        path = inspect.getfile(Vgg16)
        path = os.path.abspath(os.path.join(path, os.pardir))
        path = os.path.join(path, "modelvgg.npy")
        vgg16_npy_path = path
        print(path)

    self.data_dict = np.load(vgg16_npy_path, encoding='latin1').item()
    self.trainable = trainable
    print("npy file loaded")
I get this error:
UnpicklingError Traceback (most recent call last)
~/.local/lib/python3.6/site-packages/numpy/lib/npyio.py in load(file, mmap_mode, allow_pickle, fix_imports, encoding)
446 try:
--> 447 return pickle.load(fid, **pickle_kwargs)
448 except Exception:
UnpicklingError: invalid load key, 'H'.
During handling of the above exception, another exception occurred:
OSError Traceback (most recent call last)
<ipython-input-5-d099900e8f3b> in <module>
46 labels = tf.placeholder(tf.float32, [batch_size, 2])
47
---> 48 vgg = vgg16.Vgg16()
49 model.build(images)
50 cost = (-1) * tf.reduce_sum(tf.multiply(labels, tf.log(model.prob)), axis=1)
~/Bureau/Grad-CAM_final/model/vgg16.py in __init__(self, vgg16_npy_path, trainable)
18 print(path)
19
---> 20 self.data_dict = np.load(vgg16_npy_path, encoding='latin1').item()
21 self.trainable = trainable
22 print("npy file loaded")
~/.local/lib/python3.6/site-packages/numpy/lib/npyio.py in load(file, mmap_mode, allow_pickle, fix_imports, encoding)
448 except Exception:
449 raise IOError(
--> 450 "Failed to interpret file %s as a pickle" % repr(file))
451 finally:
452 if own_fid:
OSError: Failed to interpret file '/home/omri/Bureau/Grad-CAM_final/model/modelvgg.npy' as a pickle
Any suggestions on what I may be doing wrong? Thank you in advance.
This is not the correct way to load a Keras model saved as HDF5, which is what model.save produces even though you gave the file a .npy extension:
self.data_dict = np.load(vgg16_npy_path, encoding='latin1').item()
The correct way is to use keras.models.load_model:
from keras.models import load_model
model = load_model('your_file.hdf5')
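A minimal, self-contained sketch of the save/load round trip (the toy model and file name are illustrative; in your case model is the fine-tuned VGG16):

from keras.models import Sequential, load_model
from keras.layers import Dense

# toy model standing in for the trained VGG16 + Dense head
model = Sequential([Dense(2, input_shape=(4,), activation="softmax")])
model.compile(optimizer="adam", loss="categorical_crossentropy")

model.save("modelvgg.h5")             # writes an HDF5 file; an .h5/.hdf5 extension avoids confusion
restored = load_model("modelvgg.h5")  # restores architecture, weights and optimizer state

If you only need the weights and rebuild the architecture in code, model.save_weights(...) and model.load_weights(...) are the lighter-weight alternative.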