I'm facing this error when I run this code in Google Colab.
The error is:
InvalidArgumentError Traceback (most recent call last)
<ipython-input-14-67951c7b20d5> in <module>()
22
23 model3 = get_model3()
---> 24 model3.fit(wrap_generator3(train_generator3), steps_per_epoch=train_generator3.samples/train_generator3.batch_size, epochs=20)
25 #model3.fit_generator(generator=train_generator3, steps_per_epoch=train_generator3.samples/train_generator3.batch_size, epochs=20)
7 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/client/session.py in __init__(self, session, callable_options)
1468 try:
1469 self._handle = tf_session.TF_SessionMakeCallable(
-> 1470 session._session, options_ptr)
1471 finally:
1472 tf_session.TF_DeleteBuffer(options_ptr)
InvalidArgumentError: Requested tensor connection from unknown node: "dense_8_target:0".
How do I solve this problem?
Can anyone help me?
Thank you
I am running code in a Jupyter notebook on a virtual machine (Ubuntu). Normally, when there is an error, the traceback points to the file where the error occurred and gives its path, but this time I got an error with a path that does not exist on my machine.
ValueError Traceback (most recent call last)
/tmp/ipykernel_84/10827517.py in <module>
----> 1 vae_cl.train(max_epochs=500, plan_kwargs=dict(weight_decay=0.0))
/tmp/ipykernel_84/1274271952.py in train(self, max_epochs, n_samples_per_label, check_val_every_n_epoch, train_size, validation_size, batch_size, use_gpu, plan_kwargs, **trainer_kwargs)
187 use_gpu=use_gpu,
188 check_val_every_n_epoch=check_val_every_n_epoch,
--> 189 **trainer_kwargs,
190 )
191 return runner()
/tmp/ipykernel_84/2912366039.py in __init__(self, model, training_plan, data_splitter, max_epochs, use_gpu, **trainer_kwargs)
59 self.data_splitter = data_splitter
60 self.model = model
---> 61 gpus, device = parse_use_gpu_arg(use_gpu)
62 self.gpus = gpus
63 self.device = device
ValueError: too many values to unpack (expected 2)
What I noticed is that the function it points to (---> 61) is actually in the file I am running, but I do not understand why the path looks like '/tmp/ipykernel_84/1274271952.py'.
Any idea?
As mentioned in the title, I am getting this TypeError for the following code.
I am using Google Colab and the runtime type is set to GPU.
%%time
history = [evaluate(model, valid_dl)]
history
%%time
history += fit_OneCycle(epochs, max_lr, model, train_dl, valid_dl,
                        grad_clip=grad_clip,
                        weight_decay=1e-4,
                        opt_func=opt_func)
def plot_losses(history):
    train_losses = [x.get('train_loss') for x in history]
    val_losses = [x['val_loss'] for x in history]
    plt.plot(train_losses, '-bx')
    plt.plot(val_losses, '-rx')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend(['Training', 'Validation'])
    plt.title('Loss vs. No. of epochs')
For the following line I am getting the error:
plot_losses(history)
And I am getting the following error message:
AttributeError Traceback (most recent call last)
/usr/local/lib/python3.7/dist-packages/matplotlib/cbook/__init__.py in index_of(y)
1626 try:
-> 1627 return y.index.values, y.values
1628 except AttributeError:
AttributeError: 'builtin_function_or_method' object has no attribute 'values'
During handling of the above exception, another exception occurred:
TypeError Traceback (most recent call last)
9 frames
<__array_function__ internals> in atleast_1d(*args, **kwargs)
/usr/local/lib/python3.7/dist-packages/torch/_tensor.py in __array__(self, dtype)
676 return handle_torch_function(Tensor.__array__, (self,), self, dtype=dtype)
677 if dtype is None:
--> 678 return self.numpy()
679 else:
680 return self.numpy().astype(dtype, copy=False)
TypeError: can't convert cuda:0 device type tensor to numpy. Use Tensor.cpu() to copy the tensor to host memory first.
The line that raises the error is not shown here; however, you can replace the offending array (if it is a CUDA tensor) with:
the_array.cpu().numpy()
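For example, assuming the 'val_loss' and 'train_loss' entries in history are CUDA tensors (which is what the traceback above suggests; the torch.is_tensor guard is only an assumption so plain floats keep working), a minimal sketch of the fix inside the plot_losses function shown above could look like this:
import torch
import matplotlib.pyplot as plt

def plot_losses(history):
    # move any CUDA tensor to host memory before matplotlib touches it
    def to_float(value):
        return value.cpu().item() if torch.is_tensor(value) else value

    train_losses = [to_float(x.get('train_loss')) for x in history]
    val_losses = [to_float(x['val_loss']) for x in history]
    plt.plot(train_losses, '-bx')
    plt.plot(val_losses, '-rx')
    plt.xlabel('epoch')
    plt.ylabel('loss')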
I am a bit confused, as I have never encountered such an error before. I am trying to train my CNN model on images. Below you can see my code, followed by the error message. As you can see, it starts at epoch 1 and then stops :(
Does anyone have any idea where the problem comes from, or has anyone had a similar error message when training a CNN?
Any help is welcome,
Thanks
history = modelA.fit(train_data,
                     validation_data = test_data,
                     epochs = 60,
                     callbacks = [best_model, reduce_lr, es])
ERROR MESSAGE
Epoch 1/60
---------------------------------------------------------------------------
UnimplementedError Traceback (most recent call last)
<ipython-input-68-4b47ff852a2a> in <module>()
2 validation_data = test_data,
3 epochs = 60,
----> 4 callbacks = [best_model, reduce_lr, es])
1 frames
/usr/local/lib/python3.7/dist-packages/tensorflow/python/eager/execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
53 ctx.ensure_initialized()
54 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
---> 55 inputs, attrs, num_outputs)
56 except core._NotOkStatusException as e:
57 if name is not None:
UnimplementedError: Graph execution error:
I used a pretrained ImageNet VGG16 model in Keras and added my own Dense layers on top.
I'm trying to save and load the weights of the model I have trained.
The code I'm using to save the model is:
import time
start = time.time()
history = model.fit_generator(generator=train_batches,
                              epochs=epochs,
                              steps_per_epoch=steps_train,
                              #callbacks=callbacks_list,
                              validation_data=valid_batches,
                              validation_steps=steps_valid,
                              shuffle=True)
end = time.time()
model.save("modelvgg.npy")
Let me know if this is an incorrect way to do it, or if there is a better way.
But when I try to load the weights using this:
def __init__(self, vgg16_npy_path=None, trainable=True):
    if vgg16_npy_path is None:
        path = inspect.getfile(Vgg16)
        path = os.path.abspath(os.path.join(path, os.pardir))
        path = os.path.join(path, "modelvgg.npy")
        vgg16_npy_path = path
        print(path)

    self.data_dict = np.load(vgg16_npy_path, encoding='latin1').item()
    self.trainable = trainable
    print("npy file loaded")
I get this error:
UnpicklingError Traceback (most recent call last)
~/.local/lib/python3.6/site-packages/numpy/lib/npyio.py in load(file, mmap_mode, allow_pickle, fix_imports, encoding)
446 try:
--> 447 return pickle.load(fid, **pickle_kwargs)
448 except Exception:
UnpicklingError: invalid load key, 'H'.
During handling of the above exception, another exception occurred:
OSError Traceback (most recent call last)
<ipython-input-5-d099900e8f3b> in <module>
46 labels = tf.placeholder(tf.float32, [batch_size, 2])
47
---> 48 vgg = vgg16.Vgg16()
49 model.build(images)
50 cost = (-1) * tf.reduce_sum(tf.multiply(labels, tf.log(model.prob)), axis=1)
~/Bureau/Grad-CAM_final/model/vgg16.py in __init__(self, vgg16_npy_path, trainable)
18 print(path)
19
---> 20 self.data_dict = np.load(vgg16_npy_path, encoding='latin1').item()
21 self.trainable = trainable
22 print("npy file loaded")
~/.local/lib/python3.6/site-packages/numpy/lib/npyio.py in load(file, mmap_mode, allow_pickle, fix_imports, encoding)
448 except Exception:
449 raise IOError(
--> 450 "Failed to interpret file %s as a pickle" % repr(file))
451 finally:
452 if own_fid:
OSError: Failed to interpret file '/home/omri/Bureau/Grad-CAM_final/model/modelvgg.npy' as a pickle
Any suggestions on what I may be doing wrong? Thank you in advance.
This is not the correct way to load a Keras model saved as HDF5 (since you saved it with model.save):
self.data_dict = np.load(vgg16_npy_path, encoding='latin1').item()
The correct way is to use keras.models.load_model:
from keras.models import load_model
model = load_model('your_file.hdf5')
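Assuming the file written by model.save above is the one you want to load (with standalone Keras, model.save writes an HDF5 file regardless of the file extension, which is why np.load cannot read it), the load could look like this:
from keras.models import load_model

# the extension is misleading: model.save() produced an HDF5 file, not a NumPy file
model = load_model('/home/omri/Bureau/Grad-CAM_final/model/modelvgg.npy')
model.summary()
If you prefer, re-save the model with an .h5 extension (model.save('modelvgg.h5')) so the file type is obvious.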
I am doing an ML course on Coursera.
When I run the following command:
sf['Country'] = sf['Country'].apply(transform_country)
This is the error I get:
RuntimeError Traceback (most recent call last)
<ipython-input-10-e97a176c3eea> in <module>()
----> 1 sf['Country'] = sf['Country'].apply(transform_country)
F:\Anaconda2\envs\gl-env\lib\site-packages\graphlab\data_structures\sarray.pyc in apply(self, fn, dtype, skip_undefined, seed)
1892
1893 with cython_context():
-> 1894 return SArray(_proxy=self.__proxy__.transform(fn, dtype, skip_undefined, seed))
1895
1896
F:\Anaconda2\envs\gl-env\lib\site-packages\graphlab\cython\context.pyc in __exit__(self, exc_type, exc_value, traceback)
47 if not self.show_cython_trace:
48 # To hide cython trace, we re-raise from here
---> 49 raise exc_type(exc_value)
50 else:
51 # To show the full trace, we do nothing and let exception propagate
RuntimeError: Runtime Exception. Cannot evaluate lambda. Lambda workers cannot not start.
What do I do now?