The code below returns an error whenever I run it. I'm using a conda environment, and I've tried installing tensorflow, keras, and layers multiple times, through both pip and conda, but nothing changes and I keep getting this error:
from keras import __version__
File "D:\Miniconda\envs\py39tensor\lib\site-packages\keras\__init__.py", line 25, in <module>
from keras import models
File "D:\Miniconda\envs\py39tensor\lib\site-packages\keras\models.py", line 20, in <module>
from keras import metrics as metrics_module
File "D:\Miniconda\envs\py39tensor\lib\site-packages\keras\metrics.py", line 26, in <module>
from keras import activations
File "D:\Miniconda\envs\py39tensor\lib\site-packages\keras\activations.py", line 20, in <module>
from keras.layers import advanced_activations
File "D:\Miniconda\envs\py39tensor\lib\site-packages\keras\layers\__init__.py", line 31, in <module>
from keras.layers.preprocessing.image_preprocessing import CenterCrop
File "D:\Miniconda\envs\py39tensor\lib\site-packages\keras\layers\preprocessing\image_preprocessing.py", line 24, in <module>
from keras.preprocessing import image as image_preprocessing
File "D:\Miniconda\envs\py39tensor\lib\site-packages\keras\preprocessing\__init__.py", line 26, in <module>
from keras.utils import all_utils as utils
File "D:\Miniconda\envs\py39tensor\lib\site-packages\keras\utils\all_utils.py", line 34, in <module>
from keras.utils.multi_gpu_utils import multi_gpu_model
File "D:\Miniconda\envs\py39tensor\lib\site-packages\keras\utils\multi_gpu_utils.py", line 20, in <module>
from keras.layers.core import Lambda
File "D:\Miniconda\envs\py39tensor\lib\site-packages\keras\layers\core\__init__.py", line 20, in <module>
from keras.layers.core.dropout import Dropout
File "D:\Miniconda\envs\py39tensor\lib\site-packages\keras\layers\core\dropout.py", line 26, in <module>
class Dropout(base_layer.BaseRandomLayer):
AttributeError: module 'keras.engine.base_layer' has no attribute 'BaseRandomLayer'
code:
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.datasets import mnist
(x_train, y_train), (x, test, y_test) = mnist.load_data()
print(x_train.shape)
print(y_train.shape)
I assume you are using Python 3.7. One possible solution would be to create the conda environment with python=3.8.
Also, you have a mistake in your code above:
(x_train, y_train), (x, test, y_test) = mnist.load_data()
Change it to
(x_train, y_train), (x_test, y_test) = mnist.load_data()
Then everything should work.
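If the environment change alone doesn't clear it, this particular AttributeError is also often caused by a standalone keras package whose version doesn't match the installed tensorflow (my assumption here, since the traceback runs through the standalone keras in site-packages). A minimal diagnostic sketch that reads both versions from package metadata, so it works even while the import itself is broken:

import sys
from importlib.metadata import version

# Read installed versions from metadata instead of importing keras,
# since importing keras is exactly what currently fails.
print("Python    :", sys.version.split()[0])
for pkg in ("tensorflow", "keras"):
    try:
        print(f"{pkg:<10}:", version(pkg))
    except Exception as exc:
        print(f"{pkg:<10}: not installed ({exc})")

If the two report different major.minor versions, reinstalling them so they match usually resolves the BaseRandomLayer error.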
Related
I'm trying to get TF Lite Model Maker to work, but it has an import problem. As soon as I type:
import tflite_model_maker
in Python, it throws these errors:
/home/miscope/miniconda3/envs/sencondenv/lib/python3.9/site-packages/tensorflowjs/read_weights.py:28: FutureWarning: In the future `np.object` will be defined as the corresponding NumPy scalar.
  np.uint8, np.uint16, np.object, np.bool]
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/home/miscope/examples/tensorflow_examples/lite/model_maker/pip_package/src/tflite_model_maker/__init__.py", line 44, in <module>
    from tflite_model_maker import audio_classifier
  File "/home/miscope/examples/tensorflow_examples/lite/model_maker/pip_package/src/tflite_model_maker/audio_classifier/__init__.py", line 24, in <module>
    from tensorflow_examples.lite.model_maker.core.data_util.audio_dataloader import DataLoader
  File "/home/miscope/examples/tensorflow_examples/lite/model_maker/pip_package/src/tensorflow_examples/lite/model_maker/core/data_util/audio_dataloader.py", line 27, in <module>
    from tensorflow_examples.lite.model_maker.core.task.model_spec import audio_spec
  File "/home/miscope/examples/tensorflow_examples/lite/model_maker/pip_package/src/tensorflow_examples/lite/model_maker/core/task/model_spec/__init__.py", line 20, in <module>
    from tensorflow_examples.lite.model_maker.core.task.model_spec import audio_spec
  File "/home/miscope/examples/tensorflow_examples/lite/model_maker/pip_package/src/tensorflow_examples/lite/model_maker/core/task/model_spec/audio_spec.py", line 30, in <module>
    from tensorflow_examples.lite.model_maker.core.task import model_util
  File "/home/miscope/examples/tensorflow_examples/lite/model_maker/pip_package/src/tensorflow_examples/lite/model_maker/core/task/model_util.py", line 28, in <module>
    from tensorflowjs.converters import converter as tfjs_converter
  File "/home/miscope/miniconda3/envs/sencondenv/lib/python3.9/site-packages/tensorflowjs/__init__.py", line 21, in <module>
    from tensorflowjs import converters
  File "/home/miscope/miniconda3/envs/sencondenv/lib/python3.9/site-packages/tensorflowjs/converters/__init__.py", line 21, in <module>
    from tensorflowjs.converters.converter import convert
  File "/home/miscope/miniconda3/envs/sencondenv/lib/python3.9/site-packages/tensorflowjs/converters/converter.py", line 35, in <module>
    from tensorflowjs.converters import keras_h5_conversion as conversion
  File "/home/miscope/miniconda3/envs/sencondenv/lib/python3.9/site-packages/tensorflowjs/converters/keras_h5_conversion.py", line 33, in <module>
    from tensorflowjs import write_weights  # pylint: disable=import-error
  File "/home/miscope/miniconda3/envs/sencondenv/lib/python3.9/site-packages/tensorflowjs/write_weights.py", line 25, in <module>
    from tensorflowjs import read_weights
  File "/home/miscope/miniconda3/envs/sencondenv/lib/python3.9/site-packages/tensorflowjs/read_weights.py", line 28, in <module>
    np.uint8, np.uint16, np.object, np.bool]
  File "/home/miscope/miniconda3/envs/sencondenv/lib/python3.9/site-packages/numpy/__init__.py", line 305, in __getattr__
    raise AttributeError(__former_attrs__[attr])
AttributeError: module 'numpy' has no attribute 'object'.
`np.object` was a deprecated alias for the builtin `object`. To avoid this error in existing code, use `object` by itself. Doing this will not modify any behavior and is safe. The aliases was originally deprecated in NumPy 1.20; for more details and guidance see the original release note at: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations
I think I installed all the required packages with the setup.py from this repo (pip_package): https://github.com/tensorflow/examples/tree/master/tensorflow_examples/lite/model_maker
I'm using Miniconda with Python 3.9.2.
I don't even know if TF Lite Model Maker can be used outside of Google Colab. I would like to train a custom object detection model like in this tutorial, but outside of Colab:
https://colab.research.google.com/github/khanhlvg/tflite_raspberry_pi/blob/main/object_detection/Train_custom_model_tutorial.ipynb
I hope you can help, thanks.
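In case it helps narrow things down: the final AttributeError comes from NumPy itself. np.object was a deprecated alias for the builtin object (deprecated in NumPy 1.20, as the message says) and is removed in NumPy 1.24+, but tensorflowjs/read_weights.py still references it. The two usual workarounds are pinning numpy below 1.24 in the environment, or applying the rename the message asks for. A minimal illustrative sketch of that rename (not a patch to tensorflowjs itself, just the visible tail of the list from the error):

import numpy as np

# Fails on NumPy 1.24+ because the aliases were removed:
#     [..., np.uint8, np.uint16, np.object, np.bool]
# Same entries using the builtins, which behave identically on every NumPy version:
dtypes_tail = [np.uint8, np.uint16, object, bool]
print(dtypes_tail)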
I'm doing a school project and decided to build a Django website that displays a chatbot's responses. The website should also allow users to add an intent for the chatbot to learn from. After working on the website, I tried importing Keras to begin writing the training file.
import random
import json
import pickle
import numpy as np
import nltk
from nltk.stem import WordNetLemmatizer
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.optimizers import SGD
# Rest of the code isn't necessary as error is raised above
However, when I run the file, it keeps giving me the following error:
Traceback (most recent call last):
File "D:\User\SILPythonChatbot\SILProject\BotApp\training.py", line 9, in <module>
from keras.models import Sequential
File "D:\User\SILPythonChatbot\venv\lib\site-packages\keras\__init__.py", line 21, in <module>
from keras import models
File "D:\User\SILPythonChatbot\venv\lib\site-packages\keras\models\__init__.py", line 18, in <module>
from keras.engine.functional import Functional
File "D:\User\SILPythonChatbot\venv\lib\site-packages\keras\engine\functional.py", line 24, in <module>
import tensorflow.compat.v2 as tf
File "D:\User\SILPythonChatbot\venv\lib\site-packages\tensorflow\__init__.py", line 37, in <module>
from tensorflow.python.tools import module_util as _module_util
File "D:\User\SILPythonChatbot\venv\lib\site-packages\tensorflow\python\__init__.py", line 42, in <module>
from tensorflow.python import data
File "D:\User\SILPythonChatbot\venv\lib\site-packages\tensorflow\python\data\__init__.py", line 21, in <module>
from tensorflow.python.data import experimental
File "D:\User\SILPythonChatbot\venv\lib\site-packages\tensorflow\python\data\experimental\__init__.py", line 96, in <module>
from tensorflow.python.data.experimental import service
File "D:\User\SILPythonChatbot\venv\lib\site-packages\tensorflow\python\data\experimental\service\__init__.py", line 419, in <module>
from tensorflow.python.data.experimental.ops.data_service_ops import distribute
File "D:\UserSILPythonChatbot\venv\lib\site-packages\tensorflow\python\data\experimental\ops\data_service_ops.py", line 22, in <module>
from tensorflow.python.data.experimental.ops import compression_ops
File "D:\User\SILPythonChatbot\venv\lib\site-packages\tensorflow\python\data\experimental\ops\compression_ops.py", line 16, in <module>
from tensorflow.python.data.util import structure
File "D:\User\SILPythonChatbot\venv\lib\site-packages\tensorflow\python\data\util\structure.py", line 22, in <module>
from tensorflow.python.data.util import nest
File "D:\User\SILPythonChatbot\venv\lib\site-packages\tensorflow\python\data\util\nest.py", line 34, in <module>
from tensorflow.python.framework import sparse_tensor as _sparse_tensor
File "D:\User\SILPythonChatbot\venv\lib\site-packages\tensorflow\python\framework\sparse_tensor.py", line 24, in <module>
from tensorflow.python.framework import constant_op
File "D:\User\SILPythonChatbot\venv\lib\site-packages\tensorflow\python\framework\constant_op.py", line 25, in <module>
from tensorflow.python.eager import execute
File "D:\User\SILPythonChatbot\venv\lib\site-packages\tensorflow\python\eager\execute.py", line 22, in <module>
from tensorflow.python.framework import ops
File "D:\User\SILPythonChatbot\venv\lib\site-packages\tensorflow\python\framework\ops.py", line 49, in <module>
from tensorflow.python.framework import cpp_shape_inference_pb2
File "D:\User\SILPythonChatbot\venv\lib\site-packages\tensorflow\python\framework\cpp_shape_inference_pb2.py", line 203, in <module>
'HandleShapeAndType' : _reflection.GeneratedProtocolMessageType('HandleShapeAndType', (_message.Message,), {
TypeError: 'DTypeMeta' object is not iterable
I tried reinstalling. However, that failed as well.
Does anybody know why this happens, or how to fix it? Thanks.
I am learning machine learning and trying to create a single layer with 3 inputs and 1 output, following a Kaggle tutorial. I am trying to import TensorFlow and Keras with the following code:
from tensorflow import keras
from tensorflow.keras import layers
# Create a network with 1 linear unit
model = keras.Sequential([
layers.Dense(units=1, input_shape=[3])
])
but I get the following error:
"/Users/ahmedhamadto/PycharmProjects/Deep Learning/venv/bin/python" "/Users/ahmedhamadto/PycharmProjects/Deep Learning/main.py"
Traceback (most recent call last):
File "/Users/ahmedhamadto/PycharmProjects/Deep Learning/main.py", line 1, in <module>
from tensorflow import keras
File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/tensorflow/__init__.py", line 37, in <module>
from tensorflow.python.tools import module_util as _module_util
File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/tensorflow/python/__init__.py", line 37, in <module>
from tensorflow.python.eager import context
File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/tensorflow/python/eager/context.py", line 29, in <module>
from tensorflow.core.framework import function_pb2
File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/tensorflow/core/framework/function_pb2.py", line 7, in <module>
from google.protobuf import descriptor as _descriptor
File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/google/protobuf/descriptor.py", line 47, in <module>
from google.protobuf.pyext import _message
ImportError: dlopen(/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/google/protobuf/pyext/_message.cpython-310-darwin.so, 0x0002): symbol not found in flat namespace (__ZNK6google8protobuf10TextFormat21FastFieldValuePrinter19PrintMessageContentERKNS0_7MessageEiibPNS1_17BaseTextGeneratorE)
Process finished with exit code 1
I am using an M1 Pro MacBook Pro in case the info is relevant.
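One thing worth trying, as a sketch rather than a definitive fix: the dlopen failure happens inside protobuf's compiled _message extension, so forcing protobuf's pure-Python implementation (a documented protobuf environment variable) before anything imports it sidesteps the broken .so. It is slower, and on Apple Silicon the longer-term fix is usually an arm64-native TensorFlow build (e.g. tensorflow-macos), but it is a quick way to confirm the C extension is the problem:

import os
# Must be set before tensorflow (and therefore protobuf) is imported.
os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'python'

from tensorflow import keras
from tensorflow.keras import layers

model = keras.Sequential([
    layers.Dense(units=1, input_shape=[3]),
])
model.summary()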
I am running into this error when I import this:
from gensim.models import KeyedVectors
The error is:
File "c:\Users\frase\eg1.py", line 11, in <module>
from gensim.models import KeyedVectors
File "C:\Users\frase\AppData\Local\Programs\Python\Python39\lib\site-packages\gensim\__init__.py", line 11, in <module>
from gensim import parsing, corpora, matutils, interfaces, models, similarities, utils # noqa:F401
File "C:\Users\frase\AppData\Local\Programs\Python\Python39\lib\site-packages\gensim\corpora\__init__.py", line 6, in <module>
from .indexedcorpus import IndexedCorpus # noqa:F401 must appear before the other classes
File "C:\Users\frase\AppData\Local\Programs\Python\Python39\lib\site-packages\gensim\corpora\indexedcorpus.py", line 14, in <module>
from gensim import interfaces, utils
File "C:\Users\frase\AppData\Local\Programs\Python\Python39\lib\site-packages\gensim\interfaces.py", line 19, in <module>
from gensim import utils, matutils
File "C:\Users\frase\AppData\Local\Programs\Python\Python39\lib\site-packages\gensim\matutils.py", line 1024, in <module>
from gensim._matutils import logsumexp, mean_absolute_difference, dirichlet_expectation
File "gensim/_matutils.pyx", line 1, in init gensim._matutils
#!/usr/bin/env cython
ValueError: numpy.ndarray size changed, may indicate binary incompatibility. Expected 88 from C header, got 80 from PyObject
I have used KeyedVectors before to load a model, but this started to occur when I installed TensorFlow.
I have looked up this issue and downgraded NumPy to 1.19.2 (which is the maximum for TensorFlow), but that did not work. I have also tried simply uninstalling and reinstalling, and that has not worked either.
Any reason why this is happening?
Example of the code it is used for (NOTE: I can't test it, so there might be errors):
model = KeyedVectors.load_word2vec_format(
    r'\Word2vec\GoogleNews-vectors-negative300.bin.gz', binary=True)

# create the embedding matrix (vocab_size and tokenizer come from the Keras Tokenizer)
embedding_matrix = np.zeros((vocab_size, 300))  # 300 is the embedding dimension
for word in model.key_to_index:  # gensim 4.x API: key_to_index maps word -> index
    if word in tokenizer.word_index:
        idx = tokenizer.word_index[word]
        embedding_matrix[idx] = np.array(model[word], dtype=np.float32)
Solutions:
Downgrade the Python environment to 3.8/3.7.
Uninstall NumPy and upgrade to the latest NumPy version.
Reference: ValueError: numpy.ndarray size changed, may indicate binary incompatibility. Expected 88 from C header, got 80 from PyObject
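The error itself indicates binary incompatibility: gensim's compiled extensions were built against a different NumPy ABI than the one currently installed, which is why changing the NumPy version (or reinstalling gensim so the two are resolved together) clears it. A minimal sanity check after reinstalling, assuming nothing beyond the import chain needs verifying:

import numpy as np
import gensim
from gensim.models import KeyedVectors  # this import is what previously raised the ValueError

print('NumPy  :', np.__version__)
print('gensim :', gensim.__version__)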
Greetings, dear community,
I am trying to use the Python keras package and I got this error:
I am running this on an Oracle Linux VirtualBox VM. Could it be that it expects some kind of GPU box?
Thanks for the help.
In [59]: from keras.models import Sequential#issue
...:
Traceback (most recent call last):
File "<ipython-input-59-0f55ca179bba>", line 1, in <module>
from keras.models import Sequential#issue
File "/home/oracle/anaconda/lib/python2.7/site-packages/keras/models.py", line 15, in <module>
from .utils.layer_utils import container_from_config
File "/home/oracle/anaconda/lib/python2.7/site-packages/keras/utils/layer_utils.py", line 10, in <module>
from ..layers.convolutional import Convolution1D, Convolution2D, MaxPooling1D, MaxPooling2D, ZeroPadding2D
File "/home/oracle/anaconda/lib/python2.7/site-packages/keras/layers/convolutional.py", line 6, in <module>
from theano.sandbox.cuda import dnn
File "/home/oracle/anaconda/lib/python2.7/site-packages/theano/sandbox/cuda/dnn.py", line 9, in <module>
from theano.gof import Optimizer, local_optimizer, COp
ImportError: cannot import name COp
The failing line in dnn.py is:
from theano.gof import Optimizer, local_optimizer, COp