AttributeError in loading training dataset when using Mask-RCNN - python

When using Mask-RCNN-TF2.7.0-keras2.8.0 for my test, I run into problems just loading my training and validation datasets.
I provide my code here so you can better figure out what's wrong.
Basic setup
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Avoid AVX Warning from using CPU
import sys
import random
import math
import re
import time
import numpy as np
import cv2
import matplotlib
import matplotlib.pyplot as plt
import h5py
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from glob import glob
# Root directory of the project
ROOT_DIR = os.path.abspath('/Mask_RCNN2.0/')
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
from mrcnn.model import log
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_MODEL_PATH):
    utils.download_trained_weights(COCO_MODEL_PATH)
import tensorflow as tf
config = tf.compat.v1.ConfigProto()
gpus = tf.config.experimental.list_physical_devices('GPU') # If there is any GPU available
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True) # Allocating GPU memory
# config.gpu_options.allow_growth=True
config.gpu_options.per_process_gpu_memory_fraction = 0.6 # Maximum consumption of GPU limit to 0.6
sess = tf.compat.v1.Session(config=config)
# for visualization
def get_ax(rows=1, cols=1, size=8):
    """Return a Matplotlib Axes array to be used in
    all visualizations in the notebook. Provide a
    central point to control graph sizes.
    Change the default size attribute to control the size
    of rendered images
    """
    _, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))
    return ax
Configuration
classes = {1: 'trout'}
class Config(Config):
    NAME = 'trout'
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
    # BATCH_SIZE = IMAGES_PER_GPU * GPU_COUNT
    NUM_CLASSES = len(classes) + 1  # background + number of classes
    IMAGE_MIN_DIM = 128
    IMAGE_MAX_DIM = 128
    IMAGE_RESIZE_MODE = 'square'
    # Use smaller anchors because our image and objects are small
    RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128)  # Refer to the setup in train_shapes.ipynb
    # Reduce training ROIs per image because the images are small and have
    # few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
    TRAIN_ROIS_PER_IMAGE = 32
    # Use a small epoch since the data is simple
    STEPS_PER_EPOCH = 100
    # use small validation steps since the epoch is small
    VALIDATION_STEPS = 5
config = Config()
config.display()
Dataset
from multiprocessing import Lock, Value, Pool, cpu_count
import tqdm
def load_images(i, images, annotation_file, temp):
    path = os.path.join(images[i], 'image')
    img_path = os.path.join(temp, os.path.split(path)[0].replace('/', '-').replace(':', '') + '.png')
    if os.path.exists(img_path):
        height, width = cv2.imread(img_path).shape[:2]
    else:
        with h5py.File(annotation_file, 'r') as h5_file:
            height, width = h5_file[path][:].shape[:2]
            image = h5_file[path][:]
        cv2.imwrite(img_path, image[:, :, ::-1])
    lock.acquire()
    count.value += 1
    print('Reading images: {:.2f} %'.format(100 * count.value / len(images)),
          sep=' ', end='\r' if count.value < len(images) else '\n', flush=True)
    lock.release()
    return (i, path, width, height)

def init_pool(l, c):
    global lock, count
    lock = l
    count = c
class Dataset(utils.Dataset):
    def load_images(self, annotation_file, classes, source, mode='full'):
        self.classes = classes
        self.annotation_file = annotation_file
        self.temp = os.path.join(os.path.join(os.path.dirname(annotation_file), os.path.split(annotation_file)[-1] + 'temp'))
        if not os.path.exists(self.temp):
            os.mkdir(self.temp)
        with h5py.File(self.annotation_file, 'r') as h5_file:
            for c in self.classes:
                self.add_class(source, c, self.classes[c])
            self.images = h5_file['annotations'][:].astype(np.str)
        np.random.seed(0)
        image_idx = np.arange(self.images.size)
        val_idx = np.random.choice(image_idx, image_idx.size // 6)
        if mode == 'train':
            self.images = self.images[np.invert(np.isin(image_idx, val_idx))]
        elif mode == 'val':
            self.images = self.images[np.isin(image_idx, val_idx)]
        else:
            print('Warning: set mode to "train" or "val", otherwise using full dataset')
        l = Lock()
        c = Value('d', 0)
        if __name__ == "__main__":
            with Pool(processes=cpu_count(), initializer=init_pool, initargs=(l, c)) as pool:
                to_add = pool.starmap(load_images, list(zip(np.arange(len(self.images)),
                                                            [self.images] * len(self.images),
                                                            [self.annotation_file] * len(self.images),
                                                            [self.temp] * len(self.images))))
                for (i, path, width, height) in to_add:
                    self.add_image(source, image_id=i, path=path, width=width, height=height)
                pool.close()
                pool.join()

    def load_image(self, image_id):
        info = self.image_info[image_id]
        path = info['path']
        img_path = os.path.join(self.temp, os.path.split(path)[0].replace('/', '-').replace(':', '') + '.png')
        print(img_path)
        image = cv2.imread(img_path)[:, :, ::-1]
        return image

    def load_mask(self, image_id):
        info = self.image_info[image_id]
        img_path = info['path']
        path = os.path.split(img_path)[0]
        with h5py.File(self.annotation_file, 'r') as h5_file:
            mask = h5_file[os.path.join(path, 'mask')][:]
            classes = h5_file[os.path.join(path, 'class_names')][:].astype(np.str)
        use = np.array([idx for idx, name in enumerate(classes) for c in self.classes if name in self.classes[c].split(',')], dtype=np.int32)
        class_ids = np.array([c for name in classes for c in self.classes if name in self.classes[c].split(',')], dtype=np.int32)
        mask = mask[:, :, use]
        non_empty = mask.sum(axis=(0, 1)) > 10
        return mask[:, :, non_empty], class_ids[non_empty]

    def prepare(self):
        super().prepare()
        print('{} images, classes: '.format(len(self.image_ids)), *['[{}: {}]'.format(idx, self.classes[idx]) for idx in self.classes])
augmentation = iaa.Sequential([
    iaa.Fliplr(0.5),
    iaa.Flipud(0.5),
    iaa.Affine(rotate=iap.Choice([0, 90, 180, 270])),
    iaa.Affine(scale={'x': (0.8, 1.2), 'y': (0.8, 1.2)},
               translate_percent={'x': (-0.2, 0.2), 'y': (-0.2, 0.2)},
               rotate=(-45, 45))
], random_order=True)
Load dataset (where errors occur)
dataset_train = Dataset()
dataset_train.load_images(annotation_file='/Mask_RCNN2.0/trout/Dataset/314_0_out.h5',
                          classes=classes,
                          source='trout',
                          mode='train')
dataset_train.prepare()
The error info is as follows. It seems to repeat in a loop and won't stop even if I interrupt the kernel.
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\Apps\Anaconda3\envs\CNNTest2\lib\multiprocessing\spawn.py", line 116, in spawn_main
exitcode = _main(fd, parent_sentinel)
File "C:\Apps\Anaconda3\envs\CNNTest2\lib\multiprocessing\spawn.py", line 125, in _main
prepare(preparation_data)
File "C:\Apps\Anaconda3\envs\CNNTest2\lib\multiprocessing\spawn.py", line 236, in prepare
_fixup_main_from_path(data['init_main_from_path'])
File "C:\Apps\Anaconda3\envs\CNNTest2\lib\multiprocessing\spawn.py", line 287, in _fixup_main_from_path
main_content = runpy.run_path(main_path,
File "C:\Apps\Anaconda3\envs\CNNTest2\lib\runpy.py", line 265, in run_path
return _run_module_code(code, init_globals, run_name,
File "C:\Apps\Anaconda3\envs\CNNTest2\lib\runpy.py", line 97, in _run_module_code
_run_code(code, mod_globals, init_globals,
File "C:\Apps\Anaconda3\envs\CNNTest2\lib\runpy.py", line 87, in _run_code
exec(code, run_globals)
File "C:\Mask_RCNN2.0\trout\FishSeg_0516.py", line 271, in <module>
model.train(dataset_train, dataset_val,
File "C:\Apps\Anaconda3\envs\CNNTest2\lib\site-packages\mrcnn\model.py", line 2354, in train
self.compile(learning_rate, self.config.LEARNING_MOMENTUM)
File "C:\Apps\Anaconda3\envs\CNNTest2\lib\site-packages\mrcnn\model.py", line 2201, in compile
self.keras_model.add_metric(loss, name=name, aggregation='mean')
File "C:\Apps\Anaconda3\envs\CNNTest2\lib\site-packages\keras\engine\base_layer_v1.py", line 1132, in add_metric
self._graph_network_add_metric(value, aggregation, name)
File "C:\Apps\Anaconda3\envs\CNNTest2\lib\site-packages\keras\engine\functional.py", line 914, in _graph_network_add_metric
add_metric_layer(value)
File "C:\Apps\Anaconda3\envs\CNNTest2\lib\site-packages\keras\engine\base_layer_v1.py", line 765, in __call__
outputs = call_fn(cast_inputs, *args, **kwargs)
File "C:\Apps\Anaconda3\envs\CNNTest2\lib\site-packages\keras\engine\base_layer.py", line 3359, in call
self.add_metric(inputs, aggregation=self.aggregation, name=self.metric_name)
File "C:\Apps\Anaconda3\envs\CNNTest2\lib\site-packages\keras\engine\base_layer_v1.py", line 1113, in add_metric
self._symbolic_add_metric(value, aggregation, name)
File "C:\Apps\Anaconda3\envs\CNNTest2\lib\site-packages\keras\engine\base_layer_v1.py", line 1887, in _symbolic_add_metric
metric_obj, result_tensor = base_layer_utils.create_mean_metric(
File "C:\Apps\Anaconda3\envs\CNNTest2\lib\site-packages\keras\engine\base_layer_utils.py", line 35, in create_mean_metric
return metric_obj, metric_obj(value)
File "C:\Apps\Anaconda3\envs\CNNTest2\lib\site-packages\keras\metrics.py", line 237, in __call__
return distributed_training_utils.call_replica_local_fn(
File "C:\Apps\Anaconda3\envs\CNNTest2\lib\site-packages\keras\distribute\distributed_training_utils.py", line 60, in call_replica_local_fn
return fn(*args, **kwargs)
File "C:\Apps\Anaconda3\envs\CNNTest2\lib\site-packages\keras\metrics.py", line 223, in replica_local_fn
result_t = self.result() # pylint: disable=not-callable
File "C:\Apps\Anaconda3\envs\CNNTest2\lib\site-packages\keras\utils\metrics_utils.py", line 124, in decorated
tf.__internal__.distribute.strategy_supports_no_merge_call()):
AttributeError: module 'tensorflow.compat.v2.__internal__.distribute' has no attribute 'strategy_supports_no_merge_call'
My computer setup:
GPU: NVIDIA Quadro P620, 2GB
CUDA: 11.2
cuDNN: 8.1
tensorflow 2.8.0
Keras 2.7.0
Windows 10
Loading the dataset only takes a few seconds when I run my code on Google Colab (Ubuntu 18.04, runs perfectly), but on my local machine it takes a long time and fails with the error AttributeError: module 'tensorflow.compat.v2.__internal__.distribute' has no attribute 'strategy_supports_no_merge_call', and both CPU and GPU are almost out of memory.
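In case it is relevant, this is the kind of quick runtime check I can add (just a diagnostic sketch, not part of my training script) to confirm that the TensorFlow and Keras versions actually imported match the ones listed above, since a mismatched pair is one possible source of the missing strategy_supports_no_merge_call attribute:
# Diagnostic sketch: print the versions actually imported at runtime
import tensorflow as tf
import keras
print('tensorflow:', tf.__version__)
print('keras:', keras.__version__)
print('has strategy_supports_no_merge_call:',
      hasattr(tf.__internal__.distribute, 'strategy_supports_no_merge_call'))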
I wonder if there is something different between Ubuntu/Linux and Windows 10 that I haven't noticed? I have previously tested tensorflow 1.x on Windows 10, and a similar problem occurred. This has been troubling me for a long time. Could anyone please help me?
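(One Windows-specific detail I have read about, sketched here in case it matters: Windows starts multiprocessing workers with the "spawn" method, which re-imports the main script in every child process, so the usual advice is to keep all top-level work behind an if __name__ == '__main__' guard at module level. The names below are placeholders for my real setup, not my actual script.)
# Minimal sketch of the module-level entry-point guard recommended for Windows
def main():
    dataset_train = Dataset()
    dataset_train.load_images(annotation_file='/Mask_RCNN2.0/trout/Dataset/314_0_out.h5',
                              classes=classes, source='trout', mode='train')
    dataset_train.prepare()
    # ... build the model and call model.train(...) here ...

if __name__ == '__main__':
    main()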
Thanks in advance for your kindness.
Best,
Erin

Related

What and where am I going wrong in this code for pytorch based object detection?

I am using Yolov5 for this project
Here is my code
import time  # needed for time.sleep() and time.time() below
import numpy as np
import cv2
import torch
import torch.backends.cudnn as cudnn
from models.experimental import attempt_load
from utils.general import non_max_suppression
weights = '/Users/nidhi/Desktop/yolov5/best.pt'
device = torch.device('cpu')
model = attempt_load(weights, map_location=device) # load FP32 model
stride = int(model.stride.max()) # model stride
cudnn.benchmark = True
# Capture with opencv and detect object
cap = cv2.VideoCapture('Pothole testing.mp4')
width, height = (352, 352) # quality
cap.set(3, width) # width
cap.set(4, height) # height
while cap.isOpened():
    time.sleep(0.2)  # wait for 0.2 second
    ret, frame = cap.read()
    if ret == True:
        now = time.time()
        img = torch.from_numpy(frame).float().to(device).permute(2, 0, 1)
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)
        pred = model(img, augment=False)[0]
        pred = non_max_suppression(pred, 0.39, 0.45, classes=0, agnostic=True)  # img, conf, iou, classes, ...
        print('time -> ', time.time() - now)
    else:
        break
cap.release()
The error I am getting:
File "run.py", line 38, in <module>
pred = model(img, augment=False)[0]
File "/Users/nidhi/Library/Python/3.8/lib/python/site-packages/torch/nn/modules/module.py", line 889, in _call_impl
result = self.forward(*input, **kwargs)
File "/Users/nidhi/Desktop/yolov5/models/yolo.py", line 118, in forward
return self.forward_once(x, profile) # single-scale inference, train
File "/Users/nidhi/Desktop/yolov5/models/yolo.py", line 134, in forward_once
x = m(x) # run
File "/Users/nidhi/Library/Python/3.8/lib/python/site-packages/torch/nn/modules/module.py", line 889, in _call_impl
result = self.forward(*input, **kwargs)
File "/Users/nidhi/Desktop/yolov5/models/common.py", line 152, in forward
return torch.cat(x, self.d)
RuntimeError: Sizes of tensors must match except in dimension 1. Got 108 and 107 in dimension 3 (The offending index is 1)
Operating system: macOS Big Sur 11.2.3
Python version: 3.8.2
The model used is best.pt, which I trained on Google Colab; I used the yolov5l model to train on the dataset.
Are you getting your error in the following line?
pred = model(img, augment=False)[0]
It might be because YOLO expects input image sizes that are multiples of 32, e.g. 320×320, 352×352 etc., but your frames are 352×288. You will either have to resize them, or pad the 288 dimension with white/black pixels to make it 352.
If you are not sure about where you are getting the error, can you attach the whole error?
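For illustration, here is a rough (untested) sketch of padding each frame up to the next multiple of 32 with black pixels before building the tensor; the frame variable is assumed to be the BGR array from cap.read():
import math
import cv2

def pad_to_multiple_of_32(frame):
    # Pad the bottom/right so both height and width become multiples of 32
    h, w = frame.shape[:2]
    new_h = math.ceil(h / 32) * 32
    new_w = math.ceil(w / 32) * 32
    return cv2.copyMakeBorder(frame, 0, new_h - h, 0, new_w - w,
                              cv2.BORDER_CONSTANT, value=(0, 0, 0))

# inside the capture loop, before torch.from_numpy(frame):
# frame = pad_to_multiple_of_32(frame)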

TensorFlow graph error in Estimator (ValueError: Tensor (...) must be from the same graph as Tensor(...))

UPDATE: Testing the same code with tensorflow-gpu 1.13.1 works both on my PC and on Google Cloud.
Using TensorFlow Estimator and running train_and_evaluate gives me the following error message:
"ValueError: Tensor("Const:0", shape=(3,), dtype=float32) must be from the same graph as Tensor("ParallelMapDataset:0", shape=(), dtype=variant, device=/device:CPU:0)." (see the full error output near bottom)
This happens when training the CNN on my PC with a GPU (GeForge RTX 2070). I am using Python 3.7 with tensorflow-gpu/tensorflow 1.14.0, Keras 2.2.4, running in a Conda environment.
It happens after the log message "... Saving checkpoints for 2716 into C:/EstimatorOutput/10/model.ckpt." and appears to occur when the input function for the evaluation step is being processed.
The code, as it is now, has run previously with no issue, but this has suddenly changed for reasons that are unclear to me.
I ran similar code on Google Cloud (which also previously ran fine), and the same problem occurs (see error output near bottom; run on GPU (BASIC_GPU); TensorFlow 1.14; Keras 2.2.4).
The error seems to be related to the evaluation step: when the graph is created, for some reason the new graph is not compatible.
Here is my code:
My task module:
import tensorflow as tf
from train_model import model #("train_model" is local folder)
from train_model.model import create_estimator
if __name__ == '__main__':
    model_num = 10
    # Throw properties into params dict to pass to other functions
    params = {}
    params['train csv'] = "train_set_local.csv"
    params['eval csv'] = "eval_set_local.csv"
    params['output path'] = "C:/EstimatorOutput/" + str(model_num) + "/"
    params['data path'] = "C:/Databases/Birds_dB/Images"
    params['image size'] = [244, 224]
    params["batch size"] = 16*2
    params['use random flip'] = True
    params['learning rate'] = 0.000001
    params['dropout rate'] = 0.50
    params['num classes'] = 123
    params['train steps'] = 65000
    params['eval steps'] = 20
    params['eval_throttle_secs'] = 600
    params['num parallel calls'] = 4
    # Run the training job
    model.go_train(params)  # (See "go_train" below in model script ->)
My model module
import tensorflow as tf
from tensorflow.python.keras import estimator as kes
from tensorflow.python.keras.applications.vgg16 import VGG16
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.layers import Dropout, Flatten, Dense
from train_model.input_fn import make_input_fn
def create_estimator(params):
    # Import VGG16 model for transfer learning
    base_model = VGG16(weights='imagenet')
    base_model.summary()
    x = base_model.get_layer('fc2').output
    x = Dropout(params['dropout rate'])(x)
    predictions = Dense(params['num classes'], activation="sigmoid", name="sm_out")(x)
    model = Model(inputs=base_model.input, outputs=predictions)
    for layer in model.layers:
        layer.trainable = True
    model.compile(
        loss="binary_crossentropy",
        optimizer=tf.train.AdamOptimizer(params['learning rate'],
                                         beta1=0.9,
                                         beta2=0.999),
        metrics=["categorical_accuracy"]
    )
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = 0.95
    run_config = tf.estimator.RunConfig(
        session_config=config,
        model_dir=params['output path']
    )
    # Convert to Estimator
    estimator_model = kes.model_to_estimator(
        keras_model=model,
        config=run_config
    )
    return estimator_model

def go_train(params):
    # Create the estimator
    Est = create_estimator(params)
    # Set up Estimator train and evaluation specifications
    train_spec = tf.estimator.TrainSpec(
        input_fn=make_input_fn(params['train csv'], tf.estimator.ModeKeys.TRAIN, params, augment=True),
        max_steps=params['train steps']
    )
    eval_spec = tf.estimator.EvalSpec(
        input_fn=make_input_fn(params['eval csv'], tf.estimator.ModeKeys.EVAL, params, augment=True),
        steps=params['eval steps'],  # Evaluates on "eval steps" batches
        throttle_secs=params['eval_throttle_secs']
    )
    # Run training and evaluation
    tf.estimator.train_and_evaluate(Est, train_spec, eval_spec)
My input module:
import tensorflow as tf
from keras.applications.vgg16 import preprocess_input
tf.logging.set_verbosity(v=tf.logging.INFO)
HEIGHT = 224
WIDTH = 224
NUM_CHANNELS = 3
NCLASSES = 123
def read_and_preprocess_with_augment(image_bytes, label=None):
    return read_and_preprocess(image_bytes, label, augment=True)

def read_and_preprocess(image_bytes, label=None, augment=False):
    image = tf.image.decode_jpeg(contents=image_bytes, channels=NUM_CHANNELS)
    image = tf.image.convert_image_dtype(image=image, dtype=tf.float32)  # 0-1
    image = tf.expand_dims(input=image, axis=0)  # resize_bilinear needs batches
    if augment:
        # Resize to slightly larger than target size
        image = tf.image.resize_bilinear(images=image, size=[HEIGHT + 50, WIDTH + 50], align_corners=False)
        # Image random rotation
        degree_angle = tf.random.uniform((), minval=-25, maxval=25, dtype=tf.dtypes.float32)
        radian = degree_angle * 3.14 / 180
        image = tf.contrib.image.rotate(image, radian, interpolation='NEAREST')
        # remove batch dimension
        image = tf.squeeze(input=image, axis=0)
        # Random Crop
        image = tf.random_crop(value=image, size=[HEIGHT, WIDTH, NUM_CHANNELS])
        # Random L-R flip
        image = tf.image.random_flip_left_right(image=image)
        # Random brightness
        image = tf.image.random_brightness(image=image, max_delta=63.0 / 255.0)
        # Random contrast
        image = tf.image.random_contrast(image=image, lower=0.2, upper=1.8)
    else:
        image = tf.image.resize_bilinear(images=image, size=[HEIGHT, WIDTH], align_corners=False)
        image = tf.squeeze(input=image, axis=0)  # remove batch dimension
    image = tf.cast(tf.round(image * 255), tf.int32)
    image = preprocess_input(image)
    label = tf.one_hot(tf.strings.to_number(label, out_type=tf.int32), depth=NCLASSES)
    return {"input_1": image}, label

def make_input_fn(csv_of_filenames, mode, params, augment=False):
    def _input_fn():
        def decode_csv(csv_row):
            filename, label = tf.decode_csv(records=csv_row, record_defaults=[[""], [""]])
            image_bytes = tf.read_file(filename=filename)
            return image_bytes, label
        # Create tf.data.dataset from filename
        dataset = tf.data.TextLineDataset(filenames=csv_of_filenames).map(map_func=decode_csv, num_parallel_calls=params['num parallel calls'])
        if augment:
            dataset = dataset.map(map_func=read_and_preprocess_with_augment, num_parallel_calls=params['num parallel calls'])
        else:
            dataset = dataset.map(map_func=read_and_preprocess, num_parallel_calls=params['num parallel calls'])
        if mode == tf.estimator.ModeKeys.TRAIN:
            num_epochs = None
            dataset = dataset.shuffle(buffer_size=10 * params["batch size"])
        else:
            num_epochs = 1
        dataset = dataset.repeat(count=num_epochs).batch(batch_size=params["batch size"]).prefetch(4)
        images, labels = dataset.make_one_shot_iterator().get_next()
        return images, labels
    return _input_fn
Error output on PC
As mentioned, running the above code locally on my GPU results in this series of error messages (abbreviated):
Saving checkpoints for 2716 into ....
...
...
File "C:...\estimator.py", line 501, in _evaluate
self._evaluate_build_graph(input_fn, hooks, checkpoint_path))
File "C:...\estimator.py", line 1501, in _evaluate_build_graph
self._call_model_fn_eval(input_fn, self.config))
File "C:...\estimator.py", line 1534, in _call_model_fn_eval
input_fn, ModeKeys.EVAL)
File "C:...\estimator.py", line 1022, in _get_features_and_labels_from_input_fn
self._call_input_fn(input_fn, mode))
File "C:...\estimator.py", line 1113, in _call_input_fn
return input_fn(**kwargs)
File "C:...\input_fn.py", line 71, in _input_fn
dataset = dataset.map(map_func=read_and_preprocess_with_augment, num_parallel_calls=params['num parallel calls'])
File "C:...dataset_ops.py", line 1776, in map
self, map_func, num_parallel_calls, preserve_cardinality=False))
File "C:...\dataset_ops.py", line 3239, in init
**flat_structure(self))
File "C:...\gen_dataset_ops.py", line 4179, in parallel_map_dataset
name=name)
File "C:...\op_def_library.py", line 366, in _apply_op_helper
g = ops._get_graph_from_inputs(_Flatten(keywords.values()))
File "C:...\ops.py", line 6135, in _get_graph_from_inputs
_assert_same_graph(original_graph_element, graph_element)
File "C:...ops.py", line 6071, in _assert_same_graph
(item, original_item))
ValueError: Tensor("Const:0", shape=(3,), dtype=float32) must be from the same graph as Tensor("ParallelMapDataset:0", shape=(), dtype=variant, device=/device:CPU:0).
Error output on Google Cloud
service
The replica master 0 exited with a non-zero status of 1.
Traceback (most recent call last): [...]
File "/usr/local/lib/python3.5/dist-packages/tensorflow_estimator/python/estimator/estimator.py", line 1534, in _call_model_fn_eval input_fn, ModeKeys.EVAL)
File "/usr/local/lib/python3.5/dist-packages/tensorflow_estimator/python/estimator/estimator.py", line 1022, in _get_features_and_labels_from_input_fn self._call_input_fn(input_fn, mode))
File "/usr/local/lib/python3.5/dist-packages/tensorflow_estimator/python/estimator/estimator.py", line 1113, in _call_input_fn return input_fn(**kwargs)
File "/root/.local/lib/python3.5/site-packages/train_model/input_fn.py", line 87, in _input_fn dataset = dataset.map(map_func=read_and_preprocess_with_augment, num_parallel_calls=params['num parallel calls'])
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/data/ops/dataset_ops.py", line 1776, in map self, map_func, num_parallel_calls, preserve_cardinality=False))
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/data/ops/dataset_ops.py", line 3239, in init **flat_structure(self)) File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/ops/gen_dataset_ops.py", line 4179, in parallel_map_dataset name=name) File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/framework/op_def_library.py", line 366, in _apply_op_helper g = ops._get_graph_from_inputs(_Flatten(keywords.values()))
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/framework/ops.py", line 6135, in _get_graph_from_inputs _assert_same_graph(original_graph_element, graph_element)
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/framework/ops.py", line 6071, in _assert_same_graph (item, original_item))
ValueError: Tensor("Const_1:0", shape=(3,), dtype=float32, device=/device:CPU:0) must be from the same graph as Tensor("ParallelMapDataset:0", shape=(), dtype=variant, device=/device:CPU:0).
Any help/hint is much appreciated. I am stuck at this point and do not know how to debug this one!
use this preprocess function:
from tensorflow.keras.applications.mobilenet import preprocess_input
It has the same functionality as VGG's preprocess_input.
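In other words, the only change in your input module would be the import (a sketch, assuming the rest of input_fn.py stays as posted):
# input_fn.py -- swap only the preprocessing import
# from keras.applications.vgg16 import preprocess_input               # old
from tensorflow.keras.applications.mobilenet import preprocess_input  # new
# read_and_preprocess(...) keeps calling preprocess_input(image) unchanged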

Writing training model for CNN

I am writing the training code for TwoStream-IQA, which is a two-stream convolutional neural network. This model predicts a quality score for the patches being assessed through two streams of the network. For the training below, I have used the test dataset provided in the GitHub link above.
The training code is as below:
import os
import time
import numpy as np
import argparse
import chainer
chainer.global_config.train=True
from chainer import cuda
from chainer import serializers
from chainer import optimizers
from chainer import iterators
from chainer import training
from chainer.training import extensions
from PIL import Image
from sklearn.feature_extraction.image import extract_patches
from model import Model
parser = argparse.ArgumentParser(description='train.py')
parser.add_argument('--model', '-m', default='',
                    help='path to the trained model')
parser.add_argument('--gpu', '-g', default=0, type=int, help='GPU ID')
args = parser.parse_args()
model = Model()
cuda.cudnn_enabled = True
cuda.check_cuda_available()
xp = cuda.cupy
model.to_gpu()
## prepare training data
test_label_path = 'data_list/test.txt'
test_img_path = 'data/live/'
test_Graimg_path = 'data/live_grad/'
save_model_path = '/models/nr_sana_2stream.model'
patches_per_img = 256
patchSize = 32
print('-------------Load data-------------')
final_train_set = []
with open(test_label_path, 'rt') as f:
    for l in f:
        line, la = l.strip().split()  # for debug
        tic = time.time()
        full_path = os.path.join(test_img_path, line)
        Grafull_path = os.path.join(test_Graimg_path, line)
        inputImage = Image.open(full_path)
        Graf = Image.open(Grafull_path)
        img = np.asarray(inputImage, dtype=np.float32)
        Gra = np.asarray(Graf, dtype=np.float32)
        img = img.transpose(2, 0, 1)
        Gra = Gra.transpose(2, 0, 1)
        img1 = np.zeros((1, 3, Gra.shape[1], Gra.shape[2]))
        img1[0, :, :, :] = img
        Gra1 = np.zeros((1, 3, Gra.shape[1], Gra.shape[2]))
        Gra1[0, :, :, :] = Gra
        patches = extract_patches(img, (3, patchSize, patchSize), patchSize)
        Grapatches = extract_patches(Gra, (3, patchSize, patchSize), patchSize)
        X = patches.reshape((-1, 3, patchSize, patchSize))
        GraX = Grapatches.reshape((-1, 3, patchSize, patchSize))
        temp_slice1 = [X[int(float(index))] for index in range(256)]
        temp_slice2 = [GraX[int(float(index))] for index in range(256)]
        ##############################################
        for j in range(len(temp_slice1)):
            temp_slice1[j] = xp.array(temp_slice1[j].astype(np.float32))
            temp_slice2[j] = xp.array(temp_slice2[j].astype(np.float32))
            final_train_set.append((
                np.asarray((temp_slice1[j], temp_slice2[j])).astype(np.float32),
                int(la)
            ))
        ##############################################
print('--------------Done!----------------')
print('--------------Iterator!----------------')
train_iter = iterators.SerialIterator(final_train_set, batch_size=4)
optimizer = optimizers.Adam()
optimizer.use_cleargrads()
optimizer.setup(model)
updater = training.StandardUpdater(train_iter, optimizer, device=0)
print('--------------Trainer!----------------')
trainer = training.Trainer(updater, (50, 'epoch'), out='result')
trainer.extend(extensions.LogReport())
trainer.extend(extensions.PrintReport(['epoch', 'iteration', 'main/loss', 'elapsed_time']))
print('--------------Running trainer!----------------')
trainer.run()
But the code produces an error at trainer.run():
-------------Load data-------------
--------------Done!----------------
--------------Iterator!----------------
--------------Trainer!----------------
--------------Running trainer!----------------
Exception in main training loop: Unsupported dtype object
Traceback (most recent call last):
File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/training/trainer.py", line 316, in run
update()
File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/training/updaters/standard_updater.py", line 149, in update
self.update_core()
File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/training/updaters/standard_updater.py", line 154, in update_core
in_arrays = self.converter(batch, self.device)
File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/dataset/convert.py", line 149, in concat_examples
return to_device(device, _concat_arrays(batch, padding))
File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/dataset/convert.py", line 37, in to_device
return cuda.to_gpu(x, device)
File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/backends/cuda.py", line 285, in to_gpu
return _array_to_gpu(array, device_, stream)
File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/backends/cuda.py", line 333, in _array_to_gpu
return cupy.asarray(array)
File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/cupy/creation/from_data.py", line 60, in asarray
return core.array(a, dtype, False)
File "cupy/core/core.pyx", line 2049, in cupy.core.core.array
File "cupy/core/core.pyx", line 2083, in cupy.core.core.array
Will finalize trainer extensions and updater before reraising the exception.
Traceback (most recent call last):
File "<ipython-input-69-12b84b41c6b9>", line 1, in <module>
runfile('/mnt/nas/sanaalamgeer/Projects/1/MyOwnChainer/Two-stream_IQA-master/train.py', wdir='/mnt/nas/sanaalamgeer/Projects/1/MyOwnChainer/Two-stream_IQA-master')
File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/spyder_kernels/customize/spydercustomize.py", line 668, in runfile
execfile(filename, namespace)
File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/spyder_kernels/customize/spydercustomize.py", line 108, in execfile
exec(compile(f.read(), filename, 'exec'), namespace)
File "/mnt/nas/sanaalamgeer/Projects/1/MyOwnChainer/Two-stream_IQA-master/train.py", line 129, in <module>
trainer.run()
File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/training/trainer.py", line 330, in run
six.reraise(*sys.exc_info())
File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/six.py", line 693, in reraise
raise value
File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/training/trainer.py", line 316, in run
update()
File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/training/updaters/standard_updater.py", line 149, in update
self.update_core()
File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/training/updaters/standard_updater.py", line 154, in update_core
in_arrays = self.converter(batch, self.device)
File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/dataset/convert.py", line 149, in concat_examples
return to_device(device, _concat_arrays(batch, padding))
File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/dataset/convert.py", line 37, in to_device
return cuda.to_gpu(x, device)
File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/backends/cuda.py", line 285, in to_gpu
return _array_to_gpu(array, device_, stream)
File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/backends/cuda.py", line 333, in _array_to_gpu
return cupy.asarray(array)
File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/cupy/creation/from_data.py", line 60, in asarray
return core.array(a, dtype, False)
File "cupy/core/core.pyx", line 2049, in cupy.core.core.array
File "cupy/core/core.pyx", line 2083, in cupy.core.core.array
ValueError: Unsupported dtype object
Maybe that's because I am arranging the training data wrong, because the model takes the training parameters as:
length = x_data.shape[0]
x1 = Variable(x_data[0:length:2])
x2 = Variable(x_data[1:length:2])
and y_data as:
t = xp.repeat(y_data[0:length:2], 1)
The variable final_train_set prepares a dataset of tuples (NumPy array, 66), where every NumPy array has dimensions (2, 3, 32, 32) and carries two kinds of patches (3, 32, 32).
I have used the dataset from the GitHub link provided above.
I am a newbie in Chainer. Please help!
In short, you called numpy.asarray inappropriately: numpy.asarray concatenates two numpy.ndarrays, but it does not concatenate two cupy.ndarrays (it produces an object array instead).
Your code in brief:
import numpy, cupy
final_train_set = []
N_PATCH_PER_IMAGE = 8
for i in range(10):
    label = 0
    temp_slice_1 = [numpy.zeros((3, 3)) for j in range(N_PATCH_PER_IMAGE)]
    temp_slice_2 = [numpy.zeros((3, 3)) for j in range(N_PATCH_PER_IMAGE)]
    for j in range(N_PATCH_PER_IMAGE):
        temp_slice_1[j] = cupy.array(temp_slice_1[j])
        temp_slice_2[j] = cupy.array(temp_slice_2[j])
        final_train_set.append(
            [
                # attempting to concatenate two cupy arrays by numpy.asarray
                numpy.asarray([temp_slice_1[j], temp_slice_2[j]]),
                label
            ]
        )
The bug
import numpy as np
import cupy as cp
print("two numpy arrays")
print(np.asarray([np.zeros(shape=(1,)), np.zeros(shape=(1,))]))
print(np.asarray([np.zeros(shape=(1,)), np.zeros(shape=(1,))]).dtype)
print()
print("two cupy arrays")
print(np.asarray([cp.zeros(shape=(1,)), cp.zeros(shape=(1,))]))
print(np.asarray([cp.zeros(shape=(1,)), cp.zeros(shape=(1,))]).dtype)
two numpy arrays
[[0.]
[0.]]
float64
two cupy arrays
[[array(0.)]
[array(0.)]]
object
Solution: comment out two lines
import numpy  # not import cupy here

for i in range(10):
    label = 0
    temp_slice_1 = [numpy.zeros((3, 3)) for j in range(N_PATCH_PER_IMAGE)]
    temp_slice_2 = [numpy.zeros((3, 3)) for j in range(N_PATCH_PER_IMAGE)]
    for j in range(N_PATCH_PER_IMAGE):
        # temp_slice_1[j] = cupy.array(temp_slice_1[j]) <- comment out!
        # temp_slice_2[j] = cupy.array(temp_slice_2[j]) <- comment out!
        final_train_set.append(
            [
                # concatenate two numpy arrays: usually cupy should not be used in dataset
                numpy.asarray([temp_slice_1[j], temp_slice_2[j]]),
                label
            ]
        )
Footnote
In the code you presented, xp is not specified, so you could not get an answer from anyone. Please post the WHOLE BODY of your code, including the model, if you are not able to isolate the problem.
I guess you might not be able to run the training code for another reason. In this code, the data is first brought into main memory during the construction of final_train_set. But if the number of images is huge, main memory would run out and a MemoryError would be raised. (In other words, if the number of images is small and your memory is large enough, the error would not happen.)
In that case, the following references (Chainer at glance and Dataset Abstraction) would help.
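For instance, here is a minimal sketch of that dataset abstraction (the class name, entries list and patch extraction are placeholders, not your actual pipeline): each example is built lazily in get_example instead of materialising final_train_set in memory up front.
import numpy
import chainer

class PatchPairDataset(chainer.dataset.DatasetMixin):
    def __init__(self, entries):
        # entries: list of (image_path, gradient_image_path, label) tuples
        self.entries = entries

    def __len__(self):
        return len(self.entries)

    def get_example(self, i):
        img_path, gra_path, label = self.entries[i]
        # ... read the two images and cut one 32x32 patch from each (placeholder) ...
        patch = numpy.zeros((3, 32, 32), dtype=numpy.float32)
        gra_patch = numpy.zeros((3, 32, 32), dtype=numpy.float32)
        x = numpy.stack((patch, gra_patch)).astype(numpy.float32)  # shape (2, 3, 32, 32)
        return x, numpy.int32(label)

# train_iter = chainer.iterators.SerialIterator(PatchPairDataset(entries), batch_size=4)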
DISCLAIMER: None of this code is written by me
I found this GitHub repository that uses OpenCV, SciPy and a few other modules for the quality assessment. Here is the code:
# Python code for BRISQUE model
# Original paper title: No-Reference Image Quality Assessment in the Spatial Domain
# Link: http://ieeexplore.ieee.org/document/6272356/
import cv2
import numpy as np
from scipy import ndimage
import math
def get_gaussian_filter():
    # NOTE: 'shape' and 'sigma' are expected to be defined elsewhere in the repository (not shown in this snippet)
    [m, n] = [(ss - 1.0) / 2.0 for ss in (shape, shape)]
    [y, x] = np.ogrid[-m:m+1, -n:n+1]
    window = np.exp(-(x*x + y*y) / (2.0*sigma*sigma))
    window[window < np.finfo(window.dtype).eps*window.max()] = 0
    sum_window = window.sum()
    if sum_window != 0:
        window = np.divide(window, sum_window)
    return window

def lmom(X):
    (rows, cols) = X.shape
    if cols == 1:
        X = X.reshape(1, rows)
    n = rows
    X.sort()
    b = np.zeros(3)
    b0 = X.mean()
    for r in range(1, 4):
        Num = np.prod(np.tile(np.arange(r+1, n+1), (r, 1)) - np.tile(np.arange(1, r+1).reshape(r, 1), (1, n-r)), 0)
        Num = Num.astype(np.float)
        Den = np.prod(np.tile(n, (1, r)) - np.arange(1, r+1), 1)
        b[r-1] = 1.0/n * sum(Num/Den * X[0, r:])
    L = np.zeros(4)
    L[0] = b0
    L[1] = 2*b[0] - b0
    L[2] = 6*b[1] - 6*b[0] + b0
    L[3] = 20*b[2] - 30*b[1] + 12*b[0] - b0
    return L

def compute_features(im):
    im = im.astype(np.float)
    window = get_gaussian_filter()
    scalenum = 2
    feat = []
    for itr_scale in range(scalenum):
        mu = cv2.filter2D(im, cv2.CV_64F, window, borderType=cv2.BORDER_CONSTANT)
        mu_sq = mu * mu
        sigma = np.sqrt(abs(cv2.filter2D(im*im, cv2.CV_64F, window, borderType=cv2.BORDER_CONSTANT) - mu_sq))
        structdis = (im - mu) / (sigma + 1)
        structdis_col_vector = np.reshape(structdis.transpose(), (structdis.size, 1))
        L = lmom(structdis.reshape(structdis.size, 1))
        feat = np.append(feat, [L[1], L[3]])
        shifts = [[0, 1], [1, 0], [1, 1], [-1, 1]]
        for itr_shift in shifts:
            shifted_structdis = np.roll(structdis, itr_shift[0], axis=0)
            shifted_structdis = np.roll(shifted_structdis, itr_shift[1], axis=1)
            shifted_structdis_col_vector = np.reshape(shifted_structdis.T, (shifted_structdis.size, 1))
            pair = structdis_col_vector * shifted_structdis_col_vector
            L = lmom(pair.reshape(pair.size, 1))
            feat = np.append(feat, L)
        im = cv2.resize(im, (0, 0), fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC)
    return feat

im = ndimage.imread('example.bmp', flatten=True)
feat = compute_features(im)
print(feat)

TypeError: cannot unpack non-iterable int object

I'm trying to make my first CNN using PyTorch and am following online help and code other people have already written; I am trying to reproduce their results. I'm using the Kaggle Dogs Breed Dataset for this, and below is the error I get. The trainloader does not return my images and labels, and any attempt to get them leads to an error:
Traceback (most recent call last):
File "E:\Program Files\JetBrains\PyCharm Community Edition 2018.2.4\helpers\pydev\pydevd.py", line 1664, in <module>
main()
File "E:\Program Files\JetBrains\PyCharm Community Edition 2018.2.4\helpers\pydev\pydevd.py", line 1658, in main
globals = debugger.run(setup['file'], None, None, is_module)
File "E:\Program Files\JetBrains\PyCharm Community Edition 2018.2.4\helpers\pydev\pydevd.py", line 1068, in run
pydev_imports.execfile(file, globals, locals) # execute the script
File "E:\Program Files\JetBrains\PyCharm Community Edition 2018.2.4\helpers\pydev\_pydev_imps\_pydev_execfile.py", line 18, in execfile
exec(compile(contents+"\n", file, 'exec'), glob, loc)
File "C:/Users/sbzfk/PycharmProjects/my_FCN_attempt/Kaggle_Dogs_Competition.py", line 85, in <module>
img, label = next(iter(train_loader))
File "C:\Users\sbzfk\AppData\Local\Programs\Python\Python37\lib\site-packages\torch\utils\data\dataloader.py", line 314, in __next__
batch = self.collate_fn([self.dataset[i] for i in indices])
File "C:\Users\sbzfk\AppData\Local\Programs\Python\Python37\lib\site-packages\torch\utils\data\dataloader.py", line 314, in <listcomp>
batch = self.collate_fn([self.dataset[i] for i in indices])
File "C:/Users/sbzfk/PycharmProjects/my_FCN_attempt/Kaggle_Dogs_Competition.py", line 42, in __getitem__
img = self.transform(img)
File "C:\Users\sbzfk\AppData\Local\Programs\Python\Python37\lib\site-packages\torchvision\transforms.py", line 34, in __call__
img = t(img)
File "C:\Users\sbzfk\AppData\Local\Programs\Python\Python37\lib\site-packages\torchvision\transforms.py", line 187, in __call__
w, h = img.size
TypeError: cannot unpack non-iterable int object
Below is my code:
class DogsDataset(Dataset):
    def __init__(self, filenames, labels, root_dir, transform=None):
        assert len(filenames) == len(labels)  # if the two are not of equal length throw an error
        self.filenames = filenames
        self.labels = labels
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        return len(self.filenames)

    def __getitem__(self, idx):
        this_img = join(self.root_dir, 'train', self.filenames[idx] + '.jpg')
        print(this_img)
        img = io.imread(this_img)
        label = self.labels[idx]
        print(label)
        if self.transform:
            img = self.transform(img)
        return [img, label]
batch_size = 64
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dataset_root = expanduser(join('~', 'Documents', 'kaggle_dogs_dataset'))
# join will intelligently join directories irrespective of OS, and expanduser will
# replace with /home/ in linux or the username in Windows
csv_file = pd.read_csv(join(dataset_root, 'labels.csv')) # csv file has two columns, id which are filenames and breed which are labels
filenames = csv_file.id.values # convert that column to an array, id is the column name and values converty to numpy array
# le = LabelEncoder()
# labels = le.fit_transform(csv_file.breed) # this will just encode the names between 0 to models-1 , basically changing strings to integers
labels = csv_file.breed.values
filenames_train, filenames_eval, labels_train, labels_eval = train_test_split(filenames, labels,
                                                                              test_size=0.1, stratify=labels)  # this is an import from sklearn as the name implies, it randomly splits data into train and eval, 10% of it to test and rest train
data_transform = transforms.Compose([transforms.Scale(224),
                                     transforms.CenterCrop(224),
                                     transforms.ToTensor(),
                                     transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
dog_train = DogsDataset(filenames_train, labels_train, dataset_root, transform=data_transform)
train_loader = DataLoader(dog_train, batch_size, shuffle=True)
dog_eval = DogsDataset(filenames_eval, labels_eval, dataset_root, transform=data_transform)
eval_loader = DataLoader(dog_eval, batch_size, shuffle=True)
def im_show(axis, inp):
    """Denormalize and show"""
    inp = inp.numpy().transpose((1, 2, 0))
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    inp = std * inp + mean
    axis.imshow(inp)
img, label = next(iter(train_loader))
print(img.size(), label.size())
fig = plt.figure(1, figsize=(16, 4))
grid = ImageGrid(fig, 111, nrows_ncols=(1, 4), axes_pad=0.05)
for i in range(img.size()[0]):
    ax = grid[i]
    im_show(ax, img[i])
I've tried debugging it line by line; with transform=None I seem to read all the images, but with transform=data_transform I get this error.
It seems like you are using torchvision's image transforms. Some of these transforms expect a PIL.Image object as input, rather than a tensor or numpy array.
You are using io.imread to read the image file, and I suspect this io is not PIL.Image, so it results in a numpy array.
Make sure you pass PIL.Image objects to the transforms and that your DogsDataset returns a 3D tensor for the image (C-H-W shaped).
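For example, a sketch of the dataset loading the file with PIL instead of io.imread (keeping your filenames/labels/root_dir setup), so the transforms receive a PIL.Image and ToTensor() produces the 3D C-H-W tensor:
from os.path import join
from PIL import Image
from torch.utils.data import Dataset

class DogsDataset(Dataset):
    def __init__(self, filenames, labels, root_dir, transform=None):
        self.filenames = filenames
        self.labels = labels
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        return len(self.filenames)

    def __getitem__(self, idx):
        this_img = join(self.root_dir, 'train', self.filenames[idx] + '.jpg')
        img = Image.open(this_img).convert('RGB')  # PIL.Image, as the transforms expect
        label = self.labels[idx]
        if self.transform:
            img = self.transform(img)  # ends with ToTensor(), so a C-H-W float tensor
        return img, label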

AttributeError: 'Tensor' object has no attribute '_keras_shape'

I'm trying to run the code below to generate a JSON file and use it to build a t-SNE with a set of images. However, my experience with Keras and machine learning is limited, and I'm unable to run the code below; I'm getting the error: AttributeError: 'Tensor' object has no attribute '_keras_shape'
import argparse
import sys
import numpy as np
import json
import os
from os.path import isfile, join
import keras
from keras.preprocessing import image
from keras.applications.imagenet_utils import decode_predictions, preprocess_input
from keras.models import Model
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from scipy.spatial import distance
def process_arguments(args):
    parser = argparse.ArgumentParser(description='tSNE on audio')
    parser.add_argument('--images_path', action='store', help='path to directory of images')
    parser.add_argument('--output_path', action='store', help='path to where to put output json file')
    parser.add_argument('--num_dimensions', action='store', default=2, help='dimensionality of t-SNE points (default 2)')
    parser.add_argument('--perplexity', action='store', default=30, help='perplexity of t-SNE (default 30)')
    parser.add_argument('--learning_rate', action='store', default=150, help='learning rate of t-SNE (default 150)')
    params = vars(parser.parse_args(args))
    return params

def get_image(path, input_shape):
    img = image.load_img(path, target_size=input_shape)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    return x

def find_candidate_images(images_path):
    """
    Finds all candidate images in the given folder and its sub-folders.
    Returns:
        images: a list of absolute paths to the discovered images.
    """
    images = []
    for root, dirs, files in os.walk(images_path):
        for name in files:
            file_path = os.path.abspath(os.path.join(root, name))
            if ((os.path.splitext(name)[1]).lower() in ['.jpg', '.png', '.jpeg']):
                images.append(file_path)
    return images
def analyze_images(images_path):
    # make feature_extractor
    model = keras.applications.VGG16(weights='imagenet', include_top=True)
    feat_extractor = Model(inputs=model.input, outputs=model.get_layer("fc2").output)
    input_shape = model.input_shape[1:3]
    # get images
    candidate_images = find_candidate_images(images_path)
    # analyze images and grab activations
    activations = []
    images = []
    for idx, image_path in enumerate(candidate_images):
        file_path = join(images_path, image_path)
        img = get_image(file_path, input_shape)
        if img is not None:
            print("getting activations for %s %d/%d" % (image_path, idx, len(candidate_images)))
            acts = feat_extractor.predict(img)[0]
            activations.append(acts)
            images.append(image_path)
    # run PCA first
    print("Running PCA on %d images..." % len(activations))
    features = np.array(activations)
    pca = PCA(n_components=300)
    pca.fit(features)
    pca_features = pca.transform(features)
    return images, pca_features
def run_tsne(images_path, output_path, tsne_dimensions, tsne_perplexity, tsne_learning_rate):
    images, pca_features = analyze_images(images_path)
    print("Running t-SNE on %d images..." % len(images))
    X = np.array(pca_features)
    tsne = TSNE(n_components=tsne_dimensions, learning_rate=tsne_learning_rate, perplexity=tsne_perplexity, verbose=2).fit_transform(X)
    # save data to json
    data = []
    for i, f in enumerate(images):
        point = [float((tsne[i, k] - np.min(tsne[:, k])) / (np.max(tsne[:, k]) - np.min(tsne[:, k]))) for k in range(tsne_dimensions)]
        data.append({"path": os.path.abspath(join(images_path, images[i])), "point": point})
    with open(output_path, 'w') as outfile:
        json.dump(data, outfile)

if __name__ == '__main__':
    params = process_arguments(sys.argv[1:])
    images_path = params['images_path']
    output_path = params['output_path']
    tsne_dimensions = int(params['num_dimensions'])
    tsne_perplexity = int(params['perplexity'])
    tsne_learning_rate = int(params['learning_rate'])
    run_tsne(images_path, output_path, tsne_dimensions, tsne_perplexity, tsne_learning_rate)
    print("finished saving %s" % output_path)
from: https://github.com/ml4a/ml4a-ofx/blob/master/scripts/tSNE-images.py
Here is what I'm getting:
Traceback (most recent call last):
File "tSNE-images.py", line 95, in <module>
run_tsne(images_path, output_path, tsne_dimensions, tsne_perplexity, tsne_learning_rate)
File "tSNE-images.py", line 75, in run_tsne
images, pca_features = analyze_images(images_path)
File "tSNE-images.py", line 50, in analyze_images
feat_extractor = Model(inputs=model.input, outputs=model.get_layer("fc2").output)
File "/Users/.../anaconda3/lib/python3.6/site-packages/keras/legacy/interfaces.py", line 91, in wrapper
return func(*args, **kwargs)
File "/Users/.../anaconda3/lib/python3.6/site-packages/keras/engine/network.py", line 91, in __init__
self._init_graph_network(*args, **kwargs)
File "/Users/.../anaconda3/lib/python3.6/site-packages/keras/engine/network.py", line 251, in _init_graph_network
input_shapes=[x._keras_shape for x in self.inputs],
File "/Users/.../anaconda3/lib/python3.6/site-packages/keras/engine/network.py", line 251, in <listcomp>
input_shapes=[x._keras_shape for x in self.inputs],
AttributeError: 'Tensor' object has no attribute '_keras_shape'
I found a similar error here:
https://stackoverflow.com/questions/47616588/keras-throws-tensor-object-has-no-attribute-keras-shape-when-splitting-a
However, I can't seem to figure out how to go about updating the code using Lambda. How can I solve this error?
I followed user2300867's suggestion and updated tensorflow with:
pip3 install --upgrade tensorflow-gpu
and updated keras to 2.2.4
pip install Keras==2.2.4
I still got an error:
TypeError: expected str, bytes or os.PathLike object, not NoneType
but this was easy to fix by simply editing the code for local paths
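In case it helps anyone else, the script can also be pointed at the folders through its command-line flags instead of editing the code (placeholder paths; the flags are the ones defined in process_arguments above):
python tSNE-images.py --images_path /path/to/images --output_path /path/to/points.json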
