I'm trying to load images into a Jupyter notebook to use them with TensorFlow. I'm using the code below:
import numpy as np
from pathlib import Path
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import tensorflow as tf
from PIL import Image
import os
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Conv2D, MaxPool2D, Dense,Flatten, Dropout
data = []
labels = []
classes = 43
cur_path = os.getcwd()

for i in range(classes):
    path = os.path.join(cur_path, 'Dataset\\Train', str(i))
    images = os.listdir(path)
    for a in images:
        try:
            image = Image.open(path + '\\' + a)
            image.resize(30,30)
            image.show()
            image = np.array(image)
            data.append(image)
            labels.append(i)
        except:
            print("error loading image")

data = np.array(data)
labels = np.array(labels)
Unfortunately, I'm getting this error message: error loading image
Does anyone have any idea?
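The bare except hides the real exception, so a first debugging step (a sketch, not from the original post) is to print the underlying error instead of a fixed message:

for a in images:
    try:
        image = Image.open(path + '\\' + a)
        image = np.array(image)
        data.append(image)
        labels.append(i)
    except Exception as e:
        # surface the actual failure (bad path, truncated file, wrong mode) instead of swallowing it
        print("error loading image:", a, "->", e)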
Update: I removed the try and except:
for a in images:
    image = Image.open(path + '\\' + a)
    image.resize(30,30)
    display(image)
    image = np.array(image)

data = np.array(data)
labels = np.array(labels)
error:

ValueError                                Traceback (most recent call last)
<ipython-input> in <module>
      2
      3 image = Image.open(path + '\\' + a)
----> 4 image.resize(30,30)
      5 display(image)
      6 image = np.array(image)

~\anaconda3\lib\site-packages\PIL\Image.py in resize(self, size, resample, box, reducing_gap)
   1883                 )
   1884             ]
-> 1885             raise ValueError(
   1886                 message + " Use " + ", ".join(filters[:-1]) + " or " + filters[-1]
   1887             )

ValueError: Unknown resampling filter (30). Use Image.NEAREST (0),
Image.LANCZOS (1), Image.BILINEAR (2), Image.BICUBIC (3), Image.BOX (4) or
Image.HAMMING (5)
Solved the ValueError after a few changes. Image.resize takes a single (width, height) tuple and returns a new image, so image.resize(30,30) was passing the second 30 as the resampling filter (hence the error), and the resized result was being discarded anyway:
from IPython.display import display

for a in images:
    try:
        image = Image.open(path + '\\' + a)
        image = image.resize((30,30))
        display(image)
        image = np.array(image)
        data.append(image)
        labels.append(i)
    except:
        print("error loading image")

data = np.array(data)
labels = np.array(labels)
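With data and labels built, a typical next step (a sketch using the train_test_split and to_categorical imports already at the top of the post; the split ratio is illustrative):

X_train, X_test, y_train, y_test = train_test_split(
    data, labels, test_size=0.2, random_state=42)
# one-hot encode the 43 class ids for a softmax output layer
y_train = to_categorical(y_train, classes)
y_test = to_categorical(y_test, classes)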
Thank you, basic mojo!
Related
I am trying to create a dataset for Tesseract, but I'm unable to do so. The following code should output a CSV file containing each image path and image label, plus an .npz feature file per image, but the code does not append any files to the CSV.
import numpy as np
import os
from tensorflow.keras.preprocessing.image import img_to_array, load_img
import pandas as pd
image_dataset_dir = "datasets/images"
new_dataset_folder = "datasets/new"
dataset = {
    "image": [],
    "label": []
}
for label in os.listdir(image_dataset_dir):
    images_dir = image_dataset_dir + "/" + label
    if not os.path.isdir(images_dir):
        continue
    for image_file in os.listdir(images_dir):
        # if not image_file.endswith((".jpg", ".png", ".tiff")):
        #     continue
        img = load_img(os.path.join(image_dataset_dir, label, image_file))
        x = img_to_array(img)

        rel_path = label + "/" + os.path.splitext(image_file)[0] + '.npz'
        os.makedirs(new_dataset_folder + "/" + label, exist_ok=True)
        npz_file = os.path.join(new_dataset_folder, rel_path)
        np.savez(npz_file, x)
        # print(rel_path)
        dataset["image"].append(rel_path)
        dataset["label"].append(label)

df = pd.DataFrame(dataset)
df.to_csv(os.path.join(new_dataset_folder, "train.csv"), index=False)
print('Dataset converted to npz and saved here at %s' % new_dataset_folder)
df.head()
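Before the loop, it is worth confirming the script can actually see the class folders (a debugging sketch; with a wrong working directory the outer loop has nothing to iterate over and the CSV stays empty):

print(os.path.abspath(image_dataset_dir))
print(os.listdir(image_dataset_dir))  # should print one folder name per label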
Your objective is to create the files and save the output and their values; .npz is NumPy's zipped archive format. Sample below, using pandas (a DataFrame, per your requirements) and TensorFlow:
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
BATCH_SIZE = 1
IMG_SIZE = (32, 32)
new_dataset_folder = "F:\\temp\\Python\\excel"
PATH = 'F:\\datasets\\downloads\\cats_name'
train_dir = os.path.join(PATH, 'train')
validation_dir = os.path.join(PATH, 'validation')
train_dataset = tf.keras.utils.image_dataset_from_directory(train_dir,
                                                            shuffle=True,
                                                            batch_size=BATCH_SIZE,
                                                            image_size=IMG_SIZE)
class_names = train_dataset.class_names
print( 'class_names: ' + str( class_names ) )
print( train_dataset )
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Dataset
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
dataset = {
    "image": [],
    "label": []
}

file_order = 0
for data in train_dataset:
    # save the image tensor as an .npz archive so the path recorded in the
    # CSV points at a real file; name by running index to keep paths unique
    file_path = new_dataset_folder + "\\" + str(file_order) + ".npz"
    dataset["image"].append(file_path)
    dataset["label"].append(str(int(data[1][0])))
    np.savez(file_path, data[0].numpy())
    file_order = file_order + 1

df = pd.DataFrame(dataset)
df.to_csv(os.path.join(new_dataset_folder, "train.csv"), index=False)
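To verify one of the saved archives, read it back with np.load ('arr_0' is NumPy's default key for an unnamed array passed to np.savez):

arrs = np.load(dataset["image"][0])
x = arrs["arr_0"]
print(x.shape)  # (1, 32, 32, 3) for this BATCH_SIZE and IMG_SIZE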
My dataset is represented by a CSV file with two attributes: an image path and its label. I have dozens of different labels, but the label '51' represents around 34% of the dataset and the label '13' around 41%, so these two labels alone make up about 3/4 of the entire dataset, and my classifier ends up classifying everything as '13' (I don't think I've ever seen it classify anything as '51'; could this be a problem?). How can I deal with this?
I'll leave the code that I currently have here:
import cv2 as cv
import numpy as np
import sys
sys.path.extend(['../../'])
from src import utils
if __name__ == '__main__':
    DICTIONARY_SIZE = 50
    TRAIN_SIZE = 100
    TEST_SIZE = 100

    DETECTOR = cv.KAZE_create()
    MATCHER = cv.FlannBasedMatcher()
    EXTRACTOR = cv.BOWImgDescriptorExtractor(DETECTOR, MATCHER)
    TRAINER = cv.BOWKMeansTrainer(DICTIONARY_SIZE)

    SVM = cv.ml.SVM_create()
    SVM.setType(cv.ml.SVM_C_SVC)
    SVM.setKernel(cv.ml.SVM_LINEAR)
    SVM.setTermCriteria((cv.TERM_CRITERIA_MAX_ITER, 100, 1e-6))

    print("Generating Training and Test Sets...")
    train, test = utils.getTrainingAndTestSets('multiclass.csv', TRAIN_SIZE, TEST_SIZE)

    print("Generating Dictionary...")
    for train_entry in train:
        img_path = train_entry[0]
        img = cv.imread(img_path)
        img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        keypoint, descriptors = DETECTOR.detectAndCompute(img, None)
        if descriptors is not None:
            TRAINER.add(descriptors)
    EXTRACTOR.setVocabulary(TRAINER.cluster())

    print("Preparing Training Data...")
    train_desc = []
    train_labels = []
    for train_entry in train:
        img_path = train_entry[0]
        img_label = int(train_entry[1])
        img = cv.imread(img_path)
        img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        descriptor = EXTRACTOR.compute(img, DETECTOR.detect(img))
        if descriptor is not None:
            train_desc.extend(descriptor)
            train_labels.append(img_label)

    print("Training...")
    SVM.train(np.array(train_desc), cv.ml.ROW_SAMPLE, np.array(train_labels))

    correct_predictions = 0
    samples_tested = len(test)

    print("Testing...")
    for test_entry in test:
        img_path = test_entry[0]
        real_attribute_id = int(test_entry[1])
        img = cv.imread(img_path)
        img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        feature = EXTRACTOR.compute(img, DETECTOR.detect(img))
        try:
            _, prediction = SVM.predict(feature)
            predicted_attribute_id = int(prediction[0][0])
            if predicted_attribute_id == real_attribute_id:
                print("CORRECT PREDICTION! :)")
                correct_predictions += 1
            else:
                print("INCORRECT PREDICTION... :(")
                print("Predicted Label: " + utils.getLabelFromAttributeID(predicted_attribute_id) + "(" + str(predicted_attribute_id) + ")")
                print("Real Label: " + utils.getLabelFromAttributeID(real_attribute_id) + "(" + str(real_attribute_id) + ")")
        except Exception:
            samples_tested -= 1

    correct_percentage = (correct_predictions / samples_tested) * 100
    print("Test Results: " + "{:.2f}".format(correct_percentage) + "% Correct Predictions.")
Feel free to tell me if my current approach has any error. Thanks.
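Regarding the imbalance itself, one common remedy (a sketch, not part of the original post; it assumes the CSV has no header and contains path,label rows) is to rebalance the training sample by capping every class at a fixed size:

import pandas as pd

df = pd.read_csv('multiclass.csv', names=['path', 'label'])
cap = 200  # illustrative per-class ceiling
balanced = (df.groupby('label', group_keys=False)
              .apply(lambda g: g.sample(min(len(g), cap), random_state=42)))
balanced.to_csv('multiclass_balanced.csv', index=False, header=False)

Alternatively, OpenCV's SVM can penalize mistakes on rare classes more heavily via SVM.setClassWeights.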
I am trying to replicate this code from a COVIDNet repository in Keras.
I fully ran the first notebook in the repo, but in the second one, I get an error.
import cv2
import numpy as np
import matplotlib.pyplot as plt
import os
# Set parameters here
INPUT_SIZE = (224, 224)
mapping = {'normal': 0, 'bacteria': 1, 'viral': 2, 'COVID-19': 3}
train_filepath = 'train_split.txt'
test_filepath = 'test_split.txt'
# load in the train and test files
file = open(train_filepath, 'r')
trainfiles = file.readlines()
file = open(test_filepath, 'r')
testfiles = file.readlines()
# Print
print('Total samples for train: ', len(trainfiles))
print('Total samples for test: ', len(testfiles))
Total samples for train: 72696
Total samples for test: 6255
# resize to input size and normalize to 0 - 1
x_train = []
x_test = []
y_train = []
y_test = []
for i in range(len(testfiles)):
    test_i = testfiles[i].split()
    imgpath = test_i[1]
    img = cv2.imread(os.path.join('data', 'test', imgpath))
    img = cv2.resize(img, INPUT_SIZE)  # resize
    img = img.astype('float32') / 255.0
    x_test.append(img)
    y_test.append(mapping[test_i[2]])

print('Shape of test images: ', x_test[0].shape)

for i in range(len(trainfiles)):
    train_i = trainfiles[i].split()
    imgpath = train_i[1]
    img = cv2.imread(os.path.join('data', 'train', imgpath))
    img = cv2.resize(img, INPUT_SIZE)  # resize
    img = img.astype('float32') / 255.0
    x_train.append(img)
    y_train.append(mapping[train_i[2]])

print('Shape of train images: ', x_train[0].shape)
Error:

error                                     Traceback (most recent call last)
<ipython-input-31-5fa13deb65a6> in <module>()
     10 imgpath = test_i[1]
     11 img = cv2.imread(os.path.join('data', 'test', imgpath))
---> 12 img = cv2.resize(img, INPUT_SIZE) # resize
     13 img = img.astype('float32') / 255.0
     14 x_test.append(img)
There are multiple questions about similar errors, but I do not see how to fix it in my case. Thanks!
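A frequent cause of this particular cv2.resize failure (an assumption here, since the traceback does not show the image value) is that cv2.imread returns None for a missing or unreadable path, and resize then fails on the None input. A defensive version of the test-loop body:

img = cv2.imread(os.path.join('data', 'test', imgpath))
if img is None:
    # imread does not raise on bad paths; it silently returns None
    print('Could not read image:', imgpath)
    continue
img = cv2.resize(img, INPUT_SIZE)  # resize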
I'm creating a heatmap for a CNN as per this tutorial.
In the last part:
def create_patiens_cam(case, plane):
    patient_id = case['id']
    mri = case['mri']
    folder_path = f'./CAMS/{plane}/{patient_id}/'
    if os.path.isdir(folder_path):
        shutil.rmtree(folder_path)
    os.makedirs(folder_path)
    os.makedirs(folder_path + 'slices/')
    os.makedirs(folder_path + 'cams/')

    params = list(mrnet.parameters())
    weight_softmax = np.squeeze(params[-2].cpu().data.numpy())

    num_slices = mri.shape[1]
    global feature_blobs
    feature_blobs = []
    mri = mri.to(device)
    logit = mrnet(mri)
    size_upsample = (256, 256)
    feature_conv = feature_blobs[0]

    h_x = F.softmax(logit, dim=1).data.squeeze(0)
    probs, idx = h_x.sort(0, True)
    probs = probs.cpu().numpy()
    idx = idx.cpu().numpy()
    slice_cams = returnCAM(feature_blobs[-1], weight_softmax, idx[:1])

    for s in tqdm_notebook(range(num_slices), leave=False):
        slice_pil = (transforms
                     .ToPILImage()(mri.cpu()[0][s] / 255))
        slice_pil.save(folder_path + f'slices/{s}.png',
                       dpi=(300, 300))

        img = mri[0][s].cpu().numpy()
        img = img.transpose(1, 2, 0)
        heatmap = (cv2
                   .cvtColor(cv2.applyColorMap(
                       cv2.resize(slice_cams[s], (256, 256)),
                       cv2.COLORMAP_JET),
                       cv2.COLOR_BGR2RGB))
        result = heatmap * 0.3 + img * 0.5
        pil_img_cam = Image.fromarray(np.uint8(result))
        pil_img_cam.save(folder_path + f'cams/{s}.png', dpi=(300, 300))
I have created a folder "CAMS" in my 'mrnet' folder. However, when I run this last cell in Jupyter Notebook, it raises no errors, yet no PNGs are created. Does anyone have an idea what could be wrong, or where I could look to find out, given that there are no errors?
FULL CODE:
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 13 21:54:40 2021
#author: GlaDOS
"""
import os
import io
import requests
from PIL import Image
from torchvision import models, transforms
from torch.autograd import Variable
from torch.nn import functional as F
import numpy as np
import cv2
import pdb
from matplotlib import pyplot as plt
import sys
sys.path.append('C:/Users/GlaDOS/mrnet')
import shutil
import torch
import model
from dataloader import MRDataset
from tqdm import tqdm_notebook
task = 'acl'
plane = 'sagittal'
prefix = 'sag'
model_name = [name for name in os.listdir('C:/Users/GlaDOS/mrnet/models/')
              if (task in name) and
                 (plane in name) and
                 (prefix in name)][0]
is_cuda = torch.cuda.is_available()
device = torch.device("cuda" if is_cuda else "cpu")
mrnet = torch.load(f'C:/Users/GlaDOS/mrnet/models/{model_name}')
mrnet = mrnet.to(device)
_ = mrnet.eval()
dataset = MRDataset('C:/Users/GlaDOS/mrnet/data/',
                    task,
                    plane,
                    transform=None,
                    train=False)

loader = torch.utils.data.DataLoader(dataset,
                                     batch_size=1,
                                     shuffle=False,
                                     num_workers=0,
                                     drop_last=False)
def returnCAM(feature_conv, weight_softmax, class_idx):
    size_upsample = (256, 256)
    bz, nc, h, w = feature_conv.shape
    slice_cams = []
    for s in range(bz):
        for idx in class_idx:
            cam = weight_softmax[idx].dot(feature_conv[s].reshape((nc, h*w)))
            cam = cam.reshape(h, w)
            cam = cam - np.min(cam)
            cam_img = cam / np.max(cam)
            cam_img = np.uint8(255 * cam_img)
            slice_cams.append(cv2.resize(cam_img, size_upsample))
    return slice_cams

patients = []

for i, (image, label, _) in tqdm_notebook(enumerate(loader), total=len(loader)):
    patient_data = {}
    patient_data['mri'] = image
    patient_data['label'] = label[0][0][1].item()
    patient_data['id'] = '0' * (4 - len(str(i))) + str(i)
    patients.append(patient_data)
acl = list(filter(lambda d: d['label'] == 1, patients))
def create_patiens_cam(case, plane):
    patient_id = case['id']
    mri = case['mri']
    folder_path = f'C:/Users/GlaDOS/mrnet/cams/{plane}/{patient_id}/'
    if os.path.isdir(folder_path):
        shutil.rmtree(folder_path)
    os.makedirs(folder_path)
    os.makedirs(folder_path + 'slices/')
    os.makedirs(folder_path + 'cams/')

    params = list(mrnet.parameters())
    weight_softmax = np.squeeze(params[-2].cpu().data.numpy())

    num_slices = mri.shape[1]
    global feature_blobs
    feature_blobs = []
    mri = mri.to(device)
    logit = mrnet(mri)
    size_upsample = (256, 256)
    feature_conv = feature_blobs[0]

    h_x = F.softmax(logit, dim=1).data.squeeze(0)
    probs, idx = h_x.sort(0, True)
    probs = probs.cpu().numpy()
    idx = idx.cpu().numpy()
    slice_cams = returnCAM(feature_blobs[-1], weight_softmax, idx[:1])

    for s in tqdm_notebook(range(num_slices), leave=False):
        slice_pil = (transforms
                     .ToPILImage()(mri.cpu()[0][s] / 255))
        slice_pil.save(folder_path + f'slices/{s}.png',
                       dpi=(300, 300))

        img = mri[0][s].cpu().numpy()
        img = img.transpose(1, 2, 0)
        heatmap = (cv2
                   .cvtColor(cv2.applyColorMap(
                       cv2.resize(slice_cams[s], (256, 256)),
                       cv2.COLORMAP_JET),
                       cv2.COLOR_BGR2RGB))
        result = heatmap * 0.3 + img * 0.5
        pil_img_cam = Image.fromarray(np.uint8(result))
        pil_img_cam.save(folder_path + f'cams/{s}.png', dpi=(300, 300))
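One thing stands out in the code as posted: create_patiens_cam is defined but never called anywhere in the script, which would explain why no PNGs appear despite the absence of errors. A minimal driver (a sketch; it uses the acl list and plane variable defined above):

for case in acl:
    create_patiens_cam(case, plane)

Note also that nothing in the posted script ever appends to feature_blobs (the tutorial presumably registers a forward hook for that), so once the function is actually called it will fail at feature_blobs[0] until that hook is in place.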
Use seaborn: seaborn plotting functions return a Matplotlib Axes, so save through its figure, e.g.:

import seaborn as sns

sns_plot = sns.heatmap(result)  # or whichever seaborn plot you are using
sns_plot.get_figure().savefig('output.png')
Below is my code. When I run it I am getting this error:
recognizer.train(x_train, np.array(y_labels))
TypeError: src is not a numpy array, neither a scalar
I have tried using recognizer.train(x_train, y_labels) instead of recognizer.train(x_train, np.array(y_labels)), but I still get an error.
import cv2
import os
from PIL import Image, ImageEnhance
import numpy as np
import pickle
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
image_dir = os.path.join(BASE_DIR, "images")
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
path = 'images'
recognizer = cv2.face.LBPHFaceRecognizer_create()
current_id = 0
label_ids = {}
y_labels = []
x_train = []
for root, dirs, files in os.walk(image_dir):
    for file in files:
        if file.endswith("png") or file.endswith("jpg"):
            path = os.path.join(root, file)
            label = os.path.basename(os.path.dirname(path)).replace(" ", "_").lower()
            #print(label, path)
            if not label in label_ids:
                label_ids[label] = current_id
                current_id += 1
            id_ = label_ids[label]
            #print(label_ids)

            y_labels.append(label)
            x_train.append(path)

            pil_image = Image.open(path).convert("L")
            #contrast_enhancer = ImageEnhance.Contrast(pil_image)
            #pil_enhanced_image = contrast_enhancer.enhance(2)
            #enhanced_image = np.asarray(pil_enhanced_image)
            size = (550, 550)
            final_image = pil_image.resize(size, Image.ANTIALIAS)
            image_array = np.array(final_image)
            #print(image_array)

            faces = face_cascade.detectMultiScale(image_array, scaleFactor=1.3, minNeighbors=5)
            for (x, y, w, h) in faces:
                roi = image_array[y:y+h, x:x+w]
                x_train.append(roi)
                y_labels.append(id_)

#print(y_labels)
#print(x_train)
#x_train = np.asarray(x_train)

with open("labels.pickle", "wb") as f:
    pickle.dump(label_ids, f)

recognizer.train(x_train, np.array(y_labels))
recognizer.save("trainner.yml")
You may try this; it worked for me:
recognizer.train(x_train, y_labels)
or
recognizer.update(x_train, y_labels)
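Separately from the train/update call, note what the posted loop collects: y_labels.append(label) and x_train.append(path) put the string label and the string file path into the same lists that later receive the NumPy face ROI and the integer id. LBPH training expects a list of grayscale image arrays plus an integer label array, so those string entries are a likely source of the "src is not a numpy array" TypeError. A sketch of the fix is to delete those two appends and keep only the ROI loop:

# drop these two lines from the file loop:
#     y_labels.append(label)
#     x_train.append(path)
# so the lists contain only image arrays and integer ids:
for (x, y, w, h) in faces:
    roi = image_array[y:y+h, x:x+w]  # grayscale face region (NumPy array)
    x_train.append(roi)
    y_labels.append(id_)             # integer class id

recognizer.train(x_train, np.array(y_labels))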