Encode folder labels stored in a numpy array in Python

I'm working on a Parkinson's dataset.
In my dataset folder there are two folders (the code below works on the 'wave' one).
Inside each of those there are two further folders, one per class ('healthy' and 'parkinson'), which contain the images; but that's really a detail.
In my code I'm doing feature extraction and label extraction. Here's my attempt (I've used the split function to get the name of the parent folder, as you can see where I split the path on os.path.sep):
from imutils import paths
import numpy as np
import sys
import cv2
import os
import mahotas as mt

data = []
np.set_printoptions(threshold=sys.maxsize)
pathswave = r'C:\Users\Bsi\Desktop\PFE2\Base2\dataset\wave'
imagePaths = list(paths.list_images(pathswave))

for imagePath in imagePaths:
    # the parent folder name ('healthy' or 'parkinson') is the label
    label = imagePath.split(os.path.sep)[-2]
    image = cv2.imread(imagePath)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    image = cv2.blur(image, (3, 3))
    image = cv2.resize(image, (200, 200))
    # Haralick texture features, averaged over the four directions
    textures = mt.features.haralick(image)
    feat = textures.mean(axis=0)
    data.append(feat)
    data.append(label)

print(np.array(data))
Here's a portion of the output:
Now, is there any way to convert the two labels, 'parkinson' and 'healthy', into two distinct integers, preferably 0 and 1 (1 being 'parkinson')?

Three numbers, in case it's neither :)
x = imagePath.split(os.path.sep)[-2]
label = '0' if x == 'healthy' else '1' if x == 'parkinsons' else '-1'

You might want to use a dictionary:
my_dict = {'healthy': 0, 'parkinsons': 1}
You can later get 0 or 1 by looking the label up with get:
my_number = my_dict.get(label)
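For example, a small sketch (not from the original answer; the labels list here is only illustrative) of turning the collected string labels into a numpy integer array:
import numpy as np
my_dict = {'healthy': 0, 'parkinsons': 1}
labels = ['healthy', 'parkinsons', 'healthy']             # e.g. gathered inside the loop above
encoded = np.array([my_dict.get(l, -1) for l in labels])  # unknown names map to -1
print(encoded)  # [0 1 0]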

Related

Stitching multiple PNGs into an h5 image with h5py

I created a model in Blender. From it I took 2D slices through the y-plane, which gave me the following:
600 PNG files, each corresponding to a y location, i.e. y=0, y=0.1, etc.
Each PNG file has a resolution of 500 x 600.
I am now trying to merge the 600 PNGs into an h5 file using Python, before loading the .h5 into some software. Each individual PNG is read fine and looks great. However, when I look at the final 3D image there is some stretching, and I'm not sure how it is being introduced.
The images are resized (from 600x600 to 500x600, but I have checked and this is not the cause of the stretching). I would like to know why I am introducing such stretching in the other planes (not the y-plane).
Here is my code. Please note that there is some work in progress here, which is why I append the dataset to a list (it is used by later code):
from PIL import Image
import sys
import os
import fnmatch
import h5py
import numpy as np
import cv2
from datetime import datetime

dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + '//..//..')

Xlen = 500
Ylen = 600
Zlen = 600
directory = dir_path + "/LowPolyA21/"

# resize every png slice to Zlen x Xlen before stacking
for filename in os.listdir(directory):
    if fnmatch.fnmatch(filename, '*.png'):
        image = Image.open(directory + filename)
        new_image = image.resize((Zlen, Xlen))
        new_image.save(directory + filename)

dataset = np.zeros((Xlen, Zlen, Ylen), float)  # np.float is deprecated; use the builtin float
# traverse all the pictures under the specified address
cnt_num = 0
img_list = sorted(os.listdir(directory))
os.chdir(directory)
for img in img_list:
    if img.endswith(".png"):
        gray_img = cv2.imread(img, 0)
        dataset[:, :, cnt_num] = gray_img
        cnt_num += 1

dataset[dataset == 0] = -1
dataset = dataset.swapaxes(1, 2)

datasetlist = []
datasetlist.append(dataset)
i = 0  # only one dataset in the list for now; later code will loop over several

dz_dy_dz = (float(0.001), float(0.001), float(0.001))
# clip every value above 1 down to 1
for j in range(Xlen):
    for k in range(Ylen):
        for l in range(Zlen):
            if datasetlist[i][j, k, l] > 1:
                datasetlist[i][j, k, l] = 1

now = datetime.now()
timestamp = now.strftime("%d%m%Y_%H%M%S%f")
out_h5_path = 'voxelA_' + timestamp + '_flipped'
out_h5_path2 = 'voxelA_' + timestamp + '_flipped.h5'

with h5py.File(out_h5_path2, 'w') as f:
    f.attrs['dx_dy_dz'] = dz_dy_dz
    f['data'] = datasetlist[i]  # write the data under the file's primary key 'data'
Example of image without stretching (in y-plane)
Example of image with stretching (in x-plane)
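Since the question walks through stacking slices into a volume, here is a minimal, hedged sketch of that workflow under some assumptions (the folder and attribute names are illustrative). It checks explicitly that every slice has the same shape, because inconsistent slice sizes or a mismatched axis/spacing convention are typical ways apparent stretching shows up in the other planes:
import glob
import h5py
import numpy as np
import cv2

slice_paths = sorted(glob.glob("LowPolyA21/*.png"))  # assumed slice folder
slices = [cv2.imread(p, cv2.IMREAD_GRAYSCALE) for p in slice_paths]

# every slice must have the same (height, width), or the stacked volume is distorted
assert len({s.shape for s in slices}) == 1, "slices have inconsistent shapes"

volume = np.stack(slices, axis=0)  # shape: (num_slices, height, width)
print(volume.shape)

with h5py.File("volume.h5", "w") as f:
    f.create_dataset("data", data=volume)
    # voxel spacing along (slice/y, height, width); if the y spacing differs
    # from the in-plane spacing (0.1 vs 0.001 here), record that explicitly
    f.attrs["spacing"] = (0.1, 0.001, 0.001)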

Merging images from different folders and storing to a different folder

I have three different folders: m1, m2, and m3. The 'm1' folder contains images of the format image(i)_m1.png (where i = 1 to N), the 'm2' folder contains images of the format image(i)_m2.png, and the 'm3' folder contains images of the format image(i)_m3.png. I want to merge these images using cv2.merge, like this: cv2.merge((image1_m1, image1_m2, image1_m3)), and continue for N times, storing the results in a different folder that contains the N merged images in the format image(i)_merged.png.
import pandas as pd
import cv2
import numpy as np
import glob
import os

filenames1 = glob.glob("data_folder/m1/*.png")
filenames1.sort()
filenames2 = glob.glob("data_folder/m2/*.png")
filenames2.sort()
filenames3 = glob.glob("data_folder/m3/*.png")
filenames3.sort()

for f1 in filenames1:
    for f2 in filenames2:
        for f3 in filenames3:
            img1 = cv2.imread(f1)
            img2 = cv2.imread(f2)
            img3 = cv2.imread(f3)
            img_m1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
            img_m2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
            img_m3 = cv2.cvtColor(img3, cv2.COLOR_BGR2GRAY)
            img_rgb = cv2.merge((img_m1, img_m2, img_m3))
            cv2.imwrite("data_folder/merge/img_name.png", img_rgb)
Your question is not complete; I assume you have a problem with the for loops.
You might replace the three nested for loops with a single loop over zip:
for f1,f2,f3 in zip(filenames1,filenames2,filenames3):
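As a rough sketch of how the whole loop could then look (the output file name here is only an assumption, derived from the m1 file name; adjust it to your own naming scheme):
import os
import glob
import cv2

filenames1 = sorted(glob.glob("data_folder/m1/*.png"))
filenames2 = sorted(glob.glob("data_folder/m2/*.png"))
filenames3 = sorted(glob.glob("data_folder/m3/*.png"))

os.makedirs("data_folder/merge", exist_ok=True)

for f1, f2, f3 in zip(filenames1, filenames2, filenames3):
    img_m1 = cv2.imread(f1, cv2.IMREAD_GRAYSCALE)
    img_m2 = cv2.imread(f2, cv2.IMREAD_GRAYSCALE)
    img_m3 = cv2.imread(f3, cv2.IMREAD_GRAYSCALE)
    merged = cv2.merge((img_m1, img_m2, img_m3))
    # e.g. "image1_m1.png" -> "image1_merged.png" (assumed naming convention)
    out_name = os.path.basename(f1).replace("_m1", "_merged")
    cv2.imwrite(os.path.join("data_folder/merge", out_name), merged)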

Scikit-learn: Loading images from folder to create a labelled dataset for KNN classification

I want to do handwritten digit recognition using K-Nearest Neighbours classification with scikit-learn. I have a folder that has 5001 images of handwritten digits (500 images for each digit from 0-9).
I am trying to find a way to create a dataset based on these images, so that I can then create a training and testing set. I have read a lot of online tutorials about how to do K-Nearest Neighbours classification using scikit-learn but most of the tutorials load existing datasets such as the MNIST dataset of handwritten digits.
Is there any way to create your own dataset by reading images from a folder and then assigning a label to each image? I am not sure what methods I can use to do this. Any insights are appreciated.
You can use the Pillow or OpenCV libraries to read your images.
For Pillow:
from PIL import Image
import numpy as np
img = Image.open("image_location/image_name")  # this returns an Image object
img = np.asarray(img)  # convert it to an ndarray
For OpenCV:
import cv2
img = cv2.imread("image_location/image_name", cv2.IMREAD_GRAYSCALE)
To convert all of your images you can use, for example, the os library:
import os
Create a list of your image names:
loc = os.listdir('your_images_folder')
To store grayscale images with one color channel you can use a pre-allocated array (replace img_width and img_height with your fixed image dimensions):
data = np.ones((len(loc), img_width * img_height))
for i, l in enumerate(loc):
    # full image path
    path = os.path.join("your_images_folder", l)
    img = np.asarray(Image.open(path))
    # flatten the image into a vector
    img = img.reshape(-1)
    # store this vector
    data[i, :] = img
As a result, you will get the numpy array "data" for your classification project.
A "y" vector can also be built in the same loop from the name of each image.
To trace your progress with a progress bar in a loop, the tqdm library can be a proper solution.
To store RGB images you can apply the same approach; for RGB images img.reshape(-1) will simply return a longer vector.
To read the data you should do something like this:
from os import listdir
from os.path import isfile, join
import matplotlib.pyplot as plt

mypath = '.'  # edit with the path to your data
files = [f for f in listdir(mypath) if isfile(join(mypath, f))]
x = []
y = []
for file in files:
    # assuming an image named like "eight_1.png", this extracts the label "eight"
    label = file.split('_')[0]
    y.append(label)
    img = plt.imread(join(mypath, file))
    x.append(img)
Then you will need to manipulate x and y a little before giving them to scikit-learn, but you should be fine.
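A minimal sketch of that manipulation (assuming all images share the same size and that x and y were filled as in the snippet above):
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

X = np.array(x).reshape(len(x), -1)  # flatten each image into one row
y = np.array(y)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train, y_train)
print(knn.score(X_test, y_test))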
Does this help?
import os
import imageio

def convert_word_to_label(word):
    if word == 'zero':
        return 0
    elif word == 'one':
        return 1
    elif word == 'two':
        return 2
    elif word == 'three':
        return 3
    elif word == 'four':
        return 4
    elif word == 'five':
        return 5
    elif word == 'six':
        return 6
    elif word == 'seven':
        return 7
    elif word == 'eight':
        return 8
    elif word == 'nine':
        return 9

def create_dataset(path):
    X = []
    y = []
    for r, d, f in os.walk(path):
        for image in f:
            if '.jpg' in image:
                image_path = os.path.join(r, image)
                img = imageio.imread(image_path)
                X.append(img)
                word = image.split('_')[0]
                y.append(convert_word_to_label(word))
    return X, y

if __name__ == '__main__':
    X, y = create_dataset('path/to/image_folder/')
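Just as an alternative sketch (not part of the original answer), the same word-to-digit mapping can be written more compactly with a dictionary lookup:
WORD_TO_LABEL = {'zero': 0, 'one': 1, 'two': 2, 'three': 3, 'four': 4,
                 'five': 5, 'six': 6, 'seven': 7, 'eight': 8, 'nine': 9}

def convert_word_to_label(word):
    return WORD_TO_LABEL.get(word, -1)  # -1 for an unrecognized word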

How to load images from a folder in sequence based on the number in the image names

I want to make a program that shows images one by one, one per second, based on the frame number in sequence (the images come from a video that I have already converted into 292 .jpg frames). However, there is some error that makes my program show them one by one but not in sequence; sometimes it jumps (e.g. from frame 1 to frame 100) and sometimes it does not.
import glob
import cv2

img_location = 'E:\\User\\Programming\\Python\\Work\\asd\\*.jpg'
img_loc = sorted(glob.glob(img_location))
for img in img_loc:
    print('processing %s...' % img)
    img = cv2.imread(img, 0)
    cv2.imshow("img", img)
    cv2.waitKey(250)
An alternative is to zero-pad your file names, for example: 001, 002, ..., 100, 101, ..., 250, so that lexicographic sorting matches numeric order.
Otherwise, I guess you have to do something like the following:
import glob
import os
import cv2
import numpy as np

img_location = 'E:\\User\\Programming\\Python\\Work\\asd\\*.jpg'
img_loc = sorted(glob.glob(img_location))
# extract the frame number from each file name, e.g. "...\\12.jpg" -> 12
img_numbers = [int(os.path.splitext(os.path.basename(iloc))[0]) for iloc in img_loc]
sorted_indices = np.argsort(img_numbers)
for sorted_img_idx in sorted_indices:
    print('processing %s...' % img_loc[sorted_img_idx])
    img = cv2.imread(img_loc[sorted_img_idx], 0)
    cv2.imshow("img", img)
    cv2.waitKey(250)
What this does: it assumes the files are named 1.jpg, 2.jpg, and so on. We strip the path and the .jpg extension, convert the remaining "1" to an int, and apply argsort, which returns the indices that would sort the array; we then iterate through those indices and display the images in sequence.
ALTERNATIVE AND EASIER WAY:
If the naming convention is as mentioned above, then simply do
base_path = 'E:\\User\\Programming\\Python\\Work\\asd\\'
img_names = [base_path + str(img_number) + ".jpg" for img_number in range(1, 101)]
and then iterate through it and display the images.
FFmpeg can play a folder of pictures.
You can create:
cap = cv2.VideoCapture('E:\\User\\Programming\\Python\\Work\\asd\\%d.jpg', cv2.CAP_FFMPEG)
or
cap = cv2.VideoCapture('E:\\User\\Programming\\Python\\Work\\asd\\%d.jpg', cv2.CAP_IMAGES)
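A small usage sketch (assuming the frames are named 1.jpg, 2.jpg, ...; the one-second delay is just an example):
import cv2

cap = cv2.VideoCapture('E:\\User\\Programming\\Python\\Work\\asd\\%d.jpg', cv2.CAP_IMAGES)
while True:
    ret, frame = cap.read()
    if not ret:  # no more frames
        break
    cv2.imshow("img", frame)
    cv2.waitKey(1000)  # show one frame per second
cap.release()
cv2.destroyAllWindows()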

Python 3.5: PIL Image.fromarray producing nonsense image

I have an RGB image. When I import this image, I convert it to HSV using matplotlib.colors and save the resulting array in a dict. When I want to display this image, I use Image.fromarray with mode='HSV'. I'm not sure what I am doing wrong, but when the image is displayed I get a mess (seen below along with the code). Any help is appreciated. The code snippets below are roughly what happens, in order, for any given set of imported images.
RGB to HSV Code:
from skimage import io
import matplotlib.colors as mpclr
import numpy as np
import glob
import os
from PIL import Image, ImageOps

types = ("*.tif", "*.jpg", "*.ppm")
imagePath = []

def importAllImgs(folderPath):
    for ext in types:
        imagePath.extend(glob.glob(folderPath + ext))
    im_coll = io.ImageCollection(imagePath, conserve_memory=True)
    im_array = []
    for i in range(len(im_coll)):
        # CONVERSION HAPPENS HERE
        image = im_coll[i]
        fltImg = np.around((np.array(image) / 255.0), decimals=2)
        imgHSV = mpclr.rgb_to_hsv(fltImg)
        im_array.append(imgHSV)
    return im_array, imagePath
Storage of Data:
def organizeAllData(self, imgArrList, imgPathList):
    self.allImages = dict()
    self.imageKeys = imgPathList
    for i in range(len(imgPathList)):
        self.allImages[imgPathList[i]] = {'H': imgArrList[i][:, :, 0],
                                          'S': imgArrList[i][:, :, 1],
                                          'V': imgArrList[i][:, :, 2]}
    self.hsvValues = []
    self.labelValues = []
    return self.allImages
Construction of array for displaying image:
def getImage(self, imageOfInterest):
    H = self.allImages[imageOfInterest]['H'][:, :]
    S = self.allImages[imageOfInterest]['S'][:, :]
    V = self.allImages[imageOfInterest]['V'][:, :]
    imgArray = np.dstack((H, S, V))
    return imgArray
Displaying of Image:
preImArray = halThrThsnd.getImage(self.imagePaths[self.imageIndex])
self.preIm = Image.fromarray(preImArray, 'HSV')
And finally, the resulting image:
As per user sascha's comment (see below the question), I decided to normalize the libraries I'm using for HSV conversion. Once I did that, I got normal images, no problem. It turns out that depending on which library you use for image conversion, you get different HSV value ranges: some libraries produce a range from 0 to 1, others a range from 0 to 255.
Tl;dr: use the same library across all processes and you get a good image.
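As an illustration of that range mismatch (a sketch, not the poster's final code): matplotlib's rgb_to_hsv returns floats in [0, 1], while PIL's 'HSV' mode expects 8-bit values, so rescaling before Image.fromarray produces a sensible image:
import numpy as np
import matplotlib.colors as mpclr
from PIL import Image

rgb = np.random.rand(100, 100, 3)        # any RGB array scaled to [0, 1]
hsv = mpclr.rgb_to_hsv(rgb)              # floats in [0, 1]
hsv_8bit = (hsv * 255).astype(np.uint8)  # rescale to the 0-255 range PIL expects
img = Image.fromarray(hsv_8bit, mode='HSV')
img.convert('RGB').save('check.png')     # convert back to RGB to view it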
