IndexError in my Python program - python

This is a program for face recognition using PCA. Everything works except for an IndexError that comes up at the end of the program.
When I run the code I get an IndexError at the fourth-to-last line:
distances.append((dist, y[i]))
IndexError: list index out of range
Can anyone help with this? I am a newbie in Python, so I am not yet expert at debugging this.
Here is my code:
from sklearn.decomposition import RandomizedPCA
import numpy as np
import glob
import cv2
import math
import os.path
import string

# function to get ID from filename
def ID_from_filename(filename):
    part = string.split(filename, '/')
    return part[1].replace("s", "")

# function to convert image to right format
def prepare_image(filename):
    img_color = cv2.imread(filename)
    img_gray = cv2.cvtColor(img_color, cv2.cv.CV_RGB2GRAY)
    img_gray = cv2.equalizeHist(img_gray)
    return img_gray.flat

IMG_RES = 92 * 112      # img resolution
NUM_EIGENFACES = 10     # images per train person
NUM_TRAINIMAGES = 110   # total images in training set

# loading training set from folder train_faces
folders = glob.glob('train_faces/*')

# Create an array with flattened images X
# and an array with ID of the people on each image y
X = np.zeros([NUM_TRAINIMAGES, IMG_RES], dtype='int8')
y = []

# Populate training array with flattened images from subfolders of
# train_faces and names
c = 0
for x, folder in enumerate(folders):
    train_faces = glob.glob(folder + '/*')
    for i, face in enumerate(train_faces):
        X[c,:] = prepare_image(face)
        y.append(ID_from_filename(face))
        c = c + 1

# perform principal component analysis on the images
pca = RandomizedPCA(n_components=NUM_EIGENFACES, whiten=True).fit(X)
X_pca = pca.transform(X)

# load test faces (usually one), located in folder test_faces
test_faces = glob.glob('test_faces/*')

# Create an array with flattened images X
X = np.zeros([len(test_faces), IMG_RES], dtype='int8')

# Populate test array with flattened images from subfolders of train_faces
for i, face in enumerate(test_faces):
    X[i,:] = prepare_image(face)

# run through test images (usually one)
for j, ref_pca in enumerate(pca.transform(X)):
    distances = []
    # Calculate Euclidean distance from test image to each of the known
    # images and save the distances
    for i, test_pca in enumerate(X_pca):
        dist = math.sqrt(sum([diff**2 for diff in (ref_pca - test_pca)]))
        distances.append((dist, y[i]))

    found_ID = min(distances)[1]
    print "Identified (result: " + str(found_ID) + " - dist - " + str(min(distances)[0]) + ")"

Your i in the loop below goes up to the length of X_pca - 1:
for i, test_pca in enumerate(X_pca):
    dist = math.sqrt(sum([diff**2 for diff in (ref_pca - test_pca)]))
    distances.append((dist, y[i]))
However, your y is not necessarily built to have that length:
for x, folder in enumerate(folders):
    train_faces = glob.glob(folder + '/*')
    for i, face in enumerate(train_faces):
        X[c,:] = prepare_image(face)
        y.append(ID_from_filename(face))
So you are using an index i which is greater than the bounds of your list y: X (and hence X_pca) always has NUM_TRAINIMAGES = 110 rows, regardless of how many images were actually found, while y only gets one entry per image on disk.
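A minimal sketch of one way to fix it (an illustration, not the only option): size X from the actual number of files found rather than the hard-coded NUM_TRAINIMAGES, so that X, X_pca and y always stay the same length.
# collect all training file paths first, then size X to match
train_files = [face for folder in glob.glob('train_faces/*')
               for face in glob.glob(folder + '/*')]
X = np.zeros([len(train_files), IMG_RES], dtype='int8')
y = []
for c, face in enumerate(train_files):
    X[c, :] = prepare_image(face)
    y.append(ID_from_filename(face))
# now len(y) == X.shape[0] == X_pca.shape[0], so y[i] stays in range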

Related

How to get cosine similarity for 2 different lists?

I'm trying to get cosine similarity for 2 sets of data (of unequal lengths).
The test set contains 4 random similar images from Google.
The training set contains 1 image similar to the test set, also from Google.
Below is the code I'm using to do this, by converting each image to a vector and calculating cosine similarity:
import os
from PIL import Image
from sklearn.metrics.pairwise import cosine_similarity
from img_to_vec import Img2Vec
import numpy as np

test_path = '/Users/Desktop/img_vec/test'
train_path = '/Users/Desktop/img_vec/train'

print("Getting vectors for test images...\n")
img2vec = Img2Vec()

# For each test image, we store the filename and vector as key, value in a dictionary
pics = {}
for file in os.listdir(test_path):
    filename = os.fsdecode(file)
    img = Image.open(os.path.join(test_path, filename))
    vec = img2vec.get_vec(img)
    pics[filename] = vec
# print (pics)

pic_name = {}
for file1 in os.listdir(train_path):
    filename1 = os.fsdecode(file1)
    img1 = Image.open(os.path.join(train_path, filename1))
    vec1 = img2vec.get_vec(img1)
    pic_name[filename1] = vec1
# print(pic_name)

vec1 = np.array([pics])
vec2 = np.array([pic_name])

sims = {}
for key in list(pics.keys()):
    print(key)
    sims[key] = cosine_similarity(vec1[vec2].reshape((1, -1)), vec1[key].reshape((1, -1)))[0][0]

d_view = [(v, k) for k, v in sims.items()]
d_view.sort(reverse=True)
for v, k in d_view:
    print(v, k)
However, I'm unable to resolve the following error:
sims[key] = cosine_similarity(vec1[vec2].reshape((1, -1)), vec1[key].reshape((1, -1)))[0][0]
IndexError: arrays used as indices must be of integer (or boolean) type
I also tried to compute cosine similarity manually in Python (using numpy) and with a specialised library. It doesn't work; I believe it's an issue with dtype.
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
# vectors
a = np.array([1,2,3])
b = np.array([1,1,4])
# manually compute cosine similarity
dot = np.dot(a, b)
norma = np.linalg.norm(a)
normb = np.linalg.norm(b)
cos = dot / (norma * normb)
# use library, operates on sets of vectors
aa = a.reshape(1,3)
ba = b.reshape(1,3)
cos_lib = cosine_similarity(aa, ba)
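For what it's worth, the manual route and the library agree on the toy vectors above, so dtype is not a problem for these particular inputs; a quick check:
# both lines print the same value, roughly 0.94491
print(cos)
print(cos_lib[0][0])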
Any help / guidance / alternative is much appreciated.
vec1 = np.array([pics])
vec2 = np.array([pic_name])
I don't see the need to do this.
Also, in the line where the error occurs, the problem is at:
vec1[vec2].reshape((1, -1))
because you're indexing vec1 using vec2, and as the error says, arrays used as indices must be of integer (or boolean) type. I suppose you meant to put key instead of vec2.
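A minimal sketch of the corrected loop (assuming the intent is to compare each test vector against the single training vector, keeping the dictionaries instead of wrapping them in arrays):
# pic_name holds the single training image; take its vector
train_vec = list(pic_name.values())[0]
sims = {}
for key, test_vec in pics.items():
    # cosine_similarity expects 2-D arrays with one row per sample
    sims[key] = cosine_similarity(test_vec.reshape(1, -1),
                                  train_vec.reshape(1, -1))[0][0]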

Loading Images from Multiple Folders along with their labels into numpy arrays for a Multiclass Classification Problem

I am working on a multiclass image classification problem. There are over 120 folders inside my training folder, and I am trying to load all the images from those 120 folders, along with their labels, into the numpy arrays X and y. I am trying the code below but get the following error:
ValueError: all input arrays must have the same shape
The code that I am using:
def loadimgs(path, n=0):
    '''
    path => Path of train directory or test directory
    '''
    X = []
    y = []
    cat_dict = {}
    fruit_dict = {}
    curr_y = n
    # we load every alphabet separately so we can isolate them later
    for fruit in os.listdir(path):
        print("loading Fruit: " + fruit)
        fruit_dict[fruit] = [curr_y, None]
        fruit_path = os.path.join(path, fruit)
        # every letter/category has its own column in the array, so load separately
        for img in os.listdir(fruit_path):
            cat_dict[curr_y] = (fruit, img)
            category_images = []
            img_path = os.path.join(fruit_path, img)
            # read all the images in the current category
            for filename in img_path:
                image = cv2.imread(filename)
                category_images.append(image)
                y.append(curr_y)
            try:
                X.append(np.stack(category_images))
            # edge case - last one
            except ValueError as e:
                print(e)
                print("error - category_images:", category_images)
            curr_y += 1
            fruit_dict[fruit][1] = curr_y - 1
    y = np.vstack(y)
    X = np.stack(X)
    return X, y, fruit_dict
My training folder is as follows
Training Folder
Note: I do not want to use the Keras ImageDataGenerator for this task.
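No answer is attached here, but for reference, a minimal loader sketch (an assumption, not the thread's solution) that avoids the shape error by resizing every image to one fixed size before stacking; TARGET_SIZE is an assumed parameter:
import os
import cv2
import numpy as np

TARGET_SIZE = (100, 100)  # assumed; pick a size that suits your data

def load_images(path):
    X, y, fruit_dict = [], [], {}
    for label, fruit in enumerate(sorted(os.listdir(path))):
        fruit_dict[label] = fruit
        fruit_path = os.path.join(path, fruit)
        for img_name in os.listdir(fruit_path):
            image = cv2.imread(os.path.join(fruit_path, img_name))
            if image is None:  # skip unreadable files
                continue
            # resize so every array has the same shape before stacking
            X.append(cv2.resize(image, TARGET_SIZE))
            y.append(label)
    return np.stack(X), np.array(y), fruit_dict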

Loop through large .tif stack (image raster) and extract positions

I want to run through a large tif stack (1500+ frames) and extract the coordinates of the local maxima for each frame. The code below does the job, but it is extremely slow for large files. When running on smaller pieces (e.g. 20 frames) each frame is processed almost instantly; when running on the whole dataset, each frame takes seconds.
Any suggestions for making the code faster? I figure it is due to the loading of the large tiff file, but shouldn't that only be necessary once, at the start?
I have the following code:
import numpy as np
import pandas as pd
from pims import ImageSequence
from skimage.feature import peak_local_max

def cmask(index, array):
    radius = 3
    a, b = index
    nx, ny = array.shape
    y, x = np.ogrid[-a:nx-a, -b:ny-b]
    mask = x*x + y*y <= radius*radius
    return(sum(array[mask]))  # sum of the pixel values inside the circular mask

images = ImageSequence('tryhard_red_small.tif')

frame_list = []
x = []
y = []
int_liposome = []
BG_liposome = []

for i in range(len(images[0])):
    tmp_frame = images[0][i]
    xy = pd.DataFrame(peak_local_max(tmp_frame, min_distance=8, threshold_abs=3000))
    x.extend(xy[0].tolist())
    y.extend(xy[1].tolist())
    for j in range(len(xy)):
        index = x[j], y[j]
        int_liposome.append(cmask(index, tmp_frame))
    frame_list.extend([i]*len(xy))
    print "Frame: ", i, "of ", len(images[0])

features = pd.DataFrame(
    {'lip_int': int_liposome,
     'y': y,
     'x': x,
     'frame': frame_list})
Have you tried profiling the code, say with %prun or %lprun in IPython? That'll tell you exactly where your slowdowns are occurring.
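For example (a sketch, assuming the per-frame loop is wrapped in a hypothetical function process_frames()):
%prun -l 10 process_frames()      # top 10 entries by internal time
%load_ext line_profiler
%lprun -f cmask process_frames()  # per-line timings inside cmask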
I can't make my own version of this without the tif stack, but I suspect the problem is that you're using lists to store everything. Every time you do an append or extend, Python has to allocate more memory. You could try getting the total count of maxima first, then allocating your output arrays, then rerunning to fill the arrays. Something like below:
# run through once to get the count of local maxima
npeaks = (len(peak_local_max(f, min_distance=8, threshold_abs=3000))
          for f in images[0])
total_peaks = sum(npeaks)

# allocate storage arrays and rerun
x = np.zeros(total_peaks, np.float)
y = np.zeros_like(x)
int_liposome = np.zeros_like(x)
BG_liposome = np.zeros_like(x)
frame_list = np.zeros(total_peaks, np.int)

index_0 = 0
for frame_ind, tmp_frame in enumerate(images[0]):
    peaks = pd.DataFrame(peak_local_max(tmp_frame, min_distance=8, threshold_abs=3000))
    index_1 = index_0 + len(peaks)
    # copy the data from the DataFrame's underlying numpy array
    x[index_0:index_1] = peaks[0].values
    y[index_0:index_1] = peaks[1].values
    # iterate over the peak rows (not the DataFrame's column labels)
    for i, peak in enumerate(peaks.itertuples(index=False), index_0):
        int_liposome[i] = cmask(peak, tmp_frame)
    frame_list[index_0:index_1] = frame_ind
    # update the starting index
    index_0 = index_1
    print "Frame: ", frame_ind, "of ", len(images[0])

Moments as features and a KNN classifier using OpenCV and Python - KNearest doesn't classify

I have a set of twelve shapes and I'm trying to determine the shape of a query image against my database.
For convenience I have done all the preprocessing in MATLAB and have stored the shape contours as binary images, separately for training and testing purposes.
I have done the feature extraction and implementation phase in Python.
In the training phase, I read each binary image and extract the HU MOMENTS (a 7-element vector) as features for all the train images (285 in total).
So my samples (train samples) dimension is [285, 7].
My responses dimension is [285].
Now I follow a similar strategy for testing as well. The test images constitute 541 in total:
read image -> extract Hu moments -> feed it to knn.find_nearest
The dimension of the test moments is [1, 7].
The problem that I'm facing here is that, irrespective of the test image I give, I get a return value from knn.find_nearest of 76. It consistently gives the same value for every image.
What I have done to debug:
Checked the code to ensure that the dimensions I'm providing to KNN are correct
Checked the Hu-moments values in MATLAB as well, and found them nearly identical to OpenCV's
Also made sure that the Hu moments for the test image are computed properly
Tested with changing the k value (when k=1 retval is 76, when k=3 retval is 75)
I don't know what I have done wrong here. Please help.
import os
import sys
import numpy
import cv2
import cv2.cv as cv
import xlwt

def main():
    pth = sys.argv[1]
    lsfolds = os.listdir(pth)
    files_path = []
    all_moments = numpy.empty((0,7))
    train_samples = []
    responses = []
    cnt = 0
    for di in lsfolds:
        img_path = os.path.join(pth, di)
        lsfiles = os.listdir(img_path)
        for ls in lsfiles:
            comp_path = os.path.join(img_path, ls)
            files_path.append(comp_path)
            if (comp_path[-4:] == 's.db'):
                continue
            img_bin = cv2.imread(comp_path, -1)
            #cv2.imshow('Image', img_bin)
            #cv2.waitKey()
            #################### Moments as Feature ###############################
            moments = cv2.moments(img_bin, 1)
            hu_moments = cv2.HuMoments(moments)
            hu_moments = hu_moments.reshape((1,7))
            all_moments = numpy.append(all_moments, hu_moments, 0)
            train_samples.append(comp_path)
            responses.append(int(cnt))
            ########################################################################
        cnt += 1
    responses = numpy.float32(responses)
    all_moments = numpy.float32(all_moments)

    ################## KNN #####################################
    knn_train_eng = cv2.KNearest()
    knn_train_eng.train(all_moments, responses)

    ####################################### Testing ######################################
    timg_pth = sys.argv[2]
    tfolds = os.listdir(timg_pth)
    wb = xlwt.Workbook()
    ws = wb.add_sheet('Test Results')
    c = 0
    for tdi in tfolds:
        timg_dir = os.path.join(timg_pth, tdi)
        tfiles = os.listdir(timg_dir)
        for fl in tfiles:
            timg_path = os.path.join(timg_dir, fl)
            if (timg_path[-4:] == 's.db'):
                continue
            timg = cv2.imread(timg_path, -1)
            timg_bin = timg
            #cv2.imshow('test_bin', timg_bin)
            tmoments = cv2.moments(timg_bin)
            thu_moments = cv2.HuMoments(tmoments)
            thu_moments = thu_moments.reshape((1,7))
            thu_moments = numpy.float32(thu_moments)
            retval, results, neigh_resp, dists = knn_train_eng.find_nearest(thu_moments, 1)  # Predict using KNN
            res, dist = int(results[0][0]), dists[0][0]
            op_answ = str(int((results[0][0])))
            print op_answ
            print train_samples[int(op_answ)]
            op = cv2.imread(train_samples[int(op_answ)])
            c += 1
    cv2.destroyAllWindows()
    ##############################################################################
    cv2.destroyAllWindows()

if __name__ == '__main__':
    main()
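One thing worth checking, although it is not from this thread: the seven Hu moments span many orders of magnitude, so distance-based classifiers such as KNN are usually given a log-scaled version. A minimal sketch:
# not the asker's code: signed log transform commonly applied to Hu moments
def log_scale_hu(hu_moments):
    hu = hu_moments.flatten()
    # the small epsilon guards against log10(0)
    return -numpy.sign(hu) * numpy.log10(numpy.abs(hu) + 1e-30)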

Face recognition - Python

I am trying to do face recognition by Principal Component Analysis (PCA) using Python.
Now I am able to get the minimum Euclidean distance between the training images (the array images) and the input image input_image. Here is my code:
import os
from PIL import Image
import numpy as np
import glob
import numpy.linalg as linalg
#Step1: put database images into a 2D array
filenames = glob.glob('C:\\Users\\me\\Downloads\\/*.pgm')
filenames.sort()
img = [Image.open(fn).convert('L').resize((90, 90)) for fn in filenames]
images = np.asarray([np.array(im).flatten() for im in img])
#Step 2: find the mean image and the mean-shifted input images
mean_image = images.mean(axis=0)
shifted_images = images - mean_image
#Step 3: Covariance
c = np.asmatrix(shifted_images) * np.asmatrix(shifted_images.T)
#Step 4: Sorted eigenvalues and eigenvectors
eigenvalues,eigenvectors = linalg.eig(c)
idx = np.argsort(-eigenvalues)
eigenvalues = eigenvalues[idx]
eigenvectors = eigenvectors[:, idx]
#Step 5: Only keep the top 'num_eigenfaces' eigenvectors
num_components = 20
eigenvalues = eigenvalues[0:num_components].copy()
eigenvectors = eigenvectors[:, 0:num_components].copy()
#Step 6: Finding weights
w = eigenvectors.T * np.asmatrix(shifted_images)
# check eigenvectors.T/eigenvectors
#Step 7: Input image
input_image = Image.open('C:\\Users\\me\\Test\\5.pgm').convert('L').resize((90, 90))
input_image = np.asarray(input_image).flatten()
#Step 8: get the normalized image, covariance,
# eigenvalues and eigenvectors for input image
shifted_in = input_image - mean_image
c = np.cov(input_image)
cmat = c.reshape(1,1)
eigenvalues_in, eigenvectors_in = linalg.eig(cmat)
#Step 9: Find weights of input image
w_in = eigenvectors_in.T * np.asmatrix(shifted_in)
# check eigenvectors/eigenvectors_in
#Step 10: Euclidean distance
d = np.sqrt(np.sum(np.asarray(w - w_in)**2, axis=1))
idx = np.argmin(d)
print idx
My problem now is that I want to return the image (or its index in the array images) with the minimum Euclidean distance, not its index in the array of distances d.
I don't believe you have modified the order in which the images are stored in w compared to images; therefore, the idx from np.argmin(d) should be the same index into the images array, so
images[idx]
should be the image you want.
Of course,
images[idx].shape
will give (8100,) because it's still flattened (the images were resized to 90 x 90, and 90 * 90 = 8100). If you want to unflatten it, you can do:
images[idx].reshape(90,90)
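To actually display the match, a small sketch (assuming the variables from the script above are still in scope):
# unflatten back to 90x90 and show it with PIL
best_match = images[idx].reshape(90, 90)
Image.fromarray(best_match.astype(np.uint8)).show()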
