I want to compare two images to check whether they are equal, but to do that I need to compare a specific region (ROI) of each image.
I've cropped the areas I want to compare, but now I'd like to know how to do the comparison, because I can't directly compare the cropped images.
How can I, for example, get the average pixel values of both cropped images and compare them?
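For what it's worth, a minimal sketch of comparing the mean pixel values of two equally-sized crops; roi_a, roi_b and the tolerance of 5 intensity levels are illustrative assumptions, not part of the original question:
import cv2
import numpy as np

mean_a = cv2.mean(roi_a)[:3]  # per-channel (B, G, R) mean of the first crop
mean_b = cv2.mean(roi_b)[:3]  # per-channel (B, G, R) mean of the second crop

# the crops are "similar" if every channel mean is within the chosen tolerance
similar = np.allclose(mean_a, mean_b, atol=5)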
Update: I've solved the problem.
Current code:
import cv2
import numpy as np
from skimage.measure import compare_ssim as ssim
def mse(imageA, imageB):
    # the 'Mean Squared Error' between the two images: the sum of the squared
    # differences, normalised by the number of pixels (both images must have
    # the same dimensions)
    err = np.sum((imageA.astype("float") - imageB.astype("float")) ** 2)
    err /= float(imageA.shape[0] * imageA.shape[1])
    return err
polarity_ok = cv2.resize(cv2.imread("polarity_OK_edited.jpg"),None,fx=0.2, fy=0.2) #resize the image to be smaller
polarity_nok = cv2.resize(cv2.imread("Polarity_NOK1.JPG"), None,fx=0.2, fy=0.2) #resize the image to be smaller
polarity_ok_cropped = polarity_ok[350:408, 97:111]
polarity_nok_cropped = polarity_nok[350:408, 97:111]
polarity_ok_cropped1 = polarity_ok[359:409, 232:240]
polarity_nok_cropped1 = polarity_nok[359:409, 232:240]
polarity_ok_cropped2 = polarity_ok[118:153, 44:69]
polarity_nok_cropped2 = polarity_nok[118:153, 44:69]
polarity_ok_cropped3 = polarity_ok[94:142, 192:197]
polarity_nok_cropped3 = polarity_nok[94:142, 192:197]
m = mse(polarity_ok_cropped, polarity_nok_cropped)
s = ssim(polarity_ok_cropped, polarity_nok_cropped, multichannel=True)
diff = cv2.subtract(polarity_ok_cropped, polarity_nok_cropped)
result = not np.any(diff)
m1 = mse(polarity_ok_cropped1, polarity_nok_cropped1)
s1 = ssim(polarity_ok_cropped1, polarity_nok_cropped1, multichannel=True)
diff1 = cv2.subtract(polarity_ok_cropped1, polarity_nok_cropped1)
result1 = not np.any(diff1)
m2 = mse(polarity_ok_cropped2, polarity_nok_cropped2)
s2 = ssim(polarity_ok_cropped2, polarity_nok_cropped2, multichannel=True)
diff2 = cv2.subtract(polarity_ok_cropped2, polarity_nok_cropped2)
result2 = not np.any(diff2)
m3 = mse(polarity_ok_cropped2, polarity_nok_cropped2)
s3 = ssim(polarity_ok_cropped2, polarity_nok_cropped2, multichannel=True)
diff3 = cv2.subtract(polarity_ok_cropped3, polarity_nok_cropped3)
result3 = not np.any(diff3)
if (result and result1 and result2 and result3):
    print("The polarity is correct. Awesome :)")
else:
    print("Nice try, but the polarity is incorrect. Take another chance!")
If you know exactly where the objects you want to compare are, a simple and fast method with OpenCV is to extract a histogram for each channel (RGB or HSV) using calcHist() and then compare them with compareHist().
Further information and examples can be found here: Histogram comparison.
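A minimal sketch of that approach; the HSV bin counts, the HISTCMP_CORREL metric and the 0.9 threshold are illustrative assumptions:
import cv2

def compare_rois_by_histogram(roi_a, roi_b, threshold=0.9):
    # build a 3-channel HSV histogram for each ROI and compare them
    hsv_a = cv2.cvtColor(roi_a, cv2.COLOR_BGR2HSV)
    hsv_b = cv2.cvtColor(roi_b, cv2.COLOR_BGR2HSV)
    hist_a = cv2.calcHist([hsv_a], [0, 1, 2], None, [8, 8, 8], [0, 180, 0, 256, 0, 256])
    hist_b = cv2.calcHist([hsv_b], [0, 1, 2], None, [8, 8, 8], [0, 180, 0, 256, 0, 256])
    cv2.normalize(hist_a, hist_a)
    cv2.normalize(hist_b, hist_b)
    score = cv2.compareHist(hist_a, hist_b, cv2.HISTCMP_CORREL)
    return score >= threshold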
You can also use the Structural Similarity Index (SSIM): it takes the two images as input and returns a score in the range [-1, 1], where a score of 1 indicates a perfect match (both images are equal).
from skimage.measure import compare_ssim
(score, diff) = compare_ssim(image1, image2, full=True)
By the way, converting the input images to grayscale before the comparison is preferred.
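A minimal sketch of that grayscale variant, assuming image1 and image2 were loaded with cv2.imread (and are therefore BGR arrays):
import cv2
from skimage.measure import compare_ssim
# note: in scikit-image >= 0.16 this function lives at skimage.metrics.structural_similarity

gray1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
(score, diff) = compare_ssim(gray1, gray2, full=True)
print("SSIM: %f" % score)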
One more way to do the same:
from PIL import Image

i1 = Image.open('./image1.png')
i2 = Image.open('./image2.png')

# both images must have the same mode and size for a pixel-wise comparison
assert i1.mode == i2.mode, "Different kinds of images."
assert i1.size == i2.size, "Different sizes."

pairs = zip(i1.getdata(), i2.getdata())
if len(i1.getbands()) == 1:
    # for gray-scale images
    dif = sum(abs(p1 - p2) for p1, p2 in pairs)
else:
    dif = sum(abs(c1 - c2) for p1, p2 in pairs for c1, c2 in zip(p1, p2))

ncomponents = i1.size[0] * i1.size[1] * len(i1.getbands())
print("Difference (percentage):", (dif / 255.0 * 100) / ncomponents)
You need to install Pillow.
Hope this helps.
I'm trying to implement Reinhard's method to use the color distribution of a target image to color normalize a passed in image for a research project. I've gotten the code to work and it outputs correctly but it's pretty slow. It takes about 20 minutes to iterate through 300 images. I'm pretty sure the bottleneck is how I'm handling applying the function to each image. I'm currently iterating through each pixel of the image and applying the functions below to each channel.
def reinhard(target, img):
    # convert image and target from BGR colour space to L*a*b*
    lAB_img = cv2.cvtColor(img, cv2.COLOR_BGR2Lab)
    lAB_tar = cv2.cvtColor(target, cv2.COLOR_BGR2Lab)

    # mean and standard deviation of each colour channel across the entire image
    (mean, std) = cv2.meanStdDev(lAB_img)
    (mean_tar, std_tar) = cv2.meanStdDev(lAB_tar)

    # iterate over the image, mapping each pixel onto the target's statistics
    for y in range(512):
        for x in range(512):
            lAB_tar[x, y, 0] = (lAB_img[x, y, 0] - mean[0]) / std[0] * std_tar[0] + mean_tar[0]
            lAB_tar[x, y, 1] = (lAB_img[x, y, 1] - mean[1]) / std[1] * std_tar[1] + mean_tar[1]
            lAB_tar[x, y, 2] = (lAB_img[x, y, 2] - mean[2]) / std[2] * std_tar[2] + mean_tar[2]

    mapped = cv2.cvtColor(lAB_tar, cv2.COLOR_Lab2BGR)
    return mapped
My supervisor told me that I could try using a matrix to apply the function all at once to improve the runtime but I'm not exactly sure how to go about doing that.
The original and the target:
Color transfer results using Reinhard's method in 5 ms:
I prefer to implement the formula with NumPy vectorized operations rather than Python loops.
# implementing the formula
#(Io - mo)/so*st + mt = Io * (st/so) + mt - mo*(st/so)
ratio = (std_tar/std_ori).reshape(-1)
offset = (mean_tar - mean_ori*std_tar/std_ori).reshape(-1)
lab_tar = cv2.convertScaleAbs(lab_ori*ratio + offset)
Here is the code:
# 2019/02/19 by knight-金
# https://stackoverflow.com/a/54757659/3547485
import numpy as np
import cv2

def reinhard(target, original):
    # convert both images from BGR to L*a*b*
    lab_tar = cv2.cvtColor(target, cv2.COLOR_BGR2Lab)
    lab_ori = cv2.cvtColor(original, cv2.COLOR_BGR2Lab)

    # meanStdDev: per-channel mean and standard deviation
    mean_tar, std_tar = cv2.meanStdDev(lab_tar)
    mean_ori, std_ori = cv2.meanStdDev(lab_ori)

    # implementing the formula
    # (Io - mo)/so*st + mt = Io * (st/so) + mt - mo*(st/so)
    ratio = (std_tar/std_ori).reshape(-1)
    offset = (mean_tar - mean_ori*std_tar/std_ori).reshape(-1)
    lab_tar = cv2.convertScaleAbs(lab_ori*ratio + offset)

    # convert back to BGR
    mapped = cv2.cvtColor(lab_tar, cv2.COLOR_Lab2BGR)
    return mapped

if __name__ == "__main__":
    ori = cv2.imread("ori.png")
    tar = cv2.imread("tar.png")

    mapped = reinhard(tar, ori)
    cv2.imwrite("mapped.png", mapped)

    mapped_inv = reinhard(ori, tar)
    cv2.imwrite("mapped_inv.png", mapped_inv)
I managed to figure it out after looking at the numpy documentation. I just needed to replace my nested for loop with proper array accessing. It took less than a minute to iterate through all 300 images with this.
lAB_tar[:,:,0] = (lAB_img[:,:,0] - mean[0])/std[0] * std_tar[0] + mean_tar[0]
lAB_tar[:,:,1] = (lAB_img[:,:,1] - mean[1])/std[1] * std_tar[1] + mean_tar[1]
lAB_tar[:,:,2] = (lAB_img[:,:,2] - mean[2])/std[2] * std_tar[2] + mean_tar[2]
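A fully broadcast version of the same mapping is also possible. A sketch, assuming mean, std, mean_tar and std_tar come from cv2.meanStdDev as above; the explicit clipping to the 0-255 range is an added assumption:
import numpy as np

ratio = (std_tar / std).reshape(1, 1, 3)
offset = (mean_tar - mean * std_tar / std).reshape(1, 1, 3)
# map every pixel in one expression and clip back to valid 8-bit values
lAB_tar = np.clip(lAB_img.astype(np.float64) * ratio + offset, 0, 255).astype(np.uint8)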
Question:
How can I programmatically return a raster that is the difference of two (differently sized) red bands?
i.e.
gdal_calc.py -A 'WARPED.tif' -B 'DSC_1636.tif' --outfile = 'dif.tif' --calc = "A-B"
The QGIS raster calculator performs this operation just fine. The gdal_calc.py command above, however, returns the following error.
Exception: Error! Dimensions of file DSC_1636.tif (7380, 4928) are different from other files (7743, 5507). Cannot proceed
I am currently under the impression I should read in the rasters using a defined extent, created by finding the overlap as shown below, but I am still not able to make this work.
# Subtract two rasters of different dimensions
# Pixel coordinates define overlap
import os, sys
from PIL import Image
from osgeo import gdal, ogr, osr
gdal.UseExceptions()
# Use PIL to get information from images
im1 = Image.open('DSC_0934-warped.tif')
print('warped image size is %s ' % str(im1.size))
im2 = Image.open('DSC_1636.png')
print('initial image (image 2) size is %s' % str(im2.size))
warped image size is (7743, 5507)
initial image (image 2) size is (7380, 4928)
# Use GDAL to get information about images
def get_extent(fn):
    '''Returns min_x, max_y, max_x, min_y'''
    ds = gdal.Open(fn)
    gt = ds.GetGeoTransform()
    return (gt[0], gt[3], gt[0] + gt[1] * ds.RasterXSize,
            gt[3] + gt[5] * ds.RasterYSize)
print('extent of warped.tif is %s' % str(get_extent('DSC_0934-warped.tif')))
print('extent of 1636.png is %s' % str(get_extent('DSC_1636.png')))
extent of warped.tif is (-375.3831214210602, 692.5167764068751, 7991.3588371542955, -5258.102875649754)
extent of 1636.png is (0.0, 0.0, 7380.0, 4928.0)
r1 = get_extent('DSC_0934-warped.tif')
r2 = get_extent('DSC_1636.png')
# Get left, top, right, bottom of dataset's bounds in pixel coordinates
intersection = [max(r1[0], r2[0]),
                min(r1[1], r2[1]),
                min(r1[2], r2[2]),
                max(r1[3], r2[3])]
print('checking for overlap')
if (intersection[2] < intersection[0]) or (intersection[1] > intersection[3]):
    intersection = None
    print('no overlap')
else:
    print('intersection overlaps at: %s' % intersection)
checking for overlap
intersection overlaps at: [0.0, 0.0, 7380.0, 4928.0]
The most straightforward answer is to read the images in as arrays of defined dimensions.
Without re-posting the code above used to check where the overlap is, the solution can be had with the following additions. (Thank you #Val)
from osgeo import gdal
from osgeo.gdal_array import BandWriteArray, CopyDatasetInfo
from osgeo.gdalconst import GDT_Byte

# Get the data
ds1_src = gdal.Open("DSC_1636.png")
ds2_src = gdal.Open("DSC_0934-warped.tif")
ds1_bnd = ds1_src.GetRasterBand(1).ReadAsArray(xoff=0, yoff=0, win_xsize=7380, win_ysize=4928)
ds2_bnd = ds2_src.GetRasterBand(1).ReadAsArray(xoff=0, yoff=0, win_xsize=7380, win_ysize=4928)

# Do the maths...
data_out = ds2_bnd - ds1_bnd

# Write the output file
driver = gdal.GetDriverByName("GTiff")
dsOut = driver.Create("out.tiff", 7380, 4928, 1, GDT_Byte)
CopyDatasetInfo(ds1_src, dsOut)
bandOut = dsOut.GetRasterBand(1)
BandWriteArray(bandOut, data_out)

# Close the datasets
ds1_src = None
ds2_src = None
ds1_bnd = None
ds2_bnd = None
bandOut = None
dsOut = None
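If both rasters are properly georeferenced in the same coordinate system, the intersection extent computed above can be converted into per-dataset pixel offsets through each geotransform instead of hard-coding the window size. A sketch of that, assuming north-up rasters (negative gt[5], no rotation terms):
from osgeo import gdal

def extent_to_window(ds, extent):
    # extent is (min_x, max_y, max_x, min_y) in the dataset's georeferenced units
    gt = ds.GetGeoTransform()
    xoff = int(round((extent[0] - gt[0]) / gt[1]))
    yoff = int(round((extent[1] - gt[3]) / gt[5]))
    xsize = int(round((extent[2] - extent[0]) / gt[1]))
    ysize = int(round((extent[3] - extent[1]) / gt[5]))
    return xoff, yoff, xsize, ysize

ds1_src = gdal.Open("DSC_1636.png")
ds2_src = gdal.Open("DSC_0934-warped.tif")
xoff1, yoff1, xs, ys = extent_to_window(ds1_src, intersection)
xoff2, yoff2, _, _ = extent_to_window(ds2_src, intersection)
ds1_bnd = ds1_src.GetRasterBand(1).ReadAsArray(xoff1, yoff1, xs, ys)
ds2_bnd = ds2_src.GetRasterBand(1).ReadAsArray(xoff2, yoff2, xs, ys)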
I've written my own image compare function for RobotFramework with the help of a question asked here.
from PIL import Image, ImageChops, ImageDraw, ImageFont

def check_image_files(self, file1, file2, file3):
    ''' Check two image files

    ``file1``: absolute path to the first file
    ``file2``: absolute path to the second file
    ``file3``: absolute path to the compare file
    '''
    self.builtin.log("File1: %s" % file1)
    self.builtin.log("File2: %s" % file2)

    # map every non-zero difference value to white, zero stays black
    point_table = ([0] + ([255] * 255))

    f1 = Image.open(file1)
    f2 = Image.open(file2)
    diff = ImageChops.difference(f1, f2)
    diff = diff.convert('L')
    diff = diff.point(point_table)
    f3 = diff.convert('RGB')
    f3.paste(f2, mask=diff)
    f3.save(file3)
The end result now is a completely black image if no differences are found, but I want a True/False back so I can make the test case PASS or FAIL when the two files are not identical. At the moment the test case passes even when the files differ slightly, and that's not what I want.
I've read the PIL documentation but couldn't find what I needed (by the way, I'm a tester with an interest in programming).
The example below is from RosettaCode.org and shows basic image comparison by calculating the difference. This is of course a precursor to determining whether two images are identical; if they are, the difference is 0.0.
from PIL import Image

i1 = Image.open("image1.png")
i2 = Image.open("image2.png")
assert i1.mode == i2.mode, "Different kinds of images."
assert i1.size == i2.size, "Different sizes."

# zip is lazy in Python 3; the original Python 2 example used itertools.izip
pairs = zip(i1.getdata(), i2.getdata())
if len(i1.getbands()) == 1:
    # for gray-scale jpegs
    dif = sum(abs(p1 - p2) for p1, p2 in pairs)
else:
    dif = sum(abs(c1 - c2) for p1, p2 in pairs for c1, c2 in zip(p1, p2))

ncomponents = i1.size[0] * i1.size[1] * 3
print("Difference (percentage):", (dif / 255.0 * 100) / ncomponents)
I have a stereo pair and would like to create a disparity map. However, the shift between the two images in not simply left to right or up and down, but some combination of the two. I have tried to use the StereoBM function in Open CV Python but the results have diagonal black and white lines across the image. My question is, is it possible to use two images where the parallax is in the diagonal direction to compute a disparity map, or do the images need to be rotated in order for this function to work?
EDIT: After reading the answers below, and doing some research, I decided to try the stereoRectifyUncalibrated function. I first find key points in the first image with SURF, and then repeat this for the second image. I then use the FLANN based matcher to match the points, and I remove the outliers. I then find the fundamental mat using the findFundamentalMat function, and then I call stereoRectifyUncalibrated. However, I get an error that begins like this: (-215) CV_IS_MAT(_points1) && CV_IS_MAT(_points2) && (_points1->rows == 1 || _points1->cols == 1) &&...
I have made sure that the data types of everything are the same, and that the point arrays have the same dimensions. The part of my code where I use stereoRectifyUncalibrated is below.
#Detect feature points with SURF
detector = cv2.SURF()
kp1, desc1 = detector.detectAndCompute(img1, None)
kp2, desc2 = detector.detectAndCompute(img2, None)
#Match Points
FLANN_INDEX_KDTREE = 1 # bug: flann enums are missing
flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
matcher = cv2.FlannBasedMatcher(flann_params, {})
matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k=2)
mkp1, mkp2 = [], []
ratio = 0.75
for m in matches:
    if len(m) == 2 and m[0].distance < m[1].distance * ratio:
        m = m[0]
        mkp1.append(kp1[m.queryIdx])
        mkp2.append(kp2[m.trainIdx])
p1 = np.float32([kp.pt for kp in mkp1])
p2 = np.float32([kp.pt for kp in mkp2])
kp_pairs = zip(mkp1, mkp2)
H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
print('%d / %d inliers/matched' % (np.sum(status), len(status)))
statusmat = np.zeros((max(status.shape),2),dtype = np.float64)
statusmat[:,0] = status[:,0]
statusmat[:,1] = status[:,0]
status = np.array(status, dtype=bool)
p1f=p1[status.view(np.ndarray).ravel()==1,:] #Remove Outliers
p2f=p2[status.view(np.ndarray).ravel()==1,:] #Remove Outliers
#Attempt to rectify using stereoRectifyUncalibrated
fundmat, mask = cv2.findFundamentalMat(p1f,p2f,cv2.RANSAC,3,0.99,)
rectmat1, rectmat2 = cv2.stereoRectifyUncalibrated(p1f,p2f,fundmat,imgsize)
Thanks for the answers so far!
It seems that stereoRectifyUncalibrated takes a row or column vector, not an n x 2 matrix.
Also, the output has three elements, not two.
p1fNew = p1f.reshape((p1f.shape[0] * 2, 1))
p2fNew = p2f.reshape((p2f.shape[0] * 2, 1))
retBool ,rectmat1, rectmat2 = cv2.stereoRectifyUncalibrated(p1fNew,p2fNew,fundmat,imgsize)
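To actually rectify the images with the returned homographies, one option (a sketch, assuming img1, img2 and imgsize are as in the question) is to warp each image:
# warp both images into the rectified geometry using the returned homographies
img1_rect = cv2.warpPerspective(img1, rectmat1, imgsize)
img2_rect = cv2.warpPerspective(img2, rectmat2, imgsize)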
I am trying to make face recognition by Principal Component Analysis (PCA) using python.
Now I am able to get the minimum Euclidean distance between the training images (images) and the input image (input_image). Here is my code:
import os
from PIL import Image
import numpy as np
import glob
import numpy.linalg as linalg
#Step1: put database images into a 2D array
filenames = glob.glob('C:\\Users\\me\\Downloads\\*.pgm')
filenames.sort()
img = [Image.open(fn).convert('L').resize((90, 90)) for fn in filenames]
images = np.asarray([np.array(im).flatten() for im in img])
#Step 2: find the mean image and the mean-shifted input images
mean_image = images.mean(axis=0)
shifted_images = images - mean_image
#Step 3: Covariance
c = np.asmatrix(shifted_images) * np.asmatrix(shifted_images.T)
#Step 4: Sorted eigenvalues and eigenvectors
eigenvalues,eigenvectors = linalg.eig(c)
idx = np.argsort(-eigenvalues)
eigenvalues = eigenvalues[idx]
eigenvectors = eigenvectors[:, idx]
#Step 5: Only keep the top 'num_eigenfaces' eigenvectors
num_components = 20
eigenvalues = eigenvalues[0:num_components].copy()
eigenvectors = eigenvectors[:, 0:num_components].copy()
#Step 6: Finding weights
w = eigenvectors.T * np.asmatrix(shifted_images)
# check eigenvectors.T/eigenvectors
#Step 7: Input image
input_image = Image.open('C:\\Users\\me\\Test\\5.pgm').convert('L').resize((90, 90))
input_image = np.asarray(input_image).flatten()
#Step 8: get the normalized image, covariance,
# eigenvalues and eigenvectors for input image
shifted_in = input_image - mean_image
c = np.cov(input_image)
cmat = c.reshape(1,1)
eigenvalues_in, eigenvectors_in = linalg.eig(cmat)
#Step 9: Find weights of input image
w_in = eigenvectors_in.T * np.asmatrix(shifted_in)
# check eigenvectors/eigenvectors_in
#Step 10: Euclidean distance
d = np.sqrt(np.sum(np.asarray(w - w_in)**2, axis=1))
idx = np.argmin(d)
print(idx)
My problem now is that I want to return the image (or its index in the array images) with the minimum Euclidean distance, not its index in the array of distances d.
I don't believe you have modified the order in which the images are stored in w compared to images, so the idx from np.argmin(d) should be a valid index into the images array as well. Therefore,
images[idx]
should be the image you want.
Of course,
images[idx].shape
will give (8100,) because it's still flattened. If you want to unflatten it, you can do:
images[idx].reshape(90,90)
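To visualise or save the match, a minimal sketch (assuming the images were loaded as 8-bit grayscale, as above; the output filename is an arbitrary choice):
from PIL import Image
import numpy as np

# reshape the flattened row back into a 90x90 grayscale image and save it
matched = np.asarray(images[idx].reshape(90, 90), dtype=np.uint8)
Image.fromarray(matched).save('best_match.png')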