Find the EyeMap of an image using Python OpenCV

I tried to code EyeMapC but couldn't get the expected output.
EyeMapC = (1/3) (Cb² + (Cr')² + Cb/Cr)
where Cr' = 255 − Cr
Here's my code
from __future__ import division
import cv2
import numpy as np
img = cv2.imread('img/file.jpg')
EyeMap = cv2.cvtColor(img,cv2.COLOR_BGR2YCR_CB)
y,Cr,Cb = cv2.split(EyeMap)
Q = np.square(Cb) #,dtype=np.float64)
Cr_bar = (255-Cr)
R = np.square(Cr_bar) #,dtype=np.float64)
G = Cb/Cr
EyeC = (Q/3+R/3+G/3)
#EyeC = np.mod(EyeC,255)
cv2.imshow('EyeC',EyeC)
cv2.waitKey(0)
cv2.destroyAllWindows()
Do Cb² and (Cr')² have to be normalized to [0, 255] before they go into the formula, or does the final EyeC value need to be normalized?
Original image: (attached)
Output: (attached)
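One reading of the formula (the usual eye-map construction) is that each of the three terms is normalized to [0, 255] before averaging, with the arithmetic done in float so the squares don't overflow uint8. A minimal sketch under that assumption, not necessarily the exact normalization the original papers intend:
import cv2
import numpy as np
img = cv2.imread('img/file.jpg')
ycrcb = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)
y, cr, cb = cv2.split(ycrcb)
# work in float64 so the squares and the ratio neither overflow nor truncate
cb = cb.astype(np.float64)
cr = cr.astype(np.float64)
cb2 = cv2.normalize(cb ** 2, None, 0, 255, cv2.NORM_MINMAX)
crn2 = cv2.normalize((255.0 - cr) ** 2, None, 0, 255, cv2.NORM_MINMAX)
ratio = cv2.normalize(cb / (cr + 1e-6), None, 0, 255, cv2.NORM_MINMAX)
EyeC = (cb2 + crn2 + ratio) / 3.0
cv2.imshow('EyeC', EyeC.astype(np.uint8))
cv2.waitKey(0)
cv2.destroyAllWindows()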

Related

Detecting the volume/overlap region in image registration for OCT data

I am working on image registration of OCT data. I would like to locate the regions in my registered target image where image registration has actually occurred from the source images. I am working in Python. Can anyone tell me what techniques are available?
Any suggestions on how to proceed with the problem are also welcome. I have done some trial image registration on two images initially. The goal is to register a large dataset.
My code is given below:
#importing libraries
import cv2
import numpy as np
# from skimage.measure import structural_similarity as ssim
# from skimage.measure import compare_ssim
import skimage
from skimage import measure
import matplotlib.pyplot as plt
def imageRegistration():
    # open the image files
    path = 'D:/Fraunhofer Thesis/LatestPythonImplementations/Import_OCT_Vision/sliceImages(_x_)/'
    image1 = cv2.imread(str(path) + '104.png')
    image2 = cv2.imread(str(path) + '0.png')
    # converting to greyscale
    img1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
    img2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
    height, width = img2.shape
    # Create ORB detector with 5000 features.
    orb_detector = cv2.ORB_create(5000)
    # Find keypoints and descriptors.
    # The first arg is the image, second arg is the mask
    # (which is not required in this case).
    kp1, d1 = orb_detector.detectAndCompute(img1, None)
    kp2, d2 = orb_detector.detectAndCompute(img2, None)
    # Match features between the two images.
    # We create a Brute Force matcher with
    # Hamming distance as measurement mode.
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    # Match the two sets of descriptors.
    matches = matcher.match(d1, d2)
    # Sort matches on the basis of their Hamming distance.
    matches = sorted(matches, key=lambda x: x.distance)
    # Take the top 90 % matches forward.
    matches = matches[:int(len(matches) * 0.9)]
    no_of_matches = len(matches)
    # Define empty matrices of shape no_of_matches * 2.
    p1 = np.zeros((no_of_matches, 2))
    p2 = np.zeros((no_of_matches, 2))
    for i in range(no_of_matches):
        p1[i, :] = kp1[matches[i].queryIdx].pt
        p2[i, :] = kp2[matches[i].trainIdx].pt
    # Find the homography matrix.
    homography, mask = cv2.findHomography(p1, p2, cv2.RANSAC)
    # Use this matrix to transform the
    # colored image wrt the reference image.
    transformed_img = cv2.warpPerspective(image1,
                                          homography, (width, height))
    # Save the output.
    cv2.imwrite('output.jpg', transformed_img)
# The following code measures the difference between the source image, the target image and the registered image.
# An MSE of 0 means perfect similarity (no difference);
# MSE > 1 means there is a difference, and the larger the value, the greater the difference.
def findingDifferenceMSE():
    path = 'D:/Fraunhofer Thesis/LatestPythonImplementations/Import_OCT_Vision/sliceImages(_x_)/'
    image1 = cv2.imread(str(path) + '104.png')
    image2 = cv2.imread(str(path) + '0.png')
    image3 = cv2.imread('D:/Fraunhofer Thesis/LatestPythonImplementations/Import_OCT_Vision/output.jpg')
    err = np.sum((image1.astype("float") - image3.astype("float")) ** 2)
    err /= float(image1.shape[0] * image3.shape[1])
    print("MSE:")
    print('Image 104 and output image: ', err)
    err1 = np.sum((image2.astype("float") - image3.astype("float")) ** 2)
    err1 /= float(image2.shape[0] * image3.shape[1])
    print('Image 0 and output image: ', err1)
def findingDifferenceSSIM():
    path = 'D:/Fraunhofer Thesis/LatestPythonImplementations/Import_OCT_Vision/sliceImages(_x_)/'
    image1 = cv2.imread(str(path) + '104.png')
    image2 = cv2.imread(str(path) + '0.png')
    image3 = cv2.imread('D:/Fraunhofer Thesis/LatestPythonImplementations/Import_OCT_Vision/output.jpg')
    result1 = measure.compare_ssim(image1, image3)
    print(result1)
# calling the functions
imageRegistration()
findingDifferenceMSE()
# findingDifferenceSSIM()
This is the registered image:
This image is the first reference image:
This is the second reference image:
An image-differencing technique can be used to identify the registered area: compare the registered image with the base images, and the regions that differ stand out, as sketched below.
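As a rough sketch of that differencing idea (reusing the file names from the question; the threshold value is a guess you would tune):
import cv2
import numpy as np
reference = cv2.imread('0.png', cv2.IMREAD_GRAYSCALE)
registered = cv2.imread('output.jpg', cv2.IMREAD_GRAYSCALE)
# per-pixel absolute difference between the reference and the registered result
diff = cv2.absdiff(reference, registered)
# threshold the difference to get a binary mask of the regions that changed
_, mask = cv2.threshold(diff, 25, 255, cv2.THRESH_BINARY)
# optional: clean the mask with a morphological opening
kernel = np.ones((5, 5), np.uint8)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
cv2.imwrite('difference_mask.png', mask)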

How to increase the brightness of part of an RGB image without overflow?

I have an image as shown below and I want to increase the brightness of the lightning section.
my input image:
Here's my code:
import cv2 as cv
import numpy as np
src = cv.imread('./img.jpg')
hsv_src = cv.cvtColor(src, cv.COLOR_BGR2HSV)
v = hsv_src[:,:,2]
value = 50
hsv_src[:,:,2]=np.where((255-v)<value,255,v+value) # v+value> 255
out = cv.cvtColor(hsv_src,cv.COLOR_HSV2BGR)
cv.imshow('output',out)
cv.waitKey(0)
but I eventually got this:
I just want to increase the brightness of the lightning, but what I'm seeing is the brightness of the entire image increasing. I'm honestly confused and don't know what to do.
Don't add a constant; multiply by a factor instead. See the example:
import cv2 as cv
import numpy as np
src = cv.imread('zHSbF.jpg')
hsv_src = cv.cvtColor(src, cv.COLOR_BGR2HSV)
v = hsv_src[:, :, 2]
k = 1.5
# widen to uint16 before multiplying so the product cannot wrap, then clip back to [0, 255]
hsv_src[:, :, 2] = np.clip(np.uint16(v) * k, 0, 255)
out = cv.cvtColor(hsv_src, cv.COLOR_HSV2BGR)
cv.imwrite('out8.png', out)
cv.imshow('output', out)
cv.waitKey(0)
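If you only want to brighten the lightning rather than the whole frame, one option (a sketch with a hand-picked threshold, not a definitive fix) is to build a mask of the already-bright pixels and apply the gain only inside it:
import cv2 as cv
import numpy as np
src = cv.imread('./img.jpg')
hsv = cv.cvtColor(src, cv.COLOR_BGR2HSV)
v = hsv[:, :, 2].astype(np.float32)
# mask of the already-bright pixels (the lightning); 200 is an arbitrary cut-off
mask = v > 200
# multiply only the masked pixels, then clip back to the 8-bit range
v[mask] = np.clip(v[mask] * 1.5, 0, 255)
hsv[:, :, 2] = v.astype(np.uint8)
out = cv.cvtColor(hsv, cv.COLOR_HSV2BGR)
cv.imshow('output', out)
cv.waitKey(0)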

Convert an IR image to RGB with Python

The code below is intended to take an infrared image (B&W) and convert it to RGB. It does so successfully, but with significant noise. I have included a few lines for noise reduction but they don't seem to help. I've included the starting/resulting photos below. Any advice/corrections are welcome and thank you in advance!
from skimage import io
import numpy as np
import glob, os
from tkinter import Tk
from tkinter.filedialog import askdirectory
import cv2
path = askdirectory(title='Select PNG Folder') # shows dialog box and return the path
outpath = askdirectory(title='Select SAVE Folder')
# wavelength in microns
MWIR = 4.5
R = .642
G = .532
B = .44
vector = [R, G, B]
vectorsum = np.sum(vector)
vector = vector / vectorsum #normalize
vector = vector*255 / MWIR #changing this value changes the outcome significantly so I
#have been messing with it in the hopes of fixing it but no luck so far.
vector = np.power(vector, 4)
for file in os.listdir(path):
    if file.endswith(".png"):
        imIn = io.imread(os.path.join(path, file))
        imOut = imIn * vector
        ret, thresh = cv2.threshold(imOut, 64, 255, cv2.THRESH_BINARY)
        kernel = np.ones((5, 5), np.uint8)
        erode = cv2.erode(thresh, kernel, iterations=1)
        result = cv2.bitwise_or(imOut, erode)
        io.imsave(os.path.join(outpath, file) + '_RGB.png', imOut.astype(np.uint8))
Your noise looks like completely random values, so I suspect you have an error in your conversion from float to uint8. But instead of rolling everything yourself, why don't you just use:
imOut = cv2.cvtColor(imIn,cv2.COLOR_GRAY2BGR)
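If you do want to keep the per-channel weighting, the float result has to be clipped before the cast; casting alone lets out-of-range values wrap around, which produces exactly that kind of salt-and-pepper look. A tiny sketch (im_float stands in for the weighted image from the loop above):
import numpy as np
im_float = np.random.uniform(0, 300, (64, 64, 3))      # stand-in for imIn * vector
im_uint8 = np.clip(im_float, 0, 255).astype(np.uint8)  # clip first, then cast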
Here is one way to do that in Python/OpenCV.
Your issue is likely that your channel values are exceeding the 8-bit range.
Sorry, I do not understand the relationship between your R,G,B weights and your MWIR. Dividing by MWIR will do nothing if your weights are properly normalized.
Input:
import cv2
import numpy as np
# read image
img = cv2.imread('car.jpg')
# convert to gray
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# make color channels
red = gray.copy()
green = gray.copy()
blue = gray.copy()
# set weights
R = .642
G = .532
B = .44
MWIR = 4.5
# raise the weights to the 4th power, then normalize them by their sum
R = R**4
G = G**4
B = B**4
sum = R + G + B
R = R/sum
G = G/sum
B = B/sum
print(R,G,B)
# combine channels with weights
red = (R*red)
green = (G*green)
blue = (B*blue)
result = cv2.merge([red,green,blue])
# scale by ratio of 255/max to increase to fully dynamic range
max=np.amax(result)
result = ((255/max)*result).clip(0,255).astype(np.uint8)
# write result to disk
cv2.imwrite("car_colored.png", result)
# display it
cv2.imshow("RESULT", result)
cv2.waitKey(0)
Result
If the noise is coming from the sensor itself, like grainy noise, you'll need to look into denoising algorithms. Both scikit-image and OpenCV provide denoising functions you can try.
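As a concrete starting point, here is a sketch using OpenCV's non-local-means denoiser (the strength parameters are guesses you would tune for your images):
import cv2
img = cv2.imread('car_colored.png')  # the colorized result from above
# non-local-means denoising for color images; h and hColor control the filter strength
denoised = cv2.fastNlMeansDenoisingColored(img, None, 10, 10, 7, 21)
cv2.imwrite('car_colored_denoised.png', denoised)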
I recently learned about matplotlib.cm, which handles colormaps. I've been using those to artificially color IR images, and made a brief example using the same black & white car image used above. Basically, I create a colormap .csv file locally, then refer to it for RGB weights. You may have to pick and choose which colormap you prefer, but that's up to personal preference.
Input image:
Python:
import os
import numpy as np
import cv2
from matplotlib import cm
# Multiple colormap options are available- I've hardcoded viridis for this example.
colormaps = ["viridis", "plasma", "inferno", "magma", "cividis"]
def CreateColormap():
    if not os.path.exists("viridis_colormap.csv"):
        # Get 256 entries from "viridis" or any other Matplotlib colormap
        colormap = cm.get_cmap("viridis", 256)
        # Make a Numpy array of the 256 RGB values
        # Each line corresponds to an RGB colour for a greyscale level
        np.savetxt("viridis_colormap.csv", (colormap.colors[..., 0:3] * 255).astype(np.uint8), fmt='%d', delimiter=',')

def RecolorInfraredImageToRGB(ir_image):
    # Load RGB lookup table from CSV file
    lookup_table = np.loadtxt("viridis_colormap.csv", dtype=np.uint8, delimiter=",")
    # Make output image, same height and width as IR image, but 3-channel RGB
    result = np.zeros((*ir_image.shape, 3), dtype=np.uint8)
    # Take entries from RGB LUT according to greyscale values in image
    np.take(lookup_table, ir_image, axis=0, out=result)
    return result

if __name__ == "__main__":
    CreateColormap()
    img = cv2.imread("bwcar.jpg")
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    recolored = RecolorInfraredImageToRGB(gray)
    cv2.imwrite("car_recolored.png", recolored)
    cv2.imshow("Viridis recolor", recolored)
    cv2.waitKey(0)
Output:
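As an aside, if you don't need the CSV round-trip, recent OpenCV builds ship the same colormaps built in, so a similar recoloring can be done in one call (a sketch, assuming your OpenCV includes COLORMAP_VIRIDIS):
import cv2
gray = cv2.cvtColor(cv2.imread('bwcar.jpg'), cv2.COLOR_BGR2GRAY)
# apply OpenCV's built-in viridis colormap directly to the greyscale IR image
recolored = cv2.applyColorMap(gray, cv2.COLORMAP_VIRIDIS)
cv2.imwrite('car_applycolormap.png', recolored)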

How can I improve noise addition in Python?

I'm trying to add random noise, drawn from a uniform distribution between the minimum pixel value and 0.1 times the maximum pixel value, to each pixel of each channel of the original image.
Here's my code so far:
[in]:
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Read image with cv2
image = cv2.imread('example_image.jpg' , 1)
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# Display image
imshow(image_rgb)
# R,G,B channel separation
R, G, B = cv2.split(image_rgb)
# Creating Noise
noise_R = np.random.uniform(R.min(),R.max()*0.1, R.size)
noise_R.shape = (256,256)
noise_G = np.random.uniform(B.min(),B.max()*0.1, G.size)
noise_G.shape = (256,256)
noise_B = np.random.uniform(G.min(), G.max()*0.1, B.size)
noise_B.shape = (256,256)
# Adding noise to each channel separately
R = R + noise_R
G = G + noise_G
B = B + noise_B
rgb_noise = R + G + B
noisy_image = image + rgb_noise
[out]:
ValueError: operands could not be broadcast together with shapes (256,256,3) (256,256)
I'm getting a ValueError that the array shapes of rgb_noise and image are not equal. I've tried changing the shape of rgb_noise to match image's, but then I get a size error. How can I fix it? Is there a better method?
Your solution is a bit verbose, and could be made more compact.
However, the reason why you do not get white-ish noise is that you compute your red channel differently from the other two.
Changing this:
noise_R = np.random.uniform(R_min,R_max*0.3, image_G.size)
to this:
noise_R = np.random.uniform(R_min,R_max*0.1, image_R.size)
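For reference, a more compact version (a sketch; it draws the noise per channel with the image's own height and width, so there is no broadcasting mismatch, and it clips before the cast so nothing overflows):
import cv2
import numpy as np
image = cv2.imread('example_image.jpg', 1)
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
# uniform noise per channel, between that channel's min and 0.1 * its max
noise = np.stack([
    np.random.uniform(image_rgb[..., c].min(), image_rgb[..., c].max() * 0.1, image_rgb.shape[:2])
    for c in range(3)
], axis=-1)
noisy_image = np.clip(image_rgb + noise, 0, 255).astype(np.uint8)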
You can keep it simple and add the noise using just a NumPy array.
import numpy
import matplotlib.pyplot as plt
import cv2
Note that plotting the image with matplotlib only works well in Jupyter notebooks; use cv2.imshow() in other IDEs.
1) Load your image
img = cv2.imread('path')  # keep the default uint8 BGR image
2) Make random noise
h, w, c = img.shape  # rows, columns, channels
# both inputs to addWeighted must share the same dtype, so draw the noise as uint8
noise = np.random.randint(0, 256, (h, w, c), dtype=np.uint8)
3) Blend them
image_with_noise = cv2.addWeighted(img, 0.5, noise, 0.5, 0)
You can adjust the alpha and beta values.
There you have a noisy image!

B&W image to binary array

I want to convert my B&W image (.png) to a binary array (black is 1, white is 0). I have written some code, but it's not working. The error says: argument 2 to map() must support iteration.
Here is my code:
from PIL import Image
from resizeimage import resizeimage
import sys
def threshold(col):
s = sum(col)
return int(s > 255 * 3 // 2)
img = Image.open("filename.png")
ratio = float((img.size[1]) / (img.size[0]))
img = resizeimage.resize_cover(img, [100, int(ratio * 100)])
pixels = img.getdata()
binary = list(map(threshold, img))
array2d = [binary[i * img.size[0] : (i+1) * img.size[0]] for i in range(img.size[1])]
print('\n'.join(''.join(map(str, line)) for line in array2d))
Here is the image:
You need to convert your image to grayscale first, since PIL opens it as RGB. Then, invert the 0 & 255 values. Then, you can convert the non-zero values to 1. Here's one way:
from PIL import Image
import numpy as np
img = Image.open('bw_circle.png').convert('L')
np_img = np.array(img)
np_img = ~np_img # invert B&W
np_img[np_img > 0] = 1
And an alternative way using PIL for the inversion:
from PIL import Image, ImageOps
import numpy as np
img = Image.open('bw_circle.png').convert('L')
img_inverted = ImageOps.invert(img)
np_img = np.array(img_inverted)
np_img[np_img > 0] = 1
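If you then want the same printed output your original code was building, the 2D array can be dumped row by row (a small usage example, continuing from np_img above):
print('\n'.join(''.join(str(v) for v in row) for row in np_img.tolist()))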
