np.dstack() does not recreate original image in OpenCV - python

My program breaks an image into 4 channels of CMYK. When I go to re-stack these channels, something isn't working: the result does not look like the original image, although it does have color.
Input Image:
Code:
import cv2
import numpy as np
# Load image
bgr = cv2.imread('xpwallpaper.jpg')
# Make float and divide by 255 to give BGRdash
bgrdash = bgr.astype(float)/255.
# Calculate K as (1 - whatever is biggest out of Rdash, Gdash, Bdash)
K = 1 - np.max(bgrdash, axis=2)
# Calculate C
C = (1-bgrdash[...,2] - K)/(1-K)
# Calculate M
M = (1-bgrdash[...,1] - K)/(1-K)
# Calculate Y
Y = (1-bgrdash[...,0] - K)/(1-K)
# Combine 4 channels into single image and re-scale back up to uint8
CMYK = (np.dstack((C,M,Y,K))*255).astype(np.uint8)
cv2.imwrite("CMYK.jpg", CMYK)
Resulting Image:
This is an oddly specific issue that I am having trouble searching for.
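For what it's worth, the conversion formulas themselves can be sanity-checked by inverting them; a minimal round-trip sketch (reusing the C, M, Y, K arrays above, with np.nan_to_num guarding the 0/0 pixels where K equals 1):
import numpy as np

# Inverse of the formulas above: B' = (1-Y)(1-K), G' = (1-M)(1-K), R' = (1-C)(1-K)
C0, M0, Y0 = (np.nan_to_num(ch) for ch in (C, M, Y))  # zero the 0/0 pixels where K == 1
bgr_back = np.dstack(((1 - Y0) * (1 - K), (1 - M0) * (1 - K), (1 - C0) * (1 - K)))
bgr_back = (bgr_back * 255).astype(np.uint8)  # should match the original BGR image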

Related

Combining 3 channel numpy array to form an rgb image

I was trying to combine 3 grayscale images into a single overlapping image, with a different colour for each.
For that, I added each one into a 3-channel numpy array.
But when plotting with im.show() I don't get a colourful image. It works up to adding the 2nd channel, but when I add the third channel it doesn't work: the final image has only red and blue colour.
It is supposed to be red, green and blue, corresponding to the three overlapping images.
Why would that be?
from PIL import Image
import numpy as np

image1 = Image.open("E:/imaging/04102022_Bronze/Copper_4_2/10.tif")  # opening image1
image1_norm = (np.array(image1) - np.array(image1).min()) / (np.array(image1).max() - np.array(image1).min())  # normalising image1
image2 = Image.open("E:/imaging/04102022_Bronze/Oxygen_1_2/10.tif")  # opening image2
image2_norm = (np.array(image2) - np.array(image2).min()) / (np.array(image2).max() - np.array(image2).min())  # normalising image2
image3 = Image.open("E:/imaging/04102022_Bronze/Oxygen_1_2/10.tif")  # opening image3
image3_norm = (np.array(image3) - np.array(image3).min()) / (np.array(image3).max() - np.array(image3).min())  # normalising image3
im = np.array(image2)
new_image = np.zeros(im.shape + (3,))  # creating an empty 3-channel numpy array; its shape is (255, 1024, 3)
new_image[:,:,0] = image1_norm  # adding the three images into three channels
new_image[:,:,1] = image2_norm
new_image[:,:,2] = image3_norm
new_image1 = new_image * 255.999
new_image2 = new_image1.astype(np.uint8)
final_image = Image.fromarray(new_image2, mode='RGB')
A few possible issues...
When you open an image in PIL, if you want to be sure it is single-channel greyscale, and not accidentally 3-channel RGB or a palette image, force it to greyscale:
im = Image.open('image.png').convert('L')
Try not to repeat complicated calculations or expressions several times - it just makes for a maintenance nightmare. Maybe use a function instead:
def normalize(im):
    # Normalise image to range 0..1
    min, max = im.min(), im.max()
    return (im.astype(float)-min)/(max-min)
You can use Numpy's dstack() to merge channels - it means "depth"-stack, as opposed to np.vstack() which stacks images vertically above/below each other and np.hstack() which stacks images side-by-side horizontally. It is a lot simpler than creating an image of the right size and individually pushing each channel into it.
result = np.dstack((im1, im2, im3))
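As a quick sanity check of the three stacking behaviours (a minimal sketch with an arbitrary 4x5 array):
import numpy as np

a = np.zeros((4, 5))               # stand-in for a single-channel image
print(np.dstack((a, a, a)).shape)  # (4, 5, 3)  - channels stacked in depth
print(np.vstack((a, a, a)).shape)  # (12, 5)    - images stacked vertically
print(np.hstack((a, a, a)).shape)  # (4, 15)    - images stacked side-by-side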
That would make the overall code more like this:
#!/usr/bin/env python3
from PIL import Image
import numpy as np
def normalize(im):
    # Normalise image to range 0..1
    min, max = im.min(), im.max()
    return (im.astype(float)-min)/(max-min)
# Load images as single channel Numpy arrays
im1 = np.array(Image.open('ch1.png').convert('L'))
im2 = np.array(Image.open('ch2.png').convert('L'))
im3 = np.array(Image.open('ch3.png').convert('L'))
# Normalize and scale
n1 = normalize(im1) * 255.999
n2 = normalize(im2) * 255.999
n3 = normalize(im3) * 255.999
# Merge channels to RGB
result = np.dstack((n1,n2,n3))
result = Image.fromarray(result.astype(np.uint8))
result.save('result.png')
That makes these three input images:
into this merged image:

How to merge two RGBA images

I'm trying to merge two RGBA images (with a shape of (h,w,4)), taking into account their alpha channels.
Example :
What I've tried
I tried to do this using OpenCV, but I am getting some strange pixels on the output image.
Images Used:
and
import cv2
import numpy as np
import matplotlib.pyplot as plt
image1 = cv2.imread("image1.png", cv2.IMREAD_UNCHANGED)
image2 = cv2.imread("image2.png", cv2.IMREAD_UNCHANGED)
mask1 = image1[:,:,3]
mask2 = image2[:,:,3]
mask2_inv = cv2.bitwise_not(mask2)
mask2_bgra = cv2.cvtColor(mask2, cv2.COLOR_GRAY2BGRA)
mask2_inv_bgra = cv2.cvtColor(mask2_inv, cv2.COLOR_GRAY2BGRA)
# output = image2*mask2_bgra + image1
output = cv2.bitwise_or(cv2.bitwise_and(image2, mask2_bgra), cv2.bitwise_and(image1, mask2_inv_bgra))
output[:,:,3] = cv2.bitwise_or(mask1, mask2)
plt.figure(figsize=(12,12))
plt.imshow(cv2.cvtColor(output, cv2.COLOR_BGRA2RGBA))
plt.axis('off')
Output :
So what I figured out is that I'm getting those weird pixels because I used the cv2.bitwise_and function (which, by the way, works perfectly with binary alpha channels).
I tried using different approaches.
Question
Is there an approach to do this (while keeping the output image as an 8-bit image)?
I was able to obtain the expected result in 2 stages.
# Read both images preserving the alpha channel
hh1 = cv2.imread(r'C:\Users\524316\Desktop\Stack\house.png', cv2.IMREAD_UNCHANGED)
hh2 = cv2.imread(r'C:\Users\524316\Desktop\Stack\memo.png', cv2.IMREAD_UNCHANGED)
# store the alpha channels only
m1 = hh1[:,:,3]
m2 = hh2[:,:,3]
# invert the alpha channel and obtain a 4-channel (BGRA) mask of float data type
m1i = cv2.bitwise_not(m1)
alpha1i = cv2.cvtColor(m1i, cv2.COLOR_GRAY2BGRA)/255.0
m2i = cv2.bitwise_not(m2)
alpha2i = cv2.cvtColor(m2i, cv2.COLOR_GRAY2BGRA)/255.0
# Perform blending and limit pixel values to 0-255 (convert to 8-bit)
b1i = cv2.convertScaleAbs(hh2*(1-alpha2i) + hh1*alpha2i)
Note: in the above we are using only the inverse alpha channel of the memo image.
But I guess this is not the expected result. So moving on ...
# Finding common ground between both the inverted alpha channels
mul = cv2.multiply(alpha1i,alpha2i)
# converting to 8-bit
mulint = cv2.normalize(mul, dst=None, alpha=0, beta=255,norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
# again create a 4-channel (BGRA) mask of float data type
alpha = cv2.cvtColor(mulint[:,:,2], cv2.COLOR_GRAY2BGRA)/255.0
# perform blending using previous output and multiplied result
final = cv2.convertScaleAbs(b1i*(1-alpha) + mulint*alpha)
Sorry for the weird variable names. I would request you to analyze the result in each line. I hope this is the expected output.
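For comparison, what this two-stage blend approximates is the standard "over" compositing operator; a minimal sketch (my assumptions: straight, non-premultiplied 8-bit BGRA inputs, with the memo composited over the house):
import numpy as np

def over(fg, bg):
    # Porter-Duff "over" with straight alpha:
    # a_out = a_fg + a_bg*(1 - a_fg); colour is the alpha-weighted average
    a_fg = fg[:, :, 3:4].astype(float) / 255.0
    a_bg = bg[:, :, 3:4].astype(float) / 255.0
    a_out = a_fg + a_bg * (1.0 - a_fg)
    colour = (fg[:, :, :3] * a_fg + bg[:, :, :3] * a_bg * (1.0 - a_fg)) / np.maximum(a_out, 1e-6)
    return np.clip(np.dstack((colour, a_out * 255.0)) + 0.5, 0, 255).astype(np.uint8)

composited = over(hh2, hh1)  # memo over house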
You could use PIL library to achieve this
from PIL import Image

def merge_images(im1, im2):
    bg = Image.open(im1).convert("RGBA")
    fg = Image.open(im2).convert("RGBA")
    x, y = ((bg.width - fg.width) // 2, (bg.height - fg.height) // 2)
    bg.paste(fg, (x, y), fg)
    # convert to 8 bits (palette mode)
    return bg.convert("P")
We can test it using the provided images:
result_image = merge_images("image1.png", "image2.png")
result_image.save("image3.png")
Here's the result:

OpenCV RGB to CMYK conversion not producing expected results

I am trying to separate the CMYK channels from an RGB image to be used later.
The question np.dstack() does not recreate original image in OpenCV is my earlier question using a test image. I decided to run it again with a different image, and it returned results that I can't explain.
The issue with this is the results do not match the original image as they are supposed to.
Left: Original Image
Top Right: Result as .jpg
Bottom Right: Result as .tif
Code:
import cv2
import numpy as np
# Load image
bgr = cv2.imread('xpwallpaper.jpg')
# Make float and divide by 255 to give BGRdash
bgrdash = bgr.astype(float)/255.
# Calculate K as (1 - whatever is biggest out of Rdash, Gdash, Bdash)
K = 1 - np.max(bgrdash, axis=2)
# Calculate C
C = (1-bgrdash[...,2] - K)/(1-K)
# Calculate M
M = (1-bgrdash[...,1] - K)/(1-K)
# Calculate Y
Y = (1-bgrdash[...,0] - K)/(1-K)
# Combine 4 channels into single image and re-scale back up to uint8
CMYK = (np.dstack((C,M,Y,K))*255).astype(np.uint8)
cv2.imwrite("CMYK.jpg", CMYK)

Convert a black and white image to array of numbers?

Like the image above suggests, how can I convert the image on the left into an array that represents the darkness of the image, between 0 for white and decimals closer to 1 for darker colours, as shown in the image, using Python 3?
Update:
I have tried to work abit more on this. There are good answers below too.
import numpy
import tensorflow as tf
from IPython.display import Image  # assuming a notebook; Image displays the file inline

# Load image
filename = tf.constant("one.png")
image_file = tf.read_file(filename)

# Show image
Image("one.png")

# convert method
def convertRgbToWeight(rgbArray):
    arrayWithPixelWeight = []
    for i in range(int(rgbArray.size / rgbArray[0].size)):
        for j in range(int(rgbArray[0].size / 3)):
            lum = 255 - ((rgbArray[i][j][0] + rgbArray[i][j][1] + rgbArray[i][j][2]) / 3)  # Reversed luminosity
            arrayWithPixelWeight.append(lum / 255)  # Map values from range 0-255 to 0-1
    return arrayWithPixelWeight

# Convert image to numbers and print them
image_decoded_png = tf.image.decode_png(image_file, channels=3)
image_as_float32 = tf.cast(image_decoded_png, tf.float32)
numpy.set_printoptions(threshold=numpy.nan)
sess = tf.Session()
squeezedArray = sess.run(image_as_float32)
convertedList = convertRgbToWeight(squeezedArray)
print(convertedList)  # This will give me an array of numbers.
I would recommend reading images in with OpenCV. The biggest advantage of OpenCV is that it supports multiple image formats and automatically transforms the image into a numpy array. For example:
import cv2
import numpy as np
img_path = '/YOUR/PATH/IMAGE.png'
img = cv2.imread(img_path, 0) # read image as grayscale. Set second parameter to 1 if rgb is required
Now img is a numpy array with values between 0 and 255. By default 0 equals black and 255 equals white. To change this you can use the OpenCV built-in function bitwise_not:
img_reverted= cv2.bitwise_not(img)
We can now scale the array with:
new_img = img_reverted / 255.0  # now all values range from 0 to 1, where white equals 0.0 and black equals 1.0
Load the image and then just invert and divide by 255.
Here is the image ('Untitled.png') that I used for this example: https://ufile.io/h8ncw
import numpy as np
import cv2
import matplotlib.pyplot as plt
my_img = cv2.imread('Untitled.png')
inverted_img = (255.0 - my_img)
final = inverted_img / 255.0
# Visualize the result
plt.imshow(final)
plt.show()
print(final.shape)
(661, 667, 3)
Results (final object represented as image):
You can use the PIL package to manage images. Here's an example of how it can be done.
from PIL import Image

image = Image.open('sample.png')
width, height = image.size
pixels = image.load()

# Check if has alpha, to avoid "too many values to unpack" error
has_alpha = len(pixels[0,0]) == 4

# Create empty 2D list
fill = 1
array = [[fill for x in range(width)] for y in range(height)]

for y in range(height):
    for x in range(width):
        if has_alpha:
            r, g, b, a = pixels[x,y]
        else:
            r, g, b = pixels[x,y]
        lum = 255-((r+g+b)/3) # Reversed luminosity
        array[y][x] = lum/255 # Map values from range 0-255 to 0-1
I think it works, but please note that the only test I did was checking whether the values are in the desired range:
# Test max and min values
h, l = 0, 1
for row in array:
    h = max([max(row), h])
    l = min([min(row), l])
print(h, l)
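The same mapping can also be done in one vectorised step with numpy, avoiding the per-pixel loop (a sketch, assuming the same 'sample.png'):
import numpy as np
from PIL import Image

im = np.array(Image.open('sample.png').convert('RGB'), dtype=float)
array = 1.0 - im.mean(axis=2) / 255.0  # reversed mean luminosity, mapped to 0..1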
You have to load the image from its path and then transform it into a numpy array.
The values of the image will be between 0 and 255. The next step is to standardize the numpy array to the 0-1 range.
Hope it helps.
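As a concrete minimal sketch of those steps (assuming PIL and a hypothetical 'image.png'):
import numpy as np
from PIL import Image

img = np.array(Image.open('image.png').convert('L'))  # greyscale array, values 0..255
standardized = (255 - img) / 255.0  # invert and scale: white -> 0.0, black -> 1.0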

Webcam color calibration using OpenCV

Having ordered half a dozen webcams online for a project, I notice that the colors of their output are not consistent.
In order to compensate for this, I have attempted to take a template image, extract its R, G and B histograms, and match each target image's RGB histograms to them.
This was inspired by the description of the solution to a very similar problem, Comparative color calibration.
The perfect solution will look like this :
In order to try to solve this I wrote the following script, which performed poorly:
EDIT (Thanks to @DanMašek and @api55)
import numpy as np

def show_image(title, image, width=300):
    # resize the image to have a constant width, just to
    # make displaying the images take up less screen real
    # estate
    r = width / float(image.shape[1])
    dim = (width, int(image.shape[0] * r))
    resized = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)
    # show the resized image
    cv2.imshow(title, resized)
def hist_match(source, template):
    """
    Adjust the pixel values of a grayscale image such that its histogram
    matches that of a target image

    Arguments:
    -----------
        source: np.ndarray
            Image to transform; the histogram is computed over the flattened
            array
        template: np.ndarray
            Template image; can have different dimensions to source
    Returns:
    -----------
        matched: np.ndarray
            The transformed output image
    """
    oldshape = source.shape
    source = source.ravel()
    template = template.ravel()

    # get the set of unique pixel values and their corresponding indices and
    # counts
    s_values, bin_idx, s_counts = np.unique(source, return_inverse=True,
                                            return_counts=True)
    t_values, t_counts = np.unique(template, return_counts=True)

    # take the cumsum of the counts and normalize by the number of pixels to
    # get the empirical cumulative distribution functions for the source and
    # template images (maps pixel value --> quantile)
    s_quantiles = np.cumsum(s_counts).astype(np.float64)
    s_quantiles /= s_quantiles[-1]
    t_quantiles = np.cumsum(t_counts).astype(np.float64)
    t_quantiles /= t_quantiles[-1]

    # interpolate linearly to find the pixel values in the template image
    # that correspond most closely to the quantiles in the source image
    interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)

    return interp_t_values[bin_idx].reshape(oldshape)
from matplotlib import pyplot as plt
import cv2
source = cv2.imread('/media/somadetect/Lexar/color_transfer_data/1/frame10.png')
s_b = source[:,:,0]
s_g = source[:,:,1]
s_r = source[:,:,2]
template = cv2.imread('/media/somadetect/Lexar/color_transfer_data/5/frame6.png')
t_b = source[:,:,0]
t_r = source[:,:,1]
t_g = source[:,:,2]
matched_b = hist_match(s_b, t_b)
matched_g = hist_match(s_g, t_g)
matched_r = hist_match(s_r, t_r)
y,x,c = source.shape
transfer = np.empty((y,x,c), dtype=np.uint8)
transfer[:,:,0] = matched_r
transfer[:,:,1] = matched_g
transfer[:,:,2] = matched_b
show_image("Template", template)
show_image("Target", source)
show_image("Transfer", transfer)
cv2.waitKey(0)
Template image :
Target Image:
The Matched Image:
Then I found Adrian's (PyImageSearch) attempt to solve a very similar problem at the following link:
Fast Color Transfer
The results seem to be fairly good, with some saturation defects. I would welcome any suggestions or pointers on how to address this issue so that all webcam outputs could be calibrated to produce similar colors based on one template image.
Your script performs poorly because you are using the wrong index.
OpenCV images are BGR, so this was correct in your code:
source = cv2.imread('/media/somadetect/Lexar/color_transfer_data/1/frame10.png')
s_b = source[:,:,0]
s_g = source[:,:,1]
s_r = source[:,:,2]
template = cv2.imread('/media/somadetect/Lexar/color_transfer_data/5/frame6.png')
t_b = source[:,:,0]
t_r = source[:,:,1]
t_g = source[:,:,2]
but this is wrong
transfer[:,:,0] = matched_r
transfer[:,:,1] = matched_g
transfer[:,:,2] = matched_b
since here you are writing the channels in RGB order rather than BGR, so the colors change while OpenCV still thinks the image is BGR. That is why it looks weird.
It should be:
transfer[:,:,0] = matched_b
transfer[:,:,1] = matched_g
transfer[:,:,2] = matched_r
As another possible solution, you may look at which parameters can be set on your cameras. Sometimes they have auto parameters which you can set manually so that all of them match. Also, beware of these auto parameters: usually white balance, focus and others are set to auto, and they may change quite a lot on the same camera from one moment to another (depending on illumination, etc.).
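For instance, a minimal sketch of locking those parameters (these property constants exist in OpenCV's videoio module, but whether they take effect depends on the camera and backend; the camera index and values here are assumptions):
import cv2

cap = cv2.VideoCapture(0)  # hypothetical camera index

# Try to disable automatic adjustments so all cameras start from the same state.
cap.set(cv2.CAP_PROP_AUTO_WB, 0)            # auto white balance off
cap.set(cv2.CAP_PROP_AUTOFOCUS, 0)          # autofocus off
cap.set(cv2.CAP_PROP_WB_TEMPERATURE, 4500)  # fix a white-balance temperature (assumed value)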
UPDATE:
As DanMašek points out, also
t_b = source[:,:,0]
t_r = source[:,:,1]
t_g = source[:,:,2]
is wrong, since r should be index 2 and g should be index 1:
t_b = source[:,:,0]
t_g = source[:,:,1]
t_r = source[:,:,2]
I have attempted a white-patch based calibration routine. Here is the link: https://theiszm.wordpress.com/tag/white-balance/.
The code snippet follows:
import cv2
import math
import numpy as np
import sys
from matplotlib import pyplot as plt
def hist_match(source, template):
    """
    Adjust the pixel values of a grayscale image such that its histogram
    matches that of a target image

    Arguments:
    -----------
        source: np.ndarray
            Image to transform; the histogram is computed over the flattened
            array
        template: np.ndarray
            Template image; can have different dimensions to source
    Returns:
    -----------
        matched: np.ndarray
            The transformed output image
    """
    oldshape = source.shape
    source = source.ravel()
    template = template.ravel()

    # get the set of unique pixel values and their corresponding indices and
    # counts
    s_values, bin_idx, s_counts = np.unique(source, return_inverse=True,
                                            return_counts=True)
    t_values, t_counts = np.unique(template, return_counts=True)

    # take the cumsum of the counts and normalize by the number of pixels to
    # get the empirical cumulative distribution functions for the source and
    # template images (maps pixel value --> quantile)
    s_quantiles = np.cumsum(s_counts).astype(np.float64)
    s_quantiles /= s_quantiles[-1]
    t_quantiles = np.cumsum(t_counts).astype(np.float64)
    t_quantiles /= t_quantiles[-1]

    # interpolate linearly to find the pixel values in the template image
    # that correspond most closely to the quantiles in the source image
    interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)

    return interp_t_values[bin_idx].reshape(oldshape)
# Read original image
im_o = cv2.imread('/media/Lexar/color_transfer_data/5/frame10.png')
im = im_o
cv2.imshow('Org',im)
cv2.waitKey()
B = im[:,:, 0]
G = im[:,:, 1]
R = im[:,:, 2]
R= np.array(R).astype('float')
G= np.array(G).astype('float')
B= np.array(B).astype('float')
# Extract a pixel that corresponds to pure white R = 255, G = 255, B = 255
B_white = B[168, 351]
G_white = G[168, 351]
R_white = R[168, 351]
print(B_white)
print(G_white)
print(R_white)
# Compensate for the bias using normalization statistics
R_balanced = R / R_white
G_balanced = G / G_white
B_balanced = B / B_white
R_balanced[np.where(R_balanced > 1)] = 1
G_balanced[np.where(G_balanced > 1)] = 1
B_balanced[np.where(B_balanced > 1)] = 1
B_balanced=B_balanced * 255
G_balanced=G_balanced * 255
R_balanced=R_balanced * 255
B_balanced= np.array(B_balanced).astype('uint8')
G_balanced= np.array(G_balanced).astype('uint8')
R_balanced= np.array(R_balanced).astype('uint8')
im[:,:, 0] = (B_balanced)
im[:,:, 1] = (G_balanced)
im[:,:, 2] = (R_balanced)
# Notice saturation artifacts
cv2.imshow('frame',im)
cv2.waitKey()
# Extract the Y plane in original image and match it to the transformed image
im_o = cv2.cvtColor(im_o, cv2.COLOR_BGR2YCR_CB)
im_o_Y = im_o[:,:,0]
im = cv2.cvtColor(im, cv2.COLOR_BGR2YCR_CB)
im_Y = im[:,:,0]
matched_y = hist_match(im_o_Y, im_Y)
matched_y= np.array(matched_y).astype('uint8')
im[:,:,0] = matched_y
im_final = cv2.cvtColor(im, cv2.COLOR_YCR_CB2BGR)
cv2.imshow('frame',im_final)
cv2.waitKey()
The input image is:
The result of the script is:
Thank you all for suggestions and pointers!!
