Color detection of an object in an image - Python

I am using OpenCV and Python.
I need to detect the color of an object in an image; for example, in the image given below, the color of the shirt is red.
I found something useful at this link, but it is detecting the skin in the image:
http://lokeshdhakar.com/projects/color-thief/
I think I will have to use image contour extraction and then carry out color detection for that.

Getting the dominant colors may be achieved using the following simple approach:
from sklearn.cluster import KMeans
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches

img = cv2.imread('red_shirt.jpg')
height, width, dim = img.shape

EDIT: take only the center of the image:

img = img[(height // 4):(3 * height // 4), (width // 4):(3 * width // 4), :]
height, width, dim = img.shape

img_vec = np.reshape(img, [height * width, dim])

kmeans = KMeans(n_clusters=3)
kmeans.fit(img_vec)

EDIT: count cluster pixels, order clusters by cluster size:

unique_l, counts_l = np.unique(kmeans.labels_, return_counts=True)
sort_ix = np.argsort(counts_l)
sort_ix = sort_ix[::-1]

fig = plt.figure()
ax = fig.add_subplot(111)
x_from = 0.05
for cluster_center in kmeans.cluster_centers_[sort_ix]:
    # cluster centers are BGR floats; cast to int and reorder to RGB for the hex color
    ax.add_patch(patches.Rectangle(
        (x_from, 0.05), 0.29, 0.9, alpha=None,
        facecolor='#%02x%02x%02x' % (int(cluster_center[2]), int(cluster_center[1]), int(cluster_center[0]))))
    x_from = x_from + 0.31
plt.show()
You can remove background and skin pixels with this kind of preprocessing (a minimal sketch follows):

Load the frame
Convert BGR to HSV
Threshold the pixel values to the color range you want (for example with cv2.inRange)
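For example, a rough sketch of those steps, assuming the shirt image from the question; the HSV skin-tone bounds below are hypothetical values you would need to tune:

import cv2
import numpy as np

frame = cv2.imread('red_shirt.jpg')           # load the frame
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)  # convert BGR to HSV

# hypothetical skin-tone range in HSV; tune these bounds for your images
lower_skin = np.array([0, 30, 60], dtype=np.uint8)
upper_skin = np.array([20, 150, 255], dtype=np.uint8)
skin_mask = cv2.inRange(hsv, lower_skin, upper_skin)

# keep everything that is NOT skin, then run the dominant-color step on the result
not_skin = cv2.bitwise_not(skin_mask)
result = cv2.bitwise_and(frame, frame, mask=not_skin)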
Also check out this link: Color detection in opencv

Related

Finding matching data points within two images using Python

I have two images, Fig 1 and Fig 2, both taken from the same source but not aligned. The task is to find the common data points between these two images and draw lines between the data points that match in both images. The result I am looking for should be like Fig 4.
So far, I have used OpenCV and written the following codes:
import cv2
import matplotlib.pyplot as plt
img_file1= "Fig_1.png"
img_file2= "Fig_2.png"
img1= cv2.imread(img_file1)
img2= cv2.imread(img_file2)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
figure, ax = plt.subplots(1, 2, figsize=(16, 8))
ax[0].imshow(img1, cmap='gray')
ax[1].imshow(img2, cmap='gray')
#sift
sift = cv2.xfeatures2d.SIFT_create()
keypoints_1, descriptors_1 = sift.detectAndCompute(img1,None)
keypoints_2, descriptors_2 = sift.detectAndCompute(img2,None)
#feature matching
bf = cv2.BFMatcher(cv2.NORM_L1, crossCheck=True)
matches = bf.match(descriptors_1,descriptors_2)
matches = sorted(matches, key = lambda x:x.distance)
img3 = cv2.drawMatches(img1, keypoints_1, img2, keypoints_2, matches[:50], img2, flags=2)
plt.imshow(img3),plt.show()
This does not give the expected result; the output (img3 below) looks quite messy and unclear.
Can anyone help me with how to do this? Thanks in advance.
[Images referenced: Fig 1, Fig 2, img3 (the matching output), Fig 3]
The transformation seems purely translational. Try template matching by normalized grayscale correlation.
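For instance, a minimal sketch of that idea (file names taken from the question; using cv2.matchTemplate with a normalized correlation score is one way to estimate the translation):

import cv2

img1 = cv2.imread("Fig_1.png", cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread("Fig_2.png", cv2.IMREAD_GRAYSCALE)

# use the central region of Fig 1 as the template and search for it in Fig 2
h, w = img1.shape
template = img1[h // 4: 3 * h // 4, w // 4: 3 * w // 4]

# normalized correlation; the peak gives the best-matching location
res = cv2.matchTemplate(img2, template, cv2.TM_CCOEFF_NORMED)
_, max_val, _, max_loc = cv2.minMaxLoc(res)

# translation of img2 relative to img1 (the template's top-left corner was at (w//4, h//4))
dx = max_loc[0] - w // 4
dy = max_loc[1] - h // 4
print(f"estimated shift: dx={dx}, dy={dy}, score={max_val:.3f}")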
Basically, this seems to me to be a registration problem (the images need to be registered).
Here is what you can do:

find the location of the points with connected-component analysis
calculate the shift needed to register the two images; here it seems your images are only translated, so a simple cross-correlation-based registration is enough
from skimage.registration import phase_cross_correlation
from skimage.io import imread
from skimage.measure import label, regionprops
from skimage.filters import threshold_otsu
from matplotlib.pyplot import imshow, plot, figure
import numpy as np
# Load images
img_a = imread("671OL.jpg", as_gray=True)
img_b = imread("zpevD.jpg", as_gray=True)
# apply threshold
th_img_a = img_a > threshold_otsu(img_a)
th_img_b = img_b > threshold_otsu(img_b)
# measure connected component
img_lable = label(th_img_a)
r_props = regionprops(img_lable)
figure(figsize=(15,7))
rows, cols = img_b.shape
# calculate the registration (shift) of the two images
flow = phase_cross_correlation(th_img_a, th_img_b)
# stack the images and trace the segments that connect the points
d=10
# a vertical white bar between the two pictures
vbar=np.ones((rows,d))
xshift = cols+d
dy,dx = flow[0]
dx=dx + xshift
imshow(np.hstack([img_a, vbar, img_b]), cmap='gray')
for rp in r_props:
    y0, x0 = rp.centroid
    x1 = x0 + dx
    y1 = y0 - dy
    if y1 < rows and x1 < 2 * cols + d:
        # filter out points that are not in img_b
        plot([x0, x1], [y0, y1], '--', alpha=0.5)

Images are changed to different colors (with Pillow); how do I get the original colors back?

I am trying to find the dominant color in a frame in a video. This works well, however, my frames are somehow converted into different colors. Yellow/pink becomes blue/purple-ish, but black and white stay the same (thus it is not the inverted colors).
Does anyone know where it comes from and how I can change it so that the original colors are kept? This is my code:
import cv2
from sklearn.cluster import KMeans
from collections import Counter
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.patches as patches
video = cv2.VideoCapture('video.mp4')
def show_blurred_image(image, dominant_color):
    frame_to_blur = Image.fromarray(image)
    blurred_frame = cv2.blur(image, (200, 200))
    blurred_frame = Image.fromarray(blurred_frame)

    plt.subplot(121), plt.imshow(frame_to_blur), plt.title('Original')
    plt.xticks([]), plt.yticks([])
    plt.subplot(122), plt.imshow(blurred_frame), plt.title('Blurred')
    plt.xticks([]), plt.yticks([])

    R = round(dominant_color[0])
    G = round(dominant_color[1])
    B = round(dominant_color[2])
    custom_color = '#%02x%02x%02x' % (R, G, B)
    print(custom_color)

    rect = patches.Rectangle((1620, 0), 300, 1080, linewidth=1,
                             fill=True,
                             edgecolor=custom_color,
                             facecolor=custom_color)
    ax = plt.gca()
    ax.add_patch(rect)
    plt.show()
def get_dominant_color(image, k=4, image_processing_size=None):
    """
    takes an image as input
    returns the dominant color of the image as a list

    dominant color is found by running k means on the
    pixels & returning the centroid of the largest cluster

    processing time is sped up by working with a smaller image;
    this resizing can be done with the image_processing_size param
    which takes a tuple of image dims as input

    >>> get_dominant_color(my_image, k=4, image_processing_size=(25, 25))
    [56.2423442, 34.0834233, 70.1234123]
    """
    # resize image if new dims provided
    if image_processing_size is not None:
        image = cv2.resize(image, image_processing_size,
                           interpolation=cv2.INTER_AREA)

    # reshape the image to be a list of pixels
    image = image.reshape((image.shape[0] * image.shape[1], 3))

    # cluster and assign labels to the pixels
    clt = KMeans(n_clusters=k)
    labels = clt.fit_predict(image)

    # count labels to find most popular
    label_counts = Counter(labels)

    # subset out most popular centroid
    dominant_color = clt.cluster_centers_[label_counts.most_common(1)[0][0]]

    return list(dominant_color)
dominant_colors = []
show_frame = 10
frame_nb = 0
while video.isOpened():
    ret, frame = video.read()
    if ret == True:
        if frame_nb == show_frame:
            dominant_color = get_dominant_color(frame)
            show_blurred_image(frame, dominant_color)
        frame_nb += 1
    else:
        break
video.release()
cv2.destroyAllWindows()
OpenCV loads images in BGR format, while PIL and matplotlib work with RGB. If you want to use the libraries together, you need to convert the images to the right color space.
In your case:
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
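For example, a minimal sketch of where this could go in your loop (the exact placement is an assumption; the point is to convert once per frame, right after video.read() and before the frame reaches PIL or matplotlib):

ret, frame = video.read()
if ret:
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # BGR (OpenCV) -> RGB (PIL/matplotlib)
    dominant_color = get_dominant_color(frame)      # cluster centers are now RGB values
    show_blurred_image(frame, dominant_color)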

How to get histogram of intensity of individual masked cells in an image?

OK, so I am a newbie here who has been working on a set of homework problems, with the original post here: How do I make a mask from one image and then transfer it to another?
The original idea was to take the DAPI image (grey image) and apply it as a mask to the NPM1 (green) image. After implementing the suggested code from HansHirse (thanks!) along with some other code I had been writing for the homework problem, I finally got a working histogram of all compatible cells in the image. The "compatibility" bit is that any cells touching the border weren't supposed to be counted. However, I still need to find a way to get histograms of each individual cell as well. I've attached the original images from the post too:
To do this, I tried blob_doh and one other method to get segmented regions of each cell, but I have no idea how to apply these coordinates to an image for the histogram.
PS. The code is a bit messy. I organized it so that the blob_doh part is near the bottom and the other method is its own separate piece at the very bottom. Sorry!
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from skimage.feature import blob_dog, blob_log, blob_doh
from skimage.color import rgb2gray
import cv2
import mahotas as mh
import scipy
from scipy import ndimage
import matplotlib.patches as mpatches
from skimage import data
from skimage.filters import threshold_otsu
from skimage.segmentation import clear_border
from skimage.measure import label, regionprops
from skimage.morphology import closing, square
from skimage.color import label2rgb
# Read image into numpy array
image = cv2.imread("NOTREATDAPI.jpg",0)
dna = np.array(image) # must be gray-scale image
plt.show()
# Remove extraneous artifacts from image; set the threshold
dnaf = ndimage.gaussian_filter(dna, 8) #gaussian filter for general image
T = mh.thresholding.otsu(dnaf) # set threshold via mahotas otsu thresholding
theta=np.array(dnaf > T) #setting mask of values in image to calculated otsu threshold
cleared = clear_border(theta) #removes all cells that are in contact with the image border
epsilon = np.array(cleared) #final masked DAPI product
print("DAPI MASK USING GAUSSIAN FILTER AND OTSU THRESHOLDING");
plt.imshow(epsilon)
plt.show()
# Load and reset original images
image = cv2.imread("NOTREATDAPI.jpg",0) #The DAPI Image
image1 = cv2.imread("NOTREATNPM1.jpg",0) #The NPM1 Image
print("Original DAPI Image");plt.imshow(image);plt.show() #The DAPI Image
print("Original NPM1 Image");plt.imshow(image1);plt.show() #The NPM1 Image
# Create an array of bool of same shape as image
maskAboveThreshold = epsilon > 0 #Use mask array from above - include only values above non-masked zeros
print("Final Masked Image of NPM1"); plt.imshow(image1 *
maskAboveThreshold, cmap='gray')
plt.show()
True_NPM1= image1 * maskAboveThreshold # Final masked version of NPM1 set back to grayscale
# Create a mask using the DAPI image and binary thresholding at 25
_, mask = cv2.threshold(True_NPM1, 1, 255, cv2.THRESH_BINARY)
# Do some morphological opening to get rid of small artifacts
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN,
cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15, 15)))
# Calculate the histogram using the NPM1 image and the obtained binary mask
hist = cv2.calcHist([image1], [0], mask, [256], [0, 256])
# Show bar plot of calculated histogram
plt.bar(np.arange(256), np.squeeze(hist))
plt.show()
# Show mask image
plt.imshow(mask)
plt.show()
#blob_doh way of segmenting the cells ------
import cv2 as cv
from PIL import Image, ImageDraw
image10 = np.array(Image.open("OXALIDAPI.jpg"))
plt.imshow(image10)
#Convert to gaussian image with thresholds
image10 = cv2.imread("OXALIDAPI.jpg",0)
dna = np.array(image10) # gray-scale image
plt.show()
# Remove extraneous artifacts from image; set the threshold
dnaf = ndimage.gaussian_filter(dna, 8) #gaussian filter for general image
T = mh.thresholding.otsu(dnaf) # set threshold via mahotas otsu thresholding
theta=np.array(dnaf > T) #setting mask of values in image to calculated otsu threshold
cleared = clear_border(theta) #removes all cells that are in contact with the image border
image = np.array(cleared) #final masked DAPI product
#print("DAPI MASK USING GAUSSIAN FILTER AND OTSU THRESHOLDING");
plt.imshow(epsilon)
plt.show()
# Convert image to grayscale
image_gray = rgb2gray(image)
plt.imshow(image_gray,cmap="gray")
def plot_blobs(img, blobs):
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.imshow(img, interpolation='nearest')
    for blob in blobs:
        y, x, r = blob
        c = plt.Circle((x, y), r * 1.25, color="red", linewidth=1, fill=False)
        ax.add_patch(c)
# blob_doh
blobs_doh = blob_doh(image_gray, min_sigma=10, max_sigma=256,
threshold=.025)
plot_blobs(image,blobs_doh)
#get blob coordinates
def filter_blobs(blobs, r_cutoff=5):
    new_blobs = []
    for b in blobs:
        if b[2] > r_cutoff:
            new_blobs.append(b)
    return new_blobs
new_blobs = filter_blobs(blobs_doh)
#plot_blobs(image,new_blobs)
print(new_blobs)
#Other method of segmenting cells. maybe useful?
yeta = cv2.imread("NOTREATDAPI.jpg",0)
image = np.array(yeta)
# apply threshold
dnaf = ndimage.gaussian_filter(image, 8)
T = mh.thresholding.otsu(dnaf) # set threshold
plt.imshow(dnaf > T)
epsilon=np.array(dnaf > T)
plt.show()
# remove artifacts connected to image border
cleared = clear_border(epsilon)
# label image regions
label_image = label(cleared)
image_label_overlay = label2rgb(label_image, image=image)
fig, ax = plt.subplots(figsize=(6, 6))
ax.imshow(image_label_overlay)
for region in regionprops(label_image):
    # take regions with large enough areas
    if region.area >= 50:
        # draw rectangle around individual cells
        minr, minc, maxr, maxc = region.bbox
        rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                  fill=False, edgecolor='red', linewidth=0.5)
        ax.add_patch(rect)
#ax.set_axis_off()
#plt.tight_layout()
plt.show()
howzer=np.array(image_label_overlay)
What you are looking for is cv2.connectedComponents. Basically, once you have the binary mask that separates the cells, you label each connected component of the mask as one cell:
import cv2
import numpy as np
import matplotlib.pyplot as plt

# dapi: the DAPI image loaded in grayscale (as in your code above)
# I choose OTSU instead of binary, but they are not much different in this case
_, mask = cv2.threshold(dapi, 25, 255, cv2.THRESH_OTSU)

# compute the connected components
num_labels, markers = cv2.connectedComponents(mask)

# load 2nd image in grayscale
# as your 2nd image is only green/black
npm1 = cv2.imread('npm1.jpg', cv2.IMREAD_GRAYSCALE)

# for your image (and usually), label 0 is the background
for label in range(1, num_labels):
    # compute the histogram over the entire 256 levels of intensity
    hist, bins = np.histogram(npm1[markers == label], bins=range(256))

    # do whatever you like to hist
    # note that bins has 256 values while hist has only 255
    plt.bar(bins[1:], hist)
    plt.title('cell number: {:}'.format(label))
So, for example, the histograms of the first and second cells:
And the cell markers are:
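(The marker figure from the original answer is not reproduced here; if you want to regenerate it, a trivial sketch using the markers array from the code above is:)

import matplotlib.pyplot as plt

plt.imshow(markers, cmap='nipy_spectral')  # each connected component gets its own color
plt.show()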

Checking if image is mostly black and white or color

I am trying to classify whether an image mostly contains black and white or color; to be precise, it is a photo of a photocopy (think Xerox), which is mostly black and white. The image is NOT a single-channel image, but a 3-channel image.
I just want to know if there are any obvious ways to solve this that I'm missing.
For now I'm trying to plot histograms and maybe do a pixel count, but that does not look very promising; any suggestions on this would be really helpful.
Thanks in advance.
I am unsure of the exact use case, but having experienced similar issues I used this rather helpful article.
https://www.alanzucconi.com/2015/05/24/how-to-find-the-main-colours-in-an-image/
The GitHub containing the full code is found here: https://gist.github.com/jayapal/077f63f3163abbfb3c50c7d209524cc6
If this is for your own visual inspection, the histogram should be enough. If you are attempting to automate it, however, it may be helpful to round the color values up or down; this would tell you whether the image is darker or lighter than a certain value.
What are you using this code for, from a larger perspective? Maybe that will help us provide more adequate information.
Edit: The code above also provides the ability to define a region of the image; hopefully this will make your selection more accurate.
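If you do want to automate the black-and-white check, here is one rough sketch (the file name and both thresholds are assumptions, not from the article): measure how far each pixel's channels deviate from one another, since a photocopy should be close to grayscale.

import cv2
import numpy as np

img = cv2.imread("scan.jpg")  # hypothetical file name; a 3-channel BGR image

# per-pixel spread between channels: ~0 for gray pixels, large for saturated colors
spread = img.max(axis=2).astype(np.int16) - img.min(axis=2).astype(np.int16)

# fraction of pixels whose channels differ noticeably (both thresholds are assumptions)
colored_fraction = np.mean(spread > 20)
print("mostly black and white" if colored_fraction < 0.05 else "color")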
Adding the code from the article directly:
from sklearn.cluster import KMeans
from sklearn import metrics
import cv2
import numpy as np
image = cv2.imread("red.png")
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# Resize it
h, w, _ = image.shape
w_new = int(100 * w / max(w, h) )
h_new = int(100 * h / max(w, h) )
image = cv2.resize(image, (w_new, h_new));
# Reshape the image to be a list of pixels
image_array = image.reshape((image.shape[0] * image.shape[1], 3))
print(image_array)
# Clusters the pixels
clt = KMeans(n_clusters = 3)
clt.fit(image_array)
def centroid_histogram(clt):
    # grab the number of different clusters and create a histogram
    # based on the number of pixels assigned to each cluster
    numLabels = np.arange(0, len(np.unique(clt.labels_)) + 1)
    (hist, _) = np.histogram(clt.labels_, bins=numLabels)

    # normalize the histogram, such that it sums to one
    hist = hist.astype("float")
    hist /= hist.sum()

    # return the histogram
    return hist
# Finds how many pixels are in each cluster
hist = centroid_histogram(clt)
# Sort the clusters according to how many pixels they have
zipped = sorted(zip(hist, clt.cluster_centers_), reverse=True, key=lambda x: x[0])
hist, cluster_centers = zip(*zipped)  # now ordered from largest to smallest cluster
# By Adrian Rosebrock
import numpy as np
import cv2
bestSilhouette = -1
bestClusters = 0

for clusters in range(2, 10):
    # Cluster colours
    clt = KMeans(n_clusters=clusters)
    clt.fit(image_array)

    # Validate clustering result
    silhouette = metrics.silhouette_score(image_array, clt.labels_,
                                          metric='euclidean')

    # Find the best one
    if silhouette > bestSilhouette:
        bestSilhouette = silhouette
        bestClusters = clusters

print(bestSilhouette)
print(bestClusters)

Calculate the Convex Hull of Clustered Centers from K-means Numpy Array

I have a Numpy array of colors that is returned when using K-means quantization on an image. After K-means is used, I need to take those centers and calculate the convex hull of the set of color centers.
Essentially I need to find which points/colors don't have enough contrast from another color. I think the answer to this would be to find the points inside the hull (simplices?) that aren't the vertices of the hull.
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from scipy.spatial import ConvexHull
from skimage import color, exposure
from sklearn import cluster
def quantize(pixels, n_colors):
    width, height, depth = pixels.shape
    reshaped_pixels = np.reshape(pixels, (width * height, depth))
    model = cluster.KMeans(n_clusters=n_colors, n_init=100, precompute_distances=True)
    labels = model.fit_predict(reshaped_pixels)
    centers = model.cluster_centers_
    quantized_pixels = np.reshape(centers[labels], (width, height, centers.shape[1]))
    return quantized_pixels, centers, labels
def scale_image(img, max_width=100):
    img_width, img_height = img.size
    ratio = max_width / float(img_width)
    new_height = int(float(img_height) * ratio)
    img.thumbnail((max_width, new_height), Image.NEAREST)
    return img
# Open image, convert to RGB just in case
img = Image.open('image2.jpg').convert('RGB')
# Scale image to speed up processing
img = scale_image(img, 80)
# Convert to numpy array
np_img_array = np.array(img)
# Convert rgb to lab colorspace
lab_pixels = color.rgb2lab(np_img_array)
# Kmeans quantization
quantized_lab_pixels, centers, labels = quantize(lab_pixels, 16)
# Convert pixels back to rgb
quantized_rgb_pixels = (color.lab2rgb(quantized_lab_pixels)*255).astype('uint8')
# Attempt to use convex hull around cluster centers
hull = ConvexHull(centers)
for simplex in hull.simplices:
    plt.plot(centers[simplex, 0], centers[simplex, 1])
plt.scatter(*centers.T, alpha=.5, color='k', marker='v')
# point_in_hull is not defined in the original post; one common implementation
# checks the point against every facet equation of the hull (normal . x + offset <= 0)
def point_in_hull(point, hull, tolerance=1e-12):
    return all(np.dot(eq[:-1], point) + eq[-1] <= tolerance
               for eq in hull.equations)

for p in centers:
    point_is_in_hull = point_in_hull(p, hull)
    marker = 'x' if point_is_in_hull else 'd'
    color = 'g' if point_is_in_hull else 'm'
    plt.scatter(p[0], p[1], marker=marker, color=color)
plt.show()
Update: Here is my rendered plot output. I'm not using the color labels in the plotting in this render.
Here is more of what would be helpful to see. However, the plots aren't what I need; I need to find out which of the cluster centers (colors) make up the vertices of the hull and then pair them back to their labels.
Here is an example of the RGB colors output (on the left); on the right you would have the final colors, after excluding the cluster centers that fall outside of the region.
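A minimal sketch of that last step, assuming the centers array and the hull object from the code above: hull.vertices holds the indices of the centers that form the hull, so the remaining indices are the interior (lower-contrast) colors.

import numpy as np

# indices of the cluster centers that are vertices of the convex hull
vertex_ix = hull.vertices

# everything else lies strictly inside the hull
interior_ix = np.setdiff1d(np.arange(len(centers)), vertex_ix)

hull_colors = centers[vertex_ix]        # colors to keep
interior_colors = centers[interior_ix]  # candidates to drop for lack of contrast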
