Connected Component Labeling Algorithm in Python

My work requires applying a Local Binary Operator to images. For that, I have already converted the images to grayscale and then run a connected-components analysis on the image as well.
Here is the Code:
Adding Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from skimage.io import imread, imshow
from skimage.color import rgb2gray
from skimage.morphology import (erosion, dilation, closing, opening, area_closing, area_opening)
from skimage.measure import label, regionprops, regionprops_table
Rendering the image
plt.figure(figsize=(6,6))
painting = imread("E:/Project/for_annotation/Gupi Gain0032.jpg")
plt.imshow(painting);
Binarizing Image
plt.figure(figsize=(6,6))
gray_painting = rgb2gray(painting)
binarized = gray_painting < 0.55
plt.imshow(binarized);
Declaring Kernel
square = np.array([[1, 1, 1],
                   [1, 1, 1],
                   [1, 1, 1]])
Dilation function
def multi_dil(im, num, element=square):
    for i in range(num):
        im = dilation(im, element)
    return im
Erosion function
def multi_ero(im, num, element=square):
    for i in range(num):
        im = erosion(im, element)
    return im
Functions Applied
plt.figure(figsize=(6,6))
multi_dilated = multi_dil(binarized, 7)
area_closed = area_closing(multi_dilated, 50000)
multi_eroded = multi_ero(area_closed, 7)
opened = opening(multi_eroded)
plt.imshow(opened);
Label function
plt.figure(figsize=(6,6))
label_im = label(opened)
regions = regionprops(label_im)
plt.imshow(label_im);
Extract features
properties = ['area', 'convex_area', 'bbox_area', 'extent', 'mean_intensity',
              'solidity', 'eccentricity', 'orientation']
pd.DataFrame(regionprops_table(label_im, gray_painting,
                               properties=properties))
Filtering Regions
masks = []
bbox = []
list_of_index = []
for num, x in enumerate(regions):
    area = x.area
    convex_area = x.convex_area
    if (num != 0 and (area > 100) and (convex_area/area < 1.05)
            and (convex_area/area > 0.95)):
        masks.append(regions[num].convex_image)
        bbox.append(regions[num].bbox)
        list_of_index.append(num)
count = len(masks)
Extracting Images
fig, ax = plt.subplots(2, int(count/2), figsize=(15,8))
for axis, box, mask in zip(ax.flatten(), bbox, masks):
    red = painting[:,:,0][box[0]:box[2], box[1]:box[3]] * mask
    green = painting[:,:,1][box[0]:box[2], box[1]:box[3]] * mask
    blue = painting[:,:,2][box[0]:box[2], box[1]:box[3]] * mask
    image = np.dstack([red, green, blue])
    axis.imshow(image)
plt.tight_layout()
plt.figure(figsize=(6,6))
rgb_mask = np.zeros_like(label_im)
for x in list_of_index:
    rgb_mask += (label_im == x + 1).astype(int)
red = painting[:,:,0] * rgb_mask
green = painting[:,:,1] * rgb_mask
blue = painting[:,:,2] * rgb_mask
image = np.dstack([red,green,blue])
plt.imshow(image);
I am getting an error.
ValueError: Number of columns must be a positive integer, not 0
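For context on the error itself: it most likely comes from plt.subplots(2, int(count/2)) in the "Extracting Images" step. If the filtering loop keeps fewer than two regions, count is 0 or 1, so int(count/2) is 0, and matplotlib refuses to build a subplot grid with zero columns. A minimal reproduction of the message (the count value is assumed):
import matplotlib.pyplot as plt

count = 1                                   # assume the filter kept only one region
fig, ax = plt.subplots(2, int(count / 2))   # ncols == 0
# ValueError: Number of columns must be a positive integer, not 0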

There is a possible approach which is not very far from what you attempted. Assume the background pixels are assigned the label 0, and the object pixels the value 1.
scan the image row by row;
when you meet a pixel 1, set a new label and perform a flood fill operation, replacing 1 by the new label.
Flood filling can be implemented very simply:
set the starting pixel to the new label;
recursively fill the eight neighbors, if they have a 1.
https://en.wikipedia.org/wiki/Flood_fill
The code for this version is pretty simple, but you will notice that it can easily overflow the stack, because the number of pending recursive fills can be as large as the image size.
def FloodFill(I, X, Y, Label):
    # Set the seed pixel to the new label, then recursively fill any of its
    # 8 neighbours that still hold the object value 1.
    I[X, Y] = Label
    for dX in (-1, 0, 1):
        for dY in (-1, 0, 1):
            Xn, Yn = X + dX, Y + dY
            if 0 <= Xn < I.shape[0] and 0 <= Yn < I.shape[1] and I[Xn, Yn] == 1:
                FloodFill(I, Xn, Yn, Label)

def CCL(I):
    # Scan the image row by row; every pixel still equal to 1 seeds a new label.
    # Labels start at 2 so they never collide with the object value 1.
    Label = 1
    for X in range(I.shape[0]):
        for Y in range(I.shape[1]):
            if I[X, Y] == 1:
                Label += 1
                FloodFill(I, X, Y, Label)
    return I
So I would recommend the scanline version, which is a little more involved.
https://en.wikipedia.org/wiki/Flood_fill#Scanline_fill
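As a middle ground, the recursion can simply be replaced by an explicit stack, which already removes the risk of blowing the call stack; the scanline variant described in the link goes further and fills whole horizontal runs at a time. A minimal stack-based sketch, under the same assumptions as above (background 0, objects 1, labels starting at 2):
def flood_fill_iterative(I, x, y, label):
    # Same 8-connected fill as FloodFill above, but with an explicit stack
    # instead of recursion, so large components cannot overflow the call stack.
    stack = [(x, y)]
    while stack:
        cx, cy = stack.pop()
        if I[cx, cy] != 1:
            continue
        I[cx, cy] = label
        for dx in (-1, 0, 1):
            for dy in (-1, 0, 1):
                nx, ny = cx + dx, cy + dy
                if 0 <= nx < I.shape[0] and 0 <= ny < I.shape[1] and I[nx, ny] == 1:
                    stack.append((nx, ny))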

Related

Segmenting pictures using their histograms in python

I have a problem with segmenting (or clustering?) pictures using their histograms - I mean I don't really know how to go about it. I have, let's say, 200 images and I have to group them (people with people, buildings with buildings, etc.).
If you want to work on exactly the same task as me, there is a source of images: http://wang.ist.psu.edu/docs/related/
I know how to compute the histograms etc.; my code is below (with a detailed description).
import numpy as np
import matplotlib.pyplot as plt
import cv2
# 1. Loading images from folder
path = 'image\\350.jpg'
img = plt.imread(path)
imglist=[]
for i in range(0,20):
    x = np.random.randint(0,1000)
    path = "image\\" + str(x) + ".jpg"
    # print(path)
    img = plt.imread(path)
    imglist.append(img)
# just testing if everything is fine
plt.figure(figsize = (20,10))
for i in range(0,20):
    plt.subplot(4,5,i+1)
    plt.imshow(imglist[i])
    plt.xticks([])
    plt.yticks([])
# 2. QUANTIZATION (FOR ONE IMAGE)
# QUANTIZATION TO 2 COLORS FOR EACH OF R, G, B --> 8 COLORS
imgq2 = np.floor(img/128)*128+64
imgq2 =imgq2.astype(int)
# QUANTIZATION TO 4 COLORS
imgq4 = np.floor(img/64)*64 + 32
imgq4 = imgq4.astype(int)
# QUANTIZATION TO 8 COLORS
imgq8 = np.floor(img/32)*32 + 16
imgq8 = imgq8.astype(int)
# TESTING
plt.figure(figsize = (20,10))
plt.subplot(1,3,1)
plt.imshow(img)
plt.title('Original image')
plt.subplot(1,3,2)
plt.imshow(imgq2)
plt.title('2 values of RGB')
plt.subplot(1,3,3)
plt.imshow(imgq4)
plt.title('4 values of RGB')
# checking if this is true
imgq4.shape
imgq8.shape
print(np.unique(imgq4[:,:,:]))
print(np.unique(imgq8[:,:,:]))
imgq4 = np.floor(img/128)
imgq4 = imgq4.astype(int)
# plt.imshow(imgq4)
# plt.show()
# 3. MAKING 3-Dim (RGB) HISTOGRAM
hist = {(i,j,k): 0 for i in range(0,4) for j in range(0,4) for k in range(0,4)}
print(hist)
img = plt.imread('image\\0.jpg')
img2 = img.copy()
img2 = np.floor(img2/64).astype(int)
img2.shape
img2 = img2.reshape( img2.shape[0]*img2.shape[1] ,3)
for i in range(0, img2.shape[0]):
    hist[img2[i,0], img2[i,1], img2[i,2]] = hist[img2[i,0], img2[i,1], img2[i,2]] + 1
print(len(hist.keys()), len(hist.values()))
# histogram values
print(hist.values())
print(img2.shape[0] == np.sum(list(hist.values())))
I also have this file, which does clustering on random points:
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
no_of_points = 1000
X = np.random.rand(no_of_points,2)
plt.scatter(X[:,0],X[:,1],s = 50, cmap = 'rainbow')
kmeans = KMeans(n_clusters = 5)
kmeans.fit(X)
plt.scatter(X[:,0], X[:,1], s = 50, cmap = 'rainbow', c = kmeans.labels_)
plt.scatter(kmeans.cluster_centers_[:,0],kmeans.cluster_centers_[:,1],s = 100, c = 'black', alpha = 0.5)
plt.show()
To sum up, I have 2 questions.
Is it possible to make or transform the histograms (or vectors) into something I could cluster the way I cluster the points in my second file?
How can I automate the process of building the image histogram? (it is currently done for just one image)
Thanks for help!
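One way to put the two questions together (a hedged sketch rather than a definitive recipe): compute the same 4x4x4 quantized histogram for every image, flatten it into a 64-element vector, and feed the matrix of vectors to KMeans exactly like the random points in the second file. The paths, bin count and number of clusters below are assumptions for illustration.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans

def rgb_histogram(img, bins_per_channel=4):
    # Quantize each channel to `bins_per_channel` levels and count pixels per
    # (r, g, b) bin; the flattened result is a fixed-length feature vector.
    q = np.floor(img / (256 / bins_per_channel)).astype(int)
    q = np.clip(q, 0, bins_per_channel - 1)
    hist = np.zeros((bins_per_channel,) * 3)
    for r, g, b in q.reshape(-1, 3):
        hist[r, g, b] += 1
    return hist.ravel() / hist.sum()      # normalize so image size does not matter

# Assumed naming scheme from the question: image\0.jpg ... image\999.jpg
features = []
for i in range(20):
    img = plt.imread("image\\" + str(np.random.randint(0, 1000)) + ".jpg")
    features.append(rgb_histogram(img))
features = np.array(features)             # shape (n_images, 64)

kmeans = KMeans(n_clusters=5).fit(features)
print(kmeans.labels_)                      # cluster index per image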

How to morph two grid-like images seamlessly?

I have two images that consist of colored squares with different grid step (10x10 and 12x12).
What I want is to make the first image to be smoothly transformed into the second one.
When I use a plain image overlay with the cv2.addWeighted() function, the result (left) is not good because of the intersecting grid spaces. I suppose it would be better to shift the remaining grid cells to the borders and clear out the rest (right).
Is there any algorithm to deal with this task?
Thanks.
You can interpolate each pixel individually between different images.
import numpy as np
from scipy import interpolate
import matplotlib.pyplot as plt
np.random.seed(200)
num_images = 2
images = np.random.rand(num_images, 8,8)
for index, im in enumerate(images):
    print(f'Images {index}')
    fig = plt.imshow(im)
    plt.show()
Interpolating these images:
n_frames = 4
x_array = np.linspace(0, 1, int(n_frames))
def interpolate_images(frame):
    intermediate_image = np.zeros((1, *images.shape[1:]))
    for lay in range(images.shape[1]):
        for lat in range(images.shape[2]):
            # k=1 fits a piecewise-linear spline through this pixel's values across all input images
            tck = interpolate.splrep(np.linspace(0, 1, images.shape[0]), images[:, lay, lat], k=1)
            intermediate_image[:, lay, lat] = interpolate.splev(x_array[frame], tck)
    return intermediate_image

for frame in range(n_frames):
    im = interpolate_images(int(frame))
    fig = plt.imshow(im[0])
    plt.show()
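To apply this to the two grid images from the question rather than random arrays, the inputs just need to be stacked into the `images` array the code above assumes; the file names and the grayscale loading below are assumptions for illustration.
import cv2
import numpy as np

# Hypothetical file names for the 10x10 and 12x12 grid images.
a = cv2.imread("grid_10x10.png", cv2.IMREAD_GRAYSCALE) / 255.0
b = cv2.imread("grid_12x12.png", cv2.IMREAD_GRAYSCALE) / 255.0
b = cv2.resize(b, (a.shape[1], a.shape[0]))   # bring both images to the same shape

images = np.stack([a, b])                     # shape (2, H, W), as interpolate_images expects
Note that with only two input images and k=1 this reduces to a per-pixel linear cross-fade, so the grid cells will still blend rather than slide into place; a true grid morph would need additional geometric alignment of the cells.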

How to blur the image according to segmentation map

Forgive me if I am unable to explain well, because I am not a native speaker.
I am working on blurring part of an image according to the white part of a segmentation map. For example, here is my segmentation image (a BMP image).
Now what I want is to blur the part of the original image where the pixels are white in the segmentation map. I just wrote the following code to do so.
mask = mask >= 0.5
mask = np.reshape(mask, (512, 512))
mh, mw = 512, 512
mask_n = np.ones((mh, mw, 3))
mask_n[:,:,0] *= mask
mask_n[:,:,1] *= mask
mask_n[:,:,2] *= mask
# discard padded area
ih, iw, _ = image_n.shape
delta_h = mh - ih
delta_w = mw - iw
top = delta_h // 2
bottom = mh - (delta_h - top)
left = delta_w // 2
right = mw - (delta_w - left)
mask_n = mask_n[top:bottom, left:right, :]
# addWeighted
image_n = image_n *1 + cv2.blur(mask_n * 0.8, (800, 800))
Please help me, Thanks.
You can do it in the following steps:
Load original image and mask image.
Blur the whole original image and save it in a different variable.
Use the np.where() method to select the pixels from the mask where you want blurred values, and then replace them.
See the sample code below:
import cv2
import numpy as np
img = cv2.imread("./image.png")
blurred_img = cv2.GaussianBlur(img, (21, 21), 0)
mask = cv2.imread("./mask.png")
output = np.where(mask==np.array([255, 255, 255]), blurred_img, img)
cv2.imwrite("./output.png", output)
Here's an alternative to the solution proposed by @Chris Henri. It relies on scipy.ndimage.filters.gaussian_filter and NumPy's boolean indexing:
from skimage import io
import numpy as np
from scipy.ndimage.filters import gaussian_filter
import matplotlib.pyplot as plt
mask = io.imread('https://i.stack.imgur.com/qJiKf.png')
img = np.random.random(size=mask.shape[:2])
idx = mask.min(axis=-1) == 255
blurred = gaussian_filter(img, sigma=3)
blurred[~idx] = 0
fig, axs = plt.subplots(1, 3, figsize=(12, 4))
for ax, im in zip(axs, [img, mask, blurred]):
    ax.imshow(im, cmap='gray')
    ax.set_axis_off()
plt.show(fig)
Here is yet another alternative, useful when you have a 2D segmentation array indicating the segmented object class of each pixel (i, j) (classes are mutually exclusive), and a 3D image to which you want to apply the blur.
def gaussian_blur(image: np.ndarray,
                  segmentation: np.ndarray,
                  classes_of_interest: list,
                  gaussian_variance: float = 10) -> np.ndarray:
    '''
    Function that applies a gaussian filter to the image,
    specifically to the pixels contained in the possible segmented classes.
    Returns an image (np.ndarray) where the gaussian blur intensity is
    regulated by the parameter gaussian_variance.
    '''
    # Apply masking to select only the indices where the specified classes are present
    mask = np.isin(segmentation, classes_of_interest)
    # Create a 3D mask covering all the channels, placed at the channel axis
    mask_3d = np.stack([mask, mask, mask], axis=2)
    # Mask the image according to the 3D mask
    img_masked = np.where(mask_3d, image, 0).astype(np.uint8)

    # Define gaussian noise function
    def noisy(image):
        row, col, ch = image.shape
        mean = 0
        var = gaussian_variance
        sigma = np.sqrt(var)
        gauss = np.random.normal(mean, sigma, (row, col, ch))
        gauss = gauss.reshape(row, col, ch)
        # Sum gaussian noise onto the image
        noisy = image + gauss
        return noisy.astype(np.uint8)

    # Blur the masked segmentation
    img_masked_noisy = noisy(img_masked)
    # Put the blurred part back into the original image as a substitution
    image[mask_3d] = img_masked_noisy[mask_3d]
    return image
And here is a toy example:
import numpy as np

possible_classes = [1, 2, 3]
# Setting up a toy example with a small image, shape (N, N, 3)
img = np.floor(np.random.random(size=(8, 8, 3)) * 256).astype(np.uint8)
# Setting up a fake segmentation with 3 mutually exclusive possible classes, shape (N, N)
segmentation = np.random.choice(possible_classes, size=(8, 8))

new_img_blurred = gaussian_blur(img,
                                segmentation=segmentation,
                                classes_of_interest=possible_classes[:2])

Checking if image is mostly black and white or color

I am trying to classify whether an image is mostly black and white or color; to be precise, it is a photo of a photocopy (think Xerox), which is mostly black and white. The image is NOT a single-channel image, but a 3-channel image.
I just want to know if there are any obvious ways to solve this that I'm missing.
For now I'm trying to plot histograms and maybe do a pixel count, but that does not look very promising; any suggestions on this would be really helpful.
Thanks in advance.
I am unsure of the exact use case, but having experienced similar issues I used this rather helpful article.
https://www.alanzucconi.com/2015/05/24/how-to-find-the-main-colours-in-an-image/
The GitHub gist containing the full code is found here: https://gist.github.com/jayapal/077f63f3163abbfb3c50c7d209524cc6
If this is just for your own visual inspection, the histogram should be enough. If you are attempting to automate it, however, it may be helpful to round the color values up or down; this would tell you whether the image is darker or lighter than a certain value.
What are you using this code for in the bigger picture? That may help provide more adequate information.
Edit: the code linked above also provides the ability to define a region of the image; hopefully this will make your selection more accurate.
Adding code directly
from sklearn.cluster import KMeans
from sklearn import metrics
import cv2
import numpy as np

image = cv2.imread("red.png")
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

# Resize it
h, w, _ = image.shape
w_new = int(100 * w / max(w, h))
h_new = int(100 * h / max(w, h))
image = cv2.resize(image, (w_new, h_new))

# Reshape the image to be a list of pixels
image_array = image.reshape((image.shape[0] * image.shape[1], 3))
print(image_array)

# Clusters the pixels
clt = KMeans(n_clusters=3)
clt.fit(image_array)
def centroid_histogram(clt):
    # grab the number of different clusters and create a histogram
    # based on the number of pixels assigned to each cluster
    numLabels = np.arange(0, len(np.unique(clt.labels_)) + 1)
    (hist, _) = np.histogram(clt.labels_, bins=numLabels)
    # normalize the histogram, such that it sums to one
    hist = hist.astype("float")
    hist /= hist.sum()
    # return the histogram
    return hist

# Finds how many pixels are in each cluster
hist = centroid_histogram(clt)

# Sort the clusters according to how many pixels they have
zipped = sorted(zip(hist, clt.cluster_centers_), reverse=True, key=lambda x: x[0])
hist, clt.cluster_centers = zip(*zipped)
# By Adrian Rosebrock
import numpy as np
import cv2

bestSilhouette = -1
bestClusters = 0
for clusters in range(2, 10):
    # Cluster colours
    clt = KMeans(n_clusters=clusters)
    clt.fit(image_array)
    # Validate clustering result
    silhouette = metrics.silhouette_score(image_array, clt.labels_,
                                          metric='euclidean')
    # Find the best one
    if silhouette > bestSilhouette:
        bestSilhouette = silhouette
        bestClusters = clusters
print(bestSilhouette)
print(bestClusters)
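As a complement to the clustering approach above, the simple pixel count mentioned in the question can be sketched directly: in a (near) black-and-white photocopy the three channels of each pixel are almost equal, so counting pixels whose channel spread exceeds a tolerance gives a cheap colorfulness measure. The file name and both thresholds below are assumptions to tune, not established values.
import cv2
import numpy as np

img = cv2.imread("scan.png")                  # hypothetical input path
img = img.astype(np.int16)                    # avoid uint8 wrap-around when subtracting

# Per-pixel spread between the largest and smallest channel value.
spread = img.max(axis=2) - img.min(axis=2)

# Fraction of pixels that are noticeably colored (tolerance of 30 levels is assumed).
colored_fraction = np.mean(spread > 30)

print("mostly black and white" if colored_fraction < 0.05 else "color")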

10 Pin Bowling score capture

I want to use OCR to capture the bowling scores from the monitor at the lanes. I had a look at this sudoku solver, as I think it's pretty similar - numbers and grids, right? It has trouble finding the horizontal lines. Has anyone got any tips for pre-processing this image to make it easier to detect the lines (or the numbers!)? Also, any tips on how to deal with the split (the orange ellipse around some of the 8's in the image)?
So far I have got the outline of the score area and cropped it.
import matplotlib
matplotlib.use('TkAgg')
from skimage import io
import numpy as np
import matplotlib.pyplot as plt
from skimage import measure
from skimage.color import rgb2gray
# import pytesseract
from matplotlib.path import Path
from qhd import *
def polygonArea(poly):
    """
    Return area of an unclosed polygon.

    :see: https://stackoverflow.com/a/451482
    :param poly: (n,2)-array
    """
    # we need a plain list for the following operations
    if isinstance(poly, np.ndarray):
        poly = poly.tolist()
    segments = zip(poly, poly[1:] + [poly[0]])
    return 0.5 * abs(sum(x0*y1 - x1*y0
                         for ((x0, y0), (x1, y1)) in segments))
filename = 'good.jpg'
image = io.imread(filename)
image = rgb2gray(image)
# Find contours at a constant value of 0.8
contours = measure.find_contours(image, 0.4)
# Display the image and plot all contours found
fig, ax = plt.subplots()
c = 0
biggest = None
biggest_size = 0
for n, contour in enumerate(contours):
    curr_size = polygonArea(contour)
    if curr_size > biggest_size:
        biggest = contour
        biggest_size = curr_size
biggest = qhull2D(biggest)
# Approximate that so we just get a rectangle.
biggest = measure.approximate_polygon(biggest, 500)
# vertices of the cropping polygon
yc = biggest[:,0]
xc = biggest[:,1]
xycrop = np.vstack((xc, yc)).T
# xy coordinates for each pixel in the image
nr, nc = image.shape
ygrid, xgrid = np.mgrid[:nr, :nc]
xypix = np.vstack((xgrid.ravel(), ygrid.ravel())).T
# construct a Path from the vertices
pth = Path(xycrop, closed=False)
# test which pixels fall within the path
mask = pth.contains_points(xypix)
# reshape to the same size as the image
mask = mask.reshape(image.shape)
# create a masked array
masked = np.ma.masked_array(image, ~mask)
# if you want to get rid of the blank space above and below the cropped
# region, use the min and max x, y values of the cropping polygon:
xmin, xmax = int(xc.min()), int(np.ceil(xc.max()))
ymin, ymax = int(yc.min()), int(np.ceil(yc.max()))
trimmed = masked[ymin:ymax, xmin:xmax]
plt.imshow(trimmed, cmap=plt.cm.gray), plt.title('trimmed')
plt.show()
https://imgur.com/LijB85I is an example of how the score is displayed.
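For the "detecting the horizontal lines" part, one common pre-processing idea (a hedged sketch, not a tested solution for this particular image) is to binarize the cropped score area and open it with a wide, flat structuring element, so that only long horizontal runs survive; the kernel width and the use of Otsu thresholding below are assumptions to tune.
import cv2
import numpy as np

# `trimmed` is the masked, cropped grayscale score area from the code above;
# fill the masked region with white and scale to uint8 before using OpenCV.
gray = (np.ma.filled(trimmed, 1.0) * 255).astype(np.uint8)

# Binarize, inverted so the dark grid lines become white foreground.
_, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

# Keep only structures at least ~40 px wide and 1 px tall: the horizontal lines.
horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (40, 1))
horizontal_lines = cv2.morphologyEx(binary, cv2.MORPH_OPEN, horizontal_kernel)

cv2.imwrite("horizontal_lines.png", horizontal_lines)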
