How to get rid of skeleton line except contour? - python

I want to get rid of the skeletonized lines, keeping only the contours, using Python. I also want to extract only the largest contour.
(Specifically, I made a skeletonized line from the segmented mask and got the main stem with its contour, as in the picture above. Among the contours, I want to extract only the one with the largest area.)
I don't know how to do it.
Please help me if you have any ideas.
Thanks in advance.
import os
import numpy as np
import cv2
from plantcv.plantcv import find_objects
from plantcv.plantcv import image_subtract
from plantcv.plantcv.morphology import segment_sort
from plantcv.plantcv.morphology import segment_skeleton
from plantcv.plantcv.morphology import _iterative_prune
from plantcv.plantcv import print_image
from plantcv.plantcv import plot_image
from plantcv.plantcv import params
from cv2.ximgproc import thinning
def find_large_contour(img, mask):
    params.device += 1
    mask1 = np.copy(mask)
    ori_img = np.copy(img)
    # If the reference image is grayscale convert it to color
    if len(np.shape(ori_img)) == 2:
        ori_img = cv2.cvtColor(ori_img, cv2.COLOR_GRAY2BGR)
    objects, hierarchy = cv2.findContours(mask1, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]
    for i, cnt in enumerate(objects):
        cv2.drawContours(ori_img, objects, i, (255, 102, 255), -1, lineType=8, hierarchy=hierarchy)
    if params.debug == 'print':
        print_image(ori_img, os.path.join(params.debug_outdir, str(params.device) + '_id_objects.png'))
    elif params.debug == 'plot':
        plot_image(ori_img)
    return objects, hierarchy, ori_img
def prune(skel_img, size=2, mask=None):
    # Store debug
    debug = params.debug
    params.debug = None
    pruned_img = skel_img.copy()
    # Check to see if the skeleton has multiple objects
    skel_objects, _ = find_objects(skel_img, skel_img)
    _, objects = segment_skeleton(skel_img)
    kept_segments = []
    removed_segments = []
    if size > 0:
        # If size > 0 then check for segments that are smaller than size pixels long
        # Sort through segments since we don't want to remove primary segments
        secondary_objects, primary_objects = segment_sort(skel_img, objects)
        # Keep segments longer than specified size
        for i in range(0, len(secondary_objects)):
            if len(secondary_objects[i]) > size:
                kept_segments.append(secondary_objects[i])
            else:
                removed_segments.append(secondary_objects[i])
        # Draw the contours that got removed
        removed_barbs = np.zeros(skel_img.shape[:2], np.uint8)
        cv2.drawContours(removed_barbs, removed_segments, -1, 255, 1, lineType=8)
        # Subtract all short segments from the skeleton image
        pruned_img = image_subtract(pruned_img, removed_barbs)
        pruned_img = _iterative_prune(pruned_img, 1)
    # Reset debug mode
    params.debug = debug
    # Make debugging image
    if mask is None:
        pruned_plot = np.zeros(skel_img.shape[:2], np.uint8)
    else:
        pruned_plot = mask.copy()
    pruned_plot = cv2.cvtColor(pruned_plot, cv2.COLOR_GRAY2RGB)
    pruned_obj, pruned_hierarchy, large_contour = find_large_contour(pruned_img, pruned_img)
    cv2.drawContours(pruned_plot, removed_segments, -1, (0, 0, 255), params.line_thickness, lineType=8)
    cv2.drawContours(pruned_plot, pruned_obj, -1, (150, 150, 150), params.line_thickness, lineType=8)
    # Auto-increment device
    params.device += 1
    if params.debug == 'print':
        print_image(pruned_img, os.path.join(params.debug_outdir, str(params.device) + '_pruned.png'))
        print_image(pruned_plot, os.path.join(params.debug_outdir, str(params.device) + '_pruned_debug.png'))
    elif params.debug == 'plot':
        plot_image(pruned_img, cmap='gray')
        plot_image(pruned_plot)
    # Segment the pruned skeleton
    segmented_img, segment_objects = segment_skeleton(pruned_img, mask)
    return pruned_img, segmented_img, segment_objects, large_contour
vseg = cv2.imread("vseg.png", cv2.IMREAD_GRAYSCALE)
gray = thinning(vseg, thinningType=cv2.ximgproc.THINNING_GUOHALL)
pruned, seg_img, edge_objects, large_contour = prune(skel_img=gray, size=3, mask=vseg)
img_cont_gray = cv2.cvtColor(large_contour, cv2.COLOR_BGR2GRAY)
ret_cont, thresh_cont = cv2.threshold(img_cont_gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
cv2.imwrite("first_cont111.png", thresh_cont)
## then I want to extract only the contour with the largest area
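One possible sketch of that final extraction step, assuming thresh_cont saved to first_cont111.png is a clean binary image (the [-2:] slice keeps findContours compatible with both OpenCV 3 and 4 return signatures):
import cv2
import numpy as np
thresh_cont = cv2.imread("first_cont111.png", cv2.IMREAD_GRAYSCALE)
contours, _ = cv2.findContours(thresh_cont, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:]
largest = max(contours, key=cv2.contourArea)  # keep only the contour enclosing the largest area
out = np.zeros_like(thresh_cont)
cv2.drawContours(out, [largest], -1, 255, thickness=cv2.FILLED)
cv2.imwrite("largest_cont.png", out)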

Use morphology as the first step:
ret, thresh = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
rect = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, rect)
Then find the connected component of maximal area:
max_component = np.full(opening.shape, 0, np.uint8)
nb_components, labels, stats, centroids = cv2.connectedComponentsWithStats(opening, 8)
# stats[1:, -1] skips the background component (label 0), so add 1 to recover the label index
max_component[labels == np.argmax(stats[1:, -1]) + 1] = 255

Using skimage (if you don't have it: conda install scikit-image):
import numpy as np
import scipy.ndimage as ndi
from skimage.morphology import binary_erosion, binary_dilation
from skimage.measure import regionprops
from skimage import io

img = io.imread("first_cont111.png") > 0  # open image and ensure 0,1 data
# get rid of 1-pixel lines
img = binary_erosion(img)
img = binary_dilation(img)
# find individual objects and give them unique labels
label_img, _ = ndi.label(img)
props = regionprops(label_img)
# find the label that corresponds to the object with maximum area:
objects = sorted([(p.label, p.area) for p in props], key=lambda x: x[1], reverse=True)
obj = objects[0][0]
# make an image of the same size as the input image:
output_img = np.zeros_like(img)
# and use fancy indexing to copy the largest object
output_img[label_img == obj] = 1
# now make the contour by XOR-ing with the eroded shape
# (boolean arrays don't support "-", so use ^ instead)
output_img = output_img ^ binary_erosion(output_img)

Related

How to resize and translate a masked image over a background OpenCV and Python

By doing a bit of my own googling and following this tutorial, I have created the Python script below. It finds the most dominant (common) color in an image and replaces it with another "background" image. It basically creates a mask and places it on top of the background image. My question is: how would I resize the mask and translate it? I am a complete beginner to OpenCV with Python, so some code examples with explanations would go a long way :).
Here is the script:
import os
#from colorthief import ColorThief
from PIL import Image
import cv2
import matplotlib.pyplot as plt
import numpy as np
imgDirec = "/Users/.../images"
def find_dominant_color(filename):
    # Resizing parameters
    width, height = 150, 150
    image = Image.open(filename)
    image = image.resize((width, height), resample=0)
    # Get colors from image object
    pixels = image.getcolors(width * height)
    # Sort them by count number (first element of tuple)
    sorted_pixels = sorted(pixels, key=lambda t: t[0])
    # Get the most frequent color
    dominant_color = sorted_pixels[-1][1]
    return dominant_color
filepath = "/Users/.../image.jpg" #Foreground Image
dominant_color = find_dominant_color(filepath)
#dominant_color = color_thief.get_color(quality=1)
print(dominant_color)
image = cv2.imread(filepath)
image_copy = np.copy(image)
image_copy = cv2.cvtColor(image_copy, cv2.COLOR_BGR2RGB)
lower_blue = np.array([dominant_color[0]-20, dominant_color[1]-20, dominant_color[2]-20]) ##[R value, G value, B value]
upper_blue = np.array([dominant_color[0]+20, dominant_color[1]+20, dominant_color[2]+20])
#plt.imshow(image_copy)
mask = cv2.inRange(image_copy, lower_blue, upper_blue)
#plt.imshow(mask, cmap='gray')
masked_image = np.copy(image_copy)
masked_image[mask != 0] = [0, 0, 0]
#plt.imshow(masked_image)
background_image = cv2.imread('/Users/.../background1.jpg')
background_image = cv2.cvtColor(background_image, cv2.COLOR_BGR2RGB)
crop_background = background_image[0:image_copy.shape[0], 0:image_copy.shape[1]]
crop_background[mask == 0] = [0, 0, 0]
#plt.imshow(crop_background)
#These Transformations do not work as intended.
newImg = cv2.resize(crop_background, (0,0), fx=2, fy=2)
height, width = masked_image.shape[:2]
quarter_height, quarter_width = height / 4, width / 4
T = np.float32([[1, 0, quarter_width], [0, 1, quarter_height]])
img_translation = cv2.warpAffine(masked_image, T, (width, height))
final_image = crop_background + masked_image
plt.imshow(final_image)
plt.show()
This is image.jpg
This is background1.jpg
And running the script right now I get:
I want to be able to make the person smaller and translate him around the background. How would I do this? Also, is there any way to keep the background image at its original size while putting the smaller picture of the person on top? Again, I am a beginner (primarily an iOS dev), so there may be a pretty obvious solution. Please enlighten me!
Thanks in advance!
To answer this, you need to find two things in the code. First: on which line is the background cropped? That happens in the line below:
crop_background = background_image[0:image_copy.shape[0], 0:image_copy.shape[1]]
So to translate the person within the background, you must define two offsets. I will do that like this:
x_offset=100 # translate in x-axis
y_offset=200 # translate in y-axis
crop_background = background_image[y_offset:image_copy.shape[0]+y_offset, x_offset:image_copy.shape[1]+x_offset]
So far we have added translation, but how can we see the whole background instead of the cropped background? To do that, you should write final_image back into the exact location from which we cropped:
background_image[y_offset:image_copy.shape[0]+y_offset, x_offset:image_copy.shape[1]+x_offset]=final_image
By adding this line, the new picture will look like this:
So what about resizing the image? There is a function in OpenCV called cv2.resize with which you can resize an image to any size. I resized your image to (100, 200) in the line below and re-ran the code:
image = cv2.resize(image,(100,200))
And the result will be:
The whole code looks like this:
import os
#from colorthief import ColorThief
from PIL import Image
import cv2
import matplotlib.pyplot as plt
import numpy as np
imgDirec = "/home/isv/Desktop/"
def find_dominant_color(filename):
    # Resizing parameters
    width, height = 150, 150
    image = Image.open(filename)
    image = image.resize((width, height), resample=0)
    # Get colors from image object
    pixels = image.getcolors(width * height)
    # Sort them by count number (first element of tuple)
    sorted_pixels = sorted(pixels, key=lambda t: t[0])
    # Get the most frequent color
    dominant_color = sorted_pixels[-1][1]
    return dominant_color
filepath = "/home/isv/Desktop/image.jpg" #Foreground Image
dominant_color = find_dominant_color(filepath)
#dominant_color = color_thief.get_color(quality=1)
print(dominant_color)
image = cv2.imread(filepath)
image = cv2.resize(image,(100,200)) #added line
image_copy = np.copy(image)
image_copy = cv2.cvtColor(image_copy, cv2.COLOR_BGR2RGB)
lower_blue = np.array([dominant_color[0]-20, dominant_color[1]-20, dominant_color[2]-20]) ##[R value, G value, B value]
upper_blue = np.array([dominant_color[0]+20, dominant_color[1]+20, dominant_color[2]+20])
#plt.imshow(image_copy)
mask = cv2.inRange(image_copy, lower_blue, upper_blue)
#plt.imshow(mask, cmap='gray')
masked_image = np.copy(image_copy)
masked_image[mask != 0] = [0, 0, 0]
#plt.imshow(masked_image)
background_image = cv2.imread('/home/isv/Desktop/background1.jpg')
background_image = cv2.cvtColor(background_image, cv2.COLOR_BGR2RGB)
x_offset=100 #added line
y_offset=200 #added line
crop_background = background_image[y_offset:image_copy.shape[0]+y_offset, x_offset:image_copy.shape[1]+x_offset] #change line
crop_background[mask == 0] = [0, 0, 0]
#plt.imshow(crop_background)
#These Transformations do not work as intended.
newImg = cv2.resize(crop_background, (0,0), fx=2, fy=2)
height, width = masked_image.shape[:2]
quarter_height, quarter_width = height / 4, width / 4
T = np.float32([[1, 0, quarter_width], [0, 1, quarter_height]])
img_translation = cv2.warpAffine(masked_image, T, (width, height))
final_image = crop_background + masked_image
background_image[y_offset:image_copy.shape[0]+y_offset, x_offset:image_copy.shape[1]+x_offset]=final_image #added line
plt.imshow(final_image)
plt.show()
plt.figure() # added line
plt.imshow(background_image) # added line
plt.show() # added line
I hope that this code will help you.

Python - Remove black outline & overlay PNG image on JPEG image

I have two images:
Fragments from painting
Whole painting
I need to solve two issues:
1st: On the first image, I need to remove the black outline from each fragment. I've tried thresholding and erosion, but neither of them worked. How can I do that?
2nd: I can't overlay the first image on the second, and I really don't know why. It always results in the first image completely covering the second, with black pixels where the second image should show through.
I'm using Python3 and OpenCV 3.2, on Ubuntu 18.04.
My program:
from PIL import Image
from matplotlib import pyplot as plt
import numpy as np
import cv2
import sys
plano_f = cv2.imread("Domenichino_Virgin-and-unicorn.jpg")
sobrepor = cv2.imread("Domenichino_Virgin-and-unicorn_img.png")
plano_f = cv2.cvtColor(plano_f, cv2.COLOR_BGR2GRAY, -1)
#sobrepor_BGRA = cv2.cvtColor(sobrepor, cv2.COLOR_BGR2BGRA)
sobrepor_BGRA = cv2.imread("nova_png.png", -1)
plt.imshow(sobrepor_BGRA),plt.show()
rows, cols, han = sobrepor_BGRA.shape
total = rows*cols
#printProgressBar(0, total, prefix="Executando...", suffix="completo", length=50)
'''for i in range(rows):
    for j in range(cols):
        if(sobrepor_BGRA[i, j][0] <= 5 and sobrepor_BGRA[i, j][1] <= 5 and sobrepor_BGRA[i, j][2] <= 5 and sobrepor_BGRA[i, j][3] != 0):
            sobrepor_BGRA[i, j] = (0, 0, 0, 0)
            #printProgressBar(i*j, total, prefix='Executando...', suffix='completo', length=50)
    sys.stdout.write("\rExecutando linha " + str(i) + " de " + str(rows) + "...")
    sys.stdout.flush()
cv2.imwrite("nova_png.png", sobrepor_BGRA)'''
kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3,3))
#sobrepor_BGRA = cv2.cvtColor(sobrepor_BGRA, cv2.COLOR_BGRA2GRAY, -1)
sobrepor_BGRA = cv2.erode(sobrepor_BGRA, kernel, iterations=3)
#sobrepor_BGRA = cv2.cvtColor(sobrepor_BGRA, cv2.COLOR_GRAY2BGRA)
cv2.imwrite("nova_png2.png", sobrepor_BGRA)
#sobrepor_RGBA = cv2.cvtColor(sobrepor_BGRA, cv2.COLOR_BGRA2RGBA)
#plt.imshow(sobrepor_RGBA),plt.show()
sys.stdout.write("\nPronto!")
nova_img = cv2.addWeighted(sobrepor_BGRA, 1, plano_f, 0, 0)
cv2.imwrite("combined.png", nova_img)
plt.imshow(nova_img),plt.show()
You can use bitwise operations to do this. The idea is to obtain a mask of the missing sections of the fragments, then bitwise-or the two sections together. Here are two halves of the image: one is the fragments you already have, and the other is the missing sections.
We combine both halves to get the whole painting:
import cv2
import numpy as np
fragment = cv2.imread('1.jpg')
whole = cv2.imread('2.jpg')
fragment[np.where((fragment <= [250,250,250]).all(axis=2))] = [0]
result1 = cv2.bitwise_and(whole, fragment)
result2 = cv2.bitwise_and(whole, 255 - fragment)
final = result1 + result2
cv2.imshow('result1', result1)
cv2.imshow('result2', result2)
cv2.imshow('final', final)
cv2.waitKey()
1st: Your image is a JPEG, which means the black lines around the pieces are imperfect due to compression artifacts, and a simple threshold or dilation isn't going to remove them perfectly. You can try saving in a lossless format and cleaning up by hand in an image editor; you may even want to do that after an erosion has removed most of it.
2nd: Why don't you just copy with a mask using the copyTo function? Here is an example:
import cv2
img1 = cv2.imread('x2djw.jpg')
img2 = cv2.imread('5RnNh.jpg')
img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
thr, img1_mask = cv2.threshold(img1, 250, 255, cv2.THRESH_BINARY_INV)
img1_mask = img1_mask[:, :, 0] & img1_mask[:, :, 1] & img1_mask[:, :, 2]
el = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
img1_mask = cv2.erode(img1_mask, el)
img2 = cv2.merge((img2, img2, img2))
img2 = cv2.copyTo(img1, img1_mask, img2)
cv2.imwrite('test_result.png', img2)
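(For reference: cv2.copyTo(src, mask, dst) copies the pixels of src into dst wherever the mask is non-zero, and the erosion above shrinks the mask slightly so the dark compression halo around each fragment is excluded from the copy.)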

How to remove small colored pixels from image using opencv

I am working on a project in which I have to process an image and get the RGB values of the colored lines shown in the image. I detect the lines and get the values by processing the image from left to right, line by line.
But if any colored pixels occur before the full line, my algorithm may produce wrong output.
Here is the original:
And here is the image after running my code, in which some pixels appear before and after the left line:
I implemented a filter, such as a median filter, which removes those small pixels and the noise, but it also changes the color values, so filters are useless for me. I want another way to do this.
Here is my code:
import cv2
import numpy as np

def image_processing(img):
    msg = ''
    try:
        img = cv2.imread(img)
        # img = cv2.resize(img, (650, 120), interpolation=cv2.INTER_AREA)
        img_hsv = cv2.cvtColor(255 - img, cv2.COLOR_BGR2HSV)
    except Exception as e:
        msg += 'Unable to read image.'
    else:
        lower_red = np.array([40, 0, 0])  # lower range for red values
        upper_red = np.array([95, 255, 255])  # upper range for red values
        # mask on red color lines to find them
        mask = cv2.inRange(img_hsv, lower_red, upper_red)
        # original image with just red color pixels; all other pixels will be set to 0 (black)
        color_detected_img = cv2.bitwise_and(img, img, mask=mask)
        # finding the pixels where color is detected in color_detected_img
        img_dimensions = color_detected_img.shape  # height & width of the image
        left_line_colors = []
        right_line_colors = []
        y_color_index = 10
        x_color_index = 0
        left_line_detected = False
        right_line_detected = False
        # getting left line values
        while x_color_index < img_dimensions[1] - 1:
            x_color_index += 1
            if color_detected_img[y_color_index, x_color_index].all() > 0:  # checking if color values are not 0 (0 is black)
                left_line_detected = True
                for y in range(img_dimensions[0] - 1):
                    # print(y, x_color_index)
                    left_line_colors.append(color_detected_img[y, x_color_index].tolist())
            elif left_line_detected:
                break
            else:
                continue
        # ---- Getting final results of left line ----
        try:
            left_line_colors = [l for l in left_line_colors if (l[0] != 0 and l[1] != 0 and l[2] != 0)]
            # adding all the rgb list values together, i.e. if -> [[1, 2, 3], [2, 4, 1]] then sum -> [3, 6, 4]
            sum_of_left_rgb = [sum(i) for i in zip(*left_line_colors)]
            left_rgb = [int(sum_of_left_rgb[0] / len(left_line_colors)),
                        int(sum_of_left_rgb[1] / len(left_line_colors)),
                        int(sum_of_left_rgb[2] / len(left_line_colors))]
            print(left_rgb[2], left_rgb[1], left_rgb[0])
        except:
            msg += 'No left line found.'
        cv2.imshow("Cropped", color_detected_img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    return msg

print(image_processing('C:/Users/Rizwan/Desktop/example_strip1.jpg'))
It should give me this result:
But I only get that result image by applying a median-blur filter, which changes the RGB values and therefore produces a wrong result.
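One possible direction, sketched under the assumption that the HSV range above isolates the lines correctly: clean the binary mask with a morphological opening instead of filtering the color image, so every surviving pixel keeps its exact original color values (the (5, 5) kernel size is a guess to tune):
import cv2
import numpy as np
img = cv2.imread('C:/Users/Rizwan/Desktop/example_strip1.jpg')
img_hsv = cv2.cvtColor(255 - img, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(img_hsv, np.array([40, 0, 0]), np.array([95, 255, 255]))
# opening removes isolated specks smaller than the kernel from the mask
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
clean_mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
# re-applying the cleaned mask leaves the remaining pixels' colors untouched
color_detected_img = cv2.bitwise_and(img, img, mask=clean_mask)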

Replacement of circular spots by respective colors

My objective here is to replace each spot in mask_image with the color of the corresponding spot in original_image. What I did here is find connected components and label them, but I can't figure out how to find the corresponding labeled spot and replace it.
How can I put the n circles in n objects and fill them with the corresponding intensities?
Any help would be appreciated.
For example, the spot at (2, 1) in the mask image should be painted with the color of the corresponding spot in the image below.
mask image http://myfair.software/goethe/images/mask.jpg
original image http://myfair.software/goethe/images/original.jpg
def thresh(img):
    ret, threshold = cv2.threshold(img, 5, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return threshold

def spot_id(img):
    seed_pt = (5, 5)
    fill_color = 0
    mask = np.zeros_like(img)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    for th in range(5, 255):
        prev_mask = mask.copy()
        mask = cv2.threshold(img, th, 255, cv2.THRESH_BINARY)[1]
        mask = cv2.floodFill(mask, None, seed_pt, fill_color)[1]
        mask = cv2.bitwise_or(mask, prev_mask)
        mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
    # here I labelled them
    n_centers, labels = cv2.connectedComponents(mask)
    label_hue = np.uint8(892 * labels / np.max(labels))
    blank_ch = 255 * np.ones_like(label_hue)
    labeled_img = cv2.merge([label_hue, blank_ch, blank_ch])
    labeled_img = cv2.cvtColor(labeled_img, cv2.COLOR_HSV2BGR)
    labeled_img[label_hue == 0] = 0
    print('There are %d bright spots in the image.' % n_centers)
    cv2.imshow("labeled_img", labeled_img)
    return mask, n_centers

image_thresh = thresh(img_greyscaled)
mask, centers = spot_id(img_greyscaled)
There is one very simple way of accomplishing this task: first, sample the value at the center of each dot in mask_image; next, expand that color to fill the dot in that same image.
Here is some code using PyDIP (because I know it better than OpenCV; I'm one of its authors). I'm sure something similar can be done with OpenCV alone:
import PyDIP as dip
import cv2
import numpy as np
# Load the color image shown in the question
original_image = cv2.imread('/home/cris/tmp/BxW25.png')
# Load the mask image shown in the question
mask_image = cv2.imread('/home/cris/tmp/aqf3Z.png')[:,:,0]
# Get a single colored pixel in the middle of each spot of the mask
colors = dip.EuclideanSkeleton(mask_image > 50, 'loose ends away') * original_image
# Spread that color across the full spot
# (dilation and similar operators like this one don't work with color images,
# so we apply the operation on each channel separately)
for t in range(colors.TensorElements()):
    colors.TensorElement(t).Copy(dip.MorphologicalReconstruction(colors.TensorElement(t), mask_image))
# Save the result
cv2.imwrite('/home/cris/tmp/so.png', np.array(colors))
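For comparison, a rough OpenCV-only sketch of the same idea: label the spots with cv2.connectedComponents and paint each one with the mean color of the matching region in the original image (mean color instead of the center-pixel sampling used above; the file names and the > 50 threshold are assumptions):
import cv2
import numpy as np
original_image = cv2.imread('original.jpg')
mask_image = cv2.imread('mask.jpg', cv2.IMREAD_GRAYSCALE)
binary = (mask_image > 50).astype(np.uint8)
n_labels, labels = cv2.connectedComponents(binary)
output = np.zeros_like(original_image)
for label in range(1, n_labels):  # label 0 is the background
    region = labels == label
    output[region] = original_image[region].mean(axis=0).astype(np.uint8)
cv2.imwrite('filled_spots.png', output)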

Detection of leaf on unpredictable background

A project I have been working on for some time is unsupervised leaf segmentation. The leaves are captured on white or colored paper, and some of them have shadows.
I want to be able to threshold the leaf and also remove the shadow (while preserving the leaf's details); however, I cannot use fixed threshold values due to diseases changing the color of the leaf.
Then I began to research and found a proposal by Horprasert et al. (1999) in "A Statistical Approach for Real-time Robust Background Subtraction and Shadow Detection", which compares areas in the image with the color of the now-known background using the chromaticity distortion measure. This measure accounts for the fact that, for desaturated colors, hue is not a relevant measure.
Based on it, I was able to achieve the following results:
However, the leaves captured on white paper need Mask V inverted with cv2.bitwise_not(), giving me the result below:
I'm thinking that I'm forgetting some step to get a complete mask that will work for all or most of my leaves. Samples can be found here.
My Code:
import numpy as np
import cv2
import matplotlib.pyplot as plot
import scipy.ndimage as ndimage

# Brightness distortion from Horprasert et al. (1999)
def brightness_distortion(I, mu, sigma):
    return np.sum(I * mu / sigma**2, axis=-1) / np.sum((mu / sigma)**2, axis=-1)

# Chromaticity distortion from the same paper
def chromacity_distortion(I, mu, sigma):
    alpha = brightness_distortion(I, mu, sigma)[..., None]
    return np.sqrt(np.sum(((I - alpha * mu) / sigma)**2, axis=-1))

# Keep only the largest connected component
def bwareafilt(image):
    image = image.astype(np.uint8)
    nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(image, connectivity=4)
    sizes = stats[:, -1]
    max_label = 1
    max_size = sizes[1]
    for i in range(2, nb_components):
        if sizes[i] > max_size:
            max_label = i
            max_size = sizes[i]
    img2 = np.zeros(output.shape)
    img2[output == max_label] = 255
    return img2

img = cv2.imread("Amostra03.jpeg")
sat = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)[:, :, 1]
val = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)[:, :, 2]
sat = cv2.medianBlur(sat, 11)
val = cv2.medianBlur(val, 11)
thresh_S = cv2.adaptiveThreshold(sat, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 401, 10)
thresh_V = cv2.adaptiveThreshold(val, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 401, 10)
mean_S, stdev_S = cv2.meanStdDev(img, mask=255 - thresh_S)
mean_S = mean_S.ravel().flatten()
stdev_S = stdev_S.ravel()
chrom_S = chromacity_distortion(img, mean_S, stdev_S)
chrom255_S = cv2.normalize(chrom_S, chrom_S, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX).astype(np.uint8)[:, :, None]
mean_V, stdev_V = cv2.meanStdDev(img, mask=255 - thresh_V)
mean_V = mean_V.ravel().flatten()
stdev_V = stdev_V.ravel()
chrom_V = chromacity_distortion(img, mean_V, stdev_V)
chrom255_V = cv2.normalize(chrom_V, chrom_V, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX).astype(np.uint8)[:, :, None]
thresh2_S = cv2.adaptiveThreshold(chrom255_S, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 401, 10)
thresh2_V = cv2.adaptiveThreshold(chrom255_V, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 401, 10)
images = [img, thresh_S, thresh_V, cv2.bitwise_and(thresh2_S, cv2.bitwise_not(thresh2_V))]
titles = ['Original Image', 'Mask S', 'Mask V', 'S + V']
for i in range(4):
    plot.subplot(2, 2, i + 1)
    if i == 0:
        plot.imshow(images[i])
    else:
        plot.imshow(images[i], cmap='gray')
    plot.title(titles[i])
    plot.xticks([]), plot.yticks([])
plot.show()
Any idea to solve this issue?
Try this one... I'm using grabCut from the OpenCV lib. It's not perfect, but it might be a good start.
import cv2
import numpy as np
from matplotlib import pyplot as plt
import matplotlib
#%matplotlib inline #uncomment if in notebook
def mask_leaf(im_name, external_mask=None):
    im = cv2.imread(im_name)
    im = cv2.blur(im, (5, 5))
    height, width = im.shape[:2]
    mask = np.ones(im.shape[:2], dtype=np.uint8) * 2  # start all possible background
    '''
    # from docs:
    0 GC_BGD defines an obvious background pixel.
    1 GC_FGD defines an obvious foreground (object) pixel.
    2 GC_PR_BGD defines a possible background pixel.
    3 GC_PR_FGD defines a possible foreground pixel.
    '''
    # 2 circles are "drawn" on the mask: a smaller centered one whose pixels I assume are all
    # definite foreground, and a bigger circle of probable foreground.
    r = 100
    cv2.circle(mask, (int(width / 2.), int(height / 2.)), 2 * r, 3, -3)  # possible fg
    # next 2 are greens... dark and bright, to increase the number of fg pixels.
    mask[(im[:, :, 0] < 45) & (im[:, :, 1] > 55) & (im[:, :, 2] < 55)] = 1  # dark green
    mask[(im[:, :, 0] < 190) & (im[:, :, 1] > 190) & (im[:, :, 2] < 200)] = 1  # bright green
    mask[(im[:, :, 0] > 200) & (im[:, :, 1] > 200) & (im[:, :, 2] > 200) & (mask != 1)] = 0  # pretty white
    cv2.circle(mask, (int(width / 2.), int(height / 2.)), r, 1, -3)  # fg
    # if you pass in an external mask derived from some other operation it is factored in here.
    if external_mask is not None:
        mask[external_mask == 1] = 1
    bgdmodel = np.zeros((1, 65), np.float64)
    fgdmodel = np.zeros((1, 65), np.float64)
    cv2.grabCut(im, mask, None, bgdmodel, fgdmodel, 1, cv2.GC_INIT_WITH_MASK)
    # show mask
    plt.figure(figsize=(10, 10))
    plt.imshow(mask)
    plt.show()
    # mask image
    mask2 = np.where((mask == 1) + (mask == 3), 255, 0).astype('uint8')
    output = cv2.bitwise_and(im, im, mask=mask2)
    plt.figure(figsize=(10, 10))
    plt.imshow(output)
    plt.show()

mask_leaf('leaf1.jpg', external_mask=None)
mask_leaf('leaf2.jpg', external_mask=None)
Addressing the external mask: here's an example of HDBSCAN clustering. I'm not going to go into the details; you can look up the docs and change it or use it as-is.
import hdbscan
from collections import Counter
def hdbscan_mask(im_name):
    im = cv2.imread(im_name)
    im = cv2.blur(im, (5, 5))
    indices = np.dstack(np.indices(im.shape[:2]))
    data = np.concatenate((indices, im), axis=-1)
    data = data[:, :, 2:]  # keep only the color channels
    data = data.reshape(im.shape[0] * im.shape[1], 3)
    clusterer = hdbscan.HDBSCAN(min_cluster_size=1000, min_samples=20)
    clusterer.fit(data)
    plt.figure(figsize=(10, 10))
    plt.imshow(clusterer.labels_.reshape(im.shape[0:2]))
    plt.show()
    height, width = im.shape[:2]
    mask = np.ones(im.shape[:2], dtype=np.uint8) * 2  # start all possible background
    cv2.circle(mask, (int(width / 2.), int(height / 2.)), 100, 1, -3)  # possible fg
    # grab the cluster numbers inside the circle
    vals_im = clusterer.labels_.reshape(im.shape[0:2])
    vals = vals_im[mask == 1]
    commonvals = []
    cnts = Counter(vals)
    for v, count in cnts.most_common(20):
        # print('%i: %7d' % (v, count))
        if v == -1:
            continue
        commonvals.append(v)
    tst = np.in1d(vals_im, np.array(commonvals))
    tst = tst.reshape(vals_im.shape)
    hmask = tst.astype(np.uint8)
    plt.figure(figsize=(10, 10))
    plt.imshow(hmask)
    plt.show()
    return hmask

hmask = hdbscan_mask('leaf1.jpg')
Then, to use the initial function with the new mask (output suppressed):
mask_leaf('leaf1.jpg', external_mask=hmask)
This was all made in a notebook from scratch, so hopefully there are no errant variables that choke it up when running it somewhere else. (Note: I did NOT swap BGR to RGB for plt display, sorry.)
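(If the colors look swapped in matplotlib, converting first fixes it, e.g. plt.imshow(cv2.cvtColor(im, cv2.COLOR_BGR2RGB)).)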
