I'm trying to make a circle under the player on soccer field, similar to this:
If I just do a circle around the player's feet, it looks bad:
I'm trying to draw the circle only on the green part of the field (to make it more 3D).
First I masked only the green part on the field, using the following code:
def mask(img):
    ## convert to hsv
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    ## mask of green (36,25,25) ~ (86, 255,255)
    lower = (36, 25, 25)
    upper = (86, 255, 255)
    mask = cv2.inRange(hsv, lower, upper)

    ## slice the green
    imask = mask == 0
    green = np.zeros_like(img, np.uint8)
    green[imask] = img[imask]
    img = green

    return img
This gives the following result:
How can I draw the ellipse only on the green (after the masking - black) part?
P.S. This is the full code:
import cv2
import numpy as np


def mask(img):
    ## convert to hsv
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    ## mask of green (36,25,25) ~ (86, 255,255)
    lower = (36, 25, 25)
    upper = (86, 255, 255)
    mask = cv2.inRange(hsv, lower, upper)

    ## slice the green
    imask = mask == 0
    green = np.zeros_like(img, np.uint8)
    # green.fill(255)
    green[imask] = img[imask]
    img = green

    return img


def player_ellipse(img, player_point):
    axesLength = (45, 20)
    angle = 0
    startAngle = 0
    endAngle = 360

    # Red color in BGR
    color = (0, 0, 255)

    # Line thickness of 5 px
    thickness = 5

    img = cv2.ellipse(img, player_point, axesLength, angle, startAngle, endAngle, color, thickness)
    return img


def main():
    img = cv2.imread('img.png')
    point = (160, 665)
    img = player_ellipse(img, point)
    img = mask(img)
    cv2.imshow("img", img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()


main()
And this is player without any editing:
You are almost there. All that's left to do is to copy the pixels from the original image (without the ellipse) using the mask you found, as answered by Micka and Christoph Rackwitz in the comments.
Optionally you may apply some morphological operations to make the mask more appealing.
So the steps are:
Draw an ellipse:
Extract a mask using green color:
[Optional] Apply mask erode:
Copy pixels from original image using the mask:
img_with_ellipse[green_mask] = img[green_mask]
Complete example:
import cv2
import numpy as np


def mask(img):
    ## convert to hsv
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    ## mask of green (36,25,25) ~ (86, 255,255)
    lower = (36, 25, 25)
    upper = (86, 255, 255)
    mask = cv2.inRange(hsv, lower, upper)
    mask = cv2.erode(mask, np.ones((3, 3), np.uint8), iterations=5) == 0
    return mask


def player_ellipse(img, player_point):
    axesLength = (45, 20)
    angle = 0
    startAngle = 0
    endAngle = 360

    # Red color in BGR
    color = (0, 0, 255)

    # Line thickness of 5 px
    thickness = 5

    img = cv2.ellipse(
        img.copy(),
        player_point,
        axesLength,
        angle,
        startAngle,
        endAngle,
        color,
        thickness,
    )
    return img


def main():
    img = cv2.imread("img.png")
    point = (160, 665)
    green_mask = mask(img)
    img_with_ellipse = player_ellipse(img, point)
    img_with_ellipse[green_mask] = img[green_mask]
    cv2.imshow("img", img_with_ellipse)
    cv2.waitKey(0)


main()
Related
I'm trying to use opencv to grab specific elements out of an image. So far I have reduced the number of colors in the image and built a dictionary of colors with their counts. What I want to do now is replace every color that does not match a given color with white, so that I can iterate over the colors and create separate images, each containing only the objects of one particular color.
This is what I've done so far:
def showOnlyOneColor(img, rgb_key):
    print("Getting only one color")
    rgb_key = rgb_key.split("-")
    r = int(rgb_key[0])
    g = int(rgb_key[1])
    b = int(rgb_key[2])
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    rgb_low = np.array([r, g, b])
    rgb_high = np.array([r, g, b])
    mask = cv2.inRange(hsv, rgb_low, rgb_high)
    contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours = contours[0] if len(contours) == 2 else contours[1]
    cv2.fillPoly(mask, contours, (255, 255, 255))
    result = cv2.bitwise_and(img, img, mask=mask)
    return result
Second attempt:
def showOnlyOneColor(img, rgb_key):
    print("Getting only one color")
    rgb_key = rgb_key.split("-")
    r = int(rgb_key[0])
    g = int(rgb_key[1])
    b = int(rgb_key[2])
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    rgb_low = np.array([r, g, b])
    rgb_high = np.array([r, g, b])
    mask = cv2.inRange(hsv, rgb_low, rgb_high)
    img[mask != 255] = (255, 255, 255)
    return img
This results in a white image for all iterations
This is what I'm using to reduce the number of colors in the image:
def reduceNumberOfColors(img):
    div = 128
    return img // div * div + div // 2
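For reference (my own illustration, not part of the question): with div = 128 this integer quantization maps every channel value in 0-127 to 64 and every value in 128-255 to 192, so each channel keeps two levels and the whole image at most 2^3 = 8 colors. A minimal sketch of the mapping:

import numpy as np

div = 128
sample = np.array([0, 63, 127, 128, 200, 255], dtype=np.uint8)
print(sample // div * div + div // 2)  # -> [ 64  64  64 192 192 192]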
Latest attempt:
def showOnlyOneColor(img, rgb_key):
    print("Getting only one color")
    rgb_key = rgb_key.split("-")
    r = int(rgb_key[0])
    g = int(rgb_key[1])
    b = int(rgb_key[2])
    hsv_color = colorsys.rgb_to_hsv(r, g, b)
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    rgb_low = np.array(hsv_color)
    rgb_high = np.array(hsv_color)
    mask = cv2.inRange(hsv, rgb_low, rgb_high)
    img[mask != 255] = (255, 255, 255)
    return img
One of the resulting images shows something; the rest of the images are white.
This is how I organize the RGB keys:
def getColorCount(img):
    color_dict = {}
    print("Getting color count")
    for i in tqdm(range(img.shape[0])):
        for j in range(img.shape[1]):
            color = img[i, j]
            r = color[0]
            g = color[1]
            b = color[2]
            rgb_askey = str(r) + "-" + str(g) + "-" + str(b)
            if rgb_askey not in color_dict.keys():
                color_dict[rgb_askey] = 1
            else:
                color_dict[rgb_askey] += 1
    return color_dict
Here is one way to do what you want in Python/OpenCV. Note colors are in the order B,G,R.
Input:
import cv2
import numpy as np

# read image
img = cv2.imread('corn.jpg')

# do simple color reduction
imgcopy = img.copy()
div = 128
imgcopy = div * (imgcopy // div) + div // 2

# get list of unique colors
list_bgr_colors = np.unique(imgcopy.reshape(-1, imgcopy.shape[2]), axis=0)
print(list_bgr_colors)
print(list_bgr_colors[1])

# save reduced color image
cv2.imwrite("corn_reduced_colors.png", imgcopy)

# display reduced color image
cv2.imshow("reduced_colors", imgcopy)
cv2.waitKey(0)

# loop over colors in list and change all non-specified colors to white
i = 1
for color in list_bgr_colors:

    # threshold on the specified color
    lower = np.array((color))
    upper = np.array((color))
    mask = cv2.inRange(imgcopy, lower, upper)

    # change all non-specified colors to white
    result = imgcopy.copy()
    result[mask != 255] = (255, 255, 255)

    # save results
    cv2.imwrite("corn_color_{0}.png".format(i), result)

    # display result
    cv2.imshow("result", result)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    # increment
    i += 1
List of Reduced Colors:
[[ 64 64 64]
[ 64 64 192]
[ 64 192 64]
[ 64 192 192]
[192 192 192]]
Reduced Color Image:
Individual Color Images:
This works for me in Python/OpenCV. Note that OpenCV colors are in the order B,G,R.
import cv2
import numpy as np
# read image
img = cv2.imread('corn.jpg')
# threshold on yellow color
lower=(0,170,215)
upper=(70,255,255)
mask = cv2.inRange(img, lower, upper)
# change all non-yellow to white
result = img.copy()
result[mask!=255] = (255, 255, 255)
# save results
cv2.imwrite('corn_yellow.jpg',result)
# display result
cv2.imshow("mask", mask)
cv2.imshow("result", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Colors in OpenCV are in the order B,G,R.
So try this in Python/OpenCV.
def showOnlyOneColor(img, bgr_key):
    print("Getting only one color")
    bgr_key = bgr_key.split("-")
    b = int(bgr_key[0])
    g = int(bgr_key[1])
    r = int(bgr_key[2])
    bgr_low = np.array((b, g, r))
    bgr_high = np.array((b, g, r))
    mask = cv2.inRange(img, bgr_low, bgr_high)
    img[mask != 255] = (255, 255, 255)
    return img
ADDITION
I think your issue is in your assumption that you have RGB colors when you use colorsys to convert from RGB to HSV. Your colors are actually B, G, R. So to use colorsys you need to convert your image to RGB first, or better, reverse the order of the colors you extract from B,G,R to R,G,B before using colorsys.
Your dictionary colors are actually B, G, R, not R, G, B. So reverse them when you build the dictionary, or reverse them later when using colorsys.
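As a hedged sketch of that fix (the helper name and the scaling factors are my own, not from the original answer): reverse the B,G,R key into R,G,B before calling colorsys, and remember that colorsys returns h and s in the 0..1 range, so they must be rescaled to OpenCV's H in 0..179 and S in 0..255 before comparing against an image converted with cv2.COLOR_BGR2HSV.

import colorsys

import cv2
import numpy as np

def bgr_key_to_opencv_hsv(bgr_key):
    # the dictionary keys were built from BGR pixels, so unpack in that order
    b, g, r = (int(v) for v in bgr_key.split("-"))
    # colorsys expects RGB; with 0-255 inputs it returns h, s in 0..1 and v in 0..255
    h, s, v = colorsys.rgb_to_hsv(r, g, b)
    # rescale to OpenCV's HSV convention: H in 0..179, S and V in 0..255
    return np.array([round(h * 179), round(s * 255), round(v)], dtype=np.uint8)

# usage sketch with a hypothetical key:
# hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# target = bgr_key_to_opencv_hsv("64-192-64")
# mask = cv2.inRange(hsv, target, target)
# (a small tolerance around target may be needed because of rounding)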
I have masked the infected area and now want to calculate the percentage of the leaf area that is infected. This is my code. How can I calculate the percentage of infected area?
import cv2
import numpy as np
img = cv2.imread('AFTER_5736.png')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# find the green color
mask_green = cv2.inRange(hsv, (36, 0, 0), (86,255,255))
# find the brown color
mask_brown = cv2.inRange(hsv, (8, 60, 20), (30, 255, 255))
# find the yellow color in the leaf
mask_yellow = cv2.inRange(hsv, (14, 39, 64), (40, 255, 255))
# find any of the three colors(green or brown or yellow) in the image
#mask = cv2.bitwise_or(mask_green, mask_brown)
#mask = cv2.bitwise_or(mask, mask_yellow)
mask = cv2.bitwise_not(mask_green)
# Bitwise-AND mask and original image
res = cv2.bitwise_not(img, img, mask= mask)
cv2.imshow("final image", res)
cv2.waitKey(0)
cv2.destroyAllWindows()
Original Image
Mask Image indicate the infected area of the leaves
Try taking the sum of each mask to get a measure of how many pixels are masked for that color.
>>> sum(sum(mask_brown))
16203
>>> sum(sum(mask_green))
22906
>>> sum(sum(mask_yellow))
9292
>>> brown = sum(sum(mask_brown))
>>> green = sum(sum(mask_green))
>>> yellow = sum(sum(mask_yellow))
>>> total = brown + green + yellow
>>> percentHealthy = green / total
>>> percentHealthy
0.47325468482056154
>>> percentDiseased = (brown + yellow) / total
>>> percentDiseased
0.5267453151794385
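As a side note (my addition, not part of the original answer), cv2.countNonZero gives the pixel counts directly instead of summing raw 0/255 mask values; the percentages come out the same way. A minimal sketch using the ranges from the question:

import cv2

img = cv2.imread('AFTER_5736.png')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

mask_green = cv2.inRange(hsv, (36, 0, 0), (86, 255, 255))
mask_brown = cv2.inRange(hsv, (8, 60, 20), (30, 255, 255))
mask_yellow = cv2.inRange(hsv, (14, 39, 64), (40, 255, 255))

# count the white pixels in each mask
green = cv2.countNonZero(mask_green)
brown = cv2.countNonZero(mask_brown)
yellow = cv2.countNonZero(mask_yellow)

total = green + brown + yellow
print("healthy: {:.2%}".format(green / total))
print("diseased: {:.2%}".format((brown + yellow) / total))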
import cv2
import numpy as np
img = cv2.imread('AFTER_5746.png')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# find the green color
mask_green = cv2.inRange(hsv, (36,0,0), (86,255,255))
# find the brown color
mask_brown = cv2.inRange(hsv, (90, 60, 20), (30, 255, 200))
# find the yellow color in the leaf
mask_yellow = cv2.inRange(hsv, (14, 39, 64), (40, 255, 255))
# find any of the three colors(green or brown or yellow) in the image
mask = cv2.bitwise_or(mask_green, mask_brown)
mask = cv2.bitwise_or(mask, mask_yellow)
# Bitwise-AND mask and original image
res = cv2.bitwise_and(img,img, mask= mask)
cv2.imshow("original", img)
cv2.imshow("final image", res)
cv2.waitKey(0)
cv2.destroyAllWindows()
I use this image segmentation with the HSV colormap, but the brown area always goes missing in the extracted leaf image, as shown here:
How can I make the mask_brown region visible?
Your brown mask is empty (no white). How did you get your values for brown? They do not cover brown in OpenCV HSV. Your brown sits at the hue = 0/180 wrap-around. OpenCV inRange() does not seem to accept 160 as the lower hue and 20 as the upper, respectively (without separating into two browns, one from 160 to 180 and the other from 0 to 20, as sketched below). So I just gave it 0 to 180 to cover the range. Don't go too high in Value (V) or you will start including the sky.
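For completeness, here is a hedged sketch of the split-range alternative mentioned above: threshold the two hue segments separately and OR the masks together (the exact saturation/value bounds are illustrative assumptions, not tuned values).

import cv2

img = cv2.imread('AFTER_5746.png')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

# brown straddles the 0/180 hue wrap-around, so use two ranges and combine them
mask_brown_low = cv2.inRange(hsv, (0, 60, 20), (20, 255, 200))
mask_brown_high = cv2.inRange(hsv, (160, 60, 20), (180, 255, 200))
mask_brown = cv2.bitwise_or(mask_brown_low, mask_brown_high)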
Here is a selection of colors for the brown that seems to work for me in Python/OpenCV.
Input:
import cv2
import numpy as np
img = cv2.imread('AFTER_5746.png')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# find the green color
mask_green = cv2.inRange(hsv, (36,0,0), (86,255,255))
# find the brown color
mask_brown = cv2.inRange(hsv, (0, 0, 0), (180, 255, 160))
# find the yellow color in the leaf
mask_yellow = cv2.inRange(hsv, (14, 39, 64), (40, 255, 255))
# find any of the three colors(green or brown or yellow) in the image
mask = cv2.bitwise_or(mask_green, mask_brown)
mask = cv2.bitwise_or(mask, mask_yellow)
# Bitwise-AND mask and original image
res = cv2.bitwise_and(img,img, mask= mask)
cv2.imshow("original", img)
cv2.imshow("mask_green", mask_green)
cv2.imshow("mask_brown", mask_brown)
cv2.imshow("mask_yellow", mask_yellow)
cv2.imshow("mask", mask)
cv2.imshow("final image", res)
cv2.waitKey(0)
cv2.destroyAllWindows()
I assume this is what you want.
Problem
Using this answer to create a segmentation program, I found that it counts the objects incorrectly. I noticed that isolated objects are being ignored, possibly because of poor image acquisition.
I counted 123 objects but the program returns 117, as can be seen below. The objects circled in red seem to be missing:
Using the following image from a 720p webcam:
Code
import cv2
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import label
import urllib.request


# https://stackoverflow.com/a/14617359/7690982
def segment_on_dt(a, img):
    border = cv2.dilate(img, None, iterations=5)
    border = border - cv2.erode(border, None)

    dt = cv2.distanceTransform(img, cv2.DIST_L2, 3)
    plt.imshow(dt)
    plt.show()
    dt = ((dt - dt.min()) / (dt.max() - dt.min()) * 255).astype(np.uint8)
    _, dt = cv2.threshold(dt, 140, 255, cv2.THRESH_BINARY)
    lbl, ncc = label(dt)
    lbl = lbl * (255 / (ncc + 1))
    # Completing the markers now.
    lbl[border == 255] = 255

    lbl = lbl.astype(np.int32)
    cv2.watershed(a, lbl)
    print("[INFO] {} unique segments found".format(len(np.unique(lbl)) - 1))
    lbl[lbl == -1] = 0
    lbl = lbl.astype(np.uint8)
    return 255 - lbl


# Open Image
resp = urllib.request.urlopen("https://i.stack.imgur.com/YUgob.jpg")
img = np.asarray(bytearray(resp.read()), dtype="uint8")
img = cv2.imdecode(img, cv2.IMREAD_COLOR)

## Yellow slicer
mask = cv2.inRange(img, (0, 0, 0), (55, 255, 255))
imask = mask > 0
slicer = np.zeros_like(img, np.uint8)
slicer[imask] = img[imask]

# Image Binarization
img_gray = cv2.cvtColor(slicer, cv2.COLOR_BGR2GRAY)
_, img_bin = cv2.threshold(img_gray, 140, 255, cv2.THRESH_BINARY)

# Morphological Gradient
img_bin = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, np.ones((3, 3), dtype=int))

# Segmentation
result = segment_on_dt(img, img_bin)
plt.imshow(np.hstack([result, img_gray]), cmap='Set3')
plt.show()

# Final Picture
result[result != 255] = 0
result = cv2.dilate(result, None)
img[result == 255] = (0, 0, 255)
plt.imshow(result)
plt.show()
Question
How to count the missing objects?
Answering your main question: watershed does not remove single objects. Watershed was functioning fine in your algorithm; it receives the predefined labels and performs segmentation accordingly.
The problem was that the threshold you set for the distance transform was too high: it removed the weak signal from the single objects, preventing them from being labeled and sent to the watershed algorithm.
The distance transform signal was weak because of improper segmentation during the color segmentation stage and the difficulty of setting a single threshold that removes noise while keeping the signal.
To remedy this, we need to perform proper color segmentation and use an adaptive threshold instead of a single threshold when segmenting the distance transform signal.
Here is the code I modified. I have incorporated the color segmentation method by @user1269942 in the code. Extra explanation is in the code.
import cv2
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import label
import urllib.request


# https://stackoverflow.com/a/14617359/7690982
def segment_on_dt(a, img, img_gray):
    # Added several elliptical structuring elements for a better morphology process
    struct_big = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    struct_small = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))

    # increase border size
    border = cv2.dilate(img, struct_big, iterations=5)
    border = border - cv2.erode(img, struct_small)

    dt = cv2.distanceTransform(img, cv2.DIST_L2, 3)
    dt = ((dt - dt.min()) / (dt.max() - dt.min()) * 255).astype(np.uint8)

    # blur the signal lightly to remove noise
    dt = cv2.GaussianBlur(dt, (7, 7), -1)

    # Adaptive threshold to extract local maxima of the distance transform signal
    dt = cv2.adaptiveThreshold(dt, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 21, -9)
    # _, dt = cv2.threshold(dt, 2, 255, cv2.THRESH_BINARY)

    # Morphology operations to clean the thresholded signal
    dt = cv2.erode(dt, struct_small, iterations=1)
    dt = cv2.dilate(dt, struct_big, iterations=10)

    plt.imshow(dt)
    plt.show()

    # Labeling
    lbl, ncc = label(dt)
    lbl = lbl * (255 / (ncc + 1))
    # Completing the markers now.
    lbl[border == 255] = 255

    plt.imshow(lbl)
    plt.show()

    lbl = lbl.astype(np.int32)
    cv2.watershed(a, lbl)
    print("[INFO] {} unique segments found".format(len(np.unique(lbl)) - 1))
    lbl[lbl == -1] = 0
    lbl = lbl.astype(np.uint8)
    return 255 - lbl


# Open Image
resp = urllib.request.urlopen("https://i.stack.imgur.com/YUgob.jpg")
img = np.asarray(bytearray(resp.read()), dtype="uint8")
img = cv2.imdecode(img, cv2.IMREAD_COLOR)

## Yellow slicer
# blur to remove noise
img = cv2.blur(img, (9, 9))

# proper color segmentation
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, (0, 140, 160), (35, 255, 255))
# mask = cv2.inRange(img, (0, 0, 0), (55, 255, 255))

imask = mask > 0
slicer = np.zeros_like(img, np.uint8)
slicer[imask] = img[imask]

# Image Binarization
img_gray = cv2.cvtColor(slicer, cv2.COLOR_BGR2GRAY)
_, img_bin = cv2.threshold(img_gray, 140, 255, cv2.THRESH_BINARY)

plt.imshow(img_bin)
plt.show()

# Morphological Gradient
# added
cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)), img_bin, (-1, -1), 10)
cv2.morphologyEx(img_bin, cv2.MORPH_ERODE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)), img_bin, (-1, -1), 3)

plt.imshow(img_bin)
plt.show()

# Segmentation
result = segment_on_dt(img, img_bin, img_gray)
plt.imshow(np.hstack([result, img_gray]), cmap='Set3')
plt.show()

# Final Picture
result[result != 255] = 0
result = cv2.dilate(result, None)
img[result == 255] = (0, 0, 255)
plt.imshow(result)
plt.show()
Final results:
124 unique items found.
An extra item was found because one of the objects was divided in two.
With proper parameter tuning, you might get the exact number you are looking for. But I would suggest getting a better camera.
Looking at your code, it is completely reasonable, so I'm just going to make one small suggestion: do your "inRange" in the HSV color space.
opencv docs on color spaces:
https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_colorspaces/py_colorspaces.html
another SO example using inRange with HSV:
How to detect two different colors using `cv2.inRange` in Python-OpenCV?
and a small code edit for you:
img = cv2.blur(img, (5,5)) #new addition just before "##yellow slicer"
## Yellow slicer
#mask = cv2.inRange(img, (0, 0, 0), (55, 255, 255)) #your line: comment out.
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) #new addition...convert to hsv
mask = cv2.inRange(hsv, (0, 120, 120), (35, 255, 255)) #new addition use hsv for inRange and an adjustment to the values.
Improving Accuracy
Detecting missing objects
im_1, im_2, im_3
I counted 12 missing objects: 2, 7, 8, 11, 65, 77, 78, 84, 92, 95, 96. Edit: 85 too.
117 found, 12 missing, 6 wrong
1st Attempt: Decrease Mask Sensitivity
#mask = cv2.inRange(img, (0, 0, 0), (55, 255, 255)) #Current
mask = cv2.inRange(img, (0, 0, 0), (80, 255, 255)) #1' Attempt
inRange documentation
im_4, im_5, im_6, im_7
[INFO] 120 unique segments found
120 found, 9 missing, 6 wrong
I want to detect only green objects in an image captured in a natural environment. How do I define the range? I want to pass a threshold value, say 'x', and using this x have all green-colored objects appear in one color (white) while everything else appears in another color (black).
Please guide me to do this.
Update:
I made an HSV colormap. It's easier and more accurate to find the color range using this map than before.
And maybe I should change to using (40, 40, 40) ~ (70, 255, 255) in HSV to find the green.
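If you want to generate such a colormap yourself, here is a minimal sketch (my own addition, not the original answer's code): build a grid with hue on one axis and saturation on the other at full value, then convert it to BGR for display.

import cv2
import numpy as np

# hue along the x-axis (0..179), saturation along the y-axis (0..255), value fixed at 255
h = np.tile(np.arange(180, dtype=np.uint8), (256, 1))
s = np.repeat(np.arange(256, dtype=np.uint8)[:, None], 180, axis=1)
v = np.full((256, 180), 255, dtype=np.uint8)

hsv_map = cv2.merge([h, s, v])
bgr_map = cv2.cvtColor(hsv_map, cv2.COLOR_HSV2BGR)
cv2.imwrite("hsv_colormap.png", bgr_map)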
Original answer:
Convert to HSV color-space,
Use cv2.inRange(hsv, hsv_lower, hsv_higher) to get the green mask.
We use the range (in hsv): (36,0,0) ~ (86,255,255) for this sunflower.
The source image:
The masked green regions:
More steps:
The core source code:
import cv2
import numpy as np
## Read
img = cv2.imread("sunflower.jpg")
## convert to hsv
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
## mask of green (36,25,25) ~ (86, 255,255)
# mask = cv2.inRange(hsv, (36, 25, 25), (86, 255,255))
mask = cv2.inRange(hsv, (36, 25, 25), (70, 255,255))
## slice the green
imask = mask>0
green = np.zeros_like(img, np.uint8)
green[imask] = img[imask]
## save
cv2.imwrite("green.png", green)
Similar:
Choosing the correct upper and lower HSV boundaries for color detection with`cv::inRange` (OpenCV)
Intro:
Applying a threshold to detect green color can be performed quite easily using LAB color space.
The LAB color space also has 3 channels but unlike its RGB counterpart (where all 3 are color channels), in LAB there are 2 color channels and 1 brightness channel:
L-channel: represents the brightness value in the image
A-channel: represents the red and green color in the image
B-channel: represents the blue and yellow color in the image
Observing the following diagram:
The green and red colors are represented at the extremes of the A-channel. Applying a suitable threshold at either of these extremes can segment either the green or the red color.
Demo:
The following images are in the order:
1. Original image -->> 2. A-channel of LAB converted image
3. Threshold -->> 4. Mask on the original image
Sample 1:
Sample 2:
Sample 3:
Code:
The code has just a few lines:
import cv2

# read image in BGR
img = cv2.imread('image_path')

# convert to LAB space
lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)

# store the a-channel
a_channel = lab[:,:,1]

# Automate threshold using Otsu method
th = cv2.threshold(a_channel, 127, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]

# Mask the result with the original image
masked = cv2.bitwise_and(img, img, mask=th)
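As noted above, red sits at the opposite extreme of the A-channel. A minimal sketch for the red side (my own addition; the threshold value of 150 is an illustrative assumption, not from the original answer):

import cv2

img = cv2.imread('image_path')  # placeholder path, as above
lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
a_channel = lab[:,:,1]

# red lies at the high end of the A-channel, so keep values above the threshold
th_red = cv2.threshold(a_channel, 150, 255, cv2.THRESH_BINARY)[1]
masked_red = cv2.bitwise_and(img, img, mask=th_red)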
Exception:
The method above will work perfectly if the green color appears distinctly. But applying an automated threshold might not always work, especially when there are various shades of green in the same image.
In such cases, you can set the threshold manually on the A-channel.
import cv2

img = cv2.imread('flower.jpg')
lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
a_channel = lab[:,:,1]

# manually set threshold value
th = cv2.threshold(a_channel, 105, 255, cv2.THRESH_BINARY_INV)[1]

# perform masking
masked = cv2.bitwise_and(img, img, mask=th)
Threshold image | Masked image
You can use a simple HSV color thresholder script to determine the lower/upper color ranges using trackbars for any image on the disk. Simply change the image path in cv2.imread(). Example to isolate green:
import cv2
import numpy as np

def nothing(x):
    pass

# Load image
image = cv2.imread('1.jpg')

# Create a window
cv2.namedWindow('image')

# Create trackbars for color change
# Hue is from 0-179 for Opencv
cv2.createTrackbar('HMin', 'image', 0, 179, nothing)
cv2.createTrackbar('SMin', 'image', 0, 255, nothing)
cv2.createTrackbar('VMin', 'image', 0, 255, nothing)
cv2.createTrackbar('HMax', 'image', 0, 179, nothing)
cv2.createTrackbar('SMax', 'image', 0, 255, nothing)
cv2.createTrackbar('VMax', 'image', 0, 255, nothing)

# Set default value for Max HSV trackbars
cv2.setTrackbarPos('HMax', 'image', 179)
cv2.setTrackbarPos('SMax', 'image', 255)
cv2.setTrackbarPos('VMax', 'image', 255)

# Initialize HSV min/max values
hMin = sMin = vMin = hMax = sMax = vMax = 0
phMin = psMin = pvMin = phMax = psMax = pvMax = 0

while(1):
    # Get current positions of all trackbars
    hMin = cv2.getTrackbarPos('HMin', 'image')
    sMin = cv2.getTrackbarPos('SMin', 'image')
    vMin = cv2.getTrackbarPos('VMin', 'image')
    hMax = cv2.getTrackbarPos('HMax', 'image')
    sMax = cv2.getTrackbarPos('SMax', 'image')
    vMax = cv2.getTrackbarPos('VMax', 'image')

    # Set minimum and maximum HSV values to display
    lower = np.array([hMin, sMin, vMin])
    upper = np.array([hMax, sMax, vMax])

    # Convert to HSV format and color threshold
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, lower, upper)
    result = cv2.bitwise_and(image, image, mask=mask)

    # Print if there is a change in HSV value
    if((phMin != hMin) | (psMin != sMin) | (pvMin != vMin) | (phMax != hMax) | (psMax != sMax) | (pvMax != vMax)):
        print("(hMin = %d , sMin = %d, vMin = %d), (hMax = %d , sMax = %d, vMax = %d)" % (hMin, sMin, vMin, hMax, sMax, vMax))
        phMin = hMin
        psMin = sMin
        pvMin = vMin
        phMax = hMax
        psMax = sMax
        pvMax = vMax

    # Display result image
    cv2.imshow('image', result)
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break

cv2.destroyAllWindows()
HSV lower/upper color threshold ranges
(hMin = 52 , sMin = 0, vMin = 55), (hMax = 104 , sMax = 255, vMax = 255)
Once you have determined your lower and upper HSV color ranges, you can segment your desired colors like this:
import numpy as np
import cv2
image = cv2.imread('1.png')
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
lower = np.array([52, 0, 55])
upper = np.array([104, 255, 255])
mask = cv2.inRange(hsv, lower, upper)
result = cv2.bitwise_and(image, image, mask=mask)
cv2.imshow('result', result)
cv2.waitKey()