Placing a PNG image inside a tracked ball, OpenCV, Python

I am using OpenCV to track a ball in webcam footage. I combined two scripts I found on the internet, and now I would like to take a PNG image, place it resized at the center of the tracked circle, and have it scale proportionally with the bounding circle as the ball is tracked.
I tried the Sublime Robots moustache tutorial, but I am getting a bit lost with the region of interest and resizing the image. Any help would be much appreciated.
import numpy as np
import cv2
import time
import imutils
import math
import os

uniImg = cv2.imread('unicorn1.png')


def nothing(*arg):
    pass


cap = cv2.VideoCapture(0)

# Initial HSV GUI slider values to load on program start.
icol = (36, 202, 59, 71, 255, 255)     # Green
icol = (18, 0, 196, 36, 255, 255)      # Yellow
icol = (89, 0, 0, 125, 255, 255)       # Blue
icol = (0, 100, 80, 10, 255, 255)      # Red
icol = (104, 117, 222, 121, 255, 255)  # test
icol = (0, 0, 0, 255, 255, 255)        # New start
icol2 = (0, 0, 0, 255, 255, 20)        # New start
icol3 = (0, 0, 0, 255, 255, 20)        # New start

cv2.namedWindow('colorTest')
# Lower range colour sliders.
cv2.createTrackbar('lowHue', 'colorTest', icol[0], 255, nothing)
cv2.createTrackbar('lowSat', 'colorTest', icol[1], 255, nothing)
cv2.createTrackbar('lowVal', 'colorTest', icol[2], 255, nothing)
# Higher range colour sliders.
cv2.createTrackbar('highHue', 'colorTest', icol[3], 255, nothing)
cv2.createTrackbar('highSat', 'colorTest', icol[4], 255, nothing)
cv2.createTrackbar('highVal', 'colorTest', icol[5], 255, nothing)
cv2.createTrackbar('erode', 'colorTest', icol2[5], 20, nothing)
cv2.createTrackbar('dilate', 'colorTest', icol3[5], 20, nothing)

while(1):
    timeCheck = time.time()

    # Get HSV values from the GUI sliders.
    lowHue = cv2.getTrackbarPos('lowHue', 'colorTest')
    lowSat = cv2.getTrackbarPos('lowSat', 'colorTest')
    lowVal = cv2.getTrackbarPos('lowVal', 'colorTest')
    highHue = cv2.getTrackbarPos('highHue', 'colorTest')
    highSat = cv2.getTrackbarPos('highSat', 'colorTest')
    highVal = cv2.getTrackbarPos('highVal', 'colorTest')
    erode = cv2.getTrackbarPos('erode', 'colorTest')
    dilate = cv2.getTrackbarPos('dilate', 'colorTest')

    _, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # Define the colour range to track in HSV; change it according to your need.
    colorLow = np.array([lowHue, lowSat, lowVal])
    colorHigh = np.array([highHue, highSat, highVal])

    # Threshold the HSV image to keep only the selected colour.
    mask = cv2.inRange(hsv, colorLow, colorHigh)
    mask = cv2.erode(mask, None, iterations=erode)
    mask = cv2.dilate(mask, None, iterations=dilate)

    # Bitwise-AND mask and original image.
    res = cv2.bitwise_and(frame, frame, mask=mask)

    # Find contours in the mask and initialize the current (x, y) center of the ball.
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]
    center = None

    # Only proceed if at least one contour was found.
    if len(cnts) > 0:
        # Find the largest contour in the mask, then use it to compute the
        # minimum enclosing circle and centroid.
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

        # Only proceed if the radius meets a minimum size.
        if radius > 10:
            # Draw the circle and centroid on the frame.
            cir = cv2.circle(frame, (int(x), int(y)), int(radius),
                             (0, 255, 255), 2)

    cv2.imshow('frame', frame)
    cv2.imshow('mask', mask)
    cv2.imshow('res', res)

    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break
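A minimal sketch of the overlay step, not from the original post: it assumes 'unicorn1.png' has an alpha channel, resizes it to the tracked circle's diameter, and alpha-blends it into the frame centred on (x, y). The helper name and the clipping logic are my own.

import cv2
import numpy as np

uniImg = cv2.imread('unicorn1.png', cv2.IMREAD_UNCHANGED)  # keep the alpha channel


def overlay_png(frame, png, cx, cy, radius):
    d = max(int(2 * radius), 1)                    # target diameter in pixels
    resized = cv2.resize(png, (d, d), interpolation=cv2.INTER_AREA)
    x1, y1 = int(cx) - d // 2, int(cy) - d // 2    # top-left corner of the overlay
    x2, y2 = x1 + d, y1 + d
    # Clip to the frame so a partially off-screen circle does not crash.
    fx1, fy1 = max(x1, 0), max(y1, 0)
    fx2, fy2 = min(x2, frame.shape[1]), min(y2, frame.shape[0])
    if fx1 >= fx2 or fy1 >= fy2:
        return frame
    crop = resized[fy1 - y1:fy2 - y1, fx1 - x1:fx2 - x1]
    roi = frame[fy1:fy2, fx1:fx2]                  # view into the frame
    if crop.shape[2] == 4:                         # PNG with transparency
        alpha = crop[:, :, 3:4].astype(float) / 255.0
        roi[:] = (alpha * crop[:, :, :3] + (1 - alpha) * roi).astype(np.uint8)
    else:                                          # no alpha channel: plain paste
        roi[:] = crop
    return frame


# Inside the tracking loop, right after the radius check:
# if radius > 10:
#     frame = overlay_png(frame, uniImg, x, y, radius)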

Related

OpenCV: Why does the contour remain if the object has disappeared?

I found a pre-written object detection program on the internet. I would like to use it to build a plastic bottle cap sensor for an assembly line that tells me the cap's size. The problem is that when I move the cap off the conveyor belt, the coordinates taken from the contour stay where they were. Any ideas?
import cv2
from object_detector import *
import numpy as np
import time

# Load Object Detector
detector = HomogeneousBgDetector()

# Load Cap
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 800)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 600)

while True:
    _, img = cap.read()
    img = img[0:600, 60:700]

    contours = []
    contours = detector.detect_objects(img)

    # Draw objects boundaries
    for cnt in contours:
        # Get rect
        rect = cv2.minAreaRect(cnt)
        (x, y), (w, h), angle = rect

        # Get Width and Height of the Objects by applying the Ratio pixel to cm
        object_width = w
        object_height = h

        # Display rectangle
        box = cv2.boxPoints(rect)
        box = np.int0(box)

    cv2.circle(img, (int(x), int(y)), 5, (0, 0, 255), -1)
    cv2.polylines(img, [box], True, (255, 0, 0), 2)
    cv2.putText(img, "Width {} px".format(round(object_width, 1)), (int(x - 100), int(y - 20)), cv2.FONT_HERSHEY_PLAIN, 2, (100, 200, 0), 2)
    cv2.putText(img, "Height {} px".format(round(object_height, 1)), (int(x - 100), int(y + 15)), cv2.FONT_HERSHEY_PLAIN, 2, (100, 200, 0), 2)

    cv2.imshow("Image", img)
    key = cv2.waitKey(1)
    if key == 27:
        break

cap.release()
cv2.destroyAllWindows()
object_detector.py
import cv2


class HomogeneousBgDetector():
    def __init__(self):
        pass

    def detect_objects(self, frame):
        def difference_of_Gaussians(img, k1, s1, k2, s2):
            b1 = cv2.GaussianBlur(img, (k1, k1), s1)
            b2 = cv2.GaussianBlur(img, (k2, k2), s2)
            return b1 - b2

        # Convert Image to grayscale
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        DoG_img = difference_of_Gaussians(gray, 7, 7, 17, 13)

        # Create a Mask with adaptive threshold
        # mask = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 19, 5)
        mask = cv2.threshold(DoG_img, 130, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]

        # Find contours
        contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cv2.imshow("mask", mask)

        objects_contours = []
        for cnt in contours:
            area = cv2.contourArea(cnt)
            if area > 2000:
                objects_contours.append(cnt)

        return objects_contours
I tried creating the contours variable empty beforehand, but that did not help:
....
    _, img = cap.read()
    img = img[0:600, 60:700]

    contours = []  # here
    contours = detector.detect_objects(img)

    # Draw objects boundaries
    for cnt in contours:
.....
The picture:
Eran and Christoph are right: if no contour is found, the last position remains in the global x and y variables. You probably want to increase the indent of the four lines that draw the circle, the lines and the text, so that they belong to the for loop above:
....
    img = img[0:600, 60:700]
    contours = detector.detect_objects(img)

    # Draw objects boundaries
    for cnt in contours:
        ....

        # Display rectangle
        box = cv2.boxPoints(rect)
        box = np.int0(box)

        # keep indent here
        cv2.circle(img, (int(x), int(y)), 5, (0, 0, 255), -1)
        cv2.polylines(img, [box], True, (255, 0, 0), 2)
        cv2.putText(img, "Width {} px".format(round(object_width, 1)), (int(x - 100), int(y - 20)), cv2.FONT_HERSHEY_PLAIN, 2, (100, 200, 0), 2)
        cv2.putText(img, "Height {} px".format(round(object_height, 1)), (int(x - 100), int(y + 15)), cv2.FONT_HERSHEY_PLAIN, 2, (100, 200, 0), 2)
....
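An alternative, sketched by me rather than taken from the answer: keep the drawing outside the for loop but guard it, so nothing stale is drawn when the detector returns no contours. This fragment goes inside the while loop of the question's script (cv2, np, detector and img as above).

    contours = detector.detect_objects(img)

    if contours:  # only draw when the detector actually found something
        cnt = max(contours, key=cv2.contourArea)
        rect = cv2.minAreaRect(cnt)
        (x, y), (w, h), angle = rect
        box = cv2.boxPoints(rect).astype(np.int32)
        cv2.circle(img, (int(x), int(y)), 5, (0, 0, 255), -1)
        cv2.polylines(img, [box], True, (255, 0, 0), 2)

    cv2.imshow("Image", img)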

Python OpenCV parse progress bar

UPD: Added working MWE.
I am trying to parse the amount of HP in the game. The idea is that I know the width of the image, so I just measure the width of the filled part of the HP bar and calculate the percentage from that.
Previously this worked well, but the game recently got an update and the colour changed. I know, it is just a colour.
Here is my fully working MWE code; you can try it with the source files attached at the end of the post:
import cv2
import numpy as np


def parse_hp(hp_area):
    width = int(hp_area.shape[1] * 5)
    height = int(hp_area.shape[0] * 5)
    dim = (width, height)

    # resize image
    resized = cv2.resize(hp_area, dim, interpolation=cv2.INTER_AREA)

    # Color segmentation
    hsv = cv2.cvtColor(resized, cv2.COLOR_BGR2HSV)
    lower_red = np.array([0, 50, 50])
    upper_red = np.array([5, 255, 255])
    mask = cv2.inRange(hsv, lower_red, upper_red)
    res = cv2.bitwise_and(resized, resized, mask=mask)

    # Contour extraction
    imgray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(imgray, (5, 5), 0)
    ret, thresholded = cv2.threshold(blurred, 50, 255, 0)
    contours, h = cv2.findContours(thresholded, 1, 2)

    if contours:
        cnt = contours[0]
        approx = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
        if cv2.contourArea(cnt) > 25:  # to discard noise from the color segmentation
            contour_poly = cv2.approxPolyDP(cnt, 3, True)
            center, radius = cv2.minEnclosingCircle(contour_poly)
            cv2.circle(resized, (int(center[0]), int(center[1])), int(radius), (0, 255, 0), 2)
            cv2.imshow("Found limits", resized)
            cv2.waitKey(0)
            resized_width = int(resized.shape[1])
            hp_width = radius * 2
            return int(hp_width * 100 / resized_width)
    else:
        return -1


if __name__ == "__main__":
    hp_area = cv2.imread("/Users/vetalll/Documents/Cv2Working.png")
    result = parse_hp(hp_area)
    print(result)
I tried to use these values, but it does not work; OpenCV does not recognize them:
lower_red = np.array([355, 44, 45])
upper_red = np.array([356, 41, 43])
And now the colour is a little bit purple. I know that it uses HSV colour, but I am really not able to figure out how to adjust it to make it work.
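For reference, OpenCV scales hue to 0-179 and saturation/value to 0-255, so values taken from a 0-360 / 0-100 colour picker (like 355, 44, 45 above) have to be rescaled first. A small sketch of my own, not from the post, that derives an inRange window from one sampled BGR pixel (the pixel value below is made up):

import cv2
import numpy as np

# Convert one sampled BGR pixel to OpenCV's HSV scale.
bgr_sample = np.uint8([[[180, 40, 160]]])          # hypothetical purple-ish pixel
h, s, v = cv2.cvtColor(bgr_sample, cv2.COLOR_BGR2HSV)[0, 0]
print(h, s, v)

# Build a tolerance window around the sampled hue for cv2.inRange.
lower = np.array([max(int(h) - 10, 0), 50, 50])
upper = np.array([min(int(h) + 10, 179), 255, 255])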
Working image:
Not working image:
Source images can be grabbed here:
https://drive.google.com/file/d/1dJ4ePw_7oJov_OU5n6IO6fwdm_N3W5k2/view?usp=sharing
After a bit of guessing, I came up with these values. Hope they work:
import cv2
import numpy as np


def parse_hp(hp_area):
    width = int(hp_area.shape[1] * 5)
    height = int(hp_area.shape[0] * 5)
    dim = (width, height)

    # resize image
    resized = cv2.resize(hp_area, dim, interpolation=cv2.INTER_AREA)

    # Color segmentation
    hsv = cv2.cvtColor(resized, cv2.COLOR_RGB2HSV)
    lower_red = np.array([120, 170, 0])
    upper_red = np.array([245, 255, 255])
    mask = cv2.inRange(hsv, lower_red, upper_red)
    res = cv2.bitwise_and(resized, resized, mask=mask)

    # Contour extraction
    imgray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(imgray, (5, 5), 0)
    ret, thresholded = cv2.threshold(blurred, 50, 255, 0)
    contours, h = cv2.findContours(thresholded, 1, 2)

    if contours:
        cnt = contours[0]
        approx = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
        if cv2.contourArea(cnt) > 25:  # to discard noise from the color segmentation
            contour_poly = cv2.approxPolyDP(cnt, 3, True)
            center, radius = cv2.minEnclosingCircle(contour_poly)
            cv2.circle(resized, (int(center[0]), int(center[1])), int(radius), (0, 255, 0), 2)
            cv2.imshow("Found limits", resized)
            cv2.waitKey(0)
            resized_width = int(resized.shape[1])
            hp_width = radius * 2
            return int(hp_width * 100 / resized_width)
    else:
        return -1


if __name__ == "__main__":
    hp_area = cv2.imread("Cv2NotWorking.png")
    result = parse_hp(hp_area)
    print(result)

Python OpenCV defect detection

I am trying to detect foreign substances inside a circle using OpenCV.
However, if the threshold is increased, the area around the circle gets distorted, and if it is decreased, the foreign matter is not detected. Is there a way to detect the foreign matter well while keeping the circle intact?
import cv2
import cv2 as cv
import numpy as np


def roiSetting():
    img = cv2.imread('img.jpg')
    x = 100; y = 10;
    w = 700; h = 600;
    img_roi = img[400:1250, 600:1450]  # [rowStart:rowEnd, colStart:colEnd]
    cv2.imwrite("3_roi_img.jpg", img_roi)
    return img_roi


def imgCvt():
    print("imgCvt in")
    roi_img = roiSetting()
    bgr_img = cv2.cvtColor(roi_img, cv2.COLOR_RGB2BGR)        # RGB->BGR convert
    bila_img = cv2.bilateralFilter(bgr_img, 9, 100, 100)      # bilateral filter apply
    r, g, b = cv2.split(bila_img)                             # split
    merge_img = cv2.merge((r, g, b))                          # merge
    gray_img = cv2.cvtColor(merge_img, cv2.COLOR_RGB2GRAY)    # RGB->GRAY convert
    ret, thresh_img = cv2.threshold(gray_img, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)  # threshold apply
    equalize_img = cv2.equalizeHist(thresh_img)               # histogram equalization
    canny_img = cv2.Canny(equalize_img, 250, 255)             # Canny edge
    kernel = np.ones((2, 2), np.uint8)
    dilate_img = cv2.dilate(canny_img, kernel, iterations=1)  # img dilate

    new, contours, hierarchy = cv2.findContours(dilate_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)[:10]  # sorting
    print(contours)
    c = contours[0]

    mask = np.zeros(roi_img.shape, np.uint8)                  # mask create (roi_img.shape)
    cont_image = cv2.drawContours(mask, [c], -1, (255, 255, 255), -1)  # background remove
    con_gray_img = cv2.cvtColor(cont_image, cv2.COLOR_BGR2GRAY)
    ret, thresh1 = cv2.threshold(con_gray_img, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
    last_img = cv2.bitwise_and(roi_img, roi_img, mask=thresh1)

    kernel = np.ones((3, 3), np.uint8)
    src = cv2.erode(last_img, kernel, iterations=2)
    gray = cv2.cvtColor(src, cv2.COLOR_RGB2GRAY)
    rt1, dst1 = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
    rt1, dst2 = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY)

    _, contour1, _ = cv.findContours(dst2, cv2.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    for cnt in contour1:
        cv2.drawContours(roi_img, [cnt], 0, (255, 0, 0), 2, cv2.LINE_8)

    cv2.imshow('cont_roi.jpg', roi_img)
    cv2.waitKey()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    imgCvt()
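One possible direction, sketched here as my own suggestion rather than a known fix (OpenCV 4-style findContours; the thresholds and the 0.9 shrink factor are guesses to tune): isolate the disc first, shrink its mask a little so the distorted rim never enters the threshold, and only then look for dark foreign matter inside.

import cv2
import numpy as np

img = cv2.imread('img.jpg')
roi = img[400:1250, 600:1450]
gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)

# Find the disc itself with a loose Otsu threshold and take the largest contour.
_, disc = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
contours, _ = cv2.findContours(disc, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
(cx, cy), r = cv2.minEnclosingCircle(max(contours, key=cv2.contourArea))

# Mask only the interior of the disc, shrunk so the rim is excluded.
mask = np.zeros(gray.shape, np.uint8)
cv2.circle(mask, (int(cx), int(cy)), int(r * 0.9), 255, -1)

# Threshold for dark foreign matter inside the mask only (150 is a placeholder).
_, defects = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY_INV)
defects = cv2.bitwise_and(defects, defects, mask=mask)

cnts, _ = cv2.findContours(defects, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for cnt in cnts:
    if cv2.contourArea(cnt) > 20:        # discard speckle noise
        cv2.drawContours(roi, [cnt], -1, (255, 0, 0), 2)

cv2.imshow('defects', roi)
cv2.waitKey(0)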

Detect a single shape and colour in real time using OpenCV

I'm working on a project that requires me to detect a red rectangle in real time. So far I've managed to get the colour and the shape detected together, but the code can't differentiate the rectangle from other objects that are red.
How might I go about doing this?
import cv2
import numpy as np


def nothing():
    pass


cap = cv2.VideoCapture(0)
font = cv2.FONT_HERSHEY_COMPLEX

while True:
    _, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # Red color
    low_red = np.array([175, 50, 20])
    high_red = np.array([180, 255, 255])
    mask1 = cv2.inRange(hsv, low_red, high_red)
    kernel = np.ones((5, 5), np.uint8)
    mask2 = cv2.erode(mask1, kernel)
    red = cv2.bitwise_and(frame, frame, mask=mask2)

    contours, _ = cv2.findContours(mask2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        approx = cv2.approxPolyDP(cnt, 0.02 * cv2.arcLength(cnt, True), True)
        hull = cv2.convexHull(cnt)
        x = approx.ravel()[0]
        y = approx.ravel()[1]
        if area > 400:
            cv2.drawContours(frame, [approx], 0, (0, 0, 0), 5)
            if len(approx) == 4:
                cv2.putText(frame, "Rectangle", (x, y), font, 1, (0, 0, 0))

    edges = cv2.Canny(frame, 100, 200)
    _, threshold_binary = cv2.threshold(frame, 128, 255, cv2.THRESH_BINARY)
    _, threshold_binary_inv = cv2.threshold(frame, 128, 255, cv2.THRESH_BINARY_INV)
    _, threshold_trunc = cv2.threshold(frame, 128, 255, cv2.THRESH_TRUNC)
    _, threshold_to_zero = cv2.threshold(frame, 12, 255, cv2.THRESH_TOZERO)

    cv2.imshow("Frame", frame)
    cv2.imshow('edges', edges)
    cv2.imshow('red', red)
    cv2.imshow("mask", mask1)

    key = cv2.waitKey(1)
    if key == 27:
        cap.release()
        cv2.destroyAllWindows()
        break
Output image:
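One direction, sketched as my own suggestion rather than a confirmed fix (thresholds are placeholders): keep only contours that are both large enough and genuinely rectangular, e.g. four-vertex, convex, and filling most of their rotated bounding box, so irregular red blobs are rejected.

import cv2
import numpy as np


def is_red_rectangle(cnt, min_area=400, min_fill=0.8):
    # Heuristic check: a four-corner, convex contour that fills most of its
    # rotated bounding rectangle; min_area and min_fill are guesses to tune.
    area = cv2.contourArea(cnt)
    if area < min_area:
        return False
    approx = cv2.approxPolyDP(cnt, 0.02 * cv2.arcLength(cnt, True), True)
    if len(approx) != 4 or not cv2.isContourConvex(approx):
        return False
    (w, h) = cv2.minAreaRect(cnt)[1]
    rect_area = w * h
    return rect_area > 0 and area / rect_area >= min_fill


# Inside the frame loop, after building mask2:
# contours, _ = cv2.findContours(mask2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# rects = [c for c in contours if is_red_rectangle(c)]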

To detect shapes in an irregular image

I am trying to find the circles and rectangles (or squares) in an irregular object using contours and edge detection, but I am not getting the output properly.
I tried changing the Canny values and the epsilon (contour approximation), but I was not able to detect them.
Another difficulty I am facing is that there are a lot of hand-written characters on the metal object, so my code detects those as shapes too.
Can anyone please help me detect the required shapes on this object using opencv-python?
Metal object
import imutils
import cv2
import numpy as np
import matplotlib.pyplot as plt

image = cv2.imread('part1.jpg')
#image = cv2.imread('C:\Python27\plates\plates2.1.jpg')  # epsilon==0.04, len=5,6
#image = cv2.imread('C:\Python27\plates\plates4.jpg')
#image = cv2.imread('C:\Python27\plates\plates1.jpg')
#image = cv2.imread('C:\Python27\plates\plates3.jpg')    # epsilon=0.0370, len=5
#image = cv2.imread('C:\Python27\plates\plates5.jpg')    # change the epsilon to 0.01
#image = cv2.imread('C:\Python27\plates\plates6.jpg')    # not working properly

cv2.namedWindow('Image')

#for angle in xrange(0, 360, 90):
#    # rotate the image and display it
#    image = imutils.rotate(image, angle=angle)

#gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
#blurred = cv2.GaussianBlur(gray, (5, 5), 0)
#edges = cv2.Canny(image, 200, 650)  # plates3.jpg, plates1.jpg, plates5.jpg
#edges = cv2.Canny(image, 200, 500)  # plates4.jpg
#edges = cv2.Canny(image, 200, 589)  # plates2.1.jpg
#edges = cv2.Canny(image, 100, 450)
edges = cv2.Canny(image, 300, 589)

kernel = np.ones((5, 5), np.uint8)
#thresh = cv2.erode(edges, kernel, iterations=1)
#thresh = cv2.dilate(edges, kernel, iterations=1)
#thresh = cv2.morphologyEx(edges, cv2.MORPH_OPEN, kernel)
thresh = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, kernel)

cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
sd = ShapeDetector()
print(len(cnts))

for c in cnts:
    shape = "unidentified"
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.0373 * peri, True)
    if len(approx) == 4:
        (x, y, w, h) = cv2.boundingRect(approx)
        #ar = w / float(h)
        #shape = "slots" if ar >= 0.95 and ar <= 1.05 else "slots"
        shape = "slots"
        #cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
        rect = cv2.minAreaRect(c)
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        cv2.drawContours(image, [box], 0, (0, 0, 255), 2)
        #cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(image, shape, (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 4)
    elif len(approx) == 2:
        shape = "nothing"
        (x, y), radius = cv2.minEnclosingCircle(c)
        center = (int(x), int(y))
        radius = int(radius)
        #cv2.circle(image, center, radius, (0, 255, 0), 2)
        #cv2.putText(image, shape, (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 4)
    elif len(approx) == 5:
        shape = "nothing"
    elif len(approx) == 3:
        shape = "nothing"
    elif len(approx) == 6:
        shape = "nothing"
    else:
        shape = "c" + str(len(approx))
        (x, y), radius = cv2.minEnclosingCircle(c)
        center = (int(x), int(y))
        radius = int(radius)
        cv2.circle(image, center, radius, (0, 255, 0), 2)
        cv2.putText(image, shape, (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

cv2.imshow("Image", image)
cv2.imshow("edges", thresh)
cv2.waitKey(0)
cv2.destroyAllWindows()
Use binarization. You will get blobs that you can discriminate by size, location and other geometric criteria.
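For instance, a minimal sketch along those lines, my own interpretation of that suggestion (OpenCV 4-style findContours; the area limits and the circularity cut-off are placeholders to tune):

import cv2
import numpy as np

image = cv2.imread('part1.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Binarize; Otsu picks the threshold automatically.
_, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

# Extract blobs and keep only those whose size and shape look like the
# holes/slots of interest, discarding hand-written marks by size.
contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for c in contours:
    area = cv2.contourArea(c)
    if not 500 < area < 50000:           # reject tiny text marks and the whole plate
        continue
    (x, y), radius = cv2.minEnclosingCircle(c)
    circularity = area / (np.pi * radius ** 2 + 1e-6)
    if circularity > 0.8:                # round blob -> draw as a circle
        cv2.circle(image, (int(x), int(y)), int(radius), (0, 255, 0), 2)
    else:                                # otherwise mark its rotated bounding box
        box = cv2.boxPoints(cv2.minAreaRect(c)).astype(np.int32)
        cv2.drawContours(image, [box], 0, (0, 0, 255), 2)

cv2.imshow("blobs", image)
cv2.waitKey(0)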
