Python OpenCV parse progress bar

UPD: Added working MWE.
I am trying to parse the amount of HP in the game. The idea is that I know the width of the image, so I just measure the width of the filled part of the HP bar and calculate the percentage from that.
Previously it worked well, but the game recently got an update and the colour of the bar changed. I know, just a colour.
Here is my fully working MWE code. You can try it with the source files attached at the end of the post:
import cv2
import numpy as np


def parse_hp(hp_area):
    width = int(hp_area.shape[1] * 5)
    height = int(hp_area.shape[0] * 5)
    dim = (width, height)

    # resize image
    resized = cv2.resize(hp_area, dim, interpolation=cv2.INTER_AREA)

    # Color segmentation
    hsv = cv2.cvtColor(resized, cv2.COLOR_BGR2HSV)
    lower_red = np.array([0, 50, 50])
    upper_red = np.array([5, 255, 255])
    mask = cv2.inRange(hsv, lower_red, upper_red)
    res = cv2.bitwise_and(resized, resized, mask=mask)

    # Contour extraction
    imgray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(imgray, (5, 5), 0)
    ret, thresholded = cv2.threshold(blurred, 50, 255, 0)
    contours, h = cv2.findContours(thresholded, 1, 2)

    if contours:
        cnt = contours[0]
        approx = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
        if cv2.contourArea(cnt) > 25:  # to discard noise from the color segmentation
            contour_poly = cv2.approxPolyDP(cnt, 3, True)
            center, radius = cv2.minEnclosingCircle(contour_poly)
            cv2.circle(resized, (int(center[0]), int(center[1])), int(radius), (0, 255, 0), 2)
            cv2.imshow("Found limits", resized)
            cv2.waitKey(0)
            resized_width = int(resized.shape[1])
            hp_width = radius * 2
            return int(hp_width * 100 / resized_width)
        else:
            return -1


if __name__ == "__main__":
    hp_area = cv2.imread("/Users/vetalll/Documents/Cv2Working.png")
    result = parse_hp(hp_area)
    print(result)
I tried to use these values, but it does not work; OpenCV does not recognize them:
lower_red = np.array([355, 44, 45])
upper_red = np.array([356, 41, 43])
The colour is now a little bit purple. I know OpenCV uses HSV colour, but I really cannot figure out how to adjust the values to make it work.
Working image:
Not working image:
Source images can be grabbed here:
https://drive.google.com/file/d/1dJ4ePw_7oJov_OU5n6IO6fwdm_N3W5k2/view?usp=sharing
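
A quick note on those rejected values: OpenCV's cv2.inRange expects H in 0-179 and S/V in 0-255, so HSV values read from a colour picker that reports degrees (0-360) and percent (0-100) have to be rescaled first, and the upper bound must not end up smaller than the lower one. A minimal sketch of that conversion, assuming the picked values really are degrees/percent:

import numpy as np

# Hypothetical helper: rescale picker-style HSV (H 0-360, S/V 0-100)
# to OpenCV's HSV ranges (H 0-179, S/V 0-255).
def to_opencv_hsv(h_deg, s_pct, v_pct):
    return np.array([round(h_deg / 2), round(s_pct * 255 / 100), round(v_pct * 255 / 100)])

print(to_opencv_hsv(355, 44, 45))  # roughly [178 112 115]

With values on that scale, a band around the upper end of the hue wheel (for example roughly [165, 80, 80] to [179, 255, 255]) might cover the new purple-red colour, though the exact bounds would need tuning against the screenshots.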

After a bit of guessing, I came up with these values. Hope they work:
import cv2
import numpy as np


def parse_hp(hp_area):
    width = int(hp_area.shape[1] * 5)
    height = int(hp_area.shape[0] * 5)
    dim = (width, height)

    # resize image
    resized = cv2.resize(hp_area, dim, interpolation=cv2.INTER_AREA)

    # Color segmentation
    hsv = cv2.cvtColor(resized, cv2.COLOR_RGB2HSV)
    lower_red = np.array([120, 170, 0])
    upper_red = np.array([245, 255, 255])
    mask = cv2.inRange(hsv, lower_red, upper_red)
    res = cv2.bitwise_and(resized, resized, mask=mask)

    # Contour extraction
    imgray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(imgray, (5, 5), 0)
    ret, thresholded = cv2.threshold(blurred, 50, 255, 0)
    contours, h = cv2.findContours(thresholded, 1, 2)

    if contours:
        cnt = contours[0]
        approx = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
        if cv2.contourArea(cnt) > 25:  # to discard noise from the color segmentation
            contour_poly = cv2.approxPolyDP(cnt, 3, True)
            center, radius = cv2.minEnclosingCircle(contour_poly)
            cv2.circle(resized, (int(center[0]), int(center[1])), int(radius), (0, 255, 0), 2)
            cv2.imshow("Found limits", resized)
            cv2.waitKey(0)
            resized_width = int(resized.shape[1])
            hp_width = radius * 2
            return int(hp_width * 100 / resized_width)
        else:
            return -1


if __name__ == "__main__":
    hp_area = cv2.imread("Cv2NotWorking.png")
    result = parse_hp(hp_area)
    print(result)
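
If more guessing is ever needed (say, after another palette change), one way to avoid it is to print the HSV value of a pixel known to lie inside the filled part of the bar and build the inRange bounds around it. A small sketch, assuming the same "Cv2NotWorking.png" as above; the pixel coordinates are made up and would need to point inside the bar:

import cv2

img = cv2.imread("Cv2NotWorking.png")
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# Pick a row/column inside the filled part of the bar (example coordinates),
# then pad H by about +-10 and relax S/V to obtain the lower/upper bounds.
print(hsv[10, 20])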

Related

Unable to count objects in image using opencv python

I used the code below to count the cigarettes in the image below using OpenCV and Python, but it did not work: it only finds a few of them. I don't know what the issue is. Please help me.
import numpy as np
import cv2
from PIL import Image
import sys

# Note: raw strings (r'...') keep the backslashes in the Windows paths
# from being treated as escape sequences.
Path = r'D:\Artificial intelligence\Phyton'
filename = 'Test.png'
img = cv2.imread(r'D:\Artificial intelligence\Phyton\Test.png')
img1 = cv2.imread(r'D:\Artificial intelligence\Phyton\Test.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, 240, 255, cv2.THRESH_BINARY)
img[thresh == 255] = 0
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
erosion = cv2.erode(img, kernel, iterations=1)
cv2.imwrite(r'D:\Artificial intelligence\Phyton\Test112.png', erosion)


def findcircles(img, contours):
    minArea = 300
    minCircleRatio = 0.5
    for contour in contours:
        (x, y), radius = cv2.minEnclosingCircle(contour)
        center = (int(x), int(y))
        radius = int(radius)
        if radius > 5:
            continue
        cv2.circle(img1, center, 1, (191, 255, 0), 2)
        cv2.imwrite(r'D:\Artificial intelligence\Phyton\Test11234.png', img1)


img = cv2.imread(r'D:\Artificial intelligence\Phyton\Test112.png')
cv2.imwrite(r'D:\Artificial intelligence\Phyton\org.png', img)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, threshold = cv2.threshold(gray, 199, 255, cv2.THRESH_BINARY_INV)
cv2.imwrite(r'D:\Artificial intelligence\Phyton\threshold.png', threshold)
blur = cv2.medianBlur(gray, 7)
cv2.imwrite(r'D:\Artificial intelligence\Phyton\blur.png', blur)
laplacian = cv2.Laplacian(blur, -1, ksize=5, delta=-50)
cv2.imwrite(r'D:\Artificial intelligence\Phyton\laplacian.png', laplacian)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
dilation = cv2.dilate(laplacian, kernel, iterations=1)
cv2.imwrite(r'D:\Artificial intelligence\Phyton\dilation.png', dilation)
result = cv2.subtract(threshold, dilation)
cv2.imwrite(r'D:\Artificial intelligence\Phyton\result.png', result)
contours, hierarchy = cv2.findContours(result, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
findcircles(gray, contours)
Image:
My result:
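
No answer is shown for this one, but one possible direction (an assumption, not taken from the post) is to binarize the bright filter tips with Otsu's method and count the resulting connected components instead of relying on edge subtraction:

import cv2

# Sketch only: assumes the tips are clearly brighter than the background in 'Test.png'.
img = cv2.imread('Test.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# Remove thin bridges between touching cigarettes before counting.
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
binary = cv2.morphologyEx(binary, cv2.MORPH_OPEN, kernel)
count, labels, stats, centroids = cv2.connectedComponentsWithStats(binary)
# Label 0 is the background; drop tiny specks by area.
tips = [i for i in range(1, count) if stats[i, cv2.CC_STAT_AREA] > 50]
print(len(tips))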

Python opencv Defect detection

I am trying to detect foreign substances inside a round circle using OpenCV.
However, if the threshold is increased, the area around the circle is distorted, and if the threshold is decreased, the foreign matter cannot be detected. Is there a way to detect the foreign matter reliably while preserving the circle?
import cv2
import cv2 as cv
import numpy as np


def roiSetting():
    img = cv2.imread('img.jpg')
    x = 100; y = 10;
    w = 700; h = 600;
    img_roi = img[400:1250, 600:1450]  # [colStart:colEnd, rowStart:rowEnd]
    cv2.imwrite("3_roi_img.jpg", img_roi)
    return img_roi


def imgCvt():
    print("imgCvt in")
    roi_img = roiSetting()
    bgr_img = cv2.cvtColor(roi_img, cv2.COLOR_RGB2BGR)  # RGB->BGR convert
    bila_img = cv2.bilateralFilter(bgr_img, 9, 100, 100)  # bilateral filter apply
    r, g, b = cv2.split(bila_img)  # split
    merge_img = cv2.merge((r, g, b))  # merge
    gray_img = cv2.cvtColor(merge_img, cv2.COLOR_RGB2GRAY)  # RGB->GRAY convert
    ret, thresh_img = cv2.threshold(gray_img, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)  # threshold apply
    equalize_img = cv2.equalizeHist(thresh_img)  # histogram equalization
    canny_img = cv2.Canny(equalize_img, 250, 255)  # Canny edge
    kernel = np.ones((2, 2), np.uint8)
    dilate_img = cv2.dilate(canny_img, kernel, iterations=1)  # img dilate
    new, contours, hierarchy = cv2.findContours(dilate_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)[:10]  # sorting
    print(contours)
    c = contours[0]
    mask = np.zeros(roi_img.shape, np.uint8)  # mask create (roi_img.shape)
    cont_image = cv2.drawContours(mask, [c], -1, (255, 255, 255), -1)  # background remove
    con_gray_img = cv2.cvtColor(cont_image, cv2.COLOR_BGR2GRAY)
    ret, thresh1 = cv2.threshold(con_gray_img, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
    last_img = cv2.bitwise_and(roi_img, roi_img, mask=thresh1)
    kernel = np.ones((3, 3), np.uint8)
    src = cv2.erode(last_img, kernel, iterations=2)
    gray = cv2.cvtColor(src, cv2.COLOR_RGB2GRAY)
    rt1, dst1 = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
    rt1, dst2 = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY)
    _, contour1, _ = cv.findContours(dst2, cv2.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    for cnt in contour1:
        cv2.drawContours(roi_img, [cnt], 0, (255, 0, 0), 2, cv2.LINE_8)
    cv2.imshow('cont_roi.jpg', roi_img)
    cv2.waitKey()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    imgCvt()
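
No answer is shown here either; one possible direction (an assumption, not from the post) is to replace the single global threshold with an adaptive one, which reacts to local brightness and can expose small dark defects without distorting the circle boundary:

import cv2

# Sketch only, reusing the 'img.jpg' filename from the question.
gray = cv2.cvtColor(cv2.imread('img.jpg'), cv2.COLOR_BGR2GRAY)
blur = cv2.medianBlur(gray, 5)
# Each pixel is compared against a Gaussian-weighted local mean instead of a fixed value.
defects = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                cv2.THRESH_BINARY_INV, 31, 5)
cv2.imshow('defect candidates', defects)
cv2.waitKey(0)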

detect a single shape and colour in realtime using opencv

I'm working on a project that requires detecting a red rectangle in real time. So far I've managed to detect the colour and the shape together, but the code can't differentiate the rectangle from other objects that are also red.
How might I go about doing this?
import cv2
import numpy as np


def nothing():
    pass


cap = cv2.VideoCapture(0)
font = cv2.FONT_HERSHEY_COMPLEX

while True:
    _, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # Red color
    low_red = np.array([175, 50, 20])
    high_red = np.array([180, 255, 255])
    mask1 = cv2.inRange(hsv, low_red, high_red)
    kernel = np.ones((5, 5), np.uint8)
    mask2 = cv2.erode(mask1, kernel)
    red = cv2.bitwise_and(frame, frame, mask=mask2)

    contours, _ = cv2.findContours(mask2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        approx = cv2.approxPolyDP(cnt, 0.02 * cv2.arcLength(cnt, True), True)
        hull = cv2.convexHull(cnt)
        x = approx.ravel()[0]
        y = approx.ravel()[1]
        if area > 400:
            cv2.drawContours(frame, [approx], 0, (0, 0, 0), 5)
            if len(approx) == 4:
                cv2.putText(frame, "Rectangle", (x, y), font, 1, (0, 0, 0))

    edges = cv2.Canny(frame, 100, 200)
    _, threshold_binary = cv2.threshold(frame, 128, 255, cv2.THRESH_BINARY)
    _, threshold_binary_inv = cv2.threshold(frame, 128, 255, cv2.THRESH_BINARY_INV)
    _, threshold_trunc = cv2.threshold(frame, 128, 255, cv2.THRESH_TRUNC)
    _, threshold_to_zero = cv2.threshold(frame, 12, 255, cv2.THRESH_TOZERO)

    cv2.imshow("Frame", frame)
    cv2.imshow('edges', edges)
    cv2.imshow('red', red)
    cv2.imshow("mask", mask1)

    key = cv2.waitKey(1)
    if key == 27:
        cap.release()
        cv2.destroyAllWindows()
        break
Output image:
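
One way to separate the rectangle from other red blobs (a sketch under assumptions, not taken from the post) is to combine the existing colour mask with stricter geometry checks, for example requiring the contour to fill most of its rotated bounding box:

import cv2

# Hypothetical filter to plug into the contour loop above.
def is_red_rectangle(cnt, min_area=400, fill_thresh=0.85):
    area = cv2.contourArea(cnt)
    if area < min_area:
        return False
    approx = cv2.approxPolyDP(cnt, 0.02 * cv2.arcLength(cnt, True), True)
    if len(approx) != 4:
        return False
    # A true rectangle covers most of its minAreaRect; irregular red blobs usually do not.
    (w, h) = cv2.minAreaRect(cnt)[1]
    return w * h > 0 and area / (w * h) >= fill_thresh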

Achieving more accurate image warping

Could you help me achieve better paper warping? Right now it looks like this, and as you can see it's really bad: many cells are 50% black and 50% white. Here's my code if it helps:
import cv2
import imutils
from imutils.perspective import four_point_transform

image = cv2.imread("image2.jpg")
ratio = image.shape[0] / 500.0
orig = image.copy()
image = imutils.resize(image, height=500)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 0)
edged = cv2.Canny(gray, 75, 200)
cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]
for c in cnts:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
    if len(approx) == 4:
        screenCnt = approx
        break
cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)
warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)
warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
ret, warped = cv2.threshold(warped, 160, 255, cv2.THRESH_BINARY)
Below is your code with the skew-detection part added:
import numpy as np
import cv2
from imutils.perspective import four_point_transform
import imutils

image = cv2.imread("image2.jpg")
ratio = image.shape[0] / 500.0
orig = image.copy()
image = imutils.resize(image, height=500)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 0)
edged = cv2.Canny(gray, 75, 200)
cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]
for c in cnts:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
    if len(approx) == 4:
        screenCnt = approx
        break
cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)
warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)
warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
warped = cv2.threshold(warped, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]

# Skew detection: estimate the rotation angle of the bright pixels and undo it.
coords = np.column_stack(np.where(warped > 0))
angle = cv2.minAreaRect(coords)[-1]
if angle < -45:
    angle = -(90 + angle)
else:
    angle = -angle
print(angle)
(h, w) = warped.shape[:2]
center = (w // 2, h // 2)
M = cv2.getRotationMatrix2D(center, angle, 1.0)
rotated = cv2.warpAffine(warped, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)
cv2.imshow('Corrected', imutils.resize(rotated, height=650))
cv2.waitKey(0)
In this case it didn't identify any skew in the warped image, but after warping it is sometimes worth checking for residual skew caused by a not-so-accurate warp.
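
As for the half-black cells themselves, a hedged tweak (an assumption, not part of the answer above) is to swap the fixed threshold of 160 for an adaptive threshold, so uneven lighting across the page no longer flips whole cells to black:

import cv2

# Sketch: 'warped.jpg' is a hypothetical saved copy of the grayscale warped page
# produced above, before the fixed 160 threshold is applied.
warped = cv2.imread('warped.jpg', cv2.IMREAD_GRAYSCALE)
binarized = cv2.adaptiveThreshold(warped, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                  cv2.THRESH_BINARY, 21, 10)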

To detect shapes in an irregular image

I am trying to find the circles and the rectangle (or square) in an irregular object using contours and edge detection, but I am not getting the output properly.
I tried changing the Canny values and the epsilon of the contour approximation, but was not able to detect them.
Another difficulty I am facing is that there are a lot of handwritten characters on the metal object, so my code detects those as shapes too.
Can anyone please help me detect the required shapes on this object using opencv-python?
Metal object
import imutils
import cv2
import numpy as np
import matplotlib.pyplot as plt

image = cv2.imread('part1.jpg')
#image = cv2.imread('C:\Python27\plates\plates2.1.jpg')  # epsilon==0.04, len=5,6
#image = cv2.imread('C:\Python27\plates\plates4.jpg')
#image = cv2.imread('C:\Python27\plates\plates1.jpg')
#image = cv2.imread('C:\Python27\plates\plates3.jpg')  # epsilon=0.0370, len=5
#image = cv2.imread('C:\Python27\plates\plates5.jpg')  # change the epsilon to 0.01
#image = cv2.imread('C:\Python27\plates\plates6.jpg')  # not working properly
cv2.namedWindow('Image')

#for angle in xrange(0, 360, 90):
#    # rotate the image and display it
#    image = imutils.rotate(image, angle=angle)
#gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
#blurred = cv2.GaussianBlur(gray, (5, 5), 0)
#edges = cv2.Canny(image, 200, 650)  # plates3.jpg, plates1.jpg, plates5.jpg
#edges = cv2.Canny(image, 200, 500)  # plates4.jpg
#edges = cv2.Canny(image, 200, 589)  # plates2.1.jpg
#edges = cv2.Canny(image, 100, 450)
edges = cv2.Canny(image, 300, 589)
kernel = np.ones((5, 5), np.uint8)
#thresh = cv2.erode(edges, kernel, iterations=1)
#thresh = cv2.dilate(edges, kernel, iterations=1)
#thresh = cv2.morphologyEx(edges, cv2.MORPH_OPEN, kernel)
thresh = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, kernel)
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
sd = ShapeDetector()
print(len(cnts))

for c in cnts:
    shape = "unidentified"
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.0373 * peri, True)
    if len(approx) == 4:
        (x, y, w, h) = cv2.boundingRect(approx)
        #ar = w / float(h)
        #shape = "slots" if ar >= 0.95 and ar <= 1.05 else "slots"
        shape = "slots"
        #cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
        rect = cv2.minAreaRect(c)
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        cv2.drawContours(image, [box], 0, (0, 0, 255), 2)
        #cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(image, shape, (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 4)
    elif len(approx) == 2:
        shape = "nothing"
        (x, y), radius = cv2.minEnclosingCircle(c)
        center = (int(x), int(y))
        radius = int(radius)
        #cv2.circle(image, center, radius, (0, 255, 0), 2)
        #cv2.putText(image, shape, (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 4)
    elif len(approx) == 5:
        shape = "nothing"
    elif len(approx) == 3:
        shape = "nothing"
    elif len(approx) == 6:
        shape = "nothing"
    else:
        shape = "c" + str(len(approx))
        (x, y), radius = cv2.minEnclosingCircle(c)
        center = (int(x), int(y))
        radius = int(radius)
        cv2.circle(image, center, radius, (0, 255, 0), 2)
        cv2.putText(image, shape, (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

cv2.imshow("Image", image)
cv2.imshow("edges", thresh)
cv2.waitKey(0)
cv2.destroyAllWindows()
Use binarization. You will get blobs that you can discriminate by size, location and other geometric criteria.
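
A minimal sketch of that suggestion, assuming the circular holes and slots are darker than the surrounding metal in 'part1.jpg' (the thresholds and size limits here are made up and would need tuning):

import cv2
import numpy as np

img = cv2.imread('part1.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Otsu binarization; THRESH_BINARY_INV turns the dark holes/slots into white blobs.
_, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
n, labels, stats, centroids = cv2.connectedComponentsWithStats(binary)
for i in range(1, n):  # label 0 is the background
    x, y, w, h, area = stats[i]
    if area < 100:  # drop handwriting strokes and noise by size
        continue
    extent = area / float(w * h)   # filled circle ~0.78, filled rectangle ~1.0
    aspect = w / float(h)
    if 0.9 < aspect < 1.1 and extent > 0.7:
        label = "circle" if extent < 0.9 else "square"
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(img, label, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
cv2.imshow("blobs", img)
cv2.waitKey(0)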
