I am trying to draw contours around an image. I can see that the contours are being found, but I am not able to draw the outline. The drawn contour seems to come out in one of the two colors already in the image (black and white) instead of the color I specified.
import cv2
import numpy as np
import matplotlib.pyplot as plt
from skimage import io
%matplotlib inline
im = io.imread('http://matlabtricks.com/images/post-35/man.png')
plt.imshow(im)
imgray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
plt.figure()
plt.imshow(imgray)
#Contoured image
ret,thresh = cv2.threshold(imgray, 120,255,cv2.THRESH_BINARY)
image, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
c_img = cv2.drawContours(image, contours, -1, (0, 255, 0), 1)
plt.figure()
plt.imshow(c_img)
You need to draw on the original image, not on the one returned from findContours(). The following works:
# Contoured image
ret,thresh = cv2.threshold(imgray, 120,255,cv2.THRESH_BINARY)
contours = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)[-2]
for contour in contours:
    cv2.drawContours(im, contour, -1, (0, 255, 0), 3)
plt.figure()
plt.imshow(im)
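(The [-2] index keeps this working across OpenCV versions: in OpenCV 3.x findContours returns image, contours, hierarchy, while in OpenCV 4.x it returns only contours, hierarchy.)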
This is my result:
## Read and convert
img = io.imread('http://matlabtricks.com/images/post-35/man.png')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
## Find outer contours
_, cnts, _ = cv2.findContours(gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
## Draw
canvas = np.zeros_like(img)
cv2.drawContours(canvas, cnts, -1, (0, 255, 0), 1)
plt.imshow(canvas)
I'm using cv2.HoughCircles in Python to try to detect circles in the following image. Here is my code:
import matplotlib.pyplot as plt
import cv2
import numpy as np
image = cv2.imread("image.png")
output = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1.5, 10, param1=20, param2=60, minRadius=0, maxRadius=40)
print(circles)
if circles is not None:
    circles = np.round(circles[0, :]).astype("int")
    for (x, y, r) in circles:
        cv2.circle(output, (x, y), r, (0, 255, 0), 4)
        cv2.rectangle(output, (x - 3, y - 3), (x + 3, y + 3), (0, 128, 255), -1)
plt.subplot(121), plt.imshow(image, cmap="gray"), plt.title("corners")
plt.xticks([]), plt.yticks([])
plt.subplot(122), plt.imshow(output, cmap="gray"), plt.title("corners")
plt.xticks([]), plt.yticks([])
plt.show()
the detected circles
Any insight on why it isn't detecting the circles?
I am attaching an image of the circles I would like to detect; I would like to detect as many circles as possible, like the ones marked in red in the following image.
example of circles I would like to detect
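One thing worth trying (a hedged sketch, not a verified fix): HoughCircles is very sensitive to noise and to its minDist/param2 settings, so a common first step is to blur the grayscale image before the transform and lower the accumulator threshold param2. The parameter values below are illustrative starting points, not tuned values:

import cv2
import numpy as np

image = cv2.imread("image.png")
output = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Median blur suppresses speckle that otherwise produces spurious gradient edges.
blurred = cv2.medianBlur(gray, 5)
# dp, minDist, param1 and param2 are guesses to tune against your image.
circles = cv2.HoughCircles(blurred, cv2.HOUGH_GRADIENT, dp=1.2, minDist=20,
                           param1=50, param2=30, minRadius=5, maxRadius=40)
if circles is not None:
    for (x, y, r) in np.round(circles[0, :]).astype("int"):
        cv2.circle(output, (x, y), r, (0, 255, 0), 2)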
I'm trying to crop a rectangular region from a screenshot. The background of the result must be white, but I'm ending up with black. How can I change that? I also want to make an RGB histogram of the final image, but it seems to plot only a vertical line at zero. Any kind of help is very important! Here is my code:
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
image = cv2.imread(filename = "Screenshot from 2019-11-08 22-02-27.png")
mask = np.zeros(shape = image.shape, dtype = "uint8")
cv2.rectangle(img = mask,
              pt1 = (0, 185), pt2 = (1900, 773),
              color = (255, 255, 255),
              thickness = -1)
maskedImg = cv2.bitwise_and(src1 = image, src2 = mask)
cv2.imwrite("processed.png", maskedImg)
plt.imshow(maskedImg)
plt.show()
plt.hist(maskedImg.ravel(),256,[0,256]); plt.show()
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
image = cv2.imread(filename = "1.png")
mask = np.zeros(shape = image.shape, dtype = "uint8")
cv2.rectangle(img = mask,
              pt1 = (0, 185), pt2 = (1900, 773),
              color = (255, 255, 255),
              thickness = -1)
maskedImg = cv2.bitwise_and(src1 = image, src2 = mask)
maskedImg[np.where((maskedImg==[0,0,0]).all(axis=2))] = [255,255,255]
cv2.imwrite("processed.png", maskedImg)
plt.imshow(maskedImg)
plt.show()
The np.where line above converts the black pixels in the masked image into white pixels.
Original image
Crop Image
# Per-channel RGB histogram of the final image
color = ('b','g','r')
for i,col in enumerate(color):
    histr = cv2.calcHist([maskedImg],[i],None,[256],[0,256])
    plt.plot(histr,color = col)
    plt.xlim([0,256])
plt.show()
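Note that the original plt.hist(maskedImg.ravel(), ...) call lumps all three channels into one histogram, and the masked-out region contributes an enormous number of identical pixels, which is why that plot was dominated by a single vertical spike.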
I started with a set of points from a laser scan, which I plotted as a scatter plot using matplotlib. I then used plt.savefig to be able to open the plot as an image and used OpenCV to find contours around the points. Now I want to find the centers of the contours and plot them as points in the original scatter plot. The problem is that I don't know how to create a mapping between the original scatter plot points and the image pixels. Is there a way to do this? Or another way to mark the centers of the contours in matplotlib?
Note: the reason I need to draw contours is that later I need to use OpenCV's matchShapes function to compare the contours.
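As a side note, matchShapes itself is a one-liner once the contours exist; a minimal sketch, where cnt_a and cnt_b are hypothetical contours taken from two findContours calls:

# Lower return values mean more similar shapes; the final argument is unused.
score = cv2.matchShapes(cnt_a, cnt_b, cv2.CONTOURS_MATCH_I1, 0.0)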
Here are the images from each step:
scatter plot
contours with centers marked in red
Now I basically want to be able to add the red markings from the image to the scatter plot.
Here is my code:
plt.scatter(X[:,0], X[:,1], s=2)
plt.axis('equal')
plt.axis('off')
plt.savefig(name)
plt.clf()
img = cv2.imread(name)
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgGray, 127, 255, 0)
im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
height = img.shape[0]
width = img.shape[1]
blank_image = np.zeros((height,width,3), np.uint8)
cv2.drawContours(blank_image, contours, -1, (255,0,0))
for contour in contours:
    M = cv2.moments(contour)
    cX = int(M["m10"] / M["m00"])
    cY = int(M["m01"] / M["m00"])
    cv2.circle(blank_image, (cX, cY), 2, (0, 0, 255), -1)
cv2.imwrite(name, blank_image)
UPDATE:
Based on suggestions, I looked at matplotlib's transforms and tried the following:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x_coords, y_coords, 'bo', markersize=2)
ax.axis('equal')
ax.axis('off')
height1 = fig.get_figheight()*fig.dpi
width1 = fig.get_figwidth()*fig.dpi
inv = ax.transData.inverted()
plt.savefig('img.png')
img = cv2.imread('img.png')
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgGray, 127, 255, 0)
im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
height = img.shape[0]
width = img.shape[1]
blank_image = np.zeros((height,width,3), np.uint8)
centers_x = []
centers_y = []
for contour in contours:
    M = cv2.moments(contour)
    cX = int(M["m10"] / M["m00"])
    cY = int(M["m01"] / M["m00"])
    centers_x.append(inv.transform((cX, height1-cY))[0])
    centers_y.append(inv.transform((cX, height1-cY))[1])
    cv2.drawContours(blank_image, [contour], -1, (255,0,0),1)
    cv2.circle(blank_image, (cX, cY), 2, (0, 0, 255), -1)
cv2.imwrite("test.png", blank_image)
ax.plot(centers_x, centers_y, 'ro', markersize=4)
plt.show()
This got me close, but it seems like the x coordinates are still slightly off:
new result
I also tried
centers_x.append(inv.transform((width1-cX, height1-cY))[0])
centers_y.append(inv.transform((width1-cX, height1-cY))[1])
but that also didn't work.
FINAL UPDATE: adding plt.tight_layout() solved the problem.
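For reference, here is a self-contained example of mapping a pixel coordinate back into data coordinates with ax.transData.inverted():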
x = np.linspace(0,1,10)
y = 5*x+2
fig, ax = plt.subplots()
ax.scatter(x,y)
height = fig.get_figheight()*fig.dpi
width = fig.get_figwidth()*fig.dpi
# the coordinates in pixel
cX = 147
cY = 142
# we need to invert the y value as matplotlib considers (0,0) to be
# on the lower left, while opencv uses upper left as origin
tX,tY = ax.transData.inverted().transform([cX,height-cY])
ax.scatter(tX,tY, s=50, c='r')
fig.savefig('test.png', dpi=fig.dpi)
I'm trying out OpenCV to do some image processing. Admittedly I'm a noob at this stuff, but I feel like I'm wrapping my brain around it somewhat. I'm using a mask to detect the lighter areas of the image, then running a canny detector and finally a HoughLinesP detection. Code is below. The result I'm getting is:
What I expected (and desire) is more like the image below (notice the red lines on the result):
For what it's worth, my end game is to auto-rotate the image so the receipt is straight. If I'm taking the wrong path entirely, advice would be appreciated.
import cv2
import numpy as np
from matplotlib import pyplot
def detect_lines(img):
    temp = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
    lower = np.uint8([0, 160, 0])
    upper = np.uint8([255, 255, 255])
    white_mask = cv2.inRange(temp, lower, upper)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.blur(gray, (3, 3))
    canny_low = 100
    edges = cv2.Canny(white_mask, canny_low, canny_low * 3, apertureSize=5)
    lines = cv2.HoughLinesP(edges, 1, np.pi/180, 10, 2, 80)
    result = img.copy()
    if lines is not None:
        for x in range(0, len(lines)):
            for x1, y1, x2, y2 in lines[x]:
                print(x1, y1, x2, y2)
                cv2.line(result, (x1, y1), (x2, y2), (255, 0, 0), 2)
    pyplot.subplot(141), pyplot.imshow(img, cmap='gray')
    pyplot.title('Original Image'), pyplot.xticks([]), pyplot.yticks([])
    pyplot.subplot(142), pyplot.imshow(white_mask, cmap='gray')
    pyplot.title('Gray Image'), pyplot.xticks([]), pyplot.yticks([])
    pyplot.subplot(143), pyplot.imshow(edges, cmap='gray')
    pyplot.title('Edge Image'), pyplot.xticks([]), pyplot.yticks([])
    pyplot.subplot(144), pyplot.imshow(result, cmap='gray')
    pyplot.title('Result Image'), pyplot.xticks([]), pyplot.yticks([])
    pyplot.show()
    return img

if __name__ == '__main__':
    image = cv2.imread('receipt.jpg')
    image = detect_lines(image)
    cv2.imwrite('output.jpg', image)
I would suggest looking at different morphological transformations, which you can apply to your Canny edge detection in order to improve the Hough line transform.
This is not perfect but it's something to get you started:
import cv2
import numpy as np
from matplotlib import pyplot
def detect_lines(img):
    temp = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
    kernel = np.ones((5, 5), np.uint8)  # <--- Added a kernel you can vary
    lower = np.uint8([0, 160, 0])
    upper = np.uint8([255, 255, 255])
    white_mask = cv2.inRange(temp, lower, upper)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.blur(gray, (3, 3))
    canny_low = 100
    edges = cv2.Canny(white_mask, canny_low, canny_low * 3, apertureSize=3)
    dilate = cv2.dilate(edges, kernel, iterations=2)  # <--- Added a dilation, check the link I provided
    ero = cv2.erode(dilate, kernel, iterations=1)  # <--- Added an erosion, check the link I provided
    lines = cv2.HoughLinesP(dilate, 1, np.pi/180, 10, 2, 80)
    result = img.copy()
    if lines is not None:
        for x in range(0, len(lines)):
            for x1, y1, x2, y2 in lines[x]:
                print(x1, y1, x2, y2)
                cv2.line(result, (x1, y1), (x2, y2), (255, 0, 0), 2)
    pyplot.subplot(151), pyplot.imshow(img, cmap='gray')
    pyplot.title('Original Image'), pyplot.xticks([]), pyplot.yticks([])
    pyplot.subplot(152), pyplot.imshow(white_mask, cmap='gray')
    pyplot.title('Mask Image'), pyplot.xticks([]), pyplot.yticks([])
    pyplot.subplot(153), pyplot.imshow(edges, cmap='gray')
    pyplot.title('Edge Image'), pyplot.xticks([]), pyplot.yticks([])
    pyplot.subplot(154), pyplot.imshow(ero, cmap='gray')
    pyplot.title('Dilate/Erosion Image'), pyplot.xticks([]), pyplot.yticks([])  # <--- Added a display
    pyplot.subplot(155), pyplot.imshow(result, cmap='gray')
    pyplot.title('Result Image'), pyplot.xticks([]), pyplot.yticks([])
    pyplot.show()
    return result  # <--- You want to return the result, right?

if __name__ == '__main__':
    image = cv2.imread('receipt.jpg')
    image = detect_lines(image)
    cv2.imwrite('output.jpg', image)
Another approach could be to look into corner detection and then draw lines between the detected corners (I haven't tried this approach, but it's just for inspiration :) ).
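A rough sketch of that corner idea (untested; the parameter values are illustrative guesses):

import cv2
import numpy as np

img = cv2.imread('receipt.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Shi-Tomasi corners; maxCorners, qualityLevel and minDistance need tuning.
corners = cv2.goodFeaturesToTrack(gray, maxCorners=4, qualityLevel=0.01, minDistance=100)
if corners is not None:
    pts = corners.reshape(-1, 2).astype(int)
    # Connect consecutive detected corners with lines.
    for p, q in zip(pts, np.roll(pts, -1, axis=0)):
        cv2.line(img, (int(p[0]), int(p[1])), (int(q[0]), int(q[1])), (0, 0, 255), 2)
cv2.imwrite('corners.jpg', img)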
I am trying to extract the edge of an image (its contour) and change its thickness. I want to give it something like the stroke effect of a Photoshop layer style. Photoshop stroke effect example:
http://projectwoman.com/2012/11/smart-objects-and-strokes-in-photoshop.html
I was able to extract the edge from an image using Canny edge detection or the Pillow filter.
1. Using Canny edge detection:
img = cv2.imread(img_path,0)
edges = cv2.Canny(img,300,700)
2. Using the Pillow filter:
from PIL import Image, ImageFilter

image = Image.open(img_path).convert('RGB')
image = image.filter(ImageFilter.FIND_EDGES())
However, I could not adjust the contour thickness.
Here is a solution:
import cv2
import numpy as np
import matplotlib.pyplot as plt

image = cv2.imread('mickey.jpg')
image = cv2.cvtColor(image, cv2.COLOR_BGR2YCR_CB)[...,0]

def show_img(im, figsize=None, ax=None, alpha=None):
    if not ax: fig,ax = plt.subplots(figsize=figsize)
    ax.imshow(im, alpha=alpha)
    ax.set_axis_off()
    return ax

def getBordered(image, width):
    bg = np.zeros(image.shape)
    _, contours, _ = cv2.findContours(image.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # Keep only the largest contour by area.
    biggest = 0
    bigcontour = None
    for contour in contours:
        area = cv2.contourArea(contour)
        if area > biggest:
            biggest = area
            bigcontour = contour
    return cv2.drawContours(bg, [bigcontour], 0, (255, 255, 255), width).astype(bool)

im2 = getBordered(image, 10)
show_img(im2, figsize=(10,10))
You can change the thickness by changing the width parameter of getBordered.
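Alternatively, if you want to thicken the Canny edges from the question directly, dilating the edge map also works; a minimal sketch, where the kernel size controls the stroke width:

import cv2
import numpy as np

img = cv2.imread(img_path, 0)
edges = cv2.Canny(img, 300, 700)
# A bigger kernel or more iterations gives a thicker stroke.
kernel = np.ones((5, 5), np.uint8)
thick_edges = cv2.dilate(edges, kernel, iterations=1)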