I started with a set of points from a laser scan, which I plotted as a scatter plot using matplotlib. I then used plt.savefig so I could open the plot as an image and use OpenCV to find contours around the points. Now I want to find the centers of the contours and plot them as points in the original scatter plot. The problem is that I don't know how to create a mapping between the original scatter-plot points and the image pixels. Is there a way to do this? Or is there another way to mark the centers of the contours in matplotlib?
Note: the reason I need to draw contours is that later I need to use OpenCV's matchShapes function to compare them.
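For context, here is a minimal sketch of that later comparison step, assuming a reasonably recent OpenCV; c1 and c2 are placeholders for two contours returned by cv2.findContours:
import cv2

# matchShapes compares two contours via Hu moment invariants; smaller
# return values mean more similar shapes. c1 and c2 are hypothetical
# contours obtained earlier from cv2.findContours.
score = cv2.matchShapes(c1, c2, cv2.CONTOURS_MATCH_I1, 0.0)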
Here are the images from each step:
[image: scatter plot]
[image: contours with centers marked in red]
Now I basically want to add the red markings from the image to the scatter plot.
Here is my code:
import cv2
import numpy as np
import matplotlib.pyplot as plt

# X is the Nx2 array of scan points; name is the output image path
plt.scatter(X[:, 0], X[:, 1], s=2)
plt.axis('equal')
plt.axis('off')
plt.savefig(name)
plt.clf()

img = cv2.imread(name)
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgGray, 127, 255, 0)
im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

height = img.shape[0]
width = img.shape[1]
blank_image = np.zeros((height, width, 3), np.uint8)
cv2.drawContours(blank_image, contours, -1, (255, 0, 0))

for contour in contours:
    # contour centroid from image moments
    M = cv2.moments(contour)
    cX = int(M["m10"] / M["m00"])
    cY = int(M["m01"] / M["m00"])
    cv2.circle(blank_image, (cX, cY), 2, (0, 0, 255), -1)

cv2.imwrite(name, blank_image)
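A side note on the cv2.findContours call above: its return signature differs across OpenCV versions (three values in 3.x, two in 2.4 and 4.x), which can bite when reproducing this. A version-agnostic variant, as also used in an answer further below:
# Taking the second-to-last return value works on 2.4, 3.x and 4.x
contours = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2]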
UPDATE:
Based on suggestions I looked at matplotlib's transforms and tried the following:
import cv2
import numpy as np
import matplotlib.pyplot as plt

# x_coords, y_coords are the coordinates of the scan points
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x_coords, y_coords, 'bo', markersize=2)
ax.axis('equal')
ax.axis('off')

# figure size in pixels, needed to flip the y axis below
height1 = fig.get_figheight() * fig.dpi
width1 = fig.get_figwidth() * fig.dpi
inv = ax.transData.inverted()

plt.savefig('img.png')

img = cv2.imread('img.png')
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgGray, 127, 255, 0)
im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

height = img.shape[0]
width = img.shape[1]
blank_image = np.zeros((height, width, 3), np.uint8)

centers_x = []
centers_y = []
for contour in contours:
    M = cv2.moments(contour)
    cX = int(M["m10"] / M["m00"])
    cY = int(M["m01"] / M["m00"])
    # Flip y (OpenCV's origin is top-left, matplotlib's is bottom-left),
    # then map the pixel position back to data coordinates
    center = inv.transform((cX, height1 - cY))
    centers_x.append(center[0])
    centers_y.append(center[1])
    cv2.drawContours(blank_image, [contour], -1, (255, 0, 0), 1)
    cv2.circle(blank_image, (cX, cY), 2, (0, 0, 255), -1)

cv2.imwrite("test.png", blank_image)
ax.plot(centers_x, centers_y, 'ro', markersize=4)
plt.show()
This got me close, but it seems like the x coordinates are still slightly off:
[image: new result]
I also tried
centers_x.append(inv.transform((width1-cX, height1-cY))[0])
centers_y.append(inv.transform((width1-cX, height1-cY))[1])
but that also didn't work.
FINAL UPDATE: adding
plt.tight_layout()
solved the problem. For reference, here is a minimal example of mapping a pixel position back to data coordinates:
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 1, 10)
y = 5 * x + 2
fig, ax = plt.subplots()
ax.scatter(x, y)

height = fig.get_figheight() * fig.dpi
width = fig.get_figwidth() * fig.dpi

# the coordinates in pixels
cX = 147
cY = 142

# we need to invert the y value, as matplotlib considers (0,0) to be
# on the lower left, while OpenCV uses the upper left as origin
tX, tY = ax.transData.inverted().transform([cX, height - cY])

ax.scatter(tX, tY, s=50, c='r')
fig.savefig('test.png', dpi=fig.dpi)
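The same mapping can be wrapped into a small helper (a sketch under the same assumptions, i.e. plt.tight_layout() has been applied and the figure is saved at fig.dpi):
def pixel_to_data(fig, ax, cX, cY):
    # Map an OpenCV pixel position (origin top-left) to matplotlib
    # data coordinates (origin bottom-left) by flipping y first
    height = fig.get_figheight() * fig.dpi
    return ax.transData.inverted().transform((cX, height - cY))

tX, tY = pixel_to_data(fig, ax, 147, 142)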
Related
I need to find and split each shelf from a planogram image. In this case the shelves are separated by blue lines (they could also be black, gray, etc.), so splitting should give me 8 shelves. I tried using Hough lines and contours to recognize the shelves. Please tell me how to recognize the shelves.
Here's my code:
import cv2
import numpy as np

# Load the image
img = cv2.imread("display.jpg")

# Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Threshold the image to binarize it
threshold, binary = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY_INV)

# Find contours
contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

# Loop through each contour
for i, contour in enumerate(contours):
    # Get bounding box of contour
    x, y, w, h = cv2.boundingRect(contour)
    # Crop the image to the bounding box
    shelf_img = img[y:y+h, x:x+w]
    # Save the cropped image
    cv2.imwrite(f"shelf_{i}.jpg", shelf_img)
Here's the second approach:
#!/usr/bin/env python
# coding: utf-8
# In[5]:
from skimage.transform import probabilistic_hough_line
from skimage.feature import canny
from skimage.io import imread
import numpy as np
import matplotlib.pyplot as plt
# In[6]:
image = imread("display.jpg", as_gray=True) * 255
# In[7]:
edges = canny(image, 4, 1, 25)
lines = probabilistic_hough_line(edges, threshold=2, line_length=10, line_gap=3)
fig2, ax = plt.subplots(1, 3, figsize=(20, 8))
ax[0].imshow(image, cmap=plt.cm.gray)
ax[0].set_title('Input image')
ax[0].axis('image')
ax[1].imshow(edges, cmap=plt.cm.gray)
ax[1].set_title('Canny edges')
ax[1].axis('image')
ax[2].imshow(edges * 0)
for line in lines:
    p0, p1 = line
    ax[2].plot((p0[0], p1[0]), (p0[1], p1[1]))
ax[2].set_title('Probabilistic Hough')
ax[2].axis('image')
# In[8]:
height, width = image.shape
# In[9]:
def proj_x(line):
    # horizontal extent of a line segment
    p0, p1 = line
    return np.abs(p0[0] - p1[0])

def proj_y(line):
    # vertical extent of a line segment
    p0, p1 = line
    return np.abs(p0[1] - p1[1])
# In[10]:
s_h = np.zeros(height)
s_v = np.zeros(height)
for line in lines:
    p0, p1 = line
    y1 = max(p0[1], p1[1])
    y0 = min(p0[1], p1[1])
    if proj_y(line) > 2:
        s_v[y0:y1] += proj_y(line)
    if proj_x(line) > 2:
        s_h[y0:y1] += proj_x(line)
# In[11]:
#fig2, ax = plt.subplots(1, 2, figsize=(8, 8))
plt.figure(figsize=(20, 10))
plt.imshow(image, cmap=plt.cm.gray)
plt.title('Input image')
plt.plot(s_h, np.arange(height), linewidth=2, label="s_h")
plt.plot(s_v, np.arange(height), linewidth=2, label="s_v")
plt.ylim([height, 0])
plt.xlim([0, width])
plt.legend()
# In[13]:
import scipy.ndimage
s_f = 20 * s_h / (s_v + 1)
s_f = scipy.ndimage.gaussian_filter1d(s_f, 1)
# In[14]:
plt.figure(figsize=(20, 10))
plt.imshow(image, cmap=plt.cm.gray)
plt.title('Input image')
plt.plot(s_f, np.arange(height), linewidth=2, label="s_h")
plt.ylim([height, 0])
plt.xlim([0, width])
# In[15]:
import scipy.signal
# In[26]:
cands = scipy.signal.find_peaks_cwt(s_f, np.arange(1, 50))
# zip is a lazy iterator in Python 3, so materialize it for printing
print(list(zip(cands, s_f[cands])))
# In[18]:
pairs = []
for c in cands:
    for d in cands:
        if (c < d) and (d - c < 100):
            pairs.append((c, d))
# In[19]:
# pick the pair of peaks with the strongest combined response
y_targets = pairs[np.argmax([s_f[a] * s_f[b] for a, b in pairs])]
# In[20]:
y0, y1 = y_targets
# In[21]:
import matplotlib.patches as patches
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, aspect='equal')
ax.imshow(image, cmap=plt.cm.gray)
ax.set_title('Input image')
ax.add_patch(
    patches.Rectangle(
        (0, y0),
        width,
        y1 - y0,
        facecolor="red",
        alpha=0.4  # semi-transparent so the underlying image stays visible
    )
)
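Since the dividers span the full image width, another option for this splitting task is to mask the divider colour directly and cut wherever a row is mostly divider pixels. A sketch, not tested on the actual image; the HSV bounds for "blue" are an assumption and would need tuning:
import cv2
import numpy as np

img = cv2.imread("display.jpg")

# Hypothetical HSV range for the blue divider lines
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, np.array([100, 80, 80]), np.array([130, 255, 255]))

# A divider row should be mostly masked pixels
row_fill = mask.sum(axis=1) / 255
divider_rows = np.where(row_fill > 0.5 * img.shape[1])[0]

# Group consecutive divider rows into bands and cut at each band centre
cuts = [0]
if len(divider_rows):
    band_start = divider_rows[0]
    for prev, cur in zip(divider_rows, divider_rows[1:]):
        if cur - prev > 1:  # the band ended at prev
            cuts.append((band_start + prev) // 2)
            band_start = cur
    cuts.append((band_start + divider_rows[-1]) // 2)
cuts.append(img.shape[0])

for i, (y0, y1) in enumerate(zip(cuts, cuts[1:])):
    if y1 - y0 > 20:  # ignore slivers between adjacent cuts
        cv2.imwrite(f"shelf_{i}.jpg", img[y0:y1])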
I'm using cv2.HoughCircles in Python to try to detect circles in the following image:
[image: the image]
Here is my code:
import matplotlib.pyplot as plt
import cv2
import numpy as np
image = cv2.imread("image.png")
output = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1.5, 10, param1=20, param2=60, minRadius=0, maxRadius=40)
print(circles)
if circles is not None:
    circles = np.round(circles[0, :]).astype("int")
    for (x, y, r) in circles:
        cv2.circle(output, (x, y), r, (0, 255, 0), 4)
        cv2.rectangle(output, (x - 3, y - 3), (x + 3, y + 3), (0, 128, 255), -1)
plt.subplot(121), plt.imshow(image, cmap="gray"), plt.title("corners")
plt.xticks([]), plt.yticks([])
plt.subplot(122), plt.imshow(output, cmap="gray"), plt.title("corners")
plt.xticks([]), plt.yticks([])
plt.show()
[image: the detected circles]
Any insight on why it isn't detecting the circles? I am attaching an image of the circles I would like to detect; I would like to detect as many circles as possible like the ones marked in red in the following image:
[image: example of circles I would like to detect]
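One thing worth trying, as a hedged sketch rather than a verified fix (the parameter values are guesses): cv2.HoughCircles is noise-sensitive, so the usual advice is to blur the grayscale image first and then lower param2, the accumulator threshold, until circles start appearing:
import cv2

image = cv2.imread("image.png")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Smoothing suppresses spurious edges that pollute the Hough accumulator
blurred = cv2.medianBlur(gray, 5)

# param2 is the accumulator threshold: lower values return more (and
# less reliable) circles. These numbers are only starting guesses.
circles = cv2.HoughCircles(blurred, cv2.HOUGH_GRADIENT, dp=1.5, minDist=20,
                           param1=100, param2=40, minRadius=5, maxRadius=40)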
I am trying to draw contours around an image. I can see that contours are being found, but I am not able to draw the outline. The color of the contour seems to be either of the two (black and white) colors.
import cv2
import numpy as np
import matplotlib.pyplot as plt
from skimage import io
%matplotlib inline
im = io.imread('http://matlabtricks.com/images/post-35/man.png')
plt.imshow(im)
imgray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
plt.figure()
plt.imshow(imgray)
#Contoured image
ret,thresh = cv2.threshold(imgray, 120,255,cv2.THRESH_BINARY)
image, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
c_img = cv2.drawContours(image, contours, -1, (0, 255, 0), 1)
plt.figure()
plt.imshow(c_img)
You need to draw on the original image, not on the one returned from findContours(). The following works.
# Contoured image
ret, thresh = cv2.threshold(imgray, 120, 255, cv2.THRESH_BINARY)
# [-2] keeps this working across OpenCV versions with different
# findContours return signatures
contours = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2]
for contour in contours:
    cv2.drawContours(im, contour, -1, (0, 255, 0), 3)
plt.figure()
plt.imshow(im)
This is my result:
## Read and convert
img = io.imread('http://matlabtricks.com/images/post-35/man.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

## Find outer contours
# Note: findContours treats any nonzero pixel as foreground, so a
# binary threshold beforehand is usually safer than raw grayscale
_, cnts, _ = cv2.findContours(gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

## Draw
canvas = np.zeros_like(img)
cv2.drawContours(canvas, cnts, -1, (0, 255, 0), 1)
plt.imshow(canvas)
I'm trying out OpenCV to do some image processing. Admittedly I'm a noob at this stuff, but I feel like I'm wrapping my brain around it somewhat. I'm using a mask to detect the lighter areas of the image, then running a Canny detector and finally a HoughLinesP detection. Code is below. The result I'm getting is:
What I expected (and desire) is more like the image below (notice the red lines on the result):
For what it's worth, my end game is to auto-rotate the image so the receipt is straight. If I'm taking the wrong path entirely, advice would be appreciated.
import cv2
import numpy as np
from matplotlib import pyplot


def detect_lines(img):
    temp = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
    lower = np.uint8([0, 160, 0])
    upper = np.uint8([255, 255, 255])
    white_mask = cv2.inRange(temp, lower, upper)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.blur(gray, (3, 3))
    canny_low = 100
    edges = cv2.Canny(white_mask, canny_low, canny_low * 3, apertureSize=5)
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 10, 2, 80)
    result = img.copy()
    if lines is not None:
        for x in range(0, len(lines)):
            for x1, y1, x2, y2 in lines[x]:
                print(x1, y1, x2, y2)
                cv2.line(result, (x1, y1), (x2, y2), (255, 0, 0), 2)

    pyplot.subplot(141), pyplot.imshow(img, cmap='gray')
    pyplot.title('Original Image'), pyplot.xticks([]), pyplot.yticks([])
    pyplot.subplot(142), pyplot.imshow(white_mask, cmap='gray')
    pyplot.title('Gray Image'), pyplot.xticks([]), pyplot.yticks([])
    pyplot.subplot(143), pyplot.imshow(edges, cmap='gray')
    pyplot.title('Edge Image'), pyplot.xticks([]), pyplot.yticks([])
    pyplot.subplot(144), pyplot.imshow(result, cmap='gray')
    pyplot.title('Result Image'), pyplot.xticks([]), pyplot.yticks([])
    pyplot.show()
    return img


if __name__ == '__main__':
    image = cv2.imread('receipt.jpg')
    image = detect_lines(image)
    cv2.imwrite('output.jpg', image)
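(For the stated end goal of straightening the receipt, a minimal deskew sketch, separate from the answer below; the angle convention of cv2.minAreaRect varies between OpenCV versions, so the sign handling may need adjusting:)
import cv2
import numpy as np

img = cv2.imread('receipt.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Binarize so the bright receipt stands out from the background
_, mask = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

# The minimum-area rectangle around the foreground pixels yields an angle
coords = np.column_stack(np.where(mask > 0)).astype(np.float32)
angle = cv2.minAreaRect(coords)[-1]
if angle < -45:  # map OpenCV's (-90, 0] convention to the smaller correction
    angle = 90 + angle

h, w = img.shape[:2]
M = cv2.getRotationMatrix2D((w // 2, h // 2), angle, 1.0)
rotated = cv2.warpAffine(img, M, (w, h), flags=cv2.INTER_CUBIC,
                         borderMode=cv2.BORDER_REPLICATE)
cv2.imwrite('rotated.jpg', rotated)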
I would suggest starting by looking at the different morphological transformations you can apply to your Canny edge detection in order to improve the Hough line transform.
This is not perfect but it's something to get you started:
import cv2
import numpy as np
from matplotlib import pyplot


def detect_lines(img):
    temp = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
    kernel = np.ones((5, 5), np.uint8)  # <--- Added a kernel you can vary
    lower = np.uint8([0, 160, 0])
    upper = np.uint8([255, 255, 255])
    white_mask = cv2.inRange(temp, lower, upper)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.blur(gray, (3, 3))
    canny_low = 100
    edges = cv2.Canny(white_mask, canny_low, canny_low * 3, apertureSize=3)
    dilate = cv2.dilate(edges, kernel, iterations=2)  # <--- Added a dilation, check the link I provided
    ero = cv2.erode(dilate, kernel, iterations=1)  # <--- Added an erosion, check the link I provided
    lines = cv2.HoughLinesP(dilate, 1, np.pi / 180, 10, 2, 80)
    result = img.copy()
    if lines is not None:
        for x in range(0, len(lines)):
            for x1, y1, x2, y2 in lines[x]:
                print(x1, y1, x2, y2)
                cv2.line(result, (x1, y1), (x2, y2), (255, 0, 0), 2)

    pyplot.subplot(151), pyplot.imshow(img, cmap='gray')
    pyplot.title('Original Image'), pyplot.xticks([]), pyplot.yticks([])
    pyplot.subplot(152), pyplot.imshow(white_mask, cmap='gray')
    pyplot.title('Mask Image'), pyplot.xticks([]), pyplot.yticks([])
    pyplot.subplot(153), pyplot.imshow(edges, cmap='gray')
    pyplot.title('Edge Image'), pyplot.xticks([]), pyplot.yticks([])
    pyplot.subplot(154), pyplot.imshow(ero, cmap='gray')
    pyplot.title('Dilate/Erosion Image'), pyplot.xticks([]), pyplot.yticks([])  # <--- Added a display
    pyplot.subplot(155), pyplot.imshow(result, cmap='gray')
    pyplot.title('Result Image'), pyplot.xticks([]), pyplot.yticks([])
    pyplot.show()
    return result  # <--- You want to return the result, right?


if __name__ == '__main__':
    image = cv2.imread('receipt.jpg')
    image = detect_lines(image)
    cv2.imwrite('output.jpg', image)
Another approach could be to look into corner detection and then draw lines between the detected corners (I haven't tried this approach; it's just for inspiration :) ). A rough sketch of the idea follows.
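A minimal sketch with cv2.goodFeaturesToTrack, untested on the receipt image; maxCorners, qualityLevel and minDistance are guesses to tune:
import cv2
import numpy as np

img = cv2.imread('receipt.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Track a handful of strong corners; the parameters are starting guesses
corners = cv2.goodFeaturesToTrack(gray, maxCorners=20,
                                  qualityLevel=0.05, minDistance=50)
if corners is not None:
    for corner in corners:
        x, y = map(int, corner.ravel())
        cv2.circle(img, (x, y), 5, (0, 0, 255), -1)  # mark each corner
cv2.imwrite('corners.jpg', img)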
I am trying to extract the edge of an image (its contour) and change its thickness. I want to give it something like the stroke effect of a Photoshop layer style. Photoshop stroke effect example:
http://projectwoman.com/2012/11/smart-objects-and-strokes-in-photoshop.html
I was able to extract the edges from an image using Canny edge detection or the Pillow filter.
1. Using Canny edge detection:
import cv2

img = cv2.imread(img_path, 0)
edges = cv2.Canny(img, 300, 700)
2. Using the Pillow filter:
from PIL import Image, ImageFilter

image = Image.open(img_path).convert('RGB')
image = image.filter(ImageFilter.FIND_EDGES())
But I could not adjust the contour thickness.
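As a side note, one way to thicken the raw edge map itself is morphological dilation; a sketch where the kernel size and iteration count control the added thickness:
import cv2
import numpy as np

img = cv2.imread(img_path, 0)
edges = cv2.Canny(img, 300, 700)

# Each dilation pass grows the edges by roughly the kernel radius
kernel = np.ones((3, 3), np.uint8)
thick_edges = cv2.dilate(edges, kernel, iterations=2)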
Here is a solution:
import cv2
import numpy as np
import matplotlib.pyplot as plt

image = cv2.imread('mickey.jpg')
image = cv2.cvtColor(image, cv2.COLOR_BGR2YCR_CB)[..., 0]


def show_img(im, figsize=None, ax=None, alpha=None):
    if not ax:
        fig, ax = plt.subplots(figsize=figsize)
    ax.imshow(im, alpha=alpha)
    ax.set_axis_off()
    return ax


def getBordered(image, width):
    bg = np.zeros(image.shape)
    # OpenCV 3.x return signature; 4.x drops the first value
    _, contours, _ = cv2.findContours(image.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # Keep only the largest contour by area
    biggest = 0
    bigcontour = None
    for contour in contours:
        area = cv2.contourArea(contour)
        if area > biggest:
            biggest = area
            bigcontour = contour
    return cv2.drawContours(bg, [bigcontour], 0, (255, 255, 255), width).astype(bool)


im2 = getBordered(image, 10)
show_img(im2, figsize=(10, 10))
You can change the thickness by changing the width parameter of getBordered.