I am trying to extract the edge of an image (its contour) and change its thickness. I want to give it something like the stroke effect of Photoshop's layer style. Photoshop stroke effect example:
http://projectwoman.com/2012/11/smart-objects-and-strokes-in-photoshop.html
I was able to extract the edge from an image, using either Canny edge detection or a Pillow filter.
1. Using Canny edge detection:
img = cv2.imread(img_path, 0)
edges = cv2.Canny(img, 300, 700)
2. Using the Pillow FIND_EDGES filter:
image = Image.open(img_path).convert('RGB')
image = image.filter(ImageFilter.FIND_EDGES)
But I could not adjust the contour thickness.
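(As an aside, one direct way to thicken a Canny edge map is morphological dilation; a minimal sketch, with the 5×5 kernel size as an arbitrary choice:)
import cv2
import numpy as np

img = cv2.imread(img_path, 0)
edges = cv2.Canny(img, 300, 700)
# The kernel size controls the stroke width: bigger kernel, thicker edges.
kernel = np.ones((5, 5), np.uint8)
thick_edges = cv2.dilate(edges, kernel, iterations=1)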
Here is a solution:
import cv2
import numpy as np
import matplotlib.pyplot as plt

image = cv2.imread('mickey.jpg')
image = cv2.cvtColor(image, cv2.COLOR_BGR2YCR_CB)[..., 0]  # keep only the luma channel

def show_img(im, figsize=None, ax=None, alpha=None):
    if not ax:
        fig, ax = plt.subplots(figsize=figsize)
    ax.imshow(im, alpha=alpha)
    ax.set_axis_off()
    return ax

def getBordered(image, width):
    bg = np.zeros(image.shape)
    # [-2] selects the contour list in both the OpenCV 3.x and 4.x return signatures;
    # findContours treats every nonzero pixel as foreground.
    contours = cv2.findContours(image.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2]
    biggest = 0
    bigcontour = None
    for contour in contours:
        area = cv2.contourArea(contour)
        if area > biggest:
            biggest = area
            bigcontour = contour
    return cv2.drawContours(bg, [bigcontour], 0, (255, 255, 255), width).astype(bool)

im2 = getBordered(image, 10)
show_img(im2, figsize=(10, 10))
You can change the thickness by changing the width parameter of getBordered.
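For example (a usage sketch; the width values are arbitrary):
thin = getBordered(image, 2)     # thin stroke
thick = getBordered(image, 20)   # thick stroke
filled = getBordered(image, -1)  # a negative width makes drawContours fill the contour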
The unfiltered image
I have a 2D grayscale image of a vessel and I want to convert it to a 3D model for further analysis.
I imported it using the OpenCV (cv2) library and wrote code that detects the edges of the shape in the image.
But I have a problem detecting the center-line from the edge coordinates. (I want the center-line so I can revolve the shape and convert it into a 3D model.)
I appreciate any help.
Here is the resulting image; as you can see, the line has not been fitted to the middle of the shape.
import cv2
import numpy as np
from matplotlib import pyplot as plt

# Apply a bilateral blur to the image
img = cv2.imread('vessel.png')
blur = cv2.bilateralFilter(img, 9, 75, 75)

# Apply Canny edge detection to the blurred image to find the edges
edges = cv2.Canny(blur, 100, 200)

# Get the coordinates of the edge pixels (rows, columns)
x, y = np.where(edges > 0.5)

# Fit a regression line (slope and intercept) through the edge pixels
m, b = np.polyfit(x, y, deg=1)

# Draw the line on the edge image
edges_img = cv2.line(edges, (int(0 * m + b), 0), (int(500 * m + b), 500),
                     color=(255, 0, 0), thickness=2)

# Plot the result next to the blurred image
plt.subplot(121), plt.imshow(blur, cmap='gray')
plt.title('Original Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122), plt.imshow(edges_img, cmap='gray')
plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
plt.show()
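A line fitted through every edge pixel gets pulled toward whichever vessel wall contributes more pixels. A sketch of an alternative (not from the original post, and assuming the vessel runs roughly vertically in the image): collapse the two walls to a single midpoint per row, then fit through the midpoints.
# Sketch: one midpoint per image row, then fit the line through the midpoints.
rows, cols = np.where(edges > 0)              # coordinates of the edge pixels
mid_rows, mid_cols = [], []
for r in np.unique(rows):
    c = cols[rows == r]
    if len(c) >= 2:                           # need both walls in this row
        mid_rows.append(r)
        mid_cols.append(c.mean())             # midpoint between the walls
m, b = np.polyfit(mid_rows, mid_cols, deg=1)  # center-line: col = m*row + b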
I am trying to crop a rectangular image from a screenshot. The background of the image must be white, but I end up with black. How can I change that? I also want to make an RGB histogram of the final image, but it seems to plot only a vertical line at zero. Any kind of help is very important! Here is my code:
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

image = cv2.imread(filename="Screenshot from 2019-11-08 22-02-27.png")
mask = np.zeros(shape=image.shape, dtype="uint8")
cv2.rectangle(img=mask,
              pt1=(0, 185), pt2=(1900, 773),
              color=(255, 255, 255),
              thickness=-1)
maskedImg = cv2.bitwise_and(src1=image, src2=mask)
cv2.imwrite("processed.png", maskedImg)
plt.imshow(maskedImg)
plt.show()
plt.hist(maskedImg.ravel(), 256, [0, 256]); plt.show()
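One way to end up with a white background is to apply the mask first and then turn the remaining black pixels white: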
import cv2
import numpy as np
import matplotlib.pyplot as plt

image = cv2.imread(filename="1.png")
mask = np.zeros(shape=image.shape, dtype="uint8")
cv2.rectangle(img=mask,
              pt1=(0, 185), pt2=(1900, 773),
              color=(255, 255, 255),
              thickness=-1)
maskedImg = cv2.bitwise_and(src1=image, src2=mask)
# Turn every pure-black pixel left by the mask into white
maskedImg[np.where((maskedImg == [0, 0, 0]).all(axis=2))] = [255, 255, 255]
cv2.imwrite("processed.png", maskedImg)
plt.imshow(maskedImg)
plt.show()
This converts the black pixels in the image into white pixels.
Original image
Crop Image
color = ('b', 'g', 'r')
for i, col in enumerate(color):
    histr = cv2.calcHist([maskedImg], [i], None, [256], [0, 256])
    plt.plot(histr, color=col)
    plt.xlim([0, 256])
plt.show()
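If the spike comes from the fill pixels outside the rectangle, another option (a sketch, reusing the rectangle coordinates from above) is to pass a single-channel mask to cv2.calcHist so that only the cropped region is counted:
# Sketch: histogram restricted to the rectangle via calcHist's mask argument.
roi_mask = np.zeros(image.shape[:2], dtype="uint8")
cv2.rectangle(roi_mask, (0, 185), (1900, 773), 255, -1)
for i, col in enumerate(('b', 'g', 'r')):
    histr = cv2.calcHist([image], [i], roi_mask, [256], [0, 256])
    plt.plot(histr, color=col)
    plt.xlim([0, 256])
plt.show()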
I started with a set of points from a laser scan, which I plotted as a scatter plot using matplotlib. I then used plt.savefig so I could open the plot as an image and use OpenCV to find contours around the points. Now I want to find the centers of the contours and plot them as points in the original scatter plot. The problem is that I don't know how to create a mapping between the original scatter-plot points and the image pixels. Is there a way to do this, or another way to mark the centers of the contours in matplotlib?
Note: the reason I need to draw contours is that later I need to use OpenCV's matchShapes function to compare the contours.
Here are the images from each step:
scatter plot
contours with centers marked in red
Now I basically want to be able to add the red markings from the image to the scatter plot.
Here is my code:
plt.scatter(X[:, 0], X[:, 1], s=2)
plt.axis('equal')
plt.axis('off')
plt.savefig(name)
plt.clf()

img = cv2.imread(name)
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgGray, 127, 255, 0)
# [-2:] picks (contours, hierarchy) on both OpenCV 3.x and 4.x
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
height = img.shape[0]
width = img.shape[1]
blank_image = np.zeros((height, width, 3), np.uint8)
cv2.drawContours(blank_image, contours, -1, (255, 0, 0))
for contour in contours:
    M = cv2.moments(contour)
    cX = int(M["m10"] / M["m00"])
    cY = int(M["m01"] / M["m00"])
    cv2.circle(blank_image, (cX, cY), 2, (0, 0, 255), -1)
cv2.imwrite(name, blank_image)
UPDATE:
Based on suggestions, I looked at matplotlib's transforms and tried the following:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x_coords, y_coords, 'bo', markersize=2)
ax.axis('equal')
ax.axis('off')
height1 = fig.get_figheight() * fig.dpi
width1 = fig.get_figwidth() * fig.dpi
inv = ax.transData.inverted()
plt.savefig('img.png')

img = cv2.imread('img.png')
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgGray, 127, 255, 0)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
height = img.shape[0]
width = img.shape[1]
blank_image = np.zeros((height, width, 3), np.uint8)
centers_x = []
centers_y = []
for contour in contours:
    M = cv2.moments(contour)
    cX = int(M["m10"] / M["m00"])
    cY = int(M["m01"] / M["m00"])
    # flip y: matplotlib's origin is bottom-left, the image's is top-left
    centers_x.append(inv.transform((cX, height1 - cY))[0])
    centers_y.append(inv.transform((cX, height1 - cY))[1])
    cv2.drawContours(blank_image, [contour], -1, (255, 0, 0), 1)
    cv2.circle(blank_image, (cX, cY), 2, (0, 0, 255), -1)
cv2.imwrite("test.png", blank_image)
ax.plot(centers_x, centers_y, 'ro', markersize=4)
plt.show()
This got me close, but it seems like the x coordinates are still slightly off:
new result
I also tried
centers_x.append(inv.transform((width1-cX, height1-cY))[0])
centers_y.append(inv.transform((width1-cX, height1-cY))[1])
but that also didn't work.
FINAL UPDATE: adding
plt.tight_layout()
solved the problem, presumably because it finalizes the axes position before the figure is saved, so that transData and the saved image agree.
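For reference, here is a minimal self-contained example of the pixel-to-data mapping (the transform trick suggested above); cX and cY stand in for a pixel coordinate measured in the saved image: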
x = np.linspace(0,1,10)
y = 5*x+2
fig, ax = plt.subplots()
ax.scatter(x,y)
height = fig.get_figheight()*fig.dpi
width = fig.get_figwidth()*fig.dpi
# the coordinates in pixel
cX = 147
cY = 142
# we need to invert the y value as matplotlib considers (0,0) to be
# on the lower left, while opencv uses upper left as origin
tX,tY = ax.transData.inverted().transform([cX,height-cY])
ax.scatter(tX,tY, s=50, c='r')
fig.savefig('test.png', dpi=fig.dpi)
I tried to detect the external circle of this image.
However, no matter how I set the parameters of the Hough transform, I cannot detect the external circle.
My code is the following:
###############################
# Circle detection
###############################
height, width = image.shape
circles = cv2.HoughCircles(image, cv2.HOUGH_GRADIENT, .3, 20,
                           param1=100, param2=100,
                           minRadius=int(min(width, height) / 3),
                           maxRadius=int(min(width, height)))
circles = np.uint16(np.around(circles))
cimg = origin
for i in circles[0, :]:
    cv2.circle(cimg, (i[0], i[1]), i[2], (255, 0, 0), 1)  # draw all circles in blue
    cv2.circle(cimg, (i[0], i[1]), 2, (255, 0, 0), 1)     # and their centers

###############################
# Find the largest circle
###############################
# Go through all the circles and take the one with the greatest radius
max_index = 0
max_i = circles[0, max_index, 2]
for indx, i in enumerate(circles[0, :]):
    if i[2] > max_i:
        max_i = i[2]
        max_index = indx  # index of the largest circle
circle_max = max_i
x_max = circles[0, max_index, 0]
y_max = circles[0, max_index, 1]
r_max = circles[0, max_index, 2]
cv2.circle(cimg, (x_max, y_max), r_max, (0, 0, 255), 1)  # draw the largest circle in red
cv2.circle(cimg, (x_max, y_max), 2, (0, 0, 255), 3)
This code detects a lot of circles, but the external circle never appears.
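As an aside, the largest-radius search above can be written more compactly (a sketch):
# Sketch: pick the circle with the greatest radius directly.
x_max, y_max, r_max = max(circles[0, :], key=lambda c: c[2])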
If you want to detect only one circle, this can help you (the very high param2 accumulator threshold below keeps only the strongest circle):
import cv2
import numpy as np
from matplotlib import pyplot as plt

name_image = "ImageTest.png"

bgr_img = cv2.imread(name_image)
b, g, r = cv2.split(bgr_img)    # get b, g, r
rgb_img = cv2.merge([r, g, b])  # switch it to rgb
gray_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2GRAY)
img = cv2.medianBlur(gray_img, 5)
cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 7, 20,
                           param1=90, param2=2400, minRadius=0, maxRadius=0)
circles = np.uint16(np.around(circles))
for i in circles[0, :]:
    # draw the outer circle
    cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)
    # draw the center of the circle
    cv2.circle(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)

plt.subplot(121), plt.imshow(rgb_img)
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122), plt.imshow(cimg)
plt.title('Hough Transform'), plt.xticks([]), plt.yticks([])
plt.show()
cv2.imwrite(name_image.split(".png")[0] + '_HoughTransform.png', cimg)
I am trying to draw a contour around an image. I can see that contours are being found, but I am not able to draw the outline; the contour colour always comes out as one of two colours (black or white).
import cv2
import numpy as np
import matplotlib.pyplot as plt
from skimage import io
%matplotlib inline
im = io.imread('http://matlabtricks.com/images/post-35/man.png')
plt.imshow(im)
imgray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
plt.figure()
plt.imshow(imgray)
#Contoured image
ret,thresh = cv2.threshold(imgray, 120,255,cv2.THRESH_BINARY)
image, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
c_img = cv2.drawContours(image, contours, -1, (0, 255, 0), 1)
plt.figure()
plt.imshow(c_img)
You need to draw on the original image, not on the one returned from findContours(). The following works.
# Contoured image
ret, thresh = cv2.threshold(imgray, 120, 255, cv2.THRESH_BINARY)
contours = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2]
for contour in contours:
    cv2.drawContours(im, [contour], -1, (0, 255, 0), 3)
plt.figure()
plt.imshow(im)
This is my result:
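An alternative is to keep only the external contours and draw them on a blank canvas: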
## Read and convert
img = io.imread('http://matlabtricks.com/images/post-35/man.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

## Find outer contours ([-2] picks the contour list on both OpenCV 3.x and 4.x)
cnts = cv2.findContours(gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]

## Draw
canvas = np.zeros_like(img)
cv2.drawContours(canvas, cnts, -1, (0, 255, 0), 1)
plt.imshow(canvas)