I have detected dotted lines using HoughLinesP on some image samples and drawn red lines on them. For example, look at the image below:
The code for detecting the lines is below:
import cv2
import numpy as np
import math
img = cv2.imread("tabulapy/image2.png")
img = cv2.resize(img, (0, 0), fx=0.5, fy=0.5)
kernel1 = np.ones((1, 5), np.uint8)
kernel2 = np.ones((9, 9), np.uint8)
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
imgBW = cv2.threshold(imgGray, 230, 255, cv2.THRESH_BINARY_INV)[1]
img1 = cv2.erode(imgBW, kernel1, iterations=1)
img2 = cv2.dilate(img1, kernel2, iterations=3)
img3 = cv2.bitwise_and(imgBW, img2)
img3 = cv2.bitwise_not(img3)
img4 = cv2.bitwise_and(imgBW, imgBW, mask=img3)
imgLines = cv2.HoughLinesP(img4, 1, np.pi / 180, 10, minLineLength=500, maxLineGap=2)
for i in range(len(imgLines)):
    for x1, y1, x2, y2 in imgLines[i]:
        cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
I want to crop this image into different parts based on the detected (red) lines. What I mean is: the first image would run from the top to the first detected red line, the second from the first detected red line to the second detected red line, and so on.
Does anyone have an idea how this can be done?
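One way to do this, assuming the dotted separators are roughly horizontal: collect the y-coordinates of the segments returned by HoughLinesP, merge values that belong to the same dotted line, and slice the image row-wise with NumPy. This is only a minimal sketch continuing from the imgLines result above; the 10-pixel merge tolerance is a guess and may need tuning.
# Sketch: crop horizontal strips between consecutive detected lines.
ys = sorted(int((line[0][1] + line[0][3]) / 2) for line in imgLines)
# Merge y values that belong to the same dotted line (10 px tolerance is a guess).
merged = []
for y in ys:
    if not merged or y - merged[-1] > 10:
        merged.append(y)
# Slice between consecutive separators, including the top and bottom edges.
cuts = [0] + merged + [img.shape[0]]
for idx in range(len(cuts) - 1):
    part = img[cuts[idx]:cuts[idx + 1], :]
    if part.size:
        cv2.imwrite("part_%d.png" % idx, part)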
I'm attempting to parse floorplans to turn them into an array of line coordinates using HoughLinesP in opencv-python, but the function only returns lines that are angled and have no relation to the actual lines in my image.
Here is my code:
import cv2
# Read image and convert to grayscale
img = cv2.imread('C:/Data/images/floorplan/extremely basic.png', -1)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Get lines with probabilistic hough lines
found_lines = cv2.HoughLinesP(gray, 1, 3.14 / 160, 100,
                              minLineLength=1, maxLineGap=10)
# Loop through found lines and draw each line on original image
for line in found_lines:
    x1, y1, x2, y2 = line[0]
    cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 1)
# Show image, wait until keypress, close windows.
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
And here is what is returned with thresholds of 150 and 100, respectively:
I've tried tinkering with all options and attempted non-probabilistic Hough lines to no avail.
The problem was with image inversion and parameters. You will have to make further adjustments, as this does not find all the lines.
The code was tested on Google Colab. For local use, remove the line from google.colab.patches import cv2_imshow and replace cv2_imshow with cv2.imshow.
Partial image output:
Code
import cv2
import numpy as np
from google.colab.patches import cv2_imshow
# Read image and convert to grayscale
img = cv2.imread('1.jpg', 0)
#gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
s1, s2 = img.shape
new_img = np.zeros([s1,s2],dtype=np.uint8)
new_img.fill(255)
ret,thresh1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY_INV)
cv2_imshow(thresh1)
kernel = np.ones((3,3),np.uint8)
erosion = cv2.erode(thresh1,kernel,iterations = 1)
cv2_imshow(erosion)
# Get lines with probabilistic hough lines
found_lines = cv2.HoughLinesP(erosion, np.pi/180, np.pi/180, 10, minLineLength=4, maxLineGap=4)
# Loop through found lines and draw each line on original image
for line in found_lines:
    x1, y1, x2, y2 = line[0]
    cv2.line(new_img, (x1, y1), (x2, y2), (0, 0, 255), 1)
cv2_imshow(new_img)
#cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 1)
# Show image, wait until keypress, close windows.
print("ORIGINAL IMAGE:")
cv2_imshow(img)
#cv2.imshow('image', img)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
I am trying to detect the yellow line in the following picture, but shadows obscure the yellow road. Is there any method to deal with that?
We can already detect the yellow in these questions: About Line detection by using OpenCV and How to delete the other object from figure by using opencv?
The code is as follows:
import cv2
import numpy as np
image = cv2.imread('Road.jpg')
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
low_yellow = np.array([18, 94, 140])
up_yellow = np.array([48, 255, 255])
mask = cv2.inRange(hsv, low_yellow, up_yellow)
edges = cv2.Canny(mask, 75, 150)
lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 50, maxLineGap=250)
for line in lines:
    x1, y1, x2, y2 = line[0]
    cv2.line(image, (x1, y1), (x2, y2), (0, 255, 0), 5)
# cv2.imshow('image', img)
cv2.imwrite("result.jpg", edges)
Here is the code to convert to the Lab color space and auto-threshold the b channel.
You will still have to detect the lines themselves with a proper method.
A more advanced solution would be to train a dataset for segmentation (a neural network, e.g. a U-Net).
import cv2
import numpy as np
img = cv2.imread('YourImagePath.jpg')
cv2.imshow("Original", img)
k = cv2.waitKey(0)
# Convert To lab
lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
# display b channel
cv2.imshow("Lab", lab[:, :, 2])
k = cv2.waitKey(0)
# auto threshold using Otsu
ret, mask = cv2.threshold(lab[:, :, 2], 0, 255,
                          cv2.THRESH_BINARY + cv2.THRESH_OTSU)
#display Binary
cv2.imshow("Binary", mask)
k = cv2.waitKey(0)
cv2.destroyAllWindows()
Output:
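The answer above stops at the binary mask; to actually get line segments out of it, one could feed the mask's Canny edges into the same HoughLinesP call the question used. This is only a sketch continuing from the mask above, not part of the original answer, and the parameter values are guesses.
# Sketch: run the question's Hough step on the Otsu mask (parameters are guesses).
edges = cv2.Canny(mask, 75, 150)
lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 50, minLineLength=50, maxLineGap=250)
if lines is not None:
    for line in lines:
        x1, y1, x2, y2 = line[0]
        cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 5)
cv2.imshow("Detected lines", img)
cv2.waitKey(0)
cv2.destroyAllWindows()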
I am trying to extract the vertical lines from the fabric image using Hough lines in OpenCV. I applied contrast enhancement to strengthen the lines and bilateral filtering to try to remove the other fabric textures. However, on applying HoughLines, the code detects lines all over the image. I tried playing around with the Hough parameters, but the results were the same.
Input image after applying histogram equalization and bilateral filter:
Here is the image after applying the hough line, red representing the detected lines.
Output showing hough detections:
What other approach can I try so that Hough does not also detect the minute fabric pattern as lines?
Here is the code I have:
import cv2
import numpy as np
import math

img = cv2.imread('fabric.jpg')  # placeholder path for the fabric image
img1 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img2 = cv2.equalizeHist(img1)
img3 = cv2.equalizeHist(img2)
img4 = cv2.equalizeHist(img3)
img5 = cv2.bilateralFilter(img4, 9, 75, 75)
cv2.imshow("threshold", img5)
edges = cv2.Canny(img4, 50, 127, apertureSize=3)
lines = cv2.HoughLines(edges, 1, math.pi/180.0, 200, np.array([]), 0, 0)
a, b, c = lines.shape
for i in range(a):
    rho = lines[i][0][0]
    theta = lines[i][0][1]
    a = math.cos(theta)
    b = math.sin(theta)
    x0, y0 = a*rho, b*rho
    pt1 = (int(x0+1000*(-b)), int(y0+1000*(a)))
    pt2 = (int(x0-1000*(-b)), int(y0-1000*(a)))
    cv2.line(img, pt1, pt2, (0, 0, 255), 2, cv2.LINE_AA)
cv2.imshow('image1', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
You need to threshold your equalized image and apply morphology to clean it up before doing the Canny edge detection and Hough line extraction. The following uses Python/OpenCV to do that processing.
Input:
import cv2
import numpy as np
import math
# read image
img = cv2.imread('fabric_equalized.png')
# convert to grayscale
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# threshold
thresh = cv2.threshold(gray,165,255,cv2.THRESH_BINARY)[1]
# apply morphology open then close to clean up and connect the white areas
kernel = np.ones((15,1), np.uint8)
morph = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
kernel = np.ones((17,3), np.uint8)
morph = cv2.morphologyEx(morph, cv2.MORPH_CLOSE, kernel)
# apply canny edge detection
edges = cv2.Canny(img, 175, 200)
# get hough lines
result = img.copy()
lines= cv2.HoughLines(edges, 1, math.pi/180.0, 165, np.array([]), 0, 0)
a,b,c = lines.shape
for i in range(a):
    rho = lines[i][0][0]
    theta = lines[i][0][1]
    a = math.cos(theta)
    b = math.sin(theta)
    x0, y0 = a*rho, b*rho
    pt1 = (int(x0+1000*(-b)), int(y0+1000*(a)))
    pt2 = (int(x0-1000*(-b)), int(y0-1000*(a)))
    cv2.line(result, pt1, pt2, (0, 0, 255), 2, cv2.LINE_AA)
# save resulting images
cv2.imwrite('fabric_equalized_thresh.jpg',thresh)
cv2.imwrite('fabric_equalized_morph.jpg',morph)
cv2.imwrite('fabric_equalized_edges.jpg',edges)
cv2.imwrite('fabric_equalized_lines.jpg',result)
# show thresh and result
cv2.imshow("thresh", thresh)
cv2.imshow("morph", morph)
cv2.imshow("edges", edges)
cv2.imshow("result", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Thresholded image:
Morphology cleaned image:
Edge image:
Resulting Hough Lines:
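Since the goal is specifically the vertical lines, a further option (not part of the answer above) is to filter the (rho, theta) pairs returned by HoughLines and keep only near-vertical ones. A minimal sketch; the 5-degree tolerance is an arbitrary choice.
# Sketch: keep only lines within ~5 degrees of vertical (theta near 0 or pi).
tol = np.deg2rad(5)
vertical = [lines[i] for i in range(lines.shape[0])
            if lines[i][0][1] < tol or lines[i][0][1] > np.pi - tol]
print("kept %d of %d lines" % (len(vertical), lines.shape[0]))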
To explain the question a bit: I have an image that already contains a white bounding box, as shown here:
Input image
What I need is to crop the part of the image surrounded by the bounding box.
findContours didn't seem to work here, so I tried something using the following code:
import cv2
import numpy as np
bounding_box_image = cv2.imread('PedestrianRectangles/1/grim.pgm')
edges = cv2.Canny(bounding_box_image, 50, 100) # apertureSize=3
cv2.imshow('edge', edges)
cv2.waitKey(0)
lines = cv2.HoughLinesP(edges, rho=0.5, theta=1 * np.pi / 180,
                        threshold=100, minLineLength=100, maxLineGap=50)
# print(len(lines))
for i in lines:
    for x1, y1, x2, y2 in i:
        # print(x1, y1, x2, y2)
        cv2.line(bounding_box_image, (x1, y1), (x2, y2), (0, 255, 0), 2)
cv2.imwrite('houghlines5.jpg', bounding_box_image)
without any success. Playing around with the parameters didn't help much either. The result of my code snippet is shown in the following image:
Output
I had the idea to do the cropping after the line detection, etc.
I am relatively new to OpenCV, so help would be appreciated. Is there a good or easy approach to this problem that I'm missing? Googling around didn't help, so any links or code snippets would be welcome.
Thanks to Silencer; with his help I was able to make it work, so I'll provide the code and hope it helps other people:
import cv2
import numpy as np
bounding_box_image = cv2.imread('PedestrianRectangles/1/grim.pgm')
grayimage = cv2.cvtColor(bounding_box_image, cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(grayimage, 254, 255, cv2.THRESH_BINARY)
cv2.imshow('mask', mask)
cv2.waitKey(0)
# note: this is the OpenCV 3.x signature; OpenCV 4.x returns only (contours, hierarchy)
image, contours, hierarchy = cv2.findContours(mask, cv2.RETR_LIST,
                                              cv2.CHAIN_APPROX_SIMPLE)
for contour in contours:
    if cv2.contourArea(contour) < 200:
        continue
    rect = cv2.minAreaRect(contour)
    box = cv2.boxPoints(rect)
    ext_left = tuple(contour[contour[:, :, 0].argmin()][0])
    ext_right = tuple(contour[contour[:, :, 0].argmax()][0])
    ext_top = tuple(contour[contour[:, :, 1].argmin()][0])
    ext_bot = tuple(contour[contour[:, :, 1].argmax()][0])
    roi_corners = np.array([box], dtype=np.int32)
    cv2.polylines(bounding_box_image, roi_corners, 1, (255, 0, 0), 3)
cv2.imshow('image', bounding_box_image)
cv2.waitKey(0)
cropped_image = grayimage[ext_top[1]:ext_bot[1], ext_left[0]:ext_right[0]]
cv2.imwrite('crop.jpg', cropped_image)
And here is the output.
The steps, in short:
1. Threshold to binary at 250
2. Find the contours in the binary image
3. Filter the contours by the height/width of their boundingRect
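The code above filters contours by area rather than by the boundingRect height/width mentioned in step 3; here is a hedged sketch of that variant, with the 50-pixel minimum sizes picked arbitrarily.
# Sketch: filter contours by the size of their bounding rectangle instead of area.
for contour in contours:
    x, y, w, h = cv2.boundingRect(contour)
    if w < 50 or h < 50:  # minimum sizes are arbitrary and need tuning
        continue
    cropped = grayimage[y:y + h, x:x + w]
    cv2.imwrite('crop_boundingrect.jpg', cropped)  # hypothetical output name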
I have written Python code using OpenCV and NumPy to detect red lines in a video, and it works fine: it detects the edges of the red lines. But I want to grab the part of the video between each pair of lines as images. How can I do that? I have now added an image: I want to extract the region between the two red lines from the video.
import cv2
import numpy as np
import matplotlib.pyplot as plt
video = cv2.VideoCapture("/home/ksourav/AGS/SampleVideos/Trail1.mp4")
while True:
    ret, frame = video.read()
    if not ret:
        video = cv2.VideoCapture("/home/ksourav/AGS/SampleVideos/Trail1.mp4")
        continue
    #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    low_red = np.array([5, 50, 50])
    up_red = np.array([10, 255, 255])
    mask = cv2.inRange(hsv, low_red, up_red)
    edges = cv2.Canny(mask, 100, 200, apertureSize=7, L2gradient=True)
    lines = cv2.HoughLinesP(edges, 9, np.pi/180, 250, maxLineGap=70)
    if lines is not None:
        for line in lines:
            x1, y1, x2, y2 = line[0]
            cv2.line(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
    cv2.imshow("frame", frame)
    key = cv2.waitKey(25)
    if key == 27:
        break
video.release()
cv2.destroyAllWindows()
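To actually grab the region between two detected lines, one approach (assuming the red lines are roughly horizontal) is to take the topmost and bottommost y-coordinates of the detected segments in a frame and slice the frame between them. This is only a rough sketch that could go inside the loop after the HoughLinesP call; the output file name and the assumption of two horizontal lines are mine, not from the question.
# Sketch: crop the strip between the topmost and bottommost detected red lines.
if lines is not None and len(lines) >= 2:
    ys = [y for line in lines for y in (line[0][1], line[0][3])]
    top, bottom = min(ys), max(ys)
    if bottom - top > 10:  # skip frames where the lines nearly coincide
        strip = frame[top:bottom, :]
        cv2.imwrite("between_lines.png", strip)  # hypothetical output name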