import cv2
import numpy as np
# Load image, grayscale, Gaussian blur, Otsu's threshold
image = cv2.imread('1.png')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (7,7), 0)
thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
# Create rectangular structuring element and dilate
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
dilate = cv2.dilate(thresh, kernel, iterations=4)
cv2.imshow('dilate', dilate)
cv2.waitKey()
I am trying to mask the text elements in an image and return an image with just the remaining portions. I have applied thresholding and dilating, but how can I retain the background?
Image after thresholding and dilating
Original image:
Here is a simple approach:
Using the inverted dilated image cv2.bitwise_not(dilate), create a mask over the original image.
res = cv2.bitwise_and(image, image, mask=cv2.bitwise_not(dilate))
In the above image, all text regions and their boundaries are masked out.
Now replace those masked-out regions with the background of your original image. To do that, first I noted down the coordinates of the text regions in mask_ind, then replaced the pixel values in those regions with the background color of the original image, image[0,0]:
mask_ind = (dilate == 255)
res[mask_ind] = image[0,0]
cv2.imshow('res', res)
cv2.waitKey()
I am trying to read text from this image using Python with OpenCV. However, it is not able to read it.
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
img=cv.imread(file_path,0)
img = cv.medianBlur(img,5)
ret,th1 = cv.threshold(img,127,255,cv.THRESH_BINARY)
th2 =cv.adaptiveThreshold(img,255,cv.ADAPTIVE_THRESH_MEAN_C,\
cv.THRESH_BINARY,11,2)
th3 = cv.adaptiveThreshold(img,255,cv.ADAPTIVE_THRESH_GAUSSIAN_C,\
cv.THRESH_BINARY,11,2)
titles = ['Original Image', 'Global Thresholding (v = 127)',
'Adaptive Mean Thresholding', 'Adaptive Gaussian Thresholding']
images = [img, th1, th2, th3]
for i in range(4):
    plt.subplot(2,2,i+1),plt.imshow(images[i],'gray')
    plt.title(titles[i])
    plt.xticks([]),plt.yticks([])
plt.show()
Is there any way to do this?
Instead of working on the grayscale image, working on the saturation channel of the HSV color space makes the subsequent steps easier.
img = cv2.imread(image_path_to_captcha)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
s_component = hsv[:,:,1]
s_component
Next, apply a Gaussian blur of appropriate kernel size and sigma value, and later threshold.
blur = cv2.GaussianBlur(s_component,(7,7), 7)
ret,th3 = cv2.threshold(blur,127,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
th3
Next, find contours in the image above and preserve those above a certain area threshold by drawing them on the black image variable, which will be used as a mask later on.
contours, hierarchy = cv2.findContours(th3, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
black = np.zeros((img.shape[0], img.shape[1]), np.uint8)
for contour in contours:
    if cv2.contourArea(contour) > 600:
        cv2.drawContours(black, [contour], 0, 255, -1)
black
Use the black image variable as a mask over the thresholded image:
res = cv2.bitwise_and(th3, th3, mask = black)
res
Finally, apply morphological erosion to thin the strokes in the result above:
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))
erode = cv2.erode(res, kernel, iterations=1)
erode
The end result is still not quite what you expect. You can also try experimenting with different morphology operations prior to drawing the contours.
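If a true skeleton is wanted rather than plain erosion, OpenCV's extra modules provide a thinning function. This is only a sketch, and it assumes the opencv-contrib-python package (which supplies cv2.ximgproc) is installed:
# Assumes opencv-contrib-python is installed; cv2.ximgproc is not in the base package
thin = cv2.ximgproc.thinning(res, thinningType=cv2.ximgproc.THINNING_ZHANGSUEN)
cv2.imshow('thin', thin)
cv2.waitKey(0)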
EDIT
You can perform distance transform on the above image and use the result:
dist = cv2.distanceTransform(res, cv2.DIST_L2, 3)
dst = cv2.normalize(dist, dst=None, alpha=0, beta=255,norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
dst
I am trying to remove text from images that has a black border with white fill. Take the image below as an example.
I have tried a few options utilizing OpenCV and skimage's inpaint:
import cv2
from skimage.restoration import inpaint
img = cv2.imread('Documents/test_image.png')
mask = cv2.threshold(img, 210, 255, cv2.THRESH_BINARY)[1][:,:,0]
dst = cv2.inpaint(img, mask, 7, cv2.INPAINT_TELEA)
image_result = inpaint.inpaint_biharmonic(img, mask,
multichannel=True)
cv2.imshow('image',img)
cv2.imshow('mask',mask)
cv2.imshow('dst',dst)
cv2.imshow('image_result',image_result)
cv2.waitKey(0)
It seems like the inpainting is just trying to fill with black as that is what it is identifying as being around the areas of interest. What I would like to do is remove the white text and black borders completely, or secondarily try to fill the white with more information from surrounding colors than just the black.
Here is the best solution I could come up with; I'm still open to a better approach from anyone with more experience.
mask = cv2.threshold(img, 245, 255, cv2.THRESH_BINARY)[1][:,:,0]
new_mask = cv2.dilate(mask, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10,10)))
dst = cv2.inpaint(img, new_mask, 7, cv2.INPAINT_TELEA)
Here are two inpainting methods in Python/OpenCV. Note that I use the saturation channel to create the threshold, since white and black have zero saturation, in principle.
Input:
import cv2
import numpy as np
# read input
img = cv2.imread('white_black_text.png')
# convert to hsv and extract saturation
hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
sat = hsv[:,:,1]
# threshold and invert
thresh = cv2.threshold(sat, 10, 255, cv2.THRESH_BINARY)[1]
thresh = 255 - thresh
# apply morphology dilate
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15,15))
thresh = cv2.morphologyEx(thresh, cv2.MORPH_DILATE, kernel)
# do inpainting
result1 = cv2.inpaint(img,thresh,11,cv2.INPAINT_TELEA)
result2 = cv2.inpaint(img,thresh,11,cv2.INPAINT_NS)
# save results
cv2.imwrite('white_black_text_threshold.png', thresh)
cv2.imwrite('white_black_text_inpainted1.png', result1)
cv2.imwrite('white_black_text_inpainted2.png', result2)
# show results
cv2.imshow('thresh',thresh)
cv2.imshow('result1',result1)
cv2.imshow('result2',result2)
cv2.waitKey(0)
cv2.destroyAllWindows()
Threshold and morphology cleaned result:
Result 1 (Telea):
Result 2 (Navier Stokes):
from pdf2image import convert_from_path
import cv2,numpy,os
def pil_to_cv2(image):
    open_cv_image = numpy.array(image)
    return open_cv_image[:, :, ::-1].copy()
images = convert_from_path('test.pdf')
cv_h = [pil_to_cv2(i) for i in images]
for img in cv_h:
    #function_to_crop()
    cv2.imwrite('modified.png', img)
How can I remove the extra whiteness from the image (top, sides, and bottom) without actually cutting into the drawing? The drawings from the PDF are of different sizes, so I can't crop the images by a fixed amount.
Ideally,the output would look like this
Here is another way to do that in Python/OpenCV.
Read the image
Convert to gray and invert the polarity
Threshold
Apply morphology close to fill in holes and make one solid region
Get the outer contour and its bounding box
Use the bounding box to crop the image using Numpy slicing
Save the result
Input:
import cv2
import numpy as np
# read image
img = cv2.imread('multipower.png')
# convert to grayscale
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# invert gray image
gray = 255 - gray
# threshold
thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY)[1]
# apply morphology close to fill in holes and make one solid region
kernel = np.ones((75,75), np.uint8)
mask = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
# get contours (presumably just one around the nonzero pixels)
contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
cntr = contours[0]
x,y,w,h = cv2.boundingRect(cntr)
# draw contour on input
contour_img = img.copy()
cv2.drawContours(contour_img,[cntr],0,(0,0,255),3)
# crop to bounding rectangle
crop = img[y:y+h, x:x+w]
# save cropped image
cv2.imwrite('multipower_thresh.png',thresh)
cv2.imwrite('multipower_mask.png',mask)
cv2.imwrite('multipower_contour.png',contour_img)
cv2.imwrite('multipower_cropped.png',crop)
# show the images
cv2.imshow("THRESH", thresh)
cv2.imshow("MASK", mask)
cv2.imshow("CONTOUR", contour_img)
cv2.imshow("CROP", crop)
cv2.waitKey(0)
cv2.destroyAllWindows()
Thresholded Image:
Morphology closed image:
Contour image:
Result:
import cv2 as cv
import numpy as np
# read the image and convert to grayscale
frame = cv.imread('7dcoI.png')
frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
# threshold so the drawing becomes black (0) on a white background
mask = cv.threshold(frame_gray, 85, 255, cv.THRESH_BINARY)[1]
rows, cols = mask.shape
# find the rows and columns that contain at least one drawing (black) pixel
non_empty_columns = np.where(mask.min(axis=0) == 0)[0]
non_empty_rows = np.where(mask.min(axis=1) == 0)[0]
# bounding box of the drawing, clamped to the image size
cropBox = (min(non_empty_rows), min(max(non_empty_rows), rows), min(non_empty_columns), min(max(non_empty_columns), cols))
# crop to the bounding box and save
cropped = frame[cropBox[0]:cropBox[1]+1, cropBox[2]:cropBox[3]+1, :]
cv.imwrite('out_mask.png', cropped)
I am really new to opencv. How can I remove the noise in the background without losing info?
I started with this image and Otsu-thresholded it. I've tried erosion, dilation, and bilateral filtering. My goal is to get a rectangle on the borders so I can perspective-transform the thresholded picture, but it has trouble finding contours. Or is there a different and better approach?
Here is one way to do that in Python/OpenCV.
Read the input
Blur it
Convert to HSV and extract the saturation channel
Threshold the saturation image
Clean it up with morphology close and open and save as a mask
Recreate your OTSU threshold image
Write black to OTSU image where mask is black (zero)
For comparison, write black to Input image where mask is black (zero)
Save results
Input:
import cv2
import numpy as np
# read image
img = cv2.imread('circuit_board.jpg')
# blur
blur = cv2.GaussianBlur(img, (3,3), 0)
# convert to hsv and get saturation channel
sat = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)[:,:,1]
# threshold saturation channel
thresh = cv2.threshold(sat, 50, 255, cv2.THRESH_BINARY)[1]
# apply morphology close and open to make mask
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9,9))
morph = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=1)
mask = cv2.morphologyEx(morph, cv2.MORPH_OPEN, kernel, iterations=1)
# do OTSU threshold to get circuit image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
otsu = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]
# write black to otsu image where mask is black
otsu_result = otsu.copy()
otsu_result[mask==0] = 0
# write black to input image where mask is black
img_result = img.copy()
img_result[mask==0] = 0
# write result to disk
cv2.imwrite("circuit_board_mask.png", mask)
cv2.imwrite("circuit_board_otsu.png", otsu)
cv2.imwrite("circuit_board_otsu_result.png", otsu_result)
cv2.imwrite("circuit_board_img_result.png", img_result)
# display it
cv2.imshow("IMAGE", img)
cv2.imshow("SAT", sat)
cv2.imshow("MASK", mask)
cv2.imshow("OTSU", otsu)
cv2.imshow("OTSU_RESULT", otsu_result)
cv2.imshow("IMAGE_RESULT", img_result)
cv2.waitKey(0)
Mask image:
OTSU threshold image:
OTSU Result:
Image Result:
I am new to OpenCV so I really need your help. I have a bunch of images like this one:
I need to detect the rectangle on the image, extract the text part from it and save it as a new image.
Can you please help me with this?
Thank you!
Just to add to Danyal's answer, I have added example code with the steps written in comments. For this image you don't even need to perform morphological opening, but for this kind of noise in the image it is usually recommended. Cheers!
import cv2
import numpy as np
# Read the image and create a blank mask
img = cv2.imread('napis.jpg')
h,w = img.shape[:2]
mask = np.zeros((h,w), np.uint8)
# Transform to gray colorspace and invert Otsu threshold the image
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
# ***OPTIONAL FOR THIS IMAGE
### Perform opening (erosion followed by dilation)
#kernel = np.ones((2,2),np.uint8)
#opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
# ***
# Search for contours, select the biggest and draw it on the mask
_, contours, hierarchy = cv2.findContours(thresh,  # if you use opening then change "thresh" to "opening"
                                           cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cnt = max(contours, key=cv2.contourArea)
cv2.drawContours(mask, [cnt], 0, 255, -1)
# Perform a bitwise operation
res = cv2.bitwise_and(img, img, mask=mask)
########### The result is a ROI with some noise
########### Clearing the noise
# Create a new mask
mask = np.zeros((h,w), np.uint8)
# Transform the resulting image to gray colorspace and Otsu threshold the image
gray = cv2.cvtColor(res,cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
# Search for contours and select the biggest one again
_, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
cnt = max(contours, key=cv2.contourArea)
# Draw it on the new mask and perform a bitwise operation again
cv2.drawContours(mask, [cnt], 0, 255, -1)
res = cv2.bitwise_and(img, img, mask=mask)
# If you will use pytesseract it is wise to make an additional white border
# so that the letters aren't on the borders
x,y,w,h = cv2.boundingRect(cnt)
cv2.rectangle(res,(x,y),(x+w,y+h),(255,255,255),1)
# Crop the result
final_image = res[y:y+h+1, x:x+w+1]
# Display the result
cv2.imshow('img', final_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result:
One way to do this (if the rectangle sizes are somewhat predictable) is:
Convert the image to black and white
Invert the image
Perform morphological opening on the image from (2) with a horizontal line / rectangle (I tried with 2x30).
Perform morphological opening on the image from (2) with a vertical line (I tried it with 15x2).
Add the images from (3) and (4). You should only have a white rectangle now. You can then remove all rows and columns of the original image that are entirely zero in this rectangle image; a sketch of these steps follows.
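Here is a minimal sketch of those five steps. It reuses the 'napis.jpg' input from the answer above, picks an Otsu threshold for the black-and-white conversion, and uses the 2x30 and 15x2 line kernels mentioned; these choices are assumptions and will likely need tuning for other images.
import cv2
import numpy as np
# (1) read and convert to black and white (Otsu threshold is an assumption here)
img = cv2.imread('napis.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
bw = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
# (2) invert so the rectangle border becomes white
inv = 255 - bw
# (3) opening with a horizontal line keeps only long horizontal runs
horiz_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (30, 2))
horiz = cv2.morphologyEx(inv, cv2.MORPH_OPEN, horiz_kernel)
# (4) opening with a vertical line keeps only long vertical runs
vert_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 15))
vert = cv2.morphologyEx(inv, cv2.MORPH_OPEN, vert_kernel)
# (5) combine; ideally only the rectangle outline remains
rect = cv2.add(horiz, vert)
# drop the rows and columns that are entirely zero in the rectangle image
rows = np.where(rect.max(axis=1) > 0)[0]
cols = np.where(rect.max(axis=0) > 0)[0]
crop = img[rows.min():rows.max() + 1, cols.min():cols.max() + 1]
cv2.imshow('rectangle', rect)
cv2.imshow('crop', crop)
cv2.waitKey(0)
cv2.destroyAllWindows()
If rect comes out completely black, the line kernels are longer than the rectangle edges and should be shortened.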