How to remove glare from images in opencv? - python

This mathematica code removes glare from an image:
img = Import["foo.png"]
Inpaint[img, Dilation[saturated, DiskMatrix[20]]]
as shown in the most upvoted answer here:
https://dsp.stackexchange.com/questions/1215/how-to-remove-a-glare-clipped-brightness-from-an-image
I want to use opencv instead of Mathematica to get the same result. How would I write equivalent code in opencv-python?

Here is how to do that in Python/OpenCV.
However, the OpenCV inpainting routines do not seem to work well, or at least not for my Python 3.7.5 and OpenCV 3.4.8.
Input:
import cv2
import numpy as np
# read image
img = cv2.imread('apple.png')
# convert to gray
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# threshold grayscale image to extract glare
mask = cv2.threshold(gray, 220, 255, cv2.THRESH_BINARY)[1]
# Optionally add some morphology close and open, if desired
#kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7,7))
#mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel, iterations=1)
#kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))
#mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=1)
# use mask with input to do inpainting
result = cv2.inpaint(img, mask, 21, cv2.INPAINT_TELEA)
# write result to disk
cv2.imwrite("apple_mask.png", mask)
cv2.imwrite("apple_inpaint.png", result)
# display it
cv2.imshow("IMAGE", img)
cv2.imshow("GRAY", gray)
cv2.imshow("MASK", mask)
cv2.imshow("RESULT", result)
cv2.waitKey(0)
Thresholded image:
Result:
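For a closer match to the Mathematica code, note that it dilates the saturated-pixel mask (Dilation[saturated, DiskMatrix[20]]) before inpainting. Below is a minimal sketch of that extra step; the 41x41 elliptical kernel is an assumption intended to roughly correspond to DiskMatrix[20], so tune it for your image.
import cv2
# read image and threshold the near-saturated pixels as above
img = cv2.imread('apple.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
mask = cv2.threshold(gray, 220, 255, cv2.THRESH_BINARY)[1]
# grow the mask, analogous to Dilation[saturated, DiskMatrix[20]]
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (41,41))
mask = cv2.dilate(mask, kernel)
# inpaint over the enlarged glare region
result = cv2.inpaint(img, mask, 21, cv2.INPAINT_TELEA)
cv2.imwrite("apple_inpaint_dilated.png", result)
Enlarging the mask gives the inpainting some margin around the glare, which is what the Mathematica answer relies on.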

Related

How can I remove the bright glare regions in image

I have some tomato images with bright shadow on the tomatoes. I want to remove/reduce these bright shadow points. Are there any suggestions?
I tried the code below, but it did not solve my problem:
def decrease_brightness(img, value=30):
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)
    lim = 255 - value
    v[v >= lim] -= value
    final_hsv = cv2.merge((h, s, v))
    img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)
    return img

image = decrease_brightness(image, value=50)
Here is how to do the inpainting in Python/OpenCV.
Note that shadows are dark; what you want to remove are bright glare regions. Please use the correct terms so that you do not confuse others on the forum.
Read the input
Threshold on the gray background using cv2.inRange()
Apply morphology to close and dilate
Floodfill the outside with black to make a mask image
Use the mask to do the inpainting (two methods)
Save the results
Input:
import cv2
import numpy as np
# read image
img = cv2.imread('tomato.jpg')
hh, ww = img.shape[:2]
# threshold
lower = (150,150,150)
upper = (240,240,240)
thresh = cv2.inRange(img, lower, upper)
# apply morphology close and open to make mask
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7,7))
morph = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=1)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (25,25))
morph = cv2.morphologyEx(morph, cv2.MORPH_DILATE, kernel, iterations=1)
# floodfill the outside with black
black = np.zeros([hh + 2, ww + 2], np.uint8)
mask = morph.copy()
mask = cv2.floodFill(mask, black, (0,0), 0, 0, 0, flags=8)[1]
# use mask with input to do inpainting
result1 = cv2.inpaint(img, mask, 101, cv2.INPAINT_TELEA)
result2 = cv2.inpaint(img, mask, 101, cv2.INPAINT_NS)
# write result to disk
cv2.imwrite("tomato_thresh.jpg", thresh)
cv2.imwrite("tomato_morph.jpg", morph)
cv2.imwrite("tomato_mask.jpg", mask)
cv2.imwrite("tomato_inpaint1.jpg", result1)
cv2.imwrite("tomato_inpaint2.jpg", result2)
# display it
cv2.imshow("IMAGE", img)
cv2.imshow("THRESH", thresh)
cv2.imshow("MORPH", morph)
cv2.imshow("MASK", mask)
cv2.imshow("RESULT1", result1)
cv2.imshow("RESULT2", result2)
cv2.waitKey(0)
Threshold Image:
Morphology and Floodfill Image:
Mask Image:
Inpaint Telea:
Inpaint Navier-Stokes:

Image Enhancing in Python

So, I have been trying to enhance images so I can use text recognition, but since the images are extremely low quality and I am a beginner, I haven't been able to do a great job.
Below is the original image:
Original Image:
First I resized the image
img = cv2.imread('test.jpg')
cv2.imshow('Original',img)
cv2.waitKey(0)
img = cv2.resize(img,(500,500),interpolation = cv2.INTER_AREA)
cv2.imshow('Resized',img)
cv2.waitKey(0)
then I converted the image to grayscale
img_gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
cv2.imshow('GRAY',img_gray)
cv2.waitKey(0)
I did some thresholding
ret, img_threshold = cv2.threshold(img_gray, 70, 255, cv2.THRESH_BINARY_INV)
cv2.imshow('THRESHOLD', img_threshold)
cv2.waitKey(0)
and I used morphology to get a better image
kernel = cv2.getStructuringElement(cv2.MORPH_CROSS,(6,6))
opening = cv2.morphologyEx(img_threshold, cv2.MORPH_OPEN, kernel, iterations = 2)
kernel = np.ones((9,9),np.uint8)
open_img = cv2.morphologyEx(opening, cv2.MORPH_OPEN, kernel, iterations = 3)
cv2.imshow('OPENING',open_img)
cv2.waitKey(0)
My final product is below:
Final Image 2
My question is: how can I remove the white chunks and the line crossing the numbers?
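One common way to drop the small white chunks is to filter connected components by area. Below is a minimal sketch, assuming the morphology output from above is saved as 'open_img.png' (a hypothetical file name) and that the chunks are smaller than the digits; a line that touches the digits would need separate handling (e.g. directional morphology), so this is only a starting point.
import cv2
import numpy as np
# read the binary image produced by the steps above (assumed file name)
binary = cv2.imread('open_img.png', cv2.IMREAD_GRAYSCALE)
# label each white blob and measure its area
num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(binary, connectivity=8)
# keep only blobs above an assumed area threshold; tune for your image
min_area = 500
cleaned = np.zeros_like(binary)
for label in range(1, num_labels):   # label 0 is the background
    if stats[label, cv2.CC_STAT_AREA] >= min_area:
        cleaned[labels == label] = 255
cv2.imshow('CLEANED', cleaned)
cv2.waitKey(0)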

How can I retain background after dilating text in image

import cv2
import numpy as np
# Load image, grayscale, Gaussian blur, Otsu's threshold
image = cv2.imread('1.png')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (7,7), 0)
thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
# Create rectangular structuring element and dilate
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
dilate = cv2.dilate(thresh, kernel, iterations=4)
cv2.imshow('dilate', dilate)
cv2.waitKey()
I am trying to mask the text elements in an image and return an image with just the remaining portions. I have applied thresholding and dilating, but how can I retain the background?
Image after thresholding and dilating
Original image:
Here is a simple approach:
Using the inverted dilated image cv2.bitwise_not(dilate), create a mask over the original image.
res = cv2.bitwise_and(image, image, mask=cv2.bitwise_not(dilate))
In the above image you have all text regions and their boundaries masked out.
Now replace those masked-out regions with the background of your original image. To do that, first note down the coordinates of the text regions in mask_ind, then replace the pixel values in those regions with the background of the original image, image[0,0]:
mask_ind = (dilate == 255)
res[mask_ind] = image[0,0]
cv2.imshow('res', res)
cv2.waitKey(0)
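For reference, here is a minimal end-to-end sketch that puts the question's thresholding/dilation together with the two steps above; the input file name '1.png' and the use of image[0,0] as the background colour are taken from the snippets above, while the output file name is just an assumption.
import cv2
# threshold and dilate the text, as in the question
image = cv2.imread('1.png')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (7,7), 0)
thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
dilate = cv2.dilate(thresh, kernel, iterations=4)
# black out the dilated text regions, then fill them with the corner background colour
res = cv2.bitwise_and(image, image, mask=cv2.bitwise_not(dilate))
res[dilate == 255] = image[0,0]
cv2.imwrite('background_only.png', res)
cv2.imshow('res', res)
cv2.waitKey(0)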

Using pytesseract to get text from an image

I'm trying to use pytesseract to convert some images into text. The images are very basic and I tried using some preprocessing:
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.bitwise_not(gray)
gray = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
The original image looks like this:
The resulting image looks like this:
I do this for a bunch of numbers with the same font in the same location; here are the results:
It still gives no text in the output. For a few of the images it does, but not for all, even though the images look nearly identical.
Here is a snippet of the code I'm using:
def checkCurrentState():
    """image = pyautogui.screenshot()
    image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    cv2.imwrite("screenshot.png", image)"""
    image = cv2.imread("screenshot.png")
    checkNumbers(image)

def checkNumbers(image):
    numbers = []
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.bitwise_not(gray)
    gray = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
    for i in storeLocations:
        cropped = gray[i[1]:i[1]+storeHeight, i[0]:i[0]+storeWidth]
        number = pytesseract.image_to_string(cropped)
        numbers.append(number)
        print(number)
        cv2.imshow("Screenshot", cropped)
        cv2.waitKey(0)
To perform OCR on an image, it's important to preprocess the image. The idea is to obtain a processed image where the text to extract is in black with the background in white. Here's a simple approach using OpenCV and Pytesseract OCR.
To do this, we convert to grayscale, apply a slight Gaussian blur, then Otsu's threshold to obtain a binary image. From here, we can apply morphological operations to remove noise. We perform text extraction using the --psm 6 configuration option to assume a single uniform block of text. Take a look here for more options.
Here's a visualization of each step:
Input image
Convert to grayscale -> Gaussian blur
Otsu's threshold -> Morph open to remove noise
Result from Pytesseract OCR
1100
Code
import cv2
import pytesseract
pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"
# Grayscale, Gaussian blur, Otsu's threshold
image = cv2.imread('1.png')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (3,3), 0)
thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
# Morph open to remove noise
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=1)
# Perform text extraction
data = pytesseract.image_to_string(opening, lang='eng', config='--psm 6')
print(data)
cv2.imshow('blur', blur)
cv2.imshow('thresh', thresh)
cv2.imshow('opening', opening)
cv2.waitKey()

How can I clean this picture up (opencv-python)?

I am really new to opencv. How can I remove the noise in the background without losing info?
I started with this image and Otsu-thresholded it. I've tried erosion, dilation, and bilateral filtering. My goal is to get a rectangle on the borders so I can perspective-transform the thresholded picture, but it has trouble finding contours. Or maybe there is a different and better approach?
Here is one way to do that in Python/OpenCV.
Read the input
Blur it
Convert to HSV and extract the saturation channel
Threshold the saturation image
Clean it up with morphology close and open and save as a mask
Recreate your OTSU threshold image
Write black to OTSU image where mask is black (zero)
For comparison, write black to Input image where mask is black (zero)
Save results
Input:
import cv2
import numpy as np
# read image
img = cv2.imread('circuit_board.jpg')
# blur
blur = cv2.GaussianBlur(img, (3,3), 0)
# convert to hsv and get saturation channel
sat = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)[:,:,1]
# threshold saturation channel
thresh = cv2.threshold(sat, 50, 255, cv2.THRESH_BINARY)[1]
# apply morphology close and open to make mask
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9,9))
morph = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=1)
mask = cv2.morphologyEx(morph, cv2.MORPH_OPEN, kernel, iterations=1)
# do OTSU threshold to get circuit image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
otsu = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]
# write black to otsu image where mask is black
otsu_result = otsu.copy()
otsu_result[mask==0] = 0
# write black to input image where mask is black
img_result = img.copy()
img_result[mask==0] = 0
# write result to disk
cv2.imwrite("circuit_board_mask.png", mask)
cv2.imwrite("circuit_board_otsu.png", otsu)
cv2.imwrite("circuit_board_otsu_result.png", otsu_result)
cv2.imwrite("circuit_board_img_result.png", img_result)
# display it
cv2.imshow("IMAGE", img)
cv2.imshow("SAT", sat)
cv2.imshow("MASK", mask)
cv2.imshow("OTSU", otsu)
cv2.imshow("OTSU_RESULT", otsu_result)
cv2.imshow("IMAGE_RESULT", img_result)
cv2.waitKey(0)
Mask image:
OTSU threshold image:
OTSU Result:
Image Result:
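If the goal is still the perspective transform mentioned in the question, one possible next step (not part of the answer above) is to find the largest contour in the mask and fit a rectangle to it. A minimal sketch, assuming the mask saved above as 'circuit_board_mask.png' and that the board is its largest white region:
import cv2
import numpy as np
# read the mask produced above
mask = cv2.imread('circuit_board_mask.png', cv2.IMREAD_GRAYSCALE)
# find the largest external contour, which should be the board outline
cnts = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]   # OpenCV 3.x vs 4.x return values
board = max(cnts, key=cv2.contourArea)
# fit a rotated rectangle; its four corners could feed cv2.getPerspectiveTransform
rect = cv2.minAreaRect(board)
corners = cv2.boxPoints(rect).astype(np.float32)
print(corners)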
