Find contours based on edges and output equipment label - python

I am trying to code a tool that automatically identifies and alphabetically sorts images based on their equipment number (19-V1083AI). I used the pytesseract library to convert the image to a string after the contours of the equipment label were identified. Although the code runs without errors, it never outputs the equipment number. This is my first time using the pytesseract library and the goodFeaturesToTrack function; any help would be greatly appreciated!
Original Image:
import numpy as np
import cv2
import imutils  # resize image
import pytesseract  # convert image to string
from matplotlib import pyplot as plt
pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"
# Read the image file
image = cv2.imread('Car Images/s3.JPG')
# Resize the image - change width to 500
image = imutils.resize(image, width=500)
# Display the original image
cv2.imshow("Original Image", image)
cv2.waitKey(0)
# RGB to Gray scale conversion
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imshow("1 - Grayscale Conversion", gray)
cv2.waitKey(0)
# Noise removal with iterative bilateral filter(removes noise while preserving edges)
gray = cv2.bilateralFilter(gray, 11, 17, 17)
cv2.imshow("2 - Bilateral Filter", gray)
cv2.waitKey(0)
corners = cv2.goodFeaturesToTrack(gray, 60, 0.001, 10)
corners = np.int0(corners)
for i in corners:
    x, y = i.ravel()
    cv2.circle(image, (x, y), 0, 255, -1)
# locate the drawn (blue, in BGR) corner markers
coord = np.where(np.all(image == (255, 0, 0), axis=-1))
plt.imshow(image)
# Use tesseract to convert the image into a string
text = pytesseract.image_to_string(image, lang='eng')
print("Equipment Number is:", text)
plt.show()
Output:

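One thing worth checking here: image_to_string is run on the whole resized photo, with the corner markers already drawn onto it, so Tesseract has to pick the label text out of the entire scene. Below is a minimal sketch (untested, and assuming the detected corners actually cluster on the label) of cropping to the corners' bounding box before OCR:

import numpy as np
import cv2
import pytesseract

# Hypothetical follow-on to the snippet above: crop the grayscale image to
# the bounding box of the goodFeaturesToTrack corners and OCR only that region
pts = corners.reshape(-1, 2).astype(np.int32)
x, y, w, h = cv2.boundingRect(pts)
label_roi = gray[y:y + h, x:x + w]
text = pytesseract.image_to_string(label_roi, lang='eng')
print("Equipment Number is:", text.strip())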
Related

Remove unwanted lines and smooth CAPTCHA text

I am trying to extract the text from a captcha image using OpenCV, to build a dataset for training. My extracted captcha is not smooth or clean. Can someone suggest the best way to remove the unwanted noise and get smooth captcha text?
Right now, I first add some padding, threshold the image, and find the contours.
Issues:
1. Adding padding with BORDER_REPLICATE may add some noise.
2. The thresholding is not smooth and clean.
Original Image:
Threshold Image:
Contour Image:
Desired Image/Result:
Final separated numbers required:
Code:
import os
import os.path
import cv2
import glob
import imutils
import matplotlib.pyplot as plt

CAPTCHA_IMAGE_FOLDER = "generated_captcha_images"
OUTPUT_FOLDER = "extracted_letter_images"

# Get a list of all the captcha images we need to process
captcha_image_files = glob.glob(os.path.join(CAPTCHA_IMAGE_FOLDER, "*"))
counts = {}

# loop over the image paths
for (i, captcha_image_file) in enumerate(captcha_image_files):
    print("[INFO] processing image {}/{}".format(i + 1, len(captcha_image_files)))
    # Since the filename contains the captcha text (i.e. "2A2X.png" has the text "2A2X"),
    # grab the base filename as the text
    filename = os.path.basename(captcha_image_file)
    captcha_correct_text = os.path.splitext(filename)[0]
    # Load the image and convert it to grayscale
    image = cv2.imread(captcha_image_file)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Add some extra padding around the image
    gray = cv2.copyMakeBorder(gray, 8, 8, 8, 8, cv2.BORDER_REPLICATE)
    # threshold the image (convert it to pure black and white)
    thresh = cv2.threshold(gray, 36, 255, cv2.THRESH_BINARY_INV)[1]
    # find the contours (continuous blobs of pixels) in the image
    contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Hack for compatibility with different OpenCV versions
    contours = contours[1] if imutils.is_cv3() else contours[0]
You might be looking for a skeleton rather than contours here. Check out the skeletonize function in scikit-image:
import numpy as np
from skimage.morphology import skeletonize

# skeletonize expects a binary (0/1) image
skeleton = skeletonize(thresh.astype(np.float32) / 255)

f, axs = plt.subplots(2, 1, figsize=(10, 6))
for ax, img, t in zip(axs, [thresh, skeleton], ['Thresholded Image', 'SKImage Skeleton']):
    ax.imshow(img, 'gray')
    ax.set_title(t)
    ax.axis('off')
plt.show()
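skeletonize returns a boolean array, so to keep working on the thinned strokes with OpenCV (for example, re-running findContours on them) it needs converting back to uint8 first. A minimal sketch, assuming OpenCV 4's (contours, hierarchy) return signature:

import numpy as np
import cv2

# Scale the boolean skeleton up to a 0/255 uint8 image for OpenCV
skeleton_u8 = skeleton.astype(np.uint8) * 255
contours = cv2.findContours(skeleton_u8, cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)[0]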

Unable to find correct circles with cv2.HoughCircles

When I run the code below, I am not able to find consistent circles in the image. The image I am using as input is:
import numpy as np
import matplotlib.pyplot as plt
import cv2

img = cv2.imread("pipe.jpg")
# convert the image to RGB
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# copy the RGB image
cimg = img.copy()
# convert the RGB image to grayscale
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# detect circles using the Hough transform
circles = cv2.HoughCircles(image=img, method=cv2.HOUGH_GRADIENT, dp=3,
                           minDist=60, param1=100, param2=39, maxRadius=200)
for co, i in enumerate(circles[0, :], start=1):
    i = [round(num) for num in i]
    cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)
    cv2.circle(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)
print("Number of circles detected:", co)
plt.imshow(cimg)
plt.show()
The result I get is:
As a pre-processing step, you usually smooth the image prior to the detection. Not smoothing will prompt a lot of false detections. Sometimes you don't see pre-smoothing in some tutorials because the images dealt with have nice clean edges with very little noise. Try using a median blur or Gaussian blur before you perform the detection.
Therefore, try something like:
import cv2
img = cv2.imread("pipe.jpg")
# convert the image to RGB
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# copy the RGB image
cimg = img.copy()
# convert the RGB image to grayscale
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
### NEW - Smooth the image first
blur = cv2.GaussianBlur(img, (5, 5), 0)
# or try
# blur = cv2.medianBlur(img, 5)
# detect circles using hough transformation
circles = cv2.HoughCircles(image=blur, method=cv2.HOUGH_GRADIENT, dp=3,
                           minDist=60, param1=100, param2=39, maxRadius=200)
Other than that, getting the detection to find all of the circles in the image comes down to tuning the hyperparameters of the circular Hough transform through trial and error; a sketch of such a sweep follows.
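For example (assuming the blur image from the snippet above), you can sweep the accumulator threshold param2 and compare how many circles each value yields; lower values admit more, and noisier, detections:

import cv2

# Hypothetical sweep over the accumulator threshold
for param2 in (30, 35, 39, 45, 50):
    circles = cv2.HoughCircles(image=blur, method=cv2.HOUGH_GRADIENT, dp=3,
                               minDist=60, param1=100, param2=param2,
                               maxRadius=200)
    found = 0 if circles is None else circles.shape[1]
    print("param2={}: {} circles detected".format(param2, found))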

Recognize specific numbers from table image with Pytesseract OCR

I want to read a column of numbers from the attached image (PNG file).
My code is
import cv2
import pytesseract
import os
img = cv2.imread(os.path.join(image_path, image_name), 0)
config= "-c
tessedit_char_whitelist=01234567890.:ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
pytesseract.image_to_string(img, config=config)
This code gives me the output string: 'n113\nun\n1.08'. As we can see, there are two problems:
It fails to recognize a decimal point in 1.13 (see attached picture).
It totally cannot read 1.11 (see attached picture). It just returns 'nun'.
What is a solution to these problems?
Bests
You need to preprocess the image. A simple approach is to resize the image, convert it to grayscale, and obtain a binary image using Otsu's threshold. From here we can apply a slight Gaussian blur, then invert the image so the desired text is white on a black background. Here's the processed image, ready for OCR:
Result from OCR
1.13
1.11
1.08
Code
import cv2
import pytesseract
import imutils
pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"
# Resize, grayscale, Otsu's threshold
image = cv2.imread('1.png')
image = imutils.resize(image, width=400)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
# Blur and perform text extraction
thresh = 255 - cv2.GaussianBlur(thresh, (5,5), 0)
data = pytesseract.image_to_string(thresh, lang='eng',config='--psm 6')
print(data)
cv2.imshow('thresh', thresh)
cv2.waitKey()
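If stray characters still show up, the whitelist idea from the question can be layered on top of this preprocessing. Note that with Tesseract 4+ the whitelist has to be passed via -c, alongside the --psm flag (an untested variant):

data = pytesseract.image_to_string(
    thresh, lang='eng',
    config='--psm 6 -c tessedit_char_whitelist=0123456789.')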

Python - pytesseract not consistent for similar images

For example, this image returns "Sieteary ear",
while this image returns the correct answer.
The only difference between the two images is 2 pixels in the height.
I have tried applying some thresholding, but it didn't seem to help...
from PIL import Image
import pytesseract
pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"
image = Image.open(path)
print(pytesseract.image_to_string(image, lang='eng'))
You can perform some preprocessing using OpenCV. The idea is to enlarge the image with imutils, obtain a binary image using Otsu's threshold, then add a slight Gaussian blur. For optimal detection, the image should be in a form where the desired text is black on a white background. Here are the preprocessing results for the two images:
Before -> After
The output from Pytesseract is the same for both images:
BigBootyHunter2
Code
import cv2
import pytesseract
import imutils
pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"
image = cv2.imread('1.jpg')
image = imutils.resize(image, width=500)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
thresh = cv2.GaussianBlur(thresh, (3,3), 0)
data = pytesseract.image_to_string(thresh, lang='eng',config='--psm 6')
print(data)
cv2.imshow('thresh', thresh)
cv2.waitKey()
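As a side note for flaky cases like this, pytesseract can also report per-word confidences, which makes it easier to tell when the preprocessing is borderline. A small sketch reusing the same thresh image:

from pytesseract import Output

# Per-word text and confidence for the preprocessed image
data = pytesseract.image_to_data(thresh, lang='eng', config='--psm 6',
                                 output_type=Output.DICT)
for word, conf in zip(data['text'], data['conf']):
    if word.strip():
        print(word, conf)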

How to extract text from image using pytesseract?

I'm using pytesseract to try to extract the numbers from an image.
I'm trying to extract the three numbers from this picture.
A straightforward method using pytesseract is:
from PIL import Image
from pytesseract import pytesseract
text = pytesseract.image_to_string(Image.open("uploaded_image.png"))
print(text)
But this prints blank.
Why can't it extract the numbers the way it can for normal text?
Your images need some preprocessing in order to be processed efficiently by pytesseract.
The following shows this process using cv2.adaptiveThreshold(), cv2.findContours(), and cv2.drawContours() operations, before converting the image to black and white and inverting it:
import numpy as np
import cv2
from PIL import Image
import pytesseract
img = cv2.imread('uploaded_image.png', cv2.IMREAD_COLOR)
img = cv2.blur(img, (5, 5))
#HSV (hue, saturation, value)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(hsv)
#Applying threshold on pixels' Value (or Brightness)
thresh = cv2.adaptiveThreshold(v, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 2)
#Finding contours
contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
#Filling contours
contours = cv2.drawContours(img, contours, -1, (255, 255, 255), -1)
#To black and white
grayImage = cv2.cvtColor(contours, cv2.COLOR_BGR2GRAY)
#And inverting it: compute both masks first, so the second assignment
#does not pick up pixels changed by the first
bright = grayImage > 200
dark = grayImage < 100
#Setting the bright (white-filled) pixels to black
grayImage[bright] = 0
#Setting the dark pixels to white
grayImage[dark] = 255
#Write the temp file
cv2.imwrite('temp.png',grayImage)
#Read it with tesseract
text = pytesseract.image_to_string(Image.open('temp.png'), config='--psm 6 -c tessedit_char_whitelist=0123456789')
#Output
print("#### Raw text ####")
print(text)
print()
print("#### Extracted digits ####")
print([''.join([y for y in x if y.isdigit()]) for x in text.split('\n')])
Output
#### Raw text ####
93
31
92
#### Extracted digits ####
['93', '31', '92']
Processed image:
EDIT
Updated answer using the cv2 library and extracting all the digits from the image.
