I'm having problems recognizing text from a picture with Python

I was given a school project on recognizing various kinds of CAPTCHA, and I've run into some difficulties with the implementation.
Images of this type will be fed in as input:
I handle them with the following code:
import cv2
import pytesseract
pytesseract.pytesseract.tesseract_cmd = r'C:\Tesseract-OCR\tesseract.exe'
# load image as grayscale
# (note: cv2.COLOR_RGB2GRAY is a color-conversion code for cvtColor,
# not an imread flag; cv2.IMREAD_GRAYSCALE is the correct constant here)
fname = 'picture.png'
im = cv2.imread(fname, cv2.IMREAD_GRAYSCALE)
# crop to the text region
im = im[0:90, 35:150]
# denoise, then binarize; threshold() returns (retval, image)
im = cv2.blur(im, (3, 3))
im = cv2.threshold(im, 223, 250, cv2.THRESH_BINARY)[1]
cv2.imshow('', im)
cv2.waitKey(0)
After all the processing, the image looks like this: And at this point I have a problem: how can I make the image readable enough for the computer, so that instead of the wrong TAREQ it would output 7TXB6Q?
I am trying to extract the text from the image with the pytesseract library as follows (note there must be no space after tessedit_char_whitelist=, otherwise the whitelist value is empty and gets ignored):
data = pytesseract.image_to_string(im, lang='eng', config='--psm 6 --oem 3 -c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
print(data)
I am writing here hoping to get valuable advice (perhaps you know the most suitable way to get text from a picture, or a better way to process the image pinned above). Peace to everyone!
More images

You can try finding contours and eliminating those with small areas. This preprocessing step should improve the OCR result.
Before:
import cv2 as cv
import numpy as np
# your thresholded image im
bw = cv.imread('bw.png', cv.IMREAD_GRAYSCALE)
# OpenCV 4.x returns (contours, hierarchy); 3.x returned (image, contours, hierarchy)
cnts, _ = cv.findContours(bw, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
# remove the largest contour, which is the background
cnts = np.array(cnts[1:], dtype=object)
areas = np.array(list(map(cv.contourArea, cnts)))
# keep only contours whose area exceeds the threshold
thr = 35
thr_cnts = cnts[areas > thr]
# draw the surviving contours filled on a white canvas,
# then OR with the original image to restore the letter shapes
disp_img = 255 * np.ones(bw.shape, dtype=np.uint8)
disp_img = cv.drawContours(disp_img, thr_cnts, -1, (0, 0, 0), cv.FILLED)
disp_img = cv.bitwise_or(disp_img, bw)
cv.imshow('result', disp_img)
cv.waitKey()
cv.destroyAllWindows()
cv.imwrite('result.png', disp_img)
Result:
Edit: It seems that merging the two pieces of code did not give the same result, so here is the full code from beginning to end.
Input:
import cv2 as cv
import numpy as np
# load image
fname = 'im.png'
im = cv.imread(fname, cv.IMREAD_GRAYSCALE)
# crop
im = im[0:90, 35:150]
# blurring is essential for denoising
im = cv.blur(im, (3,3))
thr = 219
# the binary threshold value is very important:
# using 220 instead of 219 loses a letter, because the letter
# touches the bottom edge and merges into the background
_, im = cv.threshold(im, thr, 255, cv.THRESH_BINARY)
cv.imshow('', im)
cv.waitKey(0)
Thresholded:
# binary image
bw = np.copy(im)
# find contours and corresponding areas
# OpenCV 4.x returns (contours, hierarchy); 3.x returned (image, contours, hierarchy)
cnts, _ = cv.findContours(bw, cv.RETR_LIST, cv.CHAIN_APPROX_NONE)
cnts = np.array(cnts, dtype=object)
areas = np.array(list(map(cv.contourArea, cnts)))
thr = 35
# eliminate contours that are smaller than threshold
# also remove the largest contour which is background
thr_cnts = cnts[np.logical_and(areas > thr, areas != np.max(areas))]
# draw the remaining contours
disp_img = 255 * np.ones(bw.shape, dtype=np.uint8)
disp_img = cv.drawContours(disp_img, thr_cnts, -1, (0, 0, 0), cv.FILLED)
disp_img = cv.bitwise_or(disp_img, bw)
cv.imshow('', disp_img)
cv.waitKey()
cv.destroyAllWindows()
Result:
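With the small blobs removed, the OCR call from the question can be tried again on disp_img. A minimal follow-up sketch (note the whitelist is written without a space after =, otherwise Tesseract ignores it):
import pytesseract
# run OCR on the cleaned image; --psm 6 assumes a single uniform block of text
data = pytesseract.image_to_string(
    disp_img, lang='eng',
    config='--psm 6 --oem 3 -c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
print(data)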

Related

How to find only the bold text lines from a number of images

from PIL import Image
import pytesseract
from pdf2image import convert_from_path
import os
import pandas as pd
import cv2
import numpy as np

os.chdir("C:/Users/abhishek_kumar1/Desktop/New folder")
#print(os.getcwd())
pages = convert_from_path("d.pdf", 190, single_file=True,
    poppler_path='C:/Users/abhishek_kumar1/Downloads/poppler-0.68.0_x86/poppler-0.68.0/bin')
image_counter = 1
for page in pages:
    filename = "page_" + str(image_counter) + ".jpg"
    page.save(filename, 'JPEG')
    img = cv2.imread(filename)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    cv2.imwrite('grey.png', gray)
    ret, thresh1 = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY_INV)
    cv2.imwrite('Thresh1.png', thresh1)
    rect_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (15, 3))
    dilation = cv2.dilate(thresh1, rect_kernel, iterations=6)
    contours, hierarchy = cv2.findContours(dilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    im2 = img.copy()
    ROI_number = 0
    for cnt in contours[::-1]:
        x, y, w, h = cv2.boundingRect(cnt)
        ROI = im2[y:y+h, x:x+w]
        #print(str(w), str(h))
        #cv2.putText(im2, str(h), (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.1, (255, 0, 0), 1)
        #cv2.putText(im2, str(w), (x, y + 10), cv2.FONT_HERSHEY_SIMPLEX, 0.1, (0, 0, 255), 1)
        cv2.imwrite('ROI_{}.jpg'.format(ROI_number), ROI)
        cv2.rectangle(im2, (x, y), (x+w, y+h), (36, 255, 12), 1)
        ROI_number += 1
    cv2.imwrite('contours1.png', im2)
From the above code section, is there any option to understand the font type in an image, such as bold, italic, or something else?
I'm having trouble extracting only the bold lines from all of the images.
If anybody has a suggestion regarding this, please help me out.
Alex Alex's answer did not work for me. Here is my alternative described in words.
The general idea is to compare how many black pixels a word contains with the minimum number of pixels needed to still form its characters. The ratio of full text to skeleton differs between normal and bold text, so we can separate normal text from bold text quite clearly.
1. Use OCR software to extract bounding boxes of individual words. Optional: combine individual words into lines of words, for example by word_num in Pytesseract.
2. Convert the image to grayscale and invert the image colors.
3. Perform Zhang-Suen thinning on the selected area of text on the image (OpenCV contribution: cv2.ximgproc.thinning).
4. Sum the white pixels in the thinned image, i.e. the pixels with value 255 (white pixels are letters).
5. Sum the white pixels in the inverted image.
6. Finally compute the thickness: (sum_inverted_pixels - sum_skeleton_pixels) / sum_skeleton_pixels. (Sometimes there will be a zero-division error; check when the sum of the skeleton is 0 and return 0 instead.)
7. Normalize the thickness by the minimum and maximum values.
8. Apply a threshold for deciding when a word/line of text is bold, e.g. 0.6 or 0.7.
See Python code and result:
import cv2
import numpy as np

img = cv2.imread('C.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 160, 255, cv2.THRESH_BINARY)[1]

# morphological reconstruction: repeatedly erode the marker,
# constrained from below by the mask, until it stops changing
kernel = np.ones((5, 5), np.uint8)
kernel2 = np.ones((3, 3), np.uint8)
marker = cv2.dilate(thresh, kernel, iterations=1)
mask = cv2.erode(thresh, kernel, iterations=1)
while True:
    tmp = marker.copy()
    marker = cv2.erode(marker, kernel2)
    marker = cv2.max(mask, marker)
    difference = cv2.subtract(tmp, marker)
    if cv2.countNonZero(difference) == 0:  # converged
        break

marker_color = cv2.cvtColor(marker, cv2.COLOR_GRAY2BGR)
out = cv2.bitwise_or(img, marker_color)
cv2.imwrite('out.png', out)
cv2.imshow('result', out)
cv2.waitKey(0)
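For completeness, here is a minimal sketch of the thinning-based measure from the numbered steps above (my own sketch, not the answerer's code; it assumes opencv-contrib-python is installed for cv2.ximgproc.thinning, and that word_crops holds grayscale word images cut out using the OCR bounding boxes from step 1):
import cv2
import numpy as np

def stroke_thickness(word_gray):
    # invert so the letters are white on black
    inv = cv2.threshold(word_gray, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
    # Zhang-Suen thinning reduces every stroke to a 1-pixel-wide skeleton
    skel = cv2.ximgproc.thinning(inv)
    sum_skel = np.count_nonzero(skel == 255)
    if sum_skel == 0:
        return 0.0  # avoid zero division on empty crops
    sum_inv = np.count_nonzero(inv == 255)
    return (sum_inv - sum_skel) / sum_skel

def bold_flags(word_crops, threshold=0.6):
    t = np.array([stroke_thickness(w) for w in word_crops])
    norm = (t - t.min()) / (t.max() - t.min() + 1e-9)  # normalize to [0, 1]
    return norm > threshold  # True where the word is likely bold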

How to get RGB values of two separate lines in an image in two variables using opencv python

I have detected two lines in an image using cv2. Now I want to get the RGB values of both lines in separate variables, like left_line_variable = ['rgb values'], right_line_rgb_values = ['rgb values'].
Here is my code:
import cv2
import numpy as np

image = cv2.imread('tape.png')
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Apply adaptive threshold (note the argument order:
# maxValue, adaptiveMethod, thresholdType, blockSize, C)
image_thr = cv2.adaptiveThreshold(image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                  cv2.THRESH_BINARY_INV, 81, 2)

# Apply morphological opening with a full-height vertical line kernel
# (only nonzero entries matter for the structuring element)
kernel = np.ones((image.shape[0], 1), dtype=np.uint8)
image_mop = cv2.morphologyEx(image_thr, cv2.MORPH_OPEN, kernel)
color_detected_img = cv2.bitwise_and(image, image, mask=image_mop)

cv2.imshow('image', color_detected_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
This is the image from which I want to get both line's RGB values in two variables as described above:
Maybe it is not the most optimal way, but it is not hard to do. As I said in the comments, you can label the image to segment the lines, then get the mean of the RGB values in each and the average position to know which one is left and which is right. Here is a small script to demonstrate what I am saying; the last part is just to show the results.
import cv2
import numpy as np

# load img and get the grayscale
img = cv2.imread("x.png")
grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# label the image
ret, thres = cv2.threshold(grey, 1, 255, cv2.THRESH_BINARY)
labelAmount, labels = cv2.connectedComponents(thres)

# get the mean of the color and position
values = []
# first label (0) is background
for i in range(1, labelAmount):
    mask = np.zeros(labels.shape, dtype=np.uint8)
    mask[labels == i] = 255
    mean = cv2.mean(img, mask)[:-1]
    meanPos = np.mean(cv2.findNonZero(mask), axis=0)[0]
    values.append((mean, meanPos))

# sort them by x value (left to right)
values = sorted(values, key=lambda v: v[1][0])
left_line_color = values[0][0]
right_line_color = values[1][0]

# just to show the results
left_only = np.zeros(img.shape, dtype=np.uint8)
right_only = np.zeros(img.shape, dtype=np.uint8)
left_only = cv2.line(left_only, (int(values[0][1][0]), 0), (int(values[0][1][0]), img.shape[0]), left_line_color, 5)
right_only = cv2.line(right_only, (int(values[1][1][0]), 0), (int(values[1][1][0]), img.shape[0]), right_line_color, 5)
cv2.imshow("left_line", left_only)
cv2.imshow("right_line", right_only)
cv2.imshow("original", img)
cv2.waitKey(0)
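One caveat: cv2.mean returns the channel means in OpenCV's BGR order, so if you want true RGB tuples, reverse them:
# cv2.mean gives (B, G, R); reverse for RGB order
left_line_rgb = tuple(reversed(left_line_color))
right_line_rgb = tuple(reversed(right_line_color))
print("left line RGB:", left_line_rgb)
print("right line RGB:", right_line_rgb)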

Feather cropped edges

I'm trying to crop an object from an image, and paste it on another image. Examining the method in this answer, I've successfully managed to do that. For example:
The code (show_mask_applied.py):
import sys
from pathlib import Path
from helpers_cv2 import *
import cv2
import numpy
img_path = Path(sys.argv[1])
img = cmyk_to_bgr(str(img_path))
threshed = threshold(img, 240, type=cv2.THRESH_BINARY_INV)
contours = find_contours(threshed)
mask = mask_from_contours(img, contours)
mask = dilate_mask(mask, 50)
crop = cv2.bitwise_or(img, img, mask=mask)
bg = cv2.imread("bg.jpg")
bg_mask = cv2.bitwise_not(mask)
bg_crop = cv2.bitwise_or(bg, bg, mask=bg_mask)
final = cv2.bitwise_or(crop, bg_crop)
cv2.imshow("debug", final)
cv2.waitKey(0)
cv2.destroyAllWindows()
helpers_cv2.py:
from pathlib import Path
import cv2
import numpy
from PIL import Image
from PIL import ImageCms
from PIL import ImageFile

ImageFile.LOAD_TRUNCATED_IMAGES = True

def cmyk_to_bgr(cmyk_img):
    img = Image.open(cmyk_img)
    if img.mode == "CMYK":
        img = ImageCms.profileToProfile(img, "Color Profiles\\USWebCoatedSWOP.icc", "Color Profiles\\sRGB_Color_Space_Profile.icm", outputMode="RGB")
    return cv2.cvtColor(numpy.array(img), cv2.COLOR_RGB2BGR)

def threshold(img, thresh=128, maxval=255, type=cv2.THRESH_BINARY):
    if len(img.shape) == 3:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    threshed = cv2.threshold(img, thresh, maxval, type)[1]
    return threshed

def find_contours(img):
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
    morphed = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
    contours = cv2.findContours(morphed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # contours[-2] works for both OpenCV 3.x (3 return values) and 4.x (2)
    return contours[-2]

def mask_from_contours(ref_img, contours):
    mask = numpy.zeros(ref_img.shape, numpy.uint8)
    mask = cv2.drawContours(mask, contours, -1, (255, 255, 255), -1)
    return cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)

def dilate_mask(mask, kernel_size=11):
    kernel = numpy.ones((kernel_size, kernel_size), numpy.uint8)
    dilated = cv2.dilate(mask, kernel, iterations=1)
    return dilated
Now, instead of sharp edges, I want to crop with feathered/smooth edges. For example (the right one; created in Photoshop):
How can I do that?
All images and code can be found at this repository.
You are using a mask to select parts of the overlay image. The mask currently looks like this:
Let's first add a Gaussian blur to this mask.
mask_blurred = cv2.GaussianBlur(mask,(99,99),0)
We get to this:
Now, the remaining task is to blend the images using the alpha value in the mask, rather than using it as a logical operator as you do currently.
mask_blurred_3chan = cv2.cvtColor(mask_blurred, cv2.COLOR_GRAY2BGR).astype('float') / 255.
img = img.astype('float') / 255.
bg = bg.astype('float') / 255.
out = bg * (1 - mask_blurred_3chan) + img * mask_blurred_3chan
The above snippet is quite simple. First, transform the mask into a 3 channel image (since we want to mask all the channels). Then transform the images to float, since the masking is done in floating point. The last line does the actual work: for each pixel, blends the bg and img images according to the value in the mask. The result looks like this:
The amount of feathering is controlled by the size of the kernel in the Gaussian blur. Note that it has to be an odd number.
After this, out (the final image) is still in floating point. It can be converted back to int using:
out = (out * 255).astype('uint8')
While Paul92's answer is more than enough, I wanted to post my code anyway for any future visitor.
I'm doing this cropping to get rid of white background in some product photos. So, the main goal is to get rid of the whites while keeping the product intact. Most of the product photos have shadows on the ground. They are either the ground itself (faded), or the product's shadow, or both.
While the object detection works fine, these shadows also count as part of the object. Differentiating the shadows from the objects is not strictly necessary, but leaving them in results in some images that are not so desirable. For example, examine the left and bottom sides of the image (shadow). The cut/crop is obviously visible and doesn't look all that nice.
To get around this problem, I wanted to do non-rectangular crops. Using masks seems to do the job just fine. The next problem was to do the cropping with feathered/blurred edges so that I can get rid of these visible shadow cuts. With the help of Paul92, I've managed to do that. Example output (notice the missing shadow cuts, the edges are softer):
Operations on the image(s):
The code (show_mask_feathered.py, helpers_cv2.py)
import sys
from pathlib import Path
import cv2
import numpy
from helpers_cv2 import *
img_path = Path(sys.argv[1])
img = cmyk_to_bgr(str(img_path))
threshed = threshold(img, 240, type=cv2.THRESH_BINARY_INV)
contours = find_contours(threshed)
dilation_length = 51
blur_length = 51
mask = mask_from_contours(img, contours)
mask_dilated = dilate_mask(mask, dilation_length)
mask_smooth = smooth_mask(mask_dilated, odd(dilation_length * 1.5))
mask_blurred = cv2.GaussianBlur(mask_smooth, (blur_length, blur_length), 0)
mask_blurred = cv2.cvtColor(mask_blurred, cv2.COLOR_GRAY2BGR)
mask_threshed = threshold(mask_blurred, 1)
mask_contours = find_contours(mask_threshed)
mask_contour = max_contour(mask_contours)
x, y, w, h = cv2.boundingRect(mask_contour)
img_cropped = img[y:y+h, x:x+w]
mask_cropped = mask_blurred[y:y+h, x:x+w]
background = numpy.full(img_cropped.shape, (200,240,200), dtype=numpy.uint8)
output = alpha_blend(background, img_cropped, mask_cropped)
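Note that smooth_mask, odd, max_contour, and alpha_blend come from helpers_cv2.py in the linked repository and are not shown here. As a rough sketch, an alpha_blend consistent with Paul92's blending formula above could look like this (my assumption, not necessarily the repository's exact implementation):
def alpha_blend(bg, img, mask):
    # mask is a 3-channel 8-bit image (as mask_cropped above),
    # used as a per-pixel alpha in [0, 1]
    alpha = mask.astype('float') / 255.0
    out = bg.astype('float') / 255.0 * (1 - alpha) + img.astype('float') / 255.0 * alpha
    return (out * 255).astype('uint8')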

Batch processing and breaking up an image

I'm trying to segment each charm item in this collective image.
The shapes are irregular and inconsistent:
https://i.imgur.com/sf8nOau.jpg
I have another image where there is some consistency with the top row of items, but ideally I'd be able to process and break up all the items in one go:
http://i.imgur.com/WiiYBay.jpg
I have no experience with OpenCV, so I'm just looking for the best possible tool or approach. I've read about background subtraction as well as color clustering, but I'm not sure about those either.
Any ideas on how to best approach this? Thanks.
Code
import cv2
import numpy as np

im = cv2.imread('so1.jpg')
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

# morphological cleanup: close small gaps and remove small noise
kernel = np.ones((3, 3), np.uint8)
res = cv2.dilate(thresh, kernel, iterations=1)
res = cv2.erode(res, kernel, iterations=1)
res = cv2.dilate(res, kernel, iterations=1)
cv2.imshow('thresh', res)

# OpenCV 4.x signature; 3.x also returned the image first
contours, hierarchy = cv2.findContours(res.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
    x, y, w, h = cv2.boundingRect(cnt)
    cv2.rectangle(im, (x, y), (x+w, y+h), (0, 255, 0), 2)

cv2.imshow('image', im)
cv2.waitKey(0)
cv2.destroyAllWindows()
Output
Now that you have the contours, you can crop them out.
Code Update
import cv2
import numpy as np

im = cv2.imread('so.jpg')
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

kernel = np.ones((3, 3), np.uint8)
res = cv2.dilate(thresh, kernel, iterations=1)
res = cv2.erode(res, kernel, iterations=1)
# heavier dilation merges each charm into a single blob
res = cv2.dilate(res, kernel, iterations=8)
cv2.imshow('thresh', res)

contours, hierarchy = cv2.findContours(res.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
count = 0
for cnt in contours:
    blank = np.zeros(im.shape, dtype=np.uint8)
    x, y, w, h = cv2.boundingRect(cnt)
    epsilon = 0.001 * cv2.arcLength(cnt, True)
    approx = cv2.approxPolyDP(cnt, epsilon, True)
    cv2.fillConvexPoly(blank, approx, (255, 255, 255))
    masked_image = cv2.bitwise_and(im, blank)
    cv2.imwrite('results_so/im' + str(count) + '.jpg', masked_image[y:y+h, x:x+w])
    count += 1

cv2.imshow('image', im)
cv2.waitKey(0)
cv2.destroyAllWindows()
results
good one
bad one
Some small noise is also detected as objects. You can eliminate it by keeping only contours whose area is greater than a certain value, as sketched below.
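A minimal filter along those lines (min_area is an arbitrary value to tune for your images):
# keep only contours whose area exceeds a minimum
min_area = 500
contours = [cnt for cnt in contours if cv2.contourArea(cnt) > min_area]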

How to count objects in image using python?

I am trying to count the number of drops in this image and the coverage percentage of the area covered by those drops.
I tried to convert this image into black and white, but the center color of those drops seems too similar to the background. So I only got something like the second picture.
Is there any way to solve this problem or any better ideas?
Thanks a lot.
You can fill the holes of your binary image using scipy.ndimage.binary_fill_holes. I also recommend using an automatic thresholding method such as Otsu's (available in scikit-image).
from skimage import io, filters
from scipy import ndimage
import matplotlib.pyplot as plt
im = io.imread('ba3g0.jpg', as_gray=True)  # 'as_grey' in older scikit-image releases
val = filters.threshold_otsu(im)
drops = ndimage.binary_fill_holes(im < val)
plt.imshow(drops, cmap='gray')
plt.show()
For the number of drops you can use another function of scikit-image
from skimage import measure
labels = measure.label(drops)
print(labels.max())
And for the coverage
print('coverage is %f' %(drops.mean()))
I used the following code to detect the number of contours in the image using OpenCV and Python.
import cv2
import numpy as np

img = cv2.imread('ba3g0.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# the original used bare magic numbers (1 and 1, 2); these are the named constants
ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV)
contours, h = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
    cv2.drawContours(img, [cnt], 0, (0, 0, 255), 1)
To further remove contours nested inside other contours, you need to iterate over the entire list, compare, and drop the internal ones. After that, len(contours) gives you the count.
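As a sketch of that filtering: retrieve the contours with a mode that records nesting (RETR_LIST does not) and keep only the top-level ones:
# hierarchy[0][i][3] is the parent index; -1 means contour i is not inside any other
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
outer = [cnt for i, cnt in enumerate(contours) if hierarchy[0][i][3] == -1]
print(len(outer))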
The idea is to separate the background from the insides of the drops, which look like the background.
Therefore I found the connected components of the background and the drop interiors, took the largest connected component, and changed its value to match the foreground. This left me with an image in which the drop interiors have a different value than the background.
Then I used this image to fill in the original thresholded image.
Finally, using the filled image, I calculated the relevant values.
import cv2
import numpy as np
from matplotlib import pyplot as plt

# Read image as grayscale
I = cv2.imread('drops.jpg', 0)

# Threshold
IThresh = (I >= 118).astype(np.uint8) * 255

# Find the area of each connected component
connectedComponentProps = cv2.connectedComponentsWithStats(IThresh, 8, cv2.CV_32S)
IThreshOnlyInsideDrops = connectedComponentProps[1]  # label image
stat = connectedComponentProps[2]

# Find the biggest connected component, which is the background
maxArea = 0
for label in range(connectedComponentProps[0]):
    cc = stat[label, :]
    if cc[cv2.CC_STAT_AREA] > maxArea:
        maxArea = cc[cv2.CC_STAT_AREA]
        maxIndex = label

# Set the background to 0 and everything else to the foreground value
for label in range(connectedComponentProps[0]):
    cc = stat[label, :]
    if cc[cv2.CC_STAT_AREA] == maxArea:
        IThreshOnlyInsideDrops[IThreshOnlyInsideDrops == label] = 0
    else:
        IThreshOnlyInsideDrops[IThreshOnlyInsideDrops == label] = 255

# Fill in all the IThreshOnlyInsideDrops as 0 in the original IThresh
IThreshFill = IThresh
IThreshFill[IThreshOnlyInsideDrops == 255] = 0
IThreshFill = np.logical_not(IThreshFill / 255).astype(np.uint8) * 255
plt.imshow(IThreshFill)

# Get number of drops and coverage percentage
connectedComponentPropsFinal = cv2.connectedComponentsWithStats(IThreshFill, 8, cv2.CV_32S)
NumberOfDrops = connectedComponentPropsFinal[0]
CoverPercentage = float(np.count_nonzero(IThreshFill == 0)) / float(IThreshFill.size)

# Print (Python 3 syntax; the original used Python 2 print statements)
print("Number of drops = " + str(NumberOfDrops))
print("Cover percentage = " + str(CoverPercentage))
Solution
import cv2
from matplotlib import pyplot as plt

image = cv2.imread('image path.png')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# (thresh, blackAndWhiteImage) = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
plt.imshow(gray, cmap='gray')

blur = cv2.GaussianBlur(gray, (11, 11), 0)
plt.imshow(blur, cmap='gray')

canny = cv2.Canny(blur, 30, 40, 3)
plt.imshow(canny, cmap='gray')

# note: iterations=0 means no dilation is actually applied here
dilated = cv2.dilate(canny, (1, 1), iterations=0)
plt.imshow(dilated, cmap='gray')

(cnt, hierarchy) = cv2.findContours(
    dilated.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
cv2.drawContours(rgb, cnt, -1, (0, 255, 0), 2)
plt.imshow(rgb)
print("No of circles: ", len(cnt))
