Use cv2.connectedComponents and eliminate elements with a small number of pixels - python

I want to use cv2.connectedComponents to label the components of a binary image, like the following...
I extended the cv2.connectedComponents step to eliminate components with a small number of pixels.
Unfortunately, this extension makes the algorithm extremely slow for large images. Is there a way to rewrite the extension to speed up the algorithm?
import cv2
import numpy as np

def zerolistmaker(n):
    listofzeros = [0] * n
    return listofzeros

img = cv2.imread('files/motorway/gabor/eGaIy.jpg', 0)
img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)[1]  # ensure binary
retval, labels = cv2.connectedComponents(img)

##################################################
# ENLARGEMENT
##################################################
sorted_labels = labels.ravel()
sorted_labels = np.sort(sorted_labels)

maxPixel = 50  # eliminate elements with fewer than maxPixel pixels

# detect how often each label occurs
i = 0
counter = 0
counterlist = zerolistmaker(retval)
while i < len(sorted_labels):
    if sorted_labels[i] == counter:
        counterlist[counter] = counterlist[counter] + 1
    else:
        counter = counter + 1
        i = i - 1
    i = i + 1

# zero out small pixel counts
i = 0
while i < len(counterlist):
    if counterlist[i] < maxPixel:
        counterlist[i] = 0
    i = i + 1

i = 0
counterlisthelper = []
while i < len(counterlist):
    if counterlist[i] == 0:
        counterlisthelper.append(i)
    i = i + 1

i = 0
j = 0
k = 0
while k < len(counterlisthelper):
    while i < labels.shape[0]:
        while j < labels.shape[1]:
            if labels[i, j] == counterlisthelper[k]:
                labels[i, j] = 0
            j = j + 1
        j = 0
        i = i + 1
    i = 0
    j = 0
    k = k + 1
##################################################
##################################################

# Map component labels to hue val
label_hue = np.uint8(179 * labels / np.max(labels))
blank_ch = 255 * np.ones_like(label_hue)
labeled_img = cv2.merge([label_hue, blank_ch, blank_ch])

# cvt to BGR for display
labeled_img = cv2.cvtColor(labeled_img, cv2.COLOR_HSV2BGR)

# set bg label to black
labeled_img[label_hue == 0] = 0

cv2.imshow('labeled.png', labeled_img)
cv2.waitKey()

In Python, you should avoid deeply nested loops; prefer NumPy operations over plain Python loops.
Improved:
##################################################
ts = time.time()
num = labels.max()
N = 50

## If the count of pixels of a component is less than the threshold, set those pixels to `0`.
for i in range(1, num + 1):
    pts = np.where(labels == i)
    if len(pts[0]) < N:
        labels[pts] = 0

print("Time passed: {:.3f} ms".format(1000 * (time.time() - ts)))
# Time passed: 4.607 ms
##################################################
Result:
The whole code:
#!/usr/bin/python3
# 2018.01.17 22:36:20 CST
import cv2
import numpy as np
import time

img = cv2.imread('test.jpg', 0)
img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)[1]  # ensure binary
retval, labels = cv2.connectedComponents(img)

##################################################
ts = time.time()
num = labels.max()
N = 50
for i in range(1, num + 1):
    pts = np.where(labels == i)
    if len(pts[0]) < N:
        labels[pts] = 0
print("Time passed: {:.3f} ms".format(1000 * (time.time() - ts)))
# Time passed: 4.607 ms
##################################################

# Map component labels to hue val
label_hue = np.uint8(179 * labels / np.max(labels))
blank_ch = 255 * np.ones_like(label_hue)
labeled_img = cv2.merge([label_hue, blank_ch, blank_ch])

# cvt to BGR for display
labeled_img = cv2.cvtColor(labeled_img, cv2.COLOR_HSV2BGR)

# set bg label to black
labeled_img[label_hue == 0] = 0

cv2.imshow('labeled.png', labeled_img)
cv2.imwrite("labeled.png", labeled_img)
cv2.waitKey()
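As a further refinement (not part of the original answer), cv2.connectedComponentsWithStats returns the pixel count of every component up front, so even the per-label np.where loop can be replaced by a single vectorized lookup. A minimal sketch, assuming the same test.jpg input:
import cv2
import numpy as np

img = cv2.imread('test.jpg', 0)
img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)[1]  # ensure binary

# stats[:, cv2.CC_STAT_AREA] holds the pixel count of each component
retval, labels, stats, centroids = cv2.connectedComponentsWithStats(img)

N = 50
small = stats[:, cv2.CC_STAT_AREA] < N  # True for labels below the threshold
small[0] = False                        # never clear the background label

labels[small[labels]] = 0               # zero all pixels of small components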

Related

Image Mask to apply image filters

I want to apply variance, Gabor, and entropy filters to some images, but the images have blank areas that I don't want the filters applied to. I tried to use np.ma.array, but it returns this error: "'MaskedArray' object is not callable"
This is the code:
import os
import glob
import cv2
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage
from skimage import io

def bandas_img(image, array1, array2):
    imagenRGB = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    return cv2.inRange(imagenRGB, array1, array2)

def rescale_by_width(image, target_width, method=cv2.INTER_LANCZOS4):
    h = int(round(target_width * image.shape[0] / image.shape[1]))
    return cv2.resize(image, (target_width, h), interpolation=method)

# Resize images by width
target_width = 400

# To mask null values
mask_image = True
hue = 20
sat = 57
value = 116
toleranciaH = 150
toleranciaS = 150
toleranciaV = 150
lower = np.array([hue - toleranciaH, sat - toleranciaS, value - toleranciaV])
upper = np.array([hue + toleranciaH, sat + toleranciaS, value + toleranciaV])

# working directory where the image files are
# (note: the slashes must go forward, /, not backward)
os.chdir("C:/Users/Mariano/Documents/3 - Visual studio code/Prueba filtrar mascara/filtrada")

file_extension = '.png'  # check extension
all_filenames = [i for i in glob.glob(f"*{file_extension}")]

for f in all_filenames:
    image = cv2.imread(f, 1)
    # resized image
    resized1 = rescale_by_width(image, target_width)
    # set f value (image name)
    f = f.replace(".png", "")
    # save image
    plt.imsave(f + "_resized.png", resized1)
    # create mask for null values
    if mask_image == True:
        mask = bandas_img(resized1, lower, upper)
        cv2.imwrite(f + "_mask.png", mask)
        resized2 = io.imread(f + "_resized.png", as_gray=True)
        resized3 = resized2.copy()
        # first try
        resized3[mask == 0] = np.nan
        resized3[mask != 0] = resized2[mask != 0]
        # second try
        mask1 = (resized3 == np.nan)
        resized_Mask = np.ma.array(resized3, mask=mask1)
        # variance (varianza)
        k = 6
        img_mean = ndimage.uniform_filter(resized_Mask, (k, k))
        img_sqr_mean = ndimage.uniform_filter(resized_Mask**2, (k, k))
        img_var = img_sqr_mean - img_mean**2
        img_var[mask == 0] = 1
        plt.imsave(f + "_varianza.png", img_var)

Crop Row Detection script

I want to detect crop rows in aerial images (CRBD). I have done the necessary image processing: converting to grayscale, edge detection, skeletonization, and a Hough Transform (to identify and draw the lines), and I set the accumulator angle to math.pi*4.0/180, which I varied from time to time.
The algorithm works well at detecting approximately 4 crop rows. I want to improve it so that it can detect a variable number of crop rows, and it should be able to highlight those crop rows.
Here is a link to the sample code I modified Here
import os
import os.path
import time
import cv2
import numpy as np
import math

### Setup ###
image_data_path = os.path.abspath('../8470p/CRBD/Images')
gt_data_path = os.path.abspath('../8470p/GT data')
image_out_path = os.path.abspath('../8470p/algorithm_1')

use_camera = False             # whether or not to use the test images or camera
images_to_save = [2, 3, 4, 5]  # which test images to save
timing = False                 # whether to time the test images
curr_image = 0                 # global counter

HOUGH_RHO = 2                  # Distance resolution of the accumulator in pixels
HOUGH_ANGLE = math.pi*4.0/180  # Angle resolution of the accumulator in radians
HOUGH_THRESH_MAX = 80          # Accumulator threshold; only lines that get enough votes are returned
HOUGH_THRESH_MIN = 10
HOUGH_THRESH_INCR = 1

NUMBER_OF_ROWS = 10                   # how many crop rows to detect
THETA_SIM_THRESH = math.pi*(6.0/180)  # How similar two rows can be
RHO_SIM_THRESH = 8                    # How similar two rows can be
ANGLE_THRESH = math.pi*(30.0/180)     # How steep the crop-row angles can be, in radians

def grayscale_transform(image_in):
    '''Converts RGB to grayscale and enhances green values'''
    b, g, r = cv2.split(image_in)
    return 2*g - r - b

def save_image(image_name, image_data):
    '''Saves image if user requests before runtime'''
    if curr_image in images_to_save:
        image_name_new = os.path.join(
            image_out_path, "{0}_{1}.jpg".format(image_name, str(curr_image)))
        cv2.imwrite(image_name_new, image_data)  # assumed save call; the pasted snippet ended after building the name

def skeletonize(image_in):
    '''Inputs a grayscale image and outputs a binary skeleton image'''
    size = np.size(image_in)
    skel = np.zeros(image_in.shape, np.uint8)
    ret, image_edit = cv2.threshold(image_in, 0, 255,
                                    cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    done = False
    while not done:
        eroded = cv2.erode(image_edit, element)
        temp = cv2.dilate(eroded, element)
        temp = cv2.subtract(image_edit, temp)
        skel = cv2.bitwise_or(skel, temp)
        image_edit = eroded.copy()
        zeros = size - cv2.countNonZero(image_edit)
        if zeros == size:
            done = True
    return skel

def tuple_list_round(tuple_list, ndigits_1=0, ndigits_2=0):
    '''Rounds each value in a list of tuples to the number of digits specified'''
    new_list = []
    for (value_1, value_2) in tuple_list:
        new_list.append((round(value_1, ndigits_1), round(value_2, ndigits_2)))
    return new_list

def crop_point_hough(crop_points):
    '''Iterates through Hough thresholds until an optimal value is found for
    the desired number of crop rows. Also does filtering.
    '''
    height = len(crop_points)
    width = len(crop_points[0])
    hough_thresh = HOUGH_THRESH_MAX
    rows_found = False
    while hough_thresh > HOUGH_THRESH_MIN and not rows_found:
        crop_line_data = cv2.HoughLines(crop_points, HOUGH_RHO, HOUGH_ANGLE,
                                        hough_thresh)
        crop_lines = np.zeros((height, width, 3), dtype=np.uint8)
        crop_lines_hough = np.zeros((height, width, 3), dtype=np.uint8)
        if crop_line_data is not None:
            # get rid of duplicate lines; may become redundant if a
            # similarity threshold is done
            crop_line_data_1 = tuple_list_round(crop_line_data[:, 0, :], -1, 4)
            crop_line_data_2 = []
            x_offsets = []
            crop_lines_hough = np.zeros((height, width, 3), dtype=np.uint8)
            for (rho, theta) in crop_line_data_1:
                a = math.cos(theta)
                b = math.sin(theta)
                x0 = a*rho
                y0 = b*rho
                point1 = (int(round(x0 + 1000*(-b))), int(round(y0 + 1000*(a))))
                point2 = (int(round(x0 - 1000*(-b))), int(round(y0 - 1000*(a))))
                cv2.line(crop_lines_hough, point1, point2, (0, 0, 255), 2)
            for curr_index in range(len(crop_line_data_1)):
                (rho, theta) = crop_line_data_1[curr_index]
                is_faulty = False
                if ((theta >= ANGLE_THRESH) and (theta <= math.pi - ANGLE_THRESH)) \
                        or (theta <= 0.001):
                    is_faulty = True
                else:
                    for (other_rho, other_theta) in crop_line_data_1[curr_index+1:]:
                        if abs(theta - other_theta) < THETA_SIM_THRESH:
                            is_faulty = True
                        elif abs(rho - other_rho) < RHO_SIM_THRESH:
                            is_faulty = True
                if not is_faulty:
                    crop_line_data_2.append((rho, theta))
            for (rho, theta) in crop_line_data_2:
                a = math.cos(theta)
                b = math.sin(theta)
                c = math.tan(theta)
                x0 = a*rho
                y0 = b*rho
                point1 = (int(round(x0 + 1000*(-b))), int(round(y0 + 1000*(a))))
                point2 = (int(round(x0 - 1000*(-b))), int(round(y0 - 1000*(a))))
                cv2.line(crop_lines, point1, point2, (0, 0, 255), 2)
                #cv2.circle(crop_lines, (np.clip(int(round(a*rho+c*(0.5*height))), 0, 239), 0), 4, (255, 0, 0), -1)
                #cv2.circle(crop_lines, (np.clip(int(round(a*rho-c*(0.5*height))), 0, 239), height), 4, (255, 0, 0), -1)
                cv2.circle(crop_lines,
                           (np.clip(int(round(rho/a)), 0, 239), 0), 5,
                           (255, 0, 0), -1)
                #cv2.circle(img, (447, 63), 63, (0, 0, 255), -1)
                x_offsets.append(np.clip(int(round(rho/a)), 0, 239))
                cv2.line(crop_lines, point1, point2, (0, 0, 255), 2)
            if len(crop_line_data_2) >= NUMBER_OF_ROWS:
                rows_found = True
        hough_thresh -= HOUGH_THRESH_INCR
    if rows_found == False:
        print(NUMBER_OF_ROWS, "rows_not_found")
    x_offset = min(x_offsets)
    width = max(x_offsets) - min(x_offsets)
    return (crop_lines, crop_lines_hough, x_offset, width)

def crop_row_detect(image_in):
    '''Inputs an image and outputs the lines'''
    save_image('0_image_in', image_in)

    ### Grayscale Transform ###
    image_edit = grayscale_transform(image_in)
    save_image('1_image_gray', image_edit)

    ### Skeletonization ###
    skeleton = skeletonize(image_edit)
    save_image('2_image_skeleton', skeleton)

    ### Hough Transform ###
    (crop_lines, crop_lines_hough, x_offset, width) = crop_point_hough(skeleton)
    save_image('3_image_hough',
               cv2.addWeighted(image_in, 1, crop_lines_hough, 1, 0.0))
    save_image('4_image_lines',
               cv2.addWeighted(image_in, 1, crop_lines, 1, 0.0))
    return (crop_lines, x_offset, width)

def main():
    if use_camera == False:
        diff_times = []
        for image_name in sorted(os.listdir(image_data_path)):
            global curr_image
            curr_image += 1
            start_time = time.time()
            image_path = os.path.join(image_data_path, image_name)
            image_in = cv2.imread(image_path)
            # crop_row_detect returns a tuple; unpack it so only the line
            # image is blended below (the pasted code assigned the whole
            # tuple to crop_lines)
            (crop_lines, x_offset, width) = crop_row_detect(image_in)
            if timing == False:
                cv2.imshow(image_name,
                           cv2.addWeighted(image_in, 1, crop_lines, 1, 0.0))
                print('Press any key to continue...')
                cv2.waitKey()
                cv2.destroyAllWindows()
            ### Timing ###
            else:
                diff_times.append(time.time() - start_time)
                mean = 0
                for diff_time in diff_times:
                    mean += diff_time
                ### Display Timing ###
                print('max time = {0}'.format(max(diff_times)))
                print('ave time = {0}'.format(1.0 * mean / len(diff_times)))
        cv2.waitKey()
    else:  # use camera. Hasn't been tested on a farm.
        capture = cv2.VideoCapture(0)
        while cv2.waitKey(1) < 0:
            _, image_in = capture.read()
            (crop_lines, x_offset, width) = crop_row_detect(image_in)
            cv2.imshow("Webcam",
                       cv2.addWeighted(image_in, 1, crop_lines, 1, 0.0))
        capture.release()
        cv2.destroyAllWindows()

main()
Input Image: [image]
Output Image: [image]
Expected Output: [image]
I have tried thresholding with cv2.inRange() to find the green lines, but I am still not getting the desired output.
Also, the algorithm seems to draw only crop_line_data_2, as shown in the Output Image; it doesn't draw crop_line_data_1.
def threshold_green(image_in):
    hsv = cv2.cvtColor(image_in, cv2.COLOR_BGR2HSV)
    ## mask of green (36,25,25) ~ (86, 255,255)
    # mask = cv2.inRange(hsv, (36, 25, 25), (86, 255, 255))
    mask = cv2.inRange(hsv, (36, 25, 25), (70, 255, 255))
    ## slice the green
    imask = mask > 0
    green = np.zeros_like(image_in, np.uint8)
    green[imask] = image_in[imask]
    return green
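A possible next step (a sketch under assumptions, not a confirmed fix): cv2.HoughLines expects a single-channel binary image, so the mask from cv2.inRange can be fed straight into the existing skeletonize/Hough pipeline instead of the plain grayscale transform:
import cv2

image_in = cv2.imread('crop_field.jpg')  # hypothetical input path
hsv = cv2.cvtColor(image_in, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, (36, 25, 25), (70, 255, 255))  # green range from above

skeleton = skeletonize(mask)  # reuses the function defined earlier
(crop_lines, crop_lines_hough, x_offset, width) = crop_point_hough(skeleton)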

How to get the video file length in Yolo v3

I wanted to find out how the video frame length was calculated in the code below.
[UPD] At first I thought it was done by Yolo, but later I realized it is OpenCV that deals with the number of frames in a video file.
"""
Class definition of YOLO_v3 style detection model on image and video
"""
import colorsys
import os
from timeit import default_timer as timer
import numpy as np
from keras import backend as K
from keras.models import load_model
from keras.layers import Input
from PIL import Image, ImageFont, ImageDraw
from yolo3.model import yolo_eval, yolo_body, tiny_yolo_body
from yolo3.utils import letterbox_image
import os
from keras.utils import multi_gpu_model
class YOLO(object):
_defaults = {
"model_path": 'model_data/yolo.h5',
"anchors_path": 'model_data/yolo_anchors.txt',
"classes_path": 'model_data/coco_classes.txt',
"score" : 0.3,
"iou" : 0.45,
"model_image_size" : (416, 416),
"gpu_num" : 1,
}
#classmethod
def get_defaults(cls, n):
if n in cls._defaults:
return cls._defaults[n]
else:
return "Unrecognized attribute name '" + n + "'"
def __init__(self, **kwargs):
self.__dict__.update(self._defaults) # set up default values
self.__dict__.update(kwargs) # and update with user overrides
self.class_names = self._get_class()
self.anchors = self._get_anchors()
self.sess = K.get_session()
self.boxes, self.scores, self.classes = self.generate()
def _get_class(self):
classes_path = os.path.expanduser(self.classes_path)
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def _get_anchors(self):
anchors_path = os.path.expanduser(self.anchors_path)
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
def generate(self):
model_path = os.path.expanduser(self.model_path)
assert model_path.endswith('.h5'), 'weights must be a .h5 file.'
# Load model, or construct model and load weights.
num_anchors = len(self.anchors)
num_classes = len(self.class_names)
is_tiny_version = num_anchors==6 # default setting
try:
self.yolo_model = load_model(model_path, compile=False)
except:
self.yolo_model = tiny_yolo_body(Input(shape=(None,None,3)), num_anchors//2, num_classes) \
if is_tiny_version else yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes)
self.yolo_model.load_weights(self.model_path) # make sure model, anchors and classes match
else:
assert self.yolo_model.layers[-1].output_shape[-1] == \
num_anchors/len(self.yolo_model.output) * (num_classes + 5), \
'Mismatch between model and given anchor and class sizes'
print('{} model, anchors, and classes loaded.'.format(model_path))
# Generate colors for drawing bounding boxes.
hsv_tuples = [(x / len(self.class_names), 1., 1.)
for x in range(len(self.class_names))]
self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
self.colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
self.colors))
np.random.seed(10101) # Fixed seed for consistent colors across runs.
np.random.shuffle(self.colors) # Shuffle colors to decorrelate adjacent classes.
np.random.seed(None) # Reset seed to default.
# Generate output tensor targets for filtered bounding boxes.
self.input_image_shape = K.placeholder(shape=(2, ))
if self.gpu_num>=2:
self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)
boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,
len(self.class_names), self.input_image_shape,
score_threshold=self.score, iou_threshold=self.iou)
return boxes, scores, classes
def detect_image(self, image):
start = timer()
if self.model_image_size != (None, None):
assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'
assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'
boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
else:
new_image_size = (image.width - (image.width % 32),
image.height - (image.height % 32))
boxed_image = letterbox_image(image, new_image_size)
image_data = np.array(boxed_image, dtype='float32')
print(image_data.shape)
image_data /= 255.
image_data = np.expand_dims(image_data, 0) # Add batch dimension.
out_boxes, out_scores, out_classes = self.sess.run(
[self.boxes, self.scores, self.classes],
feed_dict={
self.yolo_model.input: image_data,
self.input_image_shape: [image.size[1], image.size[0]],
K.learning_phase(): 0
})
print('Found {} boxes for {}'.format(len(out_boxes), 'img'))
font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
thickness = (image.size[0] + image.size[1]) // 300
for i, c in reversed(list(enumerate(out_classes))):
predicted_class = self.class_names[c]
box = out_boxes[i]
score = out_scores[i]
label = '{} {:.2f}'.format(predicted_class, score)
draw = ImageDraw.Draw(image)
label_size = draw.textsize(label, font)
top, left, bottom, right = box
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
print(label, (left, top), (right, bottom))
if top - label_size[1] >= 0:
text_origin = np.array([left, top - label_size[1]])
else:
text_origin = np.array([left, top + 1])
# My kingdom for a good redistributable image drawing library.
for i in range(thickness):
draw.rectangle(
[left + i, top + i, right - i, bottom - i],
outline=self.colors[c])
draw.rectangle(
[tuple(text_origin), tuple(text_origin + label_size)],
fill=self.colors[c])
draw.text(text_origin, label, fill=(0, 0, 0), font=font)
del draw
end = timer()
print(end - start)
return image
def close_session(self):
self.sess.close()
def detect_video(yolo, video_path, output_path=""):
import cv2
video_path = './input.mp4'
vid = cv2.VideoCapture(video_path)
if not vid.isOpened():
raise IOError("Couldn't open webcam or video")
video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
video_fps = vid.get(cv2.CAP_PROP_FPS)
video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
isOutput = True if output_path != "" else False
if isOutput:
print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
accum_time = 0
curr_fps = 0
fps = "FPS: ??"
prev_time = timer()
while True:
return_value, frame = vid.read()
image = Image.fromarray(frame)
image = yolo.detect_image(image)
result = np.asarray(image)
curr_time = timer()
exec_time = curr_time - prev_time
prev_time = curr_time
accum_time = accum_time + exec_time
curr_fps = curr_fps + 1
if accum_time == 10 : mouseBrush(image)
if accum_time > 1:
accum_time = accum_time - 1
fps = "FPS: " + str(curr_fps)
curr_fps = 0
cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.50, color=(255, 0, 0), thickness=2)
cv2.namedWindow("result", cv2.WINDOW_NORMAL)
cv2.imshow("result", result)
if isOutput:
out.write(result)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
yolo.close_session()
Actually, this code is just one part of the whole YOLOv3 model, but I think the part that deals with the number of video frames is included here.
If you mean the current FPS: this is the part that displays the current FPS as a string.
while True:
    return_value, frame = vid.read()
    image = Image.fromarray(frame)
    image = yolo.detect_image(image)
    result = np.asarray(image)
    curr_time = timer()
    exec_time = curr_time - prev_time
    prev_time = curr_time
    accum_time = accum_time + exec_time
    curr_fps = curr_fps + 1
    if accum_time > 1:
        accum_time = accum_time - 1
        fps = "FPS: " + str(curr_fps)
        curr_fps = 0
    cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                fontScale=0.50, color=(255, 0, 0), thickness=2)
    cv2.namedWindow("result", cv2.WINDOW_NORMAL)
    cv2.imshow("result", result)
    if curr_fps == 10:    # Stops at 10th frame.
        time.sleep(60)    # Delay for 1 minute (60 seconds).
    if isOutput:
        out.write(result)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
I needed the frame number to control every 10th frame in the video file, and thanks to the comments above, I figured out that the line I was looking for is:
curr_fps = curr_fps + 1
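Note (an observation beyond the original text): curr_fps is reset to 0 every second by the FPS bookkeeping above, so a counter that never resets is safer for a true every-10th-frame trigger. A minimal sketch:
frame_idx = 0
while True:
    return_value, frame = vid.read()
    if not return_value:
        break  # end of video
    frame_idx += 1
    if frame_idx % 10 == 0:
        pass  # handle every 10th frame here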
UPD: The following line calculates the number of frames in a video file:
NumberOfFrame = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
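As a complementary sketch (assuming the same ./input.mp4 path as above), the video length in seconds follows directly from the frame-count and FPS properties:
import cv2

vid = cv2.VideoCapture('./input.mp4')
frame_count = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
fps = vid.get(cv2.CAP_PROP_FPS)
duration_s = frame_count / fps if fps else 0.0  # guard against fps == 0
print("{} frames at {:.2f} FPS = {:.2f} s".format(frame_count, fps, duration_s))
vid.release()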

Could not calculate accuracy, sensitivity and specificity of segmented image

I am not able to calculate the accuracy, sensitivity, and specificity of a segmented image. I segmented the image with my own algorithm and want to measure its accuracy by comparing it with the ground-truth image.
Whenever I execute this code, the output window freezes and only displays the output image. I want the accuracy, sensitivity, and specificity values to be printed too.
In my code, the segmented image is erosion2 and the ground-truth image is res. Both images are displayed, but no accuracy is printed when I do the comparison.
import cv2 as cv
import numpy as np

img = cv.imread('dataset.tif')
cv.imshow('Input Image', img)

b, g, r = cv.split(img)
cv.imshow('Red Channel', r)
cv.imshow('Green Channel', g)
cv.imshow('Blue Channel', b)

img2 = cv.bitwise_not(g)
cv.imshow('Processed Image', img2)

kernel3 = cv.getStructuringElement(cv.MORPH_ELLIPSE, (13, 13))
tophat = cv.morphologyEx(img2, cv.MORPH_TOPHAT, kernel3)
cv.imshow('Top hat', tophat)

thres = 12
maxValue = 255
ret, thresh41 = cv.threshold(tophat, thres, maxValue, cv.THRESH_TOZERO)
th, dat = cv.threshold(tophat, thres, maxValue, cv.THRESH_BINARY)
cv.imshow('thresh', dat)

kernel1 = cv.getStructuringElement(cv.MORPH_ELLIPSE, (2, 2))
dilation = cv.dilate(dat, kernel1, iterations=1)
erosion = cv.erode(dilation, kernel1, iterations=1)
erosion1 = cv.GaussianBlur(erosion, (5, 5), 0)
erosion1 = cv.blur(erosion, (5, 5), 0)

x = cv.subtract(dilation, erosion1)
x = cv.medianBlur(x, 5)
cv.imshow("op1", x)

b2 = cv.add(erosion, x)
cv.imshow("fin", b2)

erosion2 = cv.erode(b2, kernel1, iterations=1)
cv.imshow("result", erosion2)

res = cv.imread('manual1.tiff')
#cv.imshow('GroundTruth Image', img3)
res = cv.cvtColor(res, cv.COLOR_BGR2GRAY)
print(res.shape)

#def calC_accuracy(result, label):
tp = 0
fp = 0
tn = 0
fn = 0
i = 0
j = 0
print(np.unique(erosion2))
print(np.unique(res))
while i < erosion2.shape[0]:
    j = 0
    while j < erosion2.shape[1]:
        # res is the ground truth ("label" in the commented-out function
        # signature); the pasted snippet used an undefined name `label` here
        if res[i, j] == 255:
            if erosion2[i, j] == res[i, j]:
                tp = tp + 1
            else:
                fn = fn + 1
        else:
            if erosion2[i, j] == res[i, j]:
                tn = tn + 1
            else:
                fp = fp + 1
        j = j + 1
    i = i + 1
print("TN =", tn, "FP =", fp)
print("FN =", fn, "TP =", tp)
print("Sensitivity = ", float(tp/(tp+fn+1)))
print("Specificity = ", float(tn/(tn+fp+1)))
print("Accuracy = ", float((tn+tp)/(fn+fp+1+tn+tp)))
#print("PPV = ", float(tp/(tp+fp+1)))
#return float(tp/(tp+fp+1))
#cv.imshow('result', erosion)
cv.waitKey(0)
cv.destroyAllWindows()
Maybe it's due to cv.waitKey(0) or cv.destroyAllWindows(); the program may end before it can show the sensitivity and other values. You can use the pdb module (https://docs.python.org/2/library/pdb.html) to step through your Python program and figure out where the problem is.
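As an aside (a sketch, not from the original answer), the pixel-by-pixel while loops are also what keeps the windows frozen for so long: NumPy can compute the same counts in one pass, assuming erosion2 and res are binary 0/255 arrays of the same shape:
import numpy as np

pred = erosion2 == 255   # segmentation result
truth = res == 255       # ground truth

tp = np.count_nonzero(pred & truth)
tn = np.count_nonzero(~pred & ~truth)
fp = np.count_nonzero(pred & ~truth)
fn = np.count_nonzero(~pred & truth)

# same +1 smoothing as the original prints
print("Sensitivity =", tp / (tp + fn + 1))
print("Specificity =", tn / (tn + fp + 1))
print("Accuracy =", (tn + tp) / (fn + fp + 1 + tn + tp))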

Captcha Break or Text reader from Image: OCR-Python

I have a typical captcha image which contains only digits.
Ex.
I want to extract 78614 from this image.
I tried a few libraries and code snippets using OCR in Python, but they return 0.
Sample Code-1
from captcha_solver import CaptchaSolver

solver = CaptchaSolver('browser')
with open('captcha.png', 'rb') as inp:
    raw_data = inp.read()
print(solver.solve_captcha(raw_data))
Sample Code-2
from PIL import Image

def p(img, letter):
    A = img.load()
    B = letter.load()
    mx = 1000000
    max_x = 0
    x = 0
    for x in range(img.size[0] - letter.size[0]):
        _sum = 0
        for i in range(letter.size[0]):
            for j in range(letter.size[1]):
                _sum = _sum + abs(A[x+i, j][0] - B[i, j][0])
        if _sum < mx:
            mx = _sum
            max_x = x
    return mx, max_x

def ocr(im, threshold=200, alphabet="0123456789abcdef"):
    img = Image.open(im)
    img = img.convert("RGB")
    box = (8, 8, 58, 18)
    img = img.crop(box)
    pixdata = img.load()
    letters = Image.open(im)
    ledata = letters.load()
    # Clean the background noise: if color != white, then set to black.
    for y in range(img.size[1]):
        for x in range(img.size[0]):
            if (pixdata[x, y][0] > threshold) \
                    and (pixdata[x, y][1] > threshold) \
                    and (pixdata[x, y][2] > threshold):
                pixdata[x, y] = (255, 255, 255, 255)
            else:
                pixdata[x, y] = (0, 0, 0, 255)
    counter = 0
    old_x = -1
    letterlist = []
    for x in range(letters.size[0]):
        black = True
        for y in range(letters.size[1]):
            if ledata[x, y][0] != 0:  # Python 3: the old '<>' operator is no longer valid
                black = False
                break
        if black:
            box = (old_x + 1, 0, x, 10)
            letter = letters.crop(box)
            t = p(img, letter)
            print(counter, x, t)
            letterlist.append((t[0], alphabet[counter], t[1]))
            old_x = x
            counter += 1
    box = (old_x + 1, 0, 140, 10)
    letter = letters.crop(box)
    t = p(img, letter)
    letterlist.append((t[0], alphabet[counter], t[1]))
    t = sorted(letterlist)
    t = t[0:5]  # 5-letter captcha
    final = sorted(t, key=lambda e: e[2])
    answer = ""
    for l in final:
        answer = answer + l[1]
    return answer

print(ocr('captcha.png'))
Has anyone had the opportunity to extract text from such a typical captcha?
You can use machine-learning (neural-network) models to solve captchas; they will almost always outperform free OCR or any other method.
Here is a good starting point: https://medium.com/@ageitgey/how-to-break-a-captcha-system-in-15-minutes-with-machine-learning-dbebb035a710
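If you still want to try free OCR first, here is a minimal sketch (an assumed setup, not from the original answer) using pytesseract with a digit whitelist; results on noisy captchas vary, which is why the machine-learning route above usually wins:
import cv2
import pytesseract  # requires the Tesseract binary to be installed

img = cv2.imread('captcha.png', 0)
# Binarize to strip background noise before OCR
img = cv2.threshold(img, 200, 255, cv2.THRESH_BINARY)[1]

# --psm 7: treat the image as a single text line; whitelist digits only
text = pytesseract.image_to_string(
    img, config='--psm 7 -c tessedit_char_whitelist=0123456789')
print(text.strip())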
