I am trying to perform edge detection on my images of soil grains using the holistically-nested edge detection (HED) method. However, when the sample combines fine and coarse soil grains, the region of fine particles is not detected clearly. My idea is to process the image in tiles: cut it into smaller rectangular areas in both directions, run HED on every portion of the image, and write each edge map into a black copy of the image so the edged portions accumulate there.
I faced an error after repeating the HED algorithm in a for loop that divides the height of the image into 5 portions and the width into 4, but I can't fix that error.
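To make the tiling idea concrete, here is a minimal sketch of the slicing I have in mind (a hypothetical helper, separate from the actual script below):

def tile_image(image, rows=5, cols=4):
    # yield each rectangular tile together with its top-left corner
    H, W = image.shape[:2]
    th, tw = H // rows, W // cols
    for r in range(rows):
        for c in range(cols):
            y, x = r * th, c * tw
            yield y, x, image[y:y + th, x:x + tw]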
Here is the algorithm I used:
# import the necessary packages
import argparse
import cv2
import os
import easygui
path = easygui.fileopenbox()
print(path)
hdir = os.path.dirname(path)
print(hdir)
hfilename = os.path.basename(path)
print(hfilename)
hname = os.path.splitext(hfilename)[0]
print(hname)
houtname = hname+"_out.jpg"
print(houtname)
hout = os.path.sep.join([hdir,houtname])
print(hout)
# # construct the argument parser and parse the arguments
# ap = argparse.ArgumentParser()
# ap.add_argument("-d", "--edge-detector", type=str, required=True,
# help="path to OpenCV's deep learning edge detector")
# ap.add_argument("-i", "--image", type=str, required=True,
# help="path to input image")
# args = vars(ap.parse_args())
class CropLayer(object):
def __init__(self, params, blobs):
# initialize our starting and ending (x, y)-coordinates of
# the crop
self.startX = 0
self.startY = 0
self.endX = 0
self.endY = 0
def getMemoryShapes(self, inputs):
# the crop layer will receive two inputs -- we need to crop
# the first input blob to match the shape of the second one,
# keeping the batch size and number of channels
(inputShape, targetShape) = (inputs[0], inputs[1])
(batchSize, numChannels) = (inputShape[0], inputShape[1])
(H, W) = (targetShape[2], targetShape[3])
# compute the starting and ending crop coordinates
self.startX = int((inputShape[3] - targetShape[3]) / 2)
self.startY = int((inputShape[2] - targetShape[2]) / 2)
self.endX = self.startX + W
self.endY = self.startY + H
# return the shape of the volume (we'll perform the actual
# crop during the forward pass)
return [[batchSize, numChannels, H, W]]
def forward(self, inputs):
# use the derived (x, y)-coordinates to perform the crop
return [inputs[0][:, :, self.startY:self.endY,
self.startX:self.endX]]
# load our serialized edge detector from disk
print("[INFO] loading edge detector...")
fpath = os.path.abspath(__file__)
fdir = os.path.dirname(fpath)
print(fdir)
protoPath = os.path.sep.join([fdir,"hed_model", "deploy.prototxt"])
print(protoPath)
modelPath = os.path.sep.join([fdir,"hed_model","hed_pretrained_bsds.caffemodel"])
print(modelPath)
net = cv2.dnn.readNetFromCaffe(protoPath, modelPath)
# register our new layer with the model
cv2.dnn_registerLayer("Crop", CropLayer)
# load the input image and grab its dimensions
image = cv2.imread(r'D:\My work\MASTERS WORK\GSD files\Sample E photos\SampleE_#1_26pxfor1mm.jpg')
im_copy = image.copy() * 0  # black copy image that will accumulate the edge maps
(H, W) = image.shape[:2]
# print(image.shape[:2])
# image.shape[:2] =(H*3, W*3)
# image = cv2.resize(image,0.5)
# step through the image tile by tile: height in 5 bands, width in 4
th, tw = int(H / 5), int(W / 4)
for h in range(0, H - th + 1, th):
    for w in range(0, W - tw + 1, tw):
        tile = image[h:h + th, w:w + tw]
        # convert the tile to grayscale, blur it, and perform Canny
        # edge detection
        print("[INFO] performing Canny edge detection...")
        gray = cv2.cvtColor(tile, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(gray, (5, 5), 0)
        canny = cv2.Canny(blurred, 30, 150)
        # construct a blob out of the tile for the Holistically-Nested
        # Edge Detector; blobFromImage expects size as (width, height)
        blob = cv2.dnn.blobFromImage(tile, scalefactor=1.0, size=(tw, th),
            mean=(104.00698793, 116.66876762, 122.67891434),
            swapRB=False, crop=False)
        # set the blob as the input to the network and perform a forward pass
        # to compute the edges
        print("[INFO] performing holistically-nested edge detection...")
        net.setInput(blob)
        hed = net.forward()
        hed = cv2.resize(hed[0, 0], (tw, th))
        hed = (255 * hed).astype("uint8")
        # write the edge map of this tile into the matching region of the
        # black copy image
        im_copy[h:h + th, w:w + tw] = cv2.cvtColor(hed, cv2.COLOR_GRAY2BGR)
# show the output edge detection results for Canny and
# Holistically-Nested Edge Detection
cv2.imshow("Input", image)
cv2.imshow("Canny", canny)
cv2.imshow("HED", hed)
cv2.waitKey(0)
cv2.imshow('Frame ',im_copy)
cv2.imwrite(hout, im_copy)
cv2.waitKey(0)
I then use this edge map for further analysis of the image.
The error I get when running the algorithm:
net = cv2.dnn.readNetFromCaffe(protoPath, modelPath)
cv2.error: OpenCV(4.1.1) C:\projects\opencv-python\opencv\modules\dnn\src\caffe\caffe_io.cpp:1121: error: (-2:Unspecified error) FAILED: fs.is_open(). Can't open "D:\My work\MASTERS WORK\hed_model\deploy.prototxt" in function 'cv::dnn::ReadProtoFromTextFile'
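The traceback says OpenCV cannot open deploy.prototxt at the path built from the script's directory, so a quick sanity check I can add before readNetFromCaffe (a minimal sketch, assuming the hed_model folder layout used above):

import os
# readNetFromCaffe raises the fs.is_open() error when a path does not exist
for p in (protoPath, modelPath):
    if not os.path.isfile(p):
        raise FileNotFoundError("HED model file not found: " + p)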
I am doing training on the LiTS (liver) dataset using PyTorch. Images are .jpeg files and masks are .tiff images.
After preprocessing steps such as normalization and shape manipulation, the .tiff masks are not visible; they display as black images.
visualize(image = image, mask = mask.squeeze()) gives the image output, but not the mask output.
import os
import cv2
import numpy as np
# assumption: BaseDataset comes from torch.utils.data (the import is not shown in the question)
from torch.utils.data import Dataset as BaseDataset

class Dataset(BaseDataset):
'''
    Args:
        image_dir (str): path to images folder
        mask_dir (str): path to segmentation masks folder
        classes (list): names of classes to extract from the segmentation mask
        augmentation (albumentations.Compose): data transformation pipeline
            (e.g. flip, scale, etc.)
        preprocessing (albumentations.Compose): data preprocessing
            (e.g. normalization, shape manipulation, etc.)
'''
CLASSES = ['background', 'liver', 'tumor']
def __init__(self, image_dir, mask_dir, classes = None, augmentation= None, preprocessing=None):
self.images = os.listdir(image_dir)[0:3000]
#self.masks = list(map(lambda x: x.replace(".jpg", "_mask.png"), self.images)) #only for 512x512
#self.masks = list(map(lambda x: x.replace(".jpg", ".png"), self.images))
self.masks = list(map(lambda x: x.replace(".jpg", ".tiff"), self.images))
self.class_values = [self.CLASSES.index(cls.lower()) for cls in classes]
self.augmentation = augmentation
self.preprocessing = preprocessing
self.image_dir = image_dir
self.mask_dir = mask_dir
def __getitem__(self, i):
# read data
image = cv2.imread(self.image_dir + '/' + self.images[i])
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
mask = cv2.imread(self.mask_dir + '/' + self.masks[i], 0)
mask = np.expand_dims(mask, axis = 2)
# masks = [(mask == v) for v in self.class_values]
# mask = np.stack(masks, axis=-1).astype('float')
# print(mask.shape)
# # extract certain classes from mask (e.g. cars)
# masks = [(mask == v) for v in self.class_values]
# mask = np.stack(masks, axis=-1).astype('float')
if self.augmentation:
sample = self.augmentation(image = image, mask= mask)
image, mask = sample['image'], sample['mask']
if self.preprocessing:
sample = self.preprocessing(image = image, mask= mask)
image, mask = sample['image'], sample['mask']
return image, mask
def __len__(self):
return len(self.images)
dataset = Dataset(image_dir = train_frame_path,
mask_dir = train_mask_path,
classes = ['background', 'liver', 'tumor'])
image, mask = dataset[1210]
visualize(image = image, mask = mask.squeeze())
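One thing I considered (an assumption, since I have not confirmed the mask values): if the mask stores class labels 0, 1, 2, it would look almost black in an 8-bit display even when the data is fine. A minimal sketch that rescales the labels for viewing:

import numpy as np
import matplotlib.pyplot as plt

def show_label_mask(mask):
    # map labels 0/1/2 to 0/127/254 so they are visible on screen
    plt.imshow(mask.squeeze().astype(np.uint8) * 127, cmap='gray', vmin=0, vmax=255)
    plt.show()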
I made a license plate detection system and I have a problem; I have two questions.
Q1: I want to print the plate number in this section. How can I do that?
def fix_dimension(img):
new_img = np.zeros((28,28,3))
for i in range(3):
new_img[:,:,i] = img
return new_img
def show_results():
dict = {}
characters = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
for i,c in enumerate(characters):
dict[i] = c
output = []
for i,ch in enumerate(char): #iterating over the characters
img_ = cv2.resize(ch, (28,28), interpolation=cv2.INTER_AREA)
img = fix_dimension(img_)
img = img.reshape(1,28,28,3) #preparing image for the model
classes_x=np.argmax(model.predict(img), axis=-1)#predicting the class
        character = dict[output]  # this lookup raises "unhashable type: 'list'"
output.append(character) #storing the result in a list
plate_number = ''.join(output)
return plate_number
print(show_results())
Output: unhashable type: 'list'
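From the traceback, I suspect the dict[output] lookup: output is a list, and lists are unhashable, so the dictionary should be indexed with the predicted class instead. A minimal sketch of the corrected loop body (assuming model.predict returns one row of class scores per image):

classes_x = np.argmax(model.predict(img), axis=-1)  # e.g. array([17])
character = dict[int(classes_x[0])]  # index the dict with the class, not the output list
output.append(character)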
Q2: When I convert the output to a string, I get this error from the code below:
plate_number = show_results()
output_img, plate = detect_plate(img, plate_number)
display(output_img, 'detected license plate number in the input image')
Output: error: OpenCV(4.5.5) D:\a\opencv-python\opencv-python\opencv\modules\objdetect\src\cascadedetect.cpp:1389: error: (-215:Assertion failed) scaleFactor > 1 && _image.depth() == CV_8U in function 'cv::CascadeClassifierImpl::detectMultiScale'
plate_cascade = cv2.CascadeClassifier('input/indian_license_plate.xml')
def detect_plate(img, text=''): # the function detects and performs blurring on the number plate.
plate_img = img.copy()
roi = img.copy()
plate_rect = plate_cascade.detectMultiScale(plate_img, scaleFactor = 1.2, minNeighbors = 7) # detects numberplates and returns the coordinates and dimensions of detected license plate's contours.
for (x,y,w,h) in plate_rect:
roi_ = roi[y:y+h, x:x+w, :] # extracting the Region of Interest of license plate for blurring.
plate = roi[y:y+h, x:x+w, :]
cv2.rectangle(plate_img, (x+2,y), (x+w-3, y+h-5), (51,181,155), 3) # finally representing the detected contours by drawing rectangles around the edges.
if text!='':
plate_img = cv2.putText(plate_img, text, (x-w//2,y-h//2),
cv2.FONT_HERSHEY_COMPLEX_SMALL , 0.5, (51,181,155), 1, cv2.LINE_AA)
return plate_img, plate # returning the processed image.
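My current guess for Q2 is that detectMultiScale requires an 8-bit (CV_8U) image, so img may have become float somewhere upstream. A minimal check before calling detect_plate (assuming float pixels would be in [0, 1]):

import numpy as np

if img.dtype != np.uint8:
    # rescale assumed [0, 1] floats back to 8-bit before cascade detection
    img = (img * 255).clip(0, 255).astype(np.uint8)
output_img, plate = detect_plate(img, plate_number)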
I am trying to get specific information from a bill. So far I have used OCR and OpenCV; here are the results:
import cv2
import pytesseract
import numpy as np
image = cv2.imread('1.png')
# get grayscale image
def get_grayscale(image):
return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# noise removal
def remove_noise(image):
return cv2.medianBlur(image,5)
#thresholding
def thresholding(image):
return cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
#dilation
def dilate(image):
kernel = np.ones((5,5),np.uint8)
return cv2.dilate(image, kernel, iterations = 1)
#erosion
def erode(image):
kernel = np.ones((5,5),np.uint8)
return cv2.erode(image, kernel, iterations = 1)
#opening - erosion followed by dilation
def opening(image):
kernel = np.ones((5,5),np.uint8)
return cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel)
#canny edge detection
def canny(image):
return cv2.Canny(image, 100, 200)
#skew correction
def deskew(image):
coords = np.column_stack(np.where(image > 0))
angle = cv2.minAreaRect(coords)[-1]
if angle < -45:
angle = -(90 + angle)
else:
angle = -angle
(h, w) = image.shape[:2]
center = (w // 2, h // 2)
M = cv2.getRotationMatrix2D(center, angle, 1.0)
rotated = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)
return rotated
#template matching
def match_template(image, template):
return cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
gray = get_grayscale(image)
thresh = thresholding(gray)
opened = opening(gray)  # renamed so the helper functions are not shadowed
edges = canny(gray)
cv2.imshow('res', gray)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Adding custom options
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
custom_config = r'--oem 3 --psm 6'
pytesseract.image_to_string(gray, config=custom_config)
The output I got was:
Out[9]: 'aso en bosaanes sosesoen\nSee arr ee\n[internationale Spedition “works carrier:\nree Meese
Eaton oro\nSE Eesn Srey alata ascea\ntay See eae ror\nTBlaecaseew £2 saserzaz9gn [acs Sue Saeeats
Arve\noricore toptetschlBve ta\nbares eye creat tere\nLene et aan Ease\ncoon soos\nreaee\nbenenter
petachand AiG & co. x8\nese See ete Fests\nsee Sse\npearson | |\nen 7\nFeanséurt an main bawegoansn
|\npe |\nsor per tantace e/ear0003537\nEl = T=] | = [== |\nSta psa a4 fonstsanern\nLerper
atcnen\nwe\n20 ocd hoes ale 22ers wf\n30 ped londed on pwc aoasonnr #0\n35 ped londed on pwc 2008es00
#0\n64 pcs loaded on| PMC BO3BBART MD &\n[ental — |\n=\n|\nSJ |] Spscrinan copnapen as wtshan momen
ante\nart veins otetrich cata 60. RAS sem\n[re ote\n[\\gesoago |__| tars ena Detrich ea\nTon anine
Setrion cn a co. eta a5 scan\nSS aan ee ee\nee eS] -
esemen\ncision\n\x0c'
I need only specific information, like the name, shipping address, quantity, etc., not all of the characters. Also, the output is all mashed up. Can anyone please help me with this? Any code or other help would be appreciated.
You can use pytesseract.image_to_pdf_or_hocr(), choosing hocr as the output format. The result contains bounding boxes at the character, word, and line level.
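A minimal sketch of that call (the file name '1.png' is taken from the question):

import cv2
import pytesseract

image = cv2.imread('1.png')
# returns hOCR XML as bytes, with bbox attributes per character/word/line
hocr = pytesseract.image_to_pdf_or_hocr(image, extension='hocr')
with open('out.hocr', 'wb') as f:
    f.write(hocr)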
I am working on an image-processing task in Python that depends mainly on detecting the grains in images of soil samples. The first step in the process is edge detection; I use the HED algorithm (holistically-nested edge detection) for this step rather than other edge detection functions in Python such as Canny or Sobel.
However, I have a problem detecting the grains of fine soil particles, as in the sand sample images shown below. I am asking whether some modification can be made to the image or the algorithm to improve edge detection so it captures the borders of as many of the grains as possible.
This is the algorithm I use, together with its edge detection results.
# USAGE
# python detect_edges_image.py --edge-detector hed_model --image images/guitar.jpg
# import the necessary packages
import argparse
import cv2
import os
import easygui
import pandas as pd
path = easygui.fileopenbox()
print(path)
hdir = os.path.dirname(path)
print(hdir)
hfilename = os.path.basename(path)
print(hfilename)
hname = os.path.splitext(hfilename)[0]
print(hname)
houtname = hname+"_out.jpg"
print(houtname)
hout = os.path.sep.join([hdir,houtname])
print(hout)
# # construct the argument parser and parse the arguments
# ap = argparse.ArgumentParser()
# ap.add_argument("-d", "--edge-detector", type=str, required=True,
# help="path to OpenCV's deep learning edge detector")
# ap.add_argument("-i", "--image", type=str, required=True,
# help="path to input image")
# args = vars(ap.parse_args())
class CropLayer(object):
def __init__(self, params, blobs):
# initialize our starting and ending (x, y)-coordinates of
# the crop
self.startX = 0
self.startY = 0
self.endX = 0
self.endY = 0
def getMemoryShapes(self, inputs):
# the crop layer will receive two inputs -- we need to crop
# the first input blob to match the shape of the second one,
# keeping the batch size and number of channels
(inputShape, targetShape) = (inputs[0], inputs[1])
(batchSize, numChannels) = (inputShape[0], inputShape[1])
(H, W) = (targetShape[2], targetShape[3])
# compute the starting and ending crop coordinates
self.startX = int((inputShape[3] - targetShape[3]) / 2)
self.startY = int((inputShape[2] - targetShape[2]) / 2)
self.endX = self.startX + W
self.endY = self.startY + H
# return the shape of the volume (we'll perform the actual
# crop during the forward pass)
return [[batchSize, numChannels, H, W]]
def forward(self, inputs):
# use the derived (x, y)-coordinates to perform the crop
return [inputs[0][:, :, self.startY:self.endY,
self.startX:self.endX]]
# load our serialized edge detector from disk
print("[INFO] loading edge detector...")
fpath = os.path.abspath(__file__)
fdir = os.path.dirname(fpath)
print(fdir)
protoPath = os.path.sep.join([fdir,"hed_model", "deploy.prototxt"])
print(protoPath)
modelPath = os.path.sep.join([fdir,"hed_model","hed_pretrained_bsds.caffemodel"])
print(modelPath)
net = cv2.dnn.readNetFromCaffe(protoPath, modelPath)
# register our new layer with the model
cv2.dnn_registerLayer("Crop", CropLayer)
# load the input image and grab its dimensions
image = cv2.imread(r'D:\My work\MASTERS WORK\SAND - UNIFORM\sand_180pxfor1cm(130,120,75).jpg')
# image =cv2.equalizeHist(img)
# image = cv2.pyrMeanShiftFiltering(image1,10,20)
(H, W) = image.shape[:2]
# print(image.shape[:2])
# image.shape[:2] = (H*3, W*3)
# image = cv2.resize(image,0.5)
# convert the image to grayscale, blur it, and perform Canny
# edge detection
print("[INFO] performing Canny edge detection...")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
# blurred = cv2.addWeighted(gray,1.5,blurred,-0.5,0)
canny = cv2.Canny(blurred,30, 150)
# construct a blob out of the input image for the Holistically-Nested
# Edge Detector
# cc = cv2.cvtColor(canny, cv2.COLOR_GRAY2BGR)
# image = image+cc
# mean = (104.00698793, 116.66876762, 122.67891434),
blob = cv2.dnn.blobFromImage(image, scalefactor=1.0, size=(W, H),
# mean=(110,95,95),
# mean=(104.00698793, 116.66876762, 122.67891434),
# mean=(104, 116, 122),
mean=(130, 120, 75),
# mean=(145, 147, 180),
swapRB= False, crop=False)
print( blob)
cv2.waitKey(0)
# set the blob as the input to the network and perform a forward pass
# to compute the edges
print("[INFO] performing holistically-nested edge detection...")
net.setInput(blob)
hed = net.forward()
hed = cv2.resize(hed[0, 0], (W, H))
hed = (255 * hed).astype("uint8")
# show the output edge detection results for Canny and
# Holistically-Nested Edge Detection
cv2.imshow("Input", image)
cv2.imshow("Canny", canny)
cv2.imshow("HED", hed)
cv2.imwrite(hout, hed)
cv2.waitKey(0)
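One modification I have been considering (my own assumption, not yet part of the script above): upscaling the input before running HED so the fine grains span more pixels, then scaling the edge map back down. A rough sketch:

scale = 2  # hypothetical factor; tune to the image resolution
big = cv2.resize(image, (W * scale, H * scale), interpolation=cv2.INTER_CUBIC)
blob_big = cv2.dnn.blobFromImage(big, scalefactor=1.0, size=(W * scale, H * scale),
                                 mean=(104.00698793, 116.66876762, 122.67891434),
                                 swapRB=False, crop=False)
net.setInput(blob_big)
hed_big = net.forward()
hed_up = (255 * cv2.resize(hed_big[0, 0], (W, H))).astype("uint8")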
I'm creating a function (in Python) that expects/receives a single image containing multiple human faces and returns multiple smaller images (one per face). I am able to do a cv2.imshow inside the function and see the expected smaller images, but when I attempt a cv2.imshow from outside the function, it does not work (I cannot see the smaller image and get a TypeError instead). Would appreciate some guidance.
def stills(user_image):
#sub_frames = []
fqp_image_src = (user_image)
raw_pic = cv2.imread(fqp_image_src)
mpic = cv2.resize(raw_pic,(0,0), fx=0.30, fy=0.30)
mpic_rgb = cv2.cvtColor(mpic, cv2.COLOR_BGR2RGB)
face_boxes = haar_cascade_face.detectMultiScale(mpic_rgb, scaleFactor = 1.2, minNeighbors = 5)
count = int(len(face_boxes))
for i in range(count):
face_box = face_boxes[i]
final = cv2.rectangle(mpic, (face_box[0], face_box[1]), ((face_box[0]+face_box[2]),(face_box[1]+face_box[3])), (0,255,0),2)
sub_frame = final[face_box[1]:(face_box[1]+face_box[3]), face_box[0]:(face_box[0]+face_box[2])]
#sub_frames.append(sub_frame)
cv2.imshow('frame', sub_frame) # this works
cv2.waitKey()
return (sub_frame, final)
# calling the function
something = stills("abc.jpg")
cv2.imshow('frame',something) # this does not work
cv2.waitKey()
TypeError: Expected cv::UMat for argument 'mat'
This will do what you expected, just with some simplification and with full file paths.
One of the key errors was giving detectMultiScale a colored image; the input should have a single channel of brightness values (grayscale).
To display a colored image with the faces boxed, a copy of the image is converted to grayscale for detection, which gives the coordinates to draw on the colored image.
import cv2
import os
# take the directory containing this file as a global
PATH = os.path.dirname(os.path.abspath(__file__))
haar_cascade_face = cv2.CascadeClassifier(os.path.join(PATH, 'haarcascade_frontalface_alt.xml'))
def stills(user_image):
image = os.path.join(PATH, user_image)
image = cv2.imread(image)
image = cv2.resize(image, (0, 0), fx=0.30, fy=0.30)
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
face_boxes = haar_cascade_face.detectMultiScale(gray_image, scaleFactor=1.073, minNeighbors=8)
    final = image  # make the function always return an image
sub_frames = []
# Check if there are faces
if len(face_boxes) > 0:
for x, y, w, h in face_boxes:
final = cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
sub_frame = image[y:y+h, x:x+w]
            sub_frames.append([x, y, x+w, y+h])  # stores the box coordinates; append sub_frame here instead to collect the cropped faces
cv2.imshow('sub_frame', sub_frame)
            # cv2.waitKey()  # no need to make the user wait
else:
print('No faces found')
return (sub_frames, final)
if __name__ == '__main__':
fragments, final = stills("abc.jpg")
cv2.imshow('frame', final)
cv2.waitKey()