Improve HED algorithm for edge detection - python

I am working on an image processing task in Python whose main goal is detecting the grains in images of soil samples, so the first step of the pipeline is edge detection. I use the HED algorithm (holistically-nested edge detection) for this step rather than other edge detectors available in OpenCV such as Canny or Sobel.
However, I have a problem detecting the grains of fine soil particles, as in the sand sample images shown below. Is there any modification that can be made to the image or to the algorithm to improve the edge detection so that I get the borders of all the grains, or as many of them as possible?
This is the algorithm I use and the results it gives for edge detection.
# USAGE
# python detect_edges_image.py --edge-detector hed_model --image images/guitar.jpg
# import the necessary packages
import argparse
import cv2
import os
import easygui
import pandas as pd
path = easygui.fileopenbox()
print(path)
hdir = os.path.dirname(path)
print(hdir)
hfilename = os.path.basename(path)
print(hfilename)
hname = os.path.splitext(hfilename)[0]
print(hname)
houtname = hname+"_out.jpg"
print(houtname)
hout = os.path.sep.join([hdir,houtname])
print(hout)
# # construct the argument parser and parse the arguments
# ap = argparse.ArgumentParser()
# ap.add_argument("-d", "--edge-detector", type=str, required=True,
# help="path to OpenCV's deep learning edge detector")
# ap.add_argument("-i", "--image", type=str, required=True,
# help="path to input image")
# args = vars(ap.parse_args())
class CropLayer(object):
def __init__(self, params, blobs):
# initialize our starting and ending (x, y)-coordinates of
# the crop
self.startX = 0
self.startY = 0
self.endX = 0
self.endY = 0
def getMemoryShapes(self, inputs):
# the crop layer will receive two inputs -- we need to crop
# the first input blob to match the shape of the second one,
# keeping the batch size and number of channels
(inputShape, targetShape) = (inputs[0], inputs[1])
(batchSize, numChannels) = (inputShape[0], inputShape[1])
(H, W) = (targetShape[2], targetShape[3])
# compute the starting and ending crop coordinates
self.startX = int((inputShape[3] - targetShape[3]) / 2)
self.startY = int((inputShape[2] - targetShape[2]) / 2)
self.endX = self.startX + W
self.endY = self.startY + H
# return the shape of the volume (we'll perform the actual
# crop during the forward pass
return [[batchSize, numChannels, H, W]]
def forward(self, inputs):
# use the derived (x, y)-coordinates to perform the crop
return [inputs[0][:, :, self.startY:self.endY,
self.startX:self.endX]]
# load our serialized edge detector from disk
print("[INFO] loading edge detector...")
fpath = os.path.abspath(__file__)
fdir = os.path.dirname(fpath)
print(fdir)
protoPath = os.path.sep.join([fdir,"hed_model", "deploy.prototxt"])
print(protoPath)
modelPath = os.path.sep.join([fdir,"hed_model","hed_pretrained_bsds.caffemodel"])
print(modelPath)
net = cv2.dnn.readNetFromCaffe(protoPath, modelPath)
# register our new layer with the model
cv2.dnn_registerLayer("Crop", CropLayer)
# load the input image and grab its dimensions
image = cv2.imread(r'D:\My work\MASTERS WORK\SAND - UNIFORM\sand_180pxfor1cm(130,120,75).jpg')  # raw string avoids backslash escape issues
# image =cv2.equalizeHist(img)
# image = cv2.pyrMeanShiftFiltering(image1,10,20)
(H, W) = image.shape[:2]
# print(image.shape[:2])
# image.shape[:2] = (H*3, W*3)
# image = cv2.resize(image,0.5)
# convert the image to grayscale, blur it, and perform Canny
# edge detection
print("[INFO] performing Canny edge detection...")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
# blurred = cv2.addWeighted(gray,1.5,blurred,-0.5,0)
canny = cv2.Canny(blurred,30, 150)
# construct a blob out of the input image for the Holistically-Nested
# Edge Detector
# cc = cv2.cvtColor(canny, cv2.COLOR_GRAY2BGR)
# image = image+cc
# mean = (104.00698793, 116.66876762, 122.67891434),
blob = cv2.dnn.blobFromImage(image, scalefactor=1.0, size=(W, H),
# mean=(110,95,95),
# mean=(104.00698793, 116.66876762, 122.67891434),
# mean=(104, 116, 122),
mean=(130, 120, 75),
# mean=(145, 147, 180),
swapRB= False, crop=False)
print( blob)
cv2.waitKey(0)
# set the blob as the input to the network and perform a forward pass
# to compute the edges
print("[INFO] performing holistically-nested edge detection...")
net.setInput(blob)
hed = net.forward()
hed = cv2.resize(hed[0, 0], (W, H))
hed = (255 * hed).astype("uint8")
# show the output edge detection results for Canny and
# Holistically-Nested Edge Detection
cv2.imshow("Input", image)
cv2.imshow("Canny", canny)
cv2.imshow("HED", hed)
cv2.imwrite(hout, hed)
cv2.waitKey(0)
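One modification I have considered for the image itself, before building the blob, is local contrast enhancement (CLAHE) so the faint boundaries between fine sand grains stand out more. This is only a rough sketch; the clipLimit and tileGridSize values are guesses, not tested settings:
# enhance local contrast on the lightness channel before running HED (sketch)
lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
l, a, b = cv2.split(lab)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
enhanced = cv2.cvtColor(cv2.merge((clahe.apply(l), a, b)), cv2.COLOR_LAB2BGR)
# then pass `enhanced` instead of `image` to cv2.dnn.blobFromImage
Would something along these lines be the right direction, or is a change to the algorithm itself needed?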

Related

Using PyTorch for training, visualize(image = image, mask = mask.squeeze()) gives the image (.jpeg) output but not the mask (.tiff) output

I am training on the LiTS (liver) dataset using PyTorch. The images are .jpeg and the masks are .tiff files.
After the preprocessing steps (normalization, shape manipulation, etc.), the .tiff masks are not visible; they appear as black images.
visualize(image = image, mask = mask.squeeze()) gives the image output, but not the mask output.
class Dataset(BaseDataset):
'''
Args:
images_dir (str): path to images folder
masks_dir (str): path to segmentation masks folder
class_values (list): values of classes to extract from segmentation mask
augmentation (albumentations.Compose): data transformation pipeline
(e.g. flip, scale, etc.)
preprocessing (albumentations.Compose): data preprocessing
(e.g. normalization, shape manipulation, etc.)
'''
CLASSES = ['background', 'liver', 'tumor']
def __init__(self, image_dir, mask_dir, classes = None, augmentation= None, preprocessing=None):
self.images = os.listdir(image_dir)[0:3000]
#self.masks = list(map(lambda x: x.replace(".jpg", "_mask.png"), self.images)) #only for 512x512
#self.masks = list(map(lambda x: x.replace(".jpg", ".png"), self.images))
self.masks = list(map(lambda x: x.replace(".jpg", ".tiff"), self.images))
self.class_values = [self.CLASSES.index(cls.lower()) for cls in classes]
self.augmentation = augmentation
self.preprocessing = preprocessing
self.image_dir = image_dir
self.mask_dir = mask_dir
def __getitem__(self, i):
# read data
image = cv2.imread(self.image_dir + '/' + self.images[i])
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
mask = cv2.imread(self.mask_dir + '/' + self.masks[i], 0)
mask = np.expand_dims(mask, axis = 2)
# masks = [(mask == v) for v in self.class_values]
# mask = np.stack(masks, axis=-1).astype('float')
# print(mask.shape)
# # extract certain classes from mask (e.g. cars)
# masks = [(mask == v) for v in self.class_values]
# mask = np.stack(masks, axis=-1).astype('float')
if self.augmentation:
sample = self.augmentation(image = image, mask= mask)
image, mask = sample['image'], sample['mask']
if self.preprocessing:
sample = self.preprocessing(image = image, mask= mask)
image, mask = sample['image'], sample['mask']
return image, mask
def __len__(self):
return len(self.images)
dataset = Dataset(image_dir = train_frame_path,
mask_dir = train_mask_path,
classes = ['background', 'liver', 'tumor'])
image, mask = dataset[1210]
visualize(image = image, mask = mask.squeeze())

Learning object detection: detected results shown in discolouration

Brief Description
I recently began learning object detection, just starting off with PyTorch and YOLOv5, so I thought: why not build a small side project to learn? I am using it to train a model to detect Pikachu.
The Problem
I've successfully trained the model on Pikachu and then used the trained weights with a Python script I wrote myself to detect Pikachu in test images. Now, here's the problem: the Pikachus are detected successfully, but all the results show a blue discolouration; what is supposed to be yellow turns blue, and blue turns yellow.
Fig. 1: Result images shown in blue discolouration (a few example outputs)
Additional Information
I've pushed this project to GitHub; feel free to download or clone it for debugging.
GitHub repository containing all the files
Any solution/suggestion would be helpful. Thanks.
The Code
"""Object detection using YOLOv5
Pokemon Pikachu detecting
"""
# import os, sys to append YOLOv5 folder path
import os, sys
# import object detection needed modules and libraries
# pillow
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import torch # PyTorch
# YOLOv5 folder path and related folder path settings
cwd = os.getcwd()
root_dir = (cwd + "/yolov5_stable")
sys.path.append(root_dir)
# import methods, functions from YOLOv5
from models.experimental import attempt_load
from utils.datasets import LoadImages
from utils.general import non_max_suppression, scale_coords
from utils.plots import colors
# define a function to show detected pikachu
def show_pikachu(img, det):
labels = ["pikachu"]
img = Image.fromarray(img)
draw = ImageDraw.Draw(img)
font_size = max(round(max(img.size)/40), 12)
font = ImageFont.truetype(cwd + "/yolov5_stable/fonts/times.ttf")
for info in det:
color = colors(1)
target, prob = int(info[5].cpu().numpy()), np.round(info[4].cpu().numpy(), 2)
x_min, y_min, x_max, y_max = info[0], info[1], info[2], info[3]
draw.rectangle([x_min, y_min, x_max, y_max], width = 3, outline = color)
draw.text((x_min, y_min), labels[target] + ':' + str(prob), fill = color, font = font)
# Bug unresolved, pikachu shown in blue discolouration
return img
if __name__ == "__main__":
device = "cuda:0" if torch.cuda.is_available() else "cpu"
print("GPU State: ", device)
data_path = (cwd + "/test_data/")
weight_path = (cwd + "/yolov5_stable/weights/best_v1.pt")
dataset = LoadImages(data_path)
model = attempt_load(weight_path, map_location = device)
model.to(device)
for path, img, im0s, _ in dataset:
img = torch.from_numpy(img).to(device)
img = img.float() # uint8 to fp16/32
img /= 255.0 # 0-255 to 0.0-1.0
if img.ndimension() == 3:
img = img.unsqueeze(0)
pred = model(img)[0]
pred = non_max_suppression(pred, 0.25, 0.45)
for i, det in enumerate(pred):
im0 = im0s.copy()
det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
result = show_pikachu(im0, det)
result.show()
The problem is that Image.fromarray expects the image in RGB and you're providing it in BGR. You just need to change that. There are multiple places where you could do it, for instance:
Image.fromarray(img[...,::-1]) # assuming `img` is channel-last
Evidence for this is that the red parts of the mouse (red is RGB(255, 0, 0)) are being shown in blue (which is RGB(0, 0, 255)). FYI, yellow is RGB(255, 255, 0) and cyan is RGB(0, 255, 255), which you can also see in your case.
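Equivalently, a minimal sketch of the same fix using OpenCV's colour conversion inside show_pikachu (this assumes img is the channel-last uint8 BGR array coming from im0s, as in the code above; the helper name is made up for illustration):
import cv2
from PIL import Image
def to_pil_rgb(img):
    # OpenCV-style arrays are BGR; Pillow expects RGB, so swap channels first
    rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return Image.fromarray(rgb)
Either approach only changes the channel order; the detection itself is unaffected.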

Extract only specific information using OCR and OpenCV

I am trying to extract specific information from a bill. So far I have used OCR and OpenCV, and here are the results:
import cv2
import pytesseract
import numpy as np
image = cv2.imread('1.png')
# get grayscale image
def get_grayscale(image):
return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# noise removal
def remove_noise(image):
return cv2.medianBlur(image,5)
#thresholding
def thresholding(image):
return cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
#dilation
def dilate(image):
kernel = np.ones((5,5),np.uint8)
return cv2.dilate(image, kernel, iterations = 1)
#erosion
def erode(image):
kernel = np.ones((5,5),np.uint8)
return cv2.erode(image, kernel, iterations = 1)
#opening - erosion followed by dilation
def opening(image):
kernel = np.ones((5,5),np.uint8)
return cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel)
#canny edge detection
def canny(image):
return cv2.Canny(image, 100, 200)
#skew correction
def deskew(image):
coords = np.column_stack(np.where(image > 0))
angle = cv2.minAreaRect(coords)[-1]
if angle < -45:
angle = -(90 + angle)
else:
angle = -angle
(h, w) = image.shape[:2]
center = (w // 2, h // 2)
M = cv2.getRotationMatrix2D(center, angle, 1.0)
rotated = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)
return rotated
#template matching
def match_template(image, template):
return cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
gray = get_grayscale(image)
thresh = thresholding(gray)
opening = opening(gray)
canny = canny(gray)
cv2.imshow('res', gray)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Adding custom options
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
custom_config = r'--oem 3 --psm 6'
pytesseract.image_to_string(gray, config=custom_config)
The output I got was:
Out[9]: 'aso en bosaanes sosesoen\nSee arr ee\n[internationale Spedition “works carrier:\nree Meese
Eaton oro\nSE Eesn Srey alata ascea\ntay See eae ror\nTBlaecaseew £2 saserzaz9gn [acs Sue Saeeats
Arve\noricore toptetschlBve ta\nbares eye creat tere\nLene et aan Ease\ncoon soos\nreaee\nbenenter
petachand AiG & co. x8\nese See ete Fests\nsee Sse\npearson | |\nen 7\nFeanséurt an main bawegoansn
|\npe |\nsor per tantace e/ear0003537\nEl = T=] | = [== |\nSta psa a4 fonstsanern\nLerper
atcnen\nwe\n20 ocd hoes ale 22ers wf\n30 ped londed on pwc aoasonnr #0\n35 ped londed on pwc 2008es00
#0\n64 pcs loaded on| PMC BO3BBART MD &\n[ental — |\n=\n|\nSJ |] Spscrinan copnapen as wtshan momen
ante\nart veins otetrich cata 60. RAS sem\n[re ote\n[\\gesoago |__| tars ena Detrich ea\nTon anine
Setrion cn a co. eta a5 scan\nSS aan ee ee\nee eS] -
esemen\ncision\n\x0c'
I need only specific information, like the name, shipping address, quantity, etc., and not all the characters. Also, the output is all mashed up. Can anyone please help me with this? Any code or any other help would be appreciated.
You can use pytesseract.image_to_pdf_or_hocr(), choosing hocr as the output format. The result contains bounding boxes at the character, word, and line level.
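For illustration, a minimal sketch of that approach (the input file name '1.png' matches the code above; writing the result to '1.hocr' is just an assumption for inspection):
import pytesseract
# Request hOCR output, which embeds a bounding box for every recognised word
hocr = pytesseract.image_to_pdf_or_hocr('1.png', extension='hocr')
with open('1.hocr', 'wb') as f:
    f.write(hocr)
You can then parse the hOCR (it is XHTML) and keep only the words whose boxes fall in the regions of the bill you care about; pytesseract.image_to_data() is an alternative that returns the same word-level boxes as a plain table.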

How to perform image convolution on an image using OpenCV in Python

I am trying to perform edge detection on my images of soil grains using the holistically-nested edge detection (HED) method, as shown. However, when an image contains both fine and coarse soil grains, the region of fine particles is not detected clearly, so I want to process the image in a convolution-like manner: cut the image into smaller rectangular portions in both directions, run HED on every portion, and store the results in a black copy of the image so that the edged portions are added to it.
I faced an error after repeating the HED algorithm in a for loop, dividing the width of the image into 5 portions and the height into 4, but I can't fix that error (an illustrative sketch of the tiling loop is shown after the error message below).
Here is the algorithm used
# import the necessary packages
import argparse
import cv2
import os
import easygui
path = easygui.fileopenbox()
print(path)
hdir = os.path.dirname(path)
print(hdir)
hfilename = os.path.basename(path)
print(hfilename)
hname = os.path.splitext(hfilename)[0]
print(hname)
houtname = hname+"_out.jpg"
print(houtname)
hout = os.path.sep.join([hdir,houtname])
print(hout)
# # construct the argument parser and parse the arguments
# ap = argparse.ArgumentParser()
# ap.add_argument("-d", "--edge-detector", type=str, required=True,
# help="path to OpenCV's deep learning edge detector")
# ap.add_argument("-i", "--image", type=str, required=True,
# help="path to input image")
# args = vars(ap.parse_args())
class CropLayer(object):
def __init__(self, params, blobs):
# initialize our starting and ending (x, y)-coordinates of
# the crop
self.startX = 0
self.startY = 0
self.endX = 0
self.endY = 0
def getMemoryShapes(self, inputs):
# the crop layer will receive two inputs -- we need to crop
# the first input blob to match the shape of the second one,
# keeping the batch size and number of channels
(inputShape, targetShape) = (inputs[0], inputs[1])
(batchSize, numChannels) = (inputShape[0], inputShape[1])
(H, W) = (targetShape[2], targetShape[3])
# compute the starting and ending crop coordinates
self.startX = int((inputShape[3] - targetShape[3]) / 2)
self.startY = int((inputShape[2] - targetShape[2]) / 2)
self.endX = self.startX + W
self.endY = self.startY + H
# return the shape of the volume (we'll perform the actual
# crop during the forward pass
return [[batchSize, numChannels, H, W]]
def forward(self, inputs):
# use the derived (x, y)-coordinates to perform the crop
return [inputs[0][:, :, self.startY:self.endY,
self.startX:self.endX]]
# load our serialized edge detector from disk
print("[INFO] loading edge detector...")
fpath = os.path.abspath(__file__)
fdir = os.path.dirname(fpath)
print(fdir)
protoPath = os.path.sep.join([fdir,"hed_model", "deploy.prototxt"])
print(protoPath)
modelPath = os.path.sep.join([fdir,"hed_model","hed_pretrained_bsds.caffemodel"])
print(modelPath)
net = cv2.dnn.readNetFromCaffe(protoPath, modelPath)
# register our new layer with the model
cv2.dnn_registerLayer("Crop", CropLayer)
# load the input image and grab its dimensions
image = cv2.imread(r'D:\My work\MASTERS WORK\GSD files\Sample E photos\SampleE_#1_26pxfor1mm.jpg')  # raw string avoids backslash escape issues
im_copy = image.copy()*0
(H, W) = image.shape[:2]
# print(image.shape[:2])
# image.shape[:2] =(H*3, W*3)
# image = cv2.resize(image,0.5)
h=0
w=0
for m in range(0,H ,int(H/5)):
for n in range(0,W,int(W/3)):
gray = image[h:m,w:n]
# convert the image to grayscale, blur it, and perform Canny
# edge detection
print("[INFO] performing Canny edge detection...")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
canny = cv2.Canny(blurred, 30, 150)
# construct a blob out of the input image for the Holistically-Nested
# Edge Detector
# cc = cv2.cvtColor(canny, cv2.COLOR_GRAY2BGR)
# image = image+cc
# mean = (104.00698793, 116.66876762, 122.67891434),
blob = cv2.dnn.blobFromImage(image, scalefactor=1.0, size=((m-h), (n-w)),
# mean=(230, 120, 50),
mean=(104.00698793, 116.66876762, 122.67891434),
swapRB=False, crop=False)
print( blob)
cv2.waitKey(0)
# set the blob as the input to the network and perform a forward pass
# to compute the edges
print("[INFO] performing holistically-nested edge detection...")
net.setInput(blob)
hed = net.forward()
hed = cv2.resize(hed[0, 0], ((m-h), (n-w)))
hed = (255 * hed).astype("uint8")
# Adding the edge detection for each portion to the copy image as follows
im_copy = im_copy + hed
h+=int(H/5)
w+=int(W/4)
# show the output edge detection results for Canny and
# Holistically-Nested Edge Detection
cv2.imshow("Input", image)
cv2.imshow("Canny", canny)
cv2.imshow("HED", hed)
cv2.waitKey(0)
cv2.imshow('Frame ',im_copy)
cv2.imwrite(hout, im_copy)
cv2.waitKey(0)
I then use this edged image for further analysis.
This is the error I got when running the algorithm:
net = cv2.dnn.readNetFromCaffe(protoPath, modelPath)
cv2.error: OpenCV(4.1.1) C:\projects\opencv-python\opencv\modules\dnn\src\caffe\caffe_io.cpp:1121: error: (-2:Unspecified error) FAILED: fs.is_open(). Can't open "D:\My work\MASTERS WORK\hed_model\deploy.prototxt" in function 'cv::dnn::ReadProtoFromTextFile'
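For reference, an illustrative sketch of how the tiling loop could be structured, assuming net, image, H, and W are already defined as in the code above. The 5x4 split follows the question; the tile bookkeeping, the BSDS mean values, and the variable names are assumptions, not the original code:
import numpy as np
rows, cols = 5, 4                          # 5 portions along the height, 4 along the width
tile_h, tile_w = H // rows, W // cols
im_copy = np.zeros((H, W), dtype="uint8")  # black single-channel canvas for the edge map
for r in range(rows):
    for c in range(cols):
        # tile boundaries; the last row/column absorbs any remainder pixels
        y0, x0 = r * tile_h, c * tile_w
        y1 = H if r == rows - 1 else y0 + tile_h
        x1 = W if c == cols - 1 else x0 + tile_w
        tile = image[y0:y1, x0:x1]
        blob = cv2.dnn.blobFromImage(tile, scalefactor=1.0, size=(x1 - x0, y1 - y0),
                                     mean=(104.00698793, 116.66876762, 122.67891434),
                                     swapRB=False, crop=False)
        net.setInput(blob)
        hed = net.forward()
        hed = cv2.resize(hed[0, 0], (x1 - x0, y1 - y0))
        # paste this tile's edge map into the corresponding region of the canvas
        im_copy[y0:y1, x0:x1] = (255 * hed).astype("uint8")
cv2.imshow('Frame', im_copy)
cv2.waitKey(0)
Note that the error quoted above is raised earlier, by cv2.dnn.readNetFromCaffe, because deploy.prototxt cannot be found at the path built from the script location, so the model files need to be in place before any tiling is attempted.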

Red Dot Tracker

I am coding a program that can monitor the red dot of a laser and track its movement. I would like to collect the x and y movement of the laser (its deviation from a stable point). Some searching found me this code on GitHub, which I plan to modify and use to track the red dot using OpenCV. I am, however, struggling to pull any data from it.
How would I go about storing the x and y coordinates of the laser in Excel (or by other useful means)?
import sys
import argparse
import cv2
import numpy
class LaserTracker(object):
def __init__(self, cam_width=640, cam_height=480, hue_min=20, hue_max=160,
sat_min=100, sat_max=255, val_min=200, val_max=256,
display_thresholds=False):
"""
* ``cam_width`` x ``cam_height`` -- This should be the size of the
image coming from the camera. Default is 640x480.
HSV color space Threshold values for a RED laser pointer are determined
by:
* ``hue_min``, ``hue_max`` -- Min/Max allowed Hue values
* ``sat_min``, ``sat_max`` -- Min/Max allowed Saturation values
* ``val_min``, ``val_max`` -- Min/Max allowed pixel values
If the dot from the laser pointer doesn't fall within these values, it
will be ignored.
* ``display_thresholds`` -- if True, additional windows will display
values for threshold image channels.
"""
self.cam_width = cam_width
self.cam_height = cam_height
self.hue_min = hue_min
self.hue_max = hue_max
self.sat_min = sat_min
self.sat_max = sat_max
self.val_min = val_min
self.val_max = val_max
self.display_thresholds = display_thresholds
self.capture = None # camera capture device
self.channels = {
'hue': None,
'saturation': None,
'value': None,
'laser': None,
}
self.previous_position = None
self.trail = numpy.zeros((self.cam_height, self.cam_width, 3),
numpy.uint8)
def create_and_position_window(self, name, xpos, ypos):
"""Creates a named widow placing it on the screen at (xpos, ypos)."""
# Create a window
cv2.namedWindow(name)
# Resize it to the size of the camera image
cv2.resizeWindow(name, self.cam_width, self.cam_height)
# Move to (xpos,ypos) on the screen
cv2.moveWindow(name, xpos, ypos)
def setup_camera_capture(self, device_num=0):
"""Perform camera setup for the device number (default device = 0).
Returns a reference to the camera Capture object.
"""
try:
device = int(device_num)
sys.stdout.write("Using Camera Device: {0}\n".format(device))
except (IndexError, ValueError):
# assume we want the 1st device
device = 0
sys.stderr.write("Invalid Device. Using default device 0\n")
# Try to start capturing frames
self.capture = cv2.VideoCapture(device)
if not self.capture.isOpened():
sys.stderr.write("Faled to Open Capture device. Quitting.\n")
sys.exit(1)
# set the wanted image size from the camera
self.capture.set(
cv2.cv.CV_CAP_PROP_FRAME_WIDTH if cv2.__version__.startswith('2') else cv2.CAP_PROP_FRAME_WIDTH,
self.cam_width
)
self.capture.set(
cv2.cv.CV_CAP_PROP_FRAME_HEIGHT if cv2.__version__.startswith('2') else cv2.CAP_PROP_FRAME_HEIGHT,
self.cam_height
)
return self.capture
def handle_quit(self, delay=10):
"""Quit the program if the user presses "Esc" or "q"."""
key = cv2.waitKey(delay)
c = chr(key & 255)
if c in ['c', 'C']:
self.trail = numpy.zeros((self.cam_height, self.cam_width, 3),
numpy.uint8)
if c in ['q', 'Q', chr(27)]:
sys.exit(0)
def threshold_image(self, channel):
if channel == "hue":
minimum = self.hue_min
maximum = self.hue_max
elif channel == "saturation":
minimum = self.sat_min
maximum = self.sat_max
elif channel == "value":
minimum = self.val_min
maximum = self.val_max
(t, tmp) = cv2.threshold(
self.channels[channel], # src
maximum, # threshold value
0, # we dont care because of the selected type
cv2.THRESH_TOZERO_INV # t type
)
(t, self.channels[channel]) = cv2.threshold(
tmp, # src
minimum, # threshold value
255, # maxvalue
cv2.THRESH_BINARY # type
)
if channel == 'hue':
# only works for filtering red color because the range for the hue
# is split
self.channels['hue'] = cv2.bitwise_not(self.channels['hue'])
def track(self, frame, mask):
"""
Track the position of the laser pointer.
Code taken from
http://www.pyimagesearch.com/2015/09/14/ball-tracking-with-opencv/
"""
center = None
countours = cv2.findContours(mask, cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)[-2]
# only proceed if at least one contour was found
if len(countours) > 0:
# find the largest contour in the mask, then use
# it to compute the minimum enclosing circle and
# centroid
c = max(countours, key=cv2.contourArea)
((x, y), radius) = cv2.minEnclosingCircle(c)
moments = cv2.moments(c)
if moments["m00"] > 0:
center = int(moments["m10"] / moments["m00"]), \
int(moments["m01"] / moments["m00"])
else:
center = int(x), int(y)
# only proceed if the radius meets a minimum size
if radius > 10:
# draw the circle and centroid on the frame,
cv2.circle(frame, (int(x), int(y)), int(radius),
(0, 255, 255), 2)
cv2.circle(frame, center, 5, (0, 0, 255), -1)
# then update the pointer trail
if self.previous_position:
cv2.line(self.trail, self.previous_position, center,
(255, 255, 255), 2)
cv2.add(self.trail, frame, frame)
self.previous_position = center
def detect(self, frame):
hsv_img = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# split the video frame into color channels
h, s, v = cv2.split(hsv_img)
self.channels['hue'] = h
self.channels['saturation'] = s
self.channels['value'] = v
# Threshold ranges of HSV components; storing the results in place
self.threshold_image("hue")
self.threshold_image("saturation")
self.threshold_image("value")
# Perform an AND on HSV components to identify the laser!
self.channels['laser'] = cv2.bitwise_and(
self.channels['hue'],
self.channels['value']
)
self.channels['laser'] = cv2.bitwise_and(
self.channels['saturation'],
self.channels['laser']
)
# Merge the HSV components back together.
hsv_image = cv2.merge([
self.channels['hue'],
self.channels['saturation'],
self.channels['value'],
])
self.track(frame, self.channels['laser'])
return hsv_image
def display(self, img, frame):
"""Display the combined image and (optionally) all other image channels
NOTE: default color space in OpenCV is BGR.
"""
cv2.imshow('RGB_VideoFrame', frame)
cv2.imshow('LaserPointer', self.channels['laser'])
if self.display_thresholds:
cv2.imshow('Thresholded_HSV_Image', img)
cv2.imshow('Hue', self.channels['hue'])
cv2.imshow('Saturation', self.channels['saturation'])
cv2.imshow('Value', self.channels['value'])
def setup_windows(self):
sys.stdout.write("Using OpenCV version: {0}\n".format(cv2.__version__))
# create output windows
self.create_and_position_window('LaserPointer', 0, 0)
self.create_and_position_window('RGB_VideoFrame',
10 + self.cam_width, 0)
if self.display_thresholds:
self.create_and_position_window('Thresholded_HSV_Image', 10, 10)
self.create_and_position_window('Hue', 20, 20)
self.create_and_position_window('Saturation', 30, 30)
self.create_and_position_window('Value', 40, 40)
def run(self):
# Set up window positions
self.setup_windows()
# Set up the camera capture
self.setup_camera_capture()
while True:
# 1. capture the current image
success, frame = self.capture.read()
if not success: # no image captured... end the processing
sys.stderr.write("Could not read camera frame. Quitting\n")
sys.exit(1)
hsv_image = self.detect(frame)
self.display(hsv_image, frame)
self.handle_quit()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Run the Laser Tracker')
parser.add_argument('-W', '--width',
default=640,
type=int,
help='Camera Width')
parser.add_argument('-H', '--height',
default=480,
type=int,
help='Camera Height')
parser.add_argument('-u', '--huemin',
default=20,
type=int,
help='Hue Minimum Threshold')
parser.add_argument('-U', '--huemax',
default=160,
type=int,
help='Hue Maximum Threshold')
parser.add_argument('-s', '--satmin',
default=100,
type=int,
help='Saturation Minimum Threshold')
parser.add_argument('-S', '--satmax',
default=255,
type=int,
help='Saturation Maximum Threshold')
parser.add_argument('-v', '--valmin',
default=200,
type=int,
help='Value Minimum Threshold')
parser.add_argument('-V', '--valmax',
default=255,
type=int,
help='Value Maximum Threshold')
parser.add_argument('-d', '--display',
action='store_true',
help='Display Threshold Windows')
params = parser.parse_args()
tracker = LaserTracker(
cam_width=params.width,
cam_height=params.height,
hue_min=params.huemin,
hue_max=params.huemax,
sat_min=params.satmin,
sat_max=params.satmax,
val_min=params.valmin,
val_max=params.valmax,
display_thresholds=params.display
)
tracker.run()
I'm by no means an expert on this, but taking a quick look at the code it seems that self.previous_position = (x, y).
I'm not sure why you would want to save to Excel, but to save to a file just add f = open(filename, 'w') at the start of the run function and then f.write(str(self.previous_position)) at the end of each loop.
Your file will then contain all the recorded x and y coordinates; if you're set on getting this into Excel, check out Python's out-of-the-box csv library, as sketched below.
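A minimal sketch of that csv idea (the file name is an assumption; the writes would go inside run(), around the existing while loop):
import csv
# before the while loop in run():
f = open('laser_positions.csv', 'w', newline='')
writer = csv.writer(f)
writer.writerow(['x', 'y'])
# ... at the end of each loop iteration, after self.detect(frame):
if self.previous_position is not None:
    writer.writerow(self.previous_position)  # (x, y) of the last detected centroid
A .csv file like this opens directly in Excel, so no extra export step is needed.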
