Multiple known object detection and tracking in OpenCV - Python

I am using a microscope to observe the motion of small 4 micron beads. I have a video of the beads moving and I would like to process the video to extract the bead locations as a function of time to get a mathematical model of their motion.
I am currently using OpenCV and programming in Python.
My code imports a video from file, thresholds each frame, then applies a HoughCircles transform to find the spherical beads.
import numpy as np
import cv2

def nothing(x):
    pass

cap = cv2.VideoCapture('testvideo.mp4')

cv2.namedWindow('trackbar')
cv2.createTrackbar('Param1','trackbar',40,255,nothing)
cv2.createTrackbar('Param2','trackbar',10,255,nothing)
cv2.createTrackbar('MaxRadius','trackbar',18,255,nothing)

while(cap.isOpened()):
    e1 = cv2.getTickCount()
    ret, frame = cap.read()
    if not ret:  # stop when the video runs out of frames
        break
    # get grayscale image for HoughCircles
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    p1 = cv2.getTrackbarPos('Param1','trackbar')
    p2 = cv2.getTrackbarPos('Param2','trackbar')
    rMax = cv2.getTrackbarPos('MaxRadius','trackbar')
    # Threshold grayscale image
    ret,th1 = cv2.threshold(gray,127,255,cv2.THRESH_BINARY)
    # Find circles in image and store locations to circles list
    circles = cv2.HoughCircles(th1, cv2.cv.CV_HOUGH_GRADIENT, 1, 10,
                               param1=p1, param2=p2, minRadius=0,
                               maxRadius=rMax)
    # Hack: fill the list with dummy entries if it is empty so the program won't crash
    if circles is None:
        circles = [[[0,0,0.000],[0,0,0.000]]]
    # convert circles list to integer list
    circles = np.uint16(np.around(circles))
    # store points to a file
    datafile = open('datafile.txt', 'a')
    np.savetxt(datafile, circles[0], fmt='%i', delimiter=',', newline=',')
    datafile.write('\n')
    datafile.close()
    for i in circles[0,:]:
        # draw the outer circle
        cv2.circle(frame,(i[0],i[1]),i[2],(0,255,0),2)
        # draw the center of the circle
        cv2.circle(frame,(i[0],i[1]),2,(0,0,255),3)
    cv2.imshow('detected circles',frame)
    cv2.imshow('threshold video',th1)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    e2 = cv2.getTickCount()
    time = (e2 - e1)/ cv2.getTickFrequency()
    print time

cap.release()
cv2.destroyAllWindows()
Here is a sample frame from the video I am using for detecting the beads.
http://imgur.com/bVZbH3c
Here is an example of the single frame tracking that I did with the previous algorithm
http://imgur.com/4VWJI2F
I don't need to detect every single bead. Just an aggregate would be fine.
The beads are spherical and should all look the same, so is there a library I can use to correlate an image of a single bead over the entire frame and see where the correlation peaks are? Sometimes the beads are out of focus, and that's why my current program keeps bouncing around and giving me false positives for the beads.
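Something along these lines is what I have in mind; this is just a rough sketch using cv2.matchTemplate, where the bead template file ('bead.png'), the sample frame name and the 0.7 correlation threshold are placeholders I would still have to tune:
import cv2
import numpy as np

# 'bead.png' would be a small grayscale crop of a single in-focus bead (placeholder name)
template = cv2.imread('bead.png', cv2.IMREAD_GRAYSCALE)
th, tw = template.shape

frame = cv2.imread('sample_frame.png')          # placeholder frame from the video
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

# normalized cross-correlation of the bead template over the whole image;
# peaks in the result mark likely bead centres
result = cv2.matchTemplate(gray, template, cv2.TM_CCOEFF_NORMED)
ys, xs = np.where(result >= 0.7)                # 0.7 is a guess to be tuned
for x, y in zip(xs, ys):
    cv2.circle(frame, (x + tw // 2, y + th // 2), 2, (0, 0, 255), -1)

cv2.imshow('correlation peaks', frame)
cv2.waitKey(0)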
I eventually need this process to run in real time, so if possible it would be nice for the algorithm to be as efficient as possible.
If anyone knows a good approach to this type of problem it would be appreciated.

Related

OpenCV Python 3: when attempting to switch from saved images to a live video feed, the program hangs

Hello there people of the internet,
The code in question is using Python 3.8.5 and OpenCV 4 (I do not know how to check the exact version, but I know it's OpenCV 4). My team and I are attempting to take a live video feed from a USB webcam and determine the distance between the camera and the object in the video feed. We had some success reading the distance from image stills taken with the same camera and read via the imutils library. But now we want to attempt to calculate that data live.
Our code is below.
from imutils import paths
import numpy as np
import imutils
import cv2
import time
import os

def find_marker(image):
    # convert the image to grayscale, blur it, then detect edges
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, 35, 125)
    # find the contours in the edged image and keep the largest one;
    # we'll assume that this is our piece of paper in the image
    cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    c = max(cnts, key = cv2.contourArea)
    # compute the bounding box of the paper region and return it
    return cv2.minAreaRect(c)

def distance_to_camera(knownWidth, focalLength, perWidth):
    # compute and return the distance from the marker to the camera
    return (knownWidth * focalLength) / perWidth

# initialize the known distance from the camera to the object
KNOWN_DISTANCE = 22
# initialize the known object width; in this case the piece of paper is 12 inches
KNOWN_WIDTH = 11

# load the first image that contains an object that is known to be 2 feet
# from our camera, then find the paper marker in the image and
# initialize the focal length
rootimage = cv2.imread("/Volumes/404/final_rov_code/Python/images/2ft.jpg")
marker1 = find_marker(rootimage)
marker2 = marker1[0][1] - marker1[1][1]
focalLength = (marker2 * KNOWN_DISTANCE) / KNOWN_WIDTH
print(marker1)
print(marker2)

image = cv2.VideoCapture(0)

# loop over the frames from the video feed
while True:
    # load the image, find the marker in the image, then compute the
    # distance to the marker from the camera
    frame, ret = image.read()
    marker = find_marker(ret)
    inches = distance_to_camera(KNOWN_WIDTH, focalLength, marker[1][0])
    print(inches)
    # draw a bounding box around the image and display it
    box = cv2.cv.BoxPoints(marker) if imutils.is_cv2() else cv2.boxPoints(marker)
    box = np.int0(box)
    cv2.drawContours(frame, [box], -1, (0, 255, 0), 2)
    cv2.putText(ret, "%.2fin" % inches,
                (ret.shape[1] - 200, ret.shape[0] - 20), cv2.FONT_HERSHEY_SIMPLEX,
                2.0, (0, 255, 0), 3)
    cv2.imshow("image", ret)
    # if cv2.waitKey(33) == ord('q'):
    #     os.system('pause')
I understand that it should be as minimalistic as possible, but we have no idea what could be causing the program to hang upon reading the first frame of the video feed. Could it be that the processing is taking too many resources on a single thread? (We're all newbies to the more advanced sides of OpenCV and Python 3.)
There are no other errors that we are aware of at the moment, so no leads in the terminal as to where it could be coming from.
Thank you in advance.
Your problem is likely a result of not including a cv2.waitKey() call at the end of your while loop. It takes time for OpenCV to draw the image, so if the program doesn't pause for long enough for the image to be drawn, the display just doesn't update. Check out this other StackOverflow question for more details.
In addition, you have your ret and frame variables mixed up. ret should be the first one and frame should be the second. Right now, the drawContours() method isn't going to do anything because you're passing it a boolean instead of an image.
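A minimal sketch of how the end of the loop could look with both fixes applied, with everything else (find_marker, focalLength, image) as defined in your code above:
while True:
    # ret is the success flag, frame is the actual image
    ret, frame = image.read()
    if not ret:
        break

    marker = find_marker(frame)
    inches = distance_to_camera(KNOWN_WIDTH, focalLength, marker[1][0])
    print(inches)

    box = cv2.boxPoints(marker)
    box = np.int0(box)
    cv2.drawContours(frame, [box], -1, (0, 255, 0), 2)
    cv2.putText(frame, "%.2fin" % inches,
                (frame.shape[1] - 200, frame.shape[0] - 20),
                cv2.FONT_HERSHEY_SIMPLEX, 2.0, (0, 255, 0), 3)
    cv2.imshow("image", frame)

    # give HighGUI time to actually draw the window, and check for the quit key
    if cv2.waitKey(33) & 0xFF == ord('q'):
        break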
Making those changes fixed this for me using Python 3.9 and OpenCV 4.5.

Camera tampering in a movable camera

I am working on a project on camera tampering. I have code for tamper detection with a static camera; tampering here means blocking or defocusing the camera. I want to modify it for a moving or rotating camera, so that it collects all the frames for the background and then compares them with new frames using the same background subtractor method.
I have tried, but I am unable to figure out how to use the list of frames to compare the captured frame against the other ones.
import numpy as np
import cv2
from playsound import playsound
import time

#cap = cv2.VideoCapture('http://192.168.43.1:8080/video') #Opening of IP camera, just enter the IP address
cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2() #generating a foreground mask
frame_list = [] #creating the list of frames
##################################################################
while(True):
    ret, frame = cap.read()
    if ret == True:
        if frame not in frame_list:
            frame_list.append(frame) #This list contains all the frames
###################################################################
#ret, frame = cap.read() #to get the initial frame
#fgmask = fgbg.apply(frame) #to save the initial frame
kernel = np.ones((5,5), np.uint8) #creating a (5, 5) matrix of ones
while(True):
    ret, frame = cap.read() #reading all the frames
    if ret == True:
        a = 0
        bounding_rect = [] #An empty list to which we will further add the contours
        fgmask = fgbg.apply(frame) #Applying the changes of the background to the foreground mask
        fgmask = cv2.erode(fgmask, kernel, iterations=5)
        fgmask = cv2.dilate(fgmask, kernel, iterations=5) #Erosion and dilation are done to detect even blurred objects better
        cv2.imshow('frame',frame) #Showing the frame
        contours,_ = cv2.findContours(fgmask,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) #The mode RETR_TREE together with CHAIN_APPROX_SIMPLE returns only the endpoints required to draw the contours
        for i in range(0,len(contours)):
            bounding_rect.append(cv2.boundingRect(contours[i])) #cv2.boundingRect gives the coordinates of the bounding rectangle, which we append to the list
        for i in range(0,len(contours)):
            if bounding_rect[i][2] >= 40 or bounding_rect[i][3] >= 40: #setting the threshold for the width and height of the contour
                a = a+(bounding_rect[i][2])*bounding_rect[i][3] #updating the area of the contours
        if(a >= int(frame.shape[0])*int(frame.shape[1])/3): #Comparison of the change in area of the background: if there is a large enough change it will detect the tampering
            cv2.putText(frame,"TAMPERING DETECTED",(5,30),cv2.FONT_HERSHEY_SIMPLEX,1,(0,255,255),2)
            #playsound('warning.mp3') #put the address of the warning tune in the playsound object and uncomment it
        cv2.imshow('frame',frame) #showing the final frame
        if cv2.waitKey(30) & 0xff == ord('q'): #To close the camera press q
            break
    else:
        break
cap.release()
cv2.destroyAllWindows()
In my opinion, background subtraction is not a good solution to your problem; the method is mainly used to detect moving objects by subtracting a static background. It is most often used with a static camera, but for a moving camera, intensity variation or texture change may also need to be taken into account.

Error while writing code for face recognition using a Haar cascade classifier

import cv2
import numpy as np

#Init camera
cap = cv2.VideoCapture(0)

#Face Detection using haarcascade File
face_cascade = cv2.CascadeClassifier('Anaconda3\Lib\site-packages\cv2\data\haarcascade_frontalface_alt.xml')

skip = 0
face_data = []
#dataset_path = ('./Face Recognition Data')

while True:
    ret,frame = cap.read()
    if ret == False:
        continue
    faces = face_cascade.detectMultiScale(frame,1.3,5)
    #The next line of code is written to only store the largest face in the window frame
    faces = sorted(faces,key = lambda f: f[2]*f[3])
    #start sorting from the last face since the last face is the largest in terms of area(w*h)
    for face in faces[-1:]:
        x,y,w,h = face
        cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,255),2)
        #extract the required face or the region of the interest
        #Refers to adding an extra 10 pixels on all the sides of the required extracted face
        offset = 10
        #By default face slicing is done in (y,x) manner
        face_section = frame[y-offset:y+h+offset,x-offset:x+w+offset]
        face_section = cv2.resize(face_section,(100,100))
        if skip%10==0: #Store every 10th frame
            face_data.append(face_section)
            print(len(face_data)) #number of faces captured so far
    cv2.imshow("Video Frame",frame)
    cv2.imshow("Face section frame",face_section)
    key_pressed = cv2.waitKey(1) & 0xFF
    if key_pressed == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
After running the program, it says that the face_section variable is not defined.
Please help
You have more than one face_section. If you need them outside of your for loop, you can do it like this:
face_section_list = [] # Define a new empty list!

#start sorting from the last face since the last face is the largest in terms of area(w*h)
for face in faces[-1:]:
    x,y,w,h = face
    cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,255),2)
    #extract the required face or the region of the interest
    #Refers to adding an extra 10 pixels on all the sides of the required extracted face
    offset = 10
    #By default face slicing is done in (y,x) manner
    face_section = frame[y-offset:y+h+offset,x-offset:x+w+offset]
    face_section = cv2.resize(face_section,(100,100))
    face_section_list.append(face_section) # Append EVERY face!
    if skip%10==0: #Store every 10th frame
        face_data.append(face_section)
        print(len(face_data)) #number of faces captured so far
And then outside, print every face in order (or do whatever you need to do):
for im in face_section_list:
    cv2.imshow("Face section frame",im)
    cv2.waitKey(0) # Zero means "wait until a key is pressed"
I've written a lot of code for face detection and recognition that you may find helpful; have a look.

How to detect a rectangle-shaped object using Python

I am doing a project in Python. I want to detect a rectangle-shaped object through the webcam using Python. I have tried it, but I didn't get an accurate answer: I show the object in front of the webcam, and if any finger touches the object it is no longer recognized. Please, can anyone help me? Thanks in advance. :)
Here is my code:
import math
import numpy as np
import cv2

#dictionary of all contours
contours = {}
#array of edges of polygon
approx = []
#scale of the text
scale = 2
#camera
cap = cv2.VideoCapture(0)
print("press q to exit")

# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi',fourcc, 20.0, (640,480))

#calculate angle
def angle(pt1,pt2,pt0):
    dx1 = pt1[0][0] - pt0[0][0]
    dy1 = pt1[0][1] - pt0[0][1]
    dx2 = pt2[0][0] - pt0[0][0]
    dy2 = pt2[0][1] - pt0[0][1]
    return float((dx1*dx2 + dy1*dy2))/math.sqrt(float((dx1*dx1 + dy1*dy1))*(dx2*dx2 + dy2*dy2) + 1e-10)

while(cap.isOpened()):
    #Capture frame-by-frame
    ret, frame = cap.read()
    if ret==True:
        #grayscale
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        #Canny
        canny = cv2.Canny(frame,80,240,3)
        #contours
        canny2, contours, hierarchy = cv2.findContours(canny,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
        for i in range(0,len(contours)):
            #approximate the contour with accuracy proportional to
            #the contour perimeter
            approx = cv2.approxPolyDP(contours[i],cv2.arcLength(contours[i],True)*0.02,True)
            #Skip small or non-convex objects
            if(abs(cv2.contourArea(contours[i]))<100 or not(cv2.isContourConvex(approx))):
                continue
            x,y,w,h = cv2.boundingRect(contours[i])
            vtc = len(approx)
            if(vtc==4):
                cv2.putText(frame,'RECTANGLE',(x,y),cv2.FONT_HERSHEY_SIMPLEX,scale,(255,255,255),2,cv2.LINE_AA)
        #Display the resulting frame
        out.write(frame)
        cv2.imshow('frame',frame)
        cv2.imshow('canny',canny)
        if cv2.waitKey(1) == 1048689: #if q is pressed
            break

#When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
Here is my output:
Non Working output
Working output
You currently have this part in your code:
#Skip small or non-convex objects
if(abs(cv2.contourArea(contours[i]))<100 or not(cv2.isContourConvex(approx))):
    continue
x,y,w,h = cv2.boundingRect(contours[i])
vtc = len(approx)
if(vtc==4):
    cv2.putText(frame,'RECTANGLE',(x,y),cv2.FONT_HERSHEY_SIMPLEX,scale,(255,255,255),2,cv2.LINE_AA)
Here it is possible to create a rectangle around the contour and compare the areas. For that you could use boundingRect; however, your phone can be slightly angled, so minAreaRect is better for this. It returns ((x, y), (w, h), angle); the part you care about is (w, h), since the area is w*h. You already know how to get the actual area of the contour, since it is in your code.
At the end the code should look like this:
#Skip small or non-convex objects
if(abs(cv2.contourArea(contours[i]))<100 or not(cv2.isContourConvex(approx))):
    continue
x,y,w,h = cv2.boundingRect(contours[i])
vtc = len(approx)
rect = cv2.minAreaRect(contours[i])
rectArea = rect[1][0] * rect[1][1]
contourArea = cv2.contourArea(contours[i])
# now it will check if the difference is less than 10% of the rect area
if vtc==4 or abs(rectArea - contourArea) < rectArea * .10:
    cv2.putText(frame,'RECTANGLE',(x,y),cv2.FONT_HERSHEY_SIMPLEX,scale,(255,255,255),2,cv2.LINE_AA)
Probably this will work, but you may need to adjust the threshold (I used 10% of rectArea). Even the 4-point check can be omitted; if it is a perfect rectangle, the fit will be exact (rectArea - contourArea = 0).
I hope this helps; however, this is a simple approach, and other answers are also valid. You could even tackle it with machine learning algorithms or rectangle-fitting algorithms.

How to automatically detect the road border in a lane detection algorithm?

So the popular approach to lane detection using computer vision is to perform these 5 steps:
Convert the image to grayscale and smooth it with a Gaussian function
Use the Canny edge function to detect edges (obviously, right?)
Mark the region of interest (ROI)
Use the Hough transform function to detect straight lines and a line function to draw them
That's my approach.
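For reference, here is roughly what that pipeline looks like; the Canny thresholds, the Hough parameters and the hard-coded triangular ROI are just example values, and the frame file name is a placeholder:
import cv2
import numpy as np

frame = cv2.imread('road_frame.png')            # placeholder frame

# 1) grayscale + Gaussian smoothing
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5, 5), 0)

# 2) Canny edge detection
edges = cv2.Canny(blur, 50, 150)

# 3) manually chosen ROI mask (this is the part I want to automate)
h, w = edges.shape
roi = np.array([[(0, h), (w // 2, h // 2), (w, h)]], dtype=np.int32)
mask = np.zeros_like(edges)
cv2.fillPoly(mask, roi, 255)
masked = cv2.bitwise_and(edges, mask)

# 4) Hough transform and drawing the detected lines
lines = cv2.HoughLinesP(masked, 1, np.pi / 180, 50,
                        minLineLength=40, maxLineGap=100)
if lines is not None:
    for x1, y1, x2, y2 in lines[:, 0]:
        cv2.line(frame, (x1, y1), (x2, y2), (0, 0, 255), 2)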
But the point here is that we usually need to manually select the ROI. In most cases, when applied to a dash camera on a car, that's OK since the view does not change much.
But my situation is different: I want to detect road lanes based on traffic surveillance cameras, and of course there are many of them. Each camera has its own view, so I think there must be a way to automatically separate the road and non-road areas.
My question is how to detect the ROI automatically?
My idea here is that the road area will have lots of pixel movements and the non-road area will not. From that idea we could automatically detect the ROI.
I have managed to extract the background (background subtracted) from this video (https://youtu.be/bv3NEzjb5sU) using OpenCV and the createBackgroundSubtractorMOG2 function.
The Canny edge and Hough transform code is basically OK once we have the ROI. Below is the code to train and extract the background. I thought we could modify it to produce a region mask, or something that can be used as the ROI for the later steps.
Thank you.
import cv2
from pathlib import Path

def bg_train(video_source, number_of_run, number_of_frames):
    cap = cv2.VideoCapture(video_source)
    fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=True)
    frame_number = -1
    default_background = Path("default_background.png")
    # the number of loops that will run to create a better background
    i = 0
    # check whether the file already exists
    if default_background.is_file():
        i = 1
        default_background = "default_background.png"
    else:
        default_background = None
        i = 0
    while i < number_of_run:
        # Capture next frame and show in window
        ret, frame = cap.read()
        if not ret:
            print("frame capture failed or not :)")
            break
        cv2.imshow("training_original", frame)
        # subtract foreground and show in new window
        background_read = cv2.imread(default_background)
        fg = fgbg.apply(frame, background_read, -1)
        fg_mask = filter_mask(fg)
        cv2.imshow('Training_FG', fg_mask)
        # subtract background and show in new window
        bg = fgbg.getBackgroundImage(fg_mask)
        cv2.imshow('Training_background', bg)
        print(i, frame_number, bg.shape[0], bg.shape[1], bg.shape[2])
        # Count frames and save the final background after training
        frame_number += 1
        if frame_number == number_of_frames and i < number_of_run - 1:
            i += 1
            cv2.imwrite("default_background.png", bg)
            cap.release()
            cv2.destroyAllWindows()
            cap = cv2.VideoCapture(video_source)
            frame_number = -1
        elif frame_number == number_of_frames and i == number_of_run - 1:
            i += 1
            cv2.imwrite("background_final.png", bg)
            cv2.imshow("final background", bg)
            return 1, bg
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            print("exit by user...")
            return 0, None
    cap.release()
    cv2.destroyAllWindows()

def main():
    video_source = "highway-30s.mp4"
    check, background_after = bg_train(video_source, 2, 500)
    if check == 0:
        return 0
    elif check == 1:
        cv2.imshow("the background, press ESC to close window", background_after)
        c = cv2.waitKey(0)
        if c == 27:
            cv2.destroyAllWindows()

if __name__ == '__main__':
    main()
You could train the algorithm to pick the ROI over an initial amount of time, by tracking movement in the viewport over a series of frames.
Filter out the movements and create lines/vectors representing their direction. Once you have enough of these samples, you can figure out the best ROI by using a bounding box that encapsulates these vectors.
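A rough sketch of that idea, simplified to accumulate foreground pixels instead of motion vectors and reusing the MOG2 subtractor you already have; the number of training frames and the 5% vote threshold are arbitrary example values:
import cv2
import numpy as np

cap = cv2.VideoCapture("highway-30s.mp4")
fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=True)

# accumulate foreground (motion) evidence over an initial training period
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
motion_votes = np.zeros((h, w), dtype=np.float32)

for _ in range(500):                        # arbitrary number of training frames
    ret, frame = cap.read()
    if not ret:
        break
    fg = fgbg.apply(frame)
    motion_votes += (fg > 0).astype(np.float32)

# keep pixels that moved in at least 5% of the frames (arbitrary threshold)
road_mask = (motion_votes > 0.05 * 500).astype(np.uint8) * 255
road_mask = cv2.morphologyEx(road_mask, cv2.MORPH_CLOSE, np.ones((15, 15), np.uint8))

# the bounding box around all moving pixels becomes the automatic ROI
pts = cv2.findNonZero(road_mask)
if pts is not None:
    x, y, bw, bh = cv2.boundingRect(pts)
    print("ROI:", x, y, bw, bh)
The resulting mask or bounding box could then be used as the ROI for your Canny/Hough steps.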
