OpenCV2 / Python - How to ignore brightness with image matching function

My goal is to detect movement with the camera.
The code below works, but sometimes when the brightness changes, it triggers the capture as if a movement was detected.
from PIL import ImageGrab
import cv2
import time
import numpy
from os.path import expanduser

try:
    import cPickle as pickle
except ImportError:
    import pickle

def matchFrame(image1, image2):
    origin = image1
    origin = cv2.cvtColor(origin, cv2.COLOR_BGR2GRAY)
    image = image2
    result = cv2.matchTemplate(origin, image, cv2.TM_CCOEFF_NORMED)
    minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(result)
    threshold = 0.90
    loc = numpy.where(result >= threshold)
    if loc[0].size == 0 and loc[1].size == 0:
        return False
    else:
        return True

def run():
    cap = cv2.VideoCapture(0)
    ret, firstframe = cap.read()
    inc = 0
    while True:
        ret, frame = cap.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        if matchFrame(firstframe, gray):
            pass
        else:
            cv2.imwrite("path%s" % inc, frame)
            inc = inc + 1
        cv2.waitKey(1)

if __name__ == "__main__":
    run()
The matchFrame method uses cv2.matchTemplate(origin, image, cv2.TM_CCOEFF_NORMED) to compare the two images; if they are similar, it returns True. I have already tried changing the threshold, but it does not help.
The run method handles the video capture and calls the method above to compare the first frame with the current frame.
I personally think it's all about cv2.TM_CCOEFF_NORMED...
Do you have any suggestions?
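One idea worth trying, sketched under the assumption that the false triggers come from global brightness shifts: equalize the histogram of both grayscale frames before matching, so a uniform change in exposure has less effect on the cv2.matchTemplate score. This is only an illustrative variant of matchFrame, not a verified fix, and the 0.90 threshold may need re-tuning after equalization.

def matchFrame(image1, image2):
    # image1 is the BGR first frame, image2 is already grayscale (as in run())
    origin = cv2.equalizeHist(cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY))
    image = cv2.equalizeHist(image2)
    # compare the brightness-normalized frames
    result = cv2.matchTemplate(origin, image, cv2.TM_CCOEFF_NORMED)
    threshold = 0.90
    loc = numpy.where(result >= threshold)
    # True means "similar enough", i.e. no movement detected
    return not (loc[0].size == 0 and loc[1].size == 0)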

Related

How to send a default parameter in method (Python)

This is the class:
import pyautogui
import cv2
import numpy as np

class VidRec:
    def screen(self, fps=12.0, time=10):
        SCREEN_SIZE = tuple(pyautogui.size())
        fourcc = cv2.VideoWriter_fourcc(*"XVID")
        out = cv2.VideoWriter("output.avi", fourcc, self.fps, (SCREEN_SIZE))
        for i in range(int(self.time * self.fps)):
            img = pyautogui.screenshot()
            # convert these pixels to a proper numpy array to work with OpenCV
            frame = np.array(img)
            # convert colors from BGR to RGB
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            # write the frame
            out.write(frame)
            # show the frame
            # cv2.imshow("screenshot", frame)
            # if the user clicks q, it exits
            if cv2.waitKey(1) == ord("q"):
                break
        # make sure everything is closed when exited
        cv2.destroyAllWindows()
        out.release()
How I call it:
import tasks
p = tasks.VidRec()
p.screen()
I get an error even though I set default parameters in the class ("def screen(self, fps=12.0, time=10):"):
out = cv2.VideoWriter("output.avi", fourcc, self.fps, (SCREEN_SIZE))
AttributeError: 'VidRec' object has no attribute 'fps'
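The traceback points at self.fps: fps and time are local parameters of screen(), not instance attributes, so self.fps is never set. A minimal sketch of one way around it, simply using the parameters directly (the trimmed method body below is illustrative, not the full original):

import pyautogui
import cv2
import numpy as np

class VidRec:
    def screen(self, fps=12.0, time=10):
        screen_size = tuple(pyautogui.size())
        fourcc = cv2.VideoWriter_fourcc(*"XVID")
        # use the local parameters instead of self.fps / self.time
        out = cv2.VideoWriter("output.avi", fourcc, fps, screen_size)
        for _ in range(int(time * fps)):
            frame = np.array(pyautogui.screenshot())        # PIL image -> numpy array
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)  # pyautogui gives RGB; OpenCV expects BGR
            out.write(frame)
        out.release()

Alternatively, bind them as attributes first (self.fps = fps and self.time = time, either in screen() or in an __init__) before referring to self.fps.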

How to track my cursor to a coordinate in REAL TIME? (DIY aimbot)

I need to move my cursor to the nose of a human to create a DIY aimbot with pose detection.
(Just for fun, not intending to cheat; there would be many better and easier options than making my own.)
I already have the first part of the code, and it shows your screen and the skeleton, as well as the exact coordinates of the nose, with no problem.
But the method I'm using to move my cursor over to that point is not working.
I'm using mouse.move and have tried other things like pyautogui and tkinter.
It doesn't give me an error, but it still does not work.
import cv2
import mediapipe as mp
import numpy as np
import time
import pyautogui
import mouse

mp_drawing = mp.solutions.drawing_utils
mp_pose = mp.solutions.pose

# display screen resolution, get it from your OS settings
SCREEN_SIZEX = 1920
SCREEN_SIZEY = 1080
# define the codec
fourcc = cv2.VideoWriter_fourcc(*"XVID")
# create the video write object
out = cv2.VideoWriter("output.avi", fourcc, 20.0, (SCREEN_SIZEX, SCREEN_SIZEY))

with mp_pose.Pose(min_detection_confidence=0.1, min_tracking_confidence=0.9) as pose:
    while True:
        # make a screenshot
        img = pyautogui.screenshot()
        # convert these pixels to a proper numpy array to work with OpenCV
        frame = np.array(img)
        # convert colors from BGR to RGB
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # Recolor image to RGB
        image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image.flags.writeable = False
        # Make detection
        results = pose.process(image)
        # Recolor back to BGR
        image.flags.writeable = True
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        try:
            landmarks = results.pose_landmarks.landmark
            lndmark = landmarks[mp_pose.PoseLandmark.NOSE.value]
            x = [landmarks[mp_pose.PoseLandmark.NOSE.value].x]
            y = [landmarks[mp_pose.PoseLandmark.NOSE.value].y]
            # print(x)
            # print(y)
            mouse.move(x, y)
        except:
            pass
        # Render detections
        mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS,
                                  mp_drawing.DrawingSpec(color=(245, 117, 66), thickness=2, circle_radius=2),
                                  mp_drawing.DrawingSpec(color=(245, 66, 230), thickness=2, circle_radius=2)
                                  )
        # write the frame
        out.write(frame)
        pTime = 0
        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime
        cv2.putText(image, str(int(fps)), (20, 50), cv2.FONT_HERSHEY_PLAIN, 3,
                    (255, 0, 0), 3)
        cv2.imshow('Mediapipe Feed', image)
        if cv2.waitKey(10) & 0xFF == ord('q'):
            break

out.release()
cv2.destroyAllWindows()

# for lndmark in mp_pose.PoseLandmark:
#     print(lndmark)
This is the part that doesn't work:
try:
    landmarks = results.pose_landmarks.landmark
    lndmark = landmarks[mp_pose.PoseLandmark.NOSE.value]
    x = [landmarks[mp_pose.PoseLandmark.NOSE.value].x]
    y = [landmarks[mp_pose.PoseLandmark.NOSE.value].y]
    mouse.move(x, y)
except:
    pass
I would assume that it is because x and y are supposed to be numbers, or that mouse.move somehow can't read or process them correctly.
But it doesn't give me an error, so I'm asking here, hoping one of you has already figured this one out.
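Two things stand out, assuming standard MediaPipe behaviour: the landmark coordinates are normalized to the range 0..1 rather than pixels, and x and y are wrapped in one-element lists before being passed to mouse.move (while the bare except: pass hides any error that call raises). A minimal sketch of the conversion, reusing the SCREEN_SIZEX / SCREEN_SIZEY values already defined above:

try:
    nose = results.pose_landmarks.landmark[mp_pose.PoseLandmark.NOSE.value]
    # MediaPipe landmarks are normalized to [0, 1]; scale them to screen pixels
    screen_x = int(nose.x * SCREEN_SIZEX)
    screen_y = int(nose.y * SCREEN_SIZEY)
    # pass plain integers, not one-element lists
    mouse.move(screen_x, screen_y, absolute=True)
except AttributeError:
    # no pose detected in this frame
    pass

Note that the landmarks are relative to the frame given to pose.process (a full-screen screenshot here), so scaling by the screen size should line up; if the frame were cropped or resized, the scaling would need adjusting.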

How can I overlay text on a video capture with Python OpenCV and Pillow

I started by showing text on my video capture with the .putText() function, but I couldn't use a custom TrueType font. So I looked it up and found out that I could do what I want with Pillow. But it just doesn't work. This is my code:
import numpy as np
import cv2
from datetime import *
from bs4 import BeautifulSoup
import requests
from PIL import ImageFont, ImageDraw, Image

cap = cv2.VideoCapture(0)

while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()
    now = datetime.now()
    current_time = now.strftime("%D: %H:%M:%S")
    fontpath = "./Roboto-Light.ttf"
    font = ImageFont.truetype(fontpath, 22)
    img_pil = Image.fromarray(frame)
    draw = ImageDraw.Draw(img_pil)
    draw.text((120, 660), current_time, font=ImageFont.truetype(fontpath, 48), fill=(168, 98, 0, 0))  # Hour and Minute
    cv2.namedWindow('frame', cv2.WINDOW_NORMAL)
    cv2.setWindowProperty('frame', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    # Display the resulting frame
    cv2.imshow('frame', frame)
    if cv2.waitKey(20) & 0xFF == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
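One thing that stands out, assuming the goal is just to get the drawn text on screen: the text is drawn onto img_pil, but cv2.imshow is still given the original frame, which Pillow never touched. A minimal sketch of converting the PIL image back before displaying it:

# after draw.text(...), convert the PIL image back to a numpy array for OpenCV
frame_with_text = np.array(img_pil)

# display (or write) the converted frame instead of the untouched one
cv2.imshow('frame', frame_with_text)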

Avoid the foreground being absorbed into the background with a background subtractor in OpenCV

I want to make a hand detector using OpenCV. I've created a background subtractor using the following code:
import cv2
import numpy as np

global fgMask

camera = cv2.VideoCapture(0)
backSub = cv2.createBackgroundSubtractorMOG2(detectShadows=False)
firstImage = True
crop_width = 300
crop_height = 300
camera.set(cv2.CAP_PROP_FRAME_WIDTH, 1024)
camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 1024)

while True:
    try:
        ret, image = camera.read()
        image = cv2.flip(image, 1)
        roi = image[0:crop_height, 0:crop_width]
        if firstImage:
            fgMask = backSub.apply(roi)
            firstImage = False
        else:
            fgMask = backSub.apply(roi, None, 0)
        cv2.imshow("Original", image)
        cv2.imshow("Mask", fgMask)
        cv2.imshow("Roi", roi)
        k = cv2.waitKey(10)
        if k == 27:  # press ESC to exit
            camera.release()
            cv2.destroyAllWindows()
            break
    except Exception as ex:
        print(ex)
I feed frames to the apply method for a few seconds so the model learns the background, and the mask it generates is black (everything is OK at this point).
When I put my hand in, the mask is OK, but after a while the hand begins to disappear.
I have read that you can set the learningRate parameter to 0 so that the model stops training on new frames, but I get the same result (the hand disappears after a while). I've tried different learning rates, but the result is always the same.
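If freezing MOG2's learning rate still lets the hand fade, a plainly different approach (a sketch under the assumption of a static camera and an empty ROI at startup, not a verified fix for this setup) is to skip the adaptive model entirely: capture one reference frame of the empty background and compare every new ROI against it with cv2.absdiff, so the foreground can never be absorbed:

import cv2

camera = cv2.VideoCapture(0)
crop_width, crop_height = 300, 300

# capture a single reference frame of the empty background
ret, background = camera.read()
background = cv2.flip(background, 1)[0:crop_height, 0:crop_width]
background_gray = cv2.cvtColor(background, cv2.COLOR_BGR2GRAY)

while True:
    ret, image = camera.read()
    roi = cv2.flip(image, 1)[0:crop_height, 0:crop_width]
    roi_gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)

    # the reference never updates, so the hand cannot fade into it
    diff = cv2.absdiff(background_gray, roi_gray)
    _, fgMask = cv2.threshold(diff, 30, 255, cv2.THRESH_BINARY)  # 30 is an arbitrary threshold

    cv2.imshow("Mask", fgMask)
    if cv2.waitKey(10) == 27:  # press ESC to exit
        break

camera.release()
cv2.destroyAllWindows()

The trade-off is that this static reference is sensitive to lighting changes, which is exactly what MOG2's learning rate is meant to handle.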

How to give color to a video in binary form

import cv2
import numpy as np

cap = cv2.VideoCapture(0)

while True:
    ret, im = cap.read()
    key = cv2.waitKey(10)
    cv2.imshow('frametest', im)
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    ret, th = cv2.threshold(gray, 255, 255, cv2.THRESH_BINARY)
    cv2.imshow('videotest', th)
    if key == 27:
        break
This is the part of my code where I convert a gray image to binary. I need to color the binarized video. Can anyone suggest a method for doing so? Any help will be appreciated.
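Since the binary image th is single-channel, one common way to "color" it (a sketch, assuming any false-color scheme is acceptable) is to map it through a colormap, or to use it as a mask so the original colors show only where the mask is white:

# th is the single-channel binary image from cv2.threshold above

# option 1: false-color the binary image with a colormap
colored = cv2.applyColorMap(th, cv2.COLORMAP_JET)

# option 2: keep the original frame's colors only where the mask is white
masked_color = cv2.bitwise_and(im, im, mask=th)

cv2.imshow('colored binary', colored)
cv2.imshow('masked color', masked_color)

Note that with cv2.threshold(gray, 255, 255, cv2.THRESH_BINARY) the mask will be almost entirely black, since no 8-bit pixel exceeds a threshold of 255; a lower threshold (or cv2.THRESH_OTSU) gives a more useful mask.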
