Where to add cv2.resize() to get the appropriate resolution? - python

I have a problem with my code. I don't know where I should put cv2.resize() in order to get the required resolution. The file I load is full HD, and I want to work at a smaller resolution. I would be glad of a solution and an explanation.
Below I show my code:
import cv2
import numpy as np

cap = cv2.VideoCapture('DJI_0037.MP4')

while cap.isOpened():
    ret, frame = cap.read()
    if ret == True:
        frame_resize = cv2.resize(frame, (640, 480), interpolation=cv2.INTER_CUBIC)
    else:
        break
    ret, frame_resize1 = cap.read(frame_resize)
    ret, frame_resize2 = cap.read(frame_resize)
    diff = cv2.absdiff(frame_resize1, frame_resize2)
    gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    _, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
    dilated = cv2.dilate(thresh, None, iterations=3)
    contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        (x, y, w, h) = cv2.boundingRect(contour)
        if cv2.contourArea(contour) < 2000:
            continue
        cv2.rectangle(frame_resize1, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame_resize1, "Status: {}".format('Movement'), (10, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
    cv2.imshow("feed", frame_resize1)
    frame_resize1 = frame_resize2
    ret, frame_resize2 = cap.read()
    if cv2.waitKey(40) == 27:
        break

cap.release()
cv2.destroyAllWindows()
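
One common placement, shown as a minimal sketch rather than the poster's final code (it assumes a 640x480 target resolution is acceptable): resize every frame immediately after cap.read() succeeds and before any processing, so that the two frames being differenced are both already at the smaller resolution.
import cv2

cap = cv2.VideoCapture('DJI_0037.MP4')
size = (640, 480)  # target resolution, smaller than the full-HD source

def read_resized(capture):
    # Read one frame and, if the read succeeded, shrink it to the target size.
    ok, img = capture.read()
    if not ok:
        return None
    return cv2.resize(img, size, interpolation=cv2.INTER_CUBIC)

frame1 = read_resized(cap)
frame2 = read_resized(cap)

while cap.isOpened() and frame1 is not None and frame2 is not None:
    diff = cv2.absdiff(frame1, frame2)
    gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    _, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
    dilated = cv2.dilate(thresh, None, iterations=3)
    contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        if cv2.contourArea(contour) < 2000:
            continue
        x, y, w, h = cv2.boundingRect(contour)
        cv2.rectangle(frame1, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame1, "Status: Movement", (10, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
    cv2.imshow("feed", frame1)
    if cv2.waitKey(40) == 27:
        break
    frame1 = frame2             # slide the pair of frames forward
    frame2 = read_resized(cap)  # every new frame is resized in the same place

cap.release()
cv2.destroyAllWindows()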

Related

How to make a mesh within a rectangle using the drawing functions of OpenCV?

This is my full code.
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
width = cap.get(3)   # float
height = cap.get(4)  # float
print(width, height)

while (1):
    _, img = cap.read()
    if _ is True:
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    else:
        continue

    # blue color
    blue_lower = np.array([86, 0, 90], np.uint8)
    blue_upper = np.array([163, 64, 145], np.uint8)
    blue = cv2.inRange(hsv, blue_lower, blue_upper)

    kernal = np.ones((9, 9), "uint8")
    blue = cv2.dilate(blue, kernal)
    res_blue = cv2.bitwise_and(img, img, mask=blue)

    # Tracking blue
    (_, contours, hierarchy) = cv2.findContours(blue, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for pic, contour in enumerate(contours):
        area = cv2.contourArea(contour)
        if (area > 2000):
            print(area)
            x, y, w, h = cv2.boundingRect(contour)
            img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            cv2.putText(img, "Blue Colour", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0))

    cv2.imshow("Color Tracking", img)
    if cv2.waitKey(10) & 0xFF == ord('q'):
        cap.release()
        cv2.destroyAllWindows()
        break
I would like to mesh all rectangles as in these pictures; you can see the mesh of the contour next to them.
In my case, I would like to mesh the rectangles themselves.
This picture is taken from this video
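
A minimal sketch of one way to do this with cv2.line, using a hypothetical helper draw_mesh and a fixed cell size (the 20-pixel step is only an illustration): draw vertical and horizontal grid lines inside each detected bounding rectangle.
import cv2

def draw_mesh(image, x, y, w, h, step=20):
    # Vertical grid lines inside the rectangle (x, y, w, h).
    for gx in range(x, x + w + 1, step):
        cv2.line(image, (gx, y), (gx, y + h), (255, 0, 0), 1)
    # Horizontal grid lines.
    for gy in range(y, y + h + 1, step):
        cv2.line(image, (x, gy), (x + w, gy), (255, 0, 0), 1)

# In the contour loop above, after cv2.rectangle(img, ...):
# draw_mesh(img, x, y, w, h)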

OpenCV python output bounding rectangle X coordinates to object

I need to create an object that holds the X coordinate of the bounding rectangle that is tracking movement, using Python with OpenCV. Does anyone know of a way I can do this?
I will use that object to calculate servo values for a laser tracker.
import cv2

cap = cv2.VideoCapture(0)

ret1, frame1 = cap.read()
gray1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
gray1 = cv2.GaussianBlur(gray1, (21, 21), 0)
cv2.imshow('window', frame1)

while(True):
    ret2, frame2 = cap.read()
    gray2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.GaussianBlur(gray2, (21, 21), 0)

    deltaframe = cv2.absdiff(gray1, gray2)
    frame_flip2 = cv2.flip(deltaframe, -1)
    #cv2.imshow('delta', frame_flip2)
    threshold = cv2.threshold(deltaframe, 25, 255, cv2.THRESH_BINARY)[1]
    threshold = cv2.dilate(threshold, None)
    #cv2.imshow('threshold', threshold)
    _, countour, heirarchy = cv2.findContours(threshold, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    for i in countour:
        if cv2.contourArea(i) < 600:
            continue
        (x, y, w, h) = cv2.boundingRect(i)
        cv2.rectangle(frame2, (x, y), (x + w, y + h), (255, 0, 0), 2)

    frame_flip = cv2.flip(frame2, -1)
    cv2.imshow('window', frame_flip)
    if cv2.waitKey(10) == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
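
A minimal sketch of one way to keep the bounding-rectangle X coordinate in an object for the servo calculation (the class and method names here are assumptions for illustration, not an established API):
class TrackedTarget:
    """Stores the latest bounding rectangle of the moving object."""

    def __init__(self):
        self.x = None
        self.w = None

    def update(self, x, y, w, h):
        self.x = x
        self.w = w

    def center_x(self):
        # Horizontal centre of the rectangle, or None if nothing has been seen yet.
        return None if self.x is None else self.x + self.w // 2


target = TrackedTarget()

# Inside the contour loop, after (x, y, w, h) = cv2.boundingRect(i):
#     target.update(x, y, w, h)
# Later, map target.center_x() from 0..frame_width to the servo's angle range.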

detect a single shape and colour in realtime using opencv

I'm working on a project that requires me to detect a red rectangle in real time. So far I've managed to get the colour and shape detected together, but the code can't differentiate the rectangle from other objects that are red.
How might I go about doing this?
import cv2
import numpy as np

def nothing():
    pass

cap = cv2.VideoCapture(0)
font = cv2.FONT_HERSHEY_COMPLEX

while True:
    _, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # Red color
    low_red = np.array([175, 50, 20])
    high_red = np.array([180, 255, 255])
    mask1 = cv2.inRange(hsv, low_red, high_red)
    kernel = np.ones((5, 5), np.uint8)
    mask2 = cv2.erode(mask1, kernel)
    red = cv2.bitwise_and(frame, frame, mask=mask2)

    contours, _ = cv2.findContours(mask2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        approx = cv2.approxPolyDP(cnt, 0.02 * cv2.arcLength(cnt, True), True)
        hull = cv2.convexHull(cnt)
        x = approx.ravel()[0]
        y = approx.ravel()[1]
        if area > 400:
            cv2.drawContours(frame, [approx], 0, (0, 0, 0), 5)
            if len(approx) == 4:
                cv2.putText(frame, "Rectangle", (x, y), font, 1, (0, 0, 0))

    edges = cv2.Canny(frame, 100, 200)
    _, threshold_binary = cv2.threshold(frame, 128, 255, cv2.THRESH_BINARY)
    _, threshold_binary_inv = cv2.threshold(frame, 128, 255, cv2.THRESH_BINARY_INV)
    _, threshold_trunc = cv2.threshold(frame, 128, 255, cv2.THRESH_TRUNC)
    _, threshold_to_zero = cv2.threshold(frame, 12, 255, cv2.THRESH_TOZERO)

    cv2.imshow("Frame", frame)
    cv2.imshow('edges', edges)
    cv2.imshow('red', red)
    cv2.imshow("mask", mask1)

    key = cv2.waitKey(1)
    if key == 27:
        cap.release()
        cv2.destroyAllWindows()
        break
Output image:
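
One way to reject red blobs that are not rectangles is to accept a contour only if its polygon approximation has four vertices, is convex, and fills most of its bounding box. A minimal sketch for the contour loop (the 0.8 fill-ratio threshold is an assumption to tune):
for cnt in contours:
    area = cv2.contourArea(cnt)
    if area < 400:
        continue
    approx = cv2.approxPolyDP(cnt, 0.02 * cv2.arcLength(cnt, True), True)
    x, y, w, h = cv2.boundingRect(approx)
    fill_ratio = area / float(w * h)  # how much of the bounding box the blob covers
    if len(approx) == 4 and cv2.isContourConvex(approx) and fill_ratio > 0.8:
        cv2.drawContours(frame, [approx], 0, (0, 0, 0), 5)
        cv2.putText(frame, "Rectangle", (x, y), font, 1, (0, 0, 0))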

OpenCV Background Subtraction Get Color Objects (Python)

The background subtraction method (BackgroundSubtractorMOG2) normally returns its output as a binary image.
Is there a way to recover the original colour of the object after applying BackgroundSubtractorMOG2?
import cv2
import numpy as np

cap = cv2.VideoCapture("people.mkv")
kernel_dil = np.ones((10, 10), np.uint8)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
fgbg = cv2.createBackgroundSubtractorMOG2(history=0, varThreshold=444, detectShadows=False)

while True:
    ret, frame1 = cap.read()
    frame = cv2.resize(frame1, (1364, 700), fx=0, fy=0, interpolation=cv2.INTER_CUBIC)

    # White image with the region of interest filled black, OR-ed onto the frame
    # so that everything outside the ROI becomes white.
    mask = np.zeros(frame.shape, dtype=np.uint8)
    mask.fill(255)
    roi_corners = np.array([[(11, 652), (1353, 652), (940, 84), (424, 84)]], dtype=np.int32)
    cv2.fillPoly(mask, roi_corners, 0)
    masking = cv2.bitwise_or(frame, mask)

    if ret == True:
        fgmask = fgbg.apply(masking, mask)
        fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
        dilation = cv2.dilate(fgmask, kernel_dil, iterations=1)  # was fgmask2, which is undefined
        (contours, hierarchy) = cv2.findContours(dilation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        for pic, contour in enumerate(contours):
            area = cv2.contourArea(contour)
            x, y, w, h = cv2.boundingRect(contour)
            if (area > 0.001):
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
                cv2.putText(frame, 'People', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 0, 0), 2, cv2.LINE_AA)

        cv2.imshow("FullScreen", frame)
        cv2.imshow("FGMask1", fgmask)
        cv2.imshow("FGMask2", dilation)

    key = cv2.waitKey(12)
    if key == ord("q"):
        break
If you treat the output as a binary mask, you can simply do a bitwise AND with your original image:
cv2.bitwise_and(input, input, mask=yourMOG_output)
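Applied to the code above, a minimal sketch (assuming frame is the resized colour frame and fgmask is the cleaned-up mask returned by fgbg.apply) would be:
# Keep the original colours only where the foreground mask is non-zero;
# pixels classified as background stay black.
colour_objects = cv2.bitwise_and(frame, frame, mask=fgmask)
cv2.imshow("Colour foreground", colour_objects)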

Python cv2 VideoWriter File getting corrupted

I was following this tutorial and I tried to save the video to an avi file, but every time I tried the file was corrupted. I was able to save the frames individually using cv2.imwrite(), but stitching together the individual frames was a lot of work, and would lag the entire program. Here is my code:
from imutils.video import VideoStream
import imutils
import time
import cv2

MINIMUM_AREA = 500

# For Video Recording Purposes
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('vibralert_test001.avi', fourcc, 20, (640, 480))

vs = VideoStream(src=0).start()
print("Setting up feed.")
time.sleep(2)
print("Live")

firstFrame = None

while True:
    frame = vs.read()
    text = "No Movement Detected"

    frame = imutils.resize(frame, width=500)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    if firstFrame is None:
        firstFrame = gray
        continue

    delta = cv2.absdiff(firstFrame, gray)
    thresh = cv2.threshold(delta, 25, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)

    for c in cnts:
        if cv2.contourArea(c) < MINIMUM_AREA:
            continue
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        text = "Movement Detected"

    cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.imshow("VibrAlert v0.1", frame)
    out.write(frame)

    key = cv2.waitKey(1) & 0xFF
    if key == 27:
        break

vs.stop()
out.release()
cv2.destroyAllWindows()

print('End Feed')
The size of the frame images needs to be the same as the frameSize given to the VideoWriter, (640, 480). The frame is being resized to a width of 500, while the VideoWriter is expecting a width of 640.
However, changing the resize to frame = imutils.resize(frame, width=640) probably won't work either. imutils.resize returns images with the same aspect ratio as the original image, so the height won't necessarily be 480. I would suggest replacing that line with cv2.resize:
frame = cv2.resize(frame, (640,480))
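Alternatively, a minimal sketch that keeps imutils.resize and instead builds the VideoWriter from the first resized frame's actual dimensions (creating the writer lazily like this is an assumption, not part of the original tutorial):
out = None  # create the writer once the first frame's size is known

while True:
    frame = vs.read()
    frame = imutils.resize(frame, width=500)

    if out is None:
        h, w = frame.shape[:2]
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        # frameSize is (width, height) and must match every frame passed to write()
        out = cv2.VideoWriter('vibralert_test001.avi', fourcc, 20, (w, h))

    # ... motion detection and drawing as in the code above ...
    out.write(frame)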
