How to draw trailing line on a video using opencv - python

I'm trying to draw a path using dots on the video during a certain timestamp interval.
The code works, but the previous position of the dot disappears; I don't want it to disappear. Can anyone help me with what to tweak in this code to preserve all the dots?
from collections import deque
from imutils.video import VideoStream
import numpy as np
import cv2
import imutils
import time
from numpy import random

vs = cv2.VideoCapture('/media/intercept.mp4')
pts = deque(maxlen=64)  # buffer size

# Position to start drawing the dots
i = 0
j = 330

# keep looping
while True:
    ret, frame = vs.read()
    if frame is None:
        break

    # resize the frame, blur it, and convert it to the HSV color space
    frame = imutils.resize(frame, width=1800)
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    i += 2
    j = j - random.randint(-10, 10)  # introduce some jitter/randomness
    i = i + random.randint(-10, 10)

    timestamps = vs.get(cv2.CAP_PROP_POS_MSEC)
    if 15000 < timestamps < 20000:
        print(i, j, "DRAWING")
        cv2.circle(frame, (i, j), 10, (0, 0, 255), -1)  # draw dot

    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # if the 'q' key is pressed, stop the loop
    if key == ord("q"):
        break

cv2.destroyAllWindows()
vs.release()

Was able to fix this one by drawing onto a persistent mask and adding it onto each frame:
from collections import deque
from imutils.video import VideoStream
import numpy as np
import cv2
import imutils
import time
from numpy import random

vs = cv2.VideoCapture('/media/intercept.mp4')
pts = deque(maxlen=64)  # buffer size
color = np.random.randint(0, 255, (100, 3))

# read the first frame and create a blank mask to accumulate the drawing on
ret, old_frame = vs.read()
old_frame = imutils.resize(old_frame, width=1800)
mask = np.zeros_like(old_frame)

i = 0
j = 330
ct = 0

# keep looping
while True:
    ret, frame = vs.read()
    if frame is None:
        break

    # resize the frame, blur it, and convert it to the HSV color space
    frame = imutils.resize(frame, width=1800)
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # remember the previous position before updating it
    x = i
    y = j
    i += 2
    j = j - random.randint(-10, 10)
    i = i + random.randint(-10, 10)
    print(int(i % 330))
    ct += 10

    timestamps = vs.get(cv2.CAP_PROP_POS_MSEC)
    if 1000 < timestamps < 20000:
        print(i, j, "Drawing")
        # cv2.circle(frame, (i, j), 10, (0, 0, 255), -1)  # draw circle
        mask = cv2.line(mask, (x, y), (i, j), [255, 0, 9], 2)  # persistent trail
        frame = cv2.circle(frame, (i, j), 5, [0, 255, 222], -1)  # current dot

    img = cv2.add(frame, mask)
    cv2.imshow("Frame", img)
    key = cv2.waitKey(1) & 0xFF
    # if the 'q' key is pressed, stop the loop
    if key == ord("q"):
        break

cv2.destroyAllWindows()
vs.release()
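The key change is drawing the trail onto a separate mask image that is never cleared, then compositing it onto every new frame with cv2.add, so earlier strokes persist. If only the last few positions should stay visible instead of the whole trail, the otherwise unused pts deque could hold recent points and be redrawn each frame; a minimal sketch of that alternative (my assumption, not from the original post):
from collections import deque
import cv2

pts = deque(maxlen=64)  # keep only the most recent 64 positions

def draw_trail(frame, point):
    # remember the newest position, then redraw the whole buffer on this frame
    pts.appendleft(point)
    for (px, py) in pts:
        cv2.circle(frame, (int(px), int(py)), 5, (0, 255, 222), -1)
    return frame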

Related

How to remove noise from CLAHE, Python?

I was trying to figure out a way to read the veins in a video capture (I am using a special camera) using OpenCV in Python, but there is too much noise in the results I got. Can someone help?
Here is the result: https://ibb.co/cbdxY5F
I want everything inside the red circle to be clear, without noise: https://ibb.co/C9SPjyX
import cv2
import numpy as np
import time

def multi_clahe(img, num):
    # apply CLAHE repeatedly with a growing tile size
    for i in range(num):
        img = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(4 + i * 2, 4 + i * 2)).apply(img)
    return img

img = cv2.VideoCapture(1)
while True:
    ret, frame = img.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cl3 = multi_clahe(gray, 5)  # enhance local contrast on the grayscale frame
    cv2.imshow('image', cl3)
    k = cv2.waitKey(1) & 0xFF
    if k == ord("a"):
        cv2.imwrite(time.strftime("Screenshot%Y%m%d%H%M%S.jpg"), cl3)
        cv2.imwrite(time.strftime("1.jpg"), cl3)
        cv2.imwrite("temp.jpg", cl3)
        break
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
img.release()
cv2.destroyAllWindows()
I need to remove the noise produced by CLAHE in Python.
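One common way to tame this kind of noise (a sketch of my own, not from the original thread) is to smooth the grayscale frame before CLAHE and optionally denoise again afterwards, for example with cv2.medianBlur and cv2.fastNlMeansDenoising; the clip limit, tile size and filter strengths below are assumptions to tune:
import cv2

def clahe_denoised(gray, clip=2.0, tiles=(8, 8)):
    # smooth sensor noise first so CLAHE does not amplify it
    smoothed = cv2.medianBlur(gray, 5)
    clahe = cv2.createCLAHE(clipLimit=clip, tileGridSize=tiles)
    out = clahe.apply(smoothed)
    # clean residual grain left after local equalization
    return cv2.fastNlMeansDenoising(out, None, h=10,
                                    templateWindowSize=7, searchWindowSize=21)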

Replacing 3D submatrix in Numpy and OpenCV2

Replacing a sub-part of a matrix with another small matrix in numpy generally seems to work for my purposes, but I'm running into something I can't reconcile. Consider the following code, which creates two 3D matrices the shape of OpenCV2 webcam input (in my case (480, 640, 3)): one of all 1s (frame) and one of random floats (rgb_noise_mask). It replaces a specified submatrix of frame with the same submatrix of rgb_noise_mask and displays the result. This code works as intended, displaying a block of RGB-based static on a field of white.
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
ret, frame = cap.read()
frame = np.ones(frame.shape)
rgb_noise_mask = np.random.random(size=frame.shape)

while True:
    boxes = [[300, 300, 30, 30]]
    for box in boxes:
        x, y, width, height = box
        x2, y2 = x + width, y + height  # box corners
        frame[y:y2, x:x2] = rgb_noise_mask[y:y2, x:x2]
    cv2.imshow("frame", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()
Now take off the training wheels and use the actual webcam input instead of faking it. That same box now appears as uniform black instead of the expected colors:
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
ret, frame = cap.read()
rgb_noise_mask = np.random.random(size=frame.shape)

while True:
    ret, frame = cap.read()
    boxes = [[300, 300, 30, 30]]
    for box in boxes:
        x, y, width, height = box
        x2, y2 = x + width, y + height  # box corners
        frame[y:y2, x:x2] = rgb_noise_mask[y:y2, x:x2]
    cv2.imshow("frame", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()
Why is this, and how can I get around it? Further adding to my confusion is that if I replace frame[y:y2, x:x2] = rgb_noise_mask[y:y2, x:x2] with frame[y:y2, x:x2] = frame[y:y2, x:x2][::-1] in the second code sample it behaves as expected and displays the live output with that square mirrored.
The issue here is that ret, frame = cap.read() returns frame as a numpy array with dtype=np.uint8, while rgb_noise_mask holds floats between 0 and 1, so its values all truncate to 0 (black) when assigned into the uint8 frame.
A simple fix is to generate the noise as integers with randint:
rgb_noise_mask = np.random.randint(0,256, size=frame.shape, dtype=np.uint8)
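Alternatively (not part of the original answer), the float mask can be kept and scaled to the frame's 0-255 range at assignment time:
frame[y:y2, x:x2] = (rgb_noise_mask[y:y2, x:x2] * 255).astype(np.uint8)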

How to draw a dotted line in a video using open cv

I tried this code to draw an animated dot on a video
from collections import deque
from imutils.video import VideoStream
import numpy as np
import cv2
import imutils
import time

vs = cv2.VideoCapture('/media/intercept.mp4')
pts = deque(maxlen=64)  # buffer size

# keep looping
while True:
    ret, frame = vs.read()
    if frame is None:
        break

    # resize the frame, blur it, and convert it to the HSV color space
    frame = imutils.resize(frame, width=600)
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    for i in range(10, 260, 20):
        time.sleep(0.5)  # to visualise dots one by one
        cv2.circle(frame, (i, i), 10, (0, 0, 255), -1)  # draw circle
        cv2.imshow('frame', frame)  # show output image
        if cv2.waitKey(1) == ord('q'):
            break

    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # if the 'q' key is pressed, stop the loop
    if key == ord("q"):
        break

cv2.destroyAllWindows()
vs.release()
But the entire animation takes place over a single frame rather than being continuous over consecutive frames. I also want to add a certain element of jitter/randomness to the red ball/circle.
How can I achieve both?
Ah, solved it by tweaking the sleep timer and skipping the frames, so the dot advances once per frame instead of animating inside a single frame:
from collections import deque
from imutils.video import VideoStream
import numpy as np
import cv2
import imutils
import time

vs = cv2.VideoCapture('/media/intercept.mp4')
pts = deque(maxlen=64)  # buffer size
i = 0
ct = 0

# keep looping
while True:
    ret, frame = vs.read()
    if frame is None:  # stop at the end of the video
        break

    # resize the frame, blur it, and convert it to the HSV color space
    frame = imutils.resize(frame, width=600)
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    i += 2
    ct += 10
    # for i in range(10, 260, 20):
    #     time.sleep(0.5)  # to visualise dots one by one
    if ct % 10 == 0:
        cv2.circle(frame, (i, i), 10, (0, 0, 255), -1)  # draw circle
        # cv2.imshow('frame', frame)  # show output image
        if cv2.waitKey(1) == ord('q'):
            break

    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # if the 'q' key is pressed, stop the loop
    if key == ord("q"):
        break

cv2.destroyAllWindows()
vs.release()
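For the jitter part of the question (not addressed in this answer), a small random offset per frame is enough; a minimal sketch of a helper, assuming numpy's random.randint (upper bound exclusive):
import cv2
from numpy import random

def draw_jittered_dot(frame, x, y, spread=5):
    # perturb the nominal position a little each frame before drawing
    jx = int(x + random.randint(-spread, spread + 1))
    jy = int(y + random.randint(-spread, spread + 1))
    cv2.circle(frame, (jx, jy), 10, (0, 0, 255), -1)
    return frame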

Controlling Contrast and Brightness of Video Stream in OpenCV and Python

I’m using OpenCV3 and Python 3.7 to capture a live video stream from my webcam and I want to control the brightness and contrast. I cannot control the camera settings using OpenCV's cap.set(cv2.CAP_PROP_BRIGHTNESS, float) and cap.set(cv2.CAP_PROP_BRIGHTNESS, int) commands so I want to apply the contrast and brightness after each frame is read. The Numpy array of each captured image is (480, 640, 3). The following code properly displays the video stream without any attempt to change the brightness or contrast.
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
I get a washed-out video stream when I use Numpy’s clip() method to control the contrast and brightness, even when I set contrast = 1.0 (no change to contrast) and brightness = 0 (no change to brightness). Here is my attempt to control contrast and brightness.
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    contrast = 1.0
    brightness = 0
    frame = np.clip(contrast * frame + brightness, 0, 255)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
How can I control the contrast and brightness of a video stream using OpenCV?
I found the solution using the numpy.clip() method and @fmw42 provided a solution using the cv2.normalize() method. I like the cv2.normalize() solution slightly better because it normalizes the pixel values to 0-255 rather than clipping them at 0 or 255. Both solutions are provided here.
The cv2.normalize() solution:
Brightness: shift the alpha and beta values by the same amount. Alpha can be negative and beta can be higher than 255. (If alpha >= 255 the picture is white, and if beta <= 0 the picture is black.)
Contrast: widen or narrow the gap between alpha and beta.
Here is the code:
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    cv2.normalize(frame, frame, 0, 255, cv2.NORM_MINMAX)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
The numpy.clip() solution:
This helped me solve the problem: How to fast change image brightness with python + OpenCV?. I need to:
1. Convert Red-Green-Blue (RGB) to Hue-Saturation-Value (HSV) first ("Value" is the same as "Brightness").
2. "Slice" the Numpy array to the Value portion of the Numpy array and adjust brightness and contrast on that slice.
3. Convert back from HSV to RGB.
Here is the working solution. Vary the contrast and brightness values. numpy.clip() ensures that all the pixel values remain between 0 and 255 in each of the channels (R, G, and B).
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    contrast = 1.25
    brightness = 50
    frame[:, :, 2] = np.clip(contrast * frame[:, :, 2] + brightness, 0, 255)
    frame = cv2.cvtColor(frame, cv2.COLOR_HSV2BGR)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
A variant of the cv2.normalize() approach:
import cv2 as cv

cap = cv.VideoCapture(0)
while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    # normalize the frame
    frame = cv.normalize(
        frame, None, alpha=0, beta=255, norm_type=cv.NORM_MINMAX, dtype=cv.CV_8UC1
    )
    # Display the resulting frame
    cv.imshow("frame", frame)
    # press q to quit
    if cv.waitKey(1) & 0xFF == ord("q"):
        break
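A note on why the first attempt looked washed out: np.clip returns a float64 array there, and cv2.imshow assumes floating-point images are scaled to the 0-1 range, so values up to 255 saturate towards white. Casting the clipped result back to 8-bit also fixes that attempt (a minimal tweak, not from the original answers):
frame = np.clip(contrast * frame + brightness, 0, 255).astype(np.uint8)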

How do I specify the upper and the lower value of a color in HSV opencv python [duplicate]

This question already has answers here:
Choosing the correct upper and lower HSV boundaries for color detection with `cv::inRange` (OpenCV)
(9 answers)
Closed 10 months ago.
I found a way to convert RGB to HSV, but I am still unable to find the upper and lower values of the color. How do I calculate them?
I have to pick out the Pikachu from the image,
and this is my code so far:
import cv2
import numpy as np

cap = cv2.VideoCapture(0)

while True:
    _, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    lower_red = np.array([30, 50, 50])
    upper_red = np.array([255, 255, 180])  # it is trial and error

    # threshold in HSV space (the bounds above are HSV values)
    mask = cv2.inRange(hsv, lower_red, upper_red)
    res = cv2.bitwise_and(frame, frame, mask=mask)

    cv2.imshow('frame', frame)
    cv2.imshow('mask', mask)
    cv2.imshow('res', res)

    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break

cv2.destroyAllWindows()
cap.release()
please help me
You can use the following program to find the upper and lower HSV values for a pixel by clicking on it (i.e. the pixel you want).
import cv2
import numpy as np

image_hsv = None       # global ;(
pixel = (20, 60, 80)   # some stupid default

# mouse callback function
def pick_color(event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDOWN:
        pixel = image_hsv[y, x]

        # you might want to adjust the ranges (+-10, etc.):
        upper = np.array([pixel[0] + 10, pixel[1] + 10, pixel[2] + 40])
        lower = np.array([pixel[0] - 10, pixel[1] - 10, pixel[2] - 40])
        print(pixel, lower, upper)

        image_mask = cv2.inRange(image_hsv, lower, upper)
        cv2.imshow("mask", image_mask)

def main():
    import sys
    global image_hsv, pixel  # so we can use it in the mouse callback

    image_src = cv2.imread(sys.argv[1])  # pick.py my.png
    if image_src is None:
        print("the image read is None............")
        return
    cv2.imshow("bgr", image_src)

    ## NEW ##
    cv2.namedWindow('hsv')
    cv2.setMouseCallback('hsv', pick_color)

    # now click into the hsv img, and look at values:
    image_hsv = cv2.cvtColor(image_src, cv2.COLOR_BGR2HSV)
    cv2.imshow("hsv", image_hsv)

    cv2.waitKey(0)
    cv2.destroyAllWindows()

if __name__ == '__main__':
    main()
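If you already know the approximate BGR colour of the object (for Pikachu, roughly pure yellow), you can also convert that single colour to HSV and build a band around its hue; a minimal sketch with assumed values and a hypothetical image file, not from the original answer:
import cv2
import numpy as np

# assumed: Pikachu is roughly pure yellow, BGR (0, 255, 255)
bgr = np.uint8([[[0, 255, 255]]])
hue = int(cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)[0, 0, 0])  # 30 for yellow

# band of +-10 around the hue, with generous saturation/value ranges
lower = np.array([max(hue - 10, 0), 100, 100])
upper = np.array([min(hue + 10, 179), 255, 255])

frame = cv2.imread("pikachu.png")  # hypothetical input image
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, lower, upper)
res = cv2.bitwise_and(frame, frame, mask=mask)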
