Use Python to save screenshots in an array

How do I use Python, mss, and OpenCV to capture my computer screen and save it as an array of images to form a movie? I am converting to grayscale so each frame is a 2D array. I would like to store each 2D screenshot in a 3D array for viewing and processing. I am having a hard time constructing an array that saves the sequence of screenshots as well as plays back the sequence of screenshots in cv2.
Thanks a lot
import time
import numpy as np
import cv2
import mss
from PIL import Image

with mss.mss() as sct:
    fps_list = []
    matrix_list = []
    monitor = {'top': 40, 'left': 0, 'width': 800, 'height': 640}
    timer = 0
    while timer < 100:
        last_time = time.time()
        # Get raw pixels from screen and save to numpy array
        img = np.array(sct.grab(monitor))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Save img data as matrix
        matrix_list[timer, :, :] = img
        # Display image
        cv2.imshow('Normal', img)
        fps = 1 / (time.time() - last_time)
        fps_list.append(fps)
        # Press q to quit
        timer += 1
        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
    # Calculate fps
    fps_list = np.asarray(fps_list)
    print(np.average(fps_list))
    # Playback image movie from screen capture
    t = 0
    while t < 100:
        cv.imshow('Playback', img_matrix[t])
        t += 1

A clue, perhaps: save the screenshots into a list and replay them later (you will have to adapt the sleep time):
import time
import cv2
import mss
import numpy

with mss.mss() as sct:
    monitor = {'top': 40, 'left': 0, 'width': 800, 'height': 640}
    img_matrix = []

    for _ in range(100):
        # Get raw pixels from screen and save to numpy array
        img = numpy.array(sct.grab(monitor))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Save img data as matrix
        img_matrix.append(img)

        # Display image
        cv2.imshow('Normal', img)

        # Press q to quit
        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break

    # Playback image movie from screen capture
    for img in img_matrix:
        cv2.imshow('Playback', img)

        # Press q to quit
        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
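If you specifically want the 3D array the question asks for, the captured list can be stacked into a single NumPy array after the loop, assuming every grabbed frame has the same shape. A minimal sketch, reusing the img_matrix list built above:

import numpy as np

# Stack the 2D grayscale frames along a new first axis:
# shape becomes (n_frames, height, width)
img_stack = np.stack(img_matrix, axis=0)

# Playback by indexing the first axis
for frame in img_stack:
    cv2.imshow('Playback', frame)
    if cv2.waitKey(25) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        break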

Use collections.OrderedDict() to save the sequence:
import collections
....
fps_list = collections.OrderedDict()
...
fps_list[timer] = fps
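The same idea can keep the frames themselves in capture order, not just the fps values; a minimal sketch, reusing the timer and img variables from the question's loop:

import collections

img_dict = collections.OrderedDict()
# inside the capture loop, key each grayscale frame by the frame counter
img_dict[timer] = img
# after the loop, iterating the dict yields the frames in capture order
for t, frame in img_dict.items():
    cv2.imshow('Playback', frame)
    cv2.waitKey(25)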

Related

OpenCV display image with video without image background

I want to display a smiley image on an OpenCV video stream.
With this program I am able to display the image, but the problem is that it comes with the image's background. I just want a round-shaped image without the background. I have tried to remove the background using online tools. Is there any way I can display the smiley without the image's background?
import cv2
import time

cap = cv2.VideoCapture(0)
fps = int(cap.get(cv2.CAP_PROP_FPS))
print("This is the fps ", fps)

if cap.isOpened() == False:
    print("Error File Not Found")

while cap.isOpened():
    ret, frame = cap.read()
    if ret == True:
        time.sleep(1/fps)
        img = cv2.imread("/home/pi/Downloads/red-removebg-preview (1).png", cv2.IMREAD_UNCHANGED)
        frame[100:390, 0:290] = img
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break
Just to try, I am attaching another image.
As mentioned in the comments, here is the basic idea:
import cv2
import time
from skimage import transform, img_as_float
import numpy as np

# reading the video
cap = cv2.VideoCapture('SampleVideo_1280x720_1mb.mp4')
# cap = cv2.VideoCapture(0) for camera
fps = int(cap.get(cv2.CAP_PROP_FPS))
print("This is the fps ", fps)

if cap.isOpened() == False:
    print("Error File Not Found")

# I am using an emoji that is not (290, 290), that is why I am using resize
img = cv2.imread("d7glM.png", cv2.IMREAD_UNCHANGED)
img = transform.resize(img, (290, 290))
img = img_as_float(img)

# the input emoji should have an alpha channel, otherwise you can't mask
if img.shape[2] < 4:
    print('sorry, can\'t mask')

while cap.isOpened():
    ret, frame = cap.read()
    if ret == True:
        # here I am using img_as_float() to convert
        # both the images to float64
        frame = img_as_float(frame)
        # I am using a sample video which has a shape of (720, 1280, 3);
        # the emoji is a PNG with an alpha channel (R G B A).
        # I will use the alpha channel to mask the background.
        # masking red channel
        frame[100:390, 0:290, 0] *= 1 - img[:, :, 3]
        # masking green channel
        frame[100:390, 0:290, 1] *= 1 - img[:, :, 3]
        # masking blue channel
        frame[100:390, 0:290, 2] *= 1 - img[:, :, 3]
        # now finally add the image in that mask
        frame[100:390, 0:290, :] += img[:, :, :3]
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break
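As a side note, the three per-channel multiplications above perform the same operation on each colour channel, so, assuming the same float images, they can be collapsed into one broadcasted line. A minimal sketch:

# alpha has shape (290, 290); a trailing axis lets it broadcast
# across all three colour channels at once
alpha = img[:, :, 3]
frame[100:390, 0:290, :] *= (1 - alpha)[:, :, None]
frame[100:390, 0:290, :] += img[:, :, :3]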

How to increase the fps of screen recorder in cv2

I have written code to record the screen using Python, but when I look at the output I find that its fps is very low. Is there any better code than mine to increase the fps of the screen recorder?
If yes, then please reply.
Here is my code:
import cv2
import numpy as np
import pyautogui
import datetime

# display screen resolution, get it from your OS settings
SCREEN_SIZE = (1366, 768)
# define the codec
fourcc = cv2.VideoWriter_fourcc(*"XVID")
# create the video writer object
now = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
out = cv2.VideoWriter("screen recorder" + now + ".avi", fourcc, 5.0, (SCREEN_SIZE))

while True:
    img = pyautogui.screenshot()
    frame = np.array(img)
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    out.write(frame)
    cv2.imshow("screenshot", frame)
    # if the user clicks q, it exits
    if cv2.waitKey(1) == ord("q"):
        break

cv2.destroyAllWindows()
out.release()

img = pyautogui.screenshot(region=(0, 0, 300, 400))
The third parameter to the cv2.VideoWriter constructor is the frame rate (https://docs.opencv.org/3.4/dd/d9e/classcv_1_1VideoWriter.html#ac3478f6257454209fa99249cc03a5c59). Currently, you have it set to 5.0. For example, for 30 fps, instantiate the VideoWriter with:
out = cv2.VideoWriter("screen recorder"+now+".avi", fourcc, 30.0, (SCREEN_SIZE))
EDIT: In order to also capture frames at the correct frame rate, we can pause the while loop using the waitKey function. We can rewrite the OP's code like so:
import cv2
import numpy as np
import pyautogui
import datetime
import time

# display screen resolution, get it from your OS settings
SCREEN_SIZE = (1366, 768)
FRAME_RATE = 30.0  # desired frame rate
# define the codec
fourcc = cv2.VideoWriter_fourcc(*"XVID")
# create the video writer object
now = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
out = cv2.VideoWriter("screen recorder" + now + ".avi", fourcc, FRAME_RATE, (SCREEN_SIZE))

while True:
    st = time.time()  # collect start time
    img = pyautogui.screenshot()
    frame = np.array(img)
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    out.write(frame)
    cv2.imshow("screenshot", frame)
    en = time.time()  # collect end time
    # calculate time to wait before the next frame;
    # waitKey expects an int and waits forever on 0, so keep it >= 1 ms
    delay = max(1, int((1 / FRAME_RATE - (en - st)) * 1000))
    # if the user clicks q, it exits
    if cv2.waitKey(delay) == ord("q"):
        break

cv2.destroyAllWindows()
out.release()

img = pyautogui.screenshot(region=(0, 0, 300, 400))
Note: If collecting the frames is too slow (requires more than 1 / FRAME_RATE seconds per frame), then you may want to reduce the frame rate or the resolution.
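To pick a realistic FRAME_RATE, it can help to first measure how quickly frames can actually be grabbed on your machine; if the achievable rate is below the target, the written video will play back faster than real time. A minimal sketch (the 30-frame sample size is arbitrary):

import time

import numpy as np
import pyautogui

n_frames = 30
start = time.time()
for _ in range(n_frames):
    frame = np.array(pyautogui.screenshot())
elapsed = time.time() - start
print("achievable capture rate: %.1f fps" % (n_frames / elapsed))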

opencv python screen capture in ubuntu

I am trying to capture the screen and show the images continuously using opencv-python, but for some reason the images are not stacking on top of each other as they normally would. Please look at the source code and the screenshot below. I am on Ubuntu 18.04. Thanks!!
import time
import cv2
import mss
import numpy as np

with mss.mss() as sct:
    # Part of the screen to capture
    monitor = {"top": 40, "left": 0, "width": 800, "height": 640}

    while True:
        last_time = time.time()

        # Get raw pixels from the screen, save it to a Numpy array
        img = np.array(sct.grab(monitor))

        # Display the picture
        cv2.imshow('frame', img)

        print("fps: {}".format(1 / (time.time() - last_time)))

        # Press "q" to quit
        if cv2.waitKey(1) & 0xFF == ord("q"):
            cv2.destroyAllWindows()
            break

cv2.waitKey(25) & 0xFF == ord('q'): and cv2.imwrite() not working

I am following this project to make an AI that plays the Google Chrome Dino game. I am stuck at a point while capturing the screen feed to generate training data. I am new to CV.
The project is supposed to break the video feed and save the CSV file in the cv2.waitKey(25) & 0xFF == ord('q'): condition, which I believe is when the key "q" is pressed. But nothing happens when I press 'q'. The print statement in this if condition doesn't print when I press q.
Also, although the console prints the print statement in the 'up', 'down' or 't' key-pressed conditions, the
cv2.imwrite('./images/frame_(0).jpg'.format(x), img)
doesn't seem to be working, as no images are saved in the images folder.
Here is the code:
import cv2
from mss import mss
import numpy as np
import keyboard

# Captures dinosaur run for given coordinates
def start():
    """
    Capture video feed frame by frame, crop out the coords and the dino, then process
    """
    sct = mss()
    coordinates = {
        'top': 168,
        'left': 230,
        'width': 624,
        'height': 141
    }
    with open('actions.csv', 'w') as csv:
        x = 0
        while True:
            img = np.array(sct.grab(coordinates))
            # crop out the dino from the image array
            img = img[::, 75:624]
            # edge detection to reduce amount of image processing work
            img = cv2.Canny(img, threshold1=100, threshold2=200)
            if keyboard.is_pressed('up arrow'):
                cv2.imwrite('./images/frame_(0).jpg'.format(x), img)
                csv.write('1\n')
                print('jump write')
                x += 1
            if keyboard.is_pressed('down arrow'):
                cv2.imwrite('./images/frame_(0).jpg'.format(x), img)
                csv.write('2\n')
                print('duck')
                x += 1
            if keyboard.is_pressed('t'):
                cv2.imwrite('./images/frame_(0).jpg'.format(x), img)
                csv.write('0\n')
                print('nothing')
                x += 1
            # break the video feed
            if cv2.waitKey(25) & 0xFF == ord('q'):
                csv.close()
                cv2.destroyAllWindows()
                print('Exited')
                break

def play():
    sct = mss()
    coordinates = {
        'top': 168,
        'left': 230,
        'width': 624,
        'height': 141
    }
    img = np.array(sct.grab(coordinates))
    # crop out the dinosaur from the image array
    img = img[::, 75:615]
    # edge detection to reduce amount of image processing work
    img = cv2.Canny(img, threshold1=100, threshold2=200)
cv2.waitKey() only works if you press the key while an OpenCV window (e.g. one created with cv2.imshow()) is focused. It seems to me that you don't use the GUI features of OpenCV at all.
If you have an OpenCV GUI in your program, focus it and then press the key.
If not, and if you don't want to implement one, why not use keyboard.is_pressed()?
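A minimal sketch of that last suggestion, replacing the waitKey() exit check inside the question's capture loop with the keyboard module that is already imported there:

# break the video feed without needing a focused OpenCV window
if keyboard.is_pressed('q'):
    print('Exited')
    break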

Histogram of my cam in real time

I'm trying to show the histogram of the grayscale feed from my webcam in real time. The problem is that the histogram is not being updated and my cam freezes until I close the histogram's window. How can I fix this? I want to show the grayscale image from my webcam and its histogram at the same time. Is it possible to do that?
import numpy as np
import cv2
from matplotlib import pyplot as plt

cap = cv2.VideoCapture(0)

while(True):
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow('Janela', frame)
    cv2.imshow('Outra', gray)
    plt.hist(gray.ravel(), 256, [0, 256])
    plt.show()
    if (cv2.waitKey(1) & 0xFF == 27):
        break

cap.release()
cv2.destroyAllWindows()
I have been working on the same task for a while. After some time, I have a piece of code that works very well. It displays the camera image in one window and a histogram in another. Since I'm interested in finding colors, I'm working with the "hue" channel of each frame.
# import the necessary packages
import cv2
import numpy as np

# Create window to display image
cv2.namedWindow('colorhist', cv2.WINDOW_AUTOSIZE)

# Set histogram parameters
hist_height = 64
hist_width = 256
nbins = 32
bin_width = hist_width // nbins

camera_id = 0  # type of webcam [0 built-in | 1 external]
cameraWidth = 320
cameraHeight = 240

if camera_id == 0:
    cameraId = "PC webcam"
elif camera_id == 1:
    cameraId = "External webcam"

camera = cv2.VideoCapture(camera_id)

# set camera image to 320 x 240 pixels
camera.set(3, cameraWidth)
camera.set(4, cameraHeight)

cameraInfo = "Image size (%d,%d)" % (camera.get(3), camera.get(4))

# initialize mask matrix
mask = np.zeros((cameraHeight, cameraWidth), np.uint8)

# draw a circle in mask matrix
cv2.circle(mask, (cameraWidth // 2, cameraHeight // 2), 50, 255, -1)

# Create an empty image for the histogram
h = np.zeros((hist_height, hist_width))

# Create array for the bins
bins = np.arange(nbins, dtype=np.int32).reshape(nbins, 1)

while True:
    # grab the current frame
    (grabbed, frame) = camera.read()

    if not grabbed:
        print("Camera could not be started.")
        break

    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # Calculate and normalise the histogram
    hist_hue = cv2.calcHist([hsv], [0], mask, [nbins], [0, 256])
    cv2.normalize(hist_hue, hist_hue, hist_height, cv2.NORM_MINMAX)
    hist = np.int32(np.around(hist_hue))
    pts = np.column_stack((bins, hist))

    # Loop through each bin and plot the rectangle in white
    for x, y in enumerate(hist):
        cv2.rectangle(h, (x * bin_width, int(y)), (x * bin_width + bin_width - 1, hist_height), (255), -1)

    # Flip upside down
    h = np.flipud(h)

    # Show the histogram
    cv2.imshow('Color Histogram', h)
    h = np.zeros((hist_height, hist_width))

    frame = cv2.bitwise_and(frame, frame, mask=mask)
    cv2.putText(frame, cameraInfo, (10, 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.imshow(cameraId, frame)

    key = cv2.waitKey(1) & 0xFF

    # if the `q` key is pressed, break from the loop
    if key == ord("q"):
        break

camera.release()
cv2.destroyAllWindows()
The code is based on the code from JohnLinux's post (How do I plot a 32-Bin Histogram for a Grayscale Image in Python using OpenCV), and other lines come from what I have learned from Adrian Rosenbrock's site https://www.pyimagesearch.com/.
I see there is very little feedback on this topic. Some solutions look overwhelmingly complex. My brother needed just a simple version of such a programme for his studies, so I decided to write it as simply as it can be (11 lines of code, enjoy :))
import cv2 as cv
from matplotlib import pyplot as plt

vid = cv.VideoCapture(0, cv.CAP_DSHOW)

while True:
    ret, img = vid.read()
    cv.imshow("img", img)
    plt.hist(img.ravel(), 256, [0, 256])
    plt.draw()
    plt.pause(0.1)
    plt.clf()

cv.destroyAllWindows()
Another solution, for the picamera, doing stream + histogram:
from picamera import PiCamera
from picamera.array import PiRGBArray
import cv2
import matplotlib.pyplot as plt

camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 40
rawCapture = PiRGBArray(camera, size=(640, 480))

for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    bgr_arr = frame.array
    gray_arr = cv2.cvtColor(bgr_arr, cv2.COLOR_BGR2GRAY)
    cv2.imshow("Preview", gray_arr)
    key = cv2.waitKey(1) & 0xFF
    rawCapture.truncate(0)
    plt.hist(gray_arr.ravel(), 256, [0, 256], histtype='stepfilled')
    plt.title('Grayscale histogram')
    plt.xlabel('Grayscale value')
    plt.ylabel('Pixel count')
    plt.plot()
    plt.pause(0.1)
    plt.clf()
    if key == ord("q"):
        cv2.destroyAllWindows()
        break
