I have created a Python Tkinter program in which I want to display a series of images in a window, with the image changing every 6 seconds. When I use the code below, the whole program freezes for the entire 6 seconds.
loopey = "Fal"
while loopey == "Fal":
time.sleep(6)
menupic.place_forget()
menupic2.place(x=602,y=180)
home.update()
time.sleep(6)
menupic2.place_forget()
menupic.place(x=602,y=180)
home.update()
I also tried to use the after() function, but had the same issue.
def deletion(thing):
    thing.place_forget()

while True:
    home.after(6000, deletion(menupic))
    menupic2.place(x=602,y=180)
    home.update()
    home.after(6000, deletion(menupic2))
    menupic.place(x=602,y=180)
    home.update()
I would do it like this:
from tkinter import *
from PIL import Image, ImageTk
from random import randint
import numpy as np

def place_image():
    npimg = np.zeros([100, 100, 3], dtype=np.uint8)
    npimg.fill(randint(0, 255))
    pilimg = Image.fromarray(npimg)
    tkimg = ImageTk.PhotoImage(pilimg)
    label.img = tkimg  # keep a reference so the image is not garbage collected
    label.configure(image=tkimg)
    home.after(6000, place_image)  # schedule the next image change

home = Tk()
label = Label(home, text="test")
label.place(x=0, y=0)
home.after(6000, place_image)
home.mainloop()
If you also call after() inside the function, the window stays interactive.
It will still loop every 6 seconds.
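Applied to the two images from the question, a minimal sketch of the same idea (assuming menupic and menupic2 are existing widgets on home, with menupic visible initially) might look like this:

def swap(show, hide):
    hide.place_forget()              # remove the currently visible image
    show.place(x=602, y=180)         # show the other one
    home.after(6000, swap, hide, show)  # schedule the next swap with the roles reversed

home.after(6000, swap, menupic2, menupic)  # start the cycle

Because after() only schedules the callback, the mainloop keeps running and the window never freezes.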
I am working on an app that basically turns the Raspberry Pi 4 into a camera. Since I've been learning OpenCV, I thought it would be a cool way to show off everything I've done so far.
I do manage to retrieve the feed from the Raspberry Pi camera, but it's on the top layer, so it covers up everything, including my cursor, and I can't even stop the app.
I retrieve the image with this code:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# camera_pi.py
#
import time
import io
import threading
import picamera

class Camera(object):
    thread = None       # background thread that reads frames from camera
    frame = None        # current frame is stored here by background thread
    last_access = 0     # time of last client access to the camera

    def initialize(self):
        if Camera.thread is None:
            # start background frame thread
            Camera.thread = threading.Thread(target=self._thread)
            Camera.thread.start()

            # wait until frames start to be available
            while self.frame is None:
                time.sleep(0)

    def get_frame(self):
        Camera.last_access = time.time()
        self.initialize()
        return self.frame

    @classmethod
    def _thread(cls):
        with picamera.PiCamera() as camera:
            # camera setup
            camera.resolution = (1920, 1080)
            camera.hflip = True
            camera.vflip = True
            #camera.zoom = (0.22,0,0.7,0.7) # (x, y, w, h)

            # let camera warm up
            camera.start_preview()
            time.sleep(2)

            stream = io.BytesIO()
            for foo in camera.capture_continuous(stream, 'jpeg',
                                                 use_video_port=True):
                # store frame
                stream.seek(0)
                cls.frame = stream.read()

                # reset stream for next frame
                stream.seek(0)
                stream.truncate()

                # if there hasn't been any clients asking for frames in
                # the last 10 seconds stop the thread
                #if time.time() - cls.last_access > 10:
                #    break
        cls.thread = None
And then make it into a full screen Tkinter widget with this code:
import cv2
from camera_pi import *
import sys, os

if sys.version_info[0] == 2:  # the tkinter library changed its name from Python 2 to 3.
    import Tkinter
    tkinter = Tkinter  # I decided to use a library reference to avoid potential naming conflicts with people's programs.
else:
    import tkinter
from PIL import Image, ImageTk
import time

camera = Camera()
feed = camera.get_frame()
frame = cv2.imread(feed)
#cv2.imshow(frame)
#cv2.waitKey(0)
#cv2.destroyAllWindows()

root = tkinter.Tk()
w, h = root.winfo_screenwidth(), root.winfo_screenheight()
root.overrideredirect(1)
root.geometry("%dx%d+0+0" % (w, h))
root.focus_set()

canvas = tkinter.Canvas(root, width=w, height=h)
canvas.pack()
canvas.configure(background='black')

SetButton = Button(root, text="Settings", command=root.destroy)
SetButton.place(x=0, y=0)

def showPIL(pilImage):
    imgWidth, imgHeight = pilImage.size
    # resize photo to full screen
    ratio = min(w/imgWidth, h/imgHeight)
    imgWidth = int(imgWidth*ratio)
    imgHeight = int(imgHeight*ratio)
    pilImage = pilImage.resize((imgWidth, imgHeight), Image.ANTIALIAS)
    image = ImageTk.PhotoImage(pilImage)
    imagesprite = canvas.create_image(w/2, h/2, image=image)
    imagesprite.lower()
    root.update_idletasks()
    root.update()

root.bind("<Escape>", lambda e: (e.widget.withdraw(), e.widget.quit()))

try:
    showPIL(frame)
except KeyboardInterrupt:
    root.destroy
I'd like to add a button in the corner to open a settings window where I could modify the camera parameters or start one of the OpenCV modes I've been working on, but since I can't see the cursor, that doesn't work. I also tried to use the canvas function to lower the image, and tried moving it around in the code, but I think the problem is that the image keeps refreshing, or I simply did it wrong.
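As a small aside on the lowering attempt: canvas.create_image() returns an integer item id, so calling .lower() on that id will fail; lowering is done through the canvas itself, roughly like this (a sketch based on the code above):

imagesprite = canvas.create_image(w/2, h/2, image=image)
canvas.tag_lower(imagesprite)  # push the image item below other canvas items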
Is there a way with Python (maybe with OpenCV or PIL) to continuously grab frames of all or a portion of the screen, at least at 15 fps or more? I've seen it done in other languages, so in theory it should be possible.
I do not need to save the image data to a file. I actually just want it to output an array containing the raw RGB data (like in a numpy array or something) since I'm going to just take it and send it to a large LED display (probably after re-sizing it).
With all of the above solutions, I was unable to get a usable frame rate until I modified my code in the following way:
import numpy as np
import cv2
from mss import mss
from PIL import Image

bounding_box = {'top': 100, 'left': 0, 'width': 400, 'height': 300}

sct = mss()

while True:
    sct_img = sct.grab(bounding_box)
    cv2.imshow('screen', np.array(sct_img))

    if (cv2.waitKey(1) & 0xFF) == ord('q'):
        cv2.destroyAllWindows()
        break
With this solution, I easily get 20+ frames/second.
For reference, check this link: OpenCV/Numpy example with mss
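One detail worth noting: sct.grab() returns BGRA pixel data, so np.array(sct_img) has four channels. If a later step expects plain 3-channel BGR (for example a VideoWriter), a conversion along these lines (not part of the original snippet) can be added inside the loop:

frame = cv2.cvtColor(np.array(sct_img), cv2.COLOR_BGRA2BGR)  # drop the alpha channel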
There is another solution with mss which provides a much better frame rate. (Tested on a MacBook Pro with macOS Sierra.)
import numpy as np
import cv2
from mss import mss
from PIL import Image

mon = {'left': 160, 'top': 160, 'width': 200, 'height': 200}

with mss() as sct:
    while True:
        screenShot = sct.grab(mon)
        img = Image.frombytes(
            'RGB',
            (screenShot.width, screenShot.height),
            screenShot.rgb,
        )
        cv2.imshow('test', np.array(img))
        if cv2.waitKey(33) & 0xFF in (
            ord('q'),
            27,
        ):
            break
You will need to use ImageGrab from the Pillow (PIL) library and convert the capture to a numpy array. Once you have the array, you can do whatever you please with it using OpenCV. As a demonstration, I converted the capture to grayscale and used imshow().
Here is a quick code to get you started:
from PIL import ImageGrab
import numpy as np
import cv2

img = ImageGrab.grab(bbox=(100,10,400,780))  # bbox specifies the capture region as (left, top, right, bottom), measured from the top-left of the screen
img_np = np.array(img)  # this is the array obtained from the conversion
frame = cv2.cvtColor(img_np, cv2.COLOR_BGR2GRAY)
cv2.imshow("test", frame)
cv2.waitKey(0)
cv2.destroyAllWindows()
You can put this in a loop to keep capturing frames at whatever frequency you please, and then process each frame as it comes in. If you want to write the frames out as a video, don't forget to add this before the loop:
fourcc = cv2.VideoWriter_fourcc(*'XVID')
vid = cv2.VideoWriter('output.avi', fourcc, 6, (640,480))
and inside the loop you can add:
vid.write(frame) #the edited frame or the original img_np as you please
UPDATE
The end result looks something like this (if you want a continuous stream of frames, that is; storing it as a video is just a demonstration of using OpenCV on the captured screen):
from PIL import ImageGrab
import numpy as np
import cv2

while True:
    img = ImageGrab.grab(bbox=(100,10,400,780))  # bbox specifies the capture region as (left, top, right, bottom)
    img_np = np.array(img)
    frame = cv2.cvtColor(img_np, cv2.COLOR_BGR2GRAY)
    cv2.imshow("test", frame)
    cv2.waitKey(25)  # waitKey(0) would block on every frame; a short delay keeps the loop running
cv2.destroyAllWindows()
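Putting the VideoWriter fragments and the capture loop together, a rough end-to-end sketch might look like the following. Note that the frame size passed to VideoWriter has to match the captured frames (300x770 for this bbox, not the 640x480 used above), and colour frames are written here because VideoWriter expects 3-channel BGR by default; the file name and codec are just placeholders.

from PIL import ImageGrab
import numpy as np
import cv2

fourcc = cv2.VideoWriter_fourcc(*'XVID')
vid = cv2.VideoWriter('output.avi', fourcc, 6, (300, 770))  # (width, height) of the captured region

while True:
    img = ImageGrab.grab(bbox=(100, 10, 400, 780))   # (left, top, right, bottom)
    img_np = np.array(img)                           # PIL gives RGB
    frame = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)  # VideoWriter and imshow expect BGR
    vid.write(frame)
    cv2.imshow("test", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

vid.release()
cv2.destroyAllWindows()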
Hope that helps
Based on this post and other posts, I made something like this.
It takes a screenshot and writes it into a video file without saving the image to disk.
import cv2
import numpy as np
import os
import time
import pyautogui

output = "video.avi"
img = pyautogui.screenshot()
img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
# get info from img
height, width, channels = img.shape

# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter(output, fourcc, 20.0, (width, height))

while True:
    try:
        img = pyautogui.screenshot()
        image = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
        out.write(image)
        time.sleep(0.5)  # small pause between captures
    except KeyboardInterrupt:
        break

out.release()
cv2.destroyAllWindows()
You can try this code; it is working for me. I've tested it on Linux:
import numpy as np
import cv2
from mss import mss
from PIL import Image

sct = mss()

while True:
    w, h = 800, 640
    monitor = {'top': 0, 'left': 0, 'width': w, 'height': h}
    img = Image.frombytes('RGB', (w, h), sct.grab(monitor).rgb)
    cv2.imshow('test', cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR))
    if cv2.waitKey(25) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        break
Make sure that the following packages are installed:
Pillow, opencv-python, numpy, mss
You can try this:
import mss
import numpy

with mss.mss() as sct:
    monitor = {'top': 40, 'left': 0, 'width': 800, 'height': 640}
    img = numpy.array(sct.grab(monitor))
    print(img)
I tried all of the above, but none of them gave me a real-time screen update.
You can try this instead. This code is tested and works successfully, and it also gives you a good fps output. You can judge this yourself from the time each loop iteration takes.
import numpy as np
import cv2
from PIL import ImageGrab as ig
import time

last_time = time.time()
while True:
    screen = ig.grab(bbox=(50,50,800,640))
    print('Loop took {} seconds'.format(time.time()-last_time))
    cv2.imshow("test", np.array(screen))
    last_time = time.time()
    if cv2.waitKey(25) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        break
If anyone is looking for a much easier and faster way to grab the screen as frames in Python, then try the ScreenGear API from my high-performance video-processing vidgear library, in just a few lines of Python code on any machine (tested on all platforms, including Windows 10, macOS Sierra, and Linux Mint), and enjoy threaded screen-casting:
Note: It also supports multiple backends and screens out of the box.
# import required libraries
from vidgear.gears import ScreenGear
import cv2

# define dimensions of screen w.r.t to given monitor to be captured
options = {'top': 40, 'left': 0, 'width': 100, 'height': 100}

# open video stream with defined parameters
stream = ScreenGear(logging=True, **options).start()

# loop over
while True:

    # read frames from stream
    frame = stream.read()

    # check for frame if Nonetype
    if frame is None:
        break

    # {do something with the frame here}

    # Show output window
    cv2.imshow("Output Frame", frame)

    # check for 'q' key if pressed
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break

# close output window
cv2.destroyAllWindows()

# safely close video stream
stream.stop()
VidGear library Docs: https://abhitronix.github.io/vidgear
ScreenGear API: https://abhitronix.github.io/vidgear/latest/gears/screengear/overview/
More examples: https://abhitronix.github.io/vidgear/latest/gears/screengear/usage/
I've tried ImageGrab from PIL and it gave me 20 fps, which is OK, but using the win32 libraries gave me 40+ fps, which is amazing!
I used this code by Frannecklp, but it didn't work quite right, so I needed to modify it:
- First, pip install pywin32, in case you are using the libraries.
- Import the libraries like this instead:
import cv2
import numpy as np
from win32 import win32gui
from pythonwin import win32ui
from win32.lib import win32con
from win32 import win32api
For getting a single screen image, do:
from grab_screen import grab_screen
import cv2
img = grab_screen()
cv2.imshow('frame',img)
and for getting frames:
while True:
    #frame = grab_screen((0,0,100,100))
    frame = grab_screen()
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
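Since grab_screen itself isn't reproduced in this answer, here is a rough sketch of what such a pywin32 helper typically looks like (an approximation of Frannecklp's code, not a verbatim copy; adjust the import style to whichever form works for your install, as discussed above):

import cv2
import numpy as np
import win32gui, win32ui, win32con, win32api

def grab_screen(region=None):
    hwin = win32gui.GetDesktopWindow()

    if region:
        left, top, x2, y2 = region
        width = x2 - left + 1
        height = y2 - top + 1
    else:
        # capture the whole (virtual) screen
        width = win32api.GetSystemMetrics(win32con.SM_CXVIRTUALSCREEN)
        height = win32api.GetSystemMetrics(win32con.SM_CYVIRTUALSCREEN)
        left = win32api.GetSystemMetrics(win32con.SM_XVIRTUALSCREEN)
        top = win32api.GetSystemMetrics(win32con.SM_YVIRTUALSCREEN)

    # copy the screen contents into a bitmap via GDI
    hwindc = win32gui.GetWindowDC(hwin)
    srcdc = win32ui.CreateDCFromHandle(hwindc)
    memdc = srcdc.CreateCompatibleDC()
    bmp = win32ui.CreateBitmap()
    bmp.CreateCompatibleBitmap(srcdc, width, height)
    memdc.SelectObject(bmp)
    memdc.BitBlt((0, 0), (width, height), srcdc, (left, top), win32con.SRCCOPY)

    # convert the bitmap to a numpy array (BGRA layout)
    signedIntsArray = bmp.GetBitmapBits(True)
    img = np.frombuffer(signedIntsArray, dtype='uint8').reshape((height, width, 4))

    # release GDI resources
    srcdc.DeleteDC()
    memdc.DeleteDC()
    win32gui.ReleaseDC(hwin, hwindc)
    win32gui.DeleteObject(bmp.GetHandle())

    # drop the alpha channel and keep BGR so cv2.imshow displays correct colours
    return cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)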
This task is very simple with OpenCV; we are just capturing screenshots in a loop and converting them into frames. I created a timer for the screen recording: at the start you have to enter how many seconds you want to record. :) Here is the code.
import cv2
import numpy as np
import pyautogui
from win32api import GetSystemMetrics
import time

# Take resolution from system automatically
w = GetSystemMetrics(0)
h = GetSystemMetrics(1)
SCREEN_SIZE = (w, h)

fourcc = cv2.VideoWriter_fourcc(*"XVID")
out = cv2.VideoWriter("recording.mp4", fourcc, 20.0, (SCREEN_SIZE))
tim = time.time()
tp = int(input('How many seconds do you want to record the screen for?: '))
f = tim + tp  # wall-clock time at which to stop recording

while True:
    img = pyautogui.screenshot()
    frame = np.array(img)
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    out.write(frame)
    tu = time.time()
    if tu > f:
        break

cv2.destroyAllWindows()
out.release()
So that's how you can use a timer in screen recording. You don't need to use imshow(), because it would endlessly show the screen recording on screen, making the output video look weird.
I am trying to develop code that functions as a self-timer camera. The video would be shown in a window, the person's face and eyes would be continuously detected, and once the user selects a specific time, the frame at that point in time is captured. I am able to capture a frame after a certain time using the sleep function in the time module, but the video frame seems to freeze. Is there any solution such that I can continue to see the video and have the frame capture happen automatically after some delay?
I am using this code:
import numpy as np
import cv2
import time
import cv2.cv as cv

cap = cv2.VideoCapture(0)

while True:
    # Capture frame-by-frame
    ret, frame = cap.read()

    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Display the resulting frame
    cv2.imshow('frame', frame)
    #time.sleep(01)
    Capture = cv.CaptureFromCAM(0)
    time.sleep(5)
    ret, frame = cap.read()
    image = cv.QueryFrame(Capture)  # here you have an IplImage
    imgarray = np.asarray(image[:,:])  # this is the way I use to convert it to numpy array
    cv2.imshow('capImage', imgarray)
    cv2.waitKey(0)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
Can someone suggest something? Any kind of help would be appreciated.
In order to continuously view the video, you need to take the part of the code that displays the video and put it in a while loop, making sure the handle to the window is not lost. You can trigger the capture with a mouse-click event and use tick counts: take one before the start of the while loop and one inside the loop. Once the difference between the two tick counts reaches some pre-defined number of seconds, capture that frame, then break out of the while loop.
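A minimal sketch of that idea (simplified to start the timer at program start instead of on a mouse click; the 5-second delay and window names are illustrative) could look like this:

import cv2

cap = cv2.VideoCapture(0)
delay_seconds = 5
start_ticks = cv2.getTickCount()
captured = None

while True:
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imshow('frame', frame)  # live preview keeps updating
    cv2.waitKey(1)              # give the GUI a chance to refresh without blocking

    elapsed = (cv2.getTickCount() - start_ticks) / cv2.getTickFrequency()
    if elapsed >= delay_seconds:
        captured = frame.copy()  # grab the current frame once the delay has passed
        break

if captured is not None:
    cv2.imshow('capImage', captured)
    cv2.waitKey(0)

cap.release()
cv2.destroyAllWindows()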
You need to add another 'cap.read()' line when the delay ends, as this is the code that actually captures the image.
Use threading and run cv2.imshow() separately from your processing function:
import threading
import cv2

def getFrame():
    global frame
    while True:
        frame = video_capture.read()[1]

def face_analyse():
    while True:
        # do some of the operations you want
        pass

def realtime():
    while True:
        cv2.imshow('Video', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            video_capture.release()
            cv2.destroyAllWindows()
            break

if __name__ == "__main__":
    video_capture = cv2.VideoCapture(cam)  # cam is your camera index, e.g. 0
    frame = video_capture.read()[1]

    gfthread = threading.Thread(target=getFrame, args=())
    gfthread.daemon = True
    gfthread.start()

    rtthread = threading.Thread(target=realtime, args=())
    rtthread.daemon = True
    rtthread.start()

    fathread = threading.Thread(target=face_analyse, args=())
    fathread.daemon = True
    fathread.start()

    while True:  # keep the main thread alive while the daemon threads run
        pass
I found some Python code online for taking continuous screenshots with GTK, but I have a major problem. When I run my code:
import cv2
import Image
import numpy as np

def getScreenByGtk():
    import gtk.gdk
    w = gtk.gdk.get_default_root_window()
    sz = w.get_size()
    pb = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8, sz[0], sz[1])
    pb = pb.get_from_drawable(w, w.get_colormap(), 0, 0, 0, 0, sz[0], sz[1])
    if pb is None:
        return False
    else:
        width, height = pb.get_width(), pb.get_height()
        return Image.fromstring("RGB", (width, height), pb.get_pixels())

cv2.namedWindow('image', cv2.WINDOW_NORMAL)

while True:
    screen_img = getScreenByGtk()
    screen_img_array = np.asarray(screen_img)
    cv2.imshow('image', screen_img_array)
    cv2.waitKey(1)

cv2.destroyAllWindows()
It gives me images that are slanted to the left.
I've been trying to find a solution online for this but I can't.
I'm running lubuntu 12.04 in virtual box