I need to take screenshots of websites for a project of mine. As development language I use Python, and to take the screenshots I use WebKit from PyQt. The script below is the code used to capture the websites (it's partially modified, but most of it is still identical to the original from webscraping.com).
Now my problem is the following:
Most of the time it works without any problems, but from time to time the following error messages appear:
QPainter::begin: Paint device returned engine == 0, type:
QPainter::setRenderHint: Painter must be active to set rendering hints
QPainter::setBrush: Painter not active
QPainter::pen: Painter not active
QPainter::setPen: Painter not active
QPainter::end: Painter not active, aborted
I've already tracked the problem down to the creation of the image:
image = QImage(self.page().viewportSize(), QImage.Format_ARGB32)
The QImage returned by this line is sometimes null; I checked this using the .isNull() method of QImage.
According to the Qt documentation this happens if there isn't enough memory to allocate a new QImage, but I still have plenty of free memory.
This behaviour occurred on both Windows and Linux, so I don't think it depends on anything OS-specific. I'm new to Qt and PyQt, so I hope someone can help me.
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtWebKit import *
import sys
import time

# ############################################################# #
# This script is based on the following script:                 #
# https://webscraping.com/blog/Webpage-screenshots-with-webkit/ #
# ############################################################# #

class Screenshot(QWebView):
    _instance = None

    @staticmethod
    def get_instance():
        # TODO: Add a lock around the if including the creation!
        if Screenshot._instance is None:
            Screenshot._instance = Screenshot()
        return Screenshot._instance

    def __init__(self):
        self.app = QApplication(sys.argv)
        QWebView.__init__(self)
        self._loaded = False
        self.loadFinished.connect(self._loadFinished)

    def capture(self, url, output_file):
        self.load(QUrl(url))
        self.wait_load()
        # set to webpage size
        frame = self.page().mainFrame()
        self.page().setViewportSize(frame.contentsSize())
        # render image
        # creating the image. Here it happens that QImage returns a 'Null'-Image
        image = QImage(self.page().viewportSize(), QImage.Format_ARGB32)
        # check if there's no image allocated
        if image.isNull():
            print 'image.isNull() is True'
        if image is None:
            print 'image is None is True'
        painter = QPainter(image)
        frame.render(painter)
        painter.end()
        print 'saving', output_file
        image.save(output_file)

    def wait_load(self, delay=0):
        # process app events until page loaded
        while not self._loaded:
            self.app.processEvents()
            time.sleep(delay)
        self._loaded = False

    def _loadFinished(self, result):
        self._loaded = True


if __name__ == '__main__':
    # a simple way to get the exceptions is to try to create multiple screenshots
    sc = Screenshot()
    for i in range(0, 25):
        sc.capture('http://google.de', str(i) + '.png')
    for i in range(25, 50):
        sc.capture('http://de.wikipedia.org', str(i) + '.png')
OK, I've tracked the problem down a bit further. It seems that the contentsSize of the QWebPage's mainFrame is sometimes (0, 0) when it is read to create the QImage.
frame = self.page().mainFrame()
self.page().setViewportSize(frame.contentsSize()) # frame.contentsSize() = (0, 0)
image = QImage(self.page().viewportSize(), QImage.Format_ARGB32) # so we're creating an image with width 0 and height 0 here
So basically the QImage is null because the size it's created with is (0, 0).
This problem can be solved by checking whether the mainFrame's contentsSize is (0, 0). If it is, outstanding events on the QApplication have to be processed until the new contentsSize is set. I'm doing this now with the following piece of code:
if frame.contentsSize().width() == 0 or frame.contentsSize().height() == 0:
    print 'ContentsSize = (w: {}, h: {})'.format(frame.contentsSize().width(), frame.contentsSize().height())
    count = 0  # used so we're not starting an infinite loop
    while (frame.contentsSize().width() == 0 or frame.contentsSize().height() == 0) and count < 5:
        count += 1
        self.app.processEvents()
        time.sleep(1)
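For completeness, here is how that guard might be integrated into capture(), as a sketch based on the snippet above, assuming the surrounding class stays as posted:

def capture(self, url, output_file):
    self.load(QUrl(url))
    self.wait_load()
    frame = self.page().mainFrame()
    # contentsSize can still be (0, 0) right after loadFinished fires,
    # so process pending events until the layout is known (5 tries max)
    count = 0
    while (frame.contentsSize().width() == 0 or
           frame.contentsSize().height() == 0) and count < 5:
        count += 1
        self.app.processEvents()
        time.sleep(1)
    self.page().setViewportSize(frame.contentsSize())
    image = QImage(self.page().viewportSize(), QImage.Format_ARGB32)
    painter = QPainter(image)
    frame.render(painter)
    painter.end()
    image.save(output_file)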
Related
I am trying to design a GUI related to my computer vision project. In it, I want to stop the web camera feed and resume it by pressing a button. I managed to stop the feed, but I cannot resume it: the camera gets turned on, but the feed is not displayed. This is the code for the program.
from PyQt5 import uic
from PyQt5 import QtCore, QtWidgets, QtGui
import cv2
import sys

class opencv_feed(QtWidgets.QMainWindow):
    def __init__(self):
        QtWidgets.QMainWindow.__init__(self)
        self.ui = uic.loadUi('../designs/design5_flexible_opencv_window2.ui', self) # change this whenever you want... keep the ui file with you
        self.resize(900, 600)
        self.worker1 = worker1() # creating an instance
        self.worker1.start()
        self.worker1.ImgUpdate.connect(self.ImageUpdateSlot)
        self.but_stop.clicked.connect(self.cancel_feed)
        self.but_resume.clicked.connect(self.resume_feed)

    def ImageUpdateSlot(self, Image):
        self.label.setPixmap(QtGui.QPixmap.fromImage(Image))

    def cancel_feed(self):
        self.worker1.stop()

    def resume_feed(self):
        self.__init__()
        #self.worker1.ImgUpdate.connect(self.ImageUpdateSlot)

class worker1(QtCore.QThread):
    ImgUpdate = QtCore.pyqtSignal(QtGui.QImage)

    #QtCore.pyqtSlot()
    def run(self): # put self in every variable to stop crashing the gui when we interact with it
        self.ThreadActive = True
        self.feed = cv2.VideoCapture(0)
        while self.ThreadActive:
            self.ret, self.frm = self.feed.read()
            if self.ret:
                self.img = cv2.cvtColor(self.frm, cv2.COLOR_BGR2RGB)
                #print(img1.shape)
                self.img = cv2.flip(self.img, 1)
                self.qtformat_conv_img = QtGui.QImage(self.img.data, self.img.shape[1], self.img.shape[0], QtGui.QImage.Format_RGB888)
                #print(self.img.shape)
                self.pic = self.qtformat_conv_img.scaled(self.img.shape[1], self.img.shape[0], QtCore.Qt.KeepAspectRatio) # keep this as an attribute, else the app stops when resizing
                self.ImgUpdate.emit(self.pic)

    def stop(self):
        self.ThreadActive = False
        self.feed.release()
        self.quit()
        #os._exit(0)

if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    wind = opencv_feed()
    wind.show()
    sys.exit(app.exec_())
Can someone explain to me what I am doing wrong?
Link to the UI file:
https://drive.google.com/file/d/1UP8RjQML1GzFA75eGURgWt4Y0o_Ip3sU/view?usp=sharing
You can only start a thread once; once it finishes, you need to create another thread object to actually run again. I would add another flag after self.ThreadActive, called something like paused, to keep the thread alive without doing anything.
#QtCore.pyqtSlot()
def run(self): # put self in every variable to stop crashing the gui when we interact with it
    self.ThreadActive = True
    self.paused = False
    self.feed = cv2.VideoCapture(0)
    while self.ThreadActive:
        if not self.paused:
            self.ret, self.frm = self.feed.read()
            if self.ret:
                self.img = cv2.cvtColor(self.frm, cv2.COLOR_BGR2RGB)
                #print(img1.shape)
                self.img = cv2.flip(self.img, 1)
                self.qtformat_conv_img = QtGui.QImage(self.img.data,
                                                      self.img.shape[1],
                                                      self.img.shape[0],
                                                      QtGui.QImage.Format_RGB888)
                #print(self.img.shape)
                self.pic = self.qtformat_conv_img.scaled(self.img.shape[1], self.img.shape[0], QtCore.Qt.KeepAspectRatio) # keep this as an attribute, else the app stops when resizing
                self.ImgUpdate.emit(self.pic)
This way, when you want to pause the thread, you can pause and unpause it using that flag.
Either that, or you need to create another instance of the worker each time. Does it work if you create the instance outside of __init__? I'm unsure what happens to the GUI if __init__ is called twice.
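If you go the recreate-the-worker route, here is a minimal sketch (reusing the names from the question; this replaces the resume_feed above rather than calling __init__ again):

def resume_feed(self):
    # a finished QThread cannot be started again, but a brand new
    # worker instance can; reconnect the signal before starting it
    self.worker1 = worker1()
    self.worker1.ImgUpdate.connect(self.ImageUpdateSlot)
    self.worker1.start()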
EDIT:
You'll also have to change the way you pause and resume:
def cancel_feed(self):
    self.worker1.paused = True

def resume_feed(self):
    self.worker1.paused = False
When I run my Python project in my IDE, the GUI and everything is responsive and works great. But when I run it as an .exe, my threading components don't work like they do in the IDE. The program's goal is to grab a live feed via RTSP and display the images using OpenCV. This is done in its own thread, shown here.
import time
import threading
import cv2
import PIL.Image

"""TODO: add docstring"""

class VideoCapture:
    def __init__(self, xmlDict=None, width=None, height=None, fps=None):
        """TODO: add docstring"""
        self.xmlDict = xmlDict
        self.width = width
        self.height = height
        self.fps = int(self.xmlDict['FPS'])
        self.running = False

        # Open the video source
        self.vid = cv2.VideoCapture(self.xmlDict['IpAddress'])
        if not self.vid.isOpened():
            raise ValueError("[MyVideoCapture] Unable to open video source", xmlDict['IpAddress'])

        # Get video source width and height
        if not self.width:
            self.width = int(self.vid.get(cv2.CAP_PROP_FRAME_WIDTH))  # convert float to int
        if not self.height:
            self.height = int(self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT))  # convert float to int
        if not self.fps:
            self.fps = int(self.vid.get(cv2.CAP_PROP_FPS))  # convert float to int

        # default values at start
        self.ret = False
        self.frame = None
        self.recording = False  # default; recording is toggled elsewhere in the full program
        self.convert_color = cv2.COLOR_BGR2RGB
        #self.convert_color = cv2.COLOR_BGR2GRAY
        self.convert_pillow = True

        # start thread
        self.running = True
        self.thread = threading.Thread(target=self.process)
        self.thread.start()

    def process(self):
        """TODO: add docstring"""
        while self.running:
            ret, frame = self.vid.read()
            if ret:
                # process image
                frame = cv2.resize(frame, (self.width, self.height))
                # it has to record before converting colors
                if self.convert_pillow:
                    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    frame = PIL.Image.fromarray(frame)
            else:
                print('[MyVideoCapture] stream end:', self.xmlDict['IpAddress'])
                # TODO: reopen stream
                self.running = False
                if self.recording:
                    self.stop_recording()
                break

            # assign new frame
            self.ret = ret
            self.frame = frame

            # sleep for next frame
            #if self.fps != "FULL":
            #    time.sleep(1/int(self.fps))
I have a button called Start that infers an image every 2 seconds and prints the label and confidence. When I do this in the .exe, the live feed and GUI freeze while inference is being made, but when I run the program in the IDE, it does not freeze. Here is the code that does this.
#Button to start inference
self.btn_snapshot = tk.Button(self.btnFrame,width = 10,height = 2, text="Start", command=lambda:threading.Thread(target = self.snapshot).start())
self.btn_snapshot.grid(row = 1,column = 0)
# snapshot function
def snapshot(self):
    self.recording = True
    while self.recording:
        filename = self.vid.snapshot()
        result = self.predictImage(filename)
        output = self.calculatePassFail(result)
        if self.manager:
            self.manager.onClick(output)
        else:
            print('something')
        time.sleep(2)
The other two methods that the snapshot function calls are predictImage and calculatePassFail:
def predictImage(self, imageName):
    onnxModel = ImageModel.load(self.xmlDict['ModelPath'])
    result = onnxModel.predict_from_file(imageName)
    return result

def calculatePassFail(self, result):
    calcResult = result.labels[0]
    self.labelName = calcResult[0]
    self.imgScore = calcResult[1] * 100
    return f"{self.labelName} with score {self.imgScore}"
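One thing worth noting in the snippet above: predictImage reloads the ONNX model on every call, so each 2-second snapshot pays the full model start-up cost. A minimal sketch of loading it once, assuming the same ImageModel API as in the question (the host class name here is made up):

class Inspector:
    """Hypothetical host class; only the model-related parts are shown."""
    def __init__(self, xmlDict):
        self.xmlDict = xmlDict
        # load the model a single time instead of once per snapshot
        self.onnxModel = ImageModel.load(self.xmlDict['ModelPath'])

    def predictImage(self, imageName):
        return self.onnxModel.predict_from_file(imageName)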
So I found a way around this; I'm not sure if it's a proper fix, but it works. For some reason, when I use PyInstaller to create the .exe with a console window, I have the issue, but when I pass the --noconsole flag to build without a console, the issue goes away and my image inference runs in its own thread like it does in my IDE. I'm not sure why, but it works.
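One possible factor: writes to the console can stall a frozen app (on Windows, for example, selecting text in a console window suspends the process writing to it). If you ever need to keep the console build, redirecting the standard streams is a conceivable mitigation; this is only a sketch, relying on the sys.frozen attribute that PyInstaller sets, and the log file name is made up:

import sys

# when running as a frozen (PyInstaller) build, send stdout/stderr
# to a file so print() never blocks on an unusable console
if getattr(sys, 'frozen', False):
    _log = open('inference.log', 'w')
    sys.stdout = _log
    sys.stderr = _log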
I am trying to start / stop a video file in tkinter on a raspberry pi, using python 3.
I need the video to start from the beginning every time an infrared sensor is LOW (broken) and stop as soon as the sensor is HIGH again. Ideally the video should be inside a tkinter canvas so that I can display other elements on the screen at the same time (for example a loading bar).
I managed to get everything running except for the video, which starts as soon as the sensor is triggered, but it freezes all other processes (for example the loading bar) and does not stop when the sensor is HIGH.
Here is a simplified (and unchecked) version of the code for you to get an idea of the general structure (the real code is much longer):
import tkinter as TK
import RPi.GPIO as GPIO
import os

GPIO.setmode(GPIO.BCM)
GPIO.setup(14, GPIO.IN)

class App:
    def __init__(self, root):
        self.root = root
        self.root.config(background = 'black', cursor = 'none')
        self.background = TK.Canvas(root, width = 1024, height = 600, bg = 'black')
        self.background.pack()
        self.ext = 0
        self.trial = 0
        self.infrared()

    def infrared(self):
        if (GPIO.input(14) == False):
            self.makebar()
            if (self.ext == 0):
                self.runvideo()
        else:
            os.system("killall omxplayer.bin")
            self.ext = 0
        self.root.after(16, self.infrared)

    def runvideo(self):
        os.system("omxplayer /home/pi/Desktop/testvideo.m4v")

    def makebar(self):
        self.stimulus_rect = TK.Canvas(self.root, width = 1024, height = 50, bg = 'white')
        if self.ext < 1000:
            self.ext = self.ext + 10
            self.stimulus_rect.create_rectangle(0, 0, self.ext, 50, fill="red")
            self.stimulus_rect.place(x=0, y=0, anchor="nw")
        else:
            self.trial = self.trial + 1
            self.ext = 0

root = TK.Tk()
App(root)
root.mainloop()
From what I was able to find online:
1) tkinter might be coupled with OpenCV to achieve this, but it does not look like installing OpenCV on the Raspberry Pi is a straightforward operation;
2) in general, options involving "os" seem bound to fail for what I want to achieve.
I couldn't find a clean way of doing this. My dream scenario would be to load the video frames one by one into a canvas at 60 Hz (the screen frequency). I would then check the sensor at exactly the same frequency and prevent the next frame from being loaded if the sensor is not broken. In pseudocode this would look like this:
def infrared(self):
    if (GPIO.input(14) == False):
        self.makebar()
        if (self.ext == 0):
            self.runvideo()
    else:
        self.video.stop
        self.ext = 0
        self.frame = 0
    self.root.after(16, self.infrared)

def runvideo(self):
    self.frame = self.frame + 1
    video.run("testvideo.m4v", self.frame)
Any idea how to achieve this in tkinter on a Raspberry Pi?
Thanks,
ant
After a week of research and trial and error, this is how I currently achieve what I needed (in pseudocode):
#### PSEUDOCODE ####
import os
from subprocess import Popen  # this library is used to open the video file

class App:
    def __init__(self, root):
        self.moviestart = False
        self.movieduration = 130000
        self.movie = "/home/pi/Desktop/test.mp4"

    def infrared(self):
        if (GPIO.input(IR) == False):
            if not self.moviestart:
                self.makevideo()  # call the function to start the video
                self.moviestart = True  # flag that the video has started
                self.moviestop = False  # flag that the video is currently playing
                self.root.after(self.movieduration,
                                self.stopvideo)  # manually call the stop video function
                # to stop the video after the video's length.
                # I could not find a more elegant way of doing this, unfortunately.
        else:
            self.clear_screen()
        self.root.after(self.refreshIR, self.infrared)

    def makevideo(self):
        # Popen will open the movie in a window of the size I specified,
        # so that other elements from tkinter can be placed on top of the movie window
        omxc = Popen(['omxplayer', self.movie, '--win', "0 30 800 450"])

    def stopvideo(self):
        self.moviestart = False  # flag that the movie has been stopped
        if (self.moviestop == False):  # check if the movie is currently playing
            try:  # this is a cheap workaround for other problems I had, do not try this at home
                os.system('killall omxplayer.bin')  # this literally kills any omxplayer
                                                    # instance currently open
                self.moviestop = True  # flag that the movie is not playing at the moment
            except:
                pass
I hope this can be useful to anybody else with similar problems. I will update the answer if I find a better solution; for now this works well enough.
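A possibly cleaner variant of the kill step: keep the Popen handle as an attribute and terminate just that player instead of killing every omxplayer instance. Whether terminate() actually stops playback depends on how omxplayer wraps its omxplayer.bin child process, so treat this as an untested sketch:

def makevideo(self):
    # keep the handle so stopvideo() can target exactly this player
    self.omxc = Popen(['omxplayer', self.movie, '--win', "0 30 800 450"])

def stopvideo(self):
    self.moviestart = False
    if (self.moviestop == False):
        self.omxc.terminate()  # ends only the player we started
        self.moviestop = True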
Currently I am working on a program to display SIP trace log files. It is written in Python 3.7, using the PyQt 5(.11.3) module to load and operate a GUI made in Qt Designer. As its main feature, it parses the SIP trace file and displays it as a sequence diagram in a QGraphicsScene with QGraphicsObjects.
My problem lies in the following: for later reference, the content of the QGraphicsScene should be saved as an image file, like .jpg or .png. In the Qt/PyQt documentation I found the useful-sounding method QGraphicsScene.render(), which renders the content of the scene through a QPainter into a paintable target such as a QImage. Over the last few days I tried a couple of approaches and sample codes found here and elsewhere, but I cannot render the scene to a QImage, much less to an image file. Since I am rather new to Python and Qt, I think I am missing some basic setting somewhere. Following is a minimal version of my code.
# -*- coding: utf8 -*-
"""Class for getting a sequence diagram of a sip traffic"""
from PyQt5.QtWidgets import *
from PyQt5 import uic
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import sys

class VoipGui(QMainWindow):
    """ Class that handles the interaction with the UI """
    def __init__(self, parent=None):
        super().__init__(parent)
        self.ui = uic.loadUi("main_window.ui", self)
        self.showMaximized()

        self.sequence_scene = QGraphicsScene()
        self.ui.graphicsView.setScene(self.sequence_scene)
        # self.sequence_scene.setSceneRect(0, 0, 990, 2048)

        # sets the spacing between nodes
        # For more than three nodes, columns should be generated in a more automatic way
        self.left_column = 51
        self.middle_column = 381
        self.right_column = 711
        self.flow_height = 60  # sets the spacing between the arrows in the flowchart
        # --------------------------------- /class init and var set -----------------------------

        self.actionOpenFile.triggered.connect(self.on_open_file)
        self.actionCloseFile.triggered.connect(self.on_close_file)
        self.actionCloseProgram.triggered.connect(self.close)
        self.actionSaveFile.triggered.connect(self.save_seq_image)
        # --------------------------------- /connecting slots and signals -----------------------

    def on_open_file(self):
        """Dummy version of the open file dialog"""
        self.draw_node(self.left_column, 5, "192.168.2.1", 10)
        self.draw_node(self.middle_column, 5, "192.168.2.22", 10)

    def on_close_file(self):
        self.ui.textBrowser.clear()
        self.sequence_scene.clear()

    def save_seq_image(self):
        """ Here lies the problem: Save the rendered sequence scene to file for later use"""
        rect_f = self.sequence_scene.sceneRect()
        # rect = self.sequence_scene.sceneRect().toRect()
        # img = QPixmap(rect.size())
        img = QImage()
        p = QPainter()
        # p.setPen(QColor(255, 255, 255))
        # p.setViewport(rect)
        painting = p.begin(img)
        self.sequence_scene.render(p, target=QRectF(img.rect()), source=rect_f)
        p.end()

        if painting:
            print("Painter init pass")
        elif not painting:
            print("Painter init fail")

        saving = img.save("save.jpg")
        if saving:
            print("Saving Pass")
        elif not saving:
            print("Saving Not Pass")

    def draw_node(self, x_pos, y_pos, ip_address, y_stops):
        """Participating devices are displayed as these nodes"""
        width = 100.0
        height = 40.0

        pc_box = QGraphicsRectItem(x_pos - 50, y_pos, width, height)
        self.sequence_scene.addItem(pc_box)

        pc_ip = QGraphicsTextItem("%s" % ip_address)
        pc_ip.setPos(x_pos - 50, y_pos)
        self.sequence_scene.addItem(pc_ip)

        node_line = QGraphicsLineItem(x_pos, y_pos + 40, x_pos, y_pos + (y_stops * self.flow_height))
        self.sequence_scene.addItem(node_line)

def show_window():
    app = QApplication(sys.argv)
    dialog = VoipGui()
    dialog.show()
    sys.exit(app.exec_())

if __name__ == "__main__":
    show_window()
The problem is simple: in render() you are indicating that the size of the target is equal to that of the QImage. And what size is the QImage? Since you are using QImage(), its size is QSize(0, 0), so the image cannot be generated. The solution is to create a QImage with a valid size:
def save_seq_image(self):
    """ Here lies the problem: Save the rendered sequence scene to file for later use"""
    rect_f = self.sequence_scene.sceneRect()
    img = QImage(QSize(640, 480), QImage.Format_RGB888)
    img.fill(Qt.white)
    p = QPainter(img)
    self.sequence_scene.render(p, target=QRectF(img.rect()), source=rect_f)
    p.end()
    saving = img.save("save.jpg")
    print("Saving Pass" if saving else "Saving Not Pass")
I ran into a problem with ScreenDC in wxPython Phoenix.
My tool is supposed to take multiple screenshots at a set interval. But whenever I use ScreenDC to grab a screenshot and save it to PNG, it works correctly only the first time. All the following times it just saves the same image as the first one. To get a new image, I have to restart the program, which is not an option in my case. I guess that whenever I call wx.ScreenDC() it gets the same image as the first time.
Ubuntu 16.04, wxPython 3.0.3 gtk3, python 3.6
The code I used:
import wx
from datetime import datetime
from time import sleep

def take_screenshot():
    screen = wx.ScreenDC()
    size = screen.GetSize()
    width = size[0]
    height = size[1]
    bmp = wx.Bitmap(width, height)
    mem = wx.MemoryDC(bmp)
    mem.Blit(0, 0, width, height, screen, 0, 0)
    bmp.SaveFile(str(datetime.now()) + '.png', wx.BITMAP_TYPE_PNG)

if __name__ == '__main__':
    app = wx.App()
    take_screenshot()
    sleep(3)
    take_screenshot()
    sleep(3)
    take_screenshot()
    sleep(3)
    take_screenshot()
Maybe there is a way to clear that first image from memory.
The only solution I found is to run a separate process, define the wx.App inside it, and perform the function there. However, that is not an option for my program.
Thanks.
UPD: It seems to be an issue with wxPython Phoenix. If you run this on wxPython Classic, everything works fine (just use EmptyBitmap, not Bitmap). Weird; I will report this issue in their repository.
I was not able to reproduce your issue in Phoenix or Classic (on Windows). I suppose what could happen is that sleep blocks the wxPython event loop. It would be good style to put long-running things in a separate thread anyway; it is painless, see below.
from threading import Thread

...

if __name__ == '__main__':
    app = wx.App()

    def payload():
        take_screenshot()
        sleep(3)
        take_screenshot()
        sleep(3)
        take_screenshot()
        sleep(3)
        take_screenshot()

    thrd = Thread(target=payload)
    thrd.start()
EDIT: As the asker pointed out, there may be thread-safety issues with the approach above. How does the version below work for you (tested on Phoenix and Classic on Windows)?
from __future__ import print_function

import wx
from datetime import datetime
from time import sleep

IS_PHOENIX = True if 'phoenix' in wx.version() else False

if IS_PHOENIX:
    EmptyBitmap = lambda *args, **kwds: wx.Bitmap(*args, **kwds)
else:
    EmptyBitmap = lambda *args, **kwds: wx.EmptyBitmap(*args, **kwds)

def take_screenshot():
    screen = wx.ScreenDC()
    size = screen.GetSize()
    width = size[0]
    height = size[1]
    bmp = EmptyBitmap(width, height)
    mem = wx.MemoryDC(bmp)
    mem.Blit(0, 0, width, height, screen, 0, 0)
    bmp.SaveFile(str(datetime.now().second) + '.png', wx.BITMAP_TYPE_PNG)

MAXPICS = 4

class testfrm(wx.Frame):
    def __init__(self, *args, **kwds):
        wx.Frame.__init__(self, *args, **kwds)
        self.tmr = wx.Timer(self, -1)
        self.countpics = 0
        self.Bind(wx.EVT_TIMER, self.ontimer, self.tmr)
        self.ontimer(None)

    def ontimer(self, evt):
        if self.countpics <= MAXPICS:
            self.tmr.Start(3000, wx.TIMER_ONE_SHOT)
            take_screenshot()
            self.countpics += 1
        else:
            self.Close()

if __name__ == '__main__':
    app = wx.App()
    frm = testfrm(None, -1, wx.version())
    app.MainLoop()