Take a screenshot via a Python script on Linux - python

I want to take a screenshot via a Python script and unobtrusively save it.
I'm only interested in a Linux solution, and it should support any X-based environment.

This works without having to use scrot or ImageMagick.
import gtk.gdk
w = gtk.gdk.get_default_root_window()
sz = w.get_size()
print "The size of the window is %d x %d" % sz
pb = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB,False,8,sz[0],sz[1])
pb = pb.get_from_drawable(w,w.get_colormap(),0,0,0,0,sz[0],sz[1])
if pb is not None:
    pb.save("screenshot.png","png")
    print "Screenshot saved to screenshot.png."
else:
    print "Unable to get the screenshot."
Borrowed from http://ubuntuforums.org/showpost.php?p=2681009&postcount=5
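On newer systems where PyGTK is no longer available, a roughly equivalent GTK 3 / PyGObject version might look like this. This is a minimal sketch, assuming the gi bindings and GDK 3 are installed; it is not part of the original answer:
import gi
gi.require_version('Gdk', '3.0')
from gi.repository import Gdk

window = Gdk.get_default_root_window()
width, height = window.get_width(), window.get_height()
# grab the whole root window into a pixbuf
pb = Gdk.pixbuf_get_from_window(window, 0, 0, width, height)
if pb is not None:
    pb.savev("screenshot.png", "png", [], [])
    print("Screenshot saved to screenshot.png.")
else:
    print("Unable to get the screenshot.")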

Just for completeness:
Xlib works too, but it's somewhat slow when capturing the whole screen:
from Xlib import display, X
import Image #PIL
W,H = 200,200
dsp = display.Display()
try:
    root = dsp.screen().root
    raw = root.get_image(0, 0, W, H, X.ZPixmap, 0xffffffff)
    image = Image.fromstring("RGB", (W, H), raw.data, "raw", "BGRX")
    image.show()
finally:
    dsp.close()
One could try to throw some type annotations into the bottleneck files in PyXlib and then compile them with Cython. That could increase the speed a bit.
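If the whole screen is wanted instead of the hard-coded 200x200 region, the root window geometry can be queried first. A small sketch using the same python-xlib API (not from the original answer):
from Xlib import display

dsp = display.Display()
geom = dsp.screen().root.get_geometry()
W, H = geom.width, geom.height  # use these in place of the hard-coded 200, 200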
Edit:
We can write the core of the function in C and then call it from Python via ctypes; here is something I hacked together:
#include <stdio.h>
#include <X11/X.h>
#include <X11/Xlib.h>
//Compile hint: gcc -shared -O3 -fPIC -Wl,-soname,prtscn -o prtscn.so prtscn.c -lX11
void getScreen(const int, const int, const int, const int, unsigned char *);
void getScreen(const int xx, const int yy, const int W, const int H, /*out*/ unsigned char * data)
{
    Display *display = XOpenDisplay(NULL);
    Window root = DefaultRootWindow(display);

    XImage *image = XGetImage(display, root, xx, yy, W, H, AllPlanes, ZPixmap);

    unsigned long red_mask   = image->red_mask;
    unsigned long green_mask = image->green_mask;
    unsigned long blue_mask  = image->blue_mask;
    int x, y;
    int ii = 0;
    for (y = 0; y < H; y++) {
        for (x = 0; x < W; x++) {
            unsigned long pixel = XGetPixel(image, x, y);
            unsigned char blue  = (pixel & blue_mask);
            unsigned char green = (pixel & green_mask) >> 8;
            unsigned char red   = (pixel & red_mask) >> 16;

            data[ii + 2] = blue;
            data[ii + 1] = green;
            data[ii + 0] = red;
            ii += 3;
        }
    }
    XDestroyImage(image);
    /* Do not XDestroyWindow() the root window -- it is not ours to destroy. */
    XCloseDisplay(display);
}
And then the python-file:
import ctypes
import os
from PIL import Image
LibName = 'prtscn.so'
AbsLibPath = os.path.dirname(os.path.abspath(__file__)) + os.path.sep + LibName
grab = ctypes.CDLL(AbsLibPath)
def grab_screen(x1, y1, x2, y2):
    w, h = x2 - x1, y2 - y1
    size = w * h
    objlength = size * 3

    # declare the C function's argument types so ctypes converts correctly
    grab.getScreen.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int,
                               ctypes.POINTER(ctypes.c_ubyte)]
    result = (ctypes.c_ubyte * objlength)()

    grab.getScreen(x1, y1, w, h, result)
    return Image.frombuffer('RGB', (w, h), result, 'raw', 'RGB', 0, 1)

if __name__ == '__main__':
    im = grab_screen(0, 0, 1440, 900)
    im.show()

Compiling all the answers into one class. It outputs a PIL image.
#!/usr/bin/env python
# encoding: utf-8
"""
screengrab.py
Created by Alex Snet on 2011-10-10.
Copyright (c) 2011 CodeTeam. All rights reserved.
"""
import sys
import os
import Image
class screengrab:
    def __init__(self):
        try:
            import gtk
        except ImportError:
            pass
        else:
            self.screen = self.getScreenByGtk

        try:
            import PyQt4
        except ImportError:
            pass
        else:
            self.screen = self.getScreenByQt

        try:
            import wx
        except ImportError:
            pass
        else:
            self.screen = self.getScreenByWx

        try:
            import ImageGrab
        except ImportError:
            pass
        else:
            self.screen = self.getScreenByPIL

    def getScreenByGtk(self):
        import gtk.gdk
        w = gtk.gdk.get_default_root_window()
        sz = w.get_size()
        pb = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8, sz[0], sz[1])
        pb = pb.get_from_drawable(w, w.get_colormap(), 0, 0, 0, 0, sz[0], sz[1])
        if pb is None:
            return False
        else:
            width, height = pb.get_width(), pb.get_height()
            return Image.fromstring("RGB", (width, height), pb.get_pixels())

    def getScreenByQt(self):
        from PyQt4.QtGui import QPixmap, QApplication
        from PyQt4.Qt import QBuffer, QIODevice
        import StringIO
        app = QApplication(sys.argv)
        buffer = QBuffer()
        buffer.open(QIODevice.ReadWrite)
        QPixmap.grabWindow(QApplication.desktop().winId()).save(buffer, 'png')
        strio = StringIO.StringIO()
        strio.write(buffer.data())
        buffer.close()
        del app
        strio.seek(0)
        return Image.open(strio)

    def getScreenByPIL(self):
        import ImageGrab
        img = ImageGrab.grab()
        return img

    def getScreenByWx(self):
        import wx
        wx.App()  # Need to create an App instance before doing anything
        screen = wx.ScreenDC()
        size = screen.GetSize()
        bmp = wx.EmptyBitmap(size[0], size[1])
        mem = wx.MemoryDC(bmp)
        mem.Blit(0, 0, size[0], size[1], screen, 0, 0)
        del mem  # Release bitmap
        #bmp.SaveFile('screenshot.png', wx.BITMAP_TYPE_PNG)
        myWxImage = wx.ImageFromBitmap(bmp)
        PilImage = Image.new('RGB', (myWxImage.GetWidth(), myWxImage.GetHeight()))
        PilImage.fromstring(myWxImage.GetData())
        return PilImage

if __name__ == '__main__':
    s = screengrab()
    screen = s.screen()
    screen.show()

This one works on X11, and perhaps on Windows too (someone, please check). Needs PyQt4:
import sys
from PyQt4.QtGui import QPixmap, QApplication
app = QApplication(sys.argv)
QPixmap.grabWindow(QApplication.desktop().winId()).save('test.png', 'png')

I have a wrapper project (pyscreenshot) for scrot, imagemagick, pyqt, wx and pygtk.
If you have one of them installed, you can use it.
All solutions from this discussion are included.
Install:
easy_install pyscreenshot
Example:
import pyscreenshot as ImageGrab
# fullscreen
im=ImageGrab.grab()
im.show()
# part of the screen
im=ImageGrab.grab(bbox=(10,10,500,500))
im.show()
# to file
ImageGrab.grab_to_file('im.png')
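If the default backend misbehaves (for example, producing black images), a specific backend can be forced. The backend keyword below is taken from the pyscreenshot documentation; treat it as an assumption if your version differs:
import pyscreenshot as ImageGrab

# force a specific backend, e.g. scrot or imagemagick
im = ImageGrab.grab(backend="scrot")
im.show()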

Cross platform solution using wxPython:
import wx
wx.App() # Need to create an App instance before doing anything
screen = wx.ScreenDC()
size = screen.GetSize()
bmp = wx.EmptyBitmap(size[0], size[1])
mem = wx.MemoryDC(bmp)
mem.Blit(0, 0, size[0], size[1], screen, 0, 0)
del mem # Release bitmap
bmp.SaveFile('screenshot.png', wx.BITMAP_TYPE_PNG)

import ImageGrab
img = ImageGrab.grab()
img.save('test.jpg','JPEG')
This requires the Python Imaging Library.
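Note that ImageGrab was historically Windows-only; newer Pillow releases also support X11 via the optional xdisplay argument. So on Linux something like this may work, assuming a sufficiently recent Pillow built with XCB support:
from PIL import ImageGrab

img = ImageGrab.grab(xdisplay=":0")  # xdisplay needs Pillow >= 7.1 with XCB
img.save('test.png')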

You can use this; it shells out to ImageMagick's import tool:
import os
os.system("import -window root screen_shot.png")

I couldn't take a screenshot on Linux with pyscreenshot or scrot because the output of pyscreenshot was just a black PNG image file.
Thankfully there is another very easy way to take a screenshot on Linux without installing anything extra. Just put the code below in your directory and run it with python demo.py:
import os
os.system("gnome-screenshot --file=this_directory.png")
There are also many options available; see gnome-screenshot --help:
Application Options:
-c, --clipboard Send the grab directly to the clipboard
-w, --window Grab a window instead of the entire screen
-a, --area Grab an area of the screen instead of the entire screen
-b, --include-border Include the window border with the screenshot
-B, --remove-border Remove the window border from the screenshot
-p, --include-pointer Include the pointer with the screenshot
-d, --delay=seconds Take screenshot after specified delay [in seconds]
-e, --border-effect=effect Effect to add to the border (shadow, border, vintage or none)
-i, --interactive Interactively set options
-f, --file=filename Save screenshot directly to this file
--version Print version information and exit
--display=DISPLAY X display to use
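Using the flags listed above, a delayed full-screen grab straight to a file could look like this. A sketch using subprocess instead of os.system; it assumes gnome-screenshot is on the PATH:
import subprocess

# take the screenshot after a 2 second delay and save it to shot.png
subprocess.run(["gnome-screenshot", "--delay=2", "--file=shot.png"])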

A bit late, but never mind; an easy one is:
import autopy
import time
time.sleep(2)
b = autopy.bitmap.capture_screen()
b.save("C:/Users/mak/Desktop/m.png")

There is a Python package for this: AutoPy.
Its bitmap module can do screen grabbing (bitmap.capture_screen).
It is multi-platform (Windows, Linux, macOS).

On Ubuntu this works for me; you can take a screenshot of a selected window with this:
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Gdk', '3.0')
from gi.repository import Gdk
from gi.repository import GdkPixbuf
import numpy as np
from Xlib.display import Display
#define the window name
window_name = 'Spotify'
#define xid of your select 'window'
def locate_window(stack, window):
    disp = Display()
    NET_WM_NAME = disp.intern_atom('_NET_WM_NAME')
    WM_NAME = disp.intern_atom('WM_NAME')
    name = []
    for i, w in enumerate(stack):
        win_id = w.get_xid()
        window_obj = disp.create_resource_object('window', win_id)
        for atom in (NET_WM_NAME, WM_NAME):
            window_name = window_obj.get_full_property(atom, 0)
            name.append(window_name.value)
    for l in range(len(stack)):
        if name[2*l] == window:
            return stack[l]
window = Gdk.get_default_root_window()
screen = window.get_screen()
stack = screen.get_window_stack()
myselectwindow = locate_window(stack,window_name)
img_pixbuf = Gdk.pixbuf_get_from_window(myselectwindow,*myselectwindow.get_geometry())
To transform the pixbuf into a NumPy array:
def pixbuf_to_array(p):
    w, h, c, r = (p.get_width(), p.get_height(), p.get_n_channels(), p.get_rowstride())
    assert p.get_colorspace() == GdkPixbuf.Colorspace.RGB
    assert p.get_bits_per_sample() == 8
    if p.get_has_alpha():
        assert c == 4
    else:
        assert c == 3
    assert r >= w * c
    a = np.frombuffer(p.get_pixels(), dtype=np.uint8)
    if a.shape[0] == w * c * h:
        return a.reshape((h, w, c))
    else:
        b = np.zeros((h, w * c), 'uint8')
        for j in range(h):
            b[j, :] = a[r*j:r*j + w*c]
        return b.reshape((h, w, c))
beauty_print = pixbuf_to_array(img_pixbuf)
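To save the result, the NumPy array can be handed to Pillow. A small sketch (not in the original answer); if the capture has an alpha channel the array will be RGBA, which Pillow also accepts:
from PIL import Image

# beauty_print is the (height, width, channels) uint8 array built above
Image.fromarray(beauty_print).save("spotify_window.png")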

From this thread:
import os
os.system("import -window root temp.png")

It's an old question. I would like to answer it using new tools.
Works with Python 3 (it should work with Python 2, but I haven't tested it) and PyQt5.
Minimal working example. Copy it into the Python shell and get the result.
from PyQt5.QtWidgets import QApplication
app = QApplication([])
screen = app.primaryScreen()
screenshot = screen.grabWindow(QApplication.desktop().winId())
screenshot.save('/tmp/screenshot.png')
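QApplication.desktop() is deprecated in recent Qt 5 releases; passing 0 to grabWindow grabs the whole screen and avoids QDesktopWidget entirely. Treat this as an assumption for your particular Qt build; a sketch:
from PyQt5.QtWidgets import QApplication

app = QApplication([])
screenshot = app.primaryScreen().grabWindow(0)  # 0 = entire screen
screenshot.save('/tmp/screenshot.png')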

Try it:
#!/usr/bin/python
import gtk.gdk
import time
import random
import socket
import fcntl
import struct
import getpass
import os
import paramiko
while 1:
    # generate a random wait time between 20 and 25 seconds
    random_time = random.randrange(20, 25)

    # wait that long before taking the next screenshot
    print "Next picture in: %.2f minutes" % (float(random_time) / 60)
    time.sleep(random_time)

    w = gtk.gdk.get_default_root_window()
    sz = w.get_size()
    print "The size of the window is %d x %d" % sz

    pb = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8, sz[0], sz[1])
    pb = pb.get_from_drawable(w, w.get_colormap(), 0, 0, 0, 0, sz[0], sz[1])

    ts = time.asctime(time.localtime(time.time()))
    date = time.strftime("%d-%m-%Y")
    timer = time.strftime("%I:%M:%S%p")
    filename = timer
    filename += ".png"

    if pb is not None:
        username = getpass.getuser()  # Get username
        newpath = r'screenshots/' + username + '/' + date  # screenshot save path
        if not os.path.exists(newpath):
            os.makedirs(newpath)
        saveas = os.path.join(newpath, filename)
        print saveas
        pb.save(saveas, "png")
    else:
        print "Unable to get the screenshot."

Related

Is there a "cli" or bash variation to process Image Forensics? Specifically, Error-Level-Analysis?

I do not know where to begin. I have tried every piece of code I could find on GitHub, and while I'm new to this, the problem stems from outdated code that won't work with newer libraries. The other issue is that while some of it has the makings of a "mock" or "imitation" ELA, it is not the real thing.
The best one I have found is from Sherloq. While I could halfway understand the simple ELA scripts I found, I am lost on this one. The reason I am asking here is that I want to understand how people are able to add very little to a Python script and turn it into a CLI command such as:
./ELA -q 95 -s 20 path\to\files*.jpg
The last attempt I made was simply to "automatically" save the image after it was loaded into the GUI. I was not successful. I used Pillow and img.save().
from time import time
import cv2 as cv
import numpy as np
from PySide2.QtWidgets import QPushButton, QVBoxLayout, QHBoxLayout, QCheckBox, QSpinBox, QLabel
from jpeg import compress_jpg
from tools import ToolWidget
from utility import elapsed_time, desaturate, create_lut
from viewer import ImageViewer
class ElaWidget(ToolWidget):
    def __init__(self, image, parent=None):
        super(ElaWidget, self).__init__(parent)

        self.quality_spin = QSpinBox()
        self.quality_spin.setRange(1, 100)
        self.quality_spin.setSuffix(self.tr(" %"))
        self.quality_spin.setToolTip(self.tr("JPEG reference quality level"))
        self.scale_spin = QSpinBox()
        self.scale_spin.setRange(1, 100)
        self.scale_spin.setSuffix(" %")
        self.scale_spin.setToolTip(self.tr("Output multiplicative gain"))
        self.contrast_spin = QSpinBox()
        self.contrast_spin.setRange(0, 100)
        self.contrast_spin.setSuffix(" %")
        self.contrast_spin.setToolTip(self.tr("Output tonality compression"))
        self.linear_check = QCheckBox(self.tr("Linear"))
        self.linear_check.setToolTip(self.tr("Linearize absolute difference"))
        self.gray_check = QCheckBox(self.tr("Grayscale"))
        self.gray_check.setToolTip(self.tr("Desaturated output"))
        default_button = QPushButton(self.tr("Default"))
        default_button.setToolTip(self.tr("Revert to default parameters"))

        params_layout = QHBoxLayout()
        params_layout.addWidget(QLabel(self.tr("Quality:")))
        params_layout.addWidget(self.quality_spin)
        params_layout.addWidget(QLabel(self.tr("Scale:")))
        params_layout.addWidget(self.scale_spin)
        params_layout.addWidget(QLabel(self.tr("Contrast:")))
        params_layout.addWidget(self.contrast_spin)
        params_layout.addWidget(self.linear_check)
        params_layout.addWidget(self.gray_check)
        params_layout.addWidget(default_button)
        params_layout.addStretch()

        self.image = image
        self.original = image.astype(np.float32) / 255
        self.compressed = None
        self.viewer = ImageViewer(self.image, self.image)
        self.default()

        self.quality_spin.valueChanged.connect(self.preprocess)
        self.scale_spin.valueChanged.connect(self.process)
        self.contrast_spin.valueChanged.connect(self.process)
        self.linear_check.stateChanged.connect(self.process)
        self.gray_check.stateChanged.connect(self.process)
        default_button.clicked.connect(self.default)

        main_layout = QVBoxLayout()
        main_layout.addLayout(params_layout)
        main_layout.addWidget(self.viewer)
        self.setLayout(main_layout)

    def preprocess(self):
        quality = self.quality_spin.value()
        self.compressed = compress_jpg(self.image, quality)
        self.process()

    def process(self):
        start = time()
        scale = self.scale_spin.value()
        contrast = int(self.contrast_spin.value() / 100 * 128)
        linear = self.linear_check.isChecked()
        grayscale = self.gray_check.isChecked()
        if not linear:
            difference = cv.absdiff(self.original, self.compressed.astype(np.float32) / 255)
            ela = cv.convertScaleAbs(cv.sqrt(difference) * 255, None, scale / 20)
        else:
            ela = cv.convertScaleAbs(cv.subtract(self.compressed, self.image), None, scale)
        ela = cv.LUT(ela, create_lut(contrast, contrast))
        if grayscale:
            ela = desaturate(ela)
        self.viewer.update_processed(ela)
        self.info_message.emit(self.tr(f"Error Level Analysis = {elapsed_time(start)}"))

    def default(self):
        self.blockSignals(True)
        self.linear_check.setChecked(False)
        self.gray_check.setChecked(False)
        self.quality_spin.setValue(75)
        self.scale_spin.setValue(50)
        self.contrast_spin.setValue(20)
        self.blockSignals(False)
        self.preprocess()
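The Sherloq widget above is tied to its GUI, but the core ELA step (re-save at a known JPEG quality, then amplify the difference) is small enough to wrap in a CLI by hand. Below is a minimal sketch, not the Sherloq implementation: it uses only Pillow, and the -q/-s flags are hypothetical names that simply mirror the example invocation from the question.
#!/usr/bin/env python
"""Minimal error-level-analysis CLI sketch (Pillow only)."""
import argparse
import glob
import io

from PIL import Image, ImageChops

def ela(path, quality, scale):
    original = Image.open(path).convert("RGB")
    # Re-compress at the reference quality and diff against the original
    buf = io.BytesIO()
    original.save(buf, "JPEG", quality=quality)
    buf.seek(0)
    recompressed = Image.open(buf)
    diff = ImageChops.difference(original, recompressed)
    # Amplify the (usually tiny) differences so they become visible
    return diff.point(lambda v: min(255, v * scale))

if __name__ == "__main__":
    p = argparse.ArgumentParser(description="Simple ELA")
    p.add_argument("-q", "--quality", type=int, default=95)
    p.add_argument("-s", "--scale", type=int, default=20)
    p.add_argument("patterns", nargs="+")
    args = p.parse_args()
    for pattern in args.patterns:
        for path in glob.glob(pattern):
            ela(path, args.quality, args.scale).save(path + ".ela.png")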

Search all pixels and PNGs in a single grab using ImageGrab in Python

I am having some problems with multi-threading and Xlib / ImageGrab.
When I put 3 or more calls to getPixel and locateOnScreen (pyautogui) in multiple threads, it works fast and without errors initially, but 2 or 3 hours later the commands slow down and an error appears in the cmd warning that "it can't happen!"
Here's an example of the code:
from PIL import ImageGrab, Image
from pymouse import PyMouse
from pykeyboard import PyKeyboard
import time
m = PyMouse()
k = PyKeyboard()
def start(self):
    if self.hotkeyEscolhidaManaTraining != None:
        self.isManaTrainingOn = True
    if self.isManaTrainingOn:
        self.timerstartManaTraining = Interval.set_interval(self.startManaTraining, 1)

def startManaTraining(self):
    if self.runningTimerManaTraining == True:
        self.timerstartManaTraining.cancel()
        return
    self.runningTimerManaTraining = True
    ManaTrainingBox = pyautogui.locateOnScreen('Pics/heal/manaLow.png', 0.8, True)
    startBoxLeft = ManaTrainingBox[0] + 2
    self.startBoxTopManaTraining = ManaTrainingBox[1] + 5
    pixeisTotais = 92
    if self.isManaTrainingOn:
        pixelManaTraining = (self.porcentagemEscolhidaManaTraining * pixeisTotais) / 100
        self.endBoxLeftManaTraining = startBoxLeft + pixelManaTraining
        self.timerManaTraining = Interval.set_interval(self.validateManaTraining, 0.5)

def validateManaTraining(self):
    if self.isManaTrainingOn:
        pixelRGB = ImageGrab.getPixel((self.endBoxLeftManaTraining, self.startBoxTopManaTraining))
        if not self.validatePixelsMana(pixelRGB):
            pyautogui.press(self.hotkeyEscolhidaManaTraining)
    return
And there are 10 other pieces of code like this running in multiple threads. Is there a way to get all the pixels and PNG matches in just one grab, without saving the image?
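A single grab per cycle can serve both the pixel checks and the template searches, since pyautogui.locate accepts an in-memory haystack image, so nothing has to touch disk. A rough sketch; the coordinates are placeholders and the confidence keyword requires OpenCV to be installed:
import pyautogui
from PIL import ImageGrab

def scan_once():
    # one round trip to the X server per cycle
    screen = ImageGrab.grab()
    # pixel checks read from the in-memory copy
    mana_pixel = screen.getpixel((100, 200))
    # template search against the same copy, no temporary file needed
    mana_box = pyautogui.locate('Pics/heal/manaLow.png', screen, confidence=0.8)
    return mana_pixel, mana_box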

Python is crashing while trying to clear combobox in pyqt5

I started today with Qt, so I don't have much experience building GUIs. My project is about creating dynamic smart combo boxes to define axes and, at the end, plotting several subplots into a figure.
I tried to create dynamic combo boxes that change every time I change the current selection in a combo box.
The big issue is that it crashes every time I try to change a selection in the combo box.
I tried to use the .clear() method, but it crashes every time I click on it.
Edit: I changed the code to make it reproducible. After clicking on "Load Files", you will see the combo boxes filled. If you then change the combo box "Choose message", for example, Python crashes.
The GUI
# --------------------------------------------------------
# ---------------------- main.py ------------------------
# --------------------------------------------------------
from PyQt5.QtWidgets import *
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.uic import loadUi
from PyQt5.QtCore import pyqtSlot
import threading , os, subprocess, importlib, platform
# Decode_class = 'MAVLinkBinFileDecoder'
# Module = importlib.import_module(Decode_class)
from matplotlib.backends.backend_qt5agg import (NavigationToolbar2QT as NavigationToolbar )
import numpy as np
import random
class MatplotlibWidget(QMainWindow):
    def __init__(self):
        QMainWindow.__init__(self)
        loadUi("gui.ui", self)
        self.setWindowTitle("PyQt5 & Matplotlib Example GUI")
        self.loadfiles = 0
        self.first_run = 0
        self.progressBar.setProperty("value", 0)
        # self.pushButton_load_files.setEnabled(False)
        self.pushButton_add_to_graph.setEnabled(False)
        self.pushButton_choose_xml_file.clicked.connect(self.open_xml_FileNamesDialog)
        self.pushButton_choose_bin_files.clicked.connect(self.open_bin_FileNamesDialog)
        self.pushButton_load_files.clicked.connect(self.load_files)
        self.comboBox_choose_file.currentIndexChanged.connect(self.selectionchange_file)
        self.comboBox_message.currentIndexChanged.connect(self.selectionchange_message)
        self.comboBox_system_id.currentIndexChanged.connect(self.selectionchange_system_id)
        self.pushButton_save_plot.clicked.connect(self.update_graph)
        self.file_to_graph_demo = [({'HEARTBEAT': [{'type': [12], 'autopilot': [0]},
                                                   {'type': [2, 2, 0], 'autopilot': [0, 0, 0]}],
                                     'CHAMBER_STATUS': [{'time_boot_ms': [1, 1, 1], 'chamber_num': [1, 2, 3]}],
                                     'ATTITUDE': [{'test': [0, 0, 0], 'check': [1, 1, 1]},
                                                  {'test': [0, 0, 0], 'check': [1, 1, 1]},
                                                  0, 0,
                                                  {'test': [0, 0, 0], 'check': [1, 1, 1]}]}, 'test')]
        self.addToolBar(NavigationToolbar(self.MplWidget.canvas, self))

    # pyqtSlot()
    def open_xml_FileNamesDialog(self):
        self.loadfiles += 1
        options = QFileDialog.Options()
        options |= QFileDialog.DontUseNativeDialog
        self.filename, _ = QFileDialog.getOpenFileNames(self, "QFileDialog.getOpenFileNames()", "",
                                                        "Xml Files (*.xml)", options=options)
        if self.loadfiles == 2:
            self.pushButton_load_files.setEnabled(True)
            self.pushButton_choose_xml_file.setVisible(False)

    # pyqtSlot()
    def open_bin_FileNamesDialog(self):
        self.loadfiles += 1
        options = QFileDialog.Options()
        options |= QFileDialog.DontUseNativeDialog
        self.list_of_file_paths, _ = QFileDialog.getOpenFileNames(self, "QFileDialog.getOpenFileNames()", "",
                                                                  "Bin Files (*.bin)", options=options)
        if self.loadfiles == 2:
            self.pushButton_load_files.setEnabled(True)
            self.pushButton_choose_bin_files.setVisible(False)

    # pyqtSlot()
    def load_files(self):
        # parse = Module.Logic_bin_to_mat_parser(self.filename[0])
        # parse.generate_dialect_from_xml()  # Run Mavgenerate xml function
        # value = 19
        # self.progressBar.setProperty("value", value)
        # self.file_to_graph = []
        # for path in self.list_of_file_paths:  # Parse and create Matlab from each bin file
        #     parse.decode_messages(path)
        #     parse.create_dictionary_of_amount_of_messages_by_type()
        #     parse.parse_bin_to_mat()
        #     self.file_to_graph.append((parse.save, parse.file_base_name))
        #     parse.convert_parse_dictionary_to_mat()
        #     value += (100 - 20) / len(self.list_of_file_paths)
        #     self.progressBar.setProperty("value", value)
        # value = 100
        # self.progressBar.setProperty("value", value)
        # self.pushButton_load_files.setVisible(False)
        # self.progressBar.setVisible(False)
        for option in self.file_to_graph_demo:
            self.comboBox_choose_file.addItem(option[1])

    # pyqtSlot()
    def selectionchange_file(self):
        self.first_run += 1
        combobox_enty = self.comboBox_choose_file.currentText()
        self.file_idx = self.comboBox_choose_file.findText(combobox_enty)
        list_of_messages = []
        for message in self.file_to_graph_demo[self.file_idx][0].items():
            list_of_messages.append(message[0])
        if self.first_run >= 1:
            self.comboBox_message.clear()
        self.comboBox_message.addItems(list_of_messages)

    # pyqtSlot()
    def selectionchange_message(self):
        self.first_run += 1
        self.combobox_entry_message = self.comboBox_message.currentText()
        self.message_idx = self.comboBox_message.findText(self.combobox_entry_message)
        list_of_system_ids = []
        count = 0
        for idx, system_id in enumerate(self.file_to_graph_demo[self.file_idx][0][self.combobox_entry_message]):
            if system_id != 0:
                count += 1
                list_of_system_ids.append(str(idx + 1))
        if self.first_run >= 2:
            self.comboBox_system_id.clear()
        self.comboBox_system_id.addItems(list_of_system_ids)

    # pyqtSlot()
    def selectionchange_system_id(self):
        self.combobox_entry_system_id = int(self.comboBox_system_id.currentText()) - 1
        self.system_id_idx = self.comboBox_system_id.findText(str(self.combobox_entry_system_id))
        for field in self.file_to_graph_demo[self.file_idx][0][self.combobox_entry_message][self.system_id_idx]:
            self.comboBox_y_axis.addItem(field)
            self.comboBox_x_axis.addItem(field)

    def update_graph(self):
        fs = 500
        f = random.randint(1, 100)
        ts = 1 / fs
        length_of_signal = 100
        t = np.linspace(0, 1, length_of_signal)
        cosinus_signal = np.cos(2 * np.pi * f * t)
        sinus_signal = np.sin(2 * np.pi * f * t)
        self.MplWidget.canvas.axes.clear()
        self.MplWidget.canvas.axes.plot(t, cosinus_signal)
        self.MplWidget.canvas.axes.plot(t, sinus_signal)
        self.MplWidget.canvas.axes.legend(('cosinus', 'sinus'), loc='upper right')
        self.MplWidget.canvas.axes.set_title('Cosinus - Sinus Signal')
        self.MplWidget.canvas.draw()

if __name__ == '__main__':
    app = QApplication([])
    app.setStyle('Fusion')
    window = MatplotlibWidget()
    window.show()
    app.exec_()
Your issue seems to be in MatplotlibWidget.selectionchange_system_id(). You are trying to cast the current text of self.comboBox_system_id to an int, but this will cause an exception when the current text can't be converted. This is the case just after self.comboBox_system_id is cleared, because at that point the current text of the combo box is an empty string. The easiest way to get around this is to test whether the current text can be cast to an integer before continuing, i.e.
def selectionchange_system_id(self):
    if self.comboBox_system_id.currentText().isdigit():
        self.combobox_entry_system_id = int(self.comboBox_system_id.currentText()) - 1
        ...
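Another common pattern when repopulating combo boxes is to block their signals while they are being cleared and refilled, so the currentIndexChanged slots never fire on the transient empty state. A sketch of that approach, not taken from the question's code:
from PyQt5.QtWidgets import QComboBox

def repopulate(combo: QComboBox, items):
    # Block signals so clear()/addItems() do not trigger the
    # currentIndexChanged slot with an empty or partial selection.
    combo.blockSignals(True)
    combo.clear()
    combo.addItems(items)
    combo.blockSignals(False)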

Python: Fastest way to take and save screenshots

I've been struggling to come up with a script that allows me to take screenshots of my desktop more than once per second. I'm using Win10.
PIL:
from PIL import ImageGrab
import time
while True:
    im = ImageGrab.grab()
    fname = "dropfolder/%s.png" % int(time.time())
    im.save(fname, 'PNG')
Results 1.01 seconds per image.
PyScreeze (https://github.com/asweigart/pyscreeze):
import pyscreeze
import time
while True:
    fname = "dropfolder/%s.png" % int(time.time())
    x = pyscreeze.screenshot(fname)
Results 1.00 seconds per image.
Win32:
import win32gui
import win32ui
import win32con
import time
w=1920 #res
h=1080 #res
while True:
    wDC = win32gui.GetWindowDC(0)
    dcObj = win32ui.CreateDCFromHandle(wDC)
    cDC = dcObj.CreateCompatibleDC()
    dataBitMap = win32ui.CreateBitmap()
    dataBitMap.CreateCompatibleBitmap(dcObj, w, h)
    cDC.SelectObject(dataBitMap)
    cDC.BitBlt((0, 0), (w, h), dcObj, (0, 0), win32con.SRCCOPY)
    fname = "dropfolder/%s.png" % int(time.time())
    dataBitMap.SaveBitmapFile(cDC, fname)
    dcObj.DeleteDC()
    cDC.DeleteDC()
    win32gui.ReleaseDC(0, wDC)
    win32gui.DeleteObject(dataBitMap.GetHandle())
Results 1.01 seconds per image.
Then I stumbled onto a thread (Fastest way to take a screenshot with python on windows) where it was suggested that gtk would yield phenomenal results.
However using gtk:
import gtk
import time
img_width = gtk.gdk.screen_width()
img_height = gtk.gdk.screen_height()
while True:
    screengrab = gtk.gdk.Pixbuf(
        gtk.gdk.COLORSPACE_RGB,
        False,
        8,
        img_width,
        img_height
    )
    fname = "dropfolder/%s.png" % int(time.time())
    screengrab.get_from_drawable(
        gtk.gdk.get_default_root_window(),
        gtk.gdk.colormap_get_system(),
        0, 0, 0, 0,
        img_width,
        img_height
    ).save(fname, 'png')
Results 2.34 seconds per image.
It seems to me like I'm doing something wrong, because people have been getting great results with gtk.
Any advice on how to speed up the process?
Thanks!
Your first solution should be giving you more than one picture per second. The problem though is that you will be overwriting any pictures that occur within the same second, i.e. they will all have the same filename. To get around this you could create filenames that include 10ths of a second as follows:
from PIL import ImageGrab
from datetime import datetime
while True:
    im = ImageGrab.grab()
    dt = datetime.now()
    fname = "pic_{}.{}.png".format(dt.strftime("%H%M_%S"), dt.microsecond // 100000)
    im.save(fname, 'png')
On my machine, this gave the following output:
pic_1143_24.5.png
pic_1143_24.9.png
pic_1143_25.3.png
pic_1143_25.7.png
pic_1143_26.0.png
pic_1143_26.4.png
pic_1143_26.8.png
pic_1143_27.2.png
In case anyone cares in 2022: you can try my newly created project DXcam. I think for raw speed it's the fastest out there (in Python, without going too deep into the rabbit hole). It was originally created for a deep learning pipeline for FPS games, where the higher the FPS the better. Plus I am trying to design it to be user-friendly:
For a screenshot just do
import dxcam
camera = dxcam.create()
frame = camera.grab() # full screen
frame = camera.grab(region=(left, top, right, bottom)) # region
For screen capturing:
camera.start(target_fps=60)  # threaded
for i in range(1000):
    image = camera.get_latest_frame()  # Will block until new frame available
camera.stop()
I copied this part of the benchmarks section from the readme:

              DXcam     python-mss   D3DShot
Average FPS   238.79    75.87        118.36
Std Dev       1.25      0.5447       0.3224

The benchmark was conducted over 5 trials on my 240 Hz monitor with a constant 240 Hz rendering rate synced with the monitor (using the Blur Busters UFO test).
You can read more about the details here: https://github.com/ra1nty/DXcam
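python-mss, which shows up in the benchmark above, is also very fast and pure Python; a minimal sketch (not from the original answer, assuming the mss package is installed):
import mss
import mss.tools

with mss.mss() as sct:
    monitor = sct.monitors[1]   # index 1 is the first physical monitor
    img = sct.grab(monitor)     # raw BGRA pixels
    mss.tools.to_png(img.rgb, img.size, output="screenshot.png")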
This solution uses d3dshot.
def d3dgrab(rect=(0, 0, 0, 0), spath=r".\\pictures\\cache\\", sname="", title=""):
    """ take a screenshot by rect. """
    sname = sname if sname else time.strftime("%Y%m%d%H%M%S000.jpg", time.localtime())
    while os.path.isfile("%s%s" % (spath, sname)):
        sname = "%s%03d%s" % (sname[:-7], int(sname[-7:-4]) + 1, sname[-4:])
    xlen = win32api.GetSystemMetrics(win32con.SM_CXSCREEN)
    ylen = win32api.GetSystemMetrics(win32con.SM_CYSCREEN)
    assert 0 <= rect[0] <= xlen and 0 <= rect[2] <= xlen, ValueError("Illegal value of X coordinate in rect: %s" % rect)
    assert 0 <= rect[1] <= ylen and 0 <= rect[3] <= ylen, ValueError("Illegal value of Y coordinate in rect: %s" % rect)
    if title:
        hdl = win32gui.FindWindow(None, title)
        if hdl != win32gui.GetForegroundWindow():
            win32gui.SetForegroundWindow(hdl)
        rect = win32gui.GetWindowRect(hdl)
    elif not sum(rect):
        rect = (0, 0, xlen, ylen)
    d = d3dshot.create(capture_output="numpy")
    return d.screenshot_to_disk(directory=spath, file_name=sname, region=rect)
I think this part may help:
sname = sname if sname else time.strftime("%Y%m%d%H%M%S000.jpg", time.localtime())
while os.path.isfile("%s%s" % (spath, sname)):
    sname = "%s%03d%s" % (sname[:-7], int(sname[-7:-4]) + 1, sname[-4:])
And it's the fastest way to take a screenshot that I have found.

PyOpenCL wrong output image

I am trying to put a gradient on an image, and that part works. The CPU and GPU programs should do the same thing. My problem is with the output images: the GPU code gives me a different image than the CPU code, and I don't know where the mistake is. I think the CPU code is fine but the GPU code is not. Output images: original, CPU, GPU. Please check my code. Thanks.
import pyopencl as cl
import sys
import Image
import numpy
from time import time
def gpu_gradient():
    if len(sys.argv) != 3:
        print "USAGE: " + sys.argv[0] + " <inputImageFile> <outputImageFile>"
        return 1

    # create context and command queue
    ctx = cl.create_some_context()
    queue = cl.CommandQueue(ctx)

    # load image
    im = Image.open(sys.argv[1])
    if im.mode != "RGBA":
        im = im.convert("RGBA")
    imgSize = im.size
    buffer = im.tostring()  # len(buffer) = imgSize[0] * imgSize[1] * 4

    # Create output image object
    clImageFormat = cl.ImageFormat(cl.channel_order.RGBA,
                                   cl.channel_type.UNSIGNED_INT8)
    input_image = cl.Image(ctx,
                           cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
                           clImageFormat,
                           imgSize,
                           None,
                           buffer)
    output_image = cl.Image(ctx,
                            cl.mem_flags.WRITE_ONLY,
                            clImageFormat,
                            imgSize)

    # load the kernel source code
    kernelFile = open("gradient.cl", "r")
    kernelSrc = kernelFile.read()

    # Create OpenCL program
    program = cl.Program(ctx, kernelSrc).build()

    # Call the kernel directly
    globalWorkSize = (imgSize[0], imgSize[1])
    gpu_start_time = time()
    program.gradientcover(queue,
                          globalWorkSize,
                          None,
                          input_image,
                          output_image)

    # Read the output buffer back to the Host
    buffer = numpy.zeros(imgSize[0] * imgSize[1] * 4, numpy.uint8)
    origin = (0, 0, 0)
    region = (imgSize[0], imgSize[1], 1)
    cl.enqueue_read_image(queue, output_image,
                          origin, region, buffer).wait()

    # Save the image to disk
    gsim = Image.fromstring("RGBA", imgSize, buffer.tostring())
    gsim.save("GPU_" + sys.argv[2])
    gpu_end_time = time()
    print("GPU Time: {0} s".format(gpu_end_time - gpu_start_time))

def cpu_gradient():
    if len(sys.argv) != 3:
        print "USAGE: " + sys.argv[0] + " <inputImageFile> <outputImageFile>"
        return 1

    gpu_start_time = time()
    im = Image.open(sys.argv[1])
    if im.mode != "RGBA":
        im = im.convert("RGBA")
    pixels = im.load()
    for i in range(im.size[0]):
        for j in range(im.size[1]):
            RGBA = pixels[i, j]
            RGBA2 = RGBA[0], RGBA[1], 0, 0
            pixel = RGBA[0] + RGBA2[0], RGBA[1] + RGBA2[1], RGBA[2], RGBA[3]
            final_pixels = list(pixel)
            if final_pixels[0] > 255:
                final_pixels[0] = 255
            elif final_pixels[1] > 255:
                final_pixels[1] = 255
            pixel = tuple(final_pixels)
            pixels[i, j] = pixel
    im.save("CPU_" + sys.argv[2])
    gpu_end_time = time()
    print("CPU Time: {0} s".format(gpu_end_time - gpu_start_time))

cpu_gradient()
gpu_gradient()
Kernel code:
const sampler_t sampler = CLK_NORMALIZED_COORDS_FALSE |
                          CLK_ADDRESS_CLAMP |
                          CLK_FILTER_NEAREST;

__kernel void gradientcover(read_only image2d_t srcImg,
                            write_only image2d_t dstImg)
{
    int2 coord = (int2) (get_global_id(0), get_global_id(1));
    uint4 pixel = read_imageui(srcImg, sampler, coord);
    uint4 pixel2 = (uint4)(coord.x, coord.y, 0, 0);
    pixel = pixel + pixel2;

    if (pixel.x > 255) pixel.x = 255;
    if (pixel.y > 255) pixel.y = 255;

    // Write the output value to image
    write_imageui(dstImg, coord, pixel);
}
Your CL and Python code do not do the same thing!
RGBA= pixels[i,j]
RGBA2=RGBA[0],RGBA[1],0,0
pixel=RGBA[0]+RGBA2[0],RGBA[1]+RGBA2[1],RGBA[2],RGBA[3]
adds the RG component to the pixel.
uint4 pixel = read_imageui(srcImg, sampler, coord);
uint4 pixel2 = (uint4)(coord.x, coord.y,0,0);
pixel=pixel + pixel2;
adds the X, Y from the coordinates to the pixel.
It is highly likely that this is the cause of the difference between your results.
Assuming (from the description) that you want to darken/lighten the image by coordinates, I'd suggest the Python code should be:
RGBA = pixels[i,j]
RGBA2 = i, j, 0, 0
instead.
