Carrying out unit testing in Python on a method that uses ImageDraw

I am currently experimenting with the pytest module to create unit tests for a project I'm working on. I'm trying to test the 'add_point' method which draws an ellipse based on a set of pixels. What I want to do is inspect 'draw' to ensure that the ellipse has been created successfully. Unfortunately I don't know how to go about this, so any help will be appreciated. Here's my code so far:
(A) TheSlicePreviewMaker.py
import os, Image, ImageDraw, ImageFont
from json_importer import json_importer

class SlicePreviewer(object):
    def __init__(self):
        self.screen_size = (470, 470)
        self.background_colour = (86, 0, 255)
        self.platform_fill_colour = (100, 100, 100)
        self.platform_outline_colour = (0, 0, 0)
        self.platform_window = (0, 0, 469, 469)
        self.point_colour = (0, 0, 255)
        self.config_object = json_importer("ConfigFile.txt")
        self.image = None

    def initialise_image(self):
        self.image = Image.new('RGB', self.screen_size, self.background_colour)
        draw = ImageDraw.Draw(self.image)
        draw.rectangle(self.platform_window, outline=self.platform_outline_colour, fill=self.platform_fill_colour)
        del draw

    def add_point(self, px, py):
        x1 = px - 1
        y1 = py - 1
        x2 = px + 1
        y2 = py + 1
        draw = ImageDraw.Draw(self.image)
        draw.ellipse((x1, y1, x2, y2), outline=self.point_colour, fill=self.point_colour)
        return draw  # del draw

    def save_image(self, file_name):
        self.image.save(file_name, "BMP")
(B) test_TheSlicePreviewMaker.py
from TheSlicePreviewMaker import SlicePreviewer

slice_preview = SlicePreviewer()

class TestSlicePreviewer:
    def test_init(self):
        '''check that the config file object has been created on init'''
        assert slice_preview.config_object != None

    def test_initialise_image(self):
        '''verify if the image has been successfully initialised'''
        assert slice_preview.image.mode == 'RGB'

    def test_add_point(self):
        '''has the point been drawn successfully?'''
        draw = slice_preview.add_point(196, 273)
        assert something

import pytest

if __name__ == '__main__':
    pytest.main("--capture=sys -v")
Side note: I've run TheSlicePreviewMaker.py separately to check the bitmap file it produces, so I know that the code works. What I want to achieve is a unit test for this so that I don't have to go and check the bitmap by hand each time.

One approach is to manually inspect the generated image and, if it looks OK to you, save it next to the test and use an image-diffing algorithm (for example ImageChops.difference) to obtain a threshold value that you can use to make sure future test runs are still drawing the same image.
For example:
# contents of conftest.py
from PIL import ImageChops, ImageDraw, Image
import pytest
import os
import py.path
import math
import operator
from functools import reduce  # reduce is no longer a builtin in Python 3

def rms_diff(im1, im2):
    """Calculate the root-mean-square difference between two images
    Taken from: http://snipplr.com/view/757/compare-two-pil-images-in-python/
    """
    h1 = im1.histogram()
    h2 = im2.histogram()

    def mean_sqr(a, b):
        if not a:
            a = 0.0
        if not b:
            b = 0.0
        return (a - b) ** 2

    return math.sqrt(reduce(operator.add, map(mean_sqr, h1, h2)) / (im1.size[0] * im1.size[1]))

class ImageDiff:
    """Fixture used to make sure code that generates images continues to do so
    by checking the difference of the generated image against known good versions.
    """
    def __init__(self, request):
        self.directory = py.path.local(request.node.fspath.dirname) / request.node.fspath.purebasename
        self.expected_name = request.node.name + '.png'
        self.expected_filename = self.directory / self.expected_name

    def check(self, im, max_threshold=0.0):
        __tracebackhide__ = True
        local = py.path.local(os.getcwd()) / self.expected_name
        if not self.expected_filename.check(file=1):
            msg = '\nExpecting image at %s, but it does not exist.\n'
            msg += '-> Generating here: %s'
            im.save(str(local))
            pytest.fail(msg % (self.expected_filename, local))
        else:
            expected = Image.open(str(self.expected_filename))
            rms_value = rms_diff(im, expected)
            if rms_value > max_threshold:
                im.save(str(local))
                msg = '\nrms_value %s > max_threshold of %s.\n'
                msg += 'Obtained image saved at %s'
                pytest.fail(msg % (rms_value, max_threshold, str(local)))

@pytest.fixture
def image_diff(request):
    return ImageDiff(request)
Now you can use the image_diff fixture in your tests. For example:
def create_image():
    """ dummy code that generates an image, simulating some actual code """
    im = Image.new('RGB', (100, 100), (0, 0, 0))
    draw = ImageDraw.Draw(im)
    draw.ellipse((10, 10, 90, 90), outline=(0, 0, 255),
                 fill=(255, 255, 255))
    return im

def test_generated_image(image_diff):
    im = create_image()
    image_diff.check(im)
The first time you run this test, it will fail with this output:
================================== FAILURES ===================================
____________________________ test_generated_image _____________________________
image_diff = <test_foo.ImageDiff instance at 0x029ED530>
def test_generated_image(image_diff):
im = create_image()
> image_diff.check(im)
E Failed:
E Expecting image at X:\temp\sandbox\img-diff\test_foo\test_generated_image.png, but it does not exist.
E -> Generating here: X:\temp\sandbox\img-diff\test_generated_image.png
You can then manually check the image and, if everything is OK, move it to a directory with the same name as the test file, using the name of the test as the file name plus a ".png" extension. From now on, whenever the test runs it will check that the image is similar within an acceptable amount.
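For example, assuming the test above lives in test_foo.py, the layout the fixture expects (inferred from how it builds its paths) would be:
test_foo.py
test_foo/
    test_generated_image.png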
Suppose you change the code and produce a slightly different image; the test will now fail like this:
================================== FAILURES ===================================
____________________________ test_generated_image _____________________________
image_diff = <test_foo.ImageDiff instance at 0x02A4B788>
def test_generated_image(image_diff):
im = create_image()
> image_diff.check(im)
E Failed:
E rms_value 2.52 > max_threshold of 0.0.
E Obtained image saved at X:\temp\sandbox\img-diff\test_generated_image.png
test_foo.py:63: Failed
========================== 1 failed in 0.03 seconds ===========================
The code needs some polishing but should be a good start. You can find a version of this code here.
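Applied back to the original question, test_add_point could then check the drawn image rather than the returned draw object; a rough sketch (assuming the conftest.py above is in place):
# sketch only: using the image_diff fixture for the original add_point test
from TheSlicePreviewMaker import SlicePreviewer

def test_add_point(image_diff):
    previewer = SlicePreviewer()
    previewer.initialise_image()
    previewer.add_point(196, 273)
    # compared against test_TheSlicePreviewMaker/test_add_point.png once that file exists
    image_diff.check(previewer.image)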
Cheers,

Related

Search all pixels and PNGs in a single grab using ImageGrab in Python

I am having some problems with multi-processing using Xlib threads and ImageGrab.
When I run 3 or more lookups (getPixel and pyautogui's locateOnScreen) in multiple threads, it works fast and without errors initially, but 2 or 3 hours later the commands get slow and an error appears in the cmd window warning that "it can't happen!"
Here's an example of the code:
from PIL import ImageGrab, Image
from pymouse import PyMouse
from pykeyboard import PyKeyboard
import time

m = PyMouse()
k = PyKeyboard()

def start(self):
    if self.hotkeyEscolhidaManaTraining != None:
        self.isManaTrainingOn = True
    if self.isManaTrainingOn:
        self.timerstartManaTraining = Interval.set_interval(self.startManaTraining, 1)

def startManaTraining(self):
    if self.runningTimerManaTraining == True:
        self.timerstartManaTraining.cancel()
        return
    self.runningTimerManaTraining = True
    ManaTrainingBox = pyautogui.locateOnScreen('Pics/heal/manaLow.png', 0.8, True)
    startBoxLeft = ManaTrainingBox[0] + 2
    self.startBoxTopManaTraining = ManaTrainingBox[1] + 5
    pixeisTotais = 92
    if self.isManaTrainingOn:
        pixelManaTraining = (self.porcentagemEscolhidaManaTraining * pixeisTotais) / 100
        self.endBoxLeftManaTraining = startBoxLeft + pixelManaTraining
        self.timerManaTraining = Interval.set_interval(self.validateManaTraining, 0.5)

def validateManaTraining(self):
    if self.isManaTrainingOn:
        pixelRGB = ImageGrab.getPixel((self.endBoxLeftManaTraining, self.startBoxTopManaTraining))
        if not self.validatePixelsMana(pixelRGB):
            pyautogui.press(self.hotkeyEscolhidaManaTraining)
    return
and there are about 10 other blocks of code like this running in multiple threads. Is there a way to get all the pixels and PNGs from just one grab, without saving the image?
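A rough sketch of the single-grab idea being asked about (an illustration only, not from the original post; pyautogui.locate and its confidence argument require OpenCV to be installed):
# sketch: grab the screen once, then do every lookup on that in-memory image
from PIL import ImageGrab
import pyautogui

screenshot = ImageGrab.grab()                    # one screenshot, kept in memory only
pixel = screenshot.getpixel((100, 200))          # pixel checks read from the same image
box = pyautogui.locate('Pics/heal/manaLow.png', screenshot, confidence=0.8)  # search inside it, nothing saved to disk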

How to record my computer screen with high FPS?

I'm trying to add a high FPS screen recorder to my application.
I use Python 3.7 on Windows.
The modules and methods I've tried are mss (python-mss) and d3dshot, but I'm still only achieving 15-19 FPS for a long video (more than 20 seconds).
The resolution I'm recording at is 1920 x 1080.
What is the best way to optimize screen recording? I've tried to use the multiprocessing library, but it seems like it's still not fast enough. I'm not sure I'm using it in the optimal way, what are some ways I could use it to improve processing performance?
Using OBS Studio, I'm able to get 30 FPS, no matter how long the video is. My objective is to achieve the same results with my own code.
Here is what I've written so far:
from multiprocessing import Process, Queue
from time import sleep, time

import cv2
import d3dshot
import numpy as np

def grab(queue):
    d = d3dshot.create(capture_output="numpy", frame_buffer_size=500)
    d.capture()
    sleep(0.1)
    c = 0
    begin = time()
    while time() - begin < 30:
        starter = time()
        frame = d.get_latest_frame()
        queue.put(frame)
        c += 1
        ender = time()
        sleep(max(0, 1/60 - (ender - starter)))
    # Tell the other worker to stop
    queue.put(None)
    final = time()
    print(c / (final - begin))
    d.stop()

def save(queue):
    # type: (Queue) -> None
    SCREEN_SIZE = 1920, 1080
    # Define the codec and create VideoWriter object
    fourcc = cv2.VideoWriter_fourcc(*'DIVX')  # In Windows: DIVX
    out = cv2.VideoWriter(r"output.avi", fourcc, 30.0, SCREEN_SIZE)
    last_img = None
    while "there are screenshots":
        img = queue.get()
        if img is None:
            break
        if img is last_img:
            continue
        out.write(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        last_img = img

if __name__ == "__main__":
    # The screenshots queue
    queue = Queue()  # type: Queue
    # 2 processes: one for grabbing and one for saving PNG files
    Process(target=grab, args=(queue,)).start()
    Process(target=save, args=(queue,)).start()
The goal is to capture a game, while performing automated keyboard and mouse actions.
I have faced the same problem when trying to get high-speed recording for games. This was the fastest solution I was able to find for Windows. The code uses raw buffer objects and reaches around 27 FPS. I cannot find the original post on which this code is based, but if someone finds it I will add the reference.
Note that the framerate will significantly increase if you make the region smaller than 1920x1080.
"""
Alternative screen capture device, when there is no camera of webcam connected
to the desktop.
"""
import logging
import sys
import time
import cv2
import numpy as np
if sys.platform == 'win32':
import win32gui, win32ui, win32con, win32api
else:
logging.warning(f"Screen capture is not supported on platform: `{sys.platform}`")
from collections import namedtuple
class ScreenCapture:
"""
Captures a fixed region of the total screen. If no region is given
it will take the full screen size.
region_ltrb: Tuple[int, int, int, int]
Specific region that has to be taken from the screen using
the top left `x` and `y`, bottom right `x` and `y` (ltrb coordinates).
"""
__region = namedtuple('region', ('x', 'y', 'width', 'height'))
def __init__(self, region_ltrb=None):
self.region = region_ltrb
self.hwin = win32gui.GetDesktopWindow()
# Time management
self._time_start = time.time()
self._time_taken = 0
self._time_average = 0.04
def __getitem__(self, item):
return self.screenshot()
def __next__(self):
return self.screenshot()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
if exc_type and isinstance(exc_val, StopIteration):
return True
return False
@staticmethod
def screen_dimensions():
""" Retrieve total screen dimensions. """
left = win32api.GetSystemMetrics(win32con.SM_XVIRTUALSCREEN)
top = win32api.GetSystemMetrics(win32con.SM_YVIRTUALSCREEN)
height = win32api.GetSystemMetrics(win32con.SM_CYVIRTUALSCREEN)
width = win32api.GetSystemMetrics(win32con.SM_CXVIRTUALSCREEN)
return left, top, height, width
@property
def fps(self):
return int(1 / self._time_average) * (self._time_average > 0)
@property
def region(self):
return self._region
@property
def size(self):
return self._region.width, self._region.height
@region.setter
def region(self, value):
if value is None:
self._region = self.__region(*self.screen_dimensions())
else:
assert len(value) == 4, "Region requires 4 inputs: x, y of top left and x, y of bottom right."
left, top, x2, y2 = value
width = x2 - left + 1
height = y2 - top + 1
self._region = self.__region(*list(map(int, (left, top, width, height))))
def screenshot(self, color=None):
"""
Takes a part of the screen, defined by the region.
:param color: cv2.COLOR_....2...
Converts the created BGRA image to the requested image output.
:return: np.ndarray
An image of the region in BGRA values.
"""
left, top, width, height = self._region
hwindc = win32gui.GetWindowDC(self.hwin)
srcdc = win32ui.CreateDCFromHandle(hwindc)
memdc = srcdc.CreateCompatibleDC()
bmp = win32ui.CreateBitmap()
bmp.CreateCompatibleBitmap(srcdc, width, height)
memdc.SelectObject(bmp)
memdc.BitBlt((0, 0), (width, height), srcdc, (left, top), win32con.SRCCOPY)
signed_ints_array = bmp.GetBitmapBits(True)
img = np.frombuffer(signed_ints_array, dtype='uint8')
img.shape = (height, width, 4)
srcdc.DeleteDC()
memdc.DeleteDC()
win32gui.ReleaseDC(self.hwin, hwindc)
win32gui.DeleteObject(bmp.GetHandle())
# This makes sure that the FPS are taken in comparison to screenshots rates and vary only slightly.
self._time_taken, self._time_start = time.time() - self._time_start, time.time()
self._time_average = self._time_average * 0.95 + self._time_taken * 0.05
if color is not None:
return cv2.cvtColor(img, color)
return img
def show(self, screenshot=None):
""" Displays an image to the screen. """
image = screenshot if screenshot is not None else self.screenshot()
cv2.imshow('Screenshot', image)
if cv2.waitKey(1) & 0xff == ord('q'):
raise StopIteration
return image
def close(self):
""" Needs to be called before exiting when `show` is used, otherwise an error will occur. """
cv2.destroyWindow('Screenshot')
def scale(self, src: np.ndarray, size: tuple):
return cv2.resize(src, size, interpolation=cv2.INTER_LINEAR_EXACT)
def save(self, path, screenshot=None):
""" Store the current screenshot in the provided path. Full path, with img name is required.) """
image = screenshot if screenshot is not None else self.screenshot()
cv2.imwrite(filename=path, img=image)
if __name__ == '__main__':
# Example usage when displaying.
with ScreenCapture((0, 0, 1920, 1080)) as capture:
for _ in range(100):
capture.show()
print(f"\rCapture framerate: {capture.fps}", end='')
# Example usage as generator.
start_time = time.perf_counter()
for frame, screenshot in enumerate(ScreenCapture((0, 0, 1920, 1080)), start=1):
print(f"\rFPS: {frame / (time.perf_counter() - start_time):3.0f}", end='')
Edit
I noticed some small mistakes in the window show function and in the self.screenshot calls in the __getitem__ and __next__ methods. These have been resolved.
In addition to the example that uses ScreenCapture as a context manager, I added an example of using it as a generator.
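To connect this back to the original recording question, a rough sketch (an assumption, not part of the original answer) of writing ScreenCapture frames to a video file with cv2.VideoWriter:
# sketch: record frames from the ScreenCapture class above into a video file
import cv2

capture = ScreenCapture((0, 0, 1919, 1079))               # ltrb region -> 1920x1080 frames
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
out = cv2.VideoWriter('capture.avi', fourcc, 30.0, capture.size)
for _ in range(300):                                       # roughly 10 seconds of frames
    frame = capture.screenshot(color=cv2.COLOR_BGRA2BGR)   # the writer expects 3-channel BGR
    out.write(frame)
out.release()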

plt.show appears in terminal instead of IPython notebook

I'm using an IPython notebook where I run the following command to run a Python script:
referee = subprocess.Popen("/Jupyter/drone_cat_mouse/referee/referee.py /Jupyter/drone_cat_mouse/referee/referee.yml", shell=True)
The python script is the following:
#!/usr/bin/python
#This program paints a distance graph, using the parameters given by referee.cfg
#VisorPainter class re-paints on a pyplot plot and updates new data.
#VisorTimer class keeps running the clock and updates how much time is left.
#Parameters for the countdown are given to the __init__() in VisorTimer class
#Parameters for max distance and threshold are given to the __init__() in VisorPainter
import jderobot
import sys,traceback, Ice
import easyiceconfig as EasyIce
import matplotlib.pyplot as plt
import numpy as np
import random
import threading
import math
import config
import comm
from datetime import timedelta,datetime,time,date
#Install matplotlib with apt-get install python-matplotlib
import matplotlib as mpl
#Turns off the default toolbar
mpl.rcParams['toolbar'] = 'None'
class Pose:
def __init__(self,argv=sys.argv):
self.lock = threading.Lock()
self.dist=0
self.ic = None
try:
cfg = config.load(sys.argv[1])
jdrc = comm.init(cfg, 'Referee')
self.ic = jdrc.getIc()
self.properties = self.ic.getProperties()
proxyStr = jdrc.getConfig().getProperty("Referee.CatPose3D.Proxy")
self.basePoseAr = self.ic.stringToProxy(proxyStr)
if not self.basePoseAr:
raise Runtime("Cat Pose3D -> Invalid proxy")
self.poseProxy = jderobot.Pose3DPrx.checkedCast(self.basePoseAr)
print(self.poseProxy)
proxyStr = jdrc.getConfig().getProperty("Referee.MousePose3D.Proxy")
self.baseRedPoseAr = self.ic.stringToProxy(proxyStr)
self.poseRedProxy = jderobot.Pose3DPrx.checkedCast(self.baseRedPoseAr)
print(self.poseRedProxy)
if not self.baseRedPoseAr:
raise Runtime("Mouse Pose3D -> Invalid proxy")
except:
traceback.print_exc()
status = 1
def update(self):
self.lock.acquire()
self.poseAr=self.poseProxy.getPose3DData()
self.poseRed=self.poseRedProxy.getPose3DData()
self.lock.release()
return self.getDistance()
def getDistance(self):
v_d=pow(self.poseRed.x-self.poseAr.x,2)+pow(self.poseRed.y-self.poseAr.y,2)+pow(self.poseRed.z-self.poseAr.z,2)
self.dist=round(abs(math.sqrt(v_d)),4)
return self.dist
def finish(self):
if self.ic:
#Clean up
try:
self.ic.destroy()
except:
traceback.print_exc()
status = 1
class VisorPainter:
#Threshold is the line where points have a different colour
def __init__(self, threshold=7.0, max_d=20):
self.fig, self.ax = plt.subplots()
self.d = []
self.t = []
self.score=0.0
self.th = threshold
self.max_dist = max_d
self.suptitle = self.fig.suptitle('Timer is ready',fontsize=20)
self.fig.subplots_adjust(top=0.8)
self.score_text = self.ax.text((120.95), self.max_dist+1.5, 'Score: '+ str(self.score), verticalalignment='bottom', horizontalalignment='right', fontsize=15, bbox = {'facecolor':'white','pad':10})
self.drawThreshold()
self.ax.xaxis.tick_top()
self.ax.set_xlabel('Time')
self.ax.xaxis.set_label_position('top')
self.ax.set_ylabel('Distance')
# Sets time and distance axes.
def setAxes(self, xaxis=120, yaxis=None):
if (yaxis == None):
yaxis=self.max_dist
if (xaxis!=120):
self.score_text.set_x((xaxis+2.95))
self.ax.set_xlim(0.0,xaxis)
self.ax.set_ylim(yaxis,0)
# Draws the threshold line
def drawThreshold(self):
plt.axhline(y=self.th)
# Draws points. Green ones add 1 to score.
# Not in use.
def drawPoint(self,t_list,d_list):
if d<=self.th:
self.score+=1
plt.plot([t],[d], 'go', animated=True)
else:
plt.plot([t],[d], 'ro', animated=True)
# Decides if it's a Green or Red line. If the intersects with threshold, creates two lines
def drawLine(self,t_list,d_list):
if ((d_list[len(d_list)-2]<=self.th) and (d_list[len(d_list)-1]<=self.th)):
self.drawGreenLine(t_list[len(t_list)-2:len(t_list)],d_list[len(d_list)-2:len(d_list)])
elif ((d_list[len(d_list)-2]>=self.th) and (d_list[len(d_list)-1]>=self.th)):
self.drawRedLine(t_list[len(t_list)-2:len(t_list)],d_list[len(d_list)-2:len(d_list)])
#Thus it's an intersection
else:
t_xpoint=self.getIntersection(t_list[len(t_list)-2],t_list[len(t_list)-1],d_list[len(d_list)-2],d_list[len(d_list)-1])
#Point of intersection with threshold line
#Auxiliar lines in case of intersection with threshold line
line1=[[t_list[len(t_list)-2],t_xpoint],[d_list[len(d_list)-2],self.th]]
line2=[[t_xpoint,t_list[len(t_list)-1]],[self.th,d_list[len(d_list)-1]]]
self.drawLine(line1[0],line1[1])
self.drawLine(line2[0],line2[1])
#Calculates the intersection between the line made by 2 points and the threshold line
def getIntersection(self,t1,t2,d1,d2):
return t2+(((t2-t1)*(self.th-d2))/(d2-d1))
def drawGreenLine(self,t_line,d_line):
self.score+=(t_line[1]-t_line[0])
plt.plot(t_line,d_line,'g-')
def drawRedLine(self,t_line,d_line):
plt.plot(t_line,d_line,'r-')
# Updates score
def update_score(self):
if self.score <= vt.delta_t.total_seconds():
self.score_text.set_text(str('Score: %.2f secs' % self.score))
else:
self.score_text.set_text('Score: ' + str(vt.delta_t.total_seconds())+ ' secs')
#Updates timer
def update_title(self):
#self.update_score()
if vt.timeLeft() <= vt.zero_t:
vt.stopClkTimer()
self.suptitle.set_text(
str(vt.zero_t.total_seconds()))
self.ax.figure.canvas.draw()
else:
self.suptitle.set_text(str(vt.timeLeft())[:-4])
self.ax.figure.canvas.draw()
#Updates data for drawing into the graph
#The first data belongs to 0.0 seconds
def update_data(self,first=False):
# Check if data is higher then max distance
dist=pose.update()
if first:
self.t.insert(len(self.t),0.0)
else:
self.t.insert(len(self.t),(vt.delta_t-vt.diff).total_seconds())
if dist > self.max_dist :
self.d.insert(len(self.d),self.max_dist)
else:
self.d.insert(len(self.d),dist)
# self.drawPoint(self.t[len(self.t)-1],self.d[len(self.d)-1])
if len(self.t)>=2 and len(self.d)>=2:
self.drawLine(self.t,self.d)
self.update_score()
if vt.timeLeft() <= vt.zero_t:
vt.stopDataTimer()
self.update_score()
self.ax.figure.canvas.draw()
self.fig.savefig('Result_'+str(datetime.now())+'.png', bbox_inches='tight')
#https://github.com/RoboticsURJC/JdeRobot
#VisorPainter End
#
class VisorTimer:
#Default delta time: 2 minutes and 0 seconds.
#Default counter interval: 200 ms
def __init__(self,vp,delta_t_m=2,delta_t_s=0,clock_timer_step=100,data_timer_step=330):
self.delta_t = timedelta(minutes=delta_t_m,seconds=delta_t_s)
self.zero_t = timedelta(minutes=0,seconds=0,milliseconds=0)
self.final_t = datetime.now()+self.delta_t
self.diff = self.final_t-datetime.now()
vp.setAxes(xaxis=self.delta_t.seconds)
# Creates a new clock_timer object.
self.clock_timer = vp.fig.canvas.new_timer(interval=clock_timer_step)
self.data_timer = vp.fig.canvas.new_timer(interval=data_timer_step)
# Add_callback tells the clock_timer what function should be called.
self.clock_timer.add_callback(vp.update_title)
self.data_timer.add_callback(vp.update_data)
def startTimer(self):
self.clock_timer.start()
vp.update_data(first=True)
self.data_timer.start()
def stopClkTimer(self,):
self.clock_timer.remove_callback(vp.update_title)
self.clock_timer.stop()
def stopDataTimer(self):
self.data_timer.remove_callback(vp.update_data)
self.data_timer.stop()
def timeLeft(self):
self.diff=self.final_t-datetime.now()
return self.diff
#
#VisorTimer End
#
# Main
status = 0
try:
pose = Pose(sys.argv)
pose.update()
vp = VisorPainter()
vt = VisorTimer(vp)
vp.suptitle.set_text(str(vt.delta_t))
vt.startTimer()
plt.show()
pose.finish()
except:
traceback.print_exc()
status = 1
sys.exit(status)
The result should be an image shown by plt.show(), but the image does not appear in the IPython notebook; instead, the terminal shows this:
Figure(640x480)
When I use the run command in the IPython notebook:
import matplotlib
%run /Jupyter/drone_cat_mouse/referee/referee.py /Jupyter/drone_cat_mouse/referee/referee.yml
The image displays correctly that way, but not recursively, so I don't know how to do it.
Thanks for the help.
I'm really unsure what your problem is. I wrote a script that looks like this:
#! /usr/bin/env python3
# plotter.py
import sys
import matplotlib.pyplot as plt
def main(x):
plt.plot(x)
plt.show()
if __name__ == '__main__':
main([float(v) for v in sys.argv[1:]])
and then my notebook looked like this (I know I'm committing a cardinal sin of SO by posting an image of code but I think this makes things clear)
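The notebook screenshot isn't reproduced here; roughly, the cell was something along these lines (an assumption about its contents, not the original image):
%matplotlib inline
%run plotter.py 3 1 4 1 5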
What exactly doesn't work for you?

Is there a way to save turtle's drawing as an animated GIF?

I like what the turtle module does in Python and I'd like to output the entire animation of it drawing the shape. Is there a way to do this? GIF/MP4/anything that shows the animation. Note, I know that an external screen recorder will do the job, but I'm looking for a way for the turtle module to do this itself.
Make an animated GIF from Python turtle using Preview on OSX
1) Start with a working program
As obvious as that seems, don't debug your code while trying to generate the animated GIF. It should be a proper turtle program with no infinite loops that ends with mainloop(), done(), or exitonclick().
The program I'm going to use for this explanation is one I wrote for Programming Puzzles & Code Golf that draws an Icelandic flag using turtle. It's intentionally minimalist, as it is PP&CG code:
from turtle import *
import tkinter as _
_.ROUND = _.BUTT
S = 8
h = 18 * S
color("navy")
width(h)
fd(25 * S)
color("white")
width(4 * S)
home()
pu()
goto(9 * S, -9 * S)
lt(90)
pd()
fd(h)
color("#d72828")
width(S + S)
bk(h)
pu()
home()
pd()
fd(25 * S)
ht()
done()
2) Have your program save snapshots on a timed basis
Repackage your program with draw(), save() and stop() timed events roughly as follows:
from turtle import *
import tkinter as _
_.ROUND = _.BUTT

def draw():
    S = 8
    h = 18 * S
    color("navy")
    width(h)
    fd(25 * S)
    color("white")
    width(4 * S)
    home()
    pu()
    goto(9 * S, -9 * S)
    lt(90)
    pd()
    fd(h)
    color("#d72828")
    width(S + S)
    bk(h)
    pu()
    home()
    pd()
    fd(25 * S)
    ht()
    ontimer(stop, 500)  # stop the recording (1/2 second trailer)

running = True
FRAMES_PER_SECOND = 10

def stop():
    global running
    running = False

def save(counter=[1]):
    getcanvas().postscript(file="iceland{0:03d}.eps".format(counter[0]))
    counter[0] += 1
    if running:
        ontimer(save, int(1000 / FRAMES_PER_SECOND))

save()  # start the recording
ontimer(draw, 500)  # start the program (1/2 second leader)
done()
I'm using 10 frames per second (FPS) as that will match what Preview uses in a later step.
3) Run your program; quit after it completes.
Create a new, empty directory and run it from there. If all goes to plan, it should dump a series of *.eps files into the directory.
4) Load all these *.eps files into Preview
Assuming Preview is my default previewer, in Terminal.app I would simply do:
open iceland*.eps
5) Select All the PDF (formerly EPS) files in the Preview sidebar and File/Export... (not Export as PDF) as GIF
Set the export type under the Options button and save them into our temporary directory. You need to hold down the Option key when selecting a format to see the GIF choice. Pick a good screen resolution. We should now have *.gif files in our temporary directory.
Quit Preview.
6) Load all the *.gif files into Preview
open iceland*.gif
7) Merge all but first GIF file into the first GIF file
Select All the GIF files in Preview's sidebar. Unselect (Command-click) the first GIF file, e.g. iceland001.gif. Drag the selected GIF files onto the unselected GIF file. This will modify it and its name. Use File/Export... to export the modified first GIF file to a new GIF file, e.g. iceland.gif
8) This is an animated GIF!
Convince yourself by loading it into Safari, e.g.:
open -a Safari iceland.gif
9) Converting to a repeating animated GIF
For a repeating animated GIF, you'll need some external tool like ImageMagick or Gifsicle to set the loop value:
convert -loop 0 iceland.gif iceland-repeating.gif
And again convince yourself that it works:
open -a Safari iceland-repeating.gif
10) Animated GIF result. Good luck!
Main concept
Here is my solution; the steps are as follows (a minimal sketch of these steps is shown right after this list, before the full script):
Get the frames (use turtle.ontimer to repeatedly call turtle.getcanvas().postscript(file=output_file)).
Convert each EPS to PNG (turtle.getcanvas().postscript returns EPS, so you need PIL to convert EPS to PNG).
You need to download Ghostscript: https://www.ghostscript.com/download/gsdnld.html
Make a GIF from your PNG list (use PIL's Image.save(output_path, format='gif', save_all=True, append_images=..., duration=..., loop=...)).
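A minimal sketch of the idea (an illustration only; it assumes Ghostscript is installed so Pillow can open EPS files, and it converts the frames in memory instead of writing intermediate PNGs):
# minimal sketch: capture EPS frames on a timer, then build an animated GIF with Pillow
import turtle
from PIL import Image

eps_files = []
running = True

def snap(counter=[0]):
    if not running:
        return
    name = 'frame_{:03d}.eps'.format(counter[0])
    turtle.getcanvas().postscript(file=name)   # step 1: dump the current canvas as EPS
    eps_files.append(name)
    counter[0] += 1
    turtle.ontimer(snap, 100)                  # capture a frame every 100 ms

def draw():
    global running
    for _ in range(36):                        # the drawing being recorded
        turtle.forward(100)
        turtle.left(170)
    running = False                            # stop capturing once the drawing is done

snap()
turtle.ontimer(draw, 100)
turtle.done()

# steps 2 and 3: open each EPS (Pillow uses Ghostscript for this) and write an animated GIF
frames = [Image.open(f).convert('RGB') for f in eps_files]
frames[0].save('out.gif', save_all=True, append_images=frames[1:], duration=100, loop=0)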
Script
Here is my script (maybe I will publish it to PyPI if I have time...)
import turtle
import tkinter
from typing import Callable, List
from pathlib import Path
import re
import os
import sys
import functools
import PIL.Image
from PIL.PngImagePlugin import PngImageFile
from PIL.ImageFile import ImageFile
from PIL import EpsImagePlugin
def init(**options):
# download ghostscript: https://www.ghostscript.com/download/gsdnld.html
if options.get('gs_windows_binary'):
EpsImagePlugin.gs_windows_binary = options['gs_windows_binary'] # install ghostscript, otherwise->{OSError} Unable to locate Ghostscript on paths
# https://anzeljg.github.io/rin2/book2/2405/docs/tkinter/cap-join-styles.html
# change the default style of the line that made of two connected line segments
tkinter.ROUND = tkinter.BUTT # default is ROUND # https://anzeljg.github.io/rin2/book2/2405/docs/tkinter/create_line.html
def make_gif(image_list: List[Path], output_path: Path, **options):
"""
:param image_list:
:param output_path:
:param options:
- fps: Frame Per Second. Duration and FPS, choose one to give.
- duration milliseconds (= 1000/FPS ) (default is 0.1 sec)
- loop # int, if 0, then loop forever. Otherwise, it means the loop number.
:return:
"""
if not output_path.parent.exists():
raise FileNotFoundError(output_path.parent)
if not output_path.name.lower().endswith('.gif'):
output_path = output_path / Path('.gif')
image_list: List[ImageFile] = [PIL.Image.open(str(_)) for _ in image_list]
im = image_list.pop(0)
fps = options.get('fps', options.get('FPS', 10))
im.save(output_path, format='gif', save_all=True, append_images=image_list,
duration=options.get('duration', int(1000 / fps)),
loop=options.get('loop', 0))
class GIFCreator:
__slots__ = ['draw',
'__temp_dir', '__duration',
'__name', '__is_running', '__counter', ]
TEMP_DIR = Path('.') / Path('__temp__for_gif')
# The time gap that you pick image after another on the recording. i.e., If the value is low, then you can get more source image, so your GIF has higher quality.
DURATION = 100 # millisecond. # 1000 / FPS
REBUILD = True
def __init__(self, name, temp_dir: Path = None, duration: int = None, **options):
self.__name = name
self.__is_running = False
self.__counter = 1
self.__temp_dir = temp_dir if temp_dir else self.TEMP_DIR
self.__duration = duration if duration else self.DURATION
if not self.__temp_dir.exists():
self.__temp_dir.mkdir(parents=True) # True, it's ok when parents is not exists
@property
def name(self):
return self.__name
@property
def duration(self):
return self.__duration
@property
def temp_dir(self):
if not self.__temp_dir.exists():
raise FileNotFoundError(self.__temp_dir)
return self.__temp_dir
def configure(self, **options):
gif_class_members = (_ for _ in dir(GIFCreator) if not _.startswith('_') and not callable(getattr(GIFCreator, _)))
for name, value in options.items():
name = name.upper()
if name not in gif_class_members:
raise KeyError(f"'{name}' does not belong to {GIFCreator} members.")
correct_type = type(getattr(self, name))
# type check
assert isinstance(value, correct_type), TypeError(f'{name} type need {correct_type.__name__} not {type(value).__name__}')
setattr(self, '_GIFCreator__' + name.lower(), value)
def record(self, draw_func: Callable = None, **options):
"""
:param draw_func:
:param options:
- fps
- start_after: milliseconds. While waiting, white pictures will continuously generate to used as the heading image of GIF.
- end_after:
:return:
"""
if draw_func and callable(draw_func):
setattr(self, 'draw', draw_func)
if not (hasattr(self, 'draw') and callable(getattr(self, 'draw'))):
raise NotImplementedError('subclasses of GIFCreatorMixin must provide a draw() method')
regex = re.compile(fr"""{self.name}_[0-9]{{4}}""")
def wrap():
self.draw()
turtle.ontimer(self._stop, options.get('end_after', 0))
wrap_draw = functools.wraps(self.draw)(wrap)
try:
# https://blog.csdn.net/lingyu_me/article/details/105400510
turtle.reset() # Does a turtle.clear() and then resets this turtle's state (i.e. direction, position etc.)
except turtle.Terminator:
turtle.reset()
if self.REBUILD:
for f in [_ for _ in self.temp_dir.glob(f'*.*') if _.suffix.upper().endswith(('EPS', 'PNG'))]:
[os.remove(f) for ls in regex.findall(str(f)) if ls is not None]
self._start()
self._save() # init start the recording
turtle.ontimer(wrap_draw,
t=options.get('start_after', 0)) # start immediately
turtle.done()
print('convert_eps2image...')
self.convert_eps2image()
print('make_gif...')
self.make_gif(fps=options.get('fps'))
print(f'done:{self.name}')
return
def convert_eps2image(self):
"""
image extension (PGM, PPM, GIF, PNG) is all compatible with tk.PhotoImage
.. important:: you need to use ghostscript, see ``init()``
"""
for eps_file in [_ for _ in self.temp_dir.glob('*.*') if _.name.startswith(self.__name) and _.suffix.upper() == '.EPS']:
output_path = self.temp_dir / Path(eps_file.name + '.png')
if output_path.exists():
continue
im: PIL.Image.Image = PIL.Image.open(str(eps_file))
im.save(output_path, 'png')
def make_gif(self, output_name=None, **options):
"""
:param output_name: basename `xxx.png` or `xxx`
:param options:
- fps: for GIF
:return:
"""
if output_name is None:
output_name = self.__name
if not output_name.lower().endswith('.gif'):
output_name += '.gif'
image_list = [_ for _ in self.temp_dir.glob(f'{self.__name}*.*') if
(_.suffix.upper().endswith(('PGM', 'PPM', 'GIF', 'PNG')) and _.name.startswith(self.__name))
]
if not image_list:
sys.stderr.write(f'There is no image on the directory. {self.temp_dir / Path(self.__name + "*.*")}')
return
output_path = Path('.') / Path(f'{output_name}')
fps = options.get('fps', options.get('FPS'))
if fps is None:
fps = 1000 / self.duration
make_gif(image_list, output_path,
fps=fps, loop=0)
os.startfile('.') # open the output folder
def _start(self):
self.__is_running = True
def _stop(self):
print(f'finished draw:{self.name}')
self.__is_running = False
self.__counter = 1
def _save(self):
if self.__is_running:
# print(self.__counter)
output_file: Path = self.temp_dir / Path(f'{self.__name}_{self.__counter:04d}.eps')
if not output_file.exists():
turtle.getcanvas().postscript(file=output_file) # 0001.eps, 0002.eps ...
self.__counter += 1
turtle.ontimer(self._save, t=self.duration) # trigger only once, so we need to set it again.
USAGE
init(gs_windows_binary=r'C:\Program Files\gs\gs9.52\bin\gswin64c')

def your_draw_function():
    turtle.color("red")
    turtle.width(20)
    turtle.fd(40)
    turtle.color("#00ffff")
    turtle.bk(40)
    ...

# method 1: pass the draw function directly.
gif_demo = GIFCreator(name='demo')
# gif_demo.configure(duration=400)  # Optional
gif_demo.record(your_draw_function)

# method 2: use a class
# If you want to create a class, just define your draw function, and then record it.
class MyGIF(GIFCreator):
    DURATION = 200  # optional

    def draw(self):
        your_draw_function()

MyGIF(name='rectangle demo').record(
    # fps=, start_after=, end_after=  <-- optional
)
demo
init(gs_windows_binary=r'C:\Program Files\gs\gs9.52\bin\gswin64c')
class TaiwanFlag(GIFCreator):
DURATION = 200
# REBUILD = False
def __init__(self, ratio, **kwargs):
"""
ratio: 0.5 (40*60) 1 (80*120) 2 (160*240) ...
"""
self.ratio = ratio
GIFCreator.__init__(self, **kwargs)
def show_size(self):
print(f'width:{self.ratio * 120}\nheight:{self.ratio * 80}')
@property
def size(self): # w, h
return self.ratio * 120, self.ratio * 80
def draw(self):
# from turtle import *
# turtle.tracer(False)
s = self.ratio # scale
pu()
s_w, s_h = turtle.window_width(), turtle.window_height()
margin_x = (s_w - self.size[0]) / 2
home_xy = -s_w / 2 + margin_x, 0
goto(home_xy)
pd()
color("red")
width(80 * s)
fd(120 * s)
pu()
goto(home_xy)
color('blue')
goto(home_xy[0], 20 * s)
width(40 * s)
pd()
fd(60 * s)
pu()
bk((30 + 15) * s)
pd()
color('white')
width(1)
left(15)
begin_fill()
for i in range(12):
fd(30 * s)
right(150)
end_fill()
rt(15)
pu()
fd(15 * s)
rt(90)
fd(8.5 * s)
pd()
lt(90)
# turtle.tracer(True)
begin_fill()
circle(8.5 * s)
end_fill()
color('blue')
width(2 * s)
circle(8.5 * s)
# turtle.tracer(True)
turtle.hideturtle()
taiwan_flag = TaiwanFlag(2, name='taiwan')
turtle.Screen().setup(taiwan_flag.size[0] + 40, taiwan_flag.size[1] + 40) # margin = 40
# taiwan_flag.draw()
taiwan_flag.record(end_after=2500, fps=10)

mixing pixels of an image manually using python

I am trying to create an algorithm that mixes (scrambles) the pixels of an image in such a way that I can later bring the image back to how it was before, but I do not know how to do this.
I'm using Python and PIL, but I can use other libraries.
Example: [original image] scrambled to [shuffled image] and back to [original image] (the example images are not reproduced here).
Thank you.
This should do it. There's no error handling, it doesn't follow PEP 8 standards, it uses slow PIL operations and it doesn't use an argument-parsing library. I'm sure there are other bad things about it also.
It works by seeding Python's random number generator with an invariant of the image under scrambling. The hash of the size is used. Since the size doesn't change, a random sequence built on it will be the same for all images that share the same size. That sequence is used as a one-to-one mapping, therefore it's reversible.
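A tiny illustration of that idea (not part of the script below): with a fixed seed the shuffle always produces the same permutation, and a permutation can be inverted.
# sketch: a fixed seed makes the shuffle repeatable, so the mapping can be undone
import random

random.seed(hash((640, 480)))             # the "invariant": the image size
idx = list(range(5))
random.shuffle(idx)                        # the same permutation every run for this seed

inverse = [0] * len(idx)
for scrambled_pos, original_pos in enumerate(idx):
    inverse[original_pos] = scrambled_pos  # scrambled position that holds each original pixel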
The script may be invoked twice from a shell to create two images, "scrambled.png" and "unscrambled.png". "Qfhe3.png" is the source image.
python scramble.py scramble "./Qfhe3.png"
python scramble.py unscramble "./scrambled.png"
#scramble.py
from PIL import Image
import sys
import os
import random

def openImage():
    return Image.open(sys.argv[2])

def operation():
    return sys.argv[1]

def seed(img):
    random.seed(hash(img.size))

def getPixels(img):
    w, h = img.size
    pxs = []
    for x in range(w):
        for y in range(h):
            pxs.append(img.getpixel((x, y)))
    return pxs

def scrambledIndex(pxs):
    idx = list(range(len(pxs)))
    random.shuffle(idx)
    return idx

def scramblePixels(img):
    seed(img)
    pxs = getPixels(img)
    idx = scrambledIndex(pxs)
    out = []
    for i in idx:
        out.append(pxs[i])
    return out

def unScramblePixels(img):
    seed(img)
    pxs = getPixels(img)
    idx = scrambledIndex(pxs)
    out = list(range(len(pxs)))
    cur = 0
    for i in idx:
        out[i] = pxs[cur]
        cur += 1
    return out

def storePixels(name, size, pxs):
    outImg = Image.new("RGB", size)
    w, h = size
    pxIter = iter(pxs)
    for x in range(w):
        for y in range(h):
            outImg.putpixel((x, y), next(pxIter))
    outImg.save(name)

def main():
    img = openImage()
    if operation() == "scramble":
        pxs = scramblePixels(img)
        storePixels("scrambled.png", img.size, pxs)
    elif operation() == "unscramble":
        pxs = unScramblePixels(img)
        storePixels("unscrambled.png", img.size, pxs)
    else:
        sys.exit("Unsupported operation: " + operation())

if __name__ == "__main__":
    main()
