Python ImageTk.PhotoImage slow loading of PNG with alpha channel

I wrote a little program to load and show images.
Everything worked fine until I got to
the PNG file in the example (shade.png).
It takes seven seconds to show this little pic.
Is there a bug in PhotoImage, or did I miss some
parameter?
Here is my example code:
from PIL import Image, ImageTk
from six.moves import tkinter
import time
start = time.process_time()
print(time.process_time() - start)
root = tkinter.Tk()
img = Image.open('shade.png')
img = img.convert("RGBA") # make sure it has an alpha channel
print(time.process_time() - start, "after image.open")
img = ImageTk.PhotoImage(img)
print(time.process_time() - start, "after PhotoImage")

In my research I found out that this
is an old, known bug in Tk, specifically in Tk_PhotoPutBlock:
https://core.tcl-lang.org/tk/tktview/b9827ece14da1cd186c5816dc45abc29cf9b8a9c
Since I found a workaround that helps in some cases,
I want to post it here for further testing
and bug hunting. Both masking functions below replace fully
transparent pixels (alpha 0) with almost fully transparent ones
(alpha 1), which appears to avoid the slow path in Tk.
import time  # only to time the steps
from PIL import Image, ImageTk, ImageDraw
from six.moves import tkinter

HAVE_PIC = "no"  # "yes" # or let the script create a test pic if you have none

root = tkinter.Tk()

def masking_1(image):  # faster
    mask = image.copy()
    mask.putalpha(1)
    mask.paste(image, (0, 0), image)
    # img.paste(img, (0, 0), mask)
    image = mask.copy()
    return image

def masking_2(image):  # better quality
    datas = image.getdata()
    newData = []
    for item in datas:
        if item[3] == 0:
            newData.append((item[0], item[1], item[2], 1))
        else:
            newData.append(item)
    image.putdata(newData)
    return image

def image_draw():  # creates the test pic
    width = 300  # the bigger, the slower
    height = 300
    colour = "green"  # "#519ae7"
    image = Image.new('RGBA', (width, height))
    imd = ImageDraw.Draw(image, 'RGBA')
    y = 0
    while y < height:
        x = y % 2
        while x < width:
            imd.point((x, y), colour)
            x += 2
        y += 1
    return image

if HAVE_PIC == "yes":
    img = Image.open('shade.png')  # insert your problem pic
    img = img.convert("RGBA")  # make sure it has an alpha channel
else:
    img = image_draw()  # creates the test pic

start = time.process_time()
print(time.process_time() - start, "after image.open")
# img.show()  # to show that it is the same image
photo_image = ImageTk.PhotoImage(img, master=root)  # too slow
print(time.process_time() - start, "after PhotoImage")

start = time.process_time()
print(time.process_time() - start, "before masking_1")
pic = masking_1(img)
# pic.show()
photo_image = ImageTk.PhotoImage(pic, master=root)
print(time.process_time() - start, "after masking_1")

start = time.process_time()
print(time.process_time() - start, "before masking_2")
pic = masking_2(img)
# pic.show()
photo_image = ImageTk.PhotoImage(pic, master=root)
print(time.process_time() - start, "after masking_2")
Output on my PC:
0.0 after image.open
9.734375 after PhotoImage
0.0 before masking_1
0.0 after masking_1
0.0 before masking_2
0.03125 after masking_2

Related

Switching 'Variables Dependencies' (Constants and Parameters) in Script involving Moviepy, PIL -ImageDraw based on pos, to W/H based on image drawn

I have edited some code, shown here:
import moviepy.editor as mp
import numpy as np
import tempfile
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw, ImageFont, ImageOps
from textwrap import wrap

# Constants
WIDTH = 1800
HEIGHT = 1000
FONT_SIZE_USER_INFO = int(WIDTH * 0.05)
FONT_SIZE_TEXT = int(WIDTH * 0.044)
FONT_USER_INFO = ImageFont.truetype(
    "arial.ttf", FONT_SIZE_USER_INFO, encoding="utf-8")
FONT_TEXT = ImageFont.truetype("arial.ttf", FONT_SIZE_TEXT, encoding="utf-8")
COLOR_BG = 'white'
COLOR_NAME = 'black'
COLOR_TAG = (64, 64, 64)
COLOR_TEXT = 'black'
COORD_PHOTO = (round(WIDTH/6), round(HEIGHT/6))
COORD_NAME = (round(WIDTH/2.9), round(HEIGHT/4.1))
COORD_TAG = (round(WIDTH/2.9), round(HEIGHT/3))
COORD_TEXT = (round(WIDTH/6), round(HEIGHT/2))
LINE_MARGIN = 15
user_name = "José Fernando Costa"
user_tag = "#soulsinporto"
user_pic = 'avortr.png'
text = "Go out there and do some fun shit, not because it makes money, but because it is fun for you!"
video_name = "tw1.mp4"
PHOTO_SIZE = int(WIDTH * 0.15)

def create_frame(t):
    # Break the text string into smaller strings, each having a maximum of
    # 37 characters (i.e. create the lines of text for the image)
    text_string_lines = wrap(text[:int(t*len(text))], 37)
    x = COORD_TEXT[0]
    y = COORD_TEXT[1]
    temp_img = Image.new('RGB', (0, 0))
    temp_img_draw_interf = ImageDraw.Draw(temp_img)
    line_height = [
        temp_img_draw_interf.textsize(text_string_lines[i], font=FONT_TEXT)[1]
        for i in range(len(text_string_lines))
    ]
    img = Image.new('RGB', (WIDTH, HEIGHT), color=COLOR_BG)
    draw_interf = ImageDraw.Draw(img)
    photo = Image.open(user_pic)
    photo_width, photo_height = photo.size
    photo = photo.resize((PHOTO_SIZE, PHOTO_SIZE), Image.ANTIALIAS)
    img.paste(photo, COORD_PHOTO)
    draw_interf.text(COORD_NAME, user_name,
                     font=FONT_USER_INFO, fill=COLOR_NAME)
    draw_interf.text(COORD_TAG, user_tag, font=FONT_USER_INFO, fill=COLOR_TAG)
    for i in range(len(text_string_lines)):
        draw_interf.text(
            (x, y), text_string_lines[i], font=FONT_TEXT, fill=COLOR_TEXT)
        y += line_height[i] + LINE_MARGIN
    img_np = np.array(img)
    return img_np
This bottom part converts the frames into an mp4:
animation = mp.VideoClip(create_frame, duration=len(text)/60).set_fps(60)
animation.write_videofile(video_name)
This is a revised version of code from 'quotespy', and it produces a working result.
However, I want to edit the code so that the only parameter I have to define is the text itself, with all the other variables and current constants derived from the text: its length and height.
I've tried reworking the code so that it draws a temp image first, but I can't seem to return the text width and height as global variables to define the final image WIDTH and HEIGHT. I figured the best way is to draw the temp image first, to define the line_height and the line_margin, but it doesn't seem to work.
What I am expecting is:
Draw a temp image of 'text' where, depending on the length of the text (with line breaks etc.), it produces two variables: line-width and line-height.
Based on these, coord_photo, coord_username, and coord_usertag will be positioned above the text box.
After that, the final image's WIDTH and HEIGHT will be defined as:
WIDTH = line-width + 100 px left + 100 px right
HEIGHT = line-height + 200 px top + 200 px bottom
I'm not sure how to restructure the code so that it doesn't turn into a looping chain of function calls.
I hope someone can help me with my code! Much appreciated.
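As a rough illustration of that two-pass idea (a sketch of my reading of the question, not a drop-in fix; the 37-character wrap, font size, and line margin are taken from the constants above, while the margins follow the numbers just described):
from PIL import Image, ImageDraw, ImageFont
from textwrap import wrap

FONT_TEXT = ImageFont.truetype("arial.ttf", 79, encoding="utf-8")  # int(1800 * 0.044), as above
LINE_MARGIN = 15
text = "Go out there and do some fun shit, not because it makes money, but because it is fun for you!"

# Pass 1: measure the wrapped text on a throwaway image
lines = wrap(text, 37)
measurer = ImageDraw.Draw(Image.new('RGB', (1, 1)))
sizes = [measurer.textsize(line, font=FONT_TEXT) for line in lines]
text_width = max(w for w, h in sizes)
text_height = sum(h for w, h in sizes) + LINE_MARGIN * (len(lines) - 1)

# Pass 2: derive the canvas size and text coordinates from the measurements
WIDTH = text_width + 100 + 100    # 100 px margins left and right
HEIGHT = text_height + 200 + 200  # 200 px margins top and bottom
COORD_TEXT = (100, 200)           # the text block starts just inside the margins
print(WIDTH, HEIGHT)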

How to show a swipe transition with tkinter?

I'm creating a project with a swipe transition between two images. I'm using the tkinter library, and I'm stuck.
Here is my current fragment:
import tkinter as tk
from PIL import ImageTk, Image

root = tk.Tk()
image1 = Image.open('name.jpg')
image2 = Image.open('name2.jpg')
width = min(image1.width, image2.width)
height = min(image1.height, image2.height)
image1 = image1.resize((width, height), Image.ANTIALIAS)
image2 = image2.resize((width, height), Image.ANTIALIAS)

def Swipe(image1, image2, end):
    new_image = Image.new('RGB', (image1.width, image1.height), color='white')
    for y in range(image1.height):
        for x in range(image1.width):
            if y <= end:
                new_image[x][y] = image1[x][y]
            else:
                new_image[x][y] = image2[x][y]
    return new_image

for i in range(image1.height):
    got = Swipe(image1, image2, i)
But I get the error 'Image' object is not subscriptable. Also, how do I realize this transition in the root window? Maybe someone could help?
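For what it's worth, here is a sketch of one way around that error (my own suggestion, not from the thread; it assumes the same name.jpg and name2.jpg files, resized to the same size as above). PIL images are not subscriptable, but Image.crop() and Image.paste() can copy whole regions at once, and root.after() can drive the animation in the root window:
import tkinter as tk
from PIL import Image, ImageTk

root = tk.Tk()
image1 = Image.open('name.jpg')
image2 = Image.open('name2.jpg')
label = tk.Label(root)
label.pack()

def swipe_frame(end):
    """Build a frame showing the top `end` rows of image1 over image2."""
    frame = image2.copy()
    end = min(end, image1.height)
    if end > 0:
        frame.paste(image1.crop((0, 0, image1.width, end)), (0, 0))
    return frame

def animate(y=0):
    photo = ImageTk.PhotoImage(swipe_frame(y))
    label.configure(image=photo)
    label.image = photo  # keep a reference so Tk doesn't discard the image
    if y < image1.height:
        root.after(10, animate, y + 5)  # advance 5 rows every 10 ms

animate()
root.mainloop()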

Do cameras have to be calibrated in order to be used with StereoBM?

In my situation, the data returned by the OpenCV StereoBM depth map does not make sense, regardless of parameter tuning.
I'm doing research for a design project that involves OpenCV and uses stereo vision to generate a depth map. I'm currently able to load both of my web cameras and generate a depth map using StereoBM. However, the resulting data isn't useful at the moment, as my screenshot below demonstrates, so I created a small Python app that helps me tune the StereoBM parameters, which hasn't helped.
My question: do the cameras have to be calibrated in order to be used with the StereoBM function?
If not, what are some alternatives to help me improve my results (e.g. increase the resolution, use StereoSGBM, etc.)?
Code
import cv2
import time
import numpy as np
from Tkinter import *

oldVal = 15
def oddVals(n):
    global oldVal
    n = int(n)
    if not n % 2:
        window_size.set(n+1 if n > oldVal else n-1)
    oldVal = window_size.get()

minDispValues = [16, 32, 48, 64]
def minDispCallback(n):
    n = int(n)
    newvalue = min(minDispValues, key=lambda x: abs(x-float(n)))
    min_disp.set(newvalue)

# Display the sliders to control the stereo vision
master = Tk()
master.title("StereoBM Settings")
min_disp = Scale(master, from_=16, to=64, command=minDispCallback, length=600, orient=HORIZONTAL, label="Minimum Disparities")
min_disp.pack()
min_disp.set(16)
window_size = Scale(master, from_=5, to=255, command=oddVals, length=600, orient=HORIZONTAL, label="Window Size")
window_size.pack()
window_size.set(15)
Disp12MaxDiff = Scale(master, from_=5, to=30, length=600, orient=HORIZONTAL, label="Max Difference")
Disp12MaxDiff.pack()
Disp12MaxDiff.set(0)
UniquenessRatio = Scale(master, from_=0, to=30, length=600, orient=HORIZONTAL, label="Uniqueness Ratio")
UniquenessRatio.pack()
UniquenessRatio.set(15)
SpeckleRange = Scale(master, from_=0, to=60, length=600, orient=HORIZONTAL, label="Speckle Range")
SpeckleRange.pack()
SpeckleRange.set(34)
SpeckleWindowSize = Scale(master, from_=60, to=150, length=600, orient=HORIZONTAL, label="Speckle Window Size")
SpeckleWindowSize.pack()
SpeckleWindowSize.set(100)
master.update()

vcLeft = cv2.VideoCapture(0) # Load video capture for the left camera
#vcLeft.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 420)
#vcLeft.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 340)
vcLeft.set(3, 640) # Set camera width
vcLeft.set(4, 480) # Set camera height
vcRight = cv2.VideoCapture(1) # Load video capture for the right camera
#vcRight.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 420)
#vcRight.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 340)

firstTime = time.time() # First time log
totalFramesPassed = 0 # Number of frames passed
if vcLeft.isOpened() and vcRight.isOpened():
    rvalLeft, frameLeft = vcLeft.read()
    rvalRight, frameRight = vcRight.read()
else:
    rvalLeft = False
    rvalRight = False

while rvalLeft and rvalRight: # While the cameras are open
    rvalLeft, frameLeft = vcLeft.read()
    rvalRight, frameRight = vcRight.read()
    cv2.putText(frameLeft, "FPS : " + str(totalFramesPassed / (time.time() - firstTime)), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.8, 150, 2, 10)
    cv2.imshow("Left Camera", frameLeft)
    cv2.putText(frameRight, "FPS : " + str(totalFramesPassed / (time.time() - firstTime)), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.8, 150, 2, 10)
    cv2.imshow("Right Camera", frameRight)
    frameLeftNew = cv2.cvtColor(frameLeft, cv2.COLOR_BGR2GRAY)
    frameRightNew = cv2.cvtColor(frameRight, cv2.COLOR_BGR2GRAY)
    num_disp = 112 - min_disp.get()
    stereo = cv2.StereoBM_create(numDisparities=num_disp, blockSize=window_size.get())
    stereo.setMinDisparity(min_disp.get())
    stereo.setNumDisparities(num_disp)
    stereo.setBlockSize(window_size.get())
    stereo.setDisp12MaxDiff(Disp12MaxDiff.get())
    stereo.setUniquenessRatio(UniquenessRatio.get())
    stereo.setSpeckleRange(SpeckleRange.get())
    stereo.setSpeckleWindowSize(SpeckleWindowSize.get())
    disparity = stereo.compute(frameLeftNew, frameRightNew).astype(np.float32) / 16.0
    disp_map = (disparity - min_disp.get()) / num_disp
    cv2.imshow("Disparity", disp_map)
    master.update() # Update the slider options
    key = cv2.waitKey(20)
    totalFramesPassed = totalFramesPassed + 1 # One frame passed, increment
    if key == 27:
        break
vcLeft.release()
vcRight.release()
As stated in the OpenCV documentation of StereoBM (opencv stereoBM doc), the two images need to be a "rectified stereo pair".
This means that before you compute the disparity, you need to rectify the two cameras.
Have a look at stereo_match, where you can see how to rectify the two cameras before you compute the disparity.
When you compute the disparity using StereoBM, you are looking at the correspondences of parallel epipolar lines in both images.
This means that the images are expected to be aligned in such a way that the same rows in both images correspond to the same lines in space. The rectification process takes care of that.
For more information, look at Rectification with OpenCV.
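To make the order of operations concrete, here is a minimal sketch (mine, not from this answer) of applying a stored calibration before StereoBM. It assumes the camera_calibration.npz file produced by the calibration script in the answer below, a 640x480 capture size, and two test images left.png and right.png:
import cv2
import numpy as np

data = np.load('camera_calibration.npz')  # written by the calibration script below
width, height = 640, 480                  # assumed capture resolution

# Rectification transforms and remap tables from the stored calibration
R1, R2, P1, P2, Q, _, _ = cv2.stereoRectify(
    data['leftMatrix'], data['leftDistortion'],
    data['rightMatrix'], data['rightDistortion'],
    (width, height), data['R'], data['T'])
leftMapX, leftMapY = cv2.initUndistortRectifyMap(
    data['leftMatrix'], data['leftDistortion'], R1, P1, (width, height), cv2.CV_32FC1)
rightMapX, rightMapY = cv2.initUndistortRectifyMap(
    data['rightMatrix'], data['rightDistortion'], R2, P2, (width, height), cv2.CV_32FC1)

# Rectify one test pair and compute a disparity map
frameLeft = cv2.imread('left.png', cv2.IMREAD_GRAYSCALE)
frameRight = cv2.imread('right.png', cv2.IMREAD_GRAYSCALE)
leftRect = cv2.remap(frameLeft, leftMapX, leftMapY, cv2.INTER_LINEAR)
rightRect = cv2.remap(frameRight, rightMapX, rightMapY, cv2.INTER_LINEAR)
stereo = cv2.StereoBM_create(numDisparities=64, blockSize=15)
disparity = stereo.compute(leftRect, rightRect)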
I found out that we do need to rectify the pair in order to use the StereoBM function. Furthermore, I found that although it is more resource intensive, the StereoSGBM function gave me better results.
In case anyone needs to calibrate their cameras in the future, you can use this code to help you do so:
# Imports
import cv2
import numpy as np

# Constants
leftCameraNumber = 2 # Number for left camera
rightCameraNumber = 1 # Number for right camera
numberOfChessRows = 6
numberOfChessColumns = 8
chessSquareSize = 30 # Length of a square in millimeters
numberOfChessColumns = numberOfChessColumns - 1 # Update to reflect how many corners are inside the chess board
numberOfChessRows = numberOfChessRows - 1
objp = np.zeros((numberOfChessColumns*numberOfChessRows, 3), np.float32)
objp[:, :2] = np.mgrid[0:numberOfChessRows, 0:numberOfChessColumns].T.reshape(-1, 2)*chessSquareSize
objectPoints = []
leftImagePoints = []
rightImagePoints = []
parameterCriteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

# Code
print("Press \"n\" when you're done capturing checkerboards.")
vcLeft = cv2.VideoCapture(leftCameraNumber) # Load video capture for the left camera
vcLeft.set(cv2.CAP_PROP_FRAME_WIDTH, 640*3/2)
vcLeft.set(cv2.CAP_PROP_FRAME_HEIGHT, 480*3/2)
vcRight = cv2.VideoCapture(rightCameraNumber) # Load video capture for the right camera
vcRight.set(cv2.CAP_PROP_FRAME_WIDTH, 640*3/2)
vcRight.set(cv2.CAP_PROP_FRAME_HEIGHT, 480*3/2)
if vcLeft.isOpened() and vcRight.isOpened():
    rvalLeft, frameLeft = vcLeft.read()
    rvalRight, frameRight = vcRight.read()
else:
    rvalLeft = False
    rvalRight = False

# Number of successful recognitions
checkerboardRecognitions = 0
while rvalLeft and rvalRight: # While the cameras are open
    vcLeft.grab()
    vcRight.grab()
    rvalLeft, frameLeft = vcLeft.retrieve()
    rvalRight, frameRight = vcRight.retrieve()
    frameLeftNew = cv2.cvtColor(frameLeft, cv2.COLOR_BGR2GRAY)
    frameRightNew = cv2.cvtColor(frameRight, cv2.COLOR_BGR2GRAY)
    foundPatternLeft, cornersLeft = cv2.findChessboardCorners(frameLeftNew, (numberOfChessRows, numberOfChessColumns), None, cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_NORMALIZE_IMAGE + cv2.CALIB_CB_FAST_CHECK)
    foundPatternRight, cornersRight = cv2.findChessboardCorners(frameRightNew, (numberOfChessRows, numberOfChessColumns), None, cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_NORMALIZE_IMAGE + cv2.CALIB_CB_FAST_CHECK)
    if foundPatternLeft and foundPatternRight: # If corners were found in this frame
        # Process the images and display the count of checkerboards in our array
        checkerboardRecognitions = checkerboardRecognitions + 1
        print("Checker board recognitions: " + str(checkerboardRecognitions))
        objectPoints.append(objp)
        exactCornersLeft = cv2.cornerSubPix(frameLeftNew, cornersLeft, (11, 11), (-1, -1), parameterCriteria)
        leftImagePoints.append(exactCornersLeft)
        exactCornersRight = cv2.cornerSubPix(frameRightNew, cornersRight, (11, 11), (-1, -1), parameterCriteria)
        rightImagePoints.append(exactCornersRight)
        frameLeft = cv2.drawChessboardCorners(frameLeft, (numberOfChessRows, numberOfChessColumns), exactCornersLeft, True)
        frameRight = cv2.drawChessboardCorners(frameRight, (numberOfChessRows, numberOfChessColumns), exactCornersRight, True)
    # Display the current webcams regardless of whether a board was found or not
    cv2.imshow("Left Camera", frameLeft)
    cv2.imshow("Right Camera", frameRight)
    key = cv2.waitKey(250) # Give the frame some time
    if key == ord('n'):
        break

cameraMatrixLeft = np.zeros((3, 3))
cameraMatrixRight = np.zeros((3, 3))
distortionLeft = np.zeros((8, 1))
distortionRight = np.zeros((8, 1))
height, width = frameLeft.shape[:2]
rms, leftMatrix, leftDistortion, rightMatrix, rightDistortion, R, T, E, F = cv2.stereoCalibrate(objectPoints, leftImagePoints, rightImagePoints, cameraMatrixLeft, distortionLeft, cameraMatrixRight, distortionRight, (width, height), parameterCriteria, flags=0)
np.savez('camera_calibration.npz', leftMatrix=leftMatrix, leftDistortion=leftDistortion, rightMatrix=rightMatrix, rightDistortion=rightDistortion, R=R, T=T, E=E, F=F)
print("Calibration Settings Saved to File!")
print("RMS:")
print(rms)
print("Left Matrix:")
print(leftMatrix)
print("Left Distortion:")
print(leftDistortion)
print("Right Matrix:")
print(rightMatrix)
print("Right Distortion:")
print(rightDistortion)
print("R:")
print(R)
print("T:")
print(T)
print("E:")
print(E)
print("F:")
print(F)
leftRectTransform, rightRectTransform, leftProjMatrix, rightProjMatrix, _, _, _ = cv2.stereoRectify(leftMatrix, leftDistortion, rightMatrix, rightDistortion, (width, height), R, T, alpha=-1)
leftMapX, leftMapY = cv2.initUndistortRectifyMap(leftMatrix, leftDistortion, leftRectTransform, leftProjMatrix, (width, height), cv2.CV_32FC1)
rightMapX, rightMapY = cv2.initUndistortRectifyMap(rightMatrix, rightDistortion, rightRectTransform, rightProjMatrix, (width, height), cv2.CV_32FC1)
minimumDisparities = 0
maximumDisparities = 128
stereo = cv2.StereoSGBM_create(minimumDisparities, maximumDisparities, 18)
while True: # Loop until Esc is pressed
    vcLeft.grab()
    vcRight.grab()
    rvalLeft, frameLeft = vcLeft.retrieve()
    rvalRight, frameRight = vcRight.retrieve()
    frameLeftNew = cv2.cvtColor(frameLeft, cv2.COLOR_BGR2GRAY)
    frameRightNew = cv2.cvtColor(frameRight, cv2.COLOR_BGR2GRAY)
    leftRectified = cv2.remap(frameLeftNew, leftMapX, leftMapY, cv2.INTER_LINEAR)
    rightRectified = cv2.remap(frameRightNew, rightMapX, rightMapY, cv2.INTER_LINEAR)
    disparity = stereo.compute(leftRectified, rightRectified)
    cv2.filterSpeckles(disparity, 0, 6000, maximumDisparities)
    cv2.imshow("Normalized Disparity", (disparity/16.0 - minimumDisparities)/maximumDisparities)
    cv2.imshow("Left Camera", leftRectified)
    cv2.imshow("Right Camera", rightRectified)
    key = cv2.waitKey(10) # Give the frame some time
    if key == 27:
        break
print("Finished!")

Fade between images on screen using Python Tkinter / ImageTk

I am a Python newbie and have been making a somewhat odd slideshow script that cycles through images and also sources a variable from another file to 'settle' on an image.
I'm sure my code is tragic, but it does work (see below)!
My question is: how would I make it fade between images, instead of the current jerky cut to white and then to the next image? Is there a transitions module I should look at?
from Tkinter import *
import Image, ImageTk, random, string

class MyApp(Tk):
    def __init__(self):
        Tk.__init__(self)
        fr = Frame(self)
        fr.pack()
        self.canvas = Canvas(fr, height=400, width=600)
        self.canvas.pack()
        self.old_label_image = None
        self.position = 0
        self.command = 0
        self.oldcommand = 0
        self.slideshow()
        self.debug()

    def debug(self):
        self.QUIT = Button(self)
        self.QUIT["text"] = "QUIT!" + str(self.command)
        self.QUIT["fg"] = "red"
        self.QUIT["command"] = self.quit
        self.QUIT.pack({"side": "right"})

    def slideshow(self):
        if self.command != self.oldcommand:
            self.after_cancel(self.huh)
            # run through random between 2-5 changes,
            # then settle on command for 30 seconds
            self.title("Title: PAUSE")
            self.oldcommand = self.command
            self.slideshow()
        else:
            file = str(self.position) + '.jpg'
            image1 = Image.open(file)
            self.tkpi = ImageTk.PhotoImage(image1)
            label_image = Label(self, image=self.tkpi)
            label_image.place(x=0, y=0, width=image1.size[0], height=image1.size[1])
            self.title("Title: " + file)
            if self.old_label_image is not None:
                self.old_label_image.destroy()
            self.old_label_image = label_image
            # make this random instead of progressive
            if self.position != 1:
                self.position = self.position + 1
            else:
                self.position = 0
            commandfile = open('command.txt', 'r')
            self.command = string.atoi(commandfile.readline())
            commandfile.close()
            delay = random.randint(2000, 5000)
            self.huh = self.after(delay, self.slideshow)
            # self.after_cancel(huh) - works! so maybe can do from below Fn?

if __name__ == "__main__":
    root = MyApp()
    root.mainloop()
This can be achieved using the blend function.
Image.blend(image1, image2, alpha) ⇒ image
Creates a new image by interpolating between the given images, using a constant alpha. Both images must have the same size and mode.
out = image1 * (1.0 - alpha) + image2 * alpha
If the alpha is 0.0, a copy of the first image is returned. If the alpha is 1.0, a copy of the second image is returned. There are no restrictions on the alpha value. If necessary, the result is clipped to fit into the allowed output range.
So you could have something like this:
alpha = 0.0
while alpha < 1.0:
    blended = Image.blend(img1, img2, alpha)
    # ...update the label to show `blended` here...
    alpha = alpha + 0.01
    label_image.update()
A fuller example is below; I haven't had time to test this thoroughly, but you get the idea:
from Tkinter import Tk, Label
from PIL import Image, ImageTk
import time

white = Image.open("white_248x.jpg")
black = Image.open("black_248x.jpg")
root = Tk()
tk_img = ImageTk.PhotoImage(white)
image_label = Label(root, image=tk_img)
image_label.pack()
alpha = 0.0
while alpha < 1.0:
    new_img = Image.blend(white, black, alpha)
    tk_img = ImageTk.PhotoImage(new_img)
    image_label.configure(image=tk_img)
    alpha = alpha + 0.01
    time.sleep(0.1)
    image_label.update()
root.mainloop()
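One design note: time.sleep() blocks the Tk event loop, so the window can appear frozen while the fade runs. Here is a variant of the same idea (a sketch, using the same assumed file names as above) that schedules each blend with after() instead:
from Tkinter import Tk, Label
from PIL import Image, ImageTk

root = Tk()
white = Image.open("white_248x.jpg")
black = Image.open("black_248x.jpg")
image_label = Label(root)
image_label.pack()

def fade(step=0, steps=100):
    frame = ImageTk.PhotoImage(Image.blend(white, black, step / float(steps)))
    image_label.configure(image=frame)
    image_label.image = frame  # keep a reference so the PhotoImage isn't garbage-collected
    if step < steps:
        root.after(100, fade, step + 1)  # schedule the next blend in 100 ms

fade()
root.mainloop()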

Sawtooth tkinter mainloop frame duration?

Trying to animate a sequence of PIL images using tkinter. The graph of my frame durations (ms) looks like this:
Anyone have any idea what could be causing this spiky sawtooth pattern?
Here's a script to reproduce:
from PIL import Image, ImageTk
import Tkinter
import time
import sys

def generate_frames(n):
    """
    keep n under 101 * 101
    """
    out = []
    last_pil = None
    for i in range(n):
        if last_pil:
            pil_image = last_pil.copy()
        else:
            pil_image = Image.new('L', (101, 101), 255)
        x = i / 101
        y = i % 101
        pil_image.load()[x, y] = 0
        out.append(ImageTk.PhotoImage(pil_image))
        last_pil = pil_image
    return out

def draw():
    FRAME_COUNT = 5000
    master = Tkinter.Tk()
    w = Tkinter.Canvas(master, width=302, height=302)
    w.create_rectangle(49, 49, 252, 252)
    w.pack()
    frames = generate_frames(FRAME_COUNT)
    def draw_frame(f, canvas_image):
        print repr(time.time())
        frame = frames[f]
        if canvas_image is None:
            canvas_image = w.create_image((151, 151), image=frame, anchor='center')
        else:
            w.itemconfigure(canvas_image, image=frame)
        w.current_frame = frame  # save a reference
        next_frame = f + 1
        if next_frame < FRAME_COUNT:
            master.after(1, draw_frame, next_frame, canvas_image)
        else:
            sys.exit(0)
    master.after(10, draw_frame, 0, None)
    master.mainloop()

draw()
To see the plot, pipe output through
import sys

last = None
for line in sys.stdin:
    value = float(line.strip()) * 1000
    if last is None:
        pass
    else:
        print (value - last)
    last = value
then through
from matplotlib import pyplot
import sys

X = []
Y = []
for index, line in enumerate(sys.stdin):
    line = line.strip()
    X.append(index)
    Y.append(float(line))
pyplot.plot(X, Y, '-')
pyplot.show()
Making it multi-threaded doesn't help:
import threading  # in addition to the imports from the script above

class AnimationThread(threading.Thread):
    FRAME_COUNT = 5000

    def __init__(self, canvas):
        threading.Thread.__init__(self)
        self.canvas = canvas
        self.frames = generate_frames(self.FRAME_COUNT)

    def run(self):
        w = self.canvas
        frames = self.frames
        canvas_image = None
        for i in range(self.FRAME_COUNT):
            print repr(time.time())
            frame = frames[i]
            if canvas_image is None:
                canvas_image = w.create_image((151, 151), image=frame, anchor='center')
            else:
                w.itemconfigure(canvas_image, image=frame)
            w.current_frame = frame
            time.sleep(1 * .001)

def draw_threaded():
    FRAME_COUNT = 5000
    master = Tkinter.Tk()
    w = Tkinter.Canvas(master, width=302, height=302)
    w.create_rectangle(49, 49, 252, 252)
    w.pack()
    animation_thread = AnimationThread(w)
    animation_thread.start()
    master.mainloop()
    animation_thread.join()

draw_threaded()
That closely resembles the kind of interference pattern you get when competing 60 Hz and 50 Hz samples mingle:
(Original Wolfram|Alpha plot)
This is likely caused by having two things at different (but close) refresh rates. It's the same type of thing that happens when you try to film a TV screen and it looks like a black bar keeps moving down the image, or when car wheels appear to rotate backwards around their axles in car commercials. It is essentially an extension of the Moiré Effect.
I don't know whether or not it is caused by video drivers and/or hardware, but it is almost certainly caused by interfering cyclical patterns. It looks a lot like it could be the GC cycle interfering with your for loop (hence the sudden drop in the sawtooth-like wave as memory is freed up and can be allocated).
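As a toy illustration of that aliasing claim (an invented simulation, not the original measurement): if frames are really produced every p milliseconds but the timestamps come from a clock with granularity g, the measured frame-to-frame deltas cycle through a repeating spiky pattern even though the true rate is constant:
# Toy model: constant true period p, clock readings quantized to multiples of g
p, g = 3.4, 1.0  # assumed values, in milliseconds
timestamps = [g * round(i * p / g) for i in range(50)]  # quantized clock reads
deltas = [b - a for a, b in zip(timestamps, timestamps[1:])]
print(deltas)  # a repeating mix of 3.0s and 4.0s instead of a constant 3.4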
