I'm trying to make a platformer with a parallax background. I got the code working and also made sure to add .convert().
It runs pretty well for the most part, but every now and then there are periodic lag spikes.
# Imports
import pygame
from vars import *

# Background class
class Background(pygame.sprite.Sprite):
    def __init__(self, image, x):
        super(Background, self).__init__()
        self.surf = pygame.transform.smoothscale(pygame.image.load(image).convert_alpha(), (s_width, s_height))
        self.rect = self.surf.get_rect(center=(x, s_height/2))

# Cave Background
class BgSurface(pygame.sprite.Sprite):
    def __init__(self):
        super(BgSurface, self).__init__()
        self.surf = pygame.Surface((s_width, s_height))
        self.rect = self.surf.get_rect(center=(s_width/2, s_height/2))
        self.surf.fill((144, 156, 156))

# Background stuff
cave_air = BgSurface()
l1 = Background("layer1.png", s_width/2)
l2 = Background("layer2.png", s_width/2)
layer_list = [pygame.sprite.Group(l1), pygame.sprite.Group(l2)]
head_list = [pygame.sprite.Group(l1), pygame.sprite.Group(l2)]
bg_img_list = ["layer1.png", "layer2.png"]

def parallax(s_xdir, s_ydir):
    for i in range(len(layer_list)):
        ind = 0
        for x in layer_list[i]:
            ind += 1
            x.rect.move_ip(s_xdir * vel_list[i], s_ydir * vel_list[2])
            # Adding to left
            if x.rect.left > 0 and ind == len(layer_list[i]) and not x.rect.centerx > s_width:
                new_bg = Background(bg_img_list[i], x.rect.centerx - s_width)
                new_bg.rect.centery = x.rect.centery
                layer_list[i].add(new_bg)
            # Memory optimization
            if x.rect.left > s_width or x.rect.right < 1:
                if x in head_list[i]:
                    x.kill()
                    for a in layer_list[i]:
                        head_list[i] = pygame.sprite.Group(a)
                x.kill()
        # Adding to right side
        for a in head_list[i]:
            if a.rect.right < s_width:
                new_head = Background(bg_img_list[i], a.rect.centerx + s_width)
                new_head.rect.centery = a.rect.centery
                layer_list[i].add(new_head)
                head_list[i] = pygame.sprite.Group(new_head)
        # Experimental deletion of common centers
        for p in layer_list[i]:
            for q in layer_list[i]:
                if p.rect.centerx == q.rect.centerx and p != q:
                    p.kill()
(Stuff like s_width and s_height is defined in the vars module which I import.)
I moved the last killing loop out of the function and logged how many sprites are in the background every second, but it shows that no unnecessary sprites are being added during the lag spikes.
The spikes don't even coincide with the moments in which a new surface is added to fill the screen. Does anyone know why this is happening?
Do not load the images in the application loop. Loading an image is very time consuming because the image file has to be read and interpreted. Load the images once at the beginning of the application:
# Background class
class Background(pygame.sprite.Sprite):
    def __init__(self, image, x):
        super(Background, self).__init__()
        self.surf = pygame.transform.smoothscale(image, (s_width, s_height))
        self.rect = self.surf.get_rect(center=(x, s_height/2))

bg_img_filelist = ["layer1.png", "layer2.png"]
bg_img_list = [pygame.image.load(f).convert_alpha() for f in bg_img_filelist]

l1 = Background(bg_img_list[0], s_width/2)
l2 = Background(bg_img_list[1], s_width/2)
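Since pygame.transform.smoothscale is relatively expensive as well, a further step in the same spirit (my own sketch, not part of the original answer) is to scale each layer once up front too:

# Sketch: load, convert and scale each layer exactly once at startup, so that
# constructing extra Background tiles later does no disk access or scaling.
bg_img_filelist = ["layer1.png", "layer2.png"]
bg_img_list = [
    pygame.transform.smoothscale(
        pygame.image.load(f).convert_alpha(), (s_width, s_height)
    )
    for f in bg_img_filelist
]

With that, Background.__init__ can simply assign self.surf = image instead of rescaling, and the calls inside parallax() stay exactly as they are, since bg_img_list still holds one entry per layer.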
I am trying to read all the touching pixels with the same color in an image.
For that I use recursive functions. When I check one pixel, I look at the pixels to its right, left, top and bottom and check whether each neighbour has the same color. If it does, I add it to an array; otherwise I don't.
The code is as follows:
import tkinter as tk
from PIL import Image
import sys
sys.setrecursionlimit(200000)
## WINDOWS
# to launch in debug mode
imgToDraw = Image.open('assets-test\\smile-face.png')
# to launch normaly
# imgToDraw = Image.open('..\\assets-test\\smile-face.png')
## LINUX
# imgToDraw = Image.open('../assets-test/smile-face.png')
imgPixels = imgToDraw.load()
imgWidth = imgToDraw.size[0]
imgHeight = imgToDraw.size[1]
# an element is a part of the image, it's a bunch of pixels with approximately the same color
# and each pixel touch at least one other pixel of the same element
elements = [];
isPixelChecked = [[ False for y in range( imgWidth ) ] for x in range( imgHeight )]
# min tolerable difference between two colors to consider them the same
# the higher the value is the more colors will be considered the same
COLOR_TOLERANCE = 10
reccursionCount = 0
class Element:
    def __init__(self, color):
        self.pixels = [];
        self.color = color;

    def addPixel(self, pixel):
        self.pixels.append(pixel);

class Pixel:
    def __init__(self, x, y, color):
        self.x = x # x position of the pixel
        self.y = y # y position of the pixel
        self.color = color # color is a tuple (r,g,b)
def cutImageInElements():
    global element
    completeElement(element.pixels)

def completeElement(elemPixels):
    global reccursionCount
    global isPixelChecked
    reccursionCount += 1
    nbPixels = len(elemPixels);
    xIndex = elemPixels[nbPixels - 1].x
    yIndex = elemPixels[nbPixels - 1].y
    xRightIdx = elemPixels[nbPixels - 1].x + 1
    xLeftIdx = elemPixels[nbPixels - 1].x - 1
    yBottomIdx = elemPixels[nbPixels - 1].y + 1
    yTopIdx = elemPixels[nbPixels - 1].y - 1
    isPixelChecked[xIndex][yIndex] = True
    if((xRightIdx < imgWidth) and isPixelChecked[xRightIdx][yIndex] == False):
        if(isColorAlmostSame(imgPixels[elemPixels[0].x, elemPixels[0].y], imgPixels[xRightIdx, yIndex])):
            pixelAppended = Pixel(xRightIdx, yIndex, imgPixels[xRightIdx, yIndex])
            elemPixels.append(pixelAppended)
            completeElement(elemPixels)
    if((xLeftIdx >= 0) and isPixelChecked[xLeftIdx][yIndex] == False):
        if(isColorAlmostSame(imgPixels[elemPixels[0].x, elemPixels[0].y], imgPixels[xLeftIdx, yIndex])):
            pixelAppended = Pixel(xLeftIdx, yIndex, imgPixels[xLeftIdx, yIndex])
            elemPixels.append(pixelAppended)
            completeElement(elemPixels)
    if((yBottomIdx < imgHeight) and isPixelChecked[xIndex][yBottomIdx] == False):
        if(isColorAlmostSame(imgPixels[elemPixels[0].x, elemPixels[0].y], imgPixels[xIndex, yBottomIdx])):
            pixelAppended = Pixel(xIndex, yBottomIdx, imgPixels[xIndex, yBottomIdx])
            elemPixels.append(pixelAppended)
            completeElement(elemPixels)
    if((yTopIdx >= 0) and isPixelChecked[xIndex][yTopIdx] == False):
        if(isColorAlmostSame(imgPixels[elemPixels[0].x, elemPixels[0].y], imgPixels[xIndex, yTopIdx])):
            pixelAppended = Pixel(xIndex, yTopIdx, imgPixels[xIndex, yTopIdx])
            elemPixels.append(pixelAppended)
            completeElement(elemPixels)

def isColorAlmostSame(pixel1, pixel2):
    redDiff = abs(pixel1[0] - pixel2[0])
    greenDiff = abs(pixel1[1] - pixel2[1])
    blueDiff = abs(pixel1[2] - pixel2[2])
    if(redDiff < COLOR_TOLERANCE and greenDiff < COLOR_TOLERANCE and blueDiff < COLOR_TOLERANCE):
        return True
    else:
        return False

def printPixelsArr(pixelsArr):
    for x in range(0, len(pixelsArr)):
        print(pixelsArr[x].x, pixelsArr[x].y, pixelsArr[x].color)
if __name__ == '__main__':
    pixel = Pixel(0, 0, imgPixels[0, 0]);
    element = Element(pixel.color);
    element.addPixel(pixel);
    cutImageInElements();
    print("NbReccursive call: ", reccursionCount)
This code works for small images of size 100x100, but crashes with a 400x400 image with the error "terminated by signal SIGSEGV (Address boundary error)" when I launch the program on WSL2. When I run the program from cmd or PowerShell it also crashes, but with no error code or message.
I cannot understand why it works with some image sizes and not others. I can only think that the memory runs out or something, but in the task manager the program uses almost no memory.
Not sure why that's failing, but raising the recursion limit to 200000 most likely lets Python overflow the C stack, which shows up as a SIGSEGV rather than a RecursionError. In any case, that much recursion in Python isn't a great idea. I'd suggest reading about tail recursion that other languages use to make some recursive algorithms consume constant stack space. Note that your algorithm is not tail recursive, so this optimisation wouldn't help even if Python supported it.
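One standard way around the recursion depth limit, sketched here under the names used in the question (not the asker's exact code), is to keep an explicit stack of pixels to visit instead of recursing:

# Sketch: iterative flood fill with an explicit stack instead of recursion.
# Assumes imgPixels, imgWidth, imgHeight and isColorAlmostSame from the question.
def collect_element(start_x, start_y):
    start_color = imgPixels[start_x, start_y]
    visited = [[False] * imgHeight for _ in range(imgWidth)]
    stack = [(start_x, start_y)]
    pixels = []
    while stack:
        x, y = stack.pop()
        if visited[x][y]:
            continue
        visited[x][y] = True
        pixels.append((x, y))
        # Visit the four direct neighbours that are inside the image.
        for nx, ny in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)):
            if 0 <= nx < imgWidth and 0 <= ny < imgHeight and not visited[nx][ny]:
                if isColorAlmostSame(start_color, imgPixels[nx, ny]):
                    stack.append((nx, ny))
    return pixels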
I hacked together the following flood fill implementation. It uses Numpy so that it's only 10x slower than Pillow's ImageDraw.floodfill.
import numpy as np

def floodfill(im, row, col, threshold):
    similar = np.mean(np.abs(im - im[row, col]), 2) < threshold
    mask = np.zeros_like(similar)
    mask[row, col] = 1
    m2 = mask.copy()
    while True:
        m2[:,:] = mask
        m2[1:,:] |= mask[:-1]
        m2[:-1,:] |= mask[1:]
        m2[:,1:] |= mask[:,:-1]
        m2[:,:-1] |= mask[:,1:]
        m2 &= similar
        if np.all(m2 == mask):
            return mask
        mask[:,:] = m2
As an example of using this, you could do:
from io import BytesIO
import requests
from PIL import Image

res = requests.get("https://picsum.photos/300")
res.raise_for_status()
src = Image.open(BytesIO(res.content))
mask = floodfill(np.array(src, int), 10, 10, 40)
where src is the random image I got and mask is the resulting output.
I can't blit images at normal speed if the images are in an array.
# Brief review:
In my code I defined 10 images as variables (x1-x10).
Those 10 images belong to a specific class (object.draw_function()) and are blitted in the main loop according to specific conditions.
In the object.draw_function() all the images are stored in a list, images_lst = [img1, img2, img3, ..., img10], and blitted from that list according to rules.
I noticed that if the length of the list is higher than 4 or 5, the loop FPS drops, and I don't understand why, since the images are loaded outside the loop.
Code example:
#loading images
img1 = pygame.image.load(r'images\game_background1\img1.jpg')
img2 = pygame.image.load(r'images\game_background1\img2.jpg')
img3 = pygame.image.load(r'images\game_background1\img3.jpg')
'
'
'
img10= pygame.image.load(r'images\game_background1\img10.png')
#define font and space size
space_width,space_height = pysical_tm_img.get_width(),pysical_tm_img.get_height()
font0_0 = pygame.font.SysFont(pygame.font.get_fonts()[0],12)
font0_02 = pygame.font.SysFont(pygame.font.get_fonts()[0],17)
# define class
class EXAMPLE():
def __init__(self,x,y,text,power,energy,range):
self.rect = pygame.Rect(x,y,100,10)
self.text = text
self.power = power
self.energy = energy
self.range = range
def draw_func(self,surface):
img_lst = [img1,text1,img2,text2,img3,text3......img10,text10]
for i,img in enumerate(img_lst):
if i % 2 == 0 :
img_rect = img.get_rect(center=(self.rect.x +20 + (i *space_width*2),self.rect.top + space_height))
surface.blit(img,img_rect)
else:
img_rect = img.get_rect(center=(self.rect.x +20 + space_width + ((i-1) *space_width*2),self.rect.top + space_height))
surface.blit(img,img_rect)
#main loop
while True:
if somthing:
object1 = EXAMPLE(10,10,"abc",100,50,10)
object1.draw_func(screen)
elif somthing else:
object3 = EXAMPLE(10,10,"abc",100,50,10)
object3.draw_func(screen)
pygame.display.update()
clock.tick(60)
I don't understand what's wrong and why I can't append more images to my image list without reducing the runtime performance.
Another question, not strictly related to this code but relevant to runtime: this code (without the main loop) lives in file number 1, and in that file I import pygame and init only the font:
pygame.font.init()
In file number 2, where the main loop runs, I import pygame and init pygame:
pygame.init()
Does this reduce my program's runtime performance?
Ensure that the image Surface has the same format as the display Surface. Use convert() (or convert_alpha()) to create a Surface that has the same pixel format. This improves performance when the image is blit on the display, because the formats are compatible and blit does not need to perform an implicit transformation.
e.g.:
img1 = pygame.image.load(r'images\game_background1\img1.jpg').convert()
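For example, a rough sketch of loading and converting all of the images up front (assuming the file layout from the question, and that the display mode has already been set):

# Sketch: load and convert every image once at startup.
# The JPGs have no alpha channel, so convert() is enough; the PNG may use
# per-pixel alpha, in which case convert_alpha() preserves it.
image_files = [r'images\game_background1\img{}.jpg'.format(i) for i in range(1, 10)]
images = [pygame.image.load(f).convert() for f in image_files]
images.append(pygame.image.load(r'images\game_background1\img10.png').convert_alpha())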
I'm trying to add a high FPS screen recorder to my application.
I use Python 3.7 on Windows.
The modules and methods I've tried are mss (python-mss) and d3dshot, but I'm still only achieving 15-19 FPS for a long video (more than 20 seconds).
The resolution I'm recording at is 1920 x 1080.
What is the best way to optimize screen recording? I've tried to use the multiprocessing library, but it seems like it's still not fast enough. I'm not sure I'm using it in the optimal way; what are some ways I could use it to improve processing performance?
Using OBS Studio, I'm able to get 30 FPS, no matter how long the video is. My objective is to achieve the same results with my own code.
Here is what I've written so far:
from multiprocessing import Process, Queue
from time import sleep, time

import cv2
import d3dshot
import numpy as np

def grab(queue):
    d = d3dshot.create(capture_output="numpy", frame_buffer_size=500)
    d.capture()
    sleep(0.1)
    c = 0
    begin = time()
    while time() - begin < 30:
        starter = time()
        frame = d.get_latest_frame()
        queue.put(frame)
        c += 1
        ender = time()
        sleep(max(0, 1/60 - (ender - starter)))
    # Tell the other worker to stop
    queue.put(None)
    final = time()
    print(c / (final - begin))
    d.stop()

def save(queue):
    SCREEN_SIZE = 1920, 1080
    # Define the codec and create VideoWriter object
    fourcc = cv2.VideoWriter_fourcc(*'DIVX')  # In Windows: DIVX
    out = cv2.VideoWriter(r"output.avi", fourcc, 30.0, (SCREEN_SIZE))
    # type: (Queue) -> None
    last_img = None
    while "there are screenshots":
        img = queue.get()
        if img is None:
            break
        if img is last_img:
            continue
        out.write(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        last_img = img

if __name__ == "__main__":
    # The screenshots queue
    queue = Queue()  # type: Queue
    # 2 processes: one for grabbing and one for saving PNG files
    Process(target=grab, args=(queue,)).start()
    Process(target=save, args=(queue,)).start()
The goal is to capture a game, while performing automated keyboard and mouse actions.
I have faced the same problem trying to get high-speed recording for games. This was the fastest solution I was able to find for Windows. The code uses raw buffer objects and reaches around 27 FPS. I cannot find the original post on which this code is based, but if someone finds it I will add the reference.
Note that the framerate will significantly increase if you make the region smaller than 1920x1080.
"""
Alternative screen capture device, when there is no camera or webcam connected
to the desktop.
"""
import logging
import sys
import time
import cv2
import numpy as np
if sys.platform == 'win32':
    import win32gui, win32ui, win32con, win32api
else:
    logging.warning(f"Screen capture is not supported on platform: `{sys.platform}`")
from collections import namedtuple
class ScreenCapture:
    """
    Captures a fixed region of the total screen. If no region is given
    it will take the full screen size.

    region_ltrb: Tuple[int, int, int, int]
        Specific region that has to be taken from the screen using
        the top left `x` and `y`, bottom right `x` and `y` (ltrb coordinates).
    """
    __region = namedtuple('region', ('x', 'y', 'width', 'height'))

    def __init__(self, region_ltrb=None):
        self.region = region_ltrb
        self.hwin = win32gui.GetDesktopWindow()

        # Time management
        self._time_start = time.time()
        self._time_taken = 0
        self._time_average = 0.04

    def __getitem__(self, item):
        return self.screenshot()

    def __next__(self):
        return self.screenshot()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        if exc_type and isinstance(exc_val, StopIteration):
            return True
        return False

    @staticmethod
    def screen_dimensions():
        """ Retrieve total screen dimensions. """
        left = win32api.GetSystemMetrics(win32con.SM_XVIRTUALSCREEN)
        top = win32api.GetSystemMetrics(win32con.SM_YVIRTUALSCREEN)
        height = win32api.GetSystemMetrics(win32con.SM_CYVIRTUALSCREEN)
        width = win32api.GetSystemMetrics(win32con.SM_CXVIRTUALSCREEN)
        return left, top, width, height

    @property
    def fps(self):
        return int(1 / self._time_average) * (self._time_average > 0)

    @property
    def region(self):
        return self._region

    @property
    def size(self):
        return self._region.width, self._region.height

    @region.setter
    def region(self, value):
        if value is None:
            self._region = self.__region(*self.screen_dimensions())
        else:
            assert len(value) == 4, f"Region requires 4 input, x, y of left top, and x, y of right bottom."
            left, top, x2, y2 = value
            width = x2 - left + 1
            height = y2 - top + 1
            self._region = self.__region(*list(map(int, (left, top, width, height))))

    def screenshot(self, color=None):
        """
        Takes a part of the screen, defined by the region.

        :param color: cv2.COLOR_....2...
            Converts the created BGRA image to the requested image output.
        :return: np.ndarray
            An image of the region in BGRA values.
        """
        left, top, width, height = self._region
        hwindc = win32gui.GetWindowDC(self.hwin)
        srcdc = win32ui.CreateDCFromHandle(hwindc)
        memdc = srcdc.CreateCompatibleDC()

        bmp = win32ui.CreateBitmap()
        bmp.CreateCompatibleBitmap(srcdc, width, height)
        memdc.SelectObject(bmp)
        memdc.BitBlt((0, 0), (width, height), srcdc, (left, top), win32con.SRCCOPY)

        signed_ints_array = bmp.GetBitmapBits(True)
        img = np.frombuffer(signed_ints_array, dtype='uint8')
        img.shape = (height, width, 4)

        srcdc.DeleteDC()
        memdc.DeleteDC()
        win32gui.ReleaseDC(self.hwin, hwindc)
        win32gui.DeleteObject(bmp.GetHandle())

        # This makes sure that the FPS are taken in comparison to screenshot rates and vary only slightly.
        self._time_taken, self._time_start = time.time() - self._time_start, time.time()
        self._time_average = self._time_average * 0.95 + self._time_taken * 0.05

        if color is not None:
            return cv2.cvtColor(img, color)
        return img

    def show(self, screenshot=None):
        """ Displays an image to the screen. """
        image = screenshot if screenshot is not None else self.screenshot()
        cv2.imshow('Screenshot', image)

        if cv2.waitKey(1) & 0xff == ord('q'):
            raise StopIteration
        return image

    def close(self):
        """ Needs to be called before exiting when `show` is used, otherwise an error will occur. """
        cv2.destroyWindow('Screenshot')

    def scale(self, src: np.ndarray, size: tuple):
        return cv2.resize(src, size, interpolation=cv2.INTER_LINEAR_EXACT)

    def save(self, path, screenshot=None):
        """ Store the current screenshot in the provided path. Full path, with img name, is required. """
        image = screenshot if screenshot is not None else self.screenshot()
        cv2.imwrite(filename=path, img=image)
if __name__ == '__main__':
    # Example usage when displaying.
    with ScreenCapture((0, 0, 1920, 1080)) as capture:
        for _ in range(100):
            capture.show()
            print(f"\rCapture framerate: {capture.fps}", end='')

    # Example usage as generator.
    start_time = time.perf_counter()
    for frame, screenshot in enumerate(ScreenCapture((0, 0, 1920, 1080)), start=1):
        print(f"\rFPS: {frame / (time.perf_counter() - start_time):3.0f}", end='')
Edit
I noticed some small mistakes in the show function and in the self.screenshot calls in the __getitem__ and __next__ methods. These have been resolved.
Besides the example that uses ScreenCapture as a context manager, I also added an example of using it as a generator.
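To connect this back to the recording goal in the question, here is a hedged sketch (not part of the original answer) that feeds the captured frames into the same cv2.VideoWriter settings the question already uses:

# Sketch: record the captured region with the VideoWriter settings from the question.
# The BGRA screenshots are converted to BGR before writing.
capture = ScreenCapture((0, 0, 1919, 1079))           # ltrb region -> 1920x1080 frames
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
out = cv2.VideoWriter("output.avi", fourcc, 30.0, (1920, 1080))
for _ in range(300):                                  # roughly 10 seconds at 30 FPS
    frame = capture.screenshot(color=cv2.COLOR_BGRA2BGR)
    out.write(frame)
out.release()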
This is my first question on StackOverflow, so here goes:
Edit: I have edited this a few times, just fixing typing mistakes and updating the code. Even after various changes to the code, the issue remains exactly the same.
Also, pygame.mixer.music.fadeout() is not what I'm looking for. This code will also be for when I want to lower music volume to perhaps 50% on, say, pausing the game or entering a talk scene.
With Pygame, I am trying to perform music volume manipulation based on how much time has passed. I already have some decent code created, but it's not performing how I thought it intuitively should. Also, I should note that I am using the component-based EBS system I ripped from PySDL2. Here is the link to the EBS module: https://bitbucket.org/marcusva/py-sdl2/src/02a4bc4f79d9440fe98e372e0ffaadacaefaa5c6/sdl2/ext/ebs.py?at=default
This is my initial block of code:
import os
import sys

import pygame
from pygame.locals import *

# Setup import paths for module.
pkg_dir = os.path.split(os.path.abspath(__file__))[0]
parent_dir, pkg_name = os.path.split(pkg_dir)
sys.path.insert(0, parent_dir)
sys.path.insert(0, os.path.join(parent_dir, "Game"))

import Game
from Porting.sdl2.ext import ebs

pygame.display.quit()
print("Counting down...")
for n in range(5):
    print(str(n + 1))
    pygame.time.delay(1000)

appworld = ebs.World()
audio_system = Game.audio.AudioSystem(44100, -16, 2, 4096)
appworld.add_system(audio_system)
test1 = Game.sprites.AudioSprite(appworld)
test2 = Game.sprites.AudioSprite(appworld)
test1.audio = Game.audio.Audio(database["BGMusic0"], True)
test2.audio = Game.audio.Audio(database["BGMusic1"], True)
game_clock = pygame.time.Clock()
volume_change_clock = pygame.time.Clock()
loop = True
time_passed = 0

while loop:
    game_clock.tick(60)
    appworld.process()
    time_passed += volume_change_clock.tick(60)
    if time_passed > (10 * 1000):
        print(time_passed)
        if not audio_system.music_volume_changed:
            audio_system.set_music_volume(0, True)
My next block of code:
import pygame
from Porting.sdl2.ext import ebs

class AudioSystem(ebs.System):
    def __init__(self, frequency, bit_size, channels, buffer):
        super(AudioSystem, self).__init__()
        self.componenttypes = Audio,
        pygame.mixer.init(frequency, bit_size, channels, buffer)
        pygame.mixer.set_num_channels(200)
        self.frequency = frequency
        self.bit_size = bit_size
        self.channels = channels
        self.buffer = buffer
        self.music_volume_change_clock = None
        self.music_volume_changed = False
        self.music_volume_current = 0
        self.music_volume_new = 0
        self.music_fade = False
        self.music_change_speed = 0
        self.time_passed_total = 0
        self.time_passed_remainder = 0

    def process(self, world, componentsets):
        for audio in componentsets:
            if audio.is_music:
                music = pygame.mixer.music
                if not pygame.mixer.music.get_busy():
                    music.load(audio.file)
                    music.play()
                if self.music_volume_changed:
                    self.music_volume_current = music.get_volume() * 100
                    if self.music_volume_current != self.music_volume_new and self.music_fade:
                        time_passed = self.music_volume_change_clock.tick(60)
                        self.time_passed_total += time_passed
                        self.time_passed_total += self.time_passed_remainder
                        self.time_passed_remainder = 0
                        if self.time_passed_total > self.music_change_speed:
                            self.time_passed_remainder = self.time_passed_total % self.music_change_speed
                            volume_change_amount = int(self.time_passed_total / self.music_change_speed)
                            self.time_passed_total = 0
                            if self.music_volume_current > self.music_volume_new:
                                self.music_volume_current -= volume_change_amount
                                music.set_volume(self.music_volume_current / 100)
                            elif self.music_volume_current < self.music_volume_new:
                                self.music_volume_current += volume_change_amount
                                music.set_volume(self.music_volume_current / 100)
                    elif self.music_volume_current != self.music_volume_new:
                        music.set_volume(self.music_volume_current / 100)
                    else:
                        self.music_volume_changed = False
                        self.music_fade = False
            else:
                if not audio.channel:
                    audio.channel = pygame.mixer.find_channel()
                    audio.channel.play(audio.file)

    def set_music_volume(self, percent, fade=False, change_speed=50):
        self.music_volume_changed = True
        self.music_volume_new = percent
        self.music_fade = fade
        self.music_change_speed = change_speed
        self.music_volume_change_clock = pygame.time.Clock()

class Audio(object):
    def __init__(self, file, is_music=False):
        self.is_music = is_music
        if self.is_music:
            self.file = file
        else:
            self.channel = None
            self.file = pygame.mixer.Sound(file)
My testing has shown that manipulating the parameter of Clock.tick() in my Game.audio module influences how quickly the audio volume falls from 100 to 0. Leaving it blank causes it to drop almost instantaneously. At 60, it falls to 0 in around 2 seconds, which baffles me. At 30, in 1 second. At 5, it falls slowly, with the volume never seeming to reach 0. I want to completely decouple my audio volume manipulation from my game's frame rate, but I am unsure of how to accomplish that. I want to avoid threading and multiprocessing if possible.
Thanks in advance! :)
Clock.tick()'s parameter is used to call the SDL sleep function to limit how many times the loop runs per second.
Calling it with Clock.tick(5) limits it to five loops per second.
I've also never used two clocks in the same code, especially with multiple ticks (each of which calculates its sleep time individually). Instead of that, consider using the return value of tick (the time in ms since the last call), and use that to track time through the whole application.
Example:
timer = 0
while running:
    # do things
    timer += main_clock.tick(FPS)
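For the volume question specifically, here is a minimal sketch of that idea (independent of the EBS code, with a hypothetical music file name): the fade is driven by the elapsed milliseconds from a single Clock.tick(), so its duration does not depend on the frame rate.

# Sketch: fade the music from 100% to 50% over a fixed duration, driven by
# the elapsed milliseconds from Clock.tick() rather than by frame count.
import pygame

pygame.init()
screen = pygame.display.set_mode((200, 200))
pygame.mixer.music.load("music.ogg")    # hypothetical file
pygame.mixer.music.play(-1)

clock = pygame.time.Clock()
fade_ms = 2000                          # fade duration in milliseconds
elapsed = 0
start_vol, target_vol = 1.0, 0.5

running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    dt = clock.tick(60)                 # ms since the previous frame
    if elapsed < fade_ms:
        elapsed = min(elapsed + dt, fade_ms)
        t = elapsed / fade_ms           # progress from 0.0 to 1.0
        pygame.mixer.music.set_volume(start_vol + (target_vol - start_vol) * t)
    pygame.display.flip()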
I am using code in wxPython to show images.
I created a screen with 2 panels, one left and right.
In one of the panels (randomly chosen), I want to display an image for exactly 150ms.
How can I program this? I am relatively new to Python, and I can't find a clear way to do it on the internet.
My code for now (without the 150ms):
import wxversion
wxversion.select("3.0")
import wx
import random
import time

class Screen_1(wx.Dialog):
    ri = 0

    def __init__(self, parent, id, title):
        wx.Dialog.__init__(self, parent, id, title, size=(400, 300))
        self.randomImage = random.randrange(1, 3)
        self.randomSlot = random.randrange(1, 3)
        Screen_1.ri = self.randomImage
        if(self.randomSlot == 1):
            self.side = 'Left'
        else:
            self.side = 'Right'
        file = open('User.txt', 'a')

        panel_left = wx.Panel(self, 11, (-1, -1), (200, 200))
        self.picture_left = wx.StaticBitmap(panel_left)
        font = wx.Font(13, wx.DEFAULT, wx.NORMAL, wx.BOLD)
        panel_centre = wx.Panel(self, 12, (200, 70), (10, 100))
        msg = wx.StaticText(panel_centre, -1, '+', size=(10, 100))
        msg.SetFont(font)
        panel_right = wx.Panel(self, 13, (210, 0), (200, 200))
        self.picture_right = wx.StaticBitmap(panel_right)

        self.imageName = 'im_' + str(self.randomImage) + '.png'
        if self.randomSlot == 1:
            self.picture_left.SetBitmap(wx.Bitmap(self.imageName))
        else:
            self.picture_right.SetBitmap(wx.Bitmap(self.imageName))

        wx.FutureCall(1000, self.Destroy)
        self.Centre()
        self.ShowModal()

    def OnClick(self, event):
        self.Close()
Thanks a lot!
def OnTimeUp(self, e):
    # change images
    self.timer.Start(15, oneShot=True)  # if you want to call it again in 15 ms

def StartTimer(self):
    self.timer = wx.Timer()
    self.timer.Bind(wx.EVT_TIMER, self.OnTimeUp)
    self.timer.Start(15, oneShot=True)
something like that ... although 15ms is very fast ...
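For the 150 ms requirement from the question, a minimal sketch (assuming the dialog and attribute names from the question's code) could clear the bitmap again with a one-shot wx.CallLater, which is the newer name for wx.FutureCall:

# Sketch: show the image in the randomly chosen slot, then hide it after 150 ms.
if self.randomSlot == 1:
    shown = self.picture_left
else:
    shown = self.picture_right
shown.SetBitmap(wx.Bitmap(self.imageName))
wx.CallLater(150, lambda: shown.SetBitmap(wx.NullBitmap))  # clears the bitmap after 150 ms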