Why is this Popen with threading not working? - python

I wrote a little tkinter GUI to handle 4 inputs to ffmpeg. Since the subprocess will take some time, I want to show the status of the process. Therefore I use threading so tkinter doesn't freeze while the subprocess is executed.
My problem is that with threading the ffmpeg command creates the destination file at 0 kB and nothing more is ever written to it. If I use my function without threading everything works, but the GUI freezes.
Here is the main part of the code:
def ffmpeg(v0, v1, v2, v3):
    cmd = [path + 'ffmpeg.exe', "-y", "-i", v0, "-i", v1, "-i", v2, '-i', v3,
           '-filter_complex',
           "[0:v][1:v]hstack[top];[2:v][3:v]hstack[bottom];[top][bottom]vstack,format=yuv420p[v]",
           '-map', "[v]", "out.mp4"]
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    while True:
        output = process.stdout.readline()
        inpu = process.stderr.readline()
        if output == b'' and process.poll() is not None:
            break
        if output:
            print(output.strip())  # HERE I will insert into tkinter textfield
    rc = process.poll()
def buttonClick(v0, v1, v2, v3):
    #ffmpeg(v0, v1, v2, v3)  # This line works
    t = threading.Thread(target=ffmpeg, args=(v0, v1, v2, v3))  # This doesn't work
    t.start()
    #t.join()
#tkvar list elements are absolute paths to the videofiles
submitButton = Button(mainframe, text="Process Video", command=lambda: buttonClick(tkvar[0].get(),tkvar[1].get(),tkvar[2].get(),tkvar[3].get()))
submitButton.grid(row = 7, column =3)
Why is my thread not working?

The problem was that
process.stdout.readline()
is always empty, because ffmpeg writes all of its progress output to stderr rather than stdout.
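A minimal sketch of a fix along those lines (the queue-based hand-off to tkinter is my own addition, not part of the original question): merge stderr into stdout so the worker thread sees ffmpeg's progress lines, and let the tkinter main loop pull them from a queue with after() instead of touching widgets from the worker thread.
import queue
import subprocess
import threading

log_queue = queue.Queue()

def ffmpeg(cmd):
    # stderr is merged into stdout so one readline() loop sees ffmpeg's progress
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
    for line in iter(process.stdout.readline, b''):
        log_queue.put(line.decode(errors='replace').rstrip())
    process.wait()

def poll_queue(text_widget, root):
    # runs in the tkinter main thread, so it is safe to touch widgets here
    while not log_queue.empty():
        text_widget.insert('end', log_queue.get_nowait() + '\n')
        text_widget.see('end')
    root.after(100, poll_queue, text_widget, root)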

Related

Running scripts at the same time but on different consoles [duplicate]


ProcessPoolExecutor not limiting to set value

I have a number of computation processes that need to be run. They take anywhere from 20 minutes to 1+ days. I want the user to be able to observe what each one is doing through its standard output, therefore I am executing each in its own cmd window. When I set the number of workers, the pool does not observe that value and keeps spinning up more and more processes until I cancel the program.
def run_job(args):
    os.system("start cmd /k \"{} > \"{}\\stdout.txt\"\"".format(run_command,
                                                                outpath))

CONCURRENCY_HANDLER = concurrent.futures.ProcessPoolExecutor(max_workers=3)
jobs = []
ALL_RUNS_MATRIX = [{k1:v1...kn:vn},
                   ....
                   {kA1,vA1...kAn,vAn}
                   ]
with CONCURRENCY_HANDLER as executor:
    for idx, configuration in enumerate(ALL_RUNS_MATRIX):
        generate_run_specific_files(configuration, idx)
        args = [doesnt, matter]
        time.sleep(5)
        print("running new")
        jobs.append(executor.submit(run_job, args))
    time.sleep(10)
I originally tried using ThreadPoolExecutor to the same effect. Why is this not actually limiting the number of jobs running concurrently, and if this won't work, what should I use instead? I need to retain this "generate -> wait -> run" path because of the nature of the program (I change a file that it reads for its config, it starts, retains all necessary info in memory, then executes), so I am wary of the "workers pull their work off a queue as they become available" model.
Not quite sure what you're trying to do. Maybe give us an example with a simple task that shows the same issue with processes? Are you thinking of max_workers as an upper bound on the total number of processes spawned? That might not be true: max_workers limits how many worker processes the pool itself keeps alive, not how many processes your jobs may spawn in turn. According to the docs,
If max_workers is None or not given, it will default to the number of processors on the machine. If max_workers is less than or equal to 0, then a ValueError will be raised. On Windows, max_workers must be less than or equal to 61. If it is not then ValueError will be raised. If max_workers is None, then the default chosen will be at most 61, even if more processors are available.
Here is a simple example,
from concurrent.futures import ProcessPoolExecutor
from time import sleep

futures = []

def job(i):
    print('Job started: ' + str(i))
    return i

def all_done():
    done = True
    for ft in futures:
        done = done and ft.done()
    return done

with ProcessPoolExecutor(max_workers=8) as executor:
    for i in range(3):
        futures.append(executor.submit(job, i))
    while not all_done():
        sleep(0.1)

for ft in futures:
    print('Job done: ' + str(ft.result()))
It prints,
Job started: 0
Job started: 1
Job started: 2
Job done: 0
Job done: 1
Job done: 2
Does this help?
As I mentioned in my comment, as soon as the start command is satisfied by opening up the new command window, the os.system call returns as completed, even though the run command being passed to cmd /K has only just started to run. Therefore the process in the pool is immediately free to run another task.
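A minimal sketch of that idea (not from the answer below, which takes a different, tkinter-based approach): have run_job itself wait for the real command to finish, for example by launching it with subprocess.Popen in its own console and calling wait(); run_command is the placeholder from the question. The trade-off is that the console window closes when the command ends, unlike with cmd /K.
import subprocess

def run_job(args):
    # open the command in its own console window and block until it exits,
    # so the ProcessPoolExecutor slot stays occupied for the whole run
    proc = subprocess.Popen(run_command, shell=True,
                            creationflags=subprocess.CREATE_NEW_CONSOLE)
    proc.wait()
    return proc.returncode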
If I understand your problem correctly, you have the following goals:
Detect the true completion of your command so that you ensure that no more than 3 commands are running concurrently.
Collect the output of the command in a window that will remain open even after the command has completed. I infer this from your having used the /K switch when invoking cmd.
My solution would be to use windows created by tkinter to hold your output and to use subprocess.Popen to run your commands with the argument shell=True. You can specify the additional argument stdout=PIPE to read the output from a command and funnel it to the tkinter window. How to actually do that is the challenge.
I have not done tkinter programming before and perhaps someone with more experience could find a more direct method. It seems to me that the windows need to be created and written to in the main thread. To that end, for every command that will be executed, a window (a special subclass of Tk called CmdWindow) will be created and paired with the command. The command and the output window number will be passed to a worker function run_command along with an instance of queue.Queue. run_command will then use subprocess.Popen to execute the command and, for every line of output it reads from the output pipe, it will write to the queue a tuple with the window number and the line to be written. The main thread is in a loop reading these tuples and writing the lines to the appropriate window. Because the main thread is occupied with writing command output, a separate thread is used to create a thread pool, submit all the commands that need to be run, and await their completion. When all tasks are completed, a special "end" record is added to the queue, signifying to the main thread that it can stop reading from the queue. At that point the main thread displays a 'Pausing for termination...' message and will not terminate until the user enters a carriage return at the console.
from concurrent.futures import ThreadPoolExecutor, as_completed
from subprocess import Popen, PIPE
from tkinter import *
from tkinter.scrolledtext import ScrolledText
from queue import Queue
from threading import Thread


class CmdWindow(Tk):
    """ A console window """
    def __init__(self, cmd):
        super().__init__()
        self.title(cmd)
        self.configure(background="#BAD0EF")
        title = Entry(self, relief=FLAT, bg="#BAD0EF", bd=0)
        title.pack(side=TOP)
        textArea = ScrolledText(self, height=24, width=120, bg="#FFFFFF", font=('consolas', '14'))
        textArea.pack(expand=True, fill='both')
        textArea.bind("<Key>", lambda e: "break")  # read only
        self._textArea = textArea

    def write(self, s):
        """ write the next line of output """
        self._textArea.insert(END, s)
        self.update()


def run_command(q, cmd, win):
    """ run command cmd with output window win """
    # special "create window" command:
    q.put((win, None))  # create the window
    with Popen(cmd, stdout=PIPE, shell=True, text=True) as proc:
        for line in iter(proc.stdout.readline, ''):
            # write line command:
            q.put((win, line))


def run_tasks(q, arguments):
    # we only need a thread pool since each command will be its own process:
    with ThreadPoolExecutor(max_workers=3) as executor:
        futures = []
        for win, cmd in arguments:
            futures.append(executor.submit(run_command, q, cmd, win))
        # each task doesn't currently return anything
        results = [future.result() for future in as_completed(futures)]
    q.put(None)  # signify end


def main():
    q = Queue()
    # sample commands to execute (under Windows):
    cmds = ['dir *.py', 'dir *.html', 'dir *.txt', 'dir *.js', 'dir *.csv']
    # each command will get its own window for output:
    windows = list(cmds)
    # pair a command with a window number:
    arguments = enumerate(cmds)
    # create the thread for running the commands:
    thread = Thread(target=run_tasks, args=(q, arguments))
    # start the thread:
    thread.start()
    # wait for command output in main thread
    # output must be written from main thread
    while True:
        t = q.get()  # get next tuple or special "end" record
        if t is None:  # special end record?
            break  # yes!
        # unpack tuple:
        win, line = t
        if line is None:  # special create window command
            # use cmd as title and replace with actual window:
            windows[win] = CmdWindow(windows[win])
        else:
            windows[win].write(line)
    thread.join()  # wait for run_jobs thread to end
    input('Pausing for termination...')  # wait for user to be finished looking at windows


if __name__ == '__main__':
    main()

How to collect output from a Python subprocess

I am trying to make a Python process that reads some input, processes it and prints out the result. The processing is done by a subprocess (Stanford's NER); for illustration I will use 'cat'. I don't know exactly how much output NER will give, so I run a separate thread to collect it all and print it out. The following example illustrates this.
import sys
import threading
import subprocess

# start my subprocess
cat = subprocess.Popen(
    ['cat'],
    shell=False, stdout=subprocess.PIPE, stdin=subprocess.PIPE,
    stderr=None)

def subproc_cat():
    """ Reads the subprocess output and prints out """
    while True:
        line = cat.stdout.readline()
        if not line:
            break
        print("CAT PROC: %s" % line.decode('UTF-8'))

# a daemon that runs the above function
th = threading.Thread(target=subproc_cat)
th.setDaemon(True)
th.start()

# the main thread reads from stdin and feeds the subprocess
while True:
    line = sys.stdin.readline()
    print("MAIN PROC: %s" % line)
    if not line:
        break
    cat.stdin.write(bytes(line.strip() + "\n", 'UTF-8'))
    cat.stdin.flush()
This seems to work well when I enter text with the keyboard. However, if I try to pipe input into my script (cat file.txt | python3 my_script.py), a race condition seems to occur: sometimes I get proper output, sometimes not, and sometimes it locks up. Any help would be appreciated!
I am running Ubuntu 14.04, Python 3.4.0. The solution should be platform-independent.
Add th.join() at the end, otherwise you may kill the thread prematurely before it has processed all the output when the main thread exits: daemon threads do not survive the main thread (or remove th.setDaemon(True) instead of adding th.join()).
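Concretely, the end of the script could look like this (closing cat.stdin is my addition, so the child sees EOF, exits, and the reader thread's readline() returns an empty line):
# ... feeding loop from the question ends here
cat.stdin.close()  # send EOF to the child so `cat` terminates
th.join()          # wait for the reader thread to print the remaining output
cat.wait()         # reap the child process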

How can I open two consoles from a single script

Apart from the script's own console (which does nothing) I want to open two consoles and print the variables con1 and con2 in the different consoles. How can I achieve this?
con1 = 'This is Console1'
con2 = 'This is Console2'
I've no idea how to achieve this and have spent several hours trying to do so with modules such as subprocess, but with no luck. I'm on Windows, by the way.
Edit:
Would the threading module do the job? Or is multiprocessing needed?
Eg:
If you don't want to reconsider your problem and use a GUI such as the one in @Kevin's answer, then you could use the subprocess module to start two new consoles concurrently and display two given strings in the opened windows:
#!/usr/bin/env python3
import sys
import time
from subprocess import Popen, PIPE, CREATE_NEW_CONSOLE

messages = 'This is Console1', 'This is Console2'

# open new consoles
processes = [Popen([sys.executable, "-c", """import sys
for line in sys.stdin: # poor man's `cat`
    sys.stdout.write(line)
    sys.stdout.flush()
"""],
                   stdin=PIPE, bufsize=1, universal_newlines=True,
                   # assume the parent script is started from a console itself e.g.,
                   # this code is _not_ run as a *.pyw file
                   creationflags=CREATE_NEW_CONSOLE)
             for _ in range(len(messages))]

# display messages
for proc, msg in zip(processes, messages):
    proc.stdin.write(msg + "\n")
    proc.stdin.flush()

time.sleep(10)  # keep the windows open for a while

# close windows
for proc in processes:
    proc.communicate("bye\n")
Here's a simplified version that doesn't rely on CREATE_NEW_CONSOLE:
#!/usr/bin/env python
"""Show messages in two new console windows simultaneously."""
import sys
import platform
from subprocess import Popen

messages = 'This is Console1', 'This is Console2'

# define a command that starts new terminal
if platform.system() == "Windows":
    new_window_command = "cmd.exe /c start".split()
else:  # XXX this can be made more portable
    new_window_command = "x-terminal-emulator -e".split()

# open new consoles, display messages
echo = [sys.executable, "-c",
        "import sys; print(sys.argv[1]); input('Press Enter..')"]
processes = [Popen(new_window_command + echo + [msg]) for msg in messages]

# wait for the windows to be closed
for proc in processes:
    proc.wait()
You can get something like two consoles using two Tkinter Text widgets.
from Tkinter import *
import threading

class FakeConsole(Frame):
    def __init__(self, root, *args, **kargs):
        Frame.__init__(self, root, *args, **kargs)

        #white text on black background,
        #for extra verisimilitude
        self.text = Text(self, bg="black", fg="white")
        self.text.pack()

        #list of things not yet printed
        self.printQueue = []

        #one thread will be adding to the print queue,
        #and another will be iterating through it.
        #better make sure one doesn't interfere with the other.
        self.printQueueLock = threading.Lock()

        self.after(5, self.on_idle)

    #check for new messages every five milliseconds
    def on_idle(self):
        with self.printQueueLock:
            for msg in self.printQueue:
                self.text.insert(END, msg)
                self.text.see(END)
            self.printQueue = []
        self.after(5, self.on_idle)

    #print msg to the console
    def show(self, msg, sep="\n"):
        with self.printQueueLock:
            self.printQueue.append(str(msg) + sep)

#warning! Calling this more than once per program is a bad idea.
#Tkinter throws a fit when two roots each have a mainloop in different threads.
def makeConsoles(amount):
    root = Tk()
    consoles = [FakeConsole(root) for n in range(amount)]
    for c in consoles:
        c.pack()
    threading.Thread(target=root.mainloop).start()
    return consoles

a, b = makeConsoles(2)

a.show("This is Console 1")
b.show("This is Console 2")

a.show("I've got a lovely bunch of coconuts")
a.show("Here they are standing in a row")

b.show("Lorem ipsum dolor sit amet")
b.show("consectetur adipisicing elit")
Result: (screenshot of the two console-like Text widgets omitted)
I don't know if it suits you, but you can open two Python interpreters using the Windows start command:
from subprocess import Popen
p1 = Popen('start c:\python27\python.exe', shell=True)
p2 = Popen('start c:\python27\python.exe', shell=True)
Of course there is the problem that Python now runs in interactive mode, which is not what you want (you can also pass a script file as a parameter and that file will be executed).
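For instance (a sketch; myscript.py stands for whatever script you want the new console to run):
from subprocess import Popen

# 'start' opens a new console window; passing a script path makes the new
# interpreter run it instead of dropping into interactive mode
p1 = Popen(r'start c:\python27\python.exe myscript.py', shell=True)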
On Linux I would try to make a named pipe, pass the name of the file to python.exe and write Python commands to that file. 'Maybe' it will work ;)
But I have no idea how to create a named pipe on Windows. Windows API... (fill in yourself).
pymux
pymux gets close to what you want: https://github.com/jonathanslenders/pymux
Unfortunately it is mostly a CLI tool replacement for tmux and does not have a decent programmatic API yet.
But hacking it up to expose that API is likely the most robust option if you are serious about this.
The README says:
Parts of pymux could become a library, so that any prompt_toolkit application can embed a vt100 terminal. (Imagine a terminal emulator embedded in pyvim.)
If you are on Windows you can use the win32console module to open a second console for your thread or subprocess output. This is the simplest and easiest way if you are on Windows.
Here is a sample code:
import win32console
import multiprocessing

def subprocess(queue):
    win32console.FreeConsole()   # frees the subprocess from using the main console
    win32console.AllocConsole()  # creates a new console; all input and output of the subprocess goes to this new console
    while True:
        print(queue.get())
        # prints any output produced by the main script and passed to the subprocess using the queue

if __name__ == '__main__':
    queue = multiprocessing.Queue()
    multiprocessing.Process(target=subprocess, args=[queue]).start()
    while True:
        print("Hello World")
        # and whatever else you want to do in your main process
You can also do this with threading. You have to use the queue module if you want the queue functionality, since the threading module doesn't provide a queue.
Here is the win32console module documentation
I used jfs' response. Here is my embellishment/theft of jfs' response.
This is tailored to run on Windows 10 and also handles Unicode:
# https://stackoverflow.com/questions/19479504/how-can-i-open-two-consoles-from-a-single-script
import sys, time, os, locale
from subprocess import Popen, PIPE, CREATE_NEW_CONSOLE

class console(Popen):

    NumConsoles = 0

    def __init__(self, color=None, title=None):
        console.NumConsoles += 1

        cmd = "import sys, os, locale"
        cmd += "\nos.system(\'color " + color + "\')" if color is not None else ""
        title = title if title is not None else "console #" + str(console.NumConsoles)
        cmd += "\nos.system(\"title " + title + "\")"
        # poor man's `cat`
        cmd += """
print(sys.stdout.encoding, locale.getpreferredencoding())
encoding = locale.getpreferredencoding()
for line in sys.stdin:
    sys.stdout.buffer.write(line.encode(encoding))
    sys.stdout.flush()
"""
        cmd = sys.executable, "-c", cmd
        # print(cmd, end="", flush=True)
        super().__init__(cmd, stdin=PIPE, bufsize=1, universal_newlines=True,
                         creationflags=CREATE_NEW_CONSOLE, encoding='utf-8')

    def write(self, msg):
        self.stdin.write(msg + "\n")

if __name__ == "__main__":
    myConsole = console(color="c0", title="test error console")
    myConsole.write("Thank you jfs. Cool explanation")
    NoTitle = console()
    NoTitle.write("default color and title! This answer uses Windows 10")
    NoTitle.write(u"♥♥♥♥♥♥♥♥")
    NoTitle.write("♥")
    time.sleep(5)
    myConsole.terminate()
    NoTitle.write("some more text. Run this at the python console.")
    time.sleep(4)
    NoTitle.terminate()
    time.sleep(5)
Do you know about screen/tmux?
How about tmuxp? For example, you can try to run cat in split panes and use "send-keys" to send output (but dig through the docs; maybe there are even easier ways to achieve this).
As a side bonus this will work in the text console or GUI.

Python, Tkinter - ttk.Progressbar in a separate thread

I googled a lot on this but I still haven't found what I'm looking for.
This is a classic question, I guess, but I still can't figure it out.
I have this Python/Tkinter code. The code starts a pretty CPU-heavy process by trivially calling it with os.system(cmd). I want a progress bar (an oscillating one, not a progressive one) which shows users that something is actually happening.
I guess I just have to somehow start the thread containing the progress bar before calling os.system, then call os.system while the progress bar thread is running, and finally close the progress bar thread and destroy the associated Toplevel().
I mean, Python is pretty flexible; is it possible to do this without much pain?
I know killing a thread from another thread is unsafe (due to data sharing), but these two threads do not share any data as far as I know.
Would it be possible to go like this:
progressbar_thread.start()
os.system(...)
progressbar_thread.kill()
If that's not possible, I still don't understand how to pass 'signal' variables between the two threads.
Thank you,
Andrea
Is this the type of thing you are after?
from Tkinter import *
import ttk, threading

class progress():
    def __init__(self, parent):
        toplevel = Toplevel(tk)
        self.progressbar = ttk.Progressbar(toplevel, orient=HORIZONTAL, mode='indeterminate')
        self.progressbar.pack()
        self.t = threading.Thread()
        self.t.__init__(target=self.progressbar.start, args=())
        self.t.start()
        #if self.t.isAlive() == True:
        #    print 'worked'

    def end(self):
        if self.t.isAlive() == False:
            self.progressbar.stop()
            self.t.join()

def printmsg():
    print 'proof a new thread is running'

tk = Tk()
new = progress(tk)
but1 = ttk.Button(tk, text='stop', command=new.end)
but2 = ttk.Button(tk, text='test', command=printmsg)
but1.pack()
but2.pack()
tk.mainloop()
You don't need threads in this case. Just use subprocess.Popen to start the subprocess.
To notify the GUI when the process ends, you could implement polling using the widget.after() method:
process = Popen(['/path/to/command', 'arg1', 'arg2', 'etc'])
progressbar.start()

def poller():
    if process.poll() is None:  # process is still running
        progressbar.after(delay, poller)  # continue polling
    else:
        progressbar.stop()  # process ended; stop progress bar

delay = 100  # milliseconds
progressbar.after(delay, poller)  # call poller() in `delay` milliseconds
If you want to stop the process manually without waiting:
if process.poll() is None:  # process is still running
    process.terminate()
    # kill process in a couple of seconds if it is not terminated
    progressbar.after(2000, kill_process, process)

def kill_process(process):
    if process.poll() is None:
        process.kill()
    process.wait()
Here's a complete example.
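A minimal sketch of what such a complete example might look like (Python 3 names; the sleeping child process is just a stand-in for the real long-running command):
import sys
import tkinter as tk
from tkinter import ttk
from subprocess import Popen

root = tk.Tk()
progressbar = ttk.Progressbar(root, mode='indeterminate')
progressbar.pack(padx=10, pady=10)

# stand-in for the CPU-heavy command; replace with your own
process = Popen([sys.executable, '-c', 'import time; time.sleep(5)'])
progressbar.start()
delay = 100  # milliseconds between polls

def poller():
    if process.poll() is None:        # still running
        progressbar.after(delay, poller)
    else:
        progressbar.stop()            # finished; stop the animation

progressbar.after(delay, poller)
root.mainloop()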
