Run a .bat program in the background on Windows - Python

I am trying to run a .bat file (which acts as a simulator) in a new window, so it must always be running in the background. I think that creating a new process is the only option that I have. Basically, I want my code to do something like this:
def startSim():
    # Open the .bat file in a new window.
    os.system("startsim.bat")
    # Continue doing other stuff here.
    print("Simulator started")
I'm on Windows so I can't do os.fork.

Use subprocess.Popen (not tested on Windows, but should work).
import subprocess
import time

def startSim():
    child_process = subprocess.Popen("startsim.bat")
    # Do your stuff here.
    # You can terminate the child process when you are done.
    child_process.terminate()
    # Give it some time to terminate before killing it.
    time.sleep(1)
    if child_process.poll() is None:
        # It has not terminated. Kill it.
        child_process.kill()
Edit: you could also use os.startfile (Windows only, also untested).
import os

def startSim():
    os.startfile("startsim.bat")
    # Do your stuff here.
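If you specifically want the batch file to get its own console window while your script keeps going, here is a minimal sketch (untested here, Windows-only; subprocess.CREATE_NEW_CONSOLE is a standard subprocess constant on Windows):

import subprocess

def startSim():
    # CREATE_NEW_CONSOLE gives the child its own console window;
    # Popen returns immediately, so this script continues running.
    return subprocess.Popen("startsim.bat",
                            creationflags=subprocess.CREATE_NEW_CONSOLE)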

Looks like you want "os.spawn*", which seems to equate to os.fork, but for Windows.
Some searching turned up this example:
# File: os-spawn-example-3.py
import os

if os.name in ("nt", "dos"):
    exefile = ".exe"
else:
    exefile = ""

def spawn(program, *args):
    try:
        # Check if the os module provides a shortcut.
        return os.spawnvp(os.P_WAIT, program, (program,) + args)
    except AttributeError:
        pass
    try:
        spawnv = os.spawnv
    except AttributeError:
        # Assume it's Unix.
        pid = os.fork()
        if not pid:
            os.execvp(program, (program,) + args)
        return os.wait()[0]
    else:
        # Got spawnv but no spawnvp: go look for an executable.
        for path in os.environ["PATH"].split(os.pathsep):
            file = os.path.join(path, program) + exefile
            try:
                return spawnv(os.P_WAIT, file, (file,) + args)
            except os.error:
                pass
        raise IOError("cannot find executable")

# Try it out!
spawn("python", "hello.py")
print("goodbye")

On Windows, a long-running background process is called a "service". Check this other question about how to create a Windows service with Python: Creating a python win32 service
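For reference, a minimal service skeleton using the pywin32 package might look like this (a sketch following the usual win32serviceutil pattern; the service name and the simulator-launching step are placeholders):

import win32event
import win32service
import win32serviceutil

class SimulatorService(win32serviceutil.ServiceFramework):
    _svc_name_ = "SimulatorService"          # hypothetical service name
    _svc_display_name_ = "Simulator Service"

    def __init__(self, args):
        win32serviceutil.ServiceFramework.__init__(self, args)
        self.stop_event = win32event.CreateEvent(None, 0, 0, None)

    def SvcStop(self):
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        win32event.SetEvent(self.stop_event)

    def SvcDoRun(self):
        # Start startsim.bat here, then block until the service is stopped.
        win32event.WaitForSingleObject(self.stop_event, win32event.INFINITE)

if __name__ == '__main__':
    win32serviceutil.HandleCommandLine(SimulatorService)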

import subprocess

proc = subprocess.Popen(['/path/script.bat'],
                        stdout=subprocess.PIPE,
                        stderr=subprocess.STDOUT)

Using subprocess.Popen() will run the given .bat path (or any other executable) without blocking.
If you do wish to wait for the process to finish, just add proc.wait():

proc.wait()
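One caveat: since stdout is a PIPE here, a bare proc.wait() can deadlock if the child writes more output than the pipe buffer holds. A safer sketch (same placeholder path as above) reads the pipe and waits in one call:

import subprocess

proc = subprocess.Popen(['/path/script.bat'],
                        stdout=subprocess.PIPE,
                        stderr=subprocess.STDOUT)
# communicate() drains the pipe to EOF and then waits for the process,
# so the child can never block on a full pipe buffer.
out, _ = proc.communicate()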

Related

How to stop CMD (from subprocess.popen) closing when an Exception is raised?

I'm spawning multiple CMD windows from a given Python file using subprocess.Popen, all with an input() at the end. The problem is that if any exception is raised in the code, the window just closes and I can't see what happened.
I want it either to stay open no matter the error, so I can see it, or to report the error back to the main window, like "this script failed to run because of this...".
I'm running this on Windows:
import sys
import platform
from subprocess import Popen, PIPE

pipelines = [("Name1", "path1"),
             ("Name2", "path2")]

# Define a command that starts a new terminal.
if platform.system() == "Windows":
    new_window_command = "cmd.exe /c start".split()
else:  # XXX this can be made more portable
    new_window_command = "x-terminal-emulator -e".split()

processes = []
for i in range(len(pipelines)):
    # Open new consoles, display messages.
    echo = [sys.executable, "-c",
            "import sys; print(sys.argv[1]); from {} import {}; obj = {}(); obj.run(); input('Press Enter..')".format(pipelines[i][1], pipelines[i][0], pipelines[i][0])]
    processes.append(Popen(new_window_command + echo + [pipelines[i][0]]))

for proc in processes:
    proc.wait()
To see the error, try to wrap the desired code fragment in try / except
try:
    ...
except Exception as e:
    print(e)
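For example, a sketch of what the spawned command could run (Name1/path1 are the placeholders from the question; traceback keeps the full error visible in the new window):

import traceback

try:
    from path1 import Name1   # placeholder module/class from the question
    obj = Name1()
    obj.run()
except Exception:
    traceback.print_exc()     # print the full traceback instead of closing
finally:
    input('Press Enter..')    # keep the console window open either way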

Python subprocess in .exe

I'm creating a Python script that will copy files and folders over the network. It's cross-platform, so I make an .exe file using cx_Freeze.
I used the Popen method of the subprocess module.
If I run the .py file it works as expected, but when I create the .exe the subprocess is not created on the system.
I've gone through all the documentation of the subprocess module but didn't find any solution.
Everything else (I am using Tkinter, which also works fine) works in the .exe except subprocess.
Any idea how I can call a subprocess from the .exe file?
This file calls another .py file:
def start_scheduler_action(self, scheduler_id, scheduler_name, list_index):
    scheduler_detail = db.get_scheduler_detail_using_id(scheduler_id)
    for detail in scheduler_detail:
        source_path = detail[2]
    if not os.path.exists(source_path):
        showerror("Invalid Path", "Please select valid path", parent=self.new_frame)
        return
    self.forms.new_scheduler.start_scheduler_button.destroy()
    # Create stop scheduler button
    if getattr(self.forms.new_scheduler, "stop_scheduler_button", None) == None:
        self.forms.new_scheduler.stop_scheduler_button = tk.Button(self.new_frame, text='Stop scheduler', width=10, command=lambda: self.stop_scheduler_action(scheduler_id, scheduler_name, list_index))
        self.forms.new_scheduler.stop_scheduler_button.grid(row=11, column=1, sticky=E, pady=10, padx=1)
    scheduler_id = str(scheduler_id)
    # Get python paths
    if sys.platform == "win32":
        proc = subprocess.Popen(['where', "python"], env=None, stdout=subprocess.PIPE)
    else:
        proc = subprocess.Popen(['which', "python"], env=None, stdout=subprocess.PIPE)
    out, err = proc.communicate()
    if err or not out:
        showerror("", "Python not found", parent=self.new_frame)
    else:
        try:
            paths = out.split(os.pathsep)
            # Create python path
            python_path = (paths[len(paths) - 1]).split('\n')[0]
            cmd = os.path.realpath('scheduler.py')
            #cmd = 'scheduler.py'
            if sys.platform == "win32":
                python_path = python_path.splitlines()
            else:
                python_path = python_path
            # Run the scheduler file using scheduler id
            proc = subprocess.Popen([python_path, cmd, scheduler_id], env=None, stdout=subprocess.PIPE)
            message = "Started the scheduler : %s" % (scheduler_name)
            showinfo("", message, parent=self.new_frame)
            # Add process id to scheduler table
            process_id = proc.pid
            #showinfo("pid", process_id, parent=self.new_frame)
            def get_process_id(name):
                child = subprocess.Popen(['pgrep', '-f', name], stdout=subprocess.PIPE, shell=False)
                response = child.communicate()[0]
                return [int(pid) for pid in response.split()]
            print(get_process_id(scheduler_name))
            # Add the process id in database
            self.db.add_process_id(scheduler_id, process_id)
            # Add the is_running status in database
            self.db.add_status(scheduler_id)
        except Exception as e:
            showerror("", e)
And this file is called:
def scheduler_copy():
    date = strftime("%m-%d-%Y %H %M %S", localtime())
    logFile = scheduler_name + "_" + scheduler_id + "_" + date + ".log"
    #file_obj = open(logFile, 'w')
    # Call __init__ method of xcopy file
    xcopy = XCopy(connection_ip, username, password, client_name, server_name, domain_name)
    check = xcopy.connect()
    # Create a log file for the scheduler
    file_obj = open(logFile, 'w')
    if check is False:
        file_obj.write("Problem in connection..Please check connection..!!")
        return
    scheduler_next_run = schedule.next_run()
    scheduler_next_run = "Next run at: " + str(scheduler_next_run)
    # If checkbox_value is selected, copy all the files to a new directory
    if checkbox_value == 1:
        new_destination_path = xcopy.create_backup_directory(share_folder, destination_path, date)
    else:
        new_destination_path = destination_path
    # Call backup method for copying data from source to destination
    try:
        xcopy.backup(share_folder, source_path, new_destination_path, file_obj, exclude)
        file_obj.write("Scheduler completed successfully..\n")
    except Exception as e:
        # Write the error message of the scheduler to the log file
        file_obj.write("Scheduler failed to copy all data..\nProblem in connection..Please check connection..!!\n")
        # #file_obj.write("Error while scheduling")
        # return
    # Write the details of the scheduler to the log file
    file_obj.write("Total skipped unmodified file:")
    file_obj.write(str(xcopy.skipped_unmodified_count))
    file_obj.write("\n")
    file_obj.write("Total skipped file:")
    file_obj.write(str(xcopy.skipped_file))
    file_obj.write("\n")
    file_obj.write("Total copied file:")
    file_obj.write(str(xcopy.copy_count))
    file_obj.write("\n")
    file_obj.write("Total skipped folder:")
    file_obj.write(str(xcopy.skipped_folder))
    file_obj.write("\n")
    # file_obj.write(scheduler_next_run)
    file_obj.close()
There is some awkwardness in your source code, but I won't spend time on that. For instance, if you want to find the source_path, it's better to use a for loop with break/else:
for detail in scheduler_detail:
    source_path = detail[2]
    break  # found
else:
    # Not found: raise an exception
    ...
Some advice:
Try to separate the user-interface code from the sub-processing; avoid mixing the two.
Use exceptions and exception handlers.
If you want portable code, avoid system calls (there is no pgrep on Windows).
Since your application is packaged in a virtualenv (I assume cx_Freeze does this kind of thing), you have no access to the system-wide Python. You often don't even have one on Windows. So you need to use the packaged Python (this is a best practice anyway).
If you want to call a Python script as a subprocess, that means you have two packaged applications: you need to create an exe for the main application and another for the scheduler.py script. But then it is not easy to communicate with it.
Another solution is to use multiprocessing to spawn a new Python process. Since you don't want to wait for the end of the processing (which may be long), you need to create daemon processes. The way to do that is explained in the multiprocessing documentation.
Basically:
import time
from multiprocessing import Process

def f(name):
    print('hello', name)

if __name__ == '__main__':
    p = Process(target=f, args=('bob',))
    p.daemon = True
    p.start()
    # Let it live and die; don't call p.join().
    time.sleep(1)
Of course, we need to adapt that to your problem.
Here is how I would do that (I removed UI-related code for clarity):
import scheduler

class SchedulerError(Exception):
    pass

class YourClass(object):
    def start_scheduler_action(self, scheduler_id, scheduler_name, list_index):
        scheduler_detail = db.get_scheduler_detail_using_id(scheduler_id)
        for detail in scheduler_detail:
            source_path = detail[2]
            break
        else:
            raise SchedulerError("Invalid Path: missing source path")
        if not os.path.exists(source_path):
            raise SchedulerError("Invalid Path: please select a valid path")
        p = Process(target=scheduler.scheduler_copy, args=(source_path,))
        p.daemon = True
        p.start()
        self.db.add_process_id(scheduler_id, p.pid)
To check if your process is still running, I recommend using psutil. It's a really great tool!
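For instance, a sketch of such a check (is_scheduler_running and the 'scheduler.py' match are illustrative, not part of the original code):

import psutil

def is_scheduler_running(pid):
    # pid_exists() only says that some process has that id; comparing
    # the command line guards against the pid having been reused.
    if not psutil.pid_exists(pid):
        return False
    return 'scheduler.py' in ' '.join(psutil.Process(pid).cmdline())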
You can define your scheduler.py script like that:
def scheduler_copy(source_path):
    ...
Multiprocessing vs Threading Python
Quoting this answer: https://stackoverflow.com/a/3044626/1513933
The threading module uses threads, the multiprocessing module uses processes. The difference is that threads run in the same memory space, while processes have separate memory. This makes it a bit harder to share objects between processes with multiprocessing. Since threads use the same memory, precautions have to be taken or two threads will write to the same memory at the same time. This is what the global interpreter lock is for.
Here, the advantage of multiprocessing over multithreading is that you can kill (or terminate) a process; you can't kill a thread. You may need psutil for that.
This is not the exact solution you are looking for, but the following suggestions should be preferred, for two reasons:
They are the more Pythonic way.
subprocess is slightly expensive.
Suggestions you can consider:
Don't use subprocess for fetching the system path. Check os.getenv('PATH') instead to find out whether python is on the path (see the sketch after this list). On Windows, one has to add Python to the path manually, or else you can check directly in Program Files, I guess.
For checking process IDs you can try psutil. A wonderful answer is provided here at how do I get the process list in Python?
Calling another script from a Python script does not look cool. Not bad, but I would not prefer it at all.
In the above code, the line if sys.platform == "win32": has the same value in the if and else conditions, so you don't need a conditional statement here.
You wrote pretty fine working code, I must say. Keep coding!
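As promised above, a sketch of the first suggestion using only the standard library (shutil.which() searches os.environ['PATH'] on both Windows and Unix):

import shutil
import sys

# shutil.which() returns the full path of the executable, or None.
python_path = shutil.which('python')
if python_path is None:
    print('Python not found on PATH')

# When the caller is itself a Python program, sys.executable
# avoids the search entirely.
print(sys.executable)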
If you want to run a subprocess in an exe file, then you can use
import subprocess

program = 'example'
arguments = '/command'
subprocess.call([program, arguments])

python-daemon to start an independent process but let the main application continue?

Okay, I'm officially out of ideas after running each and every sample I could find on Google up to the 19th page. I have a "provider" script. The goal of this Python script is to start up other services that run indefinitely even after this "provider" has stopped running. Basically: start the process, then forget about it, but continue the script without stopping it...
My problem: python-daemon... I have actions (web-service calls to start/stop/get status of the started services). I create the start commands on the fly and perform variable substitution on the config files as required.
Let's start from this point: I have a command to run (a bash script that executes a java process, a long-running service that will be stopped sometime later).
def start(command, working_directory):
    pidfile = os.path.join(working_directory, 'application.pid')
    # I expect the pid of the started application to be here.
    # The file is not created. Nothing is there.
    context = daemon.DaemonContext(working_directory=working_directory,
                                   pidfile=daemon.pidfile.PIDLockFile(pidfile))
    with context:
        psutil.Popen(command)

    # This part never runs. Even if I put a simple print statement at this
    # point, it never appears. Debugging in PyCharm shows that my script
    # returns with 0 on "with context".
    with open(pidfile, 'r') as pf:
        pid = pf.read()
    return pid
From here on, in my caller of this method, I prepare a JSON object to return to the client, which essentially contains an instance_id (don't mind it) and a pid (that'll be used to stop this process in another request).
What happens? After with context, my application exits with status 0: nothing is returned, no JSON response gets created, no pidfile gets created; only the executed psutil.Popen command runs. How can I achieve what I need? I need an independently running process, and I need to know its PID in order to stop it later. The executed process must keep running even if the current Python script stops for some reason. I can't get around the shell script, as that application is not mine; I have to use what I have.
Thanks for any tip!
Edit:
I tried simply using Popen from psutil/subprocess, with a somewhat more promising result.
def start(self, command):
    import psutil  # or subprocess
    proc = psutil.Popen(command)
    return str(proc.pid)
Now if I debug the application and wait some undefined time on the return statement, everything works great! The service is running, the pid is there, and I can stop it later on. Then I simply ran the provider without debugging. It returns the pid, but the process is not running. It seems Popen has no time to start the service because the whole provider stops first.
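One way to avoid that race, on POSIX at least, is to detach the child into its own session so it survives the provider's exit (a sketch, untested with the asker's exact setup):

import subprocess

def start(command):
    # start_new_session=True calls setsid() in the child (POSIX only),
    # so the service keeps running after this provider script exits.
    proc = subprocess.Popen(command, start_new_session=True)
    return str(proc.pid)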
Update:
Using os.fork:
@staticmethod
def __start_process(command, working_directory):
    pid = os.fork()
    if pid == 0:
        os.chdir(working_directory)
        proc = psutil.Popen(command)
        with open('application.pid', 'w') as pf:
            pf.write(proc.pid)

def start(self):
    ...
    __start_process(command, working_directory)
    with open(os.path.join(working_directory, 'application.pid'), 'r') as pf:
        pid = int(pf.read())
    proc = psutil.Process(pid)
    print("RUNNING" if proc.status() == psutil.STATUS_RUNNING else "...")
After running the above sample, RUNNING is written to the console. But after the main script exits, because I'm not fast enough:

ps auxf | grep

No instances are running...
Checking the pidfile: sure, it's there, it was created:

cat /application.pid

EMPTY, 0 bytes
From the multiple partial tips I got, I finally managed to get it working...
def start(command, working_directory):
    pid = os.fork()
    if pid == 0:
        os.setsid()
        os.umask(0)  # I'm not sure about this, not on my notebook at the moment
        os.execv(command[0], command)  # This was strange, as I needed to use the name of the shell script twice: command, argv[0], [args]. Upon using ksh as command I got a nice error...
    else:
        with open(os.path.join(working_directory, 'application.pid'), 'w') as pf:
            pf.write(str(pid))
        return pid
That together solved the issue. The started process is not a child process of the running Python script and won't stop when the script terminates.
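For completeness, a matching stop() could read the pid back and signal the whole session created by os.setsid() (a sketch, POSIX-only; the pidfile name follows the start() above):

import os
import signal

def stop(working_directory):
    with open(os.path.join(working_directory, 'application.pid')) as pf:
        pid = int(pf.read())
    # After setsid() the child leads its own process group, so this
    # terminates the service and anything it spawned.
    os.killpg(os.getpgid(pid), signal.SIGTERM)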
Have you tried with os.fork()?
In a nutshell, os.fork() spawns a new process and returns the PID of that new process.
You could do something like this:
#!/usr/bin/env python
import os
import subprocess
import sys
import time

command = 'ls'              # YOUR COMMAND
working_directory = '/etc'  # YOUR WORKING DIRECTORY

def child(command, directory):
    print("I'm the child process, will execute '%s' in '%s'" % (command, directory))
    # Change working directory
    os.chdir(directory)
    # Execute command
    cmd = subprocess.Popen(command,
                           shell=True,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE,
                           stdin=subprocess.PIPE)
    # Retrieve output and error(s), if any
    output = (cmd.stdout.read() + cmd.stderr.read()).decode()
    print(output)
    # Exiting
    print('Child process ending now')
    sys.exit(0)

def main():
    print("I'm the main process")
    pid = os.fork()
    if pid == 0:
        child(command, working_directory)
    else:
        print('A subprocess was created with PID: %s' % pid)
        # Do stuff here ...
        time.sleep(5)
        print('Main process ending now.')
        sys.exit(0)

if __name__ == '__main__':
    main()
Further info:
Documentation: https://docs.python.org/2/library/os.html#os.fork
Examples: http://www.python-course.eu/forking.php
Another related-question: Regarding The os.fork() Function In Python

open and close application sequentially in python

I am trying to open and close an application sequentially. The problem is that although the application opens, to reach the next line (the one that closes the application) I have to close the application manually.
import os
os.system("scad3 file.txt")
os.system("TASKKILL /PID scad3.exe /T")
scad3 is the application I wish to run, but to reach the next line, i.e. the taskkill line, I have to close the window manually.
Please let me know if there is any way to solve this.
Thank you very much in advance.
os.system is a blocking call: it does not return until the command finishes. Try using Popen objects in Python instead:
import subprocess
p = subprocess.Popen("notepad.exe")
p.terminate()
Refer to: https://docs.python.org/2/library/subprocess.html#popen-objects
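On Python 3.3+ you can also give the application a bounded amount of time before killing it (a sketch; the 30-second budget is arbitrary):

import subprocess

p = subprocess.Popen(["scad3", "file.txt"])
try:
    p.wait(timeout=30)           # let it finish on its own if it can
except subprocess.TimeoutExpired:
    p.terminate()                # the Python equivalent of TASKKILL here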
You can try using Popen to execute the command, then wait a given time and try to get the result, or kill the subprocess if it hasn't finished.
import subprocess

def get_array_from_cmd_str(cmd_str):
    return cmd_str.split(" ")

def run_command_str(command):
    out, err = subprocess.Popen(get_array_from_cmd_str(command),
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE).communicate()
    resp = {'out': out,
            'err': err}
    return resp
To run a command, use the "run_command_str" function above in this way:

import time

cmd = "scad3 file.txt"
cmd_out = run_command_str(cmd)
expected_execution_time = 5
time.sleep(expected_execution_time)
if cmd_out['err']:
    pass  # handle error here

Now, if your program does not close automatically, you can modify the approach to kill it manually using the methods described in this thread.
(Examples not tested on Windows.)
EDIT: code modified according to a valuable comment. This example makes a blocking call and does not address the issue; use the other answers.

Check to see if python script is running

I have a Python daemon running as a part of my web app. How can I quickly check (using Python) if my daemon is running and, if not, launch it?
I want to do it that way to fix any crashes of the daemon, so that the script does not have to be run manually: it will automatically run as soon as it is called and then stay running.
How can I check (using Python) if my script is running?
A technique that is handy on a Linux system is using domain sockets:
import socket
import sys
import time

def get_lock(process_name):
    # Without holding a reference to our socket somewhere it gets garbage
    # collected when the function exits
    get_lock._lock_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
    try:
        # The null byte (\0) means the socket is created
        # in the abstract namespace instead of being created
        # on the file system itself.
        # Works only in Linux
        get_lock._lock_socket.bind('\0' + process_name)
        print('I got the lock')
    except socket.error:
        print('lock exists')
        sys.exit()

get_lock('running_test')
while True:
    time.sleep(3)
It is atomic, and it avoids the problem of having lock files lying around if your process gets sent a SIGKILL.
You can read in the documentation for socket.close that sockets are automatically closed when garbage collected.
Drop a pidfile somewhere (e.g. /tmp). Then you can check to see if the process is running by checking to see if the PID in the file exists. Don't forget to delete the file when you shut down cleanly, and check for it when you start up.
#!/usr/bin/env python
import os
import sys

pid = str(os.getpid())
pidfile = "/tmp/mydaemon.pid"

if os.path.isfile(pidfile):
    print("%s already exists, exiting" % pidfile)
    sys.exit()

with open(pidfile, 'w') as f:
    f.write(pid)
try:
    pass  # Do some actual work here
finally:
    os.unlink(pidfile)
Then you can check whether the process is running by checking whether the PID in /tmp/mydaemon.pid belongs to an existing process. Monit (mentioned in another answer) can do this for you, or you can write a simple shell script to check it using the return code from ps.
ps up `cat /tmp/mydaemon.pid ` >/dev/null && echo "Running" || echo "Not running"
For extra credit, you can use the atexit module to ensure that your program cleans up its pidfile under any circumstances (when killed, exceptions raised, etc.).
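A sketch of that extra-credit version (same pidfile as above; note that atexit runs on normal exit and unhandled exceptions, but not on SIGKILL):

import atexit
import os
import sys

pidfile = "/tmp/mydaemon.pid"

if os.path.isfile(pidfile):
    sys.exit("%s already exists, exiting" % pidfile)

with open(pidfile, 'w') as f:
    f.write(str(os.getpid()))

# Best-effort cleanup: registered functions run at interpreter shutdown.
atexit.register(os.unlink, pidfile)

# Do some actual work here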
The pid library can do exactly this.
from pid import PidFile

with PidFile():
    do_something()
It will also automatically handle the case where the pidfile exists but the process is not running.
My solution is to check for the process and its command-line arguments.
Tested on Windows and Ubuntu Linux.

import psutil
import os

def is_running(script):
    for q in psutil.process_iter():
        if q.name().startswith('python'):
            if len(q.cmdline()) > 1 and script in q.cmdline()[1] and q.pid != os.getpid():
                print("'{}' Process is already running".format(script))
                return True
    return False

if not is_running("test.py"):
    n = input("What is Your Name? ")
    print("Hello " + n)
Of course the example from Dan will not work as it should.
Indeed, if the script crashes, raises an exception, or does not clean up the pid file, the script will be run multiple times.
I suggest the following, based on another website:
This is to check if a lock file already exists:
#!/usr/bin/env python
import os
import sys

if os.access(os.path.expanduser("~/.lockfile.vestibular.lock"), os.F_OK):
    # If the lockfile is already there then check the PID number
    # in the lock file
    pidfile = open(os.path.expanduser("~/.lockfile.vestibular.lock"), "r")
    pidfile.seek(0)
    old_pid = pidfile.readline()
    # Now we check whether the PID from the lock file matches a current
    # process PID
    if os.path.exists("/proc/%s" % old_pid):
        print("You already have an instance of the program running")
        print("It is running as process %s," % old_pid)
        sys.exit(1)
    else:
        print("File is there but the program is not running")
        print("Removing lock file for the: %s as it can be there because of the program last time it was run" % old_pid)
        os.remove(os.path.expanduser("~/.lockfile.vestibular.lock"))
This is the part of the code where we put the PID in the lock file:
pidfile = open(os.path.expanduser("~/.lockfile.vestibular.lock"), "w")
pidfile.write("%s" % os.getpid())
pidfile.close()
This code will check the value of the pid against existing running processes, avoiding double execution.
I hope it helps.
There are very good packages for restarting processes on UNIX. One that has a great tutorial about building and configuring it is monit. With some tweaking you can have rock-solid, proven technology keeping your daemon up.
Came across this old question looking for a solution myself.
Use psutil:
import psutil
import sys
from subprocess import Popen

for process in psutil.process_iter():
    if process.cmdline() == ['python', 'your_script.py']:
        sys.exit('Process found: exiting.')

print('Process not found: starting it.')
Popen(['python', 'your_script.py'])
There are a myriad of options. One method is using system calls, or Python libraries that perform such calls for you. Another is simply to spawn a process like:

ps ax | grep processName

and parse the output. Many people choose this approach; it isn't necessarily a bad approach in my view.
I'm a big fan of Supervisor for managing daemons. It's written in Python, so there are plenty of examples of how to interact with or extend it from Python. For your purposes the XML-RPC process control API should work nicely.
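For instance, a sketch talking to supervisord over XML-RPC (the URL and the program name 'mydaemon' depend entirely on your supervisord.conf):

import xmlrpc.client

server = xmlrpc.client.ServerProxy('http://localhost:9001/RPC2')
info = server.supervisor.getProcessInfo('mydaemon')
if info['statename'] != 'RUNNING':
    # supervisord restarts it; no pidfile bookkeeping on our side.
    server.supervisor.startProcess('mydaemon')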
Try this other version:

import os
import sys

def checkPidRunning(pid):
    '''Check for the existence of a Unix pid.'''
    try:
        os.kill(pid, 0)
    except OSError:
        return False
    else:
        return True

# Entry point
if __name__ == '__main__':
    pid = str(os.getpid())
    # __program__ is assumed to be defined elsewhere.
    pidfile = os.path.join("/", "tmp", __program__ + ".pid")

    if os.path.isfile(pidfile) and checkPidRunning(int(open(pidfile, 'r').readlines()[0])):
        print("%s already exists, exiting" % pidfile)
        sys.exit()
    else:
        open(pidfile, 'w').write(pid)

    # Do some actual work here
    main()

    os.unlink(pidfile)
Rather than developing your own PID file solution (which has more subtleties and corner cases than you might think), have a look at supervisord -- this is a process control system that makes it easy to wrap job control and daemon behaviors around an existing Python script.
The other answers are great for things like cron jobs, but if you're running a daemon you should monitor it with something like daemontools.
ps ax | grep processName
Note: if you debug your script in PyCharm, this kind of check will always make it exit, because the debugger's own command line also contains the script name:

pydevd.py --multiproc --client 127.0.0.1 --port 33882 --file processName
Try this:

#!/usr/bin/env python
import os, sys, atexit

try:
    # Set PID file
    def set_pid_file():
        pid = str(os.getpid())
        f = open('myCode.pid', 'w')
        f.write(pid)
        f.close()

    def goodby():
        os.remove('myCode.pid')

    atexit.register(goodby)
    set_pid_file()
    # Place your code here
except KeyboardInterrupt:
    sys.exit(0)
Here is a more useful version (it also checks that it is actually Python executing the script):

#!/usr/bin/env python
import os
from sys import exit

def checkPidRunning(pid):
    global script_name
    if pid < 1:
        print("Incorrect pid number!")
        exit()
    try:
        os.kill(pid, 0)
    except OSError:
        print("Abnormal termination of previous process.")
        return False
    else:
        ps_command = "ps -o command= %s | grep -Eq 'python .*/%s'" % (pid, script_name)
        process_exist = os.system(ps_command)
        if process_exist == 0:
            return True
        else:
            print("Process with pid %s is not a Python process. Continue..." % pid)
            return False

if __name__ == '__main__':
    script_name = os.path.basename(__file__)
    pid = str(os.getpid())
    pidfile = os.path.join("/", "tmp/", script_name + ".pid")
    if os.path.isfile(pidfile):
        print("Warning! Pid file %s existing. Checking for process..." % pidfile)
        r_pid = int(open(pidfile, 'r').readlines()[0])
        if checkPidRunning(r_pid):
            print("Python process with pid = %s is already running. Exit!" % r_pid)
            exit()
        else:
            open(pidfile, 'w').write(pid)
    else:
        open(pidfile, 'w').write(pid)

    # Main program
    ...

    os.unlink(pidfile)
Here, the line:

ps_command = "ps -o command= %s | grep -Eq 'python .*/%s'" % (pid, script_name)

builds a shell command whose exit status is 0 if grep succeeds, i.e. if a "python" process is currently running with the name of your script as a parameter.
A simple example, if you only want to know whether a process name exists or not:

import os

def pname_exists(inp):
    os.system('ps -ef > /tmp/psef')
    lines = open('/tmp/psef', 'r').read().split('\n')
    res = [i for i in lines if inp in i]
    return True if res else False

Result:

In [21]: pname_exists('syslog')
Out[21]: True
In [22]: pname_exists('syslog_')
Out[22]: False
I was looking for an answer to this and, in my case, a very easy and very good solution came to mind (since a false positive is not possible here, I guess: how can the timestamp in the TXT be updated if the program doesn't do it?):
Just keep writing the current timestamp to a TXT file at some interval, depending on your needs (here, every half hour was perfect).
If, when you check, the timestamp in the TXT is outdated relative to the current one, then there was a problem with the program and it should be restarted, or whatever else you prefer to do.
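A sketch of that idea (the file path is hypothetical and the half-hour threshold is just the value from the description; the file's mtime stands in for reading the timestamp back):

import os
import time

HEARTBEAT_FILE = '/tmp/mydaemon.heartbeat'  # hypothetical path
MAX_AGE = 30 * 60                           # half an hour, in seconds

def beat():
    # The monitored program calls this periodically.
    with open(HEARTBEAT_FILE, 'w') as f:
        f.write(str(time.time()))

def is_alive():
    # The checker compares the file's age against the threshold.
    try:
        return time.time() - os.path.getmtime(HEARTBEAT_FILE) < MAX_AGE
    except OSError:
        return False  # no heartbeat file yet: not running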
A portable solution that relies on multiprocessing.shared_memory:
import atexit
from multiprocessing import shared_memory

_ensure_single_process_store = {}

def ensure_single_process(name: str):
    if name in _ensure_single_process_store:
        return
    try:
        shm = shared_memory.SharedMemory(name='ensure_single_process__' + name,
                                         create=True,
                                         size=1)
    except FileExistsError:
        print(f"{name} is already running!")
        raise
    _ensure_single_process_store[name] = shm
    atexit.register(shm.unlink)
Usually you wouldn't have to use atexit, but sometimes it helps to clean up upon abnormal exit.
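Usage is a single call at the top of the program; every copy after the first prints the message and gets a FileExistsError:

ensure_single_process('my_daemon')  # 'my_daemon' is an arbitrary label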
Consider the following example to solve your problem:
#!/usr/bin/python
# -*- coding: latin-1 -*-
import os, sys, time, signal

def termination_handler(signum, frame):
    global running
    global pidfile
    print('You have requested to terminate the application...')
    sys.stdout.flush()
    running = 0
    os.unlink(pidfile)

running = 1
signal.signal(signal.SIGINT, termination_handler)

pid = str(os.getpid())
pidfile = '/tmp/' + os.path.basename(__file__).split('.')[0] + '.pid'

if os.path.isfile(pidfile):
    print("%s already exists, exiting" % pidfile)
    sys.exit()
else:
    open(pidfile, 'w').write(pid)

# Do some actual work here
while running:
    time.sleep(10)
I suggest this script because only one instance of it can be executed at a time.
Using bash to look for a process with the current script's name. No extra file.
import subprocess
import os
import time
import sys

def stop_if_already_running():
    script_name = os.path.basename(__file__)
    # getstatusoutput is the Python 3 replacement for the old commands module.
    l = subprocess.getstatusoutput("ps aux | grep -e '%s' | grep -v grep | awk '{print $2}'" % script_name)
    if l[1]:
        sys.exit(0)

To test, add:

stop_if_already_running()
print("running normally")
while True:
    time.sleep(3)
This is what I use in Linux to avoid starting a script if it is already running:
import os
import sys

script_name = os.path.basename(__file__)
pidfile = os.path.join("/tmp", os.path.splitext(script_name)[0]) + ".pid"

def create_pidfile():
    if os.path.exists(pidfile):
        with open(pidfile, "r") as _file:
            last_pid = int(_file.read())
        # Checking if process is still running
        last_process_cmdline = "/proc/%d/cmdline" % last_pid
        if os.path.exists(last_process_cmdline):
            with open(last_process_cmdline, "r") as _file:
                cmdline = _file.read()
            if script_name in cmdline:
                raise Exception("Script already running...")
    with open(pidfile, "w") as _file:
        pid = str(os.getpid())
        _file.write(pid)

def main():
    """Your application logic goes here"""

if __name__ == "__main__":
    create_pidfile()
    main()
This approach works well without any dependency on external modules.
