Checking process status from Python

I am running on a Linux x86-64 system. From a Python (2.6) script, I wish to periodically check whether a given process (identified by pid) has become "defunct"/zombie (meaning its entry in the process table still exists but the process is doing nothing). It would also be good to know how much CPU the process is consuming (similar to what the 'top' command shows).
Can somebody give me some pointers on how I can get these in Python?

I'd use the psutil library:
import psutil
proc = psutil.Process(pid)
if proc.status() == psutil.STATUS_ZOMBIE:
    pass  # Zombie process!
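psutil can also answer the CPU part of the question. A minimal sketch (assuming pid is the process id you are watching):
import psutil

proc = psutil.Process(pid)  # pid is assumed to be defined
# Percentage of CPU used by the process; passing an interval
# makes this a blocking, top-like sample over one second.
print(proc.cpu_percent(interval=1.0))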

You can get the output of top in Python as below:
Linux:
import os

f = os.popen("top -p 1 -n 1", "r")  # -p 1: watch pid 1; -n 1: a single iteration
text = f.read()
print text
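On Python 2.7+ or 3, a subprocess equivalent of the above is an alternative; a sketch (the -b flag is an addition that puts procps top into batch mode so it can run without a terminal):
import subprocess

# One batch-mode iteration of top for pid 1
text = subprocess.check_output(["top", "-b", "-n", "1", "-p", "1"])
print(text.decode())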
Update
Windows:
from os import popen
from sys import stdin
ps = popen("C:/WINDOWS/system32/tasklist.exe","r")
pp = ps.readlines()
ps.close()
# wow, look at the robust parser!
pp.pop(0) # blank line
ph = pp.pop(0) # header line
pp.pop(0) # ===
print ("%d processes reported." % len(pp))
print ("First process in list:")
print (pp[0])
stdin.readline()  # wait for Enter before exiting

Related

subprocess that prints to pseudo terminal is not using full terminal size

I have the following program that wraps top in a pseudo terminal and prints it back to the real terminal.
import os
import pty
import select
import subprocess
import sys
import time

stdout_master_fd, stdout_slave_fd = pty.openpty()
stderr_master_fd, stderr_slave_fd = pty.openpty()

p = subprocess.Popen(
    "top",
    shell=True,
    stdout=stdout_slave_fd,
    stderr=stderr_slave_fd,
    close_fds=True
)

stdout_parts = []
while p.poll() is None:
    rlist, _, _ = select.select([stdout_master_fd, stderr_master_fd], [], [])
    for f in rlist:
        output = os.read(f, 1000)  # This is used because it doesn't block
        sys.stdout.write(output.decode("utf-8"))
        sys.stdout.flush()
    time.sleep(0.01)
This works well; control sequences are handled as expected. However, the subprocess is not using the full dimensions of the real terminal.
For comparison, top run through the above program renders into a much smaller area than top run directly.
I didn't find any API in the pty library for providing the terminal dimensions.
The dimensions I get in practice for the pseudo terminal are a height of 24 lines and a width of 80 columns; I'm assuming they are hardcoded somewhere.
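For what it's worth, the size of a pty can be set directly with a TIOCSWINSZ ioctl on the pty file descriptor; this is not an API of the pty module itself, so the following is only a sketch of the underlying mechanism:
import fcntl
import struct
import termios

def set_winsize(fd, rows, cols):
    # The kernel's struct winsize is: rows, cols, xpixel, ypixel
    fcntl.ioctl(fd, termios.TIOCSWINSZ, struct.pack("HHHH", rows, cols, 0, 0))

# e.g. after pty.openpty():
# set_winsize(stdout_slave_fd, 40, 203)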
Reading Emulate a number of columns for a program in the terminal, I found the following working solution, at least in my environment (OSX and xterm):
echo LINES=$LINES COLUMNS=$COLUMNS TERM=$TERM
which comes to LINES=40 COLUMNS=203 TERM=xterm-256color in my shell. Then setting the following in the script gives the expected output:
p = subprocess.Popen(
    "top",
    shell=True,
    stdout=stdout_slave_fd,
    stderr=stderr_slave_fd,
    close_fds=True,
    env={
        "LINES": "40",
        "COLUMNS": "203",
        "TERM": "xterm-256color"
    }
)
@Mugen's answer pointed me in the right direction but did not quite work; here is what worked for me personally:
import os
import subprocess

my_env = os.environ.copy()
my_env["LINES"] = "40"
my_env["COLUMNS"] = "203"
result = subprocess.Popen(
    cmd,
    stdout=subprocess.PIPE,
    env=my_env
).communicate()[0]
So I had to first copy the entire environment with the os library and then add the variables I needed to it.
The solutions provided by @leas and @Mugen did not work for me, but I eventually stumbled upon the ptyprocess Python module, which allows you to provide terminal dimensions when spawning a process.
For context, I am trying to use a Python script to run a PowerShell 7 script and capture the PowerShell script's output. The host OS is Ubuntu Linux 22.04.
My code looks something like this:
from ptyprocess import PtyProcessUnicode

# Run the PowerShell script
script_run_cmd = 'pwsh -file script.ps1 param1 param2'
p = PtyProcessUnicode.spawn(script_run_cmd.split(), dimensions=(24, 130))

# Get all script output
script_output = []
while True:
    try:
        script_output.append(p.readline().rstrip())
    except EOFError:
        break

# Not sure if this is necessary
p.close()
I feel like there should be a class method to get all the output, but I couldn't find one and the above code works well for me.

Get local DNS settings in Python

Is there any elegant and cross platform (Python) way to get the local DNS settings?
It could probably be done with a complex combination of modules such as platform and subprocess, but maybe there is already a good module, such as netifaces, which can retrieve it at a low level and save some "reinventing the wheel" effort.
Less ideally, one could probably query something like dig, but I find it "noisy" because it would issue an extra request instead of just retrieving something which already exists locally.
Any ideas?
Using subprocess you could do something like this on a Mac or Linux system:
import subprocess

process = subprocess.Popen(['cat', '/etc/resolv.conf'],
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
print(stdout, stderr)
or do something like this:
import subprocess

with open('dns.txt', 'w') as f:
    process = subprocess.Popen(['cat', '/etc/resolv.conf'], stdout=f)
The first output will go to stdout and the second to a file.
Maybe this one will solve your problem
import subprocess

def get_local_dns(cmd_):
    with open('dns1.txt', 'w+') as f:
        with open('dns_log1.txt', 'w+') as flog:
            try:
                process = subprocess.Popen(cmd_, stdout=f, stderr=flog)
            except FileNotFoundError as e:
                flog.write(f"Error while executing this command {str(e)}")

linux_cmd = ['cat', '/etc/resolv.conf']
windows_cmd = ['windows_command', 'parameters']
commands = [linux_cmd, windows_cmd]

if __name__ == "__main__":
    for cmd in commands:
        get_local_dns(cmd)
Thanks @MasterOfTheHouse.
I ended up writing my own function. It's not so elegant, but it does the job for now. There's plenty of room for improvement, but well...
import os
import subprocess

def get_dns_settings() -> dict:
    # Initialize the output variables
    dns_ns, dns_search = [], ''
    # For Unix based OSs
    if os.path.isfile('/etc/resolv.conf'):
        for line in open('/etc/resolv.conf', 'r'):
            if line.strip().startswith('nameserver'):
                nameserver = line.split()[1].strip()
                dns_ns.append(nameserver)
            elif line.strip().startswith('search'):
                search = line.split()[1].strip()
                dns_search = search
    # If it is not a Unix based OS, try "the Windows way"
    elif os.name == 'nt':
        cmd = 'ipconfig /all'
        raw_ipconfig = subprocess.check_output(cmd)
        # Convert the bytes into a string
        ipconfig_str = raw_ipconfig.decode('cp850')
        # Convert the string into a list of lines
        ipconfig_lines = ipconfig_str.split('\n')
        for n in range(len(ipconfig_lines)):
            line = ipconfig_lines[n]
            # Parse nameservers in the current line and the next ones
            if line.strip().startswith('DNS-Server'):
                nameserver = ':'.join(line.split(':')[1:]).strip()
                dns_ns.append(nameserver)
                next_line = ipconfig_lines[n + 1]
                # If there's a lot of leading whitespace, assume we have
                # another nameserver on the next line
                if len(next_line) - len(next_line.strip()) > 10:
                    dns_ns.append(next_line.strip())
                    next_next_line = ipconfig_lines[n + 2]
                    if len(next_next_line) - len(next_next_line.strip()) > 10:
                        dns_ns.append(next_next_line.strip())
            elif line.strip().startswith('DNS-Suffix'):
                dns_search = line.split(':')[1].strip()
    return {'nameservers': dns_ns, 'search': dns_search}

print(get_dns_settings())
By the way... how did you manage to write two answers with the same account?

Python 3.4.3 subprocess.Popen get output of command without piping?

I am trying to assign the output of a command to a variable without the command thinking that it is being piped. The reason for this is that the command in question gives unformatted text as output if it is being piped, but it gives color formatted text if it is being run from the terminal. I need to get this color formatted text.
So far I've tried a few things. I've tried Popen like so:
output = subprocess.Popen(command, stdout=subprocess.PIPE)
output = output.communicate()[0]
output = output.decode()
print(output)
This will let me print the output, but it gives me the unformatted output that I get when the command is piped. That makes sense, as I'm piping it here in the Python code. But I am curious whether there is a way to assign the output of this command directly to a variable without the command running the piped version of itself.
I have also tried the following version that relies on check_output instead:
output = subprocess.check_output(command)
output = output.decode()
print(output)
And again I get the same unformatted output that the command returns when the command is piped.
Is there a way to get the formatted output, the output the command would normally give from the terminal, when it is not being piped?
Using pexpect:
2.py:
import sys

if sys.stdout.isatty():
    print('hello')
else:
    print('goodbye')
subprocess:
import subprocess

p = subprocess.Popen(
    ['python3.4', '2.py'],
    stdout=subprocess.PIPE
)
print(p.stdout.read())
--output:--
goodbye
pexpect:
import pexpect
child = pexpect.spawn('python3.4 2.py')
child.expect(pexpect.EOF)
print(child.before) #Print all the output before the expectation.
--output:--
hello
Here it is with grep --colour=auto:
import subprocess

p = subprocess.Popen(
    ['grep', '--colour=auto', 'hello', 'data.txt'],
    stdout=subprocess.PIPE
)
print(p.stdout.read())
import pexpect
child = pexpect.spawn('grep --colour=auto hello data.txt')
child.expect(pexpect.EOF)
print(child.before)
--output:--
b'hello world\n'
b'\x1b[01;31mhello\x1b[00m world\r\n'
Yes, you can use the pty module.
>>> import subprocess
>>> p = subprocess.Popen(["ls", "--color=auto"], stdout=subprocess.PIPE)
>>> p.communicate()[0]
# Output does not appear in colour
With pty:
import subprocess
import pty
import os
master, slave = pty.openpty()
p = subprocess.Popen(["ls", "--color=auto"], stdout=slave)
p.communicate()
print(os.read(master, 100)) # Print 100 bytes
# Prints with colour formatting info
Note from the docs:
Because pseudo-terminal handling is highly platform dependent, there
is code to do it only for Linux. (The Linux code is supposed to work
on other platforms, but hasn’t been tested yet.)
A less than beautiful way of reading the whole output to the end in one go:
def num_bytes_readable(fd):
    import array
    import fcntl
    import termios
    buf = array.array('i', [0])
    if fcntl.ioctl(fd, termios.FIONREAD, buf, 1) == -1:
        raise Exception("We really should have had data")
    return buf[0]

print(os.read(master, num_bytes_readable(master)))
Edit: a nicer way of getting the content at once, thanks to @Antti Haapala:
os.close(slave)
f = os.fdopen(master)
print(f.read())
Edit: people are right to point out that this will deadlock if the process generates a large output, so @Antti Haapala's answer is better.
A working polyglot example (works the same for Python 2 and Python 3), using pty.
import subprocess
import pty
import os
import sys

master, slave = pty.openpty()

# direct stderr also to the pty!
process = subprocess.Popen(
    ['ls', '-al', '--color=auto'],
    stdout=slave,
    stderr=subprocess.STDOUT
)

# close the slave descriptor! otherwise we will
# hang forever waiting for input
os.close(slave)

def reader(fd):
    try:
        while True:
            buffer = os.read(fd, 1024)
            if not buffer:
                return
            yield buffer
    # Unfortunately with a pty, an
    # IOError will be thrown at EOF.
    # On Python 2, OSError will be thrown instead.
    except (IOError, OSError) as e:
        pass

# read chunks (yields bytes)
for i in reader(master):
    # and write them to the stdout file descriptor
    os.write(1, b'<chunk>' + i + b'</chunk>')
Many programs automatically turn off colour printing codes when they detect they are not connected directly to a terminal. Many programs have a flag so you can force colour output; you could add this flag to your process call. For example:
grep "search term" inputfile.txt
# prints colour to the terminal in most OSes
grep "search term" inputfile.txt | less
# output goes to less rather than terminal, so colour is turned off
grep "search term" inputfile.txt --color | less
# forces colour output even when not connected to terminal
Be warned though: the actual colour output is done by the terminal. The terminal interprets special character escape codes and changes the text colour and background colour accordingly. Without the terminal to interpret the colour codes, you will just see the text in black with these escape codes interspersed throughout.
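As a quick illustration of those escape codes, printing one directly colours the text when stdout is a terminal, and shows up as raw bytes otherwise (a minimal sketch):
# \x1b[31m switches the foreground colour to red; \x1b[0m resets it.
print("\x1b[31mhello\x1b[0m world")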

How to terminate process from Python using pid?

I'm trying to write a short script in Python which starts another Python program in a subprocess if it is not already started, and otherwise terminates the terminal & app (Linux).
So it looks like:
#!/usr/bin/python
from subprocess import Popen

text_file = open(".proc", "rb")
dat = text_file.read()
text_file.close()

def do(dat):
    text_file = open(".proc", "w")
    p = None
    if dat == "x":
        p = Popen('python StripCore.py', shell=True)
        text_file.write(str(p.pid))
    else:
        text_file.write("x")
        p = # Assign process by pid / pid from int( dat )
        p.terminate()
    text_file.close()

do(dat)
My problem is that I lack the knowledge to obtain a process object from the pid which the app reads from the file ".proc".
The other problem is that the interpreter says that the string dat is not equal to "x". What have I missed?
Using the awesome psutil library it's pretty simple:
import psutil

p = psutil.Process(pid)
p.terminate()  # or p.kill()
If you don't want to install a new library, you can use the os module:
import os
import signal
os.kill(pid, signal.SIGTERM) #or signal.SIGKILL
See also the os.kill documentation.
If you are interested in starting the command python StripCore.py if it is not running, and killing it otherwise, you can use psutil to do this reliably.
Something like:
import psutil
from subprocess import Popen

for process in psutil.process_iter():
    if process.cmdline() == ['python', 'StripCore.py']:
        print('Process found. Terminating it.')
        process.terminate()
        break
else:
    print('Process not found: starting it.')
    Popen(['python', 'StripCore.py'])
Sample run:
$python test_strip.py #test_strip.py contains the code above
Process not found: starting it.
$python test_strip.py
Process found. Terminating it.
$python test_strip.py
Process not found: starting it.
$killall python
$python test_strip.py
Process not found: starting it.
$python test_strip.py
Process found. Terminating it.
$python test_strip.py
Process not found: starting it.
Note: In previous psutil versions cmdline was an attribute instead of a method.
I wanted to do the same thing, but in a single file.
So the logic would be:
if a script with my name is running, kill it, then exit
if a script with my name is not running, do stuff
I modified the answer by Bakuriu and came up with this:
from os import getpid
from sys import argv, exit
import psutil  # pip install psutil

myname = argv[0]
mypid = getpid()
for process in psutil.process_iter():
    if process.pid != mypid:
        for path in process.cmdline():
            if myname in path:
                print "process found"
                process.terminate()
                exit()

## your program starts here...
Running the script will do whatever the script does. Running another instance of the script will kill any existing instance of the script.
I use this to display a little PyGTK calendar widget which runs when I click the clock. If I click and the calendar is not up, the calendar displays. If the calendar is running and I click the clock, the calendar disappears.
So, this is not directly related, but it is the first question that appears when you search for how to terminate a process running from a specific folder using Python.
It also answers the question in a way (even though it is an old one with lots of answers).
While creating a faster way to scrape some government sites for data, I had an issue where, if any of the processes in the pool got stuck, they would be skipped but still take up memory on my computer. This is the solution I reached for killing them; if anyone knows a better way to do it, please let me know!
import pandas as pd
import wmi
from re import escape
import os

def kill_process(kill_path, execs):
    f = wmi.WMI()
    esc = escape(kill_path)
    temp = {'id': [], 'path': [], 'name': []}
    for process in f.Win32_Process():
        temp['id'].append(process.ProcessId)
        temp['path'].append(process.ExecutablePath)
        temp['name'].append(process.Name)
    temp = pd.DataFrame(temp)
    temp = temp.dropna(subset=['path']).reset_index().drop(columns=['index'])
    temp = temp.loc[temp['path'].str.contains(esc)].loc[temp.name.isin(execs)].reset_index().drop(columns=['index'])
    [os.system('taskkill /PID {} /f'.format(t)) for t in temp['id']]

Check to see if python script is running

I have a python daemon running as part of my web app. How can I quickly check (using Python) if my daemon is running and, if not, launch it?
I want to do it that way to fix any crashes of the daemon, so that the script does not have to be run manually: it will automatically run as soon as it is called and then stay running.
How can I check (using Python) if my script is running?
A technique that is handy on a Linux system is using domain sockets:
import socket
import sys
import time

def get_lock(process_name):
    # Without holding a reference to our socket somewhere it gets garbage
    # collected when the function exits
    get_lock._lock_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
    try:
        # The null byte (\0) means the socket is created
        # in the abstract namespace instead of being created
        # on the file system itself.
        # Works only in Linux
        get_lock._lock_socket.bind('\0' + process_name)
        print 'I got the lock'
    except socket.error:
        print 'lock exists'
        sys.exit()

get_lock('running_test')
while True:
    time.sleep(3)
It is atomic and avoids the problem of having lock files lying around if your process gets sent a SIGKILL.
You can read in the documentation for socket.close that sockets are automatically closed when garbage collected.
Drop a pidfile somewhere (e.g. /tmp). Then you can check to see if the process is running by checking to see if the PID in the file exists. Don't forget to delete the file when you shut down cleanly, and check for it when you start up.
#!/usr/bin/env python
import os
import sys

pid = str(os.getpid())
pidfile = "/tmp/mydaemon.pid"

if os.path.isfile(pidfile):
    print "%s already exists, exiting" % pidfile
    sys.exit()
file(pidfile, 'w').write(pid)
try:
    pass  # Do some actual work here
finally:
    os.unlink(pidfile)
Then you can check to see if the process is running by checking to see if the contents of /tmp/mydaemon.pid are an existing process. Monit (mentioned above) can do this for you, or you can write a simple shell script to check it for you using the return code from ps.
ps up `cat /tmp/mydaemon.pid ` >/dev/null && echo "Running" || echo "Not running"
For extra credit, you can use the atexit module to ensure that your program cleans up its pidfile under any circumstances (when killed, exceptions raised, etc.).
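A sketch of that atexit approach, reusing the pidfile path from above:
import atexit
import os

pidfile = "/tmp/mydaemon.pid"

def cleanup():
    # Runs on normal interpreter exit and after unhandled exceptions,
    # but not if the process is killed with SIGKILL.
    if os.path.isfile(pidfile):
        os.unlink(pidfile)

atexit.register(cleanup)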
The pid library can do exactly this.
from pid import PidFile

with PidFile():
    do_something()
It will also automatically handle the case where the pidfile exists but the process is not running.
My solution is to check for the process and its command line arguments.
Tested on Windows and Ubuntu Linux.
import psutil
import os

def is_running(script):
    for q in psutil.process_iter():
        if q.name().startswith('python'):
            if len(q.cmdline()) > 1 and script in q.cmdline()[1] and q.pid != os.getpid():
                print("'{}' Process is already running".format(script))
                return True
    return False

if not is_running("test.py"):
    n = input("What is Your Name? ")
    print("Hello " + n)
Of course the example from Dan will not work as it should.
Indeed, if the script crashes, raises an exception, or does not clean up its pid file, the script will be run multiple times.
I suggest the following, based on another website:
This is to check whether a lock file already exists:
#!/usr/bin/env python
import os
import sys

if os.access(os.path.expanduser("~/.lockfile.vestibular.lock"), os.F_OK):
    # if the lockfile is already there then check the PID number
    # in the lock file
    pidfile = open(os.path.expanduser("~/.lockfile.vestibular.lock"), "r")
    pidfile.seek(0)
    old_pid = pidfile.readline()
    # Now we check whether the PID from the lock file matches the current
    # process PID
    if os.path.exists("/proc/%s" % old_pid):
        print "You already have an instance of the program running"
        print "It is running as process %s," % old_pid
        sys.exit(1)
    else:
        print "File is there but the program is not running"
        print "Removing lock file for the: %s as it can be there because of the program last time it was run" % old_pid
        os.remove(os.path.expanduser("~/.lockfile.vestibular.lock"))
This is the part of the code where we put the PID in the lock file:
pidfile = open(os.path.expanduser("~/.lockfile.vestibular.lock"), "w")
pidfile.write("%s" % os.getpid())
pidfile.close()
This code checks the value of the pid against existing running processes, avoiding double execution.
I hope it will help.
There are very good packages for restarting processes on UNIX. One that has a great tutorial about building and configuring it is monit. With some tweaking you can have a rock solid proven technology keeping up your daemon.
Came across this old question looking for a solution myself.
Use psutil:
import psutil
import sys
from subprocess import Popen

for process in psutil.process_iter():
    if process.cmdline() == ['python', 'your_script.py']:
        sys.exit('Process found: exiting.')

print('Process not found: starting it.')
Popen(['python', 'your_script.py'])
There are a myriad of options. One method is using system calls, or Python libraries that perform such calls for you. The other is simply to spawn a process like:
ps ax | grep processName
and parse the output. Many people choose this approach, and it isn't necessarily a bad approach in my view; a sketch is shown below.
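A sketch of that spawn-and-parse approach (the process name is a placeholder):
import subprocess

def is_running(name):
    # Parse the ps output in Python instead of piping through grep,
    # which avoids matching the grep process itself.
    out = subprocess.check_output(["ps", "ax"]).decode()
    return any(name in line for line in out.splitlines())

print(is_running("processName"))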
I'm a big fan of Supervisor for managing daemons. It's written in Python, so there are plenty of examples of how to interact with or extend it from Python. For your purposes the XML-RPC process control API should work nicely.
Try this other version:
import os
import sys

def checkPidRunning(pid):
    '''Check for the existence of a unix pid.'''
    try:
        os.kill(pid, 0)
    except OSError:
        return False
    else:
        return True

# Entry point
if __name__ == '__main__':
    pid = str(os.getpid())
    pidfile = os.path.join("/", "tmp", __program__ + ".pid")

    if os.path.isfile(pidfile) and checkPidRunning(int(file(pidfile, 'r').readlines()[0])):
        print "%s already exists, exiting" % pidfile
        sys.exit()
    else:
        file(pidfile, 'w').write(pid)

    # Do some actual work here
    main()

    os.unlink(pidfile)
Rather than developing your own PID file solution (which has more subtleties and corner cases than you might think), have a look at supervisord -- this is a process control system that makes it easy to wrap job control and daemon behaviors around an existing Python script.
The other answers are great for things like cron jobs, but if you're running a daemon you should monitor it with something like daemontools.
Note that a check based on ps ax | grep processName will always make your script exit if you debug it in PyCharm, because the debugged process's command line looks like:
pydevd.py --multiproc --client 127.0.0.1 --port 33882 --file processName
Try this:
#!/usr/bin/env python
import os, sys, atexit

try:
    # Set PID file
    def set_pid_file():
        pid = str(os.getpid())
        f = open('myCode.pid', 'w')
        f.write(pid)
        f.close()

    def goodby():
        pidfile = 'myCode.pid'
        os.remove(pidfile)

    atexit.register(goodby)
    set_pid_file()

    # Place your code here
except KeyboardInterrupt:
    sys.exit(0)
Here is more useful code (which also checks whether it is exactly python that executes the script):
#!/usr/bin/env python
import os
from sys import exit

def checkPidRunning(pid):
    global script_name
    if pid < 1:
        print "Incorrect pid number!"
        exit()
    try:
        os.kill(pid, 0)
    except OSError:
        print "Abnormal termination of previous process."
        return False
    else:
        ps_command = "ps -o command= %s | grep -Eq 'python .*/%s'" % (pid, script_name)
        process_exist = os.system(ps_command)
        if process_exist == 0:
            return True
        else:
            print "Process with pid %s is not a Python process. Continue..." % pid
            return False

if __name__ == '__main__':
    script_name = os.path.basename(__file__)
    pid = str(os.getpid())
    pidfile = os.path.join("/", "tmp/", script_name + ".pid")
    if os.path.isfile(pidfile):
        print "Warning! Pid file %s existing. Checking for process..." % pidfile
        r_pid = int(file(pidfile, 'r').readlines()[0])
        if checkPidRunning(r_pid):
            print "Python process with pid = %s is already running. Exit!" % r_pid
            exit()
        else:
            file(pidfile, 'w').write(pid)
    else:
        file(pidfile, 'w').write(pid)

    # main program
    ....
    ....

    os.unlink(pidfile)
The key line is:
ps_command = "ps -o command= %s | grep -Eq 'python .*/%s'" % (pid, script_name)
It returns 0 if grep is successful, i.e. the process "python" is currently running with the name of your script as a parameter.
A simple example, if you only want to check whether a process name exists or not:
import os

def pname_exists(inp):
    os.system('ps -ef > /tmp/psef')
    lines = open('/tmp/psef', 'r').read().split('\n')
    res = [i for i in lines if inp in i]
    return True if res else False
Result:
In [21]: pname_exists('syslog')
Out[21]: True
In [22]: pname_exists('syslog_')
Out[22]: False
I was looking for an answer to this, and in my case a very easy and very good solution came to mind, in my opinion (a false positive is not possible here, I guess: how could the timestamp in the TXT file be updated if the program doesn't do it?):
--> just keep writing the current timestamp to a TXT file at some time interval, depending on your needs (here, each half hour was perfect).
If the timestamp in the TXT file is outdated relative to the current one when you check, then there was a problem with the program and it should be restarted, or whatever else you prefer to do.
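A minimal sketch of that heartbeat idea (the file path and the interval are assumptions):
import time

HEARTBEAT = "/tmp/heartbeat.txt"  # assumed path

def beat():
    # Called periodically by the monitored program.
    with open(HEARTBEAT, "w") as f:
        f.write(str(time.time()))

def is_alive(max_age=1800):
    # The watchdog treats the program as dead if the last
    # beat is older than max_age seconds (here: half an hour).
    try:
        with open(HEARTBEAT) as f:
            return time.time() - float(f.read()) < max_age
    except (IOError, ValueError):
        return False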
A portable solution that relies on multiprocessing.shared_memory:
import atexit
from multiprocessing import shared_memory

_ensure_single_process_store = {}

def ensure_single_process(name: str):
    if name in _ensure_single_process_store:
        return
    try:
        shm = shared_memory.SharedMemory(name='ensure_single_process__' + name,
                                         create=True,
                                         size=1)
    except FileExistsError:
        print(f"{name} is already running!")
        raise
    _ensure_single_process_store[name] = shm
    atexit.register(shm.unlink)
Usually you wouldn't have to use atexit, but sometimes it helps to clean up upon abnormal exit.
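Usage would look like this (the name is arbitrary):
ensure_single_process("my_daemon")
# ... the rest of the program; a second process calling this with the
# same name gets FileExistsError while the first one is still running.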
Consider the following example to solve your problem:
#!/usr/bin/python
# -*- coding: latin-1 -*-
import os, sys, time, signal

def termination_handler(signum, frame):
    global running
    global pidfile
    print 'You have requested to terminate the application...'
    sys.stdout.flush()
    running = 0
    os.unlink(pidfile)

running = 1
signal.signal(signal.SIGINT, termination_handler)

pid = str(os.getpid())
pidfile = '/tmp/' + os.path.basename(__file__).split('.')[0] + '.pid'

if os.path.isfile(pidfile):
    print "%s already exists, exiting" % pidfile
    sys.exit()
else:
    file(pidfile, 'w').write(pid)

# Do some actual work here
while running:
    time.sleep(10)
I suggest this script because it can only be executed once at a time.
Using bash to look for a process with the current script's name. No extra file.
import commands
import os
import time
import sys

def stop_if_already_running():
    script_name = os.path.basename(__file__)
    l = commands.getstatusoutput("ps aux | grep -e '%s' | grep -v grep | awk '{print $2}'" % script_name)
    if l[1]:
        sys.exit(0)
To test, add:
stop_if_already_running()
print "running normally"
while True:
    time.sleep(3)
This is what I use in Linux to avoid starting a script if already running:
import os
import sys

script_name = os.path.basename(__file__)
pidfile = os.path.join("/tmp", os.path.splitext(script_name)[0]) + ".pid"

def create_pidfile():
    if os.path.exists(pidfile):
        with open(pidfile, "r") as _file:
            last_pid = int(_file.read())
        # Checking if process is still running
        last_process_cmdline = "/proc/%d/cmdline" % last_pid
        if os.path.exists(last_process_cmdline):
            with open(last_process_cmdline, "r") as _file:
                cmdline = _file.read()
            if script_name in cmdline:
                raise Exception("Script already running...")
    with open(pidfile, "w") as _file:
        pid = str(os.getpid())
        _file.write(pid)

def main():
    """Your application logic goes here"""

if __name__ == "__main__":
    create_pidfile()
    main()
This approach works well without any dependency on an external module.
