How to fix a program that won't show other results - python

I have the following problem:
import os
import json
import wmi
from random import choice
import time

filename = "kill.json"
with open(filename) as file:
    kill = json.load(file)

def taskKill(imageNames: list):
    cmdPrefix = 'taskkill /F /IM '
    for imageName in imageNames:
        cmd = cmdPrefix + imageName
        os.system(cmd)

while 1 == 1:
    c = wmi.WMI()
    def check_process_running(rty):
        if(c.Win32_Process(name=rty)):
            print("Process is running")
            taskKill(kill)
            return
        else:
            print("Process is not running")
    StrA = choice(kill)
    check_process_running(StrA)
This code is supposed to detect whether a program is open and close it, but no matter how I change it, it always says "Process is not running".

The output of your script depends on the line if(c.Win32_Process(name=rty)) - it seems the return of Win32_Process is always empty (falsy), so the else branch is taken every time.
Insert a print statement with the value of Win32_Process before this line.
Have you tried to provide the argument as a String ("StrA" instead of StrA)?
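For example, a minimal sketch of that debugging step, reusing the question's c and rty names (my illustration, not part of the original answer):

procs = c.Win32_Process(name=rty)
print("Win32_Process returned:", procs)  # an empty result is falsy, i.e. "not running"
if procs:
    print("Process is running")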
To list the currently running processes (here filtered to python.exe), use:
import os, wmi

c = wmi.WMI()
for process in c.Win32_Process(name="python.exe"):
    print(process.ProcessId, process.Name)
print("current processId:", os.getpid())

Related

python: proc.communicate hangs

I'm trying to start another script from Python and then answer its input prompt. This is the main script:
import subprocess
import sys
import platform

cmdline = ['py', 'ciao.py']
cmd = subprocess.Popen(cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
for line in cmd.stdout:
    if line == b'Loading...\r\n':
        print("sending data...")
        cmd.communicate(b"test\n")[0]
        print("done")
        print(line)
    print(line)
And this is ciao.py:
import os
import re
import time

print("Loading...")
ciao = input("> ")
print(ciao)
os.system("mkdir okfunziona")
print("Done")
while 1:
    time.sleep(10)
The main script manages to send "test" but then hangs and does not print "done" to the console.
The problem occurs on both Windows and Linux.
EDIT:
OK, I have tested Ashish Nitin Patil's example, but I only see the b'Loading...\r\n' output and none of the secondary script's other output, like ">" or "Done". It seems that cmd.stdout.readline() works only the first time, because the script does not end.
See this answer (and others on that question) for inspiration. For your case, you should not be using communicate (it waits for the child process to terminate, and your child loops forever); instead use stdin.write and stdout.readline.
Your main script might look like this:
while True:
    line = cmd.stdout.readline()
    print(line)
    if line.strip() == b'Loading...':
        print("sending data...")
        cmd.stdin.write(b"test\n")
        cmd.stdin.close()
        print("done")
    elif line.strip() == b'Done':
        break
The outputs -
b'Loading...\n'
sending data...
5
done
b'> test\n'
b'Done\n'

Calling a python script with arguments using subprocess

I have a Python script which calls another Python script in another directory. To do that I used subprocess.Popen:
import os
import subprocess

arg_list = [project, profile, reader, file, str(loop)]
(where all args are strings if not converted implicitly)
f = open(project_path + '/log.txt', 'w')
proc = subprocess.Popen([sys.executable, python_script] + arg_list, stdin=subprocess.PIPE, stdout=f, stderr=f)
streamdata = proc.communicate()[0]
retCode = proc.returncode
f.close()
This part works well; thanks to the log file I can see errors that occur in the called script. Here's the called script:
import time
import csv
import os

class loading(object):
    def __init__(self, project=None, profile=None, reader=None, file=None, loop=None):
        self.project = project
        self.profile = profile
        self.reader = reader
        self.file = file
        self.loop = loop

    def csv_generation(self):
        f = open(self.file, 'a')
        try:
            writer = csv.writer(f)
            if self.loop == True:
                writer.writerow((self.project, self.profile, self.reader))
            else:
                raise('File already completed')
        finally:
            file.close()

def main():
    p = loading(project, profile, reader, file, loop)
    p.csv_generation()

if __name__ == "__main__":
    main()
When I launch my subprocess.Popen, I get an error from the called script telling me that 'project' is not defined. It looks like the Popen method doesn't pass arguments to that script. I think I'm doing something wrong; does someone have an idea?
When you pass parameters to a new process they are passed positionally; the names from the parent process do not survive, only the values. You need to add:
import sys

def main():
    if len(sys.argv) == 6:
        project, profile, reader, file, loop = sys.argv[1:]
    else:
        raise ValueError("incorrect number of arguments")
    p = loading(project, profile, reader, file, loop)
    p.csv_generation()
We test the length of sys.argv before the assignment (the first element is the name of the program).
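One caveat worth adding (my note, not part of the original answer): sys.argv values are always strings, so loop arrives as the text "True" or "False", and a comparison like self.loop == True in the called script will never match. A minimal sketch of converting it back:

import sys

project, profile, reader, file, loop_str = sys.argv[1:6]
loop = (loop_str == "True")  # sys.argv only carries strings, so convert explicitly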

Python hangs on first import

I copied a partial Python script from a benchmark suite and tried to execute it after making modifications. The problem is that it hangs once started.
The Python script is:
# !/usr/bin/python
# =============================
# initialize & configure
# =============================
#import shlex
#import fnmatch
import os
import subprocess
import resource
import os, sys, cPickle, time, threading, signal
from errno import EEXIST
from os.path import join
from subprocess import Popen
#from domain import Record
from threading import Timer

def measure(commandline):
    # r,w = os.pipe()
    forkedPid = os.fork()
    if forkedPid: # read pickled measurements from the pipe
        print "Parent proc start and the child pid: ", forkedPid
        # os.close(w); rPipe = os.fdopen(r); r = cPickle.Unpickler(rPipe)
        # measurements = r.load()
        # rPipe.close()
        os.waitpid(forkedPid, 0)
        print "Parent proc end"
        return "------------"
    else:
        # Sample thread will be destroyed when the forked process _exits
        class Sample(threading.Thread):
            def __init__(self, program):
                threading.Thread.__init__(self)
                self.setDaemon(1)
                self.timedout = False
                self.p = program
                self.maxMem = 0
                self.childpids = None
                self.start()

            def run(self):
                try:
                    remaining = maxtime
                    delay = 0.01
                    print "Start run deman thread: ", remaining, "delay", delay
                    while remaining > 0:
                        res = resource.getrusage(resource.RUSAGE_CHILDREN)
                        time.sleep(delay)
                        remaining -= delay
                        print "\n res: ", res, " and remaining is: ", remaining
                    else:
                        self.timedout = True
                        os.kill(self.p, signal.SIGKILL)
                except OSError, (e, err):
                    print "Error ", err

        try:
            print "Child proc ", commandline
            # only write pickles to the pipe
            # os.close(r); wPipe = os.fdopen(w, 'w'); w = cPickle.Pickler(wPipe)
            start = time.time()
            # print "commandLine: ", commandline, " remaining: ", remaining
            # spawn the program in a separate process
            p = Popen(commandline, stdout=outFile, stderr=errFile, stdin=inFile, shell=True)
            # start a thread to sample the program's resident memory use
            t = Sample(program=p.pid)
            print "Child ......"
            # wait for program exit status and resource usage
            rusage = os.wait3(0)
            print 'rusage: ', rusage
            elapsed = time.time() - start
            # m.userSysTime = rusage[2][0] + rusage[2][1]
            # m.maxMem = t.rusage
            # m.cpuLoad = "%"
            # m.elapsed = elapsed
            print "Child proc end"
        except KeyboardInterrupt:
            print "keyBordInterrupt..."
            os.kill(p.pid, signal.SIGKILL)
        except ZeroDivisionError, (e, err):
            print " error ZeroDiv: "
        except (OSError, ValueError), (e, err):
            print " error ", e
        finally:
            print "Here is finally section. "
            #w.dump(m)
            # wPipe.close()
            # Sample thread will be destroyed when the forked process _exits
            os._exit(0)

if __name__ == '__main__':
    print "Start now...."
    #measure("jruby \/tmp/test.rb")
When I use ps -ef | grep MyAccount, I find the interpreter hangs on the first import instruction:
MyAccount 16934 16933 0 12:08 pts/19 00:00:00 import os
/tmp/test.rb is a one-line Ruby script (puts "hello"), and it should not cause any problem, as it has been commented out.
I ran ps -ef | grep MyAccount for three minutes, and this one is ALWAYS there. Also, there is no output at all in the console, where I expected to see Start now.....
It's because your code is not interpreted as a Python script but as a Bash script.
The first line should be
#!/usr/bin/python
instead of
# !/usr/bin/python
You can skip this line and simply run your program using
python <your_script.py>
instead of
./<your_script.py>
I also think you read the ps results incorrectly.
The last column of ps -ef is the name of the command, definitely not a line of the script, and the first one, MyAccount, is the name of the user.
So this ps output means that a process named import os is hanging. There is an application called import in some Linux distributions; see man import. If you try to execute it from the shell:
$ import os
it simply waits for input forever (until interrupted), and that is what happened to you.
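To see which program is actually being picked up, something like this works in bash (the exact path varies; on many systems import is ImageMagick's screen-capture tool):

$ type import
import is /usr/bin/import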

Terminate subprocess.call('python script') and continue with the next test script

I have written this demo script to ask my question on subprocess.call().
I am trying to run Python test scripts one after another. However, in this scenario, when one of the tests aborts due to an invalid test condition, I want to terminate subprocess.call() and move on to the next test script. I have read through other queries but couldn't find a sufficient explanation. I'd appreciate any suggestion or help in this matter. Below are the demo files.
File1: listscripts.py -> this file lists all tests from a folder and runs them using subprocess.call()
import os
from subprocess import *
import sys, os, time

Lib_Path = "C:\\Demo\\question"
sys.path.append(Lib_Path)
import globalsvar # has a global variable

list = os.listdir("C:\\Demo\\question\\scripts") # this has 3 example basic script
for testscripts in list:
    aborttest = globalsvar.aborttestcall # when it encounters invalid condition from testscript1thru3 call() should terminate and go to next test
    while not aborttest:
        Desttestresultpath = os.path.join('C:/Demo/question/scripts', pyscripts)
        call(["python", Desttestresultpath]) # calls individual scripts
        aborttest = True
exit(1)
File2: globalsvar.py ( aborttestcall = False )
testscript1.py, testscript2.py and testscript3.py -> have some print statements and are placed in C:/Demo/question/scripts
testscript1.py and testscript3.py:
import sys,os,time
Lib_Path = "C:\\Demo\\question"
sys.path.append(Lib_Path)
import globalsvar
print "Start of Test\n"
print "testing stdout prints --1"
time.sleep(1)
globalsvar.aborttestcall = False
print "testing stdout prints --2"
time.sleep(1)
print "testing stdout prints --3"
time.sleep(1)
testscript2.py:
import sys,os,time
Lib_Path = "C:\\Demo\\question"
sys.path.append(Lib_Path)
import globalsvar
print "Start of Test\n"
print "testing stdout prints --1"
time.sleep(1)
globalsvar.aborttestcall = True
print "testing stdout prints --2"
time.sleep(1)
print "testing stdout prints --3"
time.sleep(1)
You can run your scripts (among different possibilities) like this:
import subprocess
import os

for file_item in os.listdir('scripts'):
    script = os.sep.join(['scripts', file_item])
    return_value = subprocess.call(['python', script])
    print "OUTPUT: " + str(return_value)
Your inner scripts can then exit their process with an exit code that you can evaluate in your calling process:
import time
import sys
print "Doing subprocess testing stuff"
time.sleep(2)
print "Doing more subprocess testing stuff"
# simulate error
time.sleep(2)
print "Error, jump out of the process"
sys.exit(1)
# finish
time.sleep(2)
print "done"
# this can be left out since it is called implicitely
# on successful step out of a process
# sys.exit(0)
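The calling loop can then act on that exit code to skip to the next test. A minimal sketch of the idea (the non-zero handling is my addition, not part of the answer above):

import subprocess
import os

for file_item in os.listdir('scripts'):
    script = os.sep.join(['scripts', file_item])
    return_value = subprocess.call(['python', script])
    if return_value != 0:
        # the test aborted; the for loop simply continues with the next script
        print("%s aborted with exit code %d, moving on" % (script, return_value))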

Check to see if python script is running

I have a Python daemon running as a part of my web app. How can I quickly check (using Python) if my daemon is running and, if not, launch it?
I want to do it that way to fix any crashes of the daemon, so the script does not have to be run manually; it will automatically run as soon as it is called and then stay running.
How can I check (using Python) if my script is running?
A technique that is handy on a Linux system is using domain sockets:
import socket
import sys
import time

def get_lock(process_name):
    # Without holding a reference to our socket somewhere it gets garbage
    # collected when the function exits
    get_lock._lock_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
    try:
        # The null byte (\0) means the socket is created
        # in the abstract namespace instead of being created
        # on the file system itself.
        # Works only in Linux
        get_lock._lock_socket.bind('\0' + process_name)
        print 'I got the lock'
    except socket.error:
        print 'lock exists'
        sys.exit()

get_lock('running_test')
while True:
    time.sleep(3)
It is atomic and avoids the problem of having lock files lying around if your process gets sent a SIGKILL.
You can read in the documentation for socket.close that sockets are automatically closed when garbage collected.
Drop a pidfile somewhere (e.g. /tmp). Then you can check to see if the process is running by checking to see if the PID in the file exists. Don't forget to delete the file when you shut down cleanly, and check for it when you start up.
#!/usr/bin/env python
import os
import sys

pid = str(os.getpid())
pidfile = "/tmp/mydaemon.pid"

if os.path.isfile(pidfile):
    print "%s already exists, exiting" % pidfile
    sys.exit()
file(pidfile, 'w').write(pid)
try:
    pass # Do some actual work here
finally:
    os.unlink(pidfile)
Then you can check to see if the process is running by checking to see if the contents of /tmp/mydaemon.pid are an existing process. Monit (mentioned above) can do this for you, or you can write a simple shell script to check it for you using the return code from ps.
ps up `cat /tmp/mydaemon.pid ` >/dev/null && echo "Running" || echo "Not running"
For extra credit, you can use the atexit module to ensure that your program cleans up its pidfile under any circumstances (when killed, exceptions raised, etc.).
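A minimal sketch of that atexit idea (note that atexit handlers run on normal exit and on unhandled exceptions, but not when the process receives SIGKILL):

import atexit
import os

pidfile = "/tmp/mydaemon.pid"

def cleanup_pidfile():
    # remove the pidfile if it is still present when the interpreter exits
    if os.path.isfile(pidfile):
        os.unlink(pidfile)

atexit.register(cleanup_pidfile)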
The pid library can do exactly this.
from pid import PidFile

with PidFile():
    do_something()
It will also automatically handle the case where the pidfile exists but the process is not running.
My solution is to check for the process and its command line arguments.
Tested on Windows and Ubuntu Linux.
import psutil
import os

def is_running(script):
    for q in psutil.process_iter():
        if q.name().startswith('python'):
            if len(q.cmdline()) > 1 and script in q.cmdline()[1] and q.pid != os.getpid():
                print("'{}' Process is already running".format(script))
                return True
    return False

if not is_running("test.py"):
    n = input("What is Your Name? ")
    print("Hello " + n)
Of course the example from Dan will not work as it should.
Indeed, if the script crashes, raises an exception, or does not clean up the pid file, the script will be run multiple times.
I suggest the following, based on an answer from another website.
This checks whether a lock file already exists:
#!/usr/bin/env python
import os
import sys

if os.access(os.path.expanduser("~/.lockfile.vestibular.lock"), os.F_OK):
    # if the lockfile is already there then check the PID number
    # in the lock file
    pidfile = open(os.path.expanduser("~/.lockfile.vestibular.lock"), "r")
    pidfile.seek(0)
    old_pid = pidfile.readline()
    # Now we check whether the PID from the lock file matches the current
    # process PID
    if os.path.exists("/proc/%s" % old_pid):
        print "You already have an instance of the program running"
        print "It is running as process %s," % old_pid
        sys.exit(1)
    else:
        print "File is there but the program is not running"
        print "Removing lock file for the: %s as it can be there because of the program last time it was run" % old_pid
        os.remove(os.path.expanduser("~/.lockfile.vestibular.lock"))
This is the part of the code where we put the PID in the lock file:
pidfile = open(os.path.expanduser("~/.lockfile.vestibular.lock"), "w")
pidfile.write("%s" % os.getpid())
pidfile.close()
This code checks the PID value against the existing running processes, avoiding double execution.
I hope it will help.
There are very good packages for restarting processes on UNIX. One that has a great tutorial about building and configuring it is monit. With some tweaking you can have a rock solid proven technology keeping up your daemon.
Came across this old question looking for a solution myself.
Use psutil:
import psutil
import sys
from subprocess import Popen

for process in psutil.process_iter():
    if process.cmdline() == ['python', 'your_script.py']:
        sys.exit('Process found: exiting.')

print('Process not found: starting it.')
Popen(['python', 'your_script.py'])
There are a myriad of options. One method is using system calls, or Python libraries that perform such calls for you. Another is simply to spawn a process like:
ps ax | grep processName
and parse the output. Many people choose this approach, and it isn't necessarily a bad one in my view.
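A rough sketch of that spawn-and-parse idea (my snippet; the grep -v grep filters the grep process itself out of the listing):

import subprocess

def is_process_running(name):
    try:
        output = subprocess.check_output(
            "ps ax | grep %s | grep -v grep" % name, shell=True)
    except subprocess.CalledProcessError:
        # grep exits non-zero when nothing matches
        return False
    return bool(output.strip())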
I'm a big fan of Supervisor for managing daemons. It's written in Python, so there are plenty of examples of how to interact with or extend it from Python. For your purposes the XML-RPC process control API should work nicely.
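As a sketch of what querying that API might look like (assuming supervisord's default HTTP port from its docs; the program name 'mydaemon' is a hypothetical entry from its config):

from xmlrpc.client import ServerProxy  # xmlrpclib.ServerProxy on Python 2

server = ServerProxy('http://localhost:9001/RPC2')
info = server.supervisor.getProcessInfo('mydaemon')  # 'mydaemon' is hypothetical
if info['statename'] != 'RUNNING':
    server.supervisor.startProcess('mydaemon')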
Try this other version
def checkPidRunning(pid):
    '''Check for the existence of a unix pid.'''
    try:
        os.kill(pid, 0)
    except OSError:
        return False
    else:
        return True

# Entry point
if __name__ == '__main__':
    pid = str(os.getpid())
    pidfile = os.path.join("/", "tmp", __program__ + ".pid")

    if os.path.isfile(pidfile) and checkPidRunning(int(file(pidfile, 'r').readlines()[0])):
        print "%s already exists, exiting" % pidfile
        sys.exit()
    else:
        file(pidfile, 'w').write(pid)
        # Do some actual work here
        main()
        os.unlink(pidfile)
Rather than developing your own PID file solution (which has more subtleties and corner cases than you might think), have a look at supervisord -- this is a process control system that makes it easy to wrap job control and daemon behaviors around an existing Python script.
The other answers are great for things like cron jobs, but if you're running a daemon you should monitor it with something like daemontools.
ps ax | grep processName
Note: if you debug your script in PyCharm, this check always fails, because the process is actually started as
pydevd.py --multiproc --client 127.0.0.1 --port 33882 --file processName
try this:
#!/usr/bin/env python
import os, sys, atexit

try:
    # Set PID file
    def set_pid_file():
        pid = str(os.getpid())
        f = open('myCode.pid', 'w')
        f.write(pid)
        f.close()

    def goodby():
        pid = str('myCode.pid')
        os.remove(pid)

    atexit.register(goodby)
    set_pid_file()
    # Place your code here
except KeyboardInterrupt:
    sys.exit(0)
Here is more useful code (which also checks whether it is exactly python that executes the script):
#! /usr/bin/env python
import os
from sys import exit

def checkPidRunning(pid):
    global script_name
    if pid < 1:
        print "Incorrect pid number!"
        exit()
    try:
        os.kill(pid, 0)
    except OSError:
        print "Abnormal termination of previous process."
        return False
    else:
        ps_command = "ps -o command= %s | grep -Eq 'python .*/%s'" % (pid, script_name)
        process_exist = os.system(ps_command)
        if process_exist == 0:
            return True
        else:
            print "Process with pid %s is not a Python process. Continue..." % pid
            return False

if __name__ == '__main__':
    script_name = os.path.basename(__file__)
    pid = str(os.getpid())
    pidfile = os.path.join("/", "tmp/", script_name + ".pid")
    if os.path.isfile(pidfile):
        print "Warning! Pid file %s existing. Checking for process..." % pidfile
        r_pid = int(file(pidfile, 'r').readlines()[0])
        if checkPidRunning(r_pid):
            print "Python process with pid = %s is already running. Exit!" % r_pid
            exit()
        else:
            file(pidfile, 'w').write(pid)
    else:
        file(pidfile, 'w').write(pid)

    # main program
    ....
    ....
    os.unlink(pidfile)
Here is the key line:
ps_command = "ps -o command= %s | grep -Eq 'python .*/%s'" % (pid, script_name)
It returns 0 if grep is successful, i.e. a "python" process is currently running with the name of your script as a parameter.
A simple example, if you are only checking whether a process name exists or not:
import os

def pname_exists(inp):
    os.system('ps -ef > /tmp/psef')
    lines = open('/tmp/psef', 'r').read().split('\n')
    res = [i for i in lines if inp in i]
    return True if res else False
Result:
In [21]: pname_exists('syslog')
Out[21]: True
In [22]: pname_exists('syslog_')
Out[22]: False
I was looking for an answer to this, and in my case a very easy and very good solution came to mind (a false positive seems impossible here: how could the timestamp in the TXT file be updated if the program isn't updating it?):
just keep writing the current timestamp to a TXT file at some interval, depending on your needs (here every half hour was perfect).
If the timestamp in the TXT file is outdated relative to the current time when you check, then there was a problem with the program and it should be restarted, or whatever else you prefer to do.
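A minimal sketch of that heartbeat idea (the file path and interval are placeholders, not from the original answer):

import time

HEARTBEAT = "/tmp/heartbeat.txt"  # hypothetical path

# In the daemon: refresh the stamp periodically
def write_heartbeat():
    with open(HEARTBEAT, "w") as f:
        f.write(str(time.time()))

# In the checker: treat the daemon as dead if the stamp is stale
def daemon_alive(max_age=1800):  # 1800 s = the half hour mentioned above
    try:
        with open(HEARTBEAT) as f:
            return time.time() - float(f.read()) < max_age
    except (IOError, ValueError):
        return False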
A portable solution that relies on multiprocessing.shared_memory:
import atexit
from multiprocessing import shared_memory

_ensure_single_process_store = {}

def ensure_single_process(name: str):
    if name in _ensure_single_process_store:
        return
    try:
        shm = shared_memory.SharedMemory(name='ensure_single_process__' + name,
                                         create=True,
                                         size=1)
    except FileExistsError:
        print(f"{name} is already running!")
        raise
    _ensure_single_process_store[name] = shm
    atexit.register(shm.unlink)
Usually you wouldn't have to use atexit, but sometimes it helps to clean up upon abnormal exit.
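Usage is then a single call at startup, e.g. (the name "my_daemon" is a placeholder):

if __name__ == "__main__":
    ensure_single_process("my_daemon")  # raises if another instance holds the segment
    # ... daemon work here ...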
Consider the following example to solve your problem:
#!/usr/bin/python
# -*- coding: latin-1 -*-
import os, sys, time, signal

def termination_handler(signum, frame):
    global running
    global pidfile
    print 'You have requested to terminate the application...'
    sys.stdout.flush()
    running = 0
    os.unlink(pidfile)

running = 1
signal.signal(signal.SIGINT, termination_handler)
pid = str(os.getpid())
pidfile = '/tmp/' + os.path.basename(__file__).split('.')[0] + '.pid'

if os.path.isfile(pidfile):
    print "%s already exists, exiting" % pidfile
    sys.exit()
else:
    file(pidfile, 'w').write(pid)

# Do some actual work here
while running:
    time.sleep(10)
I suggest this script because only one instance of it can run at a time.
Using bash to look for a process with the current script's name. No extra file.
import commands
import os
import time
import sys

def stop_if_already_running():
    script_name = os.path.basename(__file__)
    l = commands.getstatusoutput("ps aux | grep -e '%s' | grep -v grep | awk '{print $2}'" % script_name)
    if l[1]:
        sys.exit(0)
To test, add
stop_if_already_running()
print "running normally"
while True:
    time.sleep(3)
This is what I use in Linux to avoid starting a script if already running:
import os
import sys

script_name = os.path.basename(__file__)
pidfile = os.path.join("/tmp", os.path.splitext(script_name)[0]) + ".pid"

def create_pidfile():
    if os.path.exists(pidfile):
        with open(pidfile, "r") as _file:
            last_pid = int(_file.read())

        # Checking if process is still running
        last_process_cmdline = "/proc/%d/cmdline" % last_pid
        if os.path.exists(last_process_cmdline):
            with open(last_process_cmdline, "r") as _file:
                cmdline = _file.read()
            if script_name in cmdline:
                raise Exception("Script already running...")

    with open(pidfile, "w") as _file:
        pid = str(os.getpid())
        _file.write(pid)

def main():
    """Your application logic goes here"""

if __name__ == "__main__":
    create_pidfile()
    main()
This approach works well without any dependency on an external module.
