I have been trying to implement a wrapper around subprocess as follows:
def ans_cmd_stream_color(inputcmd):
"""Driver function for local ansible commands.
Stream stdout to stdout and log file with color.
Runs <inputcmd> via subprocess.
Returns return code, stdout, stderr as dict.
"""
fullcmd = inputcmd
create_debug('Enabling colorful ansible output.', LOGGER)
create_info('Running command: ' + fullcmd, LOGGER, True)
p = subprocess.Popen('export ANSIBLE_FORCE_COLOR=true; ' + fullcmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
stdout_l = []
stderr_l = []
rcode = 0
# Regex black magic
ansi_escape = re.compile(r'\x1b[^m]*m')
# Get the unbuffered IO action going.
try:
        reads = [p.stdout.fileno(), p.stderr.fileno()]
        # Print line by line
        while True:
            # select blocks until one of the pipes has data to read
            ret = select.select(reads, [], [])
            for fd in ret[0]:
if fd == p.stdout.fileno():
line = p.stdout.readline()
sys.stdout.write(line.encode('utf-8'))
stdout_l.append(ansi_escape.sub('',
line.encode('utf-8'))
)
if fd == p.stderr.fileno():
                    line = p.stderr.readline()
sys.stderr.write(line.encode('utf-8'))
stderr_l.append(ansi_escape.sub('',
line.encode('utf-8'))
)
# Break when the process is done.
if p.poll() is not None:
rcode = p.returncode
break
except BaseException as e:
raise e
outstr = ''.join(stdout_l)
errstr = ''.join(stderr_l)
outstr, errstr = str(outstr).rstrip('\n'), str(errstr).rstrip('\n')
expstr = errstr.strip('ERROR: ')
if len(expstr) >= 1:
create_info('Command: ' + str(fullcmd) + ': ' + expstr + '\n', LOGGER,
True)
if rcode == 0:
rcode = 1
else:
create_info(outstr + '\n', LOGGER)
if rcode == 0:
create_info('Command: ' + fullcmd + ' ran successfully.', LOGGER,
True)
expstr = False
ret_dict = {inputcmd: {}}
ret_dict[inputcmd]['rcode'] = rcode
ret_dict[inputcmd]['stdout'] = outstr
ret_dict[inputcmd]['stderr'] = expstr
return copy.deepcopy(ret_dict)
The idea is to print the streaming output of a subprocess command and then return the info to the caller. The issue is that even using a direct io.open, the subprocess PIPE is still buffered unless I set:
os.environ["PYTHONUNBUFFERED"] = "1"
which is not ideal. Any ideas, or has anybody else encountered this issue?
UPDATE: With ansible you need to disable Python's output buffering in the child process for subprocess to see output as it is produced:
def ans_cmd_stream_color(inputcmd):
"""Driver function for local ansible commands.
Stream stdout to stdout and log file with color.
Runs <inputcmd> via subprocess.
Returns return code, stdout, stderr as dict.
"""
fullcmd = inputcmd
create_debug('Enabling colorful ansible output.', LOGGER)
create_info('Running command: ' + fullcmd, LOGGER, True)
p = subprocess.Popen('export ANSIBLE_FORCE_COLOR=true; ' +
'export PYTHONUNBUFFERED=1; ' + fullcmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
stdout_l = []
stderr_l = []
rcode = 0
# Regex black magic
ansi_escape = re.compile(r'\x1b[^m]*m')
# Get the unbuffered IO action going.
try:
        reads = [p.stdout.fileno(), p.stderr.fileno()]
        # Print line by line
        while True:
            # select blocks until one of the pipes has data to read
            ret = select.select(reads, [], [])
            for fd in ret[0]:
if fd == p.stdout.fileno():
line = p.stdout.readline()
sys.stdout.write(line.encode('utf-8'))
stdout_l.append(ansi_escape.sub('',
line.encode('utf-8'))
)
if fd == p.stderr.fileno():
                    line = p.stderr.readline()
sys.stderr.write(line.encode('utf-8'))
stderr_l.append(ansi_escape.sub('',
line.encode('utf-8'))
)
# Break when the process is done.
if p.poll() is not None:
rcode = p.returncode
break
except BaseException as e:
raise e
outstr = ''.join(stdout_l)
errstr = ''.join(stderr_l)
outstr, errstr = str(outstr).rstrip('\n'), str(errstr).rstrip('\n')
expstr = errstr.strip('ERROR: ')
if len(expstr) >= 1:
create_info('Command: ' + str(fullcmd) + ': ' + expstr + '\n', LOGGER,
True)
if rcode == 0:
rcode = 1
else:
create_info(outstr + '\n', LOGGER)
if rcode == 0:
create_info('Command: ' + fullcmd + ' ran successfully.', LOGGER,
True)
expstr = False
ret_dict = {inputcmd: {}}
ret_dict[inputcmd]['rcode'] = rcode
ret_dict[inputcmd]['stdout'] = outstr
ret_dict[inputcmd]['stderr'] = expstr
return copy.deepcopy(ret_dict)
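As an aside, the same environment tweaks can be passed to Popen directly instead of prepending export statements to the shell string. A minimal sketch of the env-based variant, assuming fullcmd as in the function above:

import os
import subprocess

# Copy the parent environment and override only what this child needs.
child_env = dict(os.environ, ANSIBLE_FORCE_COLOR='true', PYTHONUNBUFFERED='1')
p = subprocess.Popen(fullcmd, shell=True, env=child_env,
                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)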
You should probably read directly from the subprocess pipes. Something like the following will send standard output to the info logger and standard error to the error logger:
import logging, subprocess
logging.basicConfig(level=logging.INFO)
proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
cont = True
while cont:
cont = False
line = proc.stdout.readline()
if not line == b"":
out = line.decode("utf-8").rstrip()
logging.info(out)
cont = True
line = proc.stderr.readline()
if not line == b"":
out = line.decode("utf-8").rstrip()
logging.error(out)
cont = True
if not cont and proc.poll() is not None:
break
To address the buffering issue, per this question, either the subordinate Python script must explicitly flush the buffers, or the environment variable PYTHONUNBUFFERED must be set to a non-empty string.
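If you control the subordinate script, the explicit-flush variant is just this (a minimal sketch, assuming the child writes to stdout line by line):

import sys

# Emit a line and push it through the pipe immediately instead of
# waiting for the block buffer to fill.
sys.stdout.write('progress line\n')
sys.stdout.flush()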
Good morning!
I'm compiling LaTeX documents from Python on Ubuntu 16.04, but I want to suppress the output that appears in my shell when doing so (the same output that appears when compiling with a TeX editor like TeXworks).
The code I'm using is the following:
def compila_latex(nombre):
cmd = ['pdflatex','-interaction','nonstopmode', nombre + '.tex']
proc = subprocess.Popen(cmd)
proc.communicate()
retcode = proc.returncode
if not retcode == 0:
os.unlink(nombre + '.pdf')
        raise ValueError('There is an error compiling the document.')
cmd = ['pdflatex','-interaction','nonstopmode', nombre + '.tex']
proc = subprocess.Popen(cmd)
proc.communicate()
retcode = proc.returncode
if not retcode == 0:
os.unlink(nombre + '.pdf')
raise ValueError('Error')
list_files = os.listdir(os.curdir)
list_aux_files = [fil[len(nombre):] for fil in list_files if fil[:len(nombre)] == nombre]
for end in list_aux_files:
if end != '.tex' and end != '.pdf':
os.unlink(nombre + end)
Note: I'm compiling twice due to the index and page numbering.
Thanks in advance!
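For reference, subprocess can discard a child's output entirely by redirecting it to the null device. A minimal sketch of that idea, assuming Python 3.3+ for subprocess.DEVNULL and nombre as in the question:

import subprocess

cmd = ['pdflatex', '-interaction', 'nonstopmode', nombre + '.tex']
# Discard the compiler's chatter (on older Pythons, pass an open
# handle to os.devnull instead of subprocess.DEVNULL).
proc = subprocess.Popen(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
proc.communicate()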
I have a Python script which creates branches and uploads files to Bitbucket, but I am trying to get the links from Bitbucket after creating the branches. How can I do that?
from subprocess import PIPE
import subprocess
import os
import getpass
import pandas as pd
import shutil
git_command = ['git', 'status']
current_user = getpass.getuser()
# Assuming every user has the same location for their local repository;
# otherwise this should be set manually
repository = r'C:\Users\%s\bb96' % current_user
git_config = subprocess.call(['git', 'config', 'core.autocrlf', 'true'], cwd=repository, stdout=PIPE, stderr=PIPE, shell=True)
#------------------------------------------------------------------------------
git_fetch = subprocess.Popen(['git', 'fetch'], cwd=repository, stdout=PIPE, stderr=PIPE, shell=True)
stdout_f, stderr_f = git_fetch.communicate()
print(stdout_f)
print(stderr_f)
'''
1. Provide path to where all the BSTM are being stored
Provide app_id
Provide schema name
Provide type of the bstms to be created
for ongoing process -> o
for history load -> h
2. Program creates list of branches to be created as follows:
    feature/schema-physical_tab_name-o
3. For each new feature branch, a directory tree is created:
appid\schema\table_name\misc\o\bstm, e.g.:
edld_bb96_rdiapid\crz_rdi_ca\table_name\misc\o\bstm
'''
def loading_to_bitbucket(bstm_path, app_id, schema, bstm_type):
# creates list of bstms
feature_list = []
feature_bstm_ref = {}
bstm_list = [f for f in os.listdir(bstm_path) if os.path.isfile(os.path.join(bstm_path, f)) and f[-4:] == ".csv"]
# creates name of future branch based on table physical name
for bstm in bstm_list:
directorytree = ''
df = pd.read_csv(os.path.join(bstm_path, bstm))
for r in range(len(df)):
for c in range(len(df.columns.values.tolist())):
if str(df.iloc[r,c]).strip() == "Target Physical Table Name":
directorytree = os.path.join(repository,app_id,schema,df.iloc[r+1,c].lower(),'misc',bstm_type,'bstm')
feature_list.append("feature/"+schema+"-"+df.iloc[r+1,c].lower()+"-o")
feature_bstm_ref["feature/"+schema+"-"+df.iloc[r+1,c].lower()+"-o"] = [bstm, directorytree]
break
else:
continue
break
    # For each new feature branch, a new branch is created in Bitbucket and the file is loaded;
    # for an existing BSTM, a newer version is loaded only if the BSTM file was updated
for feature in feature_list:
compare_flag = 0
x = ''
y = ''
next_release = False
#---------
print(" ")
print("Current iteration: " + feature)
git_pull = subprocess.call(['git', 'pull'], cwd=repository, stdout=PIPE, stderr=PIPE, shell=True)
#git_pull = subprocess.Popen(['git', 'pull'], cwd=repository, stdout=PIPE, stderr=PIPE, shell=True)
#stdout_md, stderr_md = git_pull.communicate()
#print(stdout_md)
#print(stderr_md)
if git_pull != 0:
print("GIT PULL didn't succeed, check your git status.")
break
else:
print("GIT PULL ended successfully.")
checkout_dev = subprocess.call(['git', 'checkout', 'dev'], cwd=repository, stdout=PIPE, stderr=PIPE, shell=True)
if checkout_dev != 0:
print("Can\'t checkout DEV branch, check git status")
break
else:
print("Checked out on DEV successfully.")
create_branch = subprocess.Popen(['git', 'checkout', '-b', feature], cwd=repository, stdout=PIPE, stderr=PIPE, shell=True)
stdout_cb, stderr_cb = create_branch.communicate()
#print(str(stdout_cb))
#print(str(stderr_cb))
if str(stderr_cb).find("fatal: A branch named " + "'" + feature + "'" + " already exists.") < 0 :
try:
createdirtree = subprocess.Popen(['mkdir', feature_bstm_ref[feature][1]], cwd=repository, stdout=PIPE, stderr=PIPE, shell=True)
stdout_md, stderr_md = createdirtree.communicate()
print("Created feature branch: " + feature)
except:
print('Error while creating directory tree for ' + feature)
continue
else:
try:
checkout_branch = subprocess.Popen(['git', 'checkout', feature], cwd=repository, stdout=PIPE, stderr=PIPE, shell=True)
stdout_chb, stderr_chb = checkout_branch.communicate()
print("Checked out on branch: " + feature)
except:
print("Error")
break
try:
x = (os.path.join(bstm_path, feature_bstm_ref[feature][0]).replace('\\', '/'))
y = os.path.join(feature_bstm_ref[feature][1],feature_bstm_ref[feature][0]).replace('\\', '/')
next_release = os.path.isfile(y) # values True/False
#print(next_release)
if next_release:
print("Comparing files",)
compare_files = subprocess.Popen(['git', 'diff', '--shortstat', x, y], stdout=PIPE, stderr=PIPE, shell=True)
stdout, stderr = compare_files.communicate()
#print(str(stdout))
#print(str(stderr))
if str(stdout).find('file changed') > 0 :
compare_flag = 1
if compare_flag == 0:
try:
print("Nothing has changed, move to next avalaible feature branch.")
reset_local = subprocess.Popen(['git', 'reset', '--hard', 'origin/'+ feature], cwd=repository, stdout=PIPE, stderr=PIPE, shell=True)
stdout, stderr = reset_local.communicate()
continue
except:
print("Something went wrong.")
break
except:
print("comparing files didn't succeed.")
break
try:
if compare_flag != 0:
print("Newer version found, new version will be loaded")
else:
print("Initial load of file.")
shutil.copy(os.path.join(bstm_path, feature_bstm_ref[feature][0]), feature_bstm_ref[feature][1])
except shutil.Error:
print(shutil.Error)
print ("Unable to copy file." + feature_bstm_ref[feature][0])
break
# Add the file
try:
add_file = subprocess.Popen(["git", "add", "-A"],cwd = repository, stdout=PIPE, stderr=PIPE, shell=True)
stdout, stderr = add_file.communicate()
#print(str(stdout))
#print(str(stderr))
except:
print ("git add error" + feature)
break
# Compile the commit message and then commit.
try:
message = str(input("Provide commit message: "))
#message = "Initial release " + feature_bstm_ref[feature][0][5:-4] #Assuming that each BSTM will have BSTM_ prefix
commit_info = subprocess.Popen(["git", "commit", "-m", message], cwd = repository,stdout=PIPE, stderr=PIPE, shell=True)
stdout, stderr = commit_info.communicate()
#print(str(stdout))
#print(str(stderr))
except:
print ("git commit error" + feature)
break
# Push to the target BSTM. If you have passphrase for your BitBucket,
# in this step a window will prompt out asking you to input the passphrase.
# After you input the passphrase, the upload will be completed for this feature.
try:
push_to_remote = subprocess.Popen(["git", "push", "origin", "-u",feature], cwd = repository, stdout=PIPE, stderr=PIPE, shell=True)
stdout, stderr = push_to_remote.communicate()
#print(str(stdout))
#print(str(stderr))
print ("Git Bucket uploading succeeded for " + feature)
except:
print ("git push error" + feature)
break
checkout_dev = subprocess.call(['git', 'checkout', 'dev'], cwd=repository, stdout=PIPE, stderr=PIPE, shell=True)
print("All features from the list are checked/loaded.")
#------------------------------------------------------------------------------
bstm_path = 'M:\\EDW\\EDW_old\\Projects\\'  # a raw string cannot end with a backslash
app_id = 'dummy'
schema = 'dummy_schema'
bstm_type = 'o'
# calling the function:
loading_to_bitbucket(bstm_path, app_id, schema, bstm_type)
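As for getting the links after the branches are created: git itself won't return them, but a branch's web URL on a Bitbucket Server instance can usually be composed from the repository's browse URL. A hypothetical sketch, where BASE_URL, PROJECT, and REPO_SLUG are placeholder assumptions rather than values from the script above:

# All values below are illustrative assumptions; adjust for your server.
BASE_URL = 'https://bitbucket.example.com'
PROJECT = 'PROJ'
REPO_SLUG = 'bb96'

def branch_link(feature):
    # Bitbucket Server exposes a branch at .../browse?at=refs/heads/<branch>
    return (BASE_URL + '/projects/' + PROJECT + '/repos/' + REPO_SLUG +
            '/browse?at=refs/heads/' + feature)

print(branch_link('feature/dummy_schema-some_table-o'))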
I need to run an interactive Bash instance in a separate process in Python with its own dedicated TTY (I can't use pexpect).
I used this code snippet I commonly see used in similar programs:
master, slave = pty.openpty()
p = subprocess.Popen(["/bin/bash", "-i"], stdin=slave, stdout=slave, stderr=slave)
os.close(slave)
x = os.read(master, 1026)
print x
subprocess.Popen.kill(p)
os.close(master)
But when I run it I get the following output:
$ ./pty_try.py
bash: cannot set terminal process group (10790): Inappropriate ioctl for device
bash: no job control in this shell
Strace of the run shows some errors:
...
readlink("/usr/bin/python2.7", 0x7ffc8db02510, 4096) = -1 EINVAL (Invalid argument)
...
ioctl(3, SNDCTL_TMR_TIMEBASE or SNDRV_TIMER_IOCTL_NEXT_DEVICE or TCGETS, 0x7ffc8db03590) = -1 ENOTTY (Inappropriate ioctl for device)
...
readlink("./pty_try.py", 0x7ffc8db00610, 4096) = -1 EINVAL (Invalid argument)
The code snippet seems pretty straightforward. Is Bash not getting something it needs? What could be the problem here?
This is a solution for running an interactive command in subprocess. It uses a pseudo-terminal to make stdout non-blocking (some commands also need a tty device, e.g. bash), and it uses select to handle input and output to the subprocess.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import select
import termios
import tty
import pty
from subprocess import Popen
command = 'bash'
# command = 'docker run -it --rm centos /bin/bash'.split()
# save original tty setting then set it to raw mode
old_tty = termios.tcgetattr(sys.stdin)
tty.setraw(sys.stdin.fileno())
# open pseudo-terminal to interact with subprocess
master_fd, slave_fd = pty.openpty()
try:
    # use os.setsid() to make it run in a new process group, or bash job control will not be enabled
p = Popen(command,
preexec_fn=os.setsid,
stdin=slave_fd,
stdout=slave_fd,
stderr=slave_fd,
universal_newlines=True)
while p.poll() is None:
r, w, e = select.select([sys.stdin, master_fd], [], [])
if sys.stdin in r:
d = os.read(sys.stdin.fileno(), 10240)
os.write(master_fd, d)
elif master_fd in r:
o = os.read(master_fd, 10240)
if o:
os.write(sys.stdout.fileno(), o)
finally:
# restore tty settings back
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty)
This is the solution that worked for me in the end (as suggested by qarma):
libc = ctypes.CDLL('libc.so.6')
master, slave = pty.openpty()
p = subprocess.Popen(["/bin/bash", "-i"], preexec_fn=libc.setsid, stdin=slave, stdout=slave, stderr=slave)
os.close(slave)
... do stuff here ...
x = os.read(master, 1026)
print x
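For what it's worth, the same effect should be achievable without ctypes, since Python exposes setsid directly (and Python 3.2+ has start_new_session=True). A minimal sketch of that variant, assuming the same bash invocation:

import os
import pty
import subprocess

master, slave = pty.openpty()
# os.setsid puts the child in a new session so it can own the pty;
# on Python 3.2+, start_new_session=True does the same thing.
p = subprocess.Popen(["/bin/bash", "-i"], preexec_fn=os.setsid,
                     stdin=slave, stdout=slave, stderr=slave)
os.close(slave)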
Here is a full object-oriented solution for running interactive shell commands with TTYs, using threads and queues for stdout and stderr IO handling. This took me a while to piece together from multiple sources, but it has worked perfectly so far on Unix/Linux systems, including as part of a Juniper op script. I thought I would post it here to save others the time of building something like this.
import pty
import re
import select
import threading
from datetime import datetime, timedelta
import os
import logging
import subprocess
import time
from queue import Queue, Empty
lib_logger = logging.getLogger("lib")

# Default timeout in seconds for open() and wait_for(); the original snippet
# references DEVICE_TIMEOUT without defining it, so a value is assumed here.
DEVICE_TIMEOUT = 30
# Handler function to be run as a thread for pulling pty channels from an interactive shell
def _pty_handler(pty_master, logger, queue, stop):
poller = select.poll()
poller.register(pty_master, select.POLLIN)
while True:
# Stop handler if flagged
if stop():
logger.debug("Disabling pty handler for interactive shell")
break
fd_event = poller.poll(100)
for descriptor, event in fd_event:
# Read data from pipe and send to queue if there is data to read
if event == select.POLLIN:
data = os.read(descriptor, 1).decode("utf-8")
if not data:
break
# logger.debug("Reading in to handler queue: " + data)
queue.put(data)
# Exit handler if stdout is closing
elif event == select.POLLHUP:
logger.debug("Disabling pty handler for interactive shell")
break
# Function for reading outputs from the given queue by draining it and returning the output
def _get_queue_output(queue: Queue) -> str:
value = ""
try:
while True:
value += queue.get_nowait()
except Empty:
return value
# Helper function to create the needed list for popen and print the command run to the logger
def popen_command(command, logger, *args):
popen_list = list()
popen_list.append(command)
command_output = command
for arg in args:
popen_list.append(arg)
command_output += " " + arg
lib_logger.debug("Making Popen call using: " + str(popen_list))
logger.debug("")
logger.debug(command_output)
logger.debug("")
return popen_list
# Class for creating an interactive shell, sending commands to it, and logging output to loggers
class InteractiveShell(object):
def __init__(self, command, logger, *args):
self.logger = logger
self.command = command
self.process = None
self.popen_list = popen_command(command, logger, *args)
self.master_stdout = None
self.slave_stdout = None
self.master_stderr = None
self.slave_stderr = None
self.stdout_handler = None
self.stderr_handler = None
self.stdout_queue = None
self.stderr_queue = None
self.stop_handlers = False
# Open interactive shell and setup all threaded IO handlers
def open(self, shell_prompt, timeout=DEVICE_TIMEOUT):
# Create PTYs
self.master_stdout, self.slave_stdout = pty.openpty()
self.master_stderr, self.slave_stderr = pty.openpty()
# Create shell subprocess
self.process = subprocess.Popen(self.popen_list, stdin=self.slave_stdout, stdout=self.slave_stdout,
stderr=self.slave_stderr, bufsize=0, start_new_session=True)
lib_logger.debug("")
lib_logger.debug("Started interactive shell for command " + self.command)
lib_logger.debug("")
# Create thread and queues for handling pty output and start them
self.stdout_queue = Queue()
self.stderr_queue = Queue()
self.stdout_handler = threading.Thread(target=_pty_handler, args=(self.master_stdout,
lib_logger,
self.stdout_queue,
lambda: self.stop_handlers))
self.stderr_handler = threading.Thread(target=_pty_handler, args=(self.master_stderr,
lib_logger,
self.stderr_queue,
lambda: self.stop_handlers))
self.stdout_handler.daemon = True
self.stderr_handler.daemon = True
lib_logger.debug("Enabling stderr handler for interactive shell " + self.command)
self.stderr_handler.start()
lib_logger.debug("Enabling stdout handler for interactive shell " + self.command)
self.stdout_handler.start()
# Wait for shell prompt
lib_logger.debug("Waiting for shell prompt: " + shell_prompt)
return self.wait_for(shell_prompt, timeout)
# Close interactive shell which should also kill all threaded IO handlers
def close(self):
# Wait 5 seconds before closing to let shell handle all input and outputs
time.sleep(5)
# Stop IO handler threads and terminate the process then wait another 5 seconds for cleanup to happen
self.stop_handlers = True
self.process.terminate()
time.sleep(5)
# Check for any additional output from the stdout handler
output = ""
while True:
data = _get_queue_output(self.stdout_queue)
if data != "":
output += data
else:
break
for line in iter(output.splitlines()):
self.logger.debug(line)
# Check for any additional output from the stderr handler
output = ""
while True:
data = _get_queue_output(self.stderr_queue)
if data != "":
output += data
else:
break
for line in iter(output.splitlines()):
self.logger.error(line)
# Cleanup PTYs
os.close(self.master_stdout)
os.close(self.master_stderr)
os.close(self.slave_stdout)
os.close(self.slave_stderr)
lib_logger.debug("Interactive shell command " + self.command + " terminated")
# Run series of commands given as a list of a list of commands and wait_for strings. If no wait_for is needed then
# only provide the command. Return if all the commands completed successfully or not.
# Ex:
# [
# ["ssh jsas#" + vnf_ip, r"jsas#.*:"],
# ["juniper123", r"jsas#.*\$"],
# ["sudo su", r".*jsas:"],
# ["juniper123", r"root#.*#"],
# ["usermod -p 'blah' jsas"]
# ]
def run_commands(self, commands_list):
shell_status = True
for command in commands_list:
shell_status = self.run(command[0])
if shell_status and len(command) == 2:
shell_status = self.wait_for(command[1])
# Break out of running commands if a command failed
if not shell_status:
break
return shell_status
# Run given command and return False if error occurs otherwise return True
def run(self, command, sleep=0):
# Check process to make sure it is still running and if not grab the stderr output
if self.process.poll():
self.logger.error("Interactive shell command " + self.command + " closed with return code: " +
self.process.returncode)
data = _get_queue_output(self.stderr_queue)
if data != "":
self.logger.error("Interactive shell error messages:")
for line in iter(data.splitlines()):
self.logger.error(line)
return False
# Write command to process and check to make sure a newline is in command otherwise add it
if "\n" not in command:
command += "\n"
os.write(self.master_stdout, command.encode("utf-8"))
if sleep:
time.sleep(sleep)
return True
# Wait for specific regex expression in output before continuing return False if wait time expires otherwise return
# True
def wait_for(self, this, timeout=DEVICE_TIMEOUT):
timeout = datetime.now() + timedelta(seconds=timeout)
output = ""
# Keep searching for output until timeout occurs
while timeout > datetime.now():
data = _get_queue_output(self.stdout_queue)
if data != "":
# Add to output line and check for match to regex given and if match then break and send output to
# logger
output += data
lib_logger.debug("Checking for " + this + " in data: ")
for line in iter(output.splitlines()):
lib_logger.debug(line)
if re.search(r"{}\s?$".format(this), output):
break
time.sleep(1)
# Send output to logger
for line in iter(output.splitlines()):
self.logger.debug(line)
# If wait time expired print error message and return False
if timeout < datetime.now():
self.logger.error("Wait time expired when waiting for " + this)
return False
return True
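A hypothetical usage sketch of the class above; the prompt regex, command, and timeouts are made up for illustration:

import logging

logging.basicConfig(level=logging.DEBUG)
shell = InteractiveShell("/bin/bash", logging.getLogger("demo"))
# Wait for a '$' prompt, run a command, wait for its output, then clean up.
if shell.open(r"\$", timeout=10):
    shell.run("echo hello")
    shell.wait_for("hello", timeout=10)
shell.close()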
When I run the Python script (BootScript.py) in the shell it runs properly, but when I try to run it through another script (automation.py) it gets stuck.
//automation.py
#!/usr/bin/env python
import sys
import optparse
import subprocess
global flag
failcount = 0
def incrfailcount():
global failcount
failcount += 1
def readfile():
fp = open('BootStrap.log','r')
print "press any key"
#_input()
for l in fp.readlines() :
if "BOOTSCRIPT SCORE IS: 3010" in l :
#import pdb
#pdb.set_trace()
global flag
flag = 0
fp.close()
parser = optparse.OptionParser()
parser.add_option('-c', '--count', dest='counter', help='no of time reboot Should Happen')
(options, args) = parser.parse_args()
#counter = 1
if options.counter is None:
counter = 1
else :
counter = options.counter
count = 0
output = ""
mylist = [ ' --cfgfile="BDXT0_PO_0.cfg"' , ' --cfgfile="BDXT0_PO_OVR_0.cfg"' ,' --scbypass' , ' --dmipy="C:\\sfd\\jg\\kdg\\dmi_pcie_po.py"', ' --fusestr="IA_CORE_DISABLE=0y111111111111111111111110"' , ' --fusestr="HT_DIS=1"' , ' --earbreakpy="C:\\dvfdfv\\dskf\\lsvcd\\config_restart.py"']
logfile = open('BootStrap.log', 'w')
#if out.__contains__('3010') :
#break
for i in range(int(counter)):
global flag
flag = 1
logfile = open('BootStrap.log', 'w')
proc = subprocess.Popen(['python' ,'bdxBootScript.py', mylist ], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in proc.stdout:
sys.stdout.write(line)
logfile.write(line)
proc.wait()
count = count + 1
print "file closing "
logfile.close()
readfile()
#global flag
if flag :
incrfailcount()
continue
if flag :
    print "Error occurred in %dth iteration" % count
else :
    print "Everything went well"
if failcount >= 0 :
print "Script failed %d times of total run %d " %(failcount, count)
I am trying to automate BootScript.py
**What the Program Does?**
It runs BootScript.py with arguments. The output of BootScript.py is checked for a specific line (BOOTSCRIPT SCORE IS: 3010).
If that line is present, the run is treated as a success, otherwise as a failure. The script is run counter number of times.
**What I want?**
This script gets stuck for a long time. I want it to execute without getting stuck, just as if I were running the bootscript manually.
There are several issues e.g., Popen(['python' ,'bdxBootScript.py', mylist ]) should raise an exception because you should use Popen(['python' ,'bdxBootScript.py'] + mylist) instead. If you don't see the exception then either the code is not run e.g., counter==0 or (worse) you suppress exceptions up the stack (don't do it, at the very least you should log unexpected errors).
If bdxBootScript.py doesn't produce much output then for line in proc.stdout: may appear to do nothing for some time. To fix it, pass the -u flag to python to make its output unbuffered, and use iter(p.stdout.readline, b'') to work around the "hidden read-ahead buffer" bug for pipes in Python 2:
import os
import sys
from subprocess import Popen, PIPE, STDOUT
with open(os.devnull, 'rb', 0) as DEVNULL:
proc = Popen([sys.executable, '-u', 'bdxBootScript.py'] + mylist,
stdin=DEVNULL, stdout=PIPE, stderr=STDOUT, bufsize=1)
for line in iter(proc.stdout.readline, b''):
sys.stdout.write(line)
sys.stdout.flush()
logfile.write(line)
logfile.flush() # make the line available in the log immediately
proc.stdout.close()
rc = proc.wait()
I'm trying to write a program that will run a program on a remote machine using ssh and pass it SIGINT signals without killing my ssh connection. If I was running on a terminal, then this would be easy with something like
ssh -t -t host "command to run"
I tried this with the following script:
#!/bin/bash
ssh -t -t host "program $@"
and when I run that from the terminal it works fine, but when Proof General runs this script and sends it SIGINT it just ends up killing the program (I guess it can't allocate a terminal?).
I've got the following mess of code:
import os
import select
import signal
import subprocess
import sys

CTRL_C = "<CTRL-C>"
def endpoint(proc, handler):
signal.signal(2, signal.SIG_IGN)
inp = ""
out = ""
err = ""
inp_from_fd = sys.stdin.fileno()
inp_to_fd = proc.stdin.fileno()
out_from_fd = proc.stdout.fileno()
out_to_fd = sys.stdout.fileno()
err_from_fd = proc.stderr.fileno()
err_to_fd = sys.stderr.fileno()
while True:
try:
ins,outs,_ = select.select([inp_from_fd, out_from_fd, err_from_fd],
[inp_to_fd, out_to_fd, err_to_fd],
[])
for fd in ins:
if fd == inp_from_fd:
k = os.read(fd, 1024)
if k == "":
os.close(proc.stdin.fileno())
return
inp = inp + k
elif fd == out_from_fd:
k = os.read(fd, 1024)
out = out + k
elif fd == err_from_fd:
k = os.read(fd, 1024)
err = err + k
else:
assert False
for fd in outs:
if fd == inp_to_fd:
while CTRL_C in inp:
proc.send_signal(2)
p = inp.find(CTRL_C)
inp = inp[0:p] + inp[p+len(CTRL_C):]
k = os.write(fd, inp)
inp = inp[k:]
elif fd == out_to_fd:
k = os.write(fd, out)
out = out[k:]
elif fd == err_to_fd:
k = os.write(fd, err)
err = err[k:]
else:
assert False
except select.error:
pass
except KeyboardInterrupt:
handler()
except IOError, e:
pass
def usage(args):
print "must specify --client or --server"
if __name__ == '__main__':
if len(sys.argv) == 1:
usage(sys.argv)
elif sys.argv[1] == '--server':
proc = subprocess.Popen(sys.argv[2:],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def INT():
proc.stdin.write(CTRL_C)
proc.stdin.flush()
endpoint(proc, INT)
elif '--client' in sys.argv:
proc = subprocess.Popen(sys.argv[2:],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
import time
time.sleep(1)
def INT():
pass
endpoint(proc, INT)
else:
usage(sys.argv)
which I'm invoking using something like:
remote.py --client ssh -t -t host "remote.py --server <program-to-run> <args>"
Is there something that I'm doing wrong here to handle the signal? I've tried putting a print in the signal 2 handler and it does print, but it is also killing ssh (I'm getting "Killed by signal 2." printed on the console). Is Python forwarding the signal to its children? Is there a way to get around this? Is there an easier way to do this?
Thanks for any pointers.
Use os.setpgrp (see the setpgrp man page); otherwise the signal is propagated to the children.
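A minimal sketch of that suggestion (the command is illustrative): start the child in its own process group via preexec_fn, so a SIGINT delivered to the parent is not forwarded to it, and send the signal explicitly when the child should see it.

import os
import signal
import subprocess

proc = subprocess.Popen(["ssh", "-t", "-t", "host", "command"],
                        # New process group: the parent's SIGINT no longer
                        # reaches the child automatically.
                        preexec_fn=os.setpgrp,
                        stdin=subprocess.PIPE,
                        stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE)

# Forward an interrupt explicitly only when desired:
proc.send_signal(signal.SIGINT)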