How to run PSCP cmd window step in my Python script

I am running Hadoop MapReduce and other SSH commands from a Python script using the paramiko module (code can be seen here). Once the MapReduce job is complete, I run the getmerge step to get the output into a text file.
The problem is, I then have to open a cmd window and run PSCP to download the output.txt file from the HDFS environment to my computer. For example:
pscp xxxx@xx.xx.xx.xx:/nfs_home/appers/cnielsen/MROutput_121815_0.txt C:\Users\cnielsen\Desktop\MR_Test
How can I incorporate this pscp step into my script so that I don't have to open a cmd window to run this after my MapReduce and getmerge tasks are complete? I would like my script to be able to run the MR task, getmerge task, and then automatically save the MR output to my computer.
Here is my code.
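For reference, that manual PSCP step can also be launched straight from Python with the subprocess module, so no cmd window is needed. A minimal sketch, with the pscp.exe path, host and password as placeholders based on the command above:
import subprocess

# Placeholders mirroring the manual pscp command shown above
pscp_exe = r"C:\Program Files\PuTTY\pscp.exe"   # assumed install path for pscp.exe
remote_file = "xxxx@xx.xx.xx.xx:/nfs_home/appers/cnielsen/MROutput_121815_0.txt"
local_dir = r"C:\Users\cnielsen\Desktop\MR_Test"

# -batch avoids interactive prompts; -pw supplies the password non-interactively
rc = subprocess.call([pscp_exe, "-batch", "-pw", "yourpassword", remote_file, local_dir])
if rc != 0:
    print("pscp exited with code %d" % rc)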

I have solved this problem with the following code. The trick was to use the scp module and import SCPClient. See the scp_download(ssh) function below.
When the MapReduce job completes, the getmerge command is run, followed by the scp_download step.
import paramiko
from scp import SCPClient
import time
# Define connection info
host_ip = 'xx.xx.xx.xx'
user = 'xxxxxxxx'
pw = 'xxxxxxxx'
port = 22
# Paths
input_loc = '/nfs_home/appers/extracts/*/*.xml'
output_loc = '/user/lcmsprod/output/cnielsen/'
python_path = "/usr/lib/python_2.7.3/bin/python"
hdfs_home = '/nfs_home/appers/cnielsen/'
output_log = r'C:\Users\cnielsen\Desktop\MR_Test\MRtest011316_0.txt'
local_dir = r'C:\Users\cnielsen\Desktop\MR_Test' # local destination used by scp_download(); assumed from the pscp example above
# File names
xml_lookup_file = 'product_lookups.xml'
mapper = 'Mapper.py'
reducer = 'Reducer.py'
helper_script = 'Process.py'
product_name = 'test1'
output_ref = 'test65'
target_file = 'test_011416_3.txt'
# ----------------------------------------------------
def createSSHClient(host_ip, port, user, pw):
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(host_ip, port, user, pw)
return client
# ----------------------------------------------------
def buildMRcommand(product_name):
space = " "
mr_command_list = [ 'hadoop', 'jar', '/share/hadoop/tools/lib/hadoop-streaming.jar',
'-files', hdfs_home+xml_lookup_file,
'-file', hdfs_home+mapper,
'-file', hdfs_home+reducer,
'-mapper', "'"+python_path, mapper, product_name+"'",
'-file', hdfs_home+helper_script,
'-reducer', "'"+python_path, reducer+"'",
'-input', input_loc,
'-output', output_loc+output_ref]
MR_command = space.join(mr_command_list)
print MR_command
return MR_command
# ----------------------------------------------------
def unbuffered_lines(f):
line_buf = ""
while not f.channel.exit_status_ready():
line_buf += f.read(1)
if line_buf.endswith('\n'):
yield line_buf
line_buf = ""
# ----------------------------------------------------
def stream_output(stdin, stdout, stderr):
writer = open(output_log, 'w')
# Using line_buffer function
for l in unbuffered_lines(stderr):
e = '[stderr] ' + l
print '[stderr] ' + l.strip('\n')
writer.write(e)
# gives full listing..
for line in stdout:
r = '[stdout]' + line
print '[stdout]' + line.strip('\n')
writer.write(r)
writer.close()
# ----------------------------------------------------
def run_MapReduce(ssh):
stdin, stdout, stderr = ssh.exec_command(buildMRcommand(product_name))
stream_output(stdin, stdout, stderr)
return 1
# ----------------------------------------------------
def run_list_dir(ssh):
list_dir = "ls "+hdfs_home+" -l"
stdin, stdout, stderr = ssh.exec_command(list_dir)
stream_output(stdin, stdout, stderr)
# ----------------------------------------------------
def run_getmerge(ssh):
getmerge = "hadoop fs -getmerge "+output_loc+output_ref+" "+hdfs_home+target_file
print getmerge
stdin, stdout, stderr = ssh.exec_command(getmerge)
for line in stdout:
print '[stdout]' + line.strip('\n')
time.sleep(1.5)
return 1
# ----------------------------------------------------
def scp_download(ssh):
scp = SCPClient(ssh.get_transport())
print "Fetching SCP data.."
scp.get(hdfs_home+target_file, local_dir)
print "File download complete."
# ----------------------------------------------------
def main():
# Get the ssh connection
global ssh
ssh = createSSHClient(host_ip, port, user, pw)
print "Executing command..."
# Command list
##run_list_dir(ssh)
##run_getmerge(ssh)
##scp_download(ssh)
# Run MapReduce
MR_status = 0
MR_status = run_MapReduce(ssh)
if MR_status == 1:
gs = 0
gs = run_getmerge(ssh)
if gs == 1:
scp_download(ssh)
# Close ssh connection
ssh.close()
if __name__ == '__main__':
main()
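If someone only needs the download step, the SCPClient part also works on its own. A minimal standalone sketch of the same pattern used by scp_download() above (host, credentials and paths are placeholders):
import paramiko
from scp import SCPClient

ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect('xx.xx.xx.xx', port=22, username='user', password='pw')

# Copy one remote file to a local folder, then clean up
scp = SCPClient(ssh.get_transport())
scp.get('/nfs_home/appers/cnielsen/test_011416_3.txt',
        r'C:\Users\cnielsen\Desktop\MR_Test')
scp.close()
ssh.close()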

Related

How to print variables after using subprocess.call?

In my program, I use subprocess.call to start Apache Drill automatically.
After that, I run some queries and would like to print the results.
Before I automated starting Apache Drill, I ran it manually and could print the results, but now I cannot.
My last attempt was to write to a file, but the behavior is the same: nothing is written.
My code is below.
import subprocess
from pydrill.client import PyDrill
import sys
writer = open('resultado.txt', 'w')
cmdmsg = subprocess.check_output("cd C:\\Users\\Tito\\Downloads\\apache-drill-1.14.0\\bin & sqlline -u \"jdbc:drill:zk=local\"", shell = True)
writer.write("teste de msg: " + str(cmdmsg))
drill = PyDrill(host='localhost', port=8047)
if drill.is_active:
result = drill.query('''SELECT * FROM cp.`employee.json` LIMIT 3''')
result2 = drill.query('''SELECT * FROM dfs.`/Users/Tito/Desktop/banco_gal.csv` LIMIT 5''')
for tuple in result2:
writer.write(tuple)
writer.close
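One detail worth noting in the snippet above: writer.close is referenced but never called (the parentheses are missing), so the buffered output is never flushed to disk, and writer.write(tuple) is handed a query row rather than a string. A small sketch of the same writes with those two issues fixed (cmdmsg and result2 stand for the values produced above):
# Same writes as above, with the file closed automatically and rows converted to text
with open('resultado.txt', 'w') as writer:
    writer.write("teste de msg: " + str(cmdmsg))
    for row in result2:
        writer.write(str(row) + "\n")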
I was able to solve this problem. Three things are important here:
a) The Java virtual machine and the shell should be killed after Apache Drill has been turned on.
b) The Windows console buffer is very short, so the result was not being printed.
c) The Popen method is better suited to this task than call.
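To expand on point c): subprocess.check_output waits for the launched command to exit before returning, which is a poor fit for a long-running process like sqlline, whereas subprocess.Popen starts it in the background and returns immediately so the script can go on to query Drill. The full script below does this; in isolation, the key call looks roughly like the following (path taken from the question):
import os
import subprocess

drill_bin = r'C:\Users\Tito\Downloads\apache-drill-1.14.0\bin'
devnull = open(os.devnull, 'w')
# Returns immediately; sqlline keeps running in the background
proc = subprocess.Popen('sqlline.bat -u "jdbc:drill:zk=local"', cwd=drill_bin,
                        stdout=devnull, stderr=subprocess.STDOUT, shell=True)
print('Started sqlline with PID %d' % proc.pid)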
import os
import re
import subprocess
import traceback
from os import path
from pydrill.client import PyDrill
DRILL_HOST = 'localhost'
DRILL_PORT = 8047
JAVA_HOME = ''
JAVA_HOME = JAVA_HOME or ("JAVA_HOME" in os.environ and os.environ["JAVA_HOME"])
JPS_EXE_PATH = path.join(JAVA_HOME, 'bin', 'jps.exe')
KILL_DRILL_JVM = True
def getJvmPID(className):
pid = None
print('Running JPS cmd: %s' % JPS_EXE_PATH)
jpsOutput = subprocess.check_output(JPS_EXE_PATH)
jpsOutput = str(jpsOutput)
se = re.search(r'([0-9]*\s*)' + className, jpsOutput)
if se:
pid = se.group(1)
return pid
def killProcessByPID(pid):
killCmd = ['taskkill', '/f', '/pid', str(pid)]
print('Running taskkill cmd: %s' % killCmd)
killCmdOuput = subprocess.check_output(killCmd, stderr=subprocess.STDOUT)
print(str(killCmdOuput))
def killJvm(className):
pid = getJvmPID(className)
killProcessByPID(pid)
drillBinDir = 'C:/Users/Tito/Downloads/apache-drill-1.14.0/bin'
sqlinePath = path.join(drillBinDir, 'sqlline.bat')
drillCmdList = [sqlinePath , '-u', '"jdbc:drill:zk=local"']
drillCmdStr = " ".join(drillCmdList)
drillConAttempts = 2
while drillConAttempts > 0:
drillConAttempts -= 1
print("Connecting to drill on %s:%d..." % (DRILL_HOST, DRILL_PORT))
try:
drill = PyDrill(host=DRILL_HOST, port=DRILL_PORT)
except:
print("Exception when creating object")
traceback.print_exc()
print("Checking Drill conection...")
try:
if drill.is_active():
print("Connected.")
break
elif drillConAttempts > 0:
print("Could not connect to Drill. Trying to start Drill...")
print("Running cmd '%s > %s'" % (drillCmdStr, os.devnull) )
devNull = open(os.devnull,"w")
cmdProc = subprocess.Popen(drillCmdStr, cwd=drillBinDir, stdout=devNull, stderr=subprocess.STDOUT, shell=True)
print("Started CMD process with PID %d" %(cmdProc.pid))
except:
print("Exception when checking connection")
traceback.print_exc()
if drill.is_active():
result = drill.query('''SELECT * FROM cp.`employee.json` LIMIT 3''')
for resultTuple in result:
print(resultTuple)
if KILL_DRILL_JVM:
print('Killing Drill process...')
killJvm('SqlLine')

Run interactive Bash with popen and a dedicated TTY Python

I need to run an interactive Bash instance in a separate process in Python with its own dedicated TTY (I can't use pexpect).
I used this code snippet I commonly see used in similar programs:
master, slave = pty.openpty()
p = subprocess.Popen(["/bin/bash", "-i"], stdin=slave, stdout=slave, stderr=slave)
os.close(slave)
x = os.read(master, 1026)
print x
subprocess.Popen.kill(p)
os.close(master)
But when I run it I get the following output:
$ ./pty_try.py
bash: cannot set terminal process group (10790): Inappropriate ioctl for device
bash: no job control in this shell
Strace of the run shows some errors:
...
readlink("/usr/bin/python2.7", 0x7ffc8db02510, 4096) = -1 EINVAL (Invalid argument)
...
ioctl(3, SNDCTL_TMR_TIMEBASE or SNDRV_TIMER_IOCTL_NEXT_DEVICE or TCGETS, 0x7ffc8db03590) = -1 ENOTTY (Inappropriate ioctl for device)
...
readlink("./pty_try.py", 0x7ffc8db00610, 4096) = -1 EINVAL (Invalid argument)
The code snippet seems pretty straightforward. Is Bash not getting something it needs? What could be the problem here?
This is a solution for running an interactive command in a subprocess. It uses a pseudo-terminal to make stdout non-blocking (some commands also need a tty device, e.g. bash), and it uses select to handle input and output to the subprocess.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import select
import termios
import tty
import pty
from subprocess import Popen
command = 'bash'
# command = 'docker run -it --rm centos /bin/bash'.split()
# save original tty setting then set it to raw mode
old_tty = termios.tcgetattr(sys.stdin)
tty.setraw(sys.stdin.fileno())
# open pseudo-terminal to interact with subprocess
master_fd, slave_fd = pty.openpty()
try:
# use os.setsid() to make it run in a new process group, or bash job control will not be enabled
p = Popen(command,
preexec_fn=os.setsid,
stdin=slave_fd,
stdout=slave_fd,
stderr=slave_fd,
universal_newlines=True)
while p.poll() is None:
r, w, e = select.select([sys.stdin, master_fd], [], [])
if sys.stdin in r:
d = os.read(sys.stdin.fileno(), 10240)
os.write(master_fd, d)
elif master_fd in r:
o = os.read(master_fd, 10240)
if o:
os.write(sys.stdout.fileno(), o)
finally:
# restore tty settings back
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty)
This is the solution that worked for me in the end (as suggested by qarma):
libc = ctypes.CDLL('libc.so.6')
master, slave = pty.openpty()
p = subprocess.Popen(["/bin/bash", "-i"], preexec_fn=libc.setsid, stdin=slave, stdout=slave, stderr=slave)
os.close(slave)
... do stuff here ...
x = os.read(master, 1026)
print x
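The key ingredient here is setsid: running bash as a session leader lets it take the pty as its controlling terminal, which is what makes the "cannot set terminal process group" and "no job control" warnings go away (os.setsid as preexec_fn should behave the same as the ctypes call). Once you are done with the shell, the cleanup from the first snippet still applies; a short sketch, assuming p and master from the code above:
# Teardown for the bash/pty pair created above (p and master come from that snippet)
p.terminate()        # ask bash to exit; use p.kill() if it does not
p.wait()             # reap the child process
os.close(master)     # the slave end was already closed right after Popen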
Here is a full object-oriented solution for running interactive shell commands with TTYs, using threads and queues for stdout and stderr IO handling. It took me a while to piece together from multiple sources, but it has worked well so far on Unix/Linux systems, including as part of a Juniper op script. I thought I would post it here to save others the time of building something similar.
import pty
import re
import select
import threading
from datetime import datetime, timedelta
import os
import logging
import subprocess
import time
from queue import Queue, Empty
lib_logger = logging.getLogger("lib")
# Default timeout in seconds for open()/wait_for(); DEVICE_TIMEOUT is used below but
# not defined in this snippet, so this value is an assumption.
DEVICE_TIMEOUT = 60
# Handler function to be run as a thread for pulling pty channels from an interactive shell
def _pty_handler(pty_master, logger, queue, stop):
poller = select.poll()
poller.register(pty_master, select.POLLIN)
while True:
# Stop handler if flagged
if stop():
logger.debug("Disabling pty handler for interactive shell")
break
fd_event = poller.poll(100)
for descriptor, event in fd_event:
# Read data from pipe and send to queue if there is data to read
if event == select.POLLIN:
data = os.read(descriptor, 1).decode("utf-8")
if not data:
break
# logger.debug("Reading in to handler queue: " + data)
queue.put(data)
# Exit handler if stdout is closing
elif event == select.POLLHUP:
logger.debug("Disabling pty handler for interactive shell")
break
# Function for reading outputs from the given queue by draining it and returning the output
def _get_queue_output(queue: Queue) -> str:
value = ""
try:
while True:
value += queue.get_nowait()
except Empty:
return value
# Helper function to create the needed list for popen and print the command run to the logger
def popen_command(command, logger, *args):
popen_list = list()
popen_list.append(command)
command_output = command
for arg in args:
popen_list.append(arg)
command_output += " " + arg
lib_logger.debug("Making Popen call using: " + str(popen_list))
logger.debug("")
logger.debug(command_output)
logger.debug("")
return popen_list
# Class for create an interactive shell and sending commands to it along with logging output to loggers
class InteractiveShell(object):
def __init__(self, command, logger, *args):
self.logger = logger
self.command = command
self.process = None
self.popen_list = popen_command(command, logger, *args)
self.master_stdout = None
self.slave_stdout = None
self.master_stderr = None
self.slave_stderr = None
self.stdout_handler = None
self.stderr_handler = None
self.stdout_queue = None
self.stderr_queue = None
self.stop_handlers = False
# Open interactive shell and setup all threaded IO handlers
def open(self, shell_prompt, timeout=DEVICE_TIMEOUT):
# Create PTYs
self.master_stdout, self.slave_stdout = pty.openpty()
self.master_stderr, self.slave_stderr = pty.openpty()
# Create shell subprocess
self.process = subprocess.Popen(self.popen_list, stdin=self.slave_stdout, stdout=self.slave_stdout,
stderr=self.slave_stderr, bufsize=0, start_new_session=True)
lib_logger.debug("")
lib_logger.debug("Started interactive shell for command " + self.command)
lib_logger.debug("")
# Create thread and queues for handling pty output and start them
self.stdout_queue = Queue()
self.stderr_queue = Queue()
self.stdout_handler = threading.Thread(target=_pty_handler, args=(self.master_stdout,
lib_logger,
self.stdout_queue,
lambda: self.stop_handlers))
self.stderr_handler = threading.Thread(target=_pty_handler, args=(self.master_stderr,
lib_logger,
self.stderr_queue,
lambda: self.stop_handlers))
self.stdout_handler.daemon = True
self.stderr_handler.daemon = True
lib_logger.debug("Enabling stderr handler for interactive shell " + self.command)
self.stderr_handler.start()
lib_logger.debug("Enabling stdout handler for interactive shell " + self.command)
self.stdout_handler.start()
# Wait for shell prompt
lib_logger.debug("Waiting for shell prompt: " + shell_prompt)
return self.wait_for(shell_prompt, timeout)
# Close interactive shell which should also kill all threaded IO handlers
def close(self):
# Wait 5 seconds before closing to let shell handle all input and outputs
time.sleep(5)
# Stop IO handler threads and terminate the process then wait another 5 seconds for cleanup to happen
self.stop_handlers = True
self.process.terminate()
time.sleep(5)
# Check for any additional output from the stdout handler
output = ""
while True:
data = _get_queue_output(self.stdout_queue)
if data != "":
output += data
else:
break
for line in iter(output.splitlines()):
self.logger.debug(line)
# Check for any additional output from the stderr handler
output = ""
while True:
data = _get_queue_output(self.stderr_queue)
if data != "":
output += data
else:
break
for line in iter(output.splitlines()):
self.logger.error(line)
# Cleanup PTYs
os.close(self.master_stdout)
os.close(self.master_stderr)
os.close(self.slave_stdout)
os.close(self.slave_stderr)
lib_logger.debug("Interactive shell command " + self.command + " terminated")
# Run series of commands given as a list of a list of commands and wait_for strings. If no wait_for is needed then
# only provide the command. Return if all the commands completed successfully or not.
# Ex:
# [
# ["ssh jsas#" + vnf_ip, r"jsas#.*:"],
# ["juniper123", r"jsas#.*\$"],
# ["sudo su", r".*jsas:"],
# ["juniper123", r"root#.*#"],
# ["usermod -p 'blah' jsas"]
# ]
def run_commands(self, commands_list):
shell_status = True
for command in commands_list:
shell_status = self.run(command[0])
if shell_status and len(command) == 2:
shell_status = self.wait_for(command[1])
# Break out of running commands if a command failed
if not shell_status:
break
return shell_status
# Run given command and return False if error occurs otherwise return True
def run(self, command, sleep=0):
# Check process to make sure it is still running and if not grab the stderr output
if self.process.poll() is not None:
self.logger.error("Interactive shell command " + self.command + " closed with return code: " +
str(self.process.returncode))
data = _get_queue_output(self.stderr_queue)
if data != "":
self.logger.error("Interactive shell error messages:")
for line in iter(data.splitlines()):
self.logger.error(line)
return False
# Write command to process and check to make sure a newline is in command otherwise add it
if "\n" not in command:
command += "\n"
os.write(self.master_stdout, command.encode("utf-8"))
if sleep:
time.sleep(sleep)
return True
# Wait for specific regex expression in output before continuing return False if wait time expires otherwise return
# True
def wait_for(self, this, timeout=DEVICE_TIMEOUT):
timeout = datetime.now() + timedelta(seconds=timeout)
output = ""
# Keep searching for output until timeout occurs
while timeout > datetime.now():
data = _get_queue_output(self.stdout_queue)
if data != "":
# Add to output line and check for match to regex given and if match then break and send output to
# logger
output += data
lib_logger.debug("Checking for " + this + " in data: ")
for line in iter(output.splitlines()):
lib_logger.debug(line)
if re.search(r"{}\s?$".format(this), output):
break
time.sleep(1)
# Send output to logger
for line in iter(output.splitlines()):
self.logger.debug(line)
# If wait time expired print error message and return False
if timeout < datetime.now():
self.logger.error("Wait time expired when waiting for " + this)
return False
return True
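A short usage sketch for the class above, modeled on the run_commands example in its comments; the host, credentials, prompts and logger here are placeholders:
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("example")

# Open an interactive ssh session and drive it with prompt/answer pairs
shell = InteractiveShell("ssh", logger, "user@198.51.100.10")
if shell.open(r"password:"):
    shell.run_commands([
        ["secret123", r"\$"],   # answer the password prompt, then wait for the shell prompt
        ["uname -a"],           # command with no wait_for entry
    ])
shell.close()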

ssh command with argument execution in python

I need to execute an ssh command with arguments in Python. I have been able to execute the ssh command, but I cannot figure out how to pass the arguments.
The command:
ssh -L 22222:localhost:5434 sayan@155.97.73.252
Here is the code :
ssh = paramiko.SSHClient();
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy());
ssh.connect("155.97.73.252", username="sayan", password="#####");
paramiko Example
class RunCommand(cmd.Cmd):
""" Simple shell to run a command on the host """
prompt = 'ssh > '
def __init__(self):
cmd.Cmd.__init__(self)
self.hosts = []
self.connections = []
def do_add_host(self, args):
"""add_host
Add the host to the host list"""
if args:
self.hosts.append(args.split(','))
else:
print "usage: host "
def do_connect(self, args):
"""Connect to all hosts in the hosts list"""
for host in self.hosts:
client = paramiko.SSHClient()
client.set_missing_host_key_policy(
paramiko.AutoAddPolicy())
client.connect(host[0],
username=host[1],
password=host[2])
self.connections.append(client)
def do_run(self, command):
"""run
Execute this command on all hosts in the list"""
if command:
for host, conn in zip(self.hosts, self.connections):
stdin, stdout, stderr = conn.exec_command(command)
stdin.close()
for line in stdout.read().splitlines():
print 'host: %s: %s' % (host[0], line)
else:
print "usage: run "
def do_close(self, args):
for conn in self.connections:
conn.close()
if __name__ == '__main__':
RunCommand().cmdloop()
Example output:
ssh > add_host 127.0.0.1,jesse,lol
ssh > connect
ssh > run uptime
host: 127.0.0.1: 14:49 up 11 days, 4:27, 8 users,
load averages: 0.36 0.25 0.19
ssh > close
fabric example
import re
from fabric.api import env, run
from fabric import tasks
env.hosts = ['localhost', 'sunflower.heliotropic.us']
pattern = re.compile(r'up (\d+) days')
# No need to decorate this function with @task
def uptime():
res = run('uptime')
match = pattern.search(res)
if match:
days = int(match.group(1))
env['uts'].append(days)
def main():
env['uts'] = []
tasks.execute(uptime)
uts_list = env['uts']
if not uts_list:
return # Perhaps we should print a notice here?
avg = sum(uts_list) / float(len(uts_list))
print '-' * 80
print 'Average uptime: %s days' % avg
print '-' * 80
if __name__ == '__main__':
main()
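As an aside, this example uses the Fabric 1.x API (env, run, tasks.execute); Fabric 2.x replaced it with a Connection-based API. A minimal sketch of the same uptime call in Fabric 2 style, in case that is the version installed (host and user are placeholders):
from fabric import Connection

# Fabric 2.x style: one Connection per host; run() returns a result object
conn = Connection(host="sunflower.heliotropic.us", user="youruser")
result = conn.run("uptime", hide=True)
print(result.stdout.strip())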

Create a configureable, multithreaded log file accumulator in python using SSH (paramiko?)

I am about to code something and thought I'd put my idea out here first to see if anyone has comments, etc.
I want to create a class in Python for simultaneously monitoring (and merging) multiple log files that will be used as part of an automated test. I am hoping to simply execute 'tail -f' (over SSH with paramiko, maybe) on each file using a separate thread. Then, every few seconds, get the stdout from each thread and merge it into one file, with a suffix added to each line that identifies the source. This way I can write tests of distributed systems and monitor the logs of maybe a dozen machines at once (many of which have the same purpose and sit behind a load balancer, etc.).
Startup:
for machine, logfile in config_list:
create thread running tail -f on logfile on machine
create accumulator thread that:
wakes up each second and
gets all config_list stdout and merges it into one in-memory list
Test_API:
method to get/query data from the in memory accumulator.
in memory list would be the only data item needed to be synchronized
So, I am wondering: is paramiko the correct choice? Are there any caveats about handling the threading (I have never done anything with threading in Python)? Any additional ideas that come to mind?
Thanks in advance!
Feel free to post code snippets. I will update this post with a working solution once it is finished. I anticipate it will be pretty small.
Just found this:
Creating multiple SSH connections at a time using Paramiko
EDIT
From looking at a couple of other posts, I have this so far. It is just doing a tail, not a tail -f, and does not have the polling I need.
from someplace import TestLogger
import threading
import paramiko
def start_watching():
logger = TestLogger().get()
logs_to_watch = [('somemachine1', '/var/log/foo'),
('somemachine2', '/var/log/bar')]
threads = []
for machine, filename in logs_to_watch:
logger.info(machine)
logger.info(filename)
t = threading.Thread(target=workon, args=(machine, filename))
t.start()
threads.append(t)
for t in threads:
t.join()
for merge_line in merged_log:
logger.info(merge_line.dump())
outlock = threading.Lock()
merged_log = []
def workon(host, logfile):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(host, username='yourusername', allow_agent=True, look_for_keys=True)
stdin, stdout, stderr = ssh.exec_command('sudo tail ' + logfile)
stdin.flush()
with outlock:
line = stdout.readline()
while line:
line = stdout.readline()
merged_log.append(MergeLogLine(line, host, logfile))
class MergeLogLine():
def __init__(self, line, host, logfile):
self._line = line
self._host = host
self._logfile = logfile
def line(self):
return self._line
def host(self):
return self._host
def logfile(self):
return self._logfile
def dump(self):
return self._line + '(from host = ' + self._host + ', log = ' + self._logfile + ')'
This turned out to be pretty hard. Here is a working sample:
Sample 'client code':
import sys
import traceback
import tail_accumulate as ta
import time
def main(argv):
user = 'cmead'
logs_to_watch = [('somemachine1', '/var/log/bar/sample.log'),
('somemachine2', '/var/log/foo')]
tac = ta.TailAccumulateConfig(logs_to_watch, user)
try:
ta.start_watching(tac)
time.sleep(10)
for merge_line in ta.get_merged_log():
print merge_line.dump()
except Exception as e:
print traceback.format_exc()
ta.stop()
if __name__ == "__main__":
main(sys.argv[1:])
Tail accumulate package:
import threading
import paramiko
import select
import time
threads = []
stopFlag = None
class TailAccumulateConfig():
def __init__(self, log_list, user):
self._log_list = log_list
self._user = user
def user(self):
return self._user
def log_list(self):
return self._log_list
def start_watching(tail_accumulate_config):
global stopFlag
stopFlag = threading.Event()
for machine, filename in tail_accumulate_config.log_list():
t = LogListenWorker(stopFlag, machine, filename, tail_accumulate_config.user())
t.start()
global threads
threads.append(t)
def stop():
global stopFlag
stopFlag.set()
def get_merged_log():
with outlock:
global merged_log
temp = merged_log[:]
del merged_log[:]
return temp
outlock = threading.Lock()
merged_log = []
class LogListenWorker(threading.Thread):
def __init__(self, event, host, logfile, username):
threading.Thread.__init__(self)
self.stopped = event
self.host = host
self.logfile = logfile
self.username = username
def run(self):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(self.host, username=self.username)
transport = ssh.get_transport()
channel = transport.open_session()
channel.exec_command('sudo tail -f ' + self.logfile)
while not self.stopped.isSet():
try:
rl, wl, xl = select.select([channel],[],[],3.0)
if len(rl) > 0:
# Must be stdout
line = channel.recv(1024)
else:
time.sleep(1.0)
continue
except Exception as e:
break
if line:
with outlock:
sublines = line.split('\n')
for subline in sublines:
merged_log.append(MergeLogLine(subline, self.host, self.logfile))
ssh.close()
class MergeLogLine():
def __init__(self, line, host, logfile):
self._line = line
self._host = host
self._logfile = logfile
def line(self):
return self._line
def host(self):
return self._host
def logfile(self):
return self._logfile
def dump(self):
return self._line + ' ---> (from host = ' + self._host + ', log = ' + self._logfile + ')'

Catch & Ignore Signal

I'm trying to write a program that will run a program on a remote machine using ssh and pass it SIGINT signals without killing my ssh connection. If I were running in a terminal, this would be easy with something like
ssh -t -t host "command to run"
I tried this with the following script:
#!/bin/bash
ssh -t -t host "program $#"
and when I run that from the terminal it works fine, but when proofgeneral runs this script and sends it SIGINT it just ends up killing the program (I guess it can't allocate a terminal?).
I've got the following mess of code:
CTRL_C = "<CTRL-C>"
def endpoint(proc, handler):
signal.signal(2, signal.SIG_IGN)
inp = ""
out = ""
err = ""
inp_from_fd = sys.stdin.fileno()
inp_to_fd = proc.stdin.fileno()
out_from_fd = proc.stdout.fileno()
out_to_fd = sys.stdout.fileno()
err_from_fd = proc.stderr.fileno()
err_to_fd = sys.stderr.fileno()
while True:
try:
ins,outs,_ = select.select([inp_from_fd, out_from_fd, err_from_fd],
[inp_to_fd, out_to_fd, err_to_fd],
[])
for fd in ins:
if fd == inp_from_fd:
k = os.read(fd, 1024)
if k == "":
os.close(proc.stdin.fileno())
return
inp = inp + k
elif fd == out_from_fd:
k = os.read(fd, 1024)
out = out + k
elif fd == err_from_fd:
k = os.read(fd, 1024)
err = err + k
else:
assert False
for fd in outs:
if fd == inp_to_fd:
while CTRL_C in inp:
proc.send_signal(2)
p = inp.find(CTRL_C)
inp = inp[0:p] + inp[p+len(CTRL_C):]
k = os.write(fd, inp)
inp = inp[k:]
elif fd == out_to_fd:
k = os.write(fd, out)
out = out[k:]
elif fd == err_to_fd:
k = os.write(fd, err)
err = err[k:]
else:
assert False
except select.error:
pass
except KeyboardInterrupt:
handler()
except IOError, e:
pass
def usage(args):
print "must specify --client or --server"
if __name__ == '__main__':
if len(sys.argv) == 1:
usage(sys.argv)
elif sys.argv[1] == '--server':
proc = subprocess.Popen(sys.argv[2:],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def INT():
proc.stdin.write(CTRL_C)
proc.stdin.flush()
endpoint(proc, INT)
elif '--client' in sys.argv:
proc = subprocess.Popen(sys.argv[2:],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
import time
time.sleep(1)
def INT():
pass
endpoint(proc, INT)
else:
usage(sys.argv)
which I'm invoking using something like:
remote.py --client ssh -t -t host "remote.py --server <program-to-run> <args>"
Is there something that I'm doing wrong here to handle the signal? I've tried putting a print in the SIGINT handler and it does print, but it also ends up killing ssh (I'm getting "Killed by signal 2." printed on the console). Is Python forwarding the signal to its children? Is there a way to get around this? Is there an easier way to do this?
Thanks for any pointers.
Use os.setpgrp (see the setpgrp man page); otherwise the signal is propagated to the children.
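In other words, start the child in its own process group so that a terminal-generated SIGINT is not delivered to ssh as well; the Python wrapper can then decide what to forward itself. A minimal sketch of the idea (command, marker and handler are placeholders based on the script above):
import os
import signal
import subprocess

# Start ssh in its own process group so terminal SIGINT is not propagated to it
proc = subprocess.Popen(
    ["ssh", "-t", "-t", "host", "program"],
    stdin=subprocess.PIPE,
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
    preexec_fn=os.setpgrp,      # on Python 3.2+, start_new_session=True works as well
)

# The wrapper still receives SIGINT and forwards it over the pipe as a marker
def on_sigint(signum, frame):
    proc.stdin.write(b"<CTRL-C>")   # marker protocol from the script above
    proc.stdin.flush()

signal.signal(signal.SIGINT, on_sigint)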
