I'm trying to write a program that will run a program on a remote machine using ssh and pass it SIGINT signals without killing my ssh connection. If I were running in a terminal, this would be easy with something like
ssh -t -t host "command to run"
I tried this with the following script:
#!/bin/bash
ssh -t -t host "program $@"
and when I run that from the terminal it works fine, but when Proof General runs this script and sends it SIGINT, it just ends up killing the program (I guess it can't allocate a terminal?).
I've got the following mess of code:
import os
import select
import signal
import subprocess
import sys

CTRL_C = "<CTRL-C>"

def endpoint(proc, handler):
    signal.signal(2, signal.SIG_IGN)
    inp = ""
    out = ""
    err = ""
    inp_from_fd = sys.stdin.fileno()
    inp_to_fd = proc.stdin.fileno()
    out_from_fd = proc.stdout.fileno()
    out_to_fd = sys.stdout.fileno()
    err_from_fd = proc.stderr.fileno()
    err_to_fd = sys.stderr.fileno()
    while True:
        try:
            ins, outs, _ = select.select([inp_from_fd, out_from_fd, err_from_fd],
                                         [inp_to_fd, out_to_fd, err_to_fd],
                                         [])
            for fd in ins:
                if fd == inp_from_fd:
                    k = os.read(fd, 1024)
                    if k == "":
                        os.close(proc.stdin.fileno())
                        return
                    inp = inp + k
                elif fd == out_from_fd:
                    k = os.read(fd, 1024)
                    out = out + k
                elif fd == err_from_fd:
                    k = os.read(fd, 1024)
                    err = err + k
                else:
                    assert False
            for fd in outs:
                if fd == inp_to_fd:
                    while CTRL_C in inp:
                        proc.send_signal(2)
                        p = inp.find(CTRL_C)
                        inp = inp[0:p] + inp[p+len(CTRL_C):]
                    k = os.write(fd, inp)
                    inp = inp[k:]
                elif fd == out_to_fd:
                    k = os.write(fd, out)
                    out = out[k:]
                elif fd == err_to_fd:
                    k = os.write(fd, err)
                    err = err[k:]
                else:
                    assert False
        except select.error:
            pass
        except KeyboardInterrupt:
            handler()
        except IOError, e:
            pass

def usage(args):
    print "must specify --client or --server"

if __name__ == '__main__':
    if len(sys.argv) == 1:
        usage(sys.argv)
    elif sys.argv[1] == '--server':
        proc = subprocess.Popen(sys.argv[2:],
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        def INT():
            proc.stdin.write(CTRL_C)
            proc.stdin.flush()
        endpoint(proc, INT)
    elif '--client' in sys.argv:
        proc = subprocess.Popen(sys.argv[2:],
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        import time
        time.sleep(1)
        def INT():
            pass
        endpoint(proc, INT)
    else:
        usage(sys.argv)
which I'm invoking using something like:
remote.py --client ssh -t -t host "remote.py --server <program-to-run> <args>"
Is there something that I'm doing wrong here to handle the signal? I've tried putting a print in the signal 2 handler and it does print, but it is also killing ssh (I'm getting "Killed by signal 2." printed on the console). Is Python forwarding the signal to its children? Is there a way to get around this? Is there an easier way to do this?
Thanks for any pointers.
Use os.setpgrp (see the setpgrp man page); otherwise the signal is propagated to the children.
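For illustration, a minimal sketch of how that might look on the client side (preexec_fn and os.setpgrp are standard library; the exact ssh command line is hypothetical):
import os
import subprocess

# Run ssh in its own process group so a SIGINT delivered to this script
# (e.g. Ctrl-C from the terminal) is not also delivered to ssh.
proc = subprocess.Popen(
    ["ssh", "-t", "-t", "host", "remote-command"],  # hypothetical command
    stdin=subprocess.PIPE,
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
    preexec_fn=os.setpgrp,
)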
I was trying to launch two functions at the same time using threading, but Python is skipping my first function. Do you have any idea why?
import socket
import subprocess
import threading
import time

import requests
from vidstream import ScreenShareClient  # assumed: the question does not show where ScreenShareClient comes from

def ip_spoofer():
    while True:
        print("gg")
        REMOTE_BUFFER = requests.get('MY_URL').json()
        REMOTE_HOST = REMOTE_BUFFER['ip']
        time.sleep(60)

def hello():
    try:
        REMOTE_PORT = 8081
        client = socket.socket()
        while True:
            try:
                print('hey')  # Debugging
                if 'ip_spoofer.REMOTE_HOST' in locals():
                    print('bye')  # Debugging
                    client.connect((ip_spoofer.REMOTE_HOST, REMOTE_PORT))
                    break
            except:
                pass
        while True:
            command = client.recv(1024)
            command = command.decode()
            if command == 'KEY_SCREEN':
                sender = ScreenShareClient(ip_spoofer.REMOTE_HOST, REMOTE_PORT)
                sender.start_stream()
            else:
                output = subprocess.Popen(command, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
                out, err = output.communicate()
                client.send(out + err)
    except:
        hello()

threading.Thread(target=ip_spoofer).start()
threading.Thread(target=hello).start()
Output: the console is spammed with hey because of the while loop. I deduce that Python is skipping the first function.
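In case it helps: both threads do start; the condition 'ip_spoofer.REMOTE_HOST' in locals() can never be true, because it looks for that literal string among hello's local variable names, and REMOTE_HOST is a local variable of ip_spoofer, not an attribute of it. A minimal sketch of one way to share the fetched address between the two threads, using a module-level variable and a threading.Event (MY_URL and the 'ip' field are taken from the question):
import threading
import time

import requests

REMOTE_HOST = None              # shared between the two threads
host_ready = threading.Event()  # set once the address has been fetched

def ip_spoofer():
    global REMOTE_HOST
    while True:
        REMOTE_HOST = requests.get('MY_URL').json()['ip']
        host_ready.set()
        time.sleep(60)

def hello():
    host_ready.wait()           # block instead of busy-looping on locals()
    print('connecting to', REMOTE_HOST)

threading.Thread(target=ip_spoofer, daemon=True).start()
threading.Thread(target=hello).start()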
I'm writing a subprocess-based Python program that acts as a proxy between the user input and the subprocess (trying to go beyond pexpect). I've taken this thread as a reference, along with a code chunk from pexpect (the _read_incoming() method of popen_spawn) to read output (the fcntl method worked, but not satisfactorily).
The code runs but has a problem: there seems to be an additional carriage return being sent to the process. This is causing me issues when I try to do things like sending passwords to ssh etc.
Could you look into what might be the issue? Thanks!
The code is as follows:
from queue import Queue, Empty
from threading import Thread
import subprocess
import signal
import fcntl
import os

terminating = False

def setNonBlocking(fd):
    """
    Set the file descriptor of the given file object to non-blocking.
    """
    print(fd)
    flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    flags = flags | os.O_NONBLOCK
    fcntl.fcntl(fd, fcntl.F_SETFL, flags)

def enqueue(out, q):
    fileno = out.fileno()
    while not terminating:
        buf = b''
        try:
            buf = os.read(fileno, 1024)
            if buf and len(buf) > 0:
                q.put(buf)
        except OSError as e:
            # print("OS error: {0}".format(e))
            pass
        if not buf:
            q.put(buf)
    # for line in iter(out.readline, b''):
    #     if len(line.strip()) > 0:
    #         print(line)
    #         q.put(line)
    out.close()
    print('Terminating')
    return

def get_output(q):
    out_str = bytes()
    while True:
        try:
            incoming = q.get_nowait()
        except Empty:
            break
        else:
            if incoming is None:
                break
            else:
                out_str += incoming
    if out_str:
        return out_str
    else:
        return b''

def explore(cmd="/bin/bash"):
    global terminating
    universal_newlines = False
    p = subprocess.Popen([cmd], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT,
                         bufsize=0, shell=False, universal_newlines=universal_newlines)
    # setNonBlocking(p.stdout)
    outQueue = Queue()
    outThread = Thread(target=enqueue, args=(p.stdout, outQueue))
    outThread.daemon = True
    outThread.start()
    while True:
        try:
            someInput = input()
            print('[In]:' + someInput)
            someInput += '\n'
            if not universal_newlines:
                p.stdin.write(someInput.encode('utf-8'))
            else:
                p.stdin.write(someInput)
            p.stdin.flush()
            out = get_output(outQueue).decode('utf-8')
            print('[Out]:' + out)
            # p.communicate(someInput+'\n')
        except KeyboardInterrupt:
            print('Interrupting')
            p.send_signal(signal.SIGINT)
            terminating = True
            outThread.join()
            break
    p.wait()

if __name__ == '__main__':
    explore()
Example run:
ls
[In]:ls
[Out]:
[In]:
[Out]:explorer.py
__init__.py
^CInterrupting
Terminating
The second [In] was the user pressing Enter.
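For what it's worth, the lag here may be a read race rather than an extra carriage return: get_output() drains the queue immediately after the write, before bash has produced any output, so each command's output only shows up at the next prompt. A minimal sketch of a less racy variant, assuming that diagnosis (the 0.2 s timeout is an arbitrary choice; Queue.get with a timeout is standard library):
from queue import Queue, Empty

def get_output(q, first_read_timeout=0.2):
    """Wait briefly for the first chunk, then drain whatever has arrived."""
    out_str = b''
    try:
        out_str += q.get(timeout=first_read_timeout)  # block for the first chunk
        while True:
            out_str += q.get_nowait()                 # then drain without blocking
    except Empty:
        pass
    return out_str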
Update:
Tested the alternative using pexpect's popen_spawn module. Same result:
from pexpect.popen_spawn import PopenSpawn as Spawn
import signal

def explore(cmd="/bin/bash"):
    p = Spawn(cmd)
    while True:
        try:
            someInput = input()
            print('[In]:' + someInput)
            p.sendline(someInput)
            out = p.read_nonblocking(size=1024, timeout=-1).decode('utf-8')
            print('[Out]:' + out)
            # p.communicate(someInput+'\n')
        except KeyboardInterrupt:
            print('Interrupting')
            p.sendeof()
            p.kill(signal.SIGINT)
            break

if __name__ == '__main__':
    explore()
I need to run an interactive Bash instance in a separate process in Python, with its own dedicated TTY (I can't use pexpect).
I used this code snippet, which I commonly see used in similar programs:
import os
import pty
import subprocess

master, slave = pty.openpty()
p = subprocess.Popen(["/bin/bash", "-i"], stdin=slave, stdout=slave, stderr=slave)
os.close(slave)
x = os.read(master, 1026)
print x
p.kill()
os.close(master)
But when I run it I get the following output:
$ ./pty_try.py
bash: cannot set terminal process group (10790): Inappropriate ioctl for device
bash: no job control in this shell
Strace of the run shows some errors:
...
readlink("/usr/bin/python2.7", 0x7ffc8db02510, 4096) = -1 EINVAL (Invalid argument)
...
ioctl(3, SNDCTL_TMR_TIMEBASE or SNDRV_TIMER_IOCTL_NEXT_DEVICE or TCGETS, 0x7ffc8db03590) = -1 ENOTTY (Inappropriate ioctl for device)
...
readlink("./pty_try.py", 0x7ffc8db00610, 4096) = -1 EINVAL (Invalid argument)
The code snippet seems pretty straightforward. Is Bash not getting something it needs? What could be the problem here?
This is a solution to run an interactive command in a subprocess. It uses a pseudo-terminal to make stdout non-blocking (some commands also need a tty device, e.g. bash), and it uses select to handle input and output to the subprocess.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import select
import termios
import tty
import pty
from subprocess import Popen

command = 'bash'
# command = 'docker run -it --rm centos /bin/bash'.split()

# save original tty setting then set it to raw mode
old_tty = termios.tcgetattr(sys.stdin)
tty.setraw(sys.stdin.fileno())

# open pseudo-terminal to interact with subprocess
master_fd, slave_fd = pty.openpty()

try:
    # use os.setsid() to make it run in a new process group, or bash job control will not be enabled
    p = Popen(command,
              preexec_fn=os.setsid,
              stdin=slave_fd,
              stdout=slave_fd,
              stderr=slave_fd,
              universal_newlines=True)

    while p.poll() is None:
        r, w, e = select.select([sys.stdin, master_fd], [], [])
        if sys.stdin in r:
            d = os.read(sys.stdin.fileno(), 10240)
            os.write(master_fd, d)
        elif master_fd in r:
            o = os.read(master_fd, 10240)
            if o:
                os.write(sys.stdout.fileno(), o)
finally:
    # restore tty settings back
    termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty)
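If it helps, a small addition that could follow the try/finally above, reaping the child and releasing the pty once the loop exits (os.close and Popen.wait are standard; this is a sketch, not part of the original answer):
# after the select loop and the finally block have run
os.close(slave_fd)   # release our copy of the slave end
os.close(master_fd)  # release the master end
p.wait()             # reap the child to avoid leaving a zombie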
This is the solution that worked for me in the end (as suggested by qarma):
import ctypes
import os
import pty
import subprocess

libc = ctypes.CDLL('libc.so.6')
master, slave = pty.openpty()
p = subprocess.Popen(["/bin/bash", "-i"], preexec_fn=libc.setsid, stdin=slave, stdout=slave, stderr=slave)
os.close(slave)

... do stuff here ...

x = os.read(master, 1026)
print x
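For completeness, the standard library exposes setsid directly, so the ctypes call can probably be avoided, either with preexec_fn=os.setsid or (on Python 3.2+) with Popen's start_new_session flag; a minimal sketch under that assumption:
import os
import pty
import subprocess

master, slave = pty.openpty()
# start_new_session=True runs setsid() in the child, no ctypes needed
p = subprocess.Popen(["/bin/bash", "-i"], start_new_session=True,
                     stdin=slave, stdout=slave, stderr=slave)
os.close(slave)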
Here is a full object-oriented solution for running interactive shell commands with TTYs, using threads and queues for stdout and stderr IO handling. This took me a while to build from multiple sources, but it works perfectly so far on Unix/Linux systems, and also as part of a Juniper op script. I thought I would post it here to save others the time of trying to build something like this.
import pty
import re
import select
import threading
from datetime import datetime, timedelta
import os
import logging
import subprocess
import time
from queue import Queue, Empty

lib_logger = logging.getLogger("lib")

# Default wait time in seconds (assumed value; the original snippet references
# DEVICE_TIMEOUT without defining it)
DEVICE_TIMEOUT = 60

# Handler function to be run as a thread for pulling pty channels from an interactive shell
def _pty_handler(pty_master, logger, queue, stop):
    poller = select.poll()
    poller.register(pty_master, select.POLLIN)
    while True:
        # Stop handler if flagged
        if stop():
            logger.debug("Disabling pty handler for interactive shell")
            break
        fd_event = poller.poll(100)
        for descriptor, event in fd_event:
            # Read data from pipe and send to queue if there is data to read
            if event == select.POLLIN:
                data = os.read(descriptor, 1).decode("utf-8")
                if not data:
                    break
                # logger.debug("Reading in to handler queue: " + data)
                queue.put(data)
            # Exit handler if stdout is closing
            elif event == select.POLLHUP:
                logger.debug("Disabling pty handler for interactive shell")
                break

# Function for reading outputs from the given queue by draining it and returning the output
def _get_queue_output(queue: Queue) -> str:
    value = ""
    try:
        while True:
            value += queue.get_nowait()
    except Empty:
        return value

# Helper function to create the needed list for popen and print the command run to the logger
def popen_command(command, logger, *args):
    popen_list = list()
    popen_list.append(command)
    command_output = command
    for arg in args:
        popen_list.append(arg)
        command_output += " " + arg
    lib_logger.debug("Making Popen call using: " + str(popen_list))
    logger.debug("")
    logger.debug(command_output)
    logger.debug("")
    return popen_list

# Class for creating an interactive shell and sending commands to it, along with logging output to loggers
class InteractiveShell(object):
    def __init__(self, command, logger, *args):
        self.logger = logger
        self.command = command
        self.process = None
        self.popen_list = popen_command(command, logger, *args)
        self.master_stdout = None
        self.slave_stdout = None
        self.master_stderr = None
        self.slave_stderr = None
        self.stdout_handler = None
        self.stderr_handler = None
        self.stdout_queue = None
        self.stderr_queue = None
        self.stop_handlers = False

    # Open interactive shell and setup all threaded IO handlers
    def open(self, shell_prompt, timeout=DEVICE_TIMEOUT):
        # Create PTYs
        self.master_stdout, self.slave_stdout = pty.openpty()
        self.master_stderr, self.slave_stderr = pty.openpty()
        # Create shell subprocess
        self.process = subprocess.Popen(self.popen_list, stdin=self.slave_stdout, stdout=self.slave_stdout,
                                        stderr=self.slave_stderr, bufsize=0, start_new_session=True)
        lib_logger.debug("")
        lib_logger.debug("Started interactive shell for command " + self.command)
        lib_logger.debug("")
        # Create thread and queues for handling pty output and start them
        self.stdout_queue = Queue()
        self.stderr_queue = Queue()
        self.stdout_handler = threading.Thread(target=_pty_handler, args=(self.master_stdout,
                                                                          lib_logger,
                                                                          self.stdout_queue,
                                                                          lambda: self.stop_handlers))
        self.stderr_handler = threading.Thread(target=_pty_handler, args=(self.master_stderr,
                                                                          lib_logger,
                                                                          self.stderr_queue,
                                                                          lambda: self.stop_handlers))
        self.stdout_handler.daemon = True
        self.stderr_handler.daemon = True
        lib_logger.debug("Enabling stderr handler for interactive shell " + self.command)
        self.stderr_handler.start()
        lib_logger.debug("Enabling stdout handler for interactive shell " + self.command)
        self.stdout_handler.start()
        # Wait for shell prompt
        lib_logger.debug("Waiting for shell prompt: " + shell_prompt)
        return self.wait_for(shell_prompt, timeout)

    # Close interactive shell, which should also kill all threaded IO handlers
    def close(self):
        # Wait 5 seconds before closing to let shell handle all input and outputs
        time.sleep(5)
        # Stop IO handler threads and terminate the process, then wait another 5 seconds for cleanup to happen
        self.stop_handlers = True
        self.process.terminate()
        time.sleep(5)
        # Check for any additional output from the stdout handler
        output = ""
        while True:
            data = _get_queue_output(self.stdout_queue)
            if data != "":
                output += data
            else:
                break
        for line in iter(output.splitlines()):
            self.logger.debug(line)
        # Check for any additional output from the stderr handler
        output = ""
        while True:
            data = _get_queue_output(self.stderr_queue)
            if data != "":
                output += data
            else:
                break
        for line in iter(output.splitlines()):
            self.logger.error(line)
        # Cleanup PTYs
        os.close(self.master_stdout)
        os.close(self.master_stderr)
        os.close(self.slave_stdout)
        os.close(self.slave_stderr)
        lib_logger.debug("Interactive shell command " + self.command + " terminated")

    # Run a series of commands given as a list of [command, wait_for] pairs. If no wait_for is needed then
    # only provide the command. Return whether all the commands completed successfully or not.
    # Ex:
    # [
    #     ["ssh jsas@" + vnf_ip, r"jsas@.*:"],
    #     ["juniper123", r"jsas@.*\$"],
    #     ["sudo su", r".*jsas:"],
    #     ["juniper123", r"root@.*#"],
    #     ["usermod -p 'blah' jsas"]
    # ]
    def run_commands(self, commands_list):
        shell_status = True
        for command in commands_list:
            shell_status = self.run(command[0])
            if shell_status and len(command) == 2:
                shell_status = self.wait_for(command[1])
            # Break out of running commands if a command failed
            if not shell_status:
                break
        return shell_status

    # Run given command and return False if an error occurs, otherwise return True
    def run(self, command, sleep=0):
        # Check process to make sure it is still running and if not grab the stderr output
        if self.process.poll():
            self.logger.error("Interactive shell command " + self.command + " closed with return code: " +
                              str(self.process.returncode))
            data = _get_queue_output(self.stderr_queue)
            if data != "":
                self.logger.error("Interactive shell error messages:")
                for line in iter(data.splitlines()):
                    self.logger.error(line)
            return False
        # Write command to process and check to make sure a newline is in command, otherwise add it
        if "\n" not in command:
            command += "\n"
        os.write(self.master_stdout, command.encode("utf-8"))
        if sleep:
            time.sleep(sleep)
        return True

    # Wait for a specific regex expression in the output before continuing; return False if the wait time expires,
    # otherwise return True
    def wait_for(self, this, timeout=DEVICE_TIMEOUT):
        timeout = datetime.now() + timedelta(seconds=timeout)
        output = ""
        # Keep searching for output until timeout occurs
        while timeout > datetime.now():
            data = _get_queue_output(self.stdout_queue)
            if data != "":
                # Add to output line and check for match to regex given and if match then break and send output to
                # logger
                output += data
                lib_logger.debug("Checking for " + this + " in data: ")
                for line in iter(output.splitlines()):
                    lib_logger.debug(line)
                if re.search(r"{}\s?$".format(this), output):
                    break
            time.sleep(1)
        # Send output to logger
        for line in iter(output.splitlines()):
            self.logger.debug(line)
        # If wait time expired print error message and return False
        if timeout < datetime.now():
            self.logger.error("Wait time expired when waiting for " + this)
            return False
        return True
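A minimal usage sketch, in case it is useful (the command, prompt regexes, and logger name here are hypothetical, not part of the original):
import logging

logging.basicConfig(level=logging.DEBUG)
demo_logger = logging.getLogger("demo")  # hypothetical logger

shell = InteractiveShell("/bin/bash", demo_logger)
if shell.open(r"\$"):                    # wait for a shell prompt
    shell.run_commands([
        ["echo hello", r"hello"],        # run a command, then wait for its output
        ["pwd"],                         # no wait_for needed here
    ])
shell.close()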
I am trying to port a bash pipeline to Python and use multiple processes to speed it up. So I have a couple of subprocesses spawned, with multiprocessing.Pipe connecting them (sample code below).
However, I always get a hang somewhere towards the end of the run, with the output file missing its last bits. It looks like a buffer deadlock or a missing flush, but I couldn't figure it out. I am sure I am missing something obvious, please show me :) Any help or comment would be appreciated. Thank you
#!/usr/bin/env python

from __future__ import print_function
import os
import sys
import multiprocessing as MP
import subprocess as SP

def usage():
    Usage = "Need /path/to/infile"
    log2err(Usage)
    sys.exit(1)

def log2err(*s):
    print(s, file=sys.stderr)

def pipeWrite(proc, outpipe):
    # output only
    global killToken
    while 1:
        s = proc.stdout.read(bufferSize)
        if len(s) == 0:
            break
        # log2err('Write %s: %s' % (os.getpid(), len(s)))
        outpipe.send_bytes(s)
    log2err("Write PID %s: sending kill" % os.getpid())
    outpipe.send_bytes(killToken)
    outpipe.close()

def pipeRead(proc, inpipe):
    # input only
    global killToken
    while 1:
        s = inpipe.recv_bytes(bufferSize)
        # log2err('Read %s: %s' % (os.getpid(), len(s)))
        if s == killToken:
            log2err("Read PID %s: received kill" % os.getpid())
            break
        proc.stdin.write(s)
        proc.stdin.flush()
    # final cleanup
    proc.stdin.flush()
    proc.stdin.close()
    inpipe.close()

def testRead(infile, outpipe):
    stmt = "cat %s" % infile
    stmt += " | tee >( wc -l 1>&2)"
    proc = SP.Popen(stmt, shell=True, stdout=SP.PIPE, executable='/bin/bash', bufsize=bufferSize)
    pipeWrite(proc, outpipe)
    proc.stdout.close()
    outpipe.close()
    log2err('testRead is DONE')

def testGzip(inpipe, outpipe):
    stmt = "gzip -c - | tee >(wc -l 1>&2)"
    proc = SP.Popen(stmt, shell=True, stdout=SP.PIPE, stdin=SP.PIPE, executable='/bin/bash', bufsize=bufferSize)
    PR = MP.Process(target=pipeRead, args=(proc, inpipe))
    PW = MP.Process(target=pipeWrite, args=(proc, outpipe))
    PW.start()
    PR.start()
    PR.join()
    proc.stdin.flush()
    proc.stdin.close()
    proc.stdout.flush()
    proc.stdout.close()
    log2err("testGzip PID:%s with Read:%s and Write:%s" % (os.getpid(), PR.pid, PW.pid))
    PW.join()
    log2err('testGzip is DONE')

def testOutput(infile, inpipe):
    stmt = "tee %s.gz | sha512sum - > %s.gz.sha512" % (infile, infile)
    proc = SP.Popen(stmt, shell=True, stdin=SP.PIPE, executable='/bin/bash', bufsize=bufferSize)
    pipeRead(proc, inpipe)
    inpipe.close()
    log2err('outputFinal is DONE')

if __name__ == "__main__":
    try:
        infile = sys.argv[1]
        if infile in ('-h', '--help'):
            usage()
    except IndexError:
        usage()

    bufferSize = 256*256
    killToken = "I am done with all of these processes"

    # the curl task stream and the curl checksum and output stream
    ingestRecv, ingestSend = MP.Pipe(False)
    ingestProc = MP.Process(target=testRead, args=(infile, ingestSend))

    # encrypt stream
    workRecv, workSend = MP.Pipe(False)
    workProc = MP.Process(target=testGzip, args=(ingestRecv, workSend))

    # output to file and sha checksum stream
    outputProc = MP.Process(target=testOutput, args=(infile, workRecv))

    ingestProc.start()
    log2err("ingestProc PID:%s" % ingestProc.pid)
    workProc.start()
    log2err("workProc PID:%s" % workProc.pid)
    outputProc.start()
    log2err("outputProc PID:%s" % outputProc.pid)

    ingestProc.join()
    log2err("ingestProc: joined")
    workProc.join()
    log2err("workProc: joined")
    outputProc.join()
    log2err("outputProc: joined")
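A guess at the hang, for what it's worth: each multiprocessing child inherits copies of the gzip process's pipe file descriptors, so closing proc.stdin in one process does not deliver EOF to gzip while another process still holds a copy open. Under that assumption, the usual remedy is for each child to close the ends it does not use as soon as it starts; a sketch for pipeWrite (bufferSize and killToken as in the question):
def pipeWrite(proc, outpipe):
    # output only; runs in its own multiprocessing.Process
    # Close the inherited copy of the subprocess's stdin first: otherwise this
    # process keeps the write end open, gzip never sees EOF, proc.stdout.read()
    # below never returns an empty string, and the pipeline hangs.
    if proc.stdin is not None:
        proc.stdin.close()
    while True:
        s = proc.stdout.read(bufferSize)
        if len(s) == 0:
            break
        outpipe.send_bytes(s)
    log2err("Write PID %s: sending kill" % os.getpid())
    outpipe.send_bytes(killToken)
    outpipe.close()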
I am trying to remotely change the cwd via the socket lib on an existing client, but I run into trouble every time I send the actual command "cd ..".
Server:
import socket, subprocess, os, sys

s = socket.socket()
host = socket.gethostname()
ip = socket.gethostbyname(host)
port = 8080
s.bind((ip, port))
s.listen(5)
c, a = s.accept()
fr = c.recv(10000)
cwd = fr
print("IP: "+str(a[0])+":"+str(a[1])+"\tCONNECTED")
while True:
    cmd = raw_input("\n"+cwd+"> ")
    if cmd != "":
        c.sendall(cmd)
        data = c.recv(1024)
        print("\n"+data)
    if cmd == "cd ..":
        c.sendall(cmd)
        cwd = c.recv(1024)
Client:
import socket, subprocess, os, sys

i = 1
cwd = os.getcwd()
while 1:
    s = socket.socket()
    host = socket.gethostname()
    ip = socket.gethostbyname(host)
    port = 8080
    try:
        s.settimeout(5)
        s.connect((ip, port))
        s.settimeout(None)
        s.sendall(cwd)
        i = 1
        while i == 1:
            cmd = s.recv(10000)
            if cmd != "over":
                sp = subprocess.Popen(cmd, shell=True, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
                out = sp.stdout.read()+"_________________________________"
                msg = out + sp.stderr.read()
                s.sendall(msg)
            if cmd == "over":
                s.close()
                i = 0
            if cmd == "cd ..":
                j = 0
                k = 0
                for i in cwd:
                    if i == '/':
                        k = j
                    j = j + 1
                cd = cwd[0:k]
                subprocess.Popen('echo', shell=True, cwd=cd)
                s.sendall(cd)
                print(cd)
    except socket.error:
        continue
Here is the error I get:
Traceback (most recent call last):
File "PycharmProjects/server-client/test_hq.py", line 25, in <module>
c.sendall(cmd)
File "/usr/lib/python2.7/socket.py", line 228, in meth
return getattr(self._sock,name)(*args)
socket.error: [Errno 104] Connection reset by peer
I can't figure out what the problem is...
This should be closer to what you want; it is a lot simpler to receive and send once instead of repeatedly sending and receiving the same commands:
Client.py:
import socket, subprocess, os, sys

cwd = os.getcwd()

def make_socket():
    s = socket.socket()
    host = socket.gethostname()
    ip = socket.gethostbyname(host)
    port = 8080
    s.settimeout(5)
    s.connect((ip, port))
    s.settimeout(None)
    s.sendall(cwd)
    return s

while True:
    s = make_socket()
    try:
        while True:
            cmd = s.recv(10000)
            if cmd == "cd ..":
                # os.chdir("..") # uncomment to actually change directory
                cd = cwd.rsplit(os.sep, 1)[0]
                subprocess.Popen('echo', shell=True, cwd=cd)
                s.sendall(cd)
            elif cmd != "over":
                sp = subprocess.Popen(cmd, shell=True, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                      stdin=subprocess.PIPE)
                out = sp.stdout.read() + "_________________________________"
                msg = out + sp.stderr.read()
                s.sendall(msg)
            else:
                print("closed")
                s.close()
                sys.exit(0)
    except socket.error as e:
        print(e)
        break
server.py:
import socket, subprocess, os, sys

s = socket.socket()
host = socket.gethostname()
ip = socket.gethostbyname(host)
port = 8080
s.bind((ip, port))
s.listen(5)
c, a = s.accept()
fr = c.recv(10000)
cwd = fr
print("IP: "+str(a[0])+":"+str(a[1])+"\tCONNECTED")
while True:
    cmd = raw_input("\n"+cwd+"> ")
    if cmd == "cd ..":
        print("sending 2")
        c.sendall(cmd)
        # os.chdir("..") # uncomment to change dir
        cwd = c.recv(10000)
    elif cmd != "":
        print("sending 1")
        c.sendall(cmd)
        data = c.recv(10000)
        print("\n"+data)
If you want the server side to handle the client closing the socket and calling sys.exit(0), you should catch a socket.error on the server to avoid a broken pipe error.
try:
    while True:
        print(os.getcwd(), 44444)
        cmd = raw_input("\n"+cwd+"> ")
        if cmd != "" and cmd != "cd ..":
            print("sending 1")
            c.sendall(cmd)
            data = c.recv(10000)
            print("\n"+data)
        if cmd == "cd ..":
            print("sending 2")
            c.sendall(cmd)
            # os.chdir("..") # uncomment to change dir
            cwd = c.recv(10000)
except socket.error as e:
    print("Exception caught for {}".format(e.strerror))
If you want to do different things based on the errno, you can compare it in the except block, e.g. if e.errno == errno.EPIPE: for a broken pipe, and so on.
All the errno values are listed in the errno docs.
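For illustration, a minimal sketch of that pattern (errno and socket are standard library; c and cmd are the server-side socket and command from the loop above):
import errno
import socket

try:
    c.sendall(cmd)
except socket.error as e:
    if e.errno == errno.EPIPE:          # broken pipe: client went away mid-send
        print("client disconnected")
    elif e.errno == errno.ECONNRESET:   # connection reset by peer
        print("connection reset by peer")
    else:
        raise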
Considering the comment, this might help with your cd issues:
import re
import os.path

# Other stuff

m = re.match(r'cd(?:\s+|$)(.*)', cmd)
if m:
    dirs = m.groups()
    # Default for cd is the home directory
    if len(dirs) == 0 or len(dirs[0]) == 0:
        dir = os.environ['HOME']
    else:
        dir = dirs[0]
    if dir == '..':
        head, tail = os.path.split(cwd)
        dir = head
    subprocess.Popen('echo', shell=True, cwd=dir)
    s.sendall(dir)
    # Update cwd
    cwd = dir
    print(dir)
else:
    # Some other command