How to debug crashing openoffice with pyuno - python

I'd like to use openoffice to programmatically convert docx to pdf. I know unoconv can do this, and indeed unoconv will do this for me, even if I run a separate listener (using unoconv -l) and invoke unoconv -n (so that it will die if it can't connect to the listener). Accordingly, I assume that my openoffice/pyuno environment is sane.
However, when I run an unoconv listener (or manually invoke openoffice as an acceptor), and try to connect with my own python code (derived from unoconv, and cross-checked with another openoffice library), the listener dies, and the uno bridge dies.
The error I get from the listener is:
terminate called after throwing an instance of 'com::sun::star::uno::RuntimeException'
The error I get on the python end is:
unoconv: RuntimeException during import phase:
Office probably died. Binary URP bridge disposed during call
I really have no idea how to go about diagnosing the problem here. Any suggestions as to the underlying cause or how to diagnose it would be greatly appreciated.
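One way to narrow it down is to take the stream handling out of the picture and load a document from a plain file URL over the same connection; if that also kills the bridge, the InputStream plumbing is not the culprit. A minimal smoke test (a sketch only, assuming a listener on port 2002 and a hypothetical /tmp/test.docx):

import uno
from com.sun.star.beans import PropertyValue

localContext = uno.getComponentContext()
resolver = localContext.ServiceManager.createInstanceWithContext(
    "com.sun.star.bridge.UnoUrlResolver", localContext)
ctx = resolver.resolve(
    "uno:socket,host=127.0.0.1,port=2002;urp;StarOffice.ComponentContext")
desktop = ctx.ServiceManager.createInstanceWithContext(
    "com.sun.star.frame.Desktop", ctx)

hidden = PropertyValue()
hidden.Name = "Hidden"
hidden.Value = True

# Load from disk rather than private:stream; close the document afterwards.
doc = desktop.loadComponentFromURL("file:///tmp/test.docx", "_blank", 0, (hidden,))
print(doc)
doc.close(False)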
Code below:
#dependency on openoffice-python
import sys  # needed for the sys.stdout default in OutputStream below
import openoffice.streams as oostreams
import openoffice.officehelper as oohelper
import uno, unohelper
from com.sun.star.beans import PropertyValue
from com.sun.star.connection import NoConnectException
from com.sun.star.document.UpdateDocMode import QUIET_UPDATE
from com.sun.star.lang import DisposedException, IllegalArgumentException
from com.sun.star.io import IOException, XOutputStream
from com.sun.star.script import CannotConvertException
from com.sun.star.uno import Exception as UnoException
from com.sun.star.uno import RuntimeException
import logging

logger = logging.getLogger(__name__)
#connectionstring = 'uno:socket,host=127.0.0.1,port=2002;urp;StarOffice.ComponentContext'
connectionstring = 'socket,host=127.0.0.1,port=2002'
## context = uno.getComponentContext()
## svcmgr = context.ServiceManager
## resolver = svcmgr.createInstanceWithContext("com.sun.star.bridge.UnoUrlResolver", context)
## unocontext = resolver.resolve("uno:%s" % connectionstring)
unocontext = oohelper.connect(connectionstring)
#unosvcmgr = unocontext.ServiceManager
desktop = unocontext.ServiceManager.createInstanceWithContext("com.sun.star.frame.Desktop", unocontext)

class OutputStream(unohelper.Base, XOutputStream):
    def __init__(self, stream=None):
        self.closed = 0
        self.stream = stream if stream is not None else sys.stdout

    def closeOutput(self):
        self.closed = 1

    def writeBytes(self, seq):
        self.stream.write(seq.value)

    def flush(self):
        pass

def UnoProps(**args):
    props = []
    for key in args:
        prop = PropertyValue()
        prop.Name = key
        prop.Value = args[key]
        props.append(prop)
    return tuple(props)
FILTERS = {'pdf': 'writer_pdf_Export'}

def convert_stream(instream, outstream, outdoctype=None, outformat=None):
    '''instream and outstream are streams.
    outdoctype and outformat are strings. They correspond
    to the first two parameters to the Fmt constructor. To
    convert to pdf use outdoctype="document", outformat="pdf".
    If you choose inappropriate values, a ValueError will result.'''
    #fmts is a global object of type FmtList
    outputfilter = FILTERS[outformat]
    inputprops = UnoProps(Hidden=True, ReadOnly=True, UpdateDocMode=QUIET_UPDATE,
                          InputStream=oostreams.InputStream(instream))
    inputurl = 'private:stream'
    convert_worker(inputurl, inputprops, outputfilter, outstream=outstream)
    return outstream

def convert_worker(inputurl, inputprops, outputfilter, outstream=None, inputfn=None):
    global exitcode
    document = None
    try:
        ### Import phase
        phase = "import"
        document = desktop.loadComponentFromURL(inputurl, "_blank", 0, inputprops)
        if not document:
            raise UnoException("The document '%s' could not be opened." % inputurl, None)

        ### Import style template
        phase = "import-style"

        ### Update document links
        phase = "update-links"
        try:
            document.updateLinks()
        except AttributeError:
            # the document doesn't implement the XLinkUpdate interface
            pass

        ### Update document indexes
        phase = "update-indexes"
        for ii in range(2):
            # At first update Table-of-Contents.
            # ToC grows, so page numbers grow too.
            # On second turn update page numbers in ToC.
            try:
                document.refresh()
                indexes = document.getDocumentIndexes()
            except AttributeError:
                # the document doesn't implement the XRefreshable and/or
                # XDocumentIndexesSupplier interfaces
                break
            else:
                for i in range(0, indexes.getCount()):
                    indexes.getByIndex(i).update()

        ### Export phase
        phase = "export"
        outputprops = UnoProps(FilterName=outputfilter,
                               OutputStream=OutputStream(stream=outstream),
                               Overwrite=True)
        outputurl = "private:stream"
        try:
            document.storeToURL(outputurl, tuple(outputprops))
        except IOException as e:
            raise UnoException("Unable to store document to %s (ErrCode %d)\n\nProperties: %s" % (outputurl, e.ErrCode, outputprops), None)

        phase = "dispose"
        document.dispose()
        document.close(True)

    except SystemError as e:
        logger.error("unoconv: SystemError during %s phase:\n%s" % (phase, e))
        exitcode = 1
    except RuntimeException as e:
        logger.error("unoconv: RuntimeException during %s phase:\nOffice probably died. %s" % (phase, e))
        exitcode = 6
    except DisposedException as e:
        logger.error("unoconv: DisposedException during %s phase:\nOffice probably died. %s" % (phase, e))
        exitcode = 7
    except IllegalArgumentException as e:
        logger.error("UNO IllegalArgument during %s phase:\nSource file cannot be read. %s" % (phase, e))
        exitcode = 8
    except IOException as e:
        # for attr in dir(e): print '%s: %s' % (attr, getattr(e, attr))
        logger.error("unoconv: IOException during %s phase:\n%s" % (phase, e.Message))
        exitcode = 3
    except CannotConvertException as e:
        # for attr in dir(e): print '%s: %s' % (attr, getattr(e, attr))
        logger.error("unoconv: CannotConvertException during %s phase:\n%s" % (phase, e.Message))
        exitcode = 4
    except UnoException as e:
        if hasattr(e, 'ErrCode'):
            logger.error("unoconv: UnoException during %s phase in %s (ErrCode %d)" % (phase, repr(e.__class__), e.ErrCode))
            exitcode = e.ErrCode
        if hasattr(e, 'Message'):
            logger.error("unoconv: UnoException during %s phase:\n%s" % (phase, e.Message))
            exitcode = 5
        else:
            logger.error("unoconv: UnoException during %s phase in %s" % (phase, repr(e.__class__)))
            exitcode = 2
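For completeness, the functions above are driven by a call along these lines (my reconstruction; the file names are placeholders, not from the original setup):

with open("input.docx", "rb") as instream, open("output.pdf", "wb") as outstream:
    # convert_stream wraps instream in a UNO InputStream and the exported PDF
    # bytes come back through OutputStream via the writer_pdf_Export filter
    convert_stream(instream, outstream, outdoctype="document", outformat="pdf")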

I don't know if this could be your case, but I discovered that LogMeIn (running on my box) was also using port 2002. When I tried unoconv on that machine, I got the same error: Binary URP bridge disposed during call.
I killed LogMeIn and everything worked after that.
Hope this helps!
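If you suspect the same kind of port conflict, check what is already listening on 2002 before starting the listener, either with something like lsof -i :2002 / netstat -tlnp, or with a quick Python check:

import socket

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# connect_ex returns 0 if something already accepts connections on the port
in_use = s.connect_ex(("127.0.0.1", 2002)) == 0
s.close()
print("port 2002 already in use" if in_use else "port 2002 is free")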

Related

AWS lambda python Socket exception: Operation not permitted (1)

I have a Python 3.6 AWS Lambda function "ftp_sender" which is triggered by an ObjectCreated event from S3. It downloads the file and sends it to an SFTP server.
In the CloudWatch logs I constantly see messages like this:
[ERROR] 2018-07-26T12:30:21.543Z a56f9678-90c9-11e8-bf10-ddb8557d0ff0 Socket exception: Operation not permitted (1)
These messages appear at random points, even before the function has started doing any work, for example:
def ftp_sender(event, context):
    # The error can be triggered here
    print(event)
This does not seem to be treated as a real error, but maybe someone knows what it is about?
[EDIT]
It looks like the issue is caused by the paramiko library. I have the following class inside the function:
import os
import paramiko
from time import sleep


class SftpSender:
    def __init__(self, hostname, username, the_password, sftp_port=22, working_directory=''):
        self.hostname = hostname
        self.username = username
        print('SFTP port: %s' % sftp_port)
        self.transport = paramiko.Transport((hostname, sftp_port))
        self.transport.connect(username=username, password=the_password)
        self.sftp = paramiko.SFTPClient.from_transport(self.transport)
        # self.transport.set_missing_host_key_policy(paramiko.WarningPolicy())
        self.folder = working_directory

    def send_file(self, filename, retries=4, timeout_between_retries=10, verb=True, the_lambda_mode=False):
        retry_number = 0
        sending_result = 'Not run'
        last_error = None
        if the_lambda_mode:
            os.chdir("/tmp/")
        if not os.path.isfile(filename):
            return "Error: File %s is not found" % filename
        while retry_number < retries:
            if verb:
                print('Sending %s to %s@%s (to folder "%s") retry %s of %s\n' % (filename, self.username, self.hostname, self.folder, retry_number, retries))
            try:
                destination_name = self.folder + '/' + filename
                sending_result = self.sftp.put(filename, destination_name)
                return 'OK'
            except Exception as e:
                print('Error: The following exception occurred: "%s", result: "%s"' % (e, sending_result))
                last_error = e  # keep a reference; "e" goes out of scope after the except block
                sleep(timeout_between_retries)
                retry_number += 1
        return 'Error: failed to send file %s (%s)' % (filename, last_error)
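I cannot say for certain where the message originates, but paramiko's Transport runs a background thread that logs socket errors through the standard logging module, so a reasonable first step is to confirm (or silence) that source by turning the paramiko logger down before the Transport is created, and to close the transport explicitly when the function finishes so no half-open socket survives the Lambda container freeze. A sketch:

import logging

# Quieten paramiko's internal loggers (its transport thread logs socket errors
# at ERROR level); if the CloudWatch messages disappear, paramiko was the source.
logging.getLogger("paramiko").setLevel(logging.CRITICAL)

# ...and when a SftpSender instance (hypothetical name "sender") is done:
# sender.sftp.close()
# sender.transport.close()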

pysnmp resolving oids to mibname

Currently I have set up the trap listener and it can listen to SNMP notifications fine. However it only comes back with the numerical OID; I want to be able to resolve this OID to a human-readable name. I have looked at the documentation but I do not really understand it all that well. I am using the example script here http://pysnmp.sourceforge.net/examples/v1arch/asyncore/manager/ntfrcv/transport-tweaks.html
from pysnmp.carrier.asyncore.dispatch import AsyncoreDispatcher
from pysnmp.carrier.asyncore.dgram import udp, udp6, unix
from pyasn1.codec.ber import decoder
from pysnmp.proto import api


# noinspection PyUnusedLocal
def cbFun(transportDispatcher, transportDomain, transportAddress, wholeMsg):
    while wholeMsg:
        msgVer = int(api.decodeMessageVersion(wholeMsg))
        if msgVer in api.protoModules:
            pMod = api.protoModules[msgVer]
        else:
            print('Unsupported SNMP version %s' % msgVer)
            return
        reqMsg, wholeMsg = decoder.decode(
            wholeMsg, asn1Spec=pMod.Message(),
        )
        print('Notification message from %s:%s: ' % (
            transportDomain, transportAddress
        ))
        reqPDU = pMod.apiMessage.getPDU(reqMsg)
        if reqPDU.isSameTypeWith(pMod.TrapPDU()):
            if msgVer == api.protoVersion1:
                print('Enterprise: %s' % (pMod.apiTrapPDU.getEnterprise(reqPDU).prettyPrint()))
                print('Agent Address: %s' % (pMod.apiTrapPDU.getAgentAddr(reqPDU).prettyPrint()))
                print('Generic Trap: %s' % (pMod.apiTrapPDU.getGenericTrap(reqPDU).prettyPrint()))
                print('Specific Trap: %s' % (pMod.apiTrapPDU.getSpecificTrap(reqPDU).prettyPrint()))
                print('Uptime: %s' % (pMod.apiTrapPDU.getTimeStamp(reqPDU).prettyPrint()))
                varBinds = pMod.apiTrapPDU.getVarBindList(reqPDU)
            else:
                varBinds = pMod.apiPDU.getVarBindList(reqPDU)
            print('Var-binds:')
            for oid, val in varBinds:
                print('%s = %s' % (oid.prettyPrint(), val.prettyPrint()))
    return wholeMsg


transportDispatcher = AsyncoreDispatcher()
transportDispatcher.registerRecvCbFun(cbFun)

# UDP/IPv4
transportDispatcher.registerTransport(
    udp.domainName, udp.UdpSocketTransport().openServerMode(('localhost', 162))
)

# UDP/IPv6
transportDispatcher.registerTransport(
    udp6.domainName, udp6.Udp6SocketTransport().openServerMode(('::1', 162))
)

## Local domain socket
# transportDispatcher.registerTransport(
#     unix.domainName, unix.UnixSocketTransport().openServerMode('/tmp/snmp-manager')
# )

transportDispatcher.jobStarted(1)

try:
    # Dispatcher will never finish as job#1 never reaches zero
    transportDispatcher.runDispatcher()
except:
    transportDispatcher.closeDispatcher()
    raise
I was wondering how I would go about resolving the OIDs against the MIBs. I have downloaded the Cisco MIB .my files and put them in a directory, which I point the MibBuilder to like so:
snmpEngine = SnmpEngine()
mibBuilder = builder.MibBuilder()
mibPath = mibBuilder.getMibSources() + (builder.DirMibSource('/opt/mibs'),)
mibBuilder.setMibSources(*mibPath)
mibBuilder.loadModules('CISCO-CONFIG-MAN-MIB',)
mibViewController = view.MibViewController(mibBuilder)
and within the example script where I do the prettyPrint() statements I have
for oid, val in varBinds:
    objectType = ObjectType(ObjectIdentity(oid.prettyPrint()))
    objectType.resolveWithMib(mibViewController)
    print(str(objectType))
    print('%s = %s' % (oid.prettyPrint(), val.prettyPrint()))
After making the changes I have now encountered two errors,
Traceback (most recent call last):
  File "traplistener.py", line 200, in <module>
    'CISCO-BRIDGE-EXT-MIB',
  File "/usr/local/lib/python2.7/site-packages/pysnmp/smi/builder.py", line 344, in loadModules
    raise error.MibNotFoundError('%s compilation error(s): %s' % (modName, errs))
pysnmp.smi.error.MibNotFoundError: CISCO-CONFIG-MAN-MIB compilation error(s): missing; no module "CISCO-SMI" in symbolTable at MIB CISCO-CONFIG-MAN-MIB; missing; missing; missing; missing; missing; missing
Since you are working with a low-level API (rather than pysnmp.hlapi), I can offer you the following snippet that effectively comprises a MIB resolver:
from pysnmp.smi import builder, view, compiler, rfc1902

# Assemble MIB viewer
mibBuilder = builder.MibBuilder()
compiler.addMibCompiler(mibBuilder, sources=['file:///usr/share/snmp/mibs',
                                             'http://mibs.snmplabs.com/asn1/#mib#'])
mibViewController = view.MibViewController(mibBuilder)

# Pre-load MIB modules we expect to work with
mibBuilder.loadModules('SNMPv2-MIB', 'SNMP-COMMUNITY-MIB')

# This is what we can get in TRAP PDU
varBinds = [
    ('1.3.6.1.2.1.1.3.0', 12345),
    ('1.3.6.1.6.3.1.1.4.1.0', '1.3.6.1.6.3.1.1.5.2'),
    ('1.3.6.1.6.3.18.1.3.0', '0.0.0.0'),
    ('1.3.6.1.6.3.18.1.4.0', ''),
    ('1.3.6.1.6.3.1.1.4.3.0', '1.3.6.1.4.1.20408.4.1.1.2'),
    ('1.3.6.1.2.1.1.1.0', 'my system')
]

# Run var-binds received in PDU (a sequence of OID-value pairs)
# through MIB viewer to turn them into MIB objects.
# You may want to catch and ignore MIB lookup errors here.
varBinds = [rfc1902.ObjectType(rfc1902.ObjectIdentity(x[0]), x[1]).resolveWithMib(mibViewController)
            for x in varBinds]

for varBind in varBinds:
    print(varBind.prettyPrint())
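As the comment in the snippet hints, resolveWithMib() raises an error (a pysnmp.smi.error.SmiError subclass, such as the MibNotFoundError above) when no loaded MIB covers an OID. A small sketch of my own (the helper name is hypothetical) that falls back to the numeric form instead of aborting the whole callback:

from pysnmp.smi import rfc1902
from pysnmp.smi import error as smi_error

def resolve_varbinds(varBinds, mibViewController):
    resolved = []
    for oid, val in varBinds:
        try:
            vb = rfc1902.ObjectType(rfc1902.ObjectIdentity(oid.prettyPrint()), val)
            resolved.append(vb.resolveWithMib(mibViewController).prettyPrint())
        except smi_error.SmiError:
            # no MIB knows this OID -- keep the numeric representation
            resolved.append('%s = %s' % (oid.prettyPrint(), val.prettyPrint()))
    return resolved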

Unable to create key under HKEY_CLASSES_ROOT in Windows registry

I am facing a weird problem where I am unable to create a key under HKEY_CLASSES_ROOT.
I have the code below:
import win32api
import win32con
import sys

def create_key(access, keyname, perms):
    if sys.platform == 'win32':
        try:
            #print "keyname = %s" % keyname
            key = win32api.RegCreateKeyEx(access, keyname, perms, None, 0, None)[0]
            return key
        except Exception, fault:
            print('Error in creating key - %s', fault)

def add_base_key(access, keyname, perms):
    print "in func add_base_key, keyname = %s, perms = %s" % (keyname, perms)
    hkey = create_key(access, keyname, perms)
    print "hkey = %s" % hkey

cls_keyname = ["CLSID\\{8682c35a-98fb-41f3-b65e-693f984caef4}"]

def add_entry():
    try:
        add_base_key(win32con.HKEY_CLASSES_ROOT, cls_keyname[0], win32con.KEY_ALL_ACCESS)
    except Exception, fault:
        print "fault = %s" % fault

if __name__ == "__main__":
    add_entry()
On running this code from a cmd prompt with admin rights, I got the following:
C:\>python "C:\Users\admin\Desktop\sample.py"
in func add_base_key, keyname = CLSID\{8682c35a-98fb-41f3-b65e-693f984caef4}, perms = 983103
hkey = <PyHKEY:386>
However, when I search the registry I cannot find the key I have just created. Any idea what I am missing here?
As pointed out by @eryksun, it works when the samDesired argument is set to win32con.KEY_ALL_ACCESS | win32con.KEY_WOW64_64KEY.
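For reference, a sketch of the corrected call, mirroring the question's argument order: presumably a 32-bit Python process on 64-bit Windows was being redirected to the Wow6432Node view of HKEY_CLASSES_ROOT\CLSID, so without KEY_WOW64_64KEY the key does get created, just not where you were looking.

import win32api
import win32con

keyname = "CLSID\\{8682c35a-98fb-41f3-b65e-693f984caef4}"
perms = win32con.KEY_ALL_ACCESS | win32con.KEY_WOW64_64KEY  # write to the 64-bit view

# Same positional arguments as in the question; only samDesired changes
key = win32api.RegCreateKeyEx(win32con.HKEY_CLASSES_ROOT, keyname, perms, None, 0, None)[0]
win32api.RegCloseKey(key)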

Broken Pipe Error on python script with squid

I am running a simple Python script to log the accessed URL using squid's url_rewriter_program.
However, every time it runs, the rewriter crashes with a broken pipe error at sys.stdout.flush().
Please suggest a specific solution.
The Python code is:
import sys
import os
import io
line = sys.stdin.readline()
fo=open("/home/linux/Desktop/foo1.txt","a")
fo.write(line)
fo.close()
sys.stdout.write("\n")
sys.stdout.flush()
This is a squid redirector, written in Python, that you can compare with your script:
Redirector:
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
#-----------------------------------------------------------------------------
# Name:      redirector_master.py
# Purpose:   SiSCont checker cuote
#
# Author:    Ernesto Licea Martin
#
# Created:   2011/11/24
# RCS-ID:    $Id: redirector_master.py $
# Copyright: (c) 2011
# Licence:   GPL
#-----------------------------------------------------------------------------
import sys, string, libSiSCont

__name__ = "redirector_master"

query = "SELECT accounts_proxyquotatype.name, accounts_proxyaccount.proxy_quota, accounts_proxyaccount.proxy_quota_extra, accounts_proxyaccount.proxy_active, accounts_proxyaccounttype.name FROM accounts_proxyaccount INNER JOIN accounts_proxyaccounttype INNER JOIN accounts_proxyquotatype ON (accounts_proxyaccount.proxy_quota_type_id = accounts_proxyquotatype.id) AND (accounts_proxyaccount.proxy_account_type_id = accounts_proxyaccounttype.id) WHERE accounts_proxyaccount.proxy_username=%s"

class RedirMaster:
    def __init__(self):
        obj = libSiSCont.ParceConf()
        obj.parcecfgfile()
        self.__listModules = obj.getModList()
        self.__redirDicc = obj.getRedirectURL()
        self.__penalURL = obj.getPenalizedURL()
        self.__confDicc = obj.getConfParam()
        self.__dbDicc = obj.getDBDicc()
        self.__proxyDicc = obj.getProxyCacheParam()
        self.__dbParam = []

    def getProxyTypes(self):
        db = libSiSCont.connectDB(dbDicc=self.__dbDicc)
        c = db.cursor()
        c.execute("SELECT accounts_proxy")

    def run(self):
        modules = []
        for mod in self.__listModules:
            try:
                m = __import__(mod)
                modules.append(m)
            except Exception, e:
                libSiSCont.reportLogs("%s.run" % __name__, 'Unable to load redirector module %s; the error was: %s' % (mod, str(e)))
        if len(modules) == 0:
            libSiSCont.reportLogs("%s.run" % __name__, 'No usable redirector module found; switching to trasparent behavour')
        while 1:
            try:
                data_redir = raw_input()
                data_redir = data_redir.split()
                url, ip_host, user, method, urlgroup = data_redir[0:5]
                ip = ip_host.split("/")[0]
                host_name = ip_host.split("/")[1]
                uri = url
                mode = ""
                # Don't check cache access
                if string.find(url, "cache_object") == 0:
                    sys.stdout.write("%s%s\n" % (mode, uri))
                    sys.stdout.flush()
                    continue
                db = libSiSCont.connectDB(dbDicc=self.__dbDicc)
                c = db.cursor()
                c.execute(query, user)
                cuote_type, cuote, ext_cuote, active, acc_type = c.fetchall()[0]
                self.__dbParam = [cuote_type, int(cuote), int(ext_cuote), active, acc_type]
                for module in modules:
                    try:
                        uri = module.redir(url=url, ip=ip, host_name=host_name, user=user, method=method, urlgroup=urlgroup, redirDicc=self.__redirDicc, penalURL=self.__penalURL, confDicc=self.__confDicc, proxyDicc=self.__proxyDicc, dbParam=self.__dbParam)
                    except Exception, e:
                        libSiSCont.reportLogs("%s.run" % __name__, 'Error while running module: %s -- The error was: %s' % (module, str(e)))
                    if uri != url:
                        mode = "301:"
                        break
                sys.stdout.write("%s%s\n" % (mode, uri))
                sys.stdout.flush()
            except Exception, e:
                if not string.find('%s' % e, 'EOF') >= 0:
                    sys.stdout.write('%s\n' % uri)
                    sys.stdout.flush()
                    libSiSCont.reportLogs("%s.run" % __name__, '%s: data received from parent: %s' % (str(e), string.join(data_redir)))
                else:
                    sys.exit()

obj = RedirMaster()
obj.run()
Helper:
#!/usr/bin/env python
import sys, syslog, libSiSCont, string, crypt

__name__ = "Helper"

query = "SELECT accounts_proxyaccount.proxy_username FROM accounts_proxyaccount WHERE accounts_proxyaccount.proxy_username=%s AND accounts_proxyaccount.proxy_password=%s"

class BasicAuth:
    def __init__(self):
        obj = libSiSCont.ParceConf()
        obj.parcecfgfile()
        self.__dbDicc = obj.getDBDicc()

    def run(self):
        while 1:
            try:
                user_pass = string.split(raw_input())
                user = user_pass[0].strip("\n")
                passwd = user_pass[1].strip("\n")
                crypt_passwd = crypt.crypt(passwd, user)
                db = libSiSCont.connectDB(self.__dbDicc)
                c = db.cursor()
                c.execute(query, (user, crypt_passwd))
                if c.fetchone() == None:
                    libSiSCont.reportLogs('%s.run' % __name__, 'User Authentication Fail, user = %s password= %s, Access Denied' % (user, passwd))
                    sys.stdout.write("ERR\n")
                    sys.stdout.flush()
                else:
                    libSiSCont.reportLogs('%s.run' % __name__, 'User Authentication Success, user = %s, Access Granted' % user)
                    sys.stdout.write("OK\n")
                    sys.stdout.flush()
            except Exception, e:
                if not string.find("%s" % e, "EOF") >= 0:
                    sys.stdout.write("ERR\n")
                    sys.stdout.flush()
                    libSiSCont.reportLogs('%s.run' % __name__, 'Authenticator error, user will navigate without authentication: %s' % str(e))
                else:
                    sys.exit()

obj = BasicAuth()
obj.run()
I hope this helps ;-)
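The structural point to take from the redirector above is the while loop: a squid url_rewriter_program helper is expected to keep reading lines from stdin until squid closes the pipe, whereas the script in the question reads a single line and exits, which is a plausible cause of the broken pipe. A minimal looping sketch along the lines of the question's script (the log path is the one from the question; the empty reply means "leave the URL unchanged", as in the original script):

#!/usr/bin/env python
import sys

LOGFILE = "/home/linux/Desktop/foo1.txt"

for line in sys.stdin:          # keep servicing requests until squid closes stdin
    with open(LOGFILE, "a") as fo:
        fo.write(line)          # log the accessed URL (plus the client info squid appends)
    sys.stdout.write("\n")      # empty answer: do not rewrite the URL
    sys.stdout.flush()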

Receiving extended data with ssh using twisted.conch as client

I'm currently in the process of learning ssh via the brute-force / just-keep-hacking-until-I-understand-it approach. After some trial and error I've been able to successfully send a "pty-req" followed by a "shell" request; I can get the login preamble, send commands and receive stdout, but I'm not exactly sure how to tell the SSH service I want to receive stderr and status messages. Reading through other SSH implementations (paramiko, Net::SSH) hasn't been much of a guide so far.
That said, looking at one of the RFC's for SSH, I believe that perhaps one of the listed requests might be what I am looking for: https://www.rfc-editor.org/rfc/rfc4250#section-4.9.3
#!/usr/bin/env python
from twisted.conch.ssh import transport
from twisted.conch.ssh import userauth
from twisted.conch.ssh import connection
from twisted.conch.ssh import common
from twisted.conch.ssh.common import NS
from twisted.conch.ssh import keys
from twisted.conch.ssh import channel
from twisted.conch.ssh import session
from twisted.internet import defer
from twisted.internet import defer, protocol, reactor
from twisted.python import log
import struct, sys, getpass, os

log.startLogging(sys.stdout)

USER = 'dward'
HOST = '192.168.0.19' # pristine.local
PASSWD = "password"
PRIVATE_KEY = "~/id_rsa"


class SimpleTransport(transport.SSHClientTransport):

    def verifyHostKey(self, hostKey, fingerprint):
        print 'host key fingerprint: %s' % fingerprint
        return defer.succeed(1)

    def connectionSecure(self):
        self.requestService(
            SimpleUserAuth(USER,
                SimpleConnection()))


class SimpleUserAuth(userauth.SSHUserAuthClient):

    def getPassword(self):
        return defer.succeed(PASSWD)

    def getGenericAnswers(self, name, instruction, questions):
        print name
        print instruction
        answers = []
        for prompt, echo in questions:
            if echo:
                answer = raw_input(prompt)
            else:
                answer = getpass.getpass(prompt)
            answers.append(answer)
        return defer.succeed(answers)

    def getPublicKey(self):
        path = os.path.expanduser(PRIVATE_KEY)
        # this works with rsa too
        # just change the name here and in getPrivateKey
        if not os.path.exists(path) or self.lastPublicKey:
            # the file doesn't exist, or we've tried a public key
            return
        return keys.Key.fromFile(filename=path + '.pub').blob()

    def getPrivateKey(self):
        path = os.path.expanduser(PRIVATE_KEY)
        return defer.succeed(keys.Key.fromFile(path).keyObject)


class SimpleConnection(connection.SSHConnection):
    def serviceStarted(self):
        self.openChannel(SmartChannel(2**16, 2**15, self))


class SmartChannel(channel.SSHChannel):
    name = "session"

    def getResponse(self, timeout=10):
        self.onData = defer.Deferred()
        self.timeout = reactor.callLater(timeout, self.onData.errback, Exception("Timeout"))
        return self.onData

    def openFailed(self, reason):
        print "Failed", reason

    @defer.inlineCallbacks
    def channelOpen(self, ignoredData):
        self.data = ''
        self.oldData = ''
        self.onData = None
        self.timeout = None
        term = os.environ.get('TERM', 'xterm')
        #winsz = fcntl.ioctl(fd, tty.TIOCGWINSZ, '12345678')
        winSize = (25, 80, 0, 0)  #struct.unpack('4H', winsz)
        ptyReqData = session.packRequest_pty_req(term, winSize, '')

        try:
            result = yield self.conn.sendRequest(self, 'pty-req', ptyReqData, wantReply=1)
        except Exception as e:
            print "Failed with ", e

        try:
            result = yield self.conn.sendRequest(self, "shell", '', wantReply=1)
        except Exception as e:
            print "Failed shell with ", e

        #fetch preamble
        data = yield self.getResponse()
        """
        Welcome to Ubuntu 11.04 (GNU/Linux 2.6.38-8-server x86_64)
        * Documentation: http://www.ubuntu.com/server/doc
        System information as of Sat Oct 29 13:09:50 MDT 2011
        System load: 0.0    Processes: 111
        Usage of /: 48.0% of 6.62GB    Users logged in: 1
        Memory usage: 39%    IP address for eth1: 192.168.0.19
        Swap usage: 3%
        Graph this data and manage this system at https://landscape.canonical.com/
        New release 'oneiric' available.
        Run 'do-release-upgrade' to upgrade to it.
        Last login: Sat Oct 29 01:23:16 2011 from 192.168.0.17
        """
        print data
        while data != "" and data.strip().endswith("~$") == False:
            try:
                data = yield self.getResponse()
                print repr(data)
                """
                \x1B]0;dward@pristine: ~\x07dward@pristine:~$
                """
            except Exception as e:
                print e
                break

        self.write("false\n")
        #fetch response
        try:
            data = yield self.getResponse()
        except Exception as e:
            print "Failed to catch response?", e
        else:
            print data
            """
            false
            \x1B]0;dward@pristine: ~\x07dward@pristine:~$
            """

        self.write("true\n")
        #fetch response
        try:
            data = yield self.getResponse()
        except Exception as e:
            print "Failed to catch response?", e
        else:
            print data
            """
            true
            \x1B]0;dward@pristine: ~\x07dward@pristine:~$
            """

        self.write("echo Hello World\n\x00")
        try:
            data = yield self.getResponse()
        except Exception as e:
            print "Failed to catch response?", e
        else:
            print data
            """
            echo Hello World
            Hello World
            \x1B]0;dward@pristine: ~\x07dward@pristine:~$
            """

        #Close up shop
        self.loseConnection()
        dbgp = 1

    def request_exit_status(self, data):
        status = struct.unpack('>L', data)[0]
        print 'status was: %s' % status

    def dataReceived(self, data):
        self.data += data
        if self.onData is not None:
            if self.timeout and self.timeout.active():
                self.timeout.cancel()
            if self.onData.called == False:
                self.onData.callback(data)

    def extReceived(self, dataType, data):
        dbgp = 1
        print "Extended Data recieved! dataType = %s , data = %s " % (dataType, data,)
        self.extendData = data

    def closed(self):
        print 'got data : %s' % self.data.replace("\\r\\n", "\r\n")
        self.loseConnection()
        reactor.stop()


protocol.ClientCreator(reactor, SimpleTransport).connectTCP(HOST, 22)
reactor.run()
Additionally I tried adding in an explicit bad command to the remote shell:
self.write("ls -alF badPathHere\n\x00")
try:
data = yield self.getResponse()
except Exception as e:
print "Failed to catch response?", e
else:
print data
"""
ls -alF badPathHere
ls: cannot access badPathHere: No such file or directory
\x1B]0;dward#pristine: ~\x07dward#pristine:~$
"""
And it looks like stderr is being mixed into stdout.
Digging through the OpenSSH source, channel session logic is handled in session.c, in the function session_input_channel_req (around line 2227). A "pty-req" followed by a "shell" request leads to do_exec_pty, which ultimately calls session_set_fds(s, ptyfd, fdout, -1, 1, 1). The fourth argument would normally be the file descriptor responsible for stderr, but since none is supplied there won't be any extended data for stderr.
Ultimately, even if I modified OpenSSH to provide a stderr FD, the problem resides with the shell. It's complete guesswork at this point, but I believe that, just as when logging into an SSH service from a terminal like xterm or PuTTY, stderr and stdout are sent together unless explicitly redirected via something like "2> someFile", which is beyond the scope of an SSH service provider.
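A sketch (not from the original post; it reuses the channel, common, connection and struct names already imported in the script above) of the alternative that does produce separate stderr: skip the pty and run a single command with an "exec" request. Without a pty, sshd keeps stderr on its own descriptor and sends it as extended data, which conch delivers to extReceived() with dataType == connection.EXTENDED_DATA_STDERR.

class ExecChannel(channel.SSHChannel):
    name = 'session'

    def channelOpen(self, ignoredData):
        # "exec" instead of "pty-req" + "shell"; NS() length-prefixes the command string
        self.conn.sendRequest(self, 'exec', common.NS('ls -alF badPathHere'), wantReply=1)

    def dataReceived(self, data):
        print 'stdout: %r' % data

    def extReceived(self, dataType, data):
        if dataType == connection.EXTENDED_DATA_STDERR:
            print 'stderr: %r' % data

    def request_exit_status(self, data):
        print 'exit status: %d' % struct.unpack('>L', data)[0]
        self.loseConnection()

Opening this channel from SimpleConnection.serviceStarted() instead of SmartChannel should then deliver the "No such file or directory" line via the stderr path.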
