I'm trying to get a database backup from Odoo ERP using ftputil. This is the code:
# -*- coding: utf-8 -*-
from odoo import models, fields, api, tools, _
from odoo.exceptions import Warning
import odoo
from odoo.http import content_disposition
import logging
_logger = logging.getLogger(__name__)
import os
import datetime
try:
    from xmlrpc import client as xmlrpclib
except ImportError:
    import xmlrpclib
import time
import base64
import socket
try:
    import ftputil
except ImportError:
    raise ImportError(
        'This module needs ftputil to automatically write backups to the FTP through ftp. Please install ftputil on your system. (sudo pip3 install ftputil)')


def execute(connector, method, *args):
    res = False
    try:
        res = getattr(connector, method)(*args)
    except socket.error as error:
        _logger.critical('Error while executing the method "execute". Error: ' + str(error))
        raise error
    return res


class db_backup(models.Model):
    _name = 'db.backup'

    @api.multi
    def get_db_list(self, host, port, context={}):
        uri = 'http://' + host + ':' + port
        conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/db')
        db_list = execute(conn, 'list')
        return db_list

    @api.multi
    def _get_db_name(self):
        dbName = self._cr.dbname
        return dbName

    # Columns for local server configuration
    host = fields.Char('Host', required=True, default='localhost')
    port = fields.Char('Port', required=True, default=8069)
    name = fields.Char('Database', required=True, help='Database you want to schedule backups for',
                       default=_get_db_name)
    folder = fields.Char('Backup Directory', help='Absolute path for storing the backups', required='True',
                         default='/odoo/backups')
    backup_type = fields.Selection([('zip', 'Zip'), ('dump', 'Dump')], 'Backup Type', required=True, default='zip')
    autoremove = fields.Boolean('Auto. Remove Backups',
                                help='If you check this option you can choose to automaticly remove the backup after xx days')
    days_to_keep = fields.Integer('Remove after x days',
                                  help="Choose after how many days the backup should be deleted. For example:\nIf you fill in 5 the backups will be removed after 5 days.",
                                  required=True)
    # Columns for external server (SFTP)
    sftp_write = fields.Boolean('Write to external server with sftp',
                                help="If you check this option you can specify the details needed to write to a remote server with SFTP.")
    sftp_path = fields.Char('Path external server',
                            help='The location to the folder where the dumps should be written to. For example /odoo/backups/.\nFiles will then be written to /odoo/backups/ on your remote server.')
    sftp_host = fields.Char('IP Address SFTP Server',
                            help='The IP address from your remote server. For example 192.168.0.1')
    sftp_port = fields.Integer('SFTP Port', help='The port on the FTP server that accepts SSH/SFTP calls.', default=22)
    sftp_user = fields.Char('Username SFTP Server',
                            help='The username where the SFTP connection should be made with. This is the user on the external server.')
    sftp_password = fields.Char('Password User SFTP Server',
                                help='The password from the user where the SFTP connection should be made with. This is the password from the user on the external server.')
    days_to_keep_sftp = fields.Integer('Remove SFTP after x days',
                                       help='Choose after how many days the backup should be deleted from the FTP server. For example:\nIf you fill in 5 the backups will be removed after 5 days from the FTP server.',
                                       default=30)
    send_mail_sftp_fail = fields.Boolean('Auto. E-mail on backup fail',
                                         help='If you check this option you can choose to automaticly get e-mailed when the backup to the external server failed.')
    email_to_notify = fields.Char('E-mail to notify',
                                  help='Fill in the e-mail where you want to be notified that the backup failed on the FTP.')

    @api.multi
    def _check_db_exist(self):
        self.ensure_one()
        db_list = self.get_db_list(self.host, self.port)
        if self.name in db_list:
            return True
        return False

    _constraints = [(_check_db_exist, _('Error ! No such database exists!'), [])]

    @api.multi
    def test_sftp_connection(self, context=None):
        self.ensure_one()
        # Check if there is a success or fail and write messages
        messageTitle = ""
        messageContent = ""
        error = ""
        has_failed = False
        for rec in self:
            db_list = self.get_db_list(rec.host, rec.port)
            pathToWriteTo = rec.sftp_path
            ipHost = rec.sftp_host
            portHost = rec.sftp_port
            usernameLogin = rec.sftp_user
            passwordLogin = rec.sftp_password
            # Connect with external server over SFTP, so we know sure that everything works.
            try:
                with ftputil.FTPHost(ipHost, usernameLogin, passwordLogin) as s:
                    messageTitle = _("Connection Test Succeeded!\nEverything seems properly set up for FTP back-ups!")
            except Exception as e:
                _logger.critical('There was a problem connecting to the remote ftp: ' + str(e))
                error += str(e)
                has_failed = True
                messageTitle = _("Connection Test Failed!")
                if len(rec.sftp_host) < 8:
                    messageContent += "\nYour IP address seems to be too short.\n"
                messageContent += _("Here is what we got instead:\n")
            finally:
                if s:
                    s.close()
            if has_failed:
                raise Warning(messageTitle + '\n\n' + messageContent + "%s" % str(error))
            else:
                raise Warning(messageTitle + '\n\n' + messageContent)

    @api.model
    def schedule_backup(self):
        conf_ids = self.search([])
        for rec in conf_ids:
            db_list = self.get_db_list(rec.host, rec.port)
            if rec.name in db_list:
                try:
                    if not os.path.isdir(rec.folder):
                        os.makedirs(rec.folder)
                except:
                    raise
                # Create name for dumpfile.
                bkp_file = '%s_%s.%s' % (time.strftime('%Y_%m_%d_%H_%M_%S'), rec.name, rec.backup_type)
                file_path = os.path.join(rec.folder, bkp_file)
                uri = 'http://' + rec.host + ':' + rec.port
                conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/db')
                bkp = ''
                try:
                    # try to backup database and write it away
                    fp = open(file_path, 'wb')
                    odoo.service.db.dump_db(rec.name, fp, rec.backup_type)
                    fp.close()
                except Exception as error:
                    _logger.debug(
                        "Couldn't backup database %s. Bad database administrator password for server running at http://%s:%s" % (
                            rec.name, rec.host, rec.port))
                    _logger.debug("Exact error from the exception: " + str(error))
                    continue
            else:
                _logger.debug("database %s doesn't exist on http://%s:%s" % (rec.name, rec.host, rec.port))
            # Check if user wants to write to SFTP or not.
            if rec.sftp_write is True:
                try:
                    # Store all values in variables
                    dir = rec.folder
                    pathToWriteTo = rec.sftp_path
                    ipHost = rec.sftp_host
                    portHost = rec.sftp_port
                    usernameLogin = rec.sftp_user
                    passwordLogin = rec.sftp_password
                    _logger.debug('sftp remote path: %s' % pathToWriteTo)
                    try:
                        with ftputil.FTPHost(ipHost, usernameLogin, passwordLogin) as sftp:
                            pass
                    except Exception as error:
                        _logger.critical('Error connecting to remote server! Error: ' + str(error))
                    try:
                        sftp.chdir(pathToWriteTo)
                    except IOError:
                        # Create directory and subdirs if they do not exist.
                        currentDir = ''
                        for dirElement in pathToWriteTo.split('/'):
                            currentDir += dirElement + '/'
                            try:
                                sftp.chdir(currentDir)
                            except:
                                _logger.info('(Part of the) path didn\'t exist. Creating it now at ' + currentDir)
                                # Make directory and then navigate into it
                                sftp.mkdir(currentDir, 777)
                                sftp.chdir(currentDir)
                                pass
                    sftp.chdir(pathToWriteTo)
                    # Loop over all files in the directory.
                    for f in os.listdir(dir):
                        if rec.name in f:
                            fullpath = os.path.join(dir, f)
                            if os.path.isfile(fullpath):
                                try:
                                    sftp.StatResult(os.path.join(pathToWriteTo, f))
                                    _logger.debug(
                                        'File %s already exists on the remote FTP Server ------ skipped' % fullpath)
                                # This means the file does not exist (remote) yet!
                                except IOError:
                                    try:
                                        # sftp.put(fullpath, pathToWriteTo)
                                        sftp.upload(fullpath, os.path.join(pathToWriteTo, f))
                                        _logger.info('Copying File % s------ success' % fullpath)
                                    except Exception as err:
                                        _logger.critical(
                                            'We couldn\'t write the file to the remote server. Error: ' + str(err))
                    # Navigate in to the correct folder.
                    sftp.chdir(pathToWriteTo)
                    # Loop over all files in the directory from the back-ups.
                    # We will check the creation date of every back-up.
                    for file in sftp.listdir(pathToWriteTo):
                        if rec.name in file:
                            # Get the full path
                            fullpath = os.path.join(pathToWriteTo, file)
                            # Get the timestamp from the file on the external server
                            timestamp = sftp.StatResult(fullpath).st_atime
                            createtime = datetime.datetime.fromtimestamp(timestamp)
                            now = datetime.datetime.now()
                            delta = now - createtime
                            # If the file is older than the days_to_keep_sftp (the days to keep that the user filled in on the Odoo form) it will be removed.
                            if delta.days >= rec.days_to_keep_sftp:
                                # Only delete files, no directories!
                                if sftp.isfile(fullpath) and (".dump" in file or '.zip' in file):
                                    _logger.info("Delete too old file from SFTP servers: " + file)
                                    sftp.unlink(file)
                    # Close the SFTP session.
                    sftp.close()
                except Exception as e:
                    _logger.debug('Exception! We couldn\'t back up to the FTP server..')
                    # At this point the SFTP backup failed. We will now check if the user wants
                    # an e-mail notification about this.
                    if rec.send_mail_sftp_fail:
                        try:
                            ir_mail_server = self.env['ir.mail_server']
                            message = "Dear,\n\nThe backup for the server " + rec.host + " (IP: " + rec.sftp_host + ") failed.Please check the following details:\n\nIP address SFTP server: " + rec.sftp_host + "\nUsername: " + rec.sftp_user + "\nPassword: " + rec.sftp_password + "\n\nError details: " + tools.ustr(
                                e) + "\n\nWith kind regards"
                            msg = ir_mail_server.build_email("auto_backup@" + rec.name + ".com", [rec.email_to_notify],
                                                             "Backup from " + rec.host + "(" + rec.sftp_host + ") failed",
                                                             message)
                            ir_mail_server.send_email(self._cr, self._uid, msg)
                        except Exception:
                            pass
            """
            Remove all old files (on local server) in case this is configured..
            """
            if rec.autoremove:
                dir = rec.folder
                # Loop over all files in the directory.
                for f in os.listdir(dir):
                    fullpath = os.path.join(dir, f)
                    # Only delete the ones which are from the current database
                    # (Makes it possible to save different databases in the same folder)
                    if rec.name in fullpath:
                        timestamp = os.stat(fullpath).st_ctime
                        createtime = datetime.datetime.fromtimestamp(timestamp)
                        now = datetime.datetime.now()
                        delta = now - createtime
                        if delta.days >= rec.days_to_keep:
                            # Only delete files (which are .dump and .zip), no directories.
                            if os.path.isfile(fullpath) and (".dump" in f or '.zip' in f):
                                _logger.info("Delete local out-of-date file: " + fullpath)
                                os.remove(fullpath)
I can't get past this logger call:
_logger.critical('We couldn\'t write the file to the remote server. Error: ' + str(err))
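For reference, here is a minimal sketch of the documented ftputil calls this upload loop depends on; the host, credentials and paths are placeholders. Note that ftputil wraps ftplib and speaks plain FTP (not SFTP), and that remote stat information comes from host.stat() and host.path, not from a StatResult method:

import os
import ftputil

# Placeholder connection details -- substitute your own.
with ftputil.FTPHost("192.168.0.1", "user", "password") as host:
    remote_dir = "/odoo/backups"
    # Create the remote directory tree if it is missing.
    if not host.path.isdir(remote_dir):
        host.makedirs(remote_dir)
    local_file = "/odoo/backups/example.zip"  # hypothetical dump file
    remote_file = host.path.join(remote_dir, os.path.basename(local_file))
    # Upload only when the file is not already on the server.
    if not host.path.isfile(remote_file):
        host.upload(local_file, remote_file)
    # host.stat() mirrors os.stat(); st_mtime is the remote modification time.
    print(host.stat(remote_file).st_mtime)

Checking host.path.isfile() before uploading replaces the StatResult/IOError pattern in the code above, and keeping all remote calls inside the with block avoids calling methods on an already-closed FTPHost.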
I am running a simple Python script to log the accessed URL using Squid's url_rewriter_program. However, every time it runs, the rewriter crashes with a broken pipe error at sys.stdout.flush().
Please suggest a specific solution.
The Python code is:
import sys
import os
import io
line = sys.stdin.readline()
fo=open("/home/linux/Desktop/foo1.txt","a")
fo.write(line)
fo.close()
sys.stdout.write("\n")
sys.stdout.flush()
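For comparison, Squid expects a url_rewriter_program helper to stay alive and answer one request per line on stdin; a helper that reads a single line and exits is the usual cause of a broken pipe when Squid writes the next request. A minimal loop-based sketch (the log path is kept from the script above):

#!/usr/bin/env python
import sys

log = open("/home/linux/Desktop/foo1.txt", "a")
# readline() returns "" only at EOF, i.e. when Squid closes the pipe.
for line in iter(sys.stdin.readline, ""):
    log.write(line)
    log.flush()
    # A blank line tells Squid to leave this URL unchanged.
    sys.stdout.write("\n")
    sys.stdout.flush()
log.close()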
This is a Squid redirector written in Python; you can compare it with your script:
Redirector:
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
#-----------------------------------------------------------------------------
# Name: redirector_master.py
# Purpose: SiSCont checker cuote
#
# Author: Ernesto Licea Martin
#
# Created: 2011/11/24
# RCS-ID: $Id: redirector_master.py $
# Copyright: (c) 2011
# Licence: GPL
#-----------------------------------------------------------------------------
import sys, string, libSiSCont

__name__= "redirector_master"

query="SELECT accounts_proxyquotatype.name, accounts_proxyaccount.proxy_quota, accounts_proxyaccount.proxy_quota_extra, accounts_proxyaccount.proxy_active, accounts_proxyaccounttype.name FROM accounts_proxyaccount INNER JOIN accounts_proxyaccounttype INNER JOIN accounts_proxyquotatype ON (accounts_proxyaccount.proxy_quota_type_id = accounts_proxyquotatype.id) AND (accounts_proxyaccount.proxy_account_type_id = accounts_proxyaccounttype.id) WHERE accounts_proxyaccount.proxy_username=%s"

class RedirMaster:
    def __init__(self):
        obj = libSiSCont.ParceConf()
        obj.parcecfgfile()
        self.__listModules = obj.getModList()
        self.__redirDicc = obj.getRedirectURL()
        self.__penalURL = obj.getPenalizedURL()
        self.__confDicc = obj.getConfParam()
        self.__dbDicc = obj.getDBDicc()
        self.__proxyDicc = obj.getProxyCacheParam()
        self.__dbParam = []

    def getProxyTypes(self):
        db=libSiSCont.connectDB(dbDicc=self.__dbDicc)
        c=db.cursor()
        c.execute("SELECT accounts_proxy")

    def run(self):
        modules=[]
        for mod in self.__listModules:
            try:
                m=__import__(mod)
                modules.append(m)
            except Exception, e:
                libSiSCont.reportLogs("%s.run" %__name__, 'Unable to load redirector module %s; the error was: %s' % (mod,str(e)))
        if len(modules) == 0:
            libSiSCont.reportLogs("%s.run" %__name__, 'No usable redirector module found; switching to trasparent behavour')
        while 1:
            try:
                data_redir=raw_input()
                data_redir=data_redir.split()
                url,ip_host,user,method,urlgroup = data_redir[0:5]
                ip=ip_host.split("/")[0]
                host_name=ip_host.split("/")[1]
                uri = url
                mode=""
                #Don't check cache access
                if string.find(url,"cache_object") == 0:
                    sys.stdout.write("%s%s\n" %(mode,uri))
                    sys.stdout.flush()
                    continue
                db=libSiSCont.connectDB(dbDicc=self.__dbDicc)
                c=db.cursor()
                c.execute(query,user)
                cuote_type,cuote, ext_cuote, active, acc_type = c.fetchall()[0]
                self.__dbParam=[cuote_type,int(cuote), int(ext_cuote), active, acc_type]
                for module in modules:
                    try:
                        uri = module.redir(url = url, ip = ip, host_name = host_name, user = user, method = method, urlgroup = urlgroup, redirDicc = self.__redirDicc, penalURL = self.__penalURL, confDicc = self.__confDicc, proxyDicc = self.__proxyDicc, dbParam = self.__dbParam)
                    except Exception, e:
                        libSiSCont.reportLogs("%s.run" %__name__, 'Error while running module: %s -- The error was: %s' % (module,str(e)))
                    if uri != url:
                        mode = "301:"
                        break
                sys.stdout.write("%s%s\n" %(mode,uri))
                sys.stdout.flush()
            except Exception, e:
                if not string.find('%s' % e,'EOF') >= 0:
                    sys.stdout.write('%s\n' % uri)
                    sys.stdout.flush()
                    libSiSCont.reportLogs("%s.run" %__name__, '%s: data received from parent: %s' % (str(e),string.join(data_redir)))
                else:
                    sys.exit()

obj=RedirMaster()
obj.run()
Helper:
#!/usr/bin/env python
import sys, syslog, libSiSCont, string,crypt

__name__ = "Helper"

query = "SELECT accounts_proxyaccount.proxy_username FROM accounts_proxyaccount WHERE accounts_proxyaccount.proxy_username=%s AND accounts_proxyaccount.proxy_password=%s"

class BasicAuth:
    def __init__(self):
        obj = libSiSCont.ParceConf()
        obj.parcecfgfile()
        self.__dbDicc = obj.getDBDicc()

    def run(self):
        while 1:
            try:
                user_pass = string.split(raw_input())
                user = user_pass[0].strip("\n")
                passwd = user_pass[1].strip("\n")
                crypt_passwd = crypt.crypt(passwd,user)
                db = libSiSCont.connectDB(self.__dbDicc)
                c = db.cursor()
                c.execute(query,(user,crypt_passwd))
                if c.fetchone() == None:
                    libSiSCont.reportLogs('%s.run' %__name__,'User Authentication Fail, user = %s password= %s, Access Denied' %(user,passwd) )
                    sys.stdout.write("ERR\n")
                    sys.stdout.flush()
                else:
                    libSiSCont.reportLogs('%s.run' %__name__, 'User Authentication Success, user = %s, Access Granted' %user)
                    sys.stdout.write("OK\n")
                    sys.stdout.flush()
            except Exception, e:
                if not string.find("%s" %e, "EOF") >= 0:
                    sys.stdout.write("ERR\n")
                    sys.stdout.flush()
                    libSiSCont.reportLogs('%s.run' %__name__, 'Authenticator error, user will navigate without authentication: %s' %str(e))
                else:
                    sys.exit()

obj = BasicAuth()
obj.run()
I hope this helps ;-)
I'd like to use openoffice to programmatically convert docx to pdf. I know unoconv can do this, and indeed unoconv will do this for me, even if I run a separate listener (using unoconv -l) and invoke unoconv -n (so that it will die if it can't connect to the listener). Accordingly, I assume that my openoffice/pyuno environment is sane.
However, when I run an unoconv listener (or manually invoke openoffice as an acceptor), and try to connect with my own python code (derived from unoconv, and cross-checked with another openoffice library), the listener dies, and the uno bridge dies.
The error I get from the listener is:
terminate called after throwing an instance of 'com::sun::star::uno::RuntimeException'
The error I get on the python end is:
unoconv: RuntimeException during import phase:
Office probably died. Binary URP bridge disposed during call
I really have no idea how to go about diagnosing the problem here. Any suggestions as to the underlying cause or how to diagnose it would be greatly appreciated.
Code below:
#dependency on openoffice-python
import openoffice.streams as oostreams
import openoffice.officehelper as oohelper
import uno, unohelper
from com.sun.star.beans import PropertyValue
from com.sun.star.connection import NoConnectException
from com.sun.star.document.UpdateDocMode import QUIET_UPDATE
from com.sun.star.lang import DisposedException, IllegalArgumentException
from com.sun.star.io import IOException, XOutputStream
from com.sun.star.script import CannotConvertException
from com.sun.star.uno import Exception as UnoException
from com.sun.star.uno import RuntimeException
import logging
import sys  # needed for the OutputStream default stream below

logger = logging.getLogger(__name__)

#connectionstring = 'uno:socket,host=127.0.0.1,port=2002;urp;StarOffice.ComponentContext'
connectionstring = 'socket,host=127.0.0.1,port=2002'

## context = uno.getComponentContext()
## svcmgr = context.ServiceManager
## resolver = svcmgr.createInstanceWithContext("com.sun.star.bridge.UnoUrlResolver", context)
## unocontext = resolver.resolve("uno:%s" % connectionstring)
unocontext = oohelper.connect(connectionstring)
#unosvcmgr = unocontext.ServiceManager
desktop = unocontext.ServiceManager.createInstanceWithContext("com.sun.star.frame.Desktop", unocontext)

class OutputStream( unohelper.Base, XOutputStream ):
    def __init__(self, stream=None):
        self.closed = 0
        self.stream = stream if stream is not None else sys.stdout

    def closeOutput(self):
        self.closed = 1

    def writeBytes( self, seq ):
        self.stream.write( seq.value )

    def flush( self ):
        pass

def UnoProps(**args):
    props = []
    for key in args:
        prop = PropertyValue()
        prop.Name = key
        prop.Value = args[key]
        props.append(prop)
    return tuple(props)

FILTERS = {'pdf': 'writer_pdf_Export'}

def convert_stream(instream, outstream,
                   outdoctype=None, outformat=None):
    '''instream and outstream are streams.
    outdoctype and outformat are strings. They correspond
    to the first two parameters to the Fmt constructor. To
    convert to pdf use outdoctype="document",
    outformat="pdf".
    If you choose inappropriate values, a ValueError
    will result.'''
    #fmts is a global object of type FmtList
    outputfilter = FILTERS[outformat]
    inputprops = UnoProps(Hidden=True, ReadOnly=True, UpdateDocMode=QUIET_UPDATE, InputStream=oostreams.InputStream(instream))
    inputurl = 'private:stream'
    convert_worker(inputurl,inputprops,outputfilter,outstream=outstream)
    return outstream

def convert_worker(inputurl, inputprops, outputfilter, outstream=None,inputfn=None):
    global exitcode
    document = None
    try:
        ### Import phase
        phase = "import"
        document = desktop.loadComponentFromURL( inputurl , "_blank", 0, inputprops )
        if not document:
            raise UnoException("The document '%s' could not be opened." % inputurl, None)
        ### Import style template
        phase = "import-style"
        ### Update document links
        phase = "update-links"
        try:
            document.updateLinks()
        except AttributeError:
            # the document doesn't implement the XLinkUpdate interface
            pass
        ### Update document indexes
        phase = "update-indexes"
        for ii in range(2):
            # At first update Table-of-Contents.
            # ToC grows, so page numbers grows too.
            # On second turn update page numbers in ToC.
            try:
                document.refresh()
                indexes = document.getDocumentIndexes()
            except AttributeError:
                # the document doesn't implement the XRefreshable and/or
                # XDocumentIndexesSupplier interfaces
                break
            else:
                for i in range(0, indexes.getCount()):
                    indexes.getByIndex(i).update()
        ### Export phase
        phase = "export"
        outputprops = UnoProps(FilterName=outputfilter, OutputStream=OutputStream(stream=outstream), Overwrite=True)
        outputurl = "private:stream"
        try:
            document.storeToURL(outputurl, tuple(outputprops) )
        except IOException as e:
            raise UnoException("Unable to store document to %s (ErrCode %d)\n\nProperties: %s" % (outputurl, e.ErrCode, outputprops), None)
        phase = "dispose"
        document.dispose()
        document.close(True)
    except SystemError as e:
        logger.error("unoconv: SystemError during %s phase:\n%s" % (phase, e))
        exitcode = 1
    except RuntimeException as e:
        logger.error("unoconv: RuntimeException during %s phase:\nOffice probably died. %s" % (phase, e))
        exitcode = 6
    except DisposedException as e:
        logger.error("unoconv: DisposedException during %s phase:\nOffice probably died. %s" % (phase, e))
        exitcode = 7
    except IllegalArgumentException as e:
        logger.error("UNO IllegalArgument during %s phase:\nSource file cannot be read. %s" % (phase, e))
        exitcode = 8
    except IOException as e:
        # for attr in dir(e): print '%s: %s', (attr, getattr(e, attr))
        logger.error("unoconv: IOException during %s phase:\n%s" % (phase, e.Message))
        exitcode = 3
    except CannotConvertException as e:
        # for attr in dir(e): print '%s: %s', (attr, getattr(e, attr))
        logger.error("unoconv: CannotConvertException during %s phase:\n%s" % (phase, e.Message))
        exitcode = 4
    except UnoException as e:
        if hasattr(e, 'ErrCode'):
            logger.error("unoconv: UnoException during %s phase in %s (ErrCode %d)" % (phase, repr(e.__class__), e.ErrCode))
            exitcode = e.ErrCode
            pass
        if hasattr(e, 'Message'):
            logger.error("unoconv: UnoException during %s phase:\n%s" % (phase, e.Message))
            exitcode = 5
        else:
            logger.error("unoconv: UnoException during %s phase in %s" % (phase, repr(e.__class__)))
            exitcode = 2
            pass
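One way to narrow this down is to take the conversion code out of the picture and probe the listener with the bare UNO boilerplate. This is the standard PyUNO connection idiom (port 2002 assumed, as above); if resolve() alone brings the listener down, the problem lies in the office/URP setup rather than in the code above:

import uno

local_ctx = uno.getComponentContext()
resolver = local_ctx.ServiceManager.createInstanceWithContext(
    "com.sun.star.bridge.UnoUrlResolver", local_ctx)
# The resolve() call is the first real round-trip over the URP bridge.
remote_ctx = resolver.resolve(
    "uno:socket,host=127.0.0.1,port=2002;urp;StarOffice.ComponentContext")
desktop = remote_ctx.ServiceManager.createInstanceWithContext(
    "com.sun.star.frame.Desktop", remote_ctx)
print(desktop is not None)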
I don't know if this could be your case, but I discovered that LogMeIn (running on my box) was also using port 2002. When I tried unoconv on that machine, I got the same error: Binary URP bridge disposed during call.
I killed LogMeIn and everything worked after that.
Hope this helps!
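A quick way to detect this kind of port clash before starting the listener is a plain socket probe; a small sketch, assuming the default unoconv port 2002:

import socket

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# connect_ex() returns 0 when something is already listening on the port.
in_use = s.connect_ex(("127.0.0.1", 2002)) == 0
s.close()
print("port 2002 is already in use" if in_use else "port 2002 is free")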
My test code sends an email with an attachment and saves a hash that is in the subject and body. I then have a function that takes the hash, searches for it, gets the UID, and fetches the email, returning the attachment data.
The problem I am having is that when I send a message and then subsequently search for the hash, the email server says there is no matching UID. However, if I run another copy of the script, it does find it, even if the second script is run first: the second one finds it but the original one doesn't, even though it searches later!
Output
$ python test_server_file_functions.py
Creating mail server
S: '* OK Gimap ready for requests from [ip] [data]'
C: '0001 CAPABILITY'
S: '* CAPABILITY IMAP4rev1 UNSELECT IDLE NAMESPACE QUOTA ID XLIST CHILDREN X-GM-EXT-1 XYZZY SASL-IR AUTH=XOAUTH AUTH=XOAUTH2'
S: '0001 OK Thats all she wrote! [data]'
C: '0002 LOGIN "user@gmail.com" "password"'
S: '* CAPABILITY IMAP4rev1 UNSELECT IDLE NAMESPACE QUOTA ID XLIST CHILDREN X-GM-EXT-1 UIDPLUS COMPRESS=DEFLATE ENABLE MOVE'
S: '0002 OK user@gmail.com Anonymous Test authenticated (Success)'
C: '0003 SELECT INBOX'
S: '* FLAGS (\\Answered \\Flagged \\Draft \\Deleted \\Seen)'
S: '* OK [PERMANENTFLAGS (\\Answered \\Flagged \\Draft \\Deleted \\Seen \\*)] Flags permitted.'
S: '* OK [UIDVALIDITY 1] UIDs valid.'
S: '* 0 EXISTS'
S: '* 0 RECENT'
S: '* OK [UIDNEXT 132] Predicted next UID.'
S: '0003 OK [READ-WRITE] INBOX selected. (Success)'
Does not exists
Created mail server
Sending email
Sent email
Waiting 3 minutes to make sure it isn't a simple delay with the email being relayed
Downloading Data...
C: '0004 SEARCH SUBJECT "EMS Data ID: 622904923b1825d5742ed25fb792fafe2e710c40ceea09660a604be8fabac35ae9b006c43c7a992159b8b0df376383830a6d4c54ed5b141c8429a4feec89cd8b"'
S: '* SEARCH'
S: '0004 OK SEARCH completed (Success)'
Unhandled Error
Traceback (most recent call last):
File "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/twisted/mail/imap4.py", line 2455, in _defaultHandler
cmd.finish(rest, self._extraInfo)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/twisted/mail/imap4.py", line 382, in finish
d.callback((send, lastLine))
File "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/twisted/internet/defer.py", line 368, in callback
self._startRunCallbacks(result)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/twisted/internet/defer.py", line 464, in _startRunCallbacks
self._runCallbacks()
--- <exception caught here> ---
File "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/twisted/internet/defer.py", line 551, in _runCallbacks
current.result = callback(current.result, *args, **kw)
File "/Users/user/Documents/gms/gms/mail_backend.py", line 178, in process_download_uid
raise IOError("Hash not found, however database indicates it was uploaded")
exceptions.IOError: Hash not found, however database indicates it was uploaded
There was an error retrieving the email
Traceback (most recent call last):
File "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/twisted/mail/imap4.py", line 2455, in _defaultHandler
cmd.finish(rest, self._extraInfo)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/twisted/mail/imap4.py", line 382, in finish
d.callback((send, lastLine))
File "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/twisted/internet/defer.py", line 368, in callback
self._startRunCallbacks(result)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/twisted/internet/defer.py", line 464, in _startRunCallbacks
self._runCallbacks()
--- <exception caught here> ---
File "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/twisted/internet/defer.py", line 551, in _runCallbacks
current.result = callback(current.result, *args, **kw)
File "/Users/user/Documents/gms/gms/mail_backend.py", line 178, in process_download_uid
raise IOError("Hash not found, however database indicates it was uploaded")
exceptions.IOError: Hash not found, however database indicates it was uploaded
Quiting...
Code
import os
# logging
from twisted.python import log
import sys
import time
import email
from Utils import BLOCK
# IMAP
from IMAPBackend import connectToIMAPServer, Command
# SMTP
from SMTPBackend import connectToSMTPServer
# Hash Database
from HashDatabase import HashDatabase, hash
# deferreds
from twisted.internet.defer import Deferred, DeferredList, succeed
from twisted.internet.task import deferLater
#reactor
from twisted.internet import reactor

BLOCK_SIZE = BLOCK / 1024 # convert from bytes (needed for FTP) to kilobytes

def createMailServer(username, password, smtp_server, imap_server, hash_db = "hash.db"):
    # create smtp connection
    smtp_d = connectToSMTPServer(smtp_server, username, password)
    # create imap connection
    imap_d = connectToIMAPServer(imap_server, username, password)
    dl = DeferredList([smtp_d, imap_d])
    dl.addCallback(lambda r: [ MailServer(r[0][1], r[1][1], username, hash_db) ][0] )
    return dl

class ServerManager(object):
    def __init__(self, mail_servers):
        self.mail_servers = mail_servers

    def get_server(self, accnt):
        for ms in self.mail_servers:
            if ms.account == accnt:
                return succeed(ms)

    def return_server(self):
        # retrieve the size available on the servers
        get_space_deferreds = []
        for ms in self.mail_servers:
            d = ms.get_space()
            d.addCallback(lambda r: (ms, r))
            get_space_deferreds.append(d)
        dl = DeferredList(get_space_deferreds, consumeErrors = True)
        dl.addCallback(self.parse_sizes)
        return dl

    def parse_sizes(self, results):
        for no_error, result in results:
            server = result[0]
            result = result[1]
            if no_error:# not an error so a potential server
                for argument in result[0]:
                    if argument[0] == "QUOTA":
                        print "Argument"
                        print argument
                        print "/Argument"
                        used, total = argument[2][1:3]
                        available_kb = int(total) - int(used)
                        if available_kb > BLOCK_SIZE:# server with more then our block size
                            return server
            else:
                print "Error from account %s" % server.email_address
        # no free space was found :-(
        raise IOError("No free space was found.")

class MailServer(object):
    "Manages a server"
    size = 0
    used_space = 0

    def __init__(self, smtp_connection, imap_connection, email_address, hash_db = "hash.db"):
        self.smtp_connection = smtp_connection
        self.imap_connection = imap_connection
        self.hash_database = HashDatabase(hash_db)
        self.email_address = email_address
        self.account = email_address
        # current uploads
        self.current_uploads = {}
        # current downloads
        self.current_downloads = {}

    def get_space(self):
        cmd = Command("GETQUOTAROOT", "INBOX", ["QUOTAROOT", "QUOTA"])
        d = self.imap_connection.sendCommand(cmd)
        return d

    def upload_data(self, data):
        """
        Uploads data to email server returns deferred that will return with the imap uid
        """
        data_hash = hash(data)
        if data_hash in self.current_uploads:
            d = Deferred()
            self.current_uploads[data_hash].append(d)
            return d
        if self.hash_database.hash_in_list(data_hash):
            print "Data hash is in the database; not uploading"
            return succeed(data_hash)
        else:
            d = Deferred()
            self.current_uploads[data_hash] = [d]
            id = "EMS Data ID: %s" % data_hash
            connection_deferred = self.smtp_connection.send_email(self.email_address, self.email_address, id, id, [["raw_ems", "ems.dat", data] ])
            connection_deferred.addCallback(self.upload_success, data_hash)
            connection_deferred.addErrback(self.upload_error, data_hash)
            connection_deferred.addBoth(self.notify_uploaders, data_hash)
            return d

    def notify_uploaders(self, result, data_hash):
        for waitingDeferred in self.current_uploads.pop(data_hash):
            # if r is a Failure, this is equivalent to calling .errback with
            # that Failure.
            waitingDeferred.callback(result)

    def upload_success(self, result, data_hash):
        # add to hash table
        self.hash_database.add_hash(data_hash)
        # immediately searching doesn't seem to work so search on data retrieval
        return data_hash

    def upload_error(self, error, data_hash):
        # upload error
        log.msg("Erroring uploading file")
        log.err(error)
        return error # send error to uploader

    def download_data(self, data_hash):
        """
        Downloads data from the email server returns a deferred that will return with the data
        """
        d = Deferred()
        if data_hash in self.current_downloads:
            self.current_downloads[data_hash].append(d)
            return d
        if not self.hash_database.hash_in_list(data_hash):
            print "Data Hash has never been uploaded..."
            raise IOError("No such data hash exists")
        else:
            self.current_downloads[data_hash] = [d]
            id = "EMS Data ID: %s" % data_hash
            connection_deferred = self.imap_connection.search("SUBJECT", "\"EMS Data ID: %s\"" % data_hash, uid = False)
            connection_deferred.addCallback(self.process_download_uid)
            connection_deferred.addErrback(self.download_error, data_hash)
            connection_deferred.addBoth(self.notify_downloaders, data_hash)
            return d

    def process_download_uid(self, id):
        if len(id) == 0:
            raise IOError("Hash not found, however database indicates it was uploaded")
        d = self.imap_connection.fetchMessage(id[-1])
        d.addCallback(self.process_download_attachment, id[-1])
        return d

    def process_download_attachment(self, data, id):
        email_text = data[id]["RFC822"]
        msg = email.message_from_string(email_text)
        for part in msg.walk():
            type = part.get_content_type()
            print repr(type)
            if "raw_ems" in type:
                log.msg("Found Payload")
                return part.get_payload(decode = True)
        log.msg("No attachment found")
        raise IOError("Data not found")

    def download_error(self, error, data_hash):
        log.msg("Error downloading file")
        log.err(error)
        return error

    def notify_downloaders(self, result, data_hash):
        for waitingDeferred in self.current_downloads.pop(data_hash):
            # if r is a Failure, this is equivalent to calling .errback with
            # that Failure.
            waitingDeferred.callback(result)

    def delete_data(self, data_hash):
        if not self.hash_database.hash_in_list(data_hash):
            raise IOError("No such data hash uploaded")
        else:
            # delete it to prevent anyone from trying to download it while it is being deleted
            self.hash_database.delete_hash(data_hash)
            d = self.imap_connection.search("SUBJECT", "\"EMS Data ID: %s\"" % data_hash, uid = False)
            d.addCallback(self.delete_message)
            d.addErrback(self.deletion_error, data_hash)
            return d

    def deletion_error(self, error, data_hash):
        print "Deletion Error"
        log.err(error)
        # restore hash to database
        self.hash_database.add_hash(data_hash)
        raise IOError("Couldn't delete message hash")

    def delete_message(self, id):
        if len(id) == 0:
            raise IOError("Hash not found, however database indicates it was uploaded")
        d = self.imap_connection.setFlags(id[-1], ["\\Deleted"])
        d.addCallback(lambda result: self.imap_connection.expunge())
        return d

## Main Code ##
if __name__ == "__main__":
    def deleted_email(result):
        print "Deleted the email succesfully"
        print "====Result===="
        print result
        print "====Result===="
        print "Quiting..."
        os._exit(0)

    def error_deleting(error):
        print "There was an error deleting the email"
        error.printTraceback()
        print "Quiting..."
        os._exit(0)

    def retrieved_data(result, ms, hash):
        print "Retrieved data"
        print "=====Data===="
        print result
        print "Deleting email"
        d = ms.delete_data(hash)
        d.addCallback(deleted_email)
        d.addErrback(error_deleting)
        return d

    def email_retrieval_error(error):
        print "There was an error retrieving the email"
        error.printTraceback()
        print "Quiting..."
        os._exit(0)

    def sent_email(hash, ms):
        print "Sent email"
        print "Waiting 3 minutes to make sure it isn't a simple delay with the email being relayed"
        time.sleep(3 * 60)
        print "Downloading Data..."
        d = ms.download_data(hash)
        d.addCallback(retrieved_data, ms, hash)
        d.addErrback(email_retrieval_error)
        return d

    def email_sending_error(error):
        print "There was an error sending the email"
        error.printTraceback()
        print "Quiting..."
        os._exit(0)

    def mail_server_created(ms):
        # created mail server
        print "Created mail server"
        print "Sending email"
        d = ms.upload_data("this is the attachment data I am sending to my email account")
        d.addCallback(sent_email, ms)
        d.addErrback(email_sending_error)
        return d

    def mail_server_error(error):
        print "Error creating mail server"
        error.printTraceback()
        print "Quiting..."
        os._exit(0)

    # create mail server object
    print "Creating mail server"
    d = createMailServer("user@gmail.com", "password", "smtp.gmail.com:587", "imap.gmail.com:993", hash_db = "testhash.db")
    d.addCallback(mail_server_created)
    d.addErrback(mail_server_error)

    from twisted.internet import reactor
    reactor.run()
I am thinking I may have to re-select the mailbox? I looked at the RFC 3501 SELECT and SEARCH commands and found nothing about such a problem.
The SEARCH command works on the data that was collected when the mail folder was selected with the SELECT command.
You will have to select the mail folder again to pick up the new mail entry.
The search result will not include the new mail unless the server implements IDLE/NOOP functionality (again, it depends entirely on the mail server).
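Here is the same idea as a hedged imaplib sketch on a plain blocking connection (server, credentials and hash are placeholders): re-issue SELECT, or at least NOOP, on the already-open session before searching, so it sees messages delivered after the first SELECT:

import imaplib

conn = imaplib.IMAP4_SSL("imap.gmail.com", 993)
conn.login("user@example.com", "password")
conn.select("INBOX")
# ... the message is sent and delivered after this point ...
conn.noop()            # gives the server a chance to send untagged EXISTS updates
conn.select("INBOX")   # or re-select to force a completely fresh view
typ, data = conn.search(None, 'SUBJECT', '"EMS Data ID: <hash>"')
print(typ, data)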
I'm currently in the process of learning SSH via the brute-force, just-keep-hacking-until-I-understand-it approach. After some trial and error I've been able to successfully send a "pty-req" followed by a "shell" request; I can get the login preamble, send commands and receive stdout, but I'm not exactly sure how to tell the SSH service I want to receive stderr and status messages. Reading through other SSH implementations (paramiko, Net::SSH) hasn't been much of a guide so far.
That said, looking at one of the RFCs for SSH, I believe that perhaps one of the listed requests might be what I am looking for: https://www.rfc-editor.org/rfc/rfc4250#section-4.9.3
#!/usr/bin/env python
from twisted.conch.ssh import transport
from twisted.conch.ssh import userauth
from twisted.conch.ssh import connection
from twisted.conch.ssh import common
from twisted.conch.ssh.common import NS
from twisted.conch.ssh import keys
from twisted.conch.ssh import channel
from twisted.conch.ssh import session
from twisted.internet import defer
from twisted.internet import defer, protocol, reactor
from twisted.python import log
import struct, sys, getpass, os

log.startLogging(sys.stdout)

USER = 'dward'
HOST = '192.168.0.19' # pristine.local
PASSWD = "password"
PRIVATE_KEY = "~/id_rsa"

class SimpleTransport(transport.SSHClientTransport):
    def verifyHostKey(self, hostKey, fingerprint):
        print 'host key fingerprint: %s' % fingerprint
        return defer.succeed(1)

    def connectionSecure(self):
        self.requestService(
            SimpleUserAuth(USER,
                SimpleConnection()))

class SimpleUserAuth(userauth.SSHUserAuthClient):
    def getPassword(self):
        return defer.succeed(PASSWD)

    def getGenericAnswers(self, name, instruction, questions):
        print name
        print instruction
        answers = []
        for prompt, echo in questions:
            if echo:
                answer = raw_input(prompt)
            else:
                answer = getpass.getpass(prompt)
            answers.append(answer)
        return defer.succeed(answers)

    def getPublicKey(self):
        path = os.path.expanduser(PRIVATE_KEY)
        # this works with rsa too
        # just change the name here and in getPrivateKey
        if not os.path.exists(path) or self.lastPublicKey:
            # the file doesn't exist, or we've tried a public key
            return
        return keys.Key.fromFile(filename=path+'.pub').blob()

    def getPrivateKey(self):
        path = os.path.expanduser(PRIVATE_KEY)
        return defer.succeed(keys.Key.fromFile(path).keyObject)

class SimpleConnection(connection.SSHConnection):
    def serviceStarted(self):
        self.openChannel(SmartChannel(2**16, 2**15, self))

class SmartChannel(channel.SSHChannel):
    name = "session"

    def getResponse(self, timeout = 10):
        self.onData = defer.Deferred()
        self.timeout = reactor.callLater( timeout, self.onData.errback, Exception("Timeout") )
        return self.onData

    def openFailed(self, reason):
        print "Failed", reason

    @defer.inlineCallbacks
    def channelOpen(self, ignoredData):
        self.data = ''
        self.oldData = ''
        self.onData = None
        self.timeout = None
        term = os.environ.get('TERM', 'xterm')
        #winsz = fcntl.ioctl(fd, tty.TIOCGWINSZ, '12345678')
        winSize = (25,80,0,0) #struct.unpack('4H', winsz)
        ptyReqData = session.packRequest_pty_req(term, winSize, '')
        try:
            result = yield self.conn.sendRequest(self, 'pty-req', ptyReqData, wantReply = 1 )
        except Exception as e:
            print "Failed with ", e
        try:
            result = yield self.conn.sendRequest(self, "shell", '', wantReply = 1)
        except Exception as e:
            print "Failed shell with ", e

        #fetch preample
        data = yield self.getResponse()
        """
        Welcome to Ubuntu 11.04 (GNU/Linux 2.6.38-8-server x86_64)
        * Documentation: http://www.ubuntu.com/server/doc
        System information as of Sat Oct 29 13:09:50 MDT 2011
        System load: 0.0 Processes: 111
        Usage of /: 48.0% of 6.62GB Users logged in: 1
        Memory usage: 39% IP address for eth1: 192.168.0.19
        Swap usage: 3%
        Graph this data and manage this system at https://landscape.canonical.com/
        New release 'oneiric' available.
        Run 'do-release-upgrade' to upgrade to it.
        Last login: Sat Oct 29 01:23:16 2011 from 192.168.0.17
        """
        print data
        while data != "" and data.strip().endswith("~$") == False:
            try:
                data = yield self.getResponse()
                print repr(data)
                """
                \x1B]0;dward@pristine: ~\x07dward@pristine:~$
                """
            except Exception as e:
                print e
                break
        self.write("false\n")
        #fetch response
        try:
            data = yield self.getResponse()
        except Exception as e:
            print "Failed to catch response?", e
        else:
            print data
            """
            false
            \x1B]0;dward@pristine: ~\x07dward@pristine:~$
            """
        self.write("true\n")
        #fetch response
        try:
            data = yield self.getResponse()
        except Exception as e:
            print "Failed to catch response?", e
        else:
            print data
            """
            true
            \x1B]0;dward@pristine: ~\x07dward@pristine:~$
            """
        self.write("echo Hello World\n\x00")
        try:
            data = yield self.getResponse()
        except Exception as e:
            print "Failed to catch response?", e
        else:
            print data
            """
            echo Hello World
            Hello World
            \x1B]0;dward@pristine: ~\x07dward@pristine:~$
            """
        #Close up shop
        self.loseConnection()
        dbgp = 1

    def request_exit_status(self, data):
        status = struct.unpack('>L', data)[0]
        print 'status was: %s' % status

    def dataReceived(self, data):
        self.data += data
        if self.onData is not None:
            if self.timeout and self.timeout.active():
                self.timeout.cancel()
            if self.onData.called == False:
                self.onData.callback(data)

    def extReceived(self, dataType, data):
        dbgp = 1
        print "Extended Data recieved! dataType = %s , data = %s " % ( dataType, data, )
        self.extendData = data

    def closed(self):
        print 'got data : %s' % self.data.replace("\\r\\n","\r\n")
        self.loseConnection()
        reactor.stop()

protocol.ClientCreator(reactor, SimpleTransport).connectTCP(HOST, 22)
reactor.run()
Additionally, I tried sending an explicitly bad command to the remote shell:
self.write("ls -alF badPathHere\n\x00")
try:
    data = yield self.getResponse()
except Exception as e:
    print "Failed to catch response?", e
else:
    print data
    """
    ls -alF badPathHere
    ls: cannot access badPathHere: No such file or directory
    \x1B]0;dward@pristine: ~\x07dward@pristine:~$
    """
And it looks like stderr is being mixed into stdout.
Digging through the OpenSSH source code, channel session logic is handled in session.c (around line 2227) by session_input_channel_req, which for a "pty-req" followed by a "shell" request leads to do_exec_pty, which ultimately calls session_set_fds(s, ptyfd, fdout, -1, 1, 1). The fourth argument would normally be a file descriptor responsible for handling stderr, but since none is supplied there won't be any extended data for stderr.
Ultimately, even if I modified OpenSSH to provide a stderr FD, the problem resides with the shell. This is complete guesswork at this point, but I believe that, just as when logging into an SSH service via a terminal like xterm or PuTTY, stderr and stdout are sent together unless explicitly redirected via something like "2> someFile", which is beyond the scope of an SSH service provider.
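If the goal is to receive stderr separately, one approach (a sketch under the assumptions above, not a drop-in replacement for the interactive shell session) is to skip the pty-req entirely and run the command with an "exec" request. With no pty allocated, the server delivers stderr as SSH extended data, which twisted.conch routes to extReceived() with dataType == EXTENDED_DATA_STDERR, while the exit status still arrives via request_exit_status():

import struct
from twisted.conch.ssh import channel, common, connection

class ExecChannel(channel.SSHChannel):
    name = 'session'

    def channelOpen(self, ignoredData):
        # No pty-req here: without a pty, stdout and stderr stay separate.
        self.conn.sendRequest(
            self, 'exec', common.NS('ls -alF badPathHere'), wantReply=1)

    def dataReceived(self, data):
        print('stdout: %r' % data)

    def extReceived(self, dataType, data):
        if dataType == connection.EXTENDED_DATA_STDERR:
            print('stderr: %r' % data)

    def request_exit_status(self, data):
        print('exit status: %d' % struct.unpack('>L', data)[0])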