Test using mock raises exception - python

I have a strange error that I can't get my head around. I am trying to test whether one method is called inside another method. I have used the debugger in PyCharm and it seems to be; however, the test fails because the second function is actually called and raises an exception.
test.py
def test_reply_to_toot(self, directory):
    with patch("mastodon.Mastodon") as mastodon_mock:
        mastodon_mock.return_value = Mock()
        mastodon_mock.status_post.assert_called_with(bot.reply_to_toot("1", account_name="#fake", message="test"))
bot.py (method being tested)
# Set up Mastodon
mastodon = Mastodon(
    access_token=os.getenv("ACCESS_TOKEN"),
    api_base_url=settings.BASE_ADDRESS
)

def reply_to_toot(post_id, account_name, message=None, status_notifications=None):
    media_ids = []
    for fn in os.listdir(str(settings.INPUT_FOLDER)):
        if fn.endswith(('.jpeg', '.png')):
            print(Path(fn))
            image_dict = mastodon.media_post(str(settings.INPUT_FOLDER / fn))
            media_ids.append(image_dict["id"])
    if message is not None:
        parts = []
        total_len = str(len(message) // settings.MAX_MESSAGE_LENGTH + 1)
        count = 1
        split_lines = message.splitlines(True)
        while split_lines:
            message_part = "#" + account_name + " {}/".format(count) + total_len + "\n\n"
            while split_lines != [] and len(message_part) + len(split_lines[0]) < settings.MAX_MESSAGE_LENGTH:
                message_part += split_lines[0]
                split_lines = split_lines[1:]
            parts.append(message_part)
            count += 1
        for part in parts:
            print(part)
            post_id = mastodon.status_post(status=part, media_ids=media_ids, in_reply_to_id=post_id)
    else:
        while media_ids:
            mastodon.status_post(status=message, media_ids=media_ids[0:4], in_reply_to_id=post_id)
            media_ids = media_ids[4:]
Exception raised:
Error
Traceback (most recent call last):
  File "C:\Python35\lib\unittest\case.py", line 58, in testPartExecutor
    yield
  File "C:\Python35\lib\unittest\case.py", line 600, in run
    testMethod()
  File "C:\Python35\lib\unittest\mock.py", line 1157, in patched
    return func(*args, **keywargs)
  File "C:\Users\Hugh\PycharmProjects\summer-project\test\test.py", line 70, in test_reply_to_toot
    mastodon_mock.status_post.assert_called_with(bot.reply_to_toot("1", account_name="#fake", message="test"))
  File "C:\Users\Hugh\PycharmProjects\summer-project\src\bot.py", line 65, in reply_to_toot
    post_id = mastodon.status_post(status=part, media_ids=media_ids, in_reply_to_id=post_id)
  File "<decorator-gen-60>", line 2, in status_post
  File "C:\Users\Hugh\PycharmProjects\summer-project\venv\lib\site-packages\mastodon\Mastodon.py", line 102, in wrapper
    return function(self, *args, **kwargs)
  File "C:\Users\Hugh\PycharmProjects\summer-project\venv\lib\site-packages\mastodon\Mastodon.py", line 1776, in status_post
    return self.__api_request('POST', '/api/v1/statuses', params, headers = headers, use_json = use_json)
  File "C:\Users\Hugh\PycharmProjects\summer-project\venv\lib\site-packages\mastodon\Mastodon.py", line 3429, in __api_request
    error_msg)
mastodon.Mastodon.MastodonNotFoundError: ('Mastodon API returned error', 404, 'Not Found', 'The status you are trying to reply to does not appear to exist.')
Ran 1 test in 0.146s
FAILED (errors=1)
Process finished with exit code 1

Solution
Mock Mastodon within the context of the bot module, i.e. patch the name where bot looks it up.
Example
test_bot.py
import unittest
from unittest.mock import patch, Mock

import bot

class TestBot(unittest.TestCase):
    @patch("bot.Mastodon")
    def test_bot(self, mastodon_mock):
        b = bot.Bot()
        breakpoint()  # optional: drop into the debugger to inspect the mock
        mastodon_mock.return_value = Mock()
        mastodon_mock.status_post.assert_called_with(
            b.reply_to_toot("1", account_name="#fake", message="test")
        )
bot.py
import os
from pathlib import Path

from mastodon import Mastodon

# `settings` is assumed to be the same project-local settings module used above.

class Bot(object):
    def __init__(self, access_token="", base_address=""):
        # Set up Mastodon
        self.mastodon = Mastodon(
            access_token,
            base_address
        )

    def reply_to_toot(self, post_id, account_name, message=None, status_notifications=None):
        media_ids = []
        for fn in os.listdir(str(settings.INPUT_FOLDER)):
            if fn.endswith(('.jpeg', '.png')):
                print(Path(fn))
                image_dict = self.mastodon.media_post(str(settings.INPUT_FOLDER / fn))
                media_ids.append(image_dict["id"])
        if message is not None:
            parts = []
            total_len = str(len(message) // settings.MAX_MESSAGE_LENGTH + 1)
            count = 1
            split_lines = message.splitlines(True)
            while split_lines:
                message_part = "#" + account_name + " {}/".format(count) + total_len + "\n\n"
                while split_lines != [] and len(message_part) + len(split_lines[0]) < settings.MAX_MESSAGE_LENGTH:
                    message_part += split_lines[0]
                    split_lines = split_lines[1:]
                parts.append(message_part)
                count += 1
            for part in parts:
                print(part)
                post_id = self.mastodon.status_post(status=part, media_ids=media_ids, in_reply_to_id=post_id)
        else:
            while media_ids:
                self.mastodon.status_post(status=message, media_ids=media_ids[0:4], in_reply_to_id=post_id)
                media_ids = media_ids[4:]
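Why patching "bot.Mastodon" works: bot.py does from mastodon import Mastodon, so the name the code actually calls lives in the bot module's namespace, and mock.patch must target the name where it is looked up, not where it is defined. Note also that the original module-level mastodon = Mastodon(...) runs at import time, before any patch can take effect, which is why the solution moves construction into Bot.__init__. A minimal sketch of the rule of thumb (module names match the example above):

from unittest.mock import patch

# Patching the name where it is *defined* does not touch bot's copy:
with patch("mastodon.Mastodon"):
    pass  # bot.Mastodon still refers to the real class

# Patching the name where it is *looked up* replaces what bot calls:
with patch("bot.Mastodon") as mastodon_mock:
    pass  # Mastodon(...) calls inside bot now return the mock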
References
Partial Mocking: https://docs.python.org/3/library/unittest.mock-examples.html#partial-mocking

Related

How to set labels for all instances in a GCP project that contain a specific label key

I am pretty new to Python and I am looking for code that can be called from AWS Lambda to update labels on all instances in a GCP project that contain a specific label key/value pair.
from botocore.exceptions import ClientError
from typing import Dict, Iterable
from google.cloud import compute_v1
from googleapiclient import discovery
import json
import os

config = Config()
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "creds.json"

def updateComputeLabels(data, account):
    log.info("-----")
    log.info("updating Compute Engine labels")
    RC_PlatformCode = data['NewImage']['PlatformCode']['S']
    platformcode_gcp = 'rc_platformcode'
    tagKey = data['NewImage']['Key']['S']
    tagValue = data['NewImage']['Value']['S']
    log.info("tagkey = " + tagKey)
    labelKeyLower = tagKey.lower()
    labelValueLower = tagValue.lower()
    labelKey = labelKeyLower.replace("#", "_at_")
    labelValue = labelValueLower.replace("#", "_at_")
    instance_client = compute_v1.InstancesClient.from_service_account_json('creds.json')
    service = discovery.build('compute', 'v1', 'GOOGLE_APPLICATION_CREDENTIALS')
    request = compute_v1.AggregatedListInstancesRequest()
    project = account
    request.project = project
    request.max_results = 50
    agg_list = instance_client.aggregated_list(request=request)
    all_instances = {}
    print("Instances found:")
    # Despite using the `max_results` parameter, you don't need to handle the pagination
    # yourself. The returned `AggregatedListPager` object handles pagination
    # automatically, returning separated pages as you iterate over the results.
    for zone, response in agg_list:
        if response.instances:
            all_instances[zone] = response.instances
            print(f" {zone}:")
            for instance in response.instances:
                log.info("instance name -> " + instance.name)
                update_tag = False
                try:
                    labels = instance.labels
                    labelfingerprint = instance.label_fingerprint
                    log.info("tags -> " + str(labels))
                    log.info("fingerprint -> " + str(labelfingerprint))
                except ClientError:
                    continue
                for label in labels:
                    if labels["rc_platformcode"] == RC_PlatformCode:
                        log.info(
                            f"RC_PlatformCode [{RC_PlatformCode}] present for instance [{instance.name}]")
                        update_tag = True
                        break
                if update_tag:
                    create_tag = True
                    log.info("processing instance -> " + instance.name)
                    log.info("setting tag Key -> " + tagKey.lower())
                    log.info("setting tag Value -> " + tagValue.replace("#", "_at_"))
                    for label in labels:
                        log.info("checking tag -> " + label)
                    labels[tagKey.lower()] = tagValue.replace("#", "_at_")
                    instance_labels = labels
                    instances_set_labels_request_body = {
                        'labels': instance_labels,
                        'labelFingerprint': labelfingerprint
                    }
                    zone_clean = zone.replace("zones/", "")
                    request = service.instances().setLabels(project=project, zone=zone_clean, instance=instance.name, body=instances_set_labels_request_body)
                    response = request.execute()
                    break
    return all_instances
That is the code I currently have. It lacks quality, and I am facing the following error when trying to call setLabels:
{
"errorMessage": "Object of type MessageMapContainer is not JSON serializable",
"errorType": "TypeError",
"stackTrace": [
" File \"/var/task/handler.py\", line 60, in handler\n updateComputeLabels(data, account)\n",
" File \"/var/task/cloudCompute.py\", line 78, in updateComputeLabels\n request = service.instances().setLabels(project=project, zone=zone_clean, instance=instance.name, body=instances_set_labels_request_body)\n",
" File \"/opt/python/googleapiclient/discovery.py\", line 1101, in method\n headers, actual_path_params, actual_query_params, body_value\n",
" File \"/opt/python/googleapiclient/model.py\", line 160, in request\n body_value = self.serialize(body_value)\n",
" File \"/opt/python/googleapiclient/model.py\", line 273, in serialize\n return json.dumps(body_value)\n",
" File \"/var/lang/lib/python3.7/json/__init__.py\", line 231, in dumps\n return _default_encoder.encode(obj)\n",
" File \"/var/lang/lib/python3.7/json/encoder.py\", line 199, in encode\n chunks = self.iterencode(o, _one_shot=True)\n",
" File \"/var/lang/lib/python3.7/json/encoder.py\", line 257, in iterencode\n return _iterencode(o, 0)\n",
" File \"/var/lang/lib/python3.7/json/encoder.py\", line 179, in default\n raise TypeError(f'Object of type {o.__class__.__name__} '\n"
]
}
Contents of instances_set_labels_request_body
{'labels': {'rc_platformcode': 'tst', 'rc_costcenter': '7777'}, 'labelFingerprint': '3E6RKl7bF_4='}
Can anyone help me or am I doing it all wrong? Thanks!
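A likely culprit, judging from the traceback: json.dumps chokes on instance.labels, which compute_v1 returns as a protobuf map container (the MessageMapContainer in the error), not a plain dict. A minimal sketch of copying it into a regular dict before building the request body; this is an assumption, not verified against this exact setup:

# instance.labels supports the mapping protocol but is not JSON
# serializable, so copy it into an ordinary dict first.
instance_labels = dict(instance.labels)
instance_labels[tagKey.lower()] = tagValue.replace("#", "_at_")
instances_set_labels_request_body = {
    'labels': instance_labels,
    'labelFingerprint': labelfingerprint
}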

Python zlib error while bruteforcing protected zip file

Basically, I'm trying to write a script in Python to brute-force a password-protected zip file, trying every character combination (i.e. aa, ba, ca, da, etc.). But after a few tries it raises a strange error, and I haven't been able to find a solution for it anywhere.
Program:
import zipfile as z

class zipVictim:
    def __init__(self, file):
        self.found = False
        self.password = ''
        self.file = file
        self.extracted_file_list = []

    def bruteforceAttack(self, start_char: int, end_char: int, length: int, deep_loop=False, print_mode=False):
        """
        Doc
        """
        def _loop_chain(self, file, start_char, end_char, length):
            lList = []
            sPass = ''
            iAttempt = 1
            for iInc in range(length):
                lList.append(start_char)
            while lList[len(lList)-1] < end_char:
                for iInc2 in range(start_char, end_char):
                    for iInc3 in range(len(lList)):
                        sPass = sPass + chr(lList[iInc3])
                        if iInc3 == 0:
                            lList[iInc3] = lList[iInc3] + 1
                        elif lList[iInc3 - 1] > end_char:
                            lList[iInc3] = lList[iInc3] + 1
                            lList[iInc3 - 1] = start_char
                    try:
                        if print_mode:
                            print("Attempt %s, password: %s" % (iAttempt, sPass), end='\r')
                        iAttempt = iAttempt + 1
                        oFile.extractall(pwd=sPass.encode())
                        self.extracted_file_list = oFile.namelist()
                        self.password = sPass
                        self.found = True
                        return self.found
                    except RuntimeError:
                        pass
                    sPass = ''
            return self.found

        oFile = self.file
        if not deep_loop:
            _loop_chain(self, oFile, start_char, end_char, length)
        else:
            for iInc in range(length):
                _loop_chain(self, oFile, start_char, end_char, iInc + 1)

if __name__ == '__main__':
    file = z.ZipFile('data.zip')
    s = zipVictim(file)
    s.bruteforceAttack(64, 125, 2, print_mode=True)
Error retrieved:
Traceback (most recent call last):
  File "C:\Program Files (x86)\Microsoft Visual Studio\Shared\Python36_64\lib\zipfile.py", line 925, in _read1
    data = self._decompressor.decompress(data, n)
zlib.error: Error -3 while decompressing data: invalid block type
Does anybody know what triggers this error and how to solve it?
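One plausible trigger: the legacy ZipCrypto scheme verifies a password against a single check byte, so roughly 1 in 256 wrong passwords passes that check and only fails later, inside zlib, with exactly this "invalid block type" error. A sketch of an attempt helper that treats those failures as ordinary wrong guesses (try_password is a hypothetical helper, not part of the script above):

import zlib
import zipfile

def try_password(zf, password):
    # Returns True if the password opens the archive, False otherwise.
    try:
        zf.extractall(pwd=password.encode())
        return True
    except RuntimeError:
        # Wrong password caught by the one-byte header check.
        return False
    except (zlib.error, zipfile.BadZipFile):
        # ~1 in 256 wrong passwords slips past the header check and only
        # blows up in the decompressor; treat it the same way.
        return False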

Truncated file header while using multiprocessing

When I run the following code:
def book_processing(pair, pool_length):
    p = Pool(len(pool_length)*3)
    temp_parameters = partial(book_call_mprocess, pair)
    p.map_async(temp_parameters, pool_length).get(999999)
    p.close()
    p.join()
    return exchange_books
I get the following error:
Traceback (most recent call last):
  File "test_code.py", line 214, in <module>
    current_books = book_call.book_processing(cp, book_list)
  File "/home/user/Desktop/book_call.py", line 155, in book_processing
    p.map_async(temp_parameters, pool_length).get(999999)
  File "/usr/lib/python2.7/multiprocessing/pool.py", line 567, in get
    raise self._value
zipfile.BadZipfile: Truncated file header
I feel as though there is some resource being used that didn't close during the last loop, but I am not sure how to close it (I'm still learning about the multiprocessing library). The error only occurs when my code repeats this section relatively quickly (within the same minute). It does not happen often, but it is clear when it does.
Edit (adding the book_call code):
def book_call_mprocess(currency_pair, ex_list):
    polo_error = 0
    live_error = 0
    kraken_error = 0
    gdax_error = 0
    ex_list = set([ex_list])
    ex_Polo = 'Polo'
    ex_Live = 'Live'
    ex_GDAX = 'GDAX'
    ex_Kraken = 'Kraken'
    cp_polo = 'BTC_ETH'
    cp_kraken = 'XETHXXBT'
    cp_live = 'ETH/BTC'
    cp_GDAX = 'ETH-BTC'
    # Instances
    polo_instance = poloapi.poloniex(polo_key, polo_secret)
    fookraken = krakenapi.API(kraken_key, kraken_secret)
    publicClient = GDAX.PublicClient()
    flag = False
    while not flag:
        flag = False
        err = False
        # Polo Book
        try:
            if ex_Polo in ex_list:
                polo_books = polo_instance.returnOrderBook(cp_polo)
                exchange_books['Polo'] = polo_books
        except:
            err = True
            polo_error = 1
        # Livecoin
        try:
            if ex_Live in ex_list:
                method = "/exchange/order_book"
                live_books = OrderedDict([('currencyPair', cp_live)])
                encoded_data = urllib.urlencode(live_books)
                sign = hmac.new(live_secret, msg=encoded_data, digestmod=hashlib.sha256).hexdigest().upper()
                headers = {"Api-key": live_key, "Sign": sign}
                conn = httplib.HTTPSConnection(server)
                conn.request("GET", method + '?' + encoded_data, '', headers)
                response = conn.getresponse()
                live_books = json.load(response)
                conn.close()
                exchange_books['Live'] = live_books
        except:
            err = True
            live_error = 1
        # Kraken
        try:
            if ex_Kraken in ex_list:
                kraken_books = fookraken.query_public('Depth', {'pair': cp_kraken})
                exchange_books['Kraken'] = kraken_books
        except:
            err = True
            kraken_error = 1
        # GDAX books
        try:
            if ex_GDAX in ex_list:
                gdax_books = publicClient.getProductOrderBook(level=2, product=cp_GDAX)
                exchange_books['GDAX'] = gdax_books
        except:
            err = True
            gdax_error = 1
        flag = True
        if err:
            flag = False
            err = False
            error_list = ['Polo', polo_error, 'Live', live_error, 'Kraken', kraken_error, 'GDAX', gdax_error]
            print_to_excel('excel/error_handler.xlsx', 'Book Call Errors', error_list)
            print "Holding..."
            time.sleep(30)
    return exchange_books

def print_to_excel(workbook, worksheet, data_list):
    ts = str(datetime.datetime.now()).split('.')[0]
    data_list = [ts] + data_list
    wb = load_workbook(workbook)
    if worksheet == 'active':
        ws = wb.active
    else:
        ws = wb[worksheet]
    ws.append(data_list)
    wb.save(workbook)
The problem lies in the function print_to_excel, and more specifically here:
wb = load_workbook(workbook)
If two processes run this function at the same time, you'll run into the following race condition:
1. Process 1 wants to open error_handler.xlsx; since it doesn't exist, it creates an empty file.
2. Process 2 wants to open error_handler.xlsx; it does exist, so it tries to read it, but it is still empty. Since the xlsx format is just a zip file consisting of a bunch of XML files, the process expects a valid ZIP header, which it doesn't find, and it emits zipfile.BadZipfile: Truncated file header.
What looks strange, though, is your error message: in the call stack I would have expected to see print_to_excel and load_workbook.
Anyway, since you confirmed that the problem really is in the XLSX handling, you can either:
- generate a new filename via tempfile for every process, or
- use locking to ensure that only one process runs print_to_excel at a time (see the sketch below).
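A minimal sketch of the locking option, assuming the Pool is created as in the question; the initializer pattern is one common way to hand a multiprocessing.Lock to pool workers:

from multiprocessing import Pool, Lock

def init_pool(l):
    # Stash the shared lock in a global so each worker can reach it.
    global xlsx_lock
    xlsx_lock = l

def print_to_excel_locked(workbook, worksheet, data_list):
    # Serialize the read-modify-write cycle on the workbook file.
    with xlsx_lock:
        print_to_excel(workbook, worksheet, data_list)

# Hypothetical wiring, mirroring book_processing:
# lock = Lock()
# p = Pool(len(pool_length) * 3, initializer=init_pool, initargs=(lock,))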

GAE Search API raising Default text value is not appropriate for sort expression

I am doing a location search in Google App Engine and I want my search to be sorted based on proximity. I am getting the following error on the deployed version (production):
Search error:
Traceback (most recent call last):
  File "/base/data/home/apps/s~sound-helper-87921/1.385231928987755902/application/search_handler.py", line 68, in doProductSearch
    search_results = docs.Product.getIndex().search(search_query)
  File "/base/data/home/runtimes/python27/python27_lib/versions/1/google/appengine/datastore/datastore_rpc.py", line 105, in positional_wrapper
    return wrapped(*args, **kwds)
  File "/base/data/home/runtimes/python27/python27_lib/versions/1/google/appengine/api/search/search.py", line 3676, in search
    return self.search_async(query, deadline=deadline, **kwargs).get_result()
  File "/base/data/home/runtimes/python27/python27_lib/versions/1/google/appengine/api/search/search.py", line 262, in get_result
    return self._get_result_hook()
  File "/base/data/home/runtimes/python27/python27_lib/versions/1/google/appengine/api/search/search.py", line 3690, in hook
    _CheckStatus(response.status())
  File "/base/data/home/runtimes/python27/python27_lib/versions/1/google/appengine/api/search/search.py", line 517, in _CheckStatus
    raise _ERROR_MAP[status.code()](status.error_detail())
InvalidRequest: Failed to parse search request "distance(location, geopoint(30.008164999999998,-95.52959159999999)) < 2000"; Default text value is not appropriate for sort expression 'distance(location, geopoint(30.008165,-95.529592))'
The following is my code, which is pretty much copied from Google's tutorial:
def _buildQueryString(self, params):
    userstr = string = params.get('querystr')
    userprice = params.get('price')
    userdist = params.get('less_than_distance')
    loc = params.get('cur_location')
    lat = loc.split(',')[0].split()[0]
    lng = loc.split(',')[1].split()[0]
    if userstr:
        string = userstr
    if userprice:
        string = string + ' price < %s' % userprice
    if userdist:
        if not os.environ.get('SERVER_SOFTWARE', '').startswith('Development'):
            string = string + ' distance(%s, geopoint(%s,%s)) < %s' % (
                docs.Product.LOCATION, lat, lng, userdist)
    return string

def _buildQuery(self, params):
    """Build and return a search query object."""
    user_query = self._buildQueryString(params)
    doc_limit = self._getDocLimit()
    try:
        offsetval = int(params.get('offset', 0))
    except ValueError:
        offsetval = 0
    loc = params.get('cur_location')
    lat = loc.split(',')[0].split()[0]
    lng = loc.split(',')[1].split()[0]
    expr = 'distance(%s, geopoint(%f,%f))' % (docs.Product.LOCATION, float(lat), float(lng))
    computed_expr_distance = search.FieldExpression(name='actual_distance',
                                                    expression=expr)
    computed_expr_score = search.FieldExpression(name='actual_score',
                                                 expression='score')
    returned_fields = [docs.Product.PID]
    expr_list = []
    expr_list.append(search.SortExpression(
        expression=expr,
        direction=search.SortExpression.ASCENDING,
        default_value='2001'))
    sortopts = search.SortOptions(expressions=expr_list, limit=doc_limit)
    search_query = search.Query(
        query_string=user_query.strip(),
        options=search.QueryOptions(
            limit=doc_limit,
            offset=offsetval,
            sort_options=sortopts,
            returned_expressions=[computed_expr_distance],
            returned_fields=returned_fields
        )
    )
    return search_query

def doProductSearch(self, params):
    """Perform a product search and display the results."""
    try:
        search_query = self._buildQuery(params)
        search_results = docs.Product.getIndex().search(search_query)
        returned_count = len(search_results.results)
    except search.Error:
        logging.exception("Search error:")
        msg = 'There was a search error (see logs).'
        url = '/'
        print('%s' % msg)
        return [], []
    psearch_response = []
    distances = []
    # For each document returned from the search
    for doc in search_results:
        pdoc = docs.Product(doc)
        for expr in doc.expressions:
            if expr.name == 'actual_distance':
                distances.append(expr.value)
        pid = pdoc.getPID()
        psearch_response.append(long(pid))
    logging.debug('Distances: ' + str(distances))
    return psearch_response, distances
Why is the Search API not recognizing my search query?
The problem was in my default_value: distance() produces a numeric value, so the sort expression needs a numeric default rather than a string. I modified the SortExpression to have an integer default_value:
expr_list.append(search.SortExpression(
    expression=expr,
    direction=search.SortExpression.ASCENDING,
    default_value=500000))

Error when using offlineimap: getfolder() asked for nonexisting folder

I'm trying to get my emails to work from the command line with mutt. I have been trying to follow these two guides: http://blog.developwithpassion.com/2013/05/02/getting-up-and-running-with-a-sane-mutt-setup/ and http://stevelosh.com/blog/2012/10/the-homely-mutt/#configuring-offlineimap
To do this there are 4 main steps:
1. set up offlineimap to download and keep your emails synced
2. set up mutt (the email user interface)
3. set up notmuch (to be able to search your emails)
4. set up msmtp (to be able to send emails)
Note that I am using a MacBook Pro running OS X 10.9.2. I am stuck at step 1 because I am getting an error with offlineimap! It runs for a long time, syncs all of the emails (36,197 of them!), and then right at the end spits out the following error:
ERROR: getfolder() asked for a nonexisting folder 'Drafts'.
Folder Deleted Items [acc: gloriphobia]:
Establishing connection to imap.gmail.com:993
Account sync gloriphobia:
*** Finished account 'gloriphobia' in 137:16
ERROR: Exceptions occurred during the run!
ERROR: command: UID => socket error: <class 'socket.error'> - [Errno 54] Connection reset by peer
Traceback:
  File "/usr/local/Cellar/offline-imap/6.5.5/libexec/offlineimap/folder/IMAP.py", line 219, in getmessage
    '(BODY.PEEK[])')
  File "/usr/local/Cellar/offline-imap/6.5.5/libexec/offlineimap/imaplib2.py", line 1167, in uid
    return self._simple_command('UID', command, *args, **kw)
  File "/usr/local/Cellar/offline-imap/6.5.5/libexec/offlineimap/imaplib2.py", line 1615, in _simple_command
    return self._command_complete(self._command(name, *args), kw)
  File "/usr/local/Cellar/offline-imap/6.5.5/libexec/offlineimap/imaplib2.py", line 1378, in _command_complete
    typ, dat = rqb.get_response('command: %s => %%s' % rqb.name)
  File "/usr/local/Cellar/offline-imap/6.5.5/libexec/offlineimap/imaplib2.py", line 176, in get_response
    raise typ(exc_fmt % str(val))
ERROR: getfolder() asked for a nonexisting folder 'Drafts'.
Traceback:
  File "/usr/local/Cellar/offline-imap/6.5.5/libexec/offlineimap/accounts.py", line 241, in syncrunner
    self.sync()
  File "/usr/local/Cellar/offline-imap/6.5.5/libexec/offlineimap/accounts.py", line 320, in sync
    localfolder = self.get_local_folder(remotefolder)
  File "/usr/local/Cellar/offline-imap/6.5.5/libexec/offlineimap/accounts.py", line 269, in get_local_folder
    replace(self.remoterepos.getsep(), self.localrepos.getsep()))
  File "/usr/local/Cellar/offline-imap/6.5.5/libexec/offlineimap/repository/Maildir.py", line 134, in getfolder
    OfflineImapError.ERROR.FOLDER)
My .offlineimaprc is:
[general]
accounts = gloriphobia
ui = TTYUI
pythonfile=~/Development/MuttMailPython/offline.py
fsync = False
[Account gloriphobia]
localrepository = gloriphobia_local
remoterepository = gloriphobia_remote
status_backend = sqlite
postsynchook = notmuch new
[Repository gloriphobia_local]
type = Maildir
localfolders = ~/.mail/Test
nametrans = get_remote_name
[Repository gloriphobia_remote]
maxconnections = 1
type = Gmail
cert_fingerprint = 89091347184d41768bfc0da9fad94bfe882dd358
remoteuser = myemailaddress
remotepasseval = get_keychain_pass(account="myemailaddress",server="imap.gmail.com")
realdelete = no
nametrans = get_local_name
folderfilter = is_included
My Python file, offline.py, is:
#!/usr/bin/python
import subprocess
import re

class NameMapping:
    def __init__(self, local_name, remote_name):
        self.local_name = local_name
        self.remote_name = remote_name

class LocalName:
    def __init__(self, folder):
        self.folder = folder

    def matches(self, mapping):
        return mapping.remote_name == self.folder

    def mapped_folder_name(self, mapping):
        return mapping.local_name

class RemoteName:
    def __init__(self, folder):
        self.folder = folder

    def matches(self, mapping):
        return mapping.local_name == self.folder

    def mapped_folder_name(self, mapping):
        return mapping.remote_name

def get_keychain_pass(account=None, server=None):
    params = {
        'security': '/usr/bin/security',
        'command': 'find-internet-password',
        'account': account,
        'server': server,
        'keychain': '/Users/mec07/Library/Keychains/login.keychain',
    }
    command = "sudo -u mec07 %(security)s -v %(command)s -g -a %(account)s -s %(server)s %(keychain)s" % params
    output = subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT)
    outtext = [l for l in output.splitlines()
               if l.startswith('password: ')][0]
    return re.match(r'password: "(.*)"', outtext).group(1)

def is_included(folder):
    result = True
    for pattern in exclusion_patterns:
        result = result and (re.search(pattern, folder) == None)
    return result

exclusion_patterns = [
    "efax",
    "earth_class_mail",
    "eventbrite",
    "gotomeeting",
    "moshi_monsters",
    "peepcode",
    "raini_fowl",
    "stuart_know",
    "training.*2008",
    "training.*2009",
    "training.*2010",
    "training.*2011",
    "training.*2012",
    "training.*nbdn",
    "training.*nothin_but_bdd",
    "unblock_us",
    "web_hosting",
    "webinars",
    "Gmail.*Important"
]

name_mappings = [
    NameMapping('inbox', '[Gmail]/Inbox'),
    NameMapping('starred', '[Gmail]/Starred'),
    NameMapping('important', '[Gmail]/Important'),
    NameMapping('sent', '[Gmail]/Sent Mail'),
    NameMapping('drafts', '[Gmail]/Drafts'),
    NameMapping('archive', '[Gmail]/All Mail'),
    NameMapping('spam', '[Gmail]/Spam'),
    NameMapping('flagged', '[Gmail]/Starred'),
    NameMapping('trash', '[Gmail]/Trash'),
    NameMapping('deleted', '[Gmail]/Deleted Items'),
    NameMapping('Mum', '[Gmail]/Jana'),
    NameMapping('Maggie', '[Gmail]/Maggie'),
    NameMapping('papers', '[Gmail]/Scholar Alert'),
    NameMapping('sent items', '[Gmail]/Sent Items'),
    NameMapping('sent messages', '[Gmail]/Sent Messages')
]

def find_name_mapping(name):
    default_mapping = NameMapping(name.folder, name.folder)
    for mapping in name_mappings:
        if (name.matches(mapping)):
            return mapping
    return default_mapping

def get_name_mapping(name):
    mapping = find_name_mapping(name)
    return name.mapped_folder_name(mapping)

def get_remote_name(local_folder_name):
    name = RemoteName(local_folder_name)
    return get_name_mapping(name)

def get_local_name(remote_folder_name):
    name = LocalName(remote_folder_name)
    return get_name_mapping(name)
Thanks in advance for your help!
Add the following folderfilter to skip the nonexistent 'Drafts' folder:
folderfilter = lambda folder: folder not in ['Drafts']
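Since the config above already sets folderfilter = is_included, an alternative is to fold the exclusion into that function in offline.py (a sketch, assuming the mappings shown earlier):

def is_included(folder):
    # Folders that do not exist on the remote side (per the error) are
    # skipped outright before applying the exclusion patterns.
    if folder in ['Drafts']:
        return False
    result = True
    for pattern in exclusion_patterns:
        result = result and (re.search(pattern, folder) == None)
    return result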
