I'm looking at parsing out just the most recent reply/message from an email thread as part of a zap.
I've found this link, but how do I use it within a Zap? https://github.com/zapier/email-reply-parser
i.e. when I pick up a thread from gmail how do I just extract the most recent message?
Is this possible in Code by Zapier and if so how?
E.g.
Input:
Yes that is fine, I will email you in the morning.
On Fri, Nov 16, 2012 at 1:48 PM, Zapier wrote:
Our support team just commented on your open Ticket:
"Hi Royce, can we chat in the morning about your question?"
Output: i.e. the parsed email:
Yes that is fine, I will email you in the morning.
First off: it's not possible to use that in a code step directly. Python code steps don't have access to external packages.
That said, that package is just Python code, and there's nothing stopping you copying all of the important code into the Code step and using it that way.
It's worth noting that the linked code is pretty old and looks to be unmaintained, so it's unlikely to work without modifications.
I had a go at adapting this https://github.com/zapier/email-reply-parser which seemed to work as well.
"""
email_reply_parser is a python library port of GitHub's Email Reply Parser.
For more information, visit https://github.com/zapier/email-reply-parser
"""
import re
class EmailReplyParser(object):
    """Facade for parsing an email body into its most-recent reply.

    Thin wrapper around EmailMessage (defined below); adapted from
    GitHub's Email Reply Parser port.
    """

    # FIX: the pasted source had the decorators mangled into comments
    # ("#staticmethod"); they are restored here so the class-level calls
    # below behave as intended.
    @staticmethod
    def read(text):
        """Factory method that splits an email into a list of fragments.

        text - A string email body

        Returns an EmailMessage instance.
        """
        return EmailMessage(text).read()

    @staticmethod
    def parse_reply(text):
        """Provide the reply portion of an email.

        text - A string email body

        Returns the reply body message (a string). Note `reply` is a
        property on EmailMessage, so it is accessed without parentheses.
        """
        return EmailReplyParser.read(text).reply
class EmailMessage(object):
    """An email message represents a parsed email body.

    The body is scanned bottom-up, line by line, and split into Fragment
    objects labelled as quoted text, headers, signature, or visible reply.
    """

    # Signature markers: "--", "__", "-<word>", or "Sent from my <device>".
    SIG_REGEX = re.compile(r'(--|__|-\w)|(^Sent from my (\w+\s*){1,3})')
    # Gmail-style quote header, e.g. "On Fri, Nov 16, 2012 ... wrote:".
    QUOTE_HDR_REGEX = re.compile('On.*wrote:$')
    # Lines starting with one or more ">" are quoted text.
    QUOTED_REGEX = re.compile(r'(>+)')
    # Outlook-style forwarded/reply header lines ("From:", "Sent:", ...).
    HEADER_REGEX = re.compile(r'^\*?(From|Sent|To|Subject):\*? .+')
    # Matches the LAST "On ... wrote:" header, even when it spans lines.
    _MULTI_QUOTE_HDR_REGEX = r'(?!On.*On\s.+?wrote:)(On\s(.+?)wrote:)'
    MULTI_QUOTE_HDR_REGEX = re.compile(_MULTI_QUOTE_HDR_REGEX, re.DOTALL | re.MULTILINE)
    MULTI_QUOTE_HDR_REGEX_MULTILINE = re.compile(_MULTI_QUOTE_HDR_REGEX, re.DOTALL)

    def __init__(self, text):
        self.fragments = []   # finished Fragment objects (reading order after read())
        self.fragment = None  # fragment currently being accumulated
        self.text = text.replace('\r\n', '\n')  # normalise line endings
        self.found_visible = False

    def read(self):
        """Create a fragment for each line and label it as a signature,
        quote, or hidden.

        Returns this EmailMessage instance.
        """
        self.found_visible = False

        # Collapse a multi-line "On ... wrote:" quote header onto one line
        # so the line-by-line scan below can recognise it.
        is_multi_quote_header = self.MULTI_QUOTE_HDR_REGEX_MULTILINE.search(self.text)
        if is_multi_quote_header:
            self.text = self.MULTI_QUOTE_HDR_REGEX.sub(
                is_multi_quote_header.groups()[0].replace('\n', ''), self.text)

        # Fix any outlook style replies, with the reply immediately above the
        # signature boundary line. See email_2_2.txt for an example.
        # BUG FIX: re.MULTILINE was previously passed as re.sub()'s 4th
        # positional argument, which is `count`, not `flags`; pass it as
        # flags= explicitly.
        self.text = re.sub('([^\n])(?=\n ?[_-]{7,})', '\\1\n', self.text,
                           flags=re.MULTILINE)

        # Scan bottom-up: quoted blocks are easiest to detect from the end.
        self.lines = self.text.split('\n')
        self.lines.reverse()
        for line in self.lines:
            self._scan_line(line)
        self._finish_fragment()
        self.fragments.reverse()
        return self

    # FIX: restored @property (was a mangled "#property" in the paste);
    # EmailReplyParser.parse_reply reads `.reply` as an attribute.
    @property
    def reply(self):
        """The visible reply: all fragments neither hidden nor quoted."""
        reply = []
        for f in self.fragments:
            if not (f.hidden or f.quoted):
                reply.append(f.content)
        return '\n'.join(reply)

    def _scan_line(self, line):
        """Review a line and attach it to the current fragment, or start a
        new one.

        line - a row of text from the email (lines arrive in reverse order)
        """
        is_quote_header = self.QUOTE_HDR_REGEX.match(line) is not None
        is_quoted = self.QUOTED_REGEX.match(line) is not None
        is_header = is_quote_header or self.HEADER_REGEX.match(line) is not None

        # A blank line directly below a signature marker closes that fragment.
        if self.fragment and len(line.strip()) == 0:
            if self.SIG_REGEX.match(self.fragment.lines[-1].strip()):
                self.fragment.signature = True
                self._finish_fragment()

        if self.fragment \
                and ((self.fragment.headers == is_header and self.fragment.quoted == is_quoted) or
                     (self.fragment.quoted and (is_quote_header or len(line.strip()) == 0))):
            self.fragment.lines.append(line)
        else:
            self._finish_fragment()
            self.fragment = Fragment(is_quoted, line, headers=is_header)

    def quote_header(self, line):
        """Determine whether a line is a quote header.

        NOTE(review): this matches the REVERSED line against 'On.*wrote:$',
        a leftover from the upstream library where the pattern itself was
        written reversed; it is never called in this adaptation — confirm
        before relying on it.
        """
        return self.QUOTE_HDR_REGEX.match(line[::-1]) is not None

    def _finish_fragment(self):
        """Finalise the fragment under construction and append it."""
        if self.fragment:
            self.fragment.finish()
            if self.fragment.headers:
                # Regardless of what's been seen to this point, if we encounter
                # a headers fragment, all the previous fragments should be
                # marked hidden and found_visible set to False.
                self.found_visible = False
                for f in self.fragments:
                    f.hidden = True
            if not self.found_visible:
                if self.fragment.quoted \
                        or self.fragment.headers \
                        or self.fragment.signature \
                        or (len(self.fragment.content.strip()) == 0):
                    self.fragment.hidden = True
                else:
                    self.found_visible = True
            self.fragments.append(self.fragment)
        self.fragment = None
class Fragment(object):
    """A Fragment is a part of an Email Message, labeling each part."""

    def __init__(self, quoted, first_line, headers=False):
        self.signature = False   # fragment ends in a signature marker
        self.headers = headers   # fragment is a quote/forward header block
        self.hidden = False      # excluded from the visible reply
        self.quoted = quoted     # fragment is ">"-quoted text
        self._content = None     # set by finish()
        self.lines = [first_line]

    def finish(self):
        """Freeze the fragment: join its lines (collected in reverse order)
        into a single content block and release the line buffer.
        """
        self.lines.reverse()
        self._content = '\n'.join(self.lines)
        self.lines = None

    # FIX: restored @property (was a mangled "#property" in the paste);
    # EmailMessage reads `fragment.content` as an attribute.
    @property
    def content(self):
        """The fragment's text with surrounding whitespace stripped."""
        return self._content.strip()
# Code-by-Zapier output: `input_data` is injected by the Code step; expose
# the parsed most-recent reply under the key 'emailstring'.
# NOTE(review): a bare top-level `return` is only valid because Zapier wraps
# this snippet in a function before executing it.
return {'emailstring': EmailReplyParser.parse_reply(input_data['body'])}
Related
I use the blogger2wordpress python script that Google released back in 2010 (https://code.google.com/archive/p/google-blog-converters-appengine/downloads), to convert a 95mb blogger export file to wordpress wxr format.
However, the script has this code:
#!/usr/bin/env python
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import logging
import re
import sys
import time
from xml.sax.saxutils import unescape
import BeautifulSoup
import gdata
from gdata import atom
import iso8601
import wordpress
__author__ = 'JJ Lueck (EMAIL#gmail.com)'
###########################
# Constants
###########################

BLOGGER_URL = 'http://www.blogger.com/'
BLOGGER_NS = 'http://www.blogger.com/atom/ns#'        # Blogger label category scheme
KIND_SCHEME = 'http://schemas.google.com/g/2005#kind'  # entry-kind category scheme

# Video embeds: rewrite raw player URLs into WordPress shortcodes.
YOUTUBE_RE = re.compile('http://www.youtube.com/v/([^&]+)&?.*')
YOUTUBE_FMT = r'[youtube=http://www.youtube.com/watch?v=\1]'
GOOGLEVIDEO_RE = re.compile('(http://video.google.com/googleplayer.swf.*)')
GOOGLEVIDEO_FMT = r'[googlevideo=\1]'
DAILYMOTION_RE = re.compile('http://www.dailymotion.com/swf/(.*)')
DAILYMOTION_FMT = r'[dailymotion id=\1]'

###########################
# Translation class
###########################
class Blogger2Wordpress(object):
    """Performs the translation of a Blogger export document to WordPress WXR.

    NOTE(review): Python 2 code — relies on str/bytes decode/encode round-trip
    and is driven by a __main__ block using print statements.
    """

    def __init__(self, doc):
        """Constructs a translator for a Blogger export file.

        Args:
          doc: The Blogger export file contents as a string
        """
        # Ensure UTF8 chars get through correctly by ensuring we have a
        # compliant UTF8 input doc.
        self.doc = doc.decode('utf-8', 'replace').encode('utf-8')

        # Read the incoming document as a GData Atom feed.
        self.feed = atom.FeedFromString(self.doc)
        self.next_id = 1  # monotonically increasing WXR item/comment id

    def Translate(self):
        """Performs the actual translation to WordPress WXR export format.

        Returns:
          A WordPress WXR export document as a string, or None on error.
        """
        # Create the top-level document and the channel associated with it.
        channel = wordpress.Channel(
            title = self.feed.title.text,
            link = self.feed.GetAlternateLink().href,
            base_blog_url = self.feed.GetAlternateLink().href,
            pubDate = self._ConvertPubDate(self.feed.updated.text))

        posts_map = {}  # Blogger post/page id -> wordpress.Item, for comment linking

        for entry in self.feed.entry:

            # Grab the information about the entry kind
            entry_kind = ""
            for category in entry.category:
                if category.scheme == KIND_SCHEME:
                    entry_kind = category.term

            if entry_kind.endswith("#comment"):
                # This entry will be a comment, grab the post that it goes to
                in_reply_to = entry.FindExtensions('in-reply-to')
                post_item = None
                # Check to see that the comment has a corresponding post entry
                if in_reply_to:
                    post_id = self._ParsePostId(in_reply_to[0].attributes['ref'])
                    post_item = posts_map.get(post_id, None)

                # Found the post for the comment, add the comment to it.
                # Comments whose post was not seen are silently dropped.
                if post_item:
                    # The author email may not be included in the file
                    author_email = ''
                    if entry.author[0].email:
                        author_email = entry.author[0].email.text

                    # Same for the author's url
                    author_url = ''
                    if entry.author[0].uri:
                        author_url = entry.author[0].uri.text

                    post_item.comments.append(wordpress.Comment(
                        comment_id = self._GetNextId(),
                        author = entry.author[0].name.text,
                        author_email = author_email,
                        author_url = author_url,
                        date = self._ConvertDate(entry.published.text),
                        content = self._ConvertContent(entry.content.text)))

            elif entry_kind.endswith('#post'):
                # This entry will be a post
                post_item = self._ConvertEntry(entry, False)
                posts_map[self._ParsePostId(entry.id.text)] = post_item
                channel.items.append(post_item)

            elif entry_kind.endswith('#page'):
                # This entry will be a static page
                page_item = self._ConvertEntry(entry, True)
                posts_map[self._ParsePageId(entry.id.text)] = page_item
                channel.items.append(page_item)

        wxr = wordpress.WordPressWxr(channel=channel)
        return wxr.WriteXml()

    def _ConvertEntry(self, entry, is_page):
        """Converts the contents of an Atom entry into a WXR post Item element."""
        # A post may have an empty title, in which case the text element is None.
        title = ''
        if entry.title.text:
            title = entry.title.text

        # Check here to see if the entry points to a draft or regular post
        status = 'publish'
        if entry.control and entry.control.draft:
            status = 'draft'

        # If no link is present in the Blogger entry, just link to the
        # Blogger home page instead.
        if entry.GetAlternateLink():
            link = entry.GetAlternateLink().href
        else:
            link = BLOGGER_URL

        # Declare whether this is a post or a page
        post_type = 'post'
        if is_page:
            post_type = 'page'

        blogger_blog = ''
        blogger_permalink = ''
        if entry.GetAlternateLink():
            blogger_path_full = entry.GetAlternateLink().href.replace('http://', '')
            blogger_blog = blogger_path_full.split('/')[0]
            blogger_permalink = blogger_path_full[len(blogger_blog):]

        # Create the actual item element
        post_item = wordpress.Item(
            title = title,
            link = link,
            pubDate = self._ConvertPubDate(entry.published.text),
            creator = entry.author[0].name.text,
            content = self._ConvertContent(entry.content.text),
            post_id = self._GetNextId(),
            post_date = self._ConvertDate(entry.published.text),
            status = status,
            post_type = post_type,
            blogger_blog = blogger_blog,
            blogger_permalink = blogger_permalink,
            blogger_author = entry.author[0].name.text)

        # Convert the categories which specify labels into wordpress labels
        for category in entry.category:
            if category.scheme == BLOGGER_NS:
                post_item.labels.append(category.term)

        return post_item

    def _ConvertContent(self, text):
        """Unescapes the post/comment text body and replaces video content.

        All <object> and <embed> tags in the post that relate to video must be
        changed into the WordPress tags for embedding video,
        e.g. [youtube=http://www.youtube.com/...]

        If no text is provided, the empty string is returned.
        """
        if not text:
            return ''

        # First unescape all XML tags as they'll be escaped by the XML emitter
        content = unescape(text)

        # Use an HTML parser on the body to look for video content
        content_tree = BeautifulSoup.BeautifulSoup(content)

        # Find the object tag
        objs = content_tree.findAll('object')
        for obj_tag in objs:
            # Find the param tag within which contains the URL to the movie
            param_tag = obj_tag.find('param', { 'name': 'movie' })
            if not param_tag:
                continue

            # Get the video URL
            video = param_tag.attrMap.get('value', None)
            if not video:
                continue

            # Convert the video URL if necessary
            video = YOUTUBE_RE.subn(YOUTUBE_FMT, video)[0]
            video = GOOGLEVIDEO_RE.subn(GOOGLEVIDEO_FMT, video)[0]
            video = DAILYMOTION_RE.subn(DAILYMOTION_FMT, video)[0]

            # Replace the portion of the contents with the video
            obj_tag.replaceWith(video)

        return str(content_tree)

    def _ConvertPubDate(self, date):
        """Translates to a pubDate element's time/date format."""
        date_tuple = iso8601.parse_date(date)
        return date_tuple.strftime('%a, %d %b %Y %H:%M:%S %z')

    def _ConvertDate(self, date):
        """Translates to a wordpress date element's time/date format."""
        date_tuple = iso8601.parse_date(date)
        return date_tuple.strftime('%Y-%m-%d %H:%M:%S')

    def _GetNextId(self):
        """Returns the next identifier to use in the export document as a string."""
        next_id = self.next_id;
        self.next_id += 1
        return str(next_id)

    def _ParsePostId(self, text):
        """Extracts the post identifier from a Blogger entry ID."""
        # NOTE(review): '\d' in a non-raw string works but triggers a
        # DeprecationWarning on Python 3; prefer r'post-(\d+)'.
        matcher = re.compile('post-(\d+)')
        matches = matcher.search(text)
        return matches.group(1)

    def _ParsePageId(self, text):
        """Extracts the page identifier from a Blogger entry ID."""
        matcher = re.compile('page-(\d+)')
        matches = matcher.search(text)
        return matches.group(1)
if __name__ == '__main__':
    # Command-line entry point (Python 2: note the print statements).
    if len(sys.argv) <= 1:
        print 'Usage: %s <blogger_export_file>' % os.path.basename(sys.argv[0])
        print
        print ' Outputs the converted WordPress export file to standard out.'
        sys.exit(-1)

    wp_xml_file = open(sys.argv[1])
    wp_xml_doc = wp_xml_file.read()
    translator = Blogger2Wordpress(wp_xml_doc)
    # The converted WXR document goes to stdout; redirect to capture it.
    print translator.Translate()
    wp_xml_file.close()
This scripts outputs the wxr file in the terminal window which is useless for me when the import file has tons of entries.
As I am not familiar with python, how can I modify the script to output the data into a .xml file?
Edit:
I did changed the end of the script to:
wp_xml_file = open(sys.argv[1])
wp_xml_doc = wp_xml_file.read()
translator = Blogger2Wordpress(wp_xml_doc)
print translator.Translate()
fh = open("testoutput.xml", "w")
fh.write(wp_xml_doc);  # note: this writes the ORIGINAL input, not the translated output
fh.close();
wp_xml_file.close()
But the produced file is an "invalid wxr file" :/
Can anybody help? Thanks!
Quick and dirty answer:
Output to the stdout is normal behaviour.
You might want to redirect it to a file for instance:
python2 blogger2wordpress your_blogger_export_file > backup
The output will be saved in the file named backup.
Or you can replace print translator.Translate() by
with open('output_file', 'w') as fd:
fd.write(translator.Translate())
This should do the trick (haven't tried).
We're trying to add a warning to every email we find suspicious, but once we do that - not all emails are showing as expected after the change: - on some EMails we ruin the styling and the message is displayed differently, while on others we see the warning at the top of the email (similar to Google warnings).
Are there any best practices for this action of adding a 'prefix' warning on the HTML part of the email? Do's/Don'ts ? Anything would be helpful!
Here's a snippet:
def replace_body(self, email_instance, message_id, **kwargs):
    """Prepend a warning prefix to the HTML body of an email message.

    Mutates `email_instance` (an email Message object) in place so its HTML
    part starts with the warning produced by self.generate_prefix(). When the
    message only has a plain-text body, an artificial HTML part is generated
    (wrapped in a multipart/alternative container when needed).

    Returns True in all handled cases, including the unsupported-type branch.
    """

    def _generate_html_part_from_plain(_part):
        # Build a minimal HTML rendering of a text/plain part.
        # NOTE(review): on Python 3, get_payload(decode=True) returns bytes,
        # which would break .replace('\r\n', '<br>'); this code appears to
        # assume Python 2 str payloads — confirm the runtime.
        _plain_text_body = _part.get_payload(decode=True)
        _content_charset = _part.get_content_charset()
        _artificial_html_body = '<div dir="ltr">{}</div>'.format(_plain_text_body.replace('\r\n', '<br>'))
        _html_part = MIMEText(_artificial_html_body, 'html', _content_charset)
        return _content_charset, _html_part

    body_prefix = self.generate_prefix(message_id, email_instance, kwargs)
    if not body_prefix:
        # NOTE(review): not an f-string — "{message_id}" is logged literally;
        # also uses bare log() while the rest uses self.log — confirm intended.
        log('prefix is empty, no need to update body (message={message_id})')
        return True

    if email_instance.is_multipart():
        payload = email_instance.get_payload()
        for part in payload:
            if part.get_content_subtype() == 'html':
                self.log.debug('email is \'multipart\' - attaching HTML part with warning (message={})'
                               .format(message_id))
                prefixed_part = self.attach_email_prefix(body_prefix, part, 'html', part.get_content_charset())
                # Remove and re-append so the prefixed HTML part becomes the
                # last (i.e. preferred) alternative.
                payload.remove(part)
                payload.append(prefixed_part)
                break
        # when no html part present (for-else: loop completed without break)
        else:
            for part in payload:
                if part.get_content_subtype() == 'plain':
                    self.log.debug('email is \'multipart\', but no \'html\' part found: '
                                   'generating and attaching HTML part with warning (message={})'
                                   .format(message_id))
                    content_charset, html_part = _generate_html_part_from_plain(part)
                    prefixed_part = self.attach_email_prefix(body_prefix, html_part, 'html', content_charset)
                    top_content_subtype = email_instance.get_content_subtype()
                    if top_content_subtype != 'alternative':
                        self.log.debug('email\'s top-content-subtype is {}, generating alternative part to hold'
                                       '\'plain\' and \'html\' parts, and attaching to email (message={})'
                                       .format(top_content_subtype, message_id))
                        alternative_part = MIMEMultipart('alternative')
                        new_plain_part = MIMEText(part.get_payload(decode=True), 'plain', content_charset)
                        alternative_part.set_payload([new_plain_part, prefixed_part])
                        payload.remove(part)
                        payload.append(alternative_part)
                    else:
                        payload.append(prefixed_part)
                    break
        email_instance.set_payload(payload)
    elif email_instance.get_content_subtype() == 'plain':
        self.log.debug('email is \'text/plain\', attaching artificial HTML part with warning (message={})'
                       .format(message_id))
        # plain_part = deepcopy(email_instance)
        content_charset, html_part = _generate_html_part_from_plain(email_instance)
        prefixed_part = self.attach_email_prefix(body_prefix, html_part, 'html', content_charset)
        email_instance.replace_header('Content-Type', 'multipart/alternative')
        new_plain_part = MIMEText(email_instance.get_payload(decode=True), 'plain', content_charset)
        email_instance.set_payload([new_plain_part, prefixed_part])
        # email_instance.attach(html_part)
    else:
        self.log.warning('failed to attach HTML warning (message={}, top-content-type={})'
                         .format(message_id, email_instance.get_content_type()))

    # NOTE(review): not an f-string ("{message_id}" logged literally), and this
    # success message is also logged after the failure branch above — confirm.
    log('prefix attached to message successfully (message={message_id})')
    return True
def attach_email_prefix(self, prefix, email_instance, subtype='html', charset='us-ascii'):
    """Prepend `prefix` to the body of `email_instance` when its content
    subtype matches `subtype`.

    The prefix and the existing payload are joined with '<br>' for html
    parts and '\r\n' for text parts. A part whose decoded payload is not a
    str is returned untouched (with a debug log entry). Returns None when
    the subtype does not match.
    """
    if email_instance.get_content_subtype() != subtype:
        return None

    decoded_payload = email_instance.get_payload(decode=True)
    if not isinstance(decoded_payload, str):
        self.log.debug(
            'skipping prefix attaching: Content payload\'s type is not str: {}'.format(decoded_payload))
        return email_instance

    joiner = {'html': '<br>', 'text': '\r\n'}[subtype]
    email_instance.set_payload('{}{}{}'.format(prefix, joiner, decoded_payload), charset)
    return email_instance
I am using the following code for tab completion in my program
import readline
import logging
import sys
# Completion attempts are logged to this file so tab behaviour can be
# inspected while readline has control of the terminal.
LOG_FILENAME = '/tmp/completer.log'
logging.basicConfig(filename=LOG_FILENAME,
                    level=logging.DEBUG,
                    )
class SimpleCompleter(object):
    """readline completion delegate over a fixed vocabulary.

    Candidates are computed once per new input (state == 0) and then handed
    back one at a time as readline increments `state`.
    """

    def __init__(self, options):
        self.options = sorted(options)

    def complete(self, text, state):
        response = None
        if state == 0:
            # First call for this input: (re)build the candidate list.
            if text:
                self.matches = list(filter(
                    lambda candidate: candidate and candidate.startswith(text),
                    self.options))
                logging.debug('Text: %s', text)
                logging.debug('%s matches: %s', repr(text), self.matches)
            else:
                # Empty input completes to every known option.
                self.matches = self.options[:]
                logging.debug('(empty input) matches: %s', self.matches)

        # Hand back the state'th candidate; None once the list is exhausted.
        try:
            response = self.matches[state]
        except IndexError:
            response = None
        logging.debug('complete(%s, %s) => %s',
                      repr(text), state, repr(response))
        return response
def input_loop():
    """Prompt repeatedly, echoing each entry, until the user types 'stop'.

    Note the 'stop' line itself is still echoed before the loop exits.
    """
    entered = None
    while entered != 'stop':
        entered = input('>')
        print('Dispatch %s' % entered)
# Register our completer function.
# NOTE(review): `account_names` is not defined anywhere in this snippet, and
# wrapping it in brackets ("+ [account_names]") nests a list inside the
# options instead of extending them — probably meant "+ account_names".
readline.set_completer(SimpleCompleter(['CREATE', 'INPUT', 'USERNAME', 'VIEW']+ [account_names]).complete)
# Use the tab key for completion
readline.parse_and_bind('tab: complete')
# Prompt the user for text
input_loop()
However the code accepts keywords in any order, how can I change it so that keywords are processed in a specific order. For example if the first keyword is CREATE next tab completion shouldn't find any matches and allow for user to input an account name. Similarly if the first keyword is INPUT next it should match USERNAME or PASSWORD and then account name. Is it possible to do this with readline?
readline.get_line_buffer() gets the whole buffered text from which you can create the matches as required.
I have class named ExcelFile, his job is to manage excel files (read, extract data, and differents things for the stack).
I want to implement a system for managing errors/exceptions.
For example, ExcelFile as a method load(), like a "setup"
def load(self):
    """
    Setup for excel file.
    Load workbook, worksheet and other characteristics (data lines, header...)

    :return: Setup successfully or not
    :rtype: bool

    Current usage
    :Example:
    > excefile = ExcelFile('test.xls')
    > excefile.load()
    True
    > excefile.nb_rows()
    4
    """
    # May raise xlrd.XLRDError when the file is not a valid Excel workbook.
    self.workbook = xlrd.open_workbook(self.url)
    self.sheet = self.workbook.sheet_by_index(0)
    self.header_row_index = self.get_header_row_index()
    # FIX: compare with `is None` instead of `== None` (PEP 8).
    if self.header_row_index is None:  # If file doesn't have header (or not valid)
        return False
    self.header_fields = self.sheet.row_values(self.header_row_index)
    self.header_fields_col_ids = self.get_col_ids(self.header_fields)  # Mapping between header fields and col ids
    self.nb_rows = self.count_rows()
    self.row_start_data = self.header_row_index + self.HEADER_ROWS
    return True
As you can see, I can encounter 2 differents errors:
The file is not an excel file (raise xlrd.XLRDError)
The file has an invalid header (so I return False)
I want to implement a good management system of ExcelFile errors, because this class is used a lot in the stack.
This is my first idea for processing that :
Implement a standard exception
class ExcelFileException(Exception):
    """Application-level error raised while loading/parsing an Excel file.

    `type` optionally records the name of the underlying exception class
    (e.g. 'XLRDError') so callers can branch on the failure cause.
    """

    def __init__(self, message, type=None):
        self.message = message
        self.type = type

    def __str__(self):
        label = self.__class__.__name__
        return "{} : {} ({})".format(label, self.message, self.type)
Rewrite load method
def load(self):
    """
    Setup for excel file.
    Load workbook, worksheet and other characteristics (data lines, header...)

    :return: Setup successfully or not
    :rtype: bool
    :raises ExcelFileException: when the file is not a supported Excel
        workbook, or when its header row is missing/invalid

    Current usage
    :Example:
    > excefile = ExcelFile('test.xls')
    > excefile.load()
    True
    > excefile.nb_rows()
    4
    """
    try:
        self.workbook = xlrd.open_workbook(self.url)
    except xlrd.XLRDError as e:
        # Surface the original exception class name so callers can branch on it.
        raise ExcelFileException("Unsupported file type", e.__class__.__name__)
    self.sheet = self.workbook.sheet_by_index(0)
    self.header_row_index = self.get_header_row_index()
    # FIX: compare with `is None` instead of `== None` (PEP 8).
    if self.header_row_index is None:  # If file doesn't have header (or not valid)
        raise ExcelFileException("Invalid or empty header")
    self.header_fields = self.sheet.row_values(self.header_row_index)
    self.header_fields_col_ids = self.get_col_ids(self.header_fields)  # Mapping between header fields and col ids
    self.nb_rows = self.count_rows()
    self.row_start_data = self.header_row_index + self.HEADER_ROWS
    return True
And this an example in a calling method, a big problem is I have to manage a dict named "report" with errors in french, for customers success and other.
...
def foo():
...
file = ExcelFile(location)
try:
file.load()
except ExcelFileException as e:
log.warn(e.__str__())
if e.type == 'XLRDError':
self.report['errors'] = 'Long description of the error, in french (error is about invalid file type)'
else:
self.report['errors'] = 'Long description of the error, in french (error is about invalid header)'
...
What do you think about that ? Do you have a better way ?
Thank you
You could change your exception to log the errors in your dict:
class ExcelFileException(Exception):
    """Excel-file error that also records its message in a shared report dict.

    Constructing the exception appends `message` to report['errors'], so
    the caller's report is populated as a side effect of raising.
    """

    def __init__(self, message, report, type=None):
        report['errors'].append(message)
        self.message = message
        self.type = type

    def __str__(self):
        name = self.__class__.__name__
        return "{} : {} ({})".format(name, self.message, self.type)
When you will raise an exception:
raise ExcelFileException("Invalid or empty header", report)
The errors will be present in self.dictionnary['errors']
Also, the error can be fixed by installing a missing optional dependency, xlrd:
pip install Xlrd
More available python packages when working with excel
(The same question is available on Stack Overflow, but it didn't help me because it used a different function.)
API Documentation
Hello, I am trying to implement Opensubtitle API with Python. I prefer trying to search subtitle file with hash, because it's accurate.
As I have never used xmlrpc before and am quite new to using APIs, I had to study to make it work. But I am stuck at the final point. My program is returning Status 200 (OK), but the 'data' array is blank. I think I am doing something wrong with the parameter passing. The code is here:
from xmlrpclib import ServerProxy
import hashCheck, os
# XML-RPC endpoint of the OpenSubtitles API.
server = 'http://api.opensubtitles.org/xml-rpc'
class MainEngine(object):
    """Minimal OpenSubtitles XML-RPC client: log in anonymously, then search
    subtitles for a hard-coded movie file by hash and byte size.

    NOTE(review): Python 2 code (`print Obj`; xmlrpclib import above) and it
    depends on a local `hashCheck` module not shown here.
    """

    def __init__(self, language="None"):
        # NOTE(review): `language` and the local `user_agent` are never used.
        self.rpc = ServerProxy(server, allow_none=True)
        user_agent = 'OSTestUserAgentTemp'
        self.Start()

    def getToken(self):
        """Log in anonymously and cache/return the session token."""
        self.logindata = self.rpc.LogIn(None, None, "en", "OSTestUserAgentTemp")
        self.Token = self.logindata["token"]
        return self.Token

    def subSearch(self, path):
        """Search subtitles by movie hash and file size.

        NOTE(review): SearchSubtitles expects the token plus an array of
        structs (dicts with 'sublanguageid', 'moviehash', 'moviebytesize');
        here a plain list of bare values is sent — and the token is included
        both as the first argument and inside self.param — which is the
        likely reason the API answers 200 with an empty 'data' array.
        """
        self.hash = hashCheck.hash(self.path)
        token = self.getToken()
        self.param = [
            token,  # token
            [
                'eng',  # sublanguageid
                self.hash,  # hash
                os.path.getsize(path),  # byte size
            ]
        ]
        Obj = self.rpc.SearchSubtitles(token, self.param)
        print Obj

    def Start(self):
        # print server
        # NOTE(review): backslashes in this Windows path are taken literally
        # here, but a raw string (r"...") would be safer.
        self.path = "E:\Movies\English\Captain Phillips\Captain Phillips.mp4"
        self.subSearch(self.path)
def main():
    # Constructing MainEngine triggers the whole flow via self.Start().
    MainEngine()

if __name__ == '__main__':
    main()