Python for/while loop

Today I am working on a project where incoming phone calls are transcribed and saved into text files, but I am also fairly new to Python and Python loops.
I want to loop over a SQL Server column and run each row (all of the phone call OIDs) through the Azure Speech to Text service I use. I have been stuck on this problem for a couple of days now, so I thought I might find some help here.
import azure.cognitiveservices.speech as speechsdk
import time
from os import path
from pydub import AudioSegment
import requests
import hashlib
import sys
import os.path
import pyodbc
databaseName = '*'
username = '*'
password = '*'
server = '*'
driver = '*'
try:
    CONNECTION_STRING = 'DRIVER='+driver+';SERVER='+server+';DATABASE='+databaseName+';UID='+username+';PWD='+ password
    conn = pyodbc.connect(CONNECTION_STRING)
    cursor = conn.cursor()
    storedproc = "* = *'"
    cursor.execute(storedproc)
    row = cursor.fetchone()
    while row:
        array = [(int(row[1]))]
        row = cursor.fetchone()
    i = 0
    while i < len(array):
        OID = (array[i])
        i = i + 1
        print(OID)
    string = f"{OID}*"
    encoded = string.encode()
    result = hashlib.sha256(encoded)
    resultHash = (result.hexdigest())
    Telefoongesprek = requests.get(f"*{OID}", headers={f"api-key": f"{resultHash}"})
    with open("Telefoongesprek.mp3", "wb") as f:
        f.write(Telefoongesprek.content)
    src = "Telefoongesprek.mp3"
    dst = "Telefoongesprek.wav"
    sound = AudioSegment.from_file(src)
    sound.export(dst, format="wav")

    def speech_recognize_continuous_from_file():
        speech_config = speechsdk.SpeechConfig(subscription="*", region="*")
        speech_config.speech_recognition_language = "nl-NL"
        audio_config = speechsdk.audio.AudioConfig(filename="Telefoongesprek.wav")
        speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config, audio_config=audio_config)

        done = False

        def stop_cb(evt):
            print('CLOSING on {}'.format(evt))
            nonlocal done
            done = True

        all_results = []

        def handle_final_result(evt):
            all_results.append(evt.result.text)

        speech_recognizer.recognized.connect(handle_final_result)
        speech_recognizer.session_started.connect(handle_final_result)
        speech_recognizer.session_stopped.connect(handle_final_result)
        speech_recognizer.canceled.connect(handle_final_result)
        speech_recognizer.session_stopped.connect(stop_cb)
        speech_recognizer.canceled.connect(stop_cb)

        speech_recognizer.start_continuous_recognition()
        while not done:
            time.sleep(.5)
        speech_recognizer.stop_continuous_recognition()

        print(all_results)
        telefoongesprek = str(all_results)
        filename = f"C:\\Users\\Beau\\Contact-verkeer\\contact-verkeer\\telefoon\\STT Transcriptions\\Telefoongesprek#{OID}.txt"
        file = open(filename, "w")
        file.write(telefoongesprek)
        file.close()

    speech_recognize_continuous_from_file()
    cursor.close()
    del cursor
    conn.close()
except Exception as e:
    print("Error: %s" % e)
Everything works separately, but I just don't know where to place the loop and which one I should use (a for or a while loop). Right here I'm trying to loop over an array, but I don't think this is correct.
Error message: Decoding failed. ffmpeg returned error code: 1
[mp3 # 000001cb8c57e0o0] Failed to read frame size: could not seek to 1073.
which I am pretty sure means that my Azure function can't find an mp3 file, which means that the mp3-to-wav conversion doesn't work.
Thanks in advance!

If I understand your question, you have a database with lots of phone call details. One of the field values in each row is used to create the associated mp3 file. You want to do speech to text using Azure on each of the mp3 files referenced in your database.
So you can do it in two ways:
1. Iterate through all rows in the database and save all the associated files into a folder on the local disk, with the OID as the filename. Then write another loop that iterates through this folder and sends the files for transcription to the Azure Speech to Text service (a sketch of this two-pass approach is shown right below).
2. Do everything in a single loop, the way you have shown, which will require some corrections.
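A rough sketch of the first approach, reusing your cursor and requests download (download_dir and make_hash are hypothetical names standing in for the output folder and your sha256 step; speech_recognize_continuous_from_file is the function from the modified code further below):
import os

download_dir = "calls"  # hypothetical folder for the downloaded audio
os.makedirs(download_dir, exist_ok=True)

# First loop: save one mp3 per row, named after its OID
row = cursor.fetchone()
while row:
    oid = int(row[1])
    response = requests.get(f"*{oid}", headers={"api-key": make_hash(oid)})
    with open(os.path.join(download_dir, f"{oid}.mp3"), "wb") as f:
        f.write(response.content)
    row = cursor.fetchone()

# Second loop: send every saved file for transcription
for name in os.listdir(download_dir):
    if name.endswith(".mp3"):
        speech_recognize_continuous_from_file(os.path.join(download_dir, name))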
Ok, so now that part is clear, we can go into the speech to text part. Azure allows you to send a compressed format for transcription, which means you actually don't need to convert it into a wav file.
Please have a look at the modified code below with the changes:
# code snippet borrowed from azure samples
def speech_recognize_continuous_from_file(filename):

    class BinaryFileReaderCallback(speechsdk.audio.PullAudioInputStreamCallback):
        def __init__(self, filename: str):
            super().__init__()
            self._file_h = open(filename, "rb")

        def read(self, buffer: memoryview) -> int:
            try:
                size = buffer.nbytes
                frames = self._file_h.read(size)
                buffer[:len(frames)] = frames
                return len(frames)
            except Exception as ex:
                print('Exception in `read`: {}'.format(ex))
                raise

        def close(self) -> None:
            print('closing file')
            try:
                self._file_h.close()
            except Exception as ex:
                print('Exception in `close`: {}'.format(ex))
                raise

    # Creates an audio stream format. For an example we are using MP3 compressed file here
    compressed_format = speechsdk.audio.AudioStreamFormat(compressed_stream_format=speechsdk.AudioStreamContainerFormat.MP3)
    callback = BinaryFileReaderCallback(filename=filename)
    stream = speechsdk.audio.PullAudioInputStream(stream_format=compressed_format, pull_stream_callback=callback)
    speech_config = speechsdk.SpeechConfig(subscription="*", region="*")
    speech_config.speech_recognition_language = "nl-NL"
    audio_config = speechsdk.audio.AudioConfig(stream=stream)

    # Creates a speech recognizer using a stream as audio input, also specify the speech language
    speech_recognizer = speechsdk.SpeechRecognizer(speech_config, audio_config)

    done = False

    def stop_cb(evt):
        print('CLOSING on {}'.format(evt))
        nonlocal done
        done = True

    all_results = []

    def handle_final_result(evt):
        all_results.append(evt.result.text)

    speech_recognizer.recognized.connect(handle_final_result)
    speech_recognizer.session_started.connect(handle_final_result)
    speech_recognizer.session_stopped.connect(handle_final_result)
    speech_recognizer.canceled.connect(handle_final_result)
    speech_recognizer.session_stopped.connect(stop_cb)
    speech_recognizer.canceled.connect(stop_cb)

    speech_recognizer.start_continuous_recognition()
    while not done:
        time.sleep(.5)
    speech_recognizer.stop_continuous_recognition()

    print(all_results)
    telefoongesprek = str(all_results)
    filename = f"C:\\Users\\Beau\\Contact-verkeer\\contact-verkeer\\telefoon\\STT Transcriptions\\Telefoongesprek#{OID}.txt"
    file = open(filename, "w")
    file.write(telefoongesprek)
    file.close()
try:
    CONNECTION_STRING = 'DRIVER='+driver+';SERVER='+server+';DATABASE='+databaseName+';UID='+username+';PWD='+ password
    conn = pyodbc.connect(CONNECTION_STRING)
    cursor = conn.cursor()
    storedproc = "* = *'"
    cursor.execute(storedproc)
    row = cursor.fetchone()
    # loop through the rows
    while row:
        array = [(int(row[1]))]
        i = 0
        while i < len(array):
            OID = (array[i])
            i = i + 1
            print(OID)
            string = f"{OID}*"
            encoded = string.encode()
            result = hashlib.sha256(encoded)
            resultHash = (result.hexdigest())
            telefoongesprek_response = requests.get(f"*{OID}", headers={f"api-key": f"{resultHash}"})
            # save the file to local disk as mp3
            with open("Telefoongesprek.mp3", "wb") as f:
                f.write(telefoongesprek_response.content)
            # do the speech to text on the mp3 file
            speech_recognize_continuous_from_file(f.name)
        # fetch the next row
        row = cursor.fetchone()
    cursor.close()
    del cursor
    conn.close()
except Exception as e:
    print("Error: %s" % e)
I haven't tested this full code as I don't have the DB connection with me. Please feel free to modify it for your use case and let me know if you have any issues.

Related

How to fix AWS Lambda function logs error?

I'm trying to extract AWS Trusted Advisor data through a Lambda function (triggered by an event scheduler) and upload it to S3. However, some part of the function throws an error. Below is my code:
##libraries
import boto3
import os
import csv
from csv import DictWriter
import time
import traceback

## bucket_name is set as env variable
bucket_name = "test-ta-reports"
fail_msg = 'Pulling Trusted Advisor data failed'
Filename = "/tmp/checks_list.csv"
obj_name = time.strftime("%Y-%m-%d-%H-%M-%S") + '/' + '.csv'

##upload to s3
def s3_upload(bucket_name, Filename, obj_name):
    if obj_name is None:
        obj_name = os.path.basename(Filename)
    try:
        s3 = boto3.client("s3", region_name="eu-west-1")
        response = s3.upload_file(Filename, bucket_name, obj_name)
        return True
    except:
        print('Data failed to upload to bucket')
        traceback.print_exc()
        return False

def lambda_handler(event, context):
    try:
        support_client = boto3.client('support', region_name='us-east-1')
        ta_checks = support_client.describe_trusted_advisor_checks(language='en')
        checks_list = {ctgs: [] for ctgs in list(set([checks['category'] for checks in ta_checks['checks']]))}
        for checks in ta_checks['checks']:
            print('Getting check:' + checks['name'] + checks['category'])
            try:
                check_summary = support_client.describe_trusted_advisor_check_summaries(
                    checkIds=[checks['id']])['summaries'][0]
                if check_summary['status'] != 'not_available':
                    checks_list[checks['category']].append(
                        [checks['name'], check_summary['status'],
                         str(check_summary['resourcesSummary']['resourcesProcessed']),
                         str(check_summary['resourcesSummary']['resourcesFlagged']),
                         str(check_summary['resourcesSummary']['resourcesSuppressed']),
                         str(check_summary['resourcesSummary']['resourcesIgnored'])
                         ])
                else:
                    print("unable to append checks")
            except:
                print('Failed to get check: ' + checks['name'])
                traceback.print_exc()
    except:
        print('Failed! Debug further.')
        traceback.print_exc()
    ##rewrite dict to csv
    with open('/tmp/checks_list.csv', 'w', newline='') as csvfile:
        csv_writer = DictWriter(csvfile, fieldnames=['status','hasFlaggedResources','timestamp','resourcesSummary','categorySpecificSummary', 'checkId'])
        csv_writer.writeheader()
        csv_writer.writerow(check_summary)
    return checks_list
    if s3_upload(bucket_name, Filename, obj_name):
        print("Successfully uploaded")

if __name__ == '__main__':
    lambda_handler(event, context)
The error logs
unable to append checks
I'm new to Python, so I'm unsure how to check for traceback stacks under the else: statement. Is there any way to modify this code to get traceback logs for the append block? Also, have I made any error in the above code? I'm unable to figure it out. Please help.
response = client.describe_trusted_advisor_check_summaries(
    checkIds=[
        'string',
    ]
)
describe_trusted_advisor_check_summaries() returns summarized results for one or more Trusted Advisor checks. Here you are checking whether check_summary['status'] is not equal to not_available, i.e. the alert status of the check is either "ok" (green), "warning" (yellow), or "error" (red), and in that case you are appending resourcesProcessed, resourcesFlagged, resourcesSuppressed, and resourcesIgnored to checks_list for further processing.
It's printing
unable to append checks
just because the status of the check is not_available. It is not an error log. You just need to decide what to do when the check status is not_available.
See the documentation of describe_trusted_advisor_check_summaries. https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/support.html#Support.Client.describe_trusted_advisor_check_summaries
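For example, a minimal sketch of one way to handle that case, reusing the ta_checks, support_client, and checks_list variables from your code (skipped_checks is a hypothetical addition, and simply recording the skipped check is just one option):
skipped_checks = []  # hypothetical: collect checks Trusted Advisor has no data for

for checks in ta_checks['checks']:
    check_summary = support_client.describe_trusted_advisor_check_summaries(
        checkIds=[checks['id']])['summaries'][0]
    if check_summary['status'] != 'not_available':
        checks_list[checks['category']].append([checks['name'], check_summary['status']])
    else:
        # not an error: Trusted Advisor simply has no data for this check yet
        skipped_checks.append(checks['name'])
        print('No data available for check: ' + checks['name'])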

How to apply changes to ontology saved on SQLite Database?

Every time I create a new instance in my ontology, something goes wrong if I try to read from the same database again.
P.S. - these are all part of different views in Django.
This is how I am adding instances to my ontology:
# OWLREADY2
try:
    myworld = World(filename='backup.db', exclusive=False)
    kiposcrum = myworld.get_ontology(os.path.dirname(__file__) + '/kipo.owl').load()
except:
    print("Error opening ontology")

# Sync
#--------------------------------------------------------------------------
sync_reasoner()
seed = str(time.time())
id_unico = faz_id(seed)
try:
    with kiposcrum:
        # here I am creating my instance, these are all strings I got from the user
        kiposcrum[input_classe](input_nome + id_unico)
        if input_observacao != "":
            kiposcrum[input_nome + id_unico].Observacao.append(input_observacao)
        sync_reasoner()
    status = "OK!"
    myworld.close()
    myworld.save()
except:
    print("Mistakes were made!")
    status = "Error!"
    input_nome = "Mistakes were made!"
    input_classe = "Mistakes were made!"
finally:
    print(input_nome + " " + id_unico)
    print(input_classe)
    print(status)
This is how I am reading stuff from it:
# OWLREADY2
try:
    myworld = World(filename='backup.db', exclusive=False)
    kiposcrum = myworld.get_ontology(os.path.dirname(__file__) + '/kipo_fialho.owl').load()
except:
    print("Error")

sync_reasoner()
try:
    with kiposcrum:
        num_inst = 0
        # gets a list of properties given an instance informed by the user
        propriedades = kiposcrum[instancia].get_properties()
        num_prop = len(propriedades)
    myworld.close()
I am 100% able to read from my ontology, but if I try to create an instance and then try to read the database again, something goes wrong.

How can I get my Python Code to restart when the network disconnects

I have a piece of Python code running as a service that pulls weather data via an API.
The code itself runs perfectly fine when everything is hunky dory, i.e. when the network is up, but I have noticed that sometimes the WiFi on the Pi that is pulling the API data will drop, and then the Python code seems to stop.
I have a small line of code providing the most basic of logs, but I would like to improve upon it greatly. The log code just writes datetime.now, so I can see when the last time the code ran was.
#!/usr/bin/python3
#import modules
import cymysql
from time import sleep
from urllib.request import urlopen
import json
import datetime

#set MySQl Variables
host = "localhost"
user = "xxx"
password = "xxx"
schema = "xxx"

#connect to MySQL DB
db = cymysql.connect(host, user, password, schema)
curs = db.cursor()

#set api key for DarkSky API
apikey="xxx"
# Latitude & longitude
lati="-26.20227"
longi="28.04363"
# Add units=si to get it in sensible ISO units.
url="https://api.forecast.io/forecast/"+apikey+"/"+lati+","+longi+"?units=si"

#begin infinite loop
while True:
    #convert API reading to json and readable array 'weather'
    meteo=urlopen(url).read()
    meteo = meteo.decode('utf-8')
    weather = json.loads(meteo)
    #set variables for current weather
    cTemp = (weather['currently']['temperature'])
    cCond = (weather['currently']['summary'])
    cRain1 = (weather['currently']['precipProbability'])
    cRain2 = cRain1*100
    cIcon = (weather['currently']['icon'])
    oaSum = (weather['daily']['summary'])
    #print variables - for testing purposes
    #print (cTemp)
    #print (cCond)
    #print (cRain2)
    #print (cIcon)
    #print (oaSum)
    #extract daily data from 'weather' array
    daily = (weather['daily']['data'])
    #create new arrays for daily variables
    listHigh = []
    listLow = []
    listCond = []
    listRain = []
    listIcon = []
    #set daily variables
    for i in daily:
        listHigh.append(i['temperatureHigh'])
    for i in range(0,len(listHigh)):
        high1 = listHigh[0]
        high2 = listHigh[1]
        high3 = listHigh[2]
        high4 = listHigh[3]
        high5 = listHigh[4]
        high6 = listHigh[5]
        high7 = listHigh[6]
        high8 = listHigh[7]
    for o in daily:
        listLow.append(o['temperatureLow'])
    for o in range(0,len(listLow)):
        low1 = listLow[0]
        low2 = listLow[1]
        low3 = listLow[2]
        low4 = listLow[3]
        low5 = listLow[4]
        low6 = listLow[5]
        low7 = listLow[6]
        low8 = listLow[7]
    for p in daily:
        listCond.append(p['summary'])
    for p in range(0,len(listCond)):
        cond1 = listCond[0]
        cond2 = listCond[1]
        cond3 = listCond[2]
        cond4 = listCond[3]
        cond5 = listCond[4]
        cond6 = listCond[5]
        cond7 = listCond[6]
        cond8 = listCond[7]
    for m in daily:
        listRain.append(m['precipProbability'])
    for m in range(0,len(listRain)):
        rain1 = listRain[0]
        rain2 = listRain[1]
        rain3 = listRain[2]
        rain4 = listRain[3]
        rain5 = listRain[4]
        rain6 = listRain[5]
        rain7 = listRain[6]
        rain8 = listRain[7]
    #convert rain chance to readable percentage
    prain1 = rain1*100
    prain2 = rain2*100
    prain3 = rain3*100
    prain4 = rain4*100
    prain5 = rain5*100
    prain6 = rain6*100
    prain7 = rain7*100
    prain8 = rain8*100
    for l in daily:
        listIcon.append(l['icon'])
    for l in range (0,len(listIcon)):
        icon1 = listIcon[0]
        icon2 = listIcon[1]
        icon3 = listIcon[2]
        icon4 = listIcon[3]
        icon5 = listIcon[4]
        icon6 = listIcon[5]
        icon7 = listIcon[6]
        icon8 = listIcon[7]
    #print daily variables - for testing purposes
    #print (high1)
    #print (low1)
    #print (cond1)
    #print (prain1)
    #print (icon1)
    #print (high2)
    #print (low2)
    #print (cond2)
    #print (prain2)
    #print (icon2)
    #update data in DataBase
    try:
        sql_update_query = """UPDATE weather SET current_temp = %s, cur$
        varis = (cTemp, cCond, cRain2, cIcon, high1, low1, cond1, prain$
        curs.execute(sql_update_query, varis)
        db.commit()
    except db.Error as error:
        print("Error: {}".format(error))
        db.rollback()
    #write date to log file
    with open ("/home/pi/CoRo/Projects/WeatherMan/weatherlog.txt", mode="w") as file:
        file.write('Last Data was pulled at: %s' %(datetime.datetime.now()))
    #set loop to sleep for 10 minutes and go again
    sleep(600)
I understand that the database code is snipped, but it is just the variables being put into the database, which I can see works.
However, if the network disconnects, the code stops and the database is left with the last polled API data.
How would I restart the Python code if the API get fails?
Thanks in advance,
You could rewrite the portion of your code that pulls the weather data as a function or separate module. This would allow you to call it only when the network connection is working. Some pseudo code below:
if network_connection:
    pull_weather_data()
else:
    do_something()
do_something() could be an effort to reconnect to the network, such as resetting your network adapter.
You could determine the state of the network connection by trying to ping your router or an external IP like one of Google's DNS servers (8.8.8.8 or 8.8.4.4).
To avoid nested loops you could use the continue statement. For example:
while True:
    if network_connection:
        pull_weather_data()
    else:
        reset_network_connection()
        time.sleep(180) # Sleep for 3 minutes.
        continue
The continue will send the interpreter back to the start of the while loop. From there it will check the network connection and either pull data or reset the network connection and sleep for another 3 minutes.
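Alternatively, instead of probing the network first, you could let the request itself fail and retry: urlopen raises urllib.error.URLError when the network is down, so you can catch that, sleep, and go back to the top of the loop. A minimal sketch, assuming the same url variable as in your script:
from urllib.error import URLError
from urllib.request import urlopen
from time import sleep

while True:
    try:
        meteo = urlopen(url).read().decode('utf-8')
    except URLError:
        # network (or DNS) is down: wait 3 minutes and try again
        sleep(180)
        continue
    # ...parse 'weather' and update the database as before...
    sleep(600)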
Using Quernon's answer above, the code has been edited as follows:
#!/usr/bin/python3
#import modules
import os
import cymysql
from time import sleep
from urllib.request import urlopen
import json
import datetime

#set MySQl Variables
host = "localhost"
user = "xxx"
password = "xxx"
schema = "xxx"

#connect to MySQL DB
db = cymysql.connect(host, user, password, schema)
curs = db.cursor()

#set api key for DarkSky API
apikey="xxx"
# Latitude & longitude
lati="-26.20227"
longi="28.04363"
# Add units=si to get it in sensible ISO units, not Fahrenheit.
url="https://api.forecast.io/forecast/"+apikey+"/"+lati+","+longi+"?units=si"

#begin infinite loop
while True:
    #function to check if there is an internet connection
    def check_ping():
        hostname = "8.8.8.8"
        response = os.system("ping -c 1 " + hostname)
        #and then check the response...
        if response == 0:
            pingstatus = 0
        else:
            pingstatus = 1
        return pingstatus

    networkstatus = check_ping()
    #print check_ping() - for testing purposes
    #print (networkstatus)

    #function to pull weather data from API
    def get_weather():
        #insert weather data code here with no changes
        pass

    if networkstatus == 0:
        get_weather()
    else:
        print ("Resetting Network Adapters")
        dwnnw = 'ifconfig wlan0 down'
        upnw = 'ifconfig wlan0 up'
        os.system(dwnnw)
        os.system(upnw)
        sleep(180)
        continue

Python: IOError 110 Connection timed out when reading from disk

I'm running a Python script on a Sun Grid Engine supercomputing cluster that reads in a list of file ids, sends each to a worker process for analysis, and writes one output per input file to disk.
The trouble is I'm getting IOError(110, 'Connection timed out') somewhere inside the worker function, and I'm not sure why. I've received this error in the past when making network requests that were severely delayed, but in this case the worker is only trying to read data from disk.
My question is: what would cause a "Connection timed out" error when reading from disk, and how can one resolve it? Any help others can offer would be very much appreciated.
Full script (the IOError crops up in minhash_text()):
from datasketch import MinHash
from multiprocessing import Pool
from collections import defaultdict
from nltk import ngrams
import json
import sys
import codecs
import config

cores = 24
window_len = 12
step = 4
worker_files = 50
permutations = 256
hashband_len = 4

def minhash_text(args):
    '''Return a list of hashband strings for an input doc'''
    try:
        file_id, path = args
        with codecs.open(path, 'r', 'utf8') as f:
            f = f.read()
        all_hashbands = []
        for window_idx, window in enumerate(ngrams(f.split(), window_len)):
            window_hashbands = []
            if window_idx % step != 0:
                continue
            minhash = MinHash(num_perm=permutations, seed=1)
            for ngram in set(ngrams(' '.join(window), 3)):
                minhash.update( ''.join(ngram).encode('utf8') )
            hashband_vals = []
            for i in minhash.hashvalues:
                hashband_vals.append(i)
                if len(hashband_vals) == hashband_len:
                    window_hashbands.append( '.'.join([str(j) for j in hashband_vals]) )
                    hashband_vals = []
            all_hashbands.append(window_hashbands)
        return {'file_id': file_id, 'hashbands': all_hashbands}
    except Exception as exc:
        print(' ! error occurred while processing', file_id, exc)
        return {'file_id': file_id, 'hashbands': []}

if __name__ == '__main__':
    file_ids = json.load(open('file_ids.json'))
    file_id_path_tuples = [(file_id, path) for file_id, path in file_ids.items()]
    worker_id = int(sys.argv[1])
    worker_ids = list(ngrams(file_id_path_tuples, worker_files))[worker_id]
    hashband_to_ids = defaultdict(list)
    pool = Pool(cores)
    for idx, result in enumerate(pool.imap(minhash_text, worker_ids)):
        print(' * processed', idx, 'results')
        file_id = result['file_id']
        hashbands = result['hashbands']
        for window_idx, window_hashbands in enumerate(hashbands):
            for hashband in window_hashbands:
                hashband_to_ids[hashband].append(file_id + '.' + str(window_idx))
    with open(config.out_dir + 'minhashes-' + str(worker_id) + '.json', 'w') as out:
        json.dump(dict(hashband_to_ids), out)
It turned out I was hammering the filesystem too hard, making too many concurrent read requests for files on the same server. That server could only allow a fixed number of reads in a given period, so any requests over that limit received a Connection Timed Out response.
The solution was to wrap each file read request in a while loop. Inside that while loop, try to read the appropriate file from disk. If the Connection timed out error springs up, sleep for a second and try again. The while loop is only broken once the file has been read.
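A minimal sketch of that retry loop (read_with_retry is a hypothetical helper name; in the script above it would replace the direct codecs.open call in minhash_text):
import time
import codecs

def read_with_retry(path, delay=1):
    '''Read a file, retrying until the overloaded fileserver accepts the request.'''
    while True:
        try:
            with codecs.open(path, 'r', 'utf8') as f:
                return f.read()  # success: returning breaks the retry loop
        except IOError:
            # Connection timed out (errno 110): back off and try again
            time.sleep(delay)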

"Not implemented" Exception when using pywin32 to control Adobe Acrobat

I have written a script in Python using pywin32 to save PDF files to text that, up until recently, was working fine. I use similar methods in Excel. The code is below:
def __pdf2Txt(self, pdf, fileformat="com.adobe.acrobat.accesstext"):
    outputLoc = os.path.dirname(pdf)
    outputLoc = os.path.join(outputLoc, os.path.splitext(os.path.basename(pdf))[0] + '.txt')
    try:
        win32com.client.gencache.EnsureModule('{E64169B3-3592-47d2-816E-602C5C13F328}', 0, 1, 1)
        adobe = win32com.client.DispatchEx('AcroExch.App')
        pdDoc = win32com.client.DispatchEx('AcroExch.PDDoc')
        pdDoc.Open(pdf)
        jObject = pdDoc.GetJSObject()
        jObject.SaveAs(outputLoc, "com.adobe.acrobat.accesstext")
    except:
        traceback.print_exc()
        return False
    finally:
        del jObject
        pdDoc.Close()
        del pdDoc
        adobe.Exit()
        del adobe
However, this code has suddenly stopped working and I get the following output:
Traceback (most recent call last):
  File "C:\Documents and Settings\ablishen\workspace\HooverKeyCreator\src\HooverKeyCreator.py", line 38, in __pdf2Txt
    jObject.SaveAs(outputLoc, "com.adobe.acrobat.accesstext")
  File "C:\Python27\lib\site-packages\win32com\client\dynamic.py", line 505, in __getattr__
    ret = self._oleobj_.Invoke(retEntry.dispid,0,invoke_type,1)
com_error: (-2147467263, 'Not implemented', None, None)
False
I have similar code written in VB that works correctly, so I'm guessing that it has something to do with the COM interfaces not binding to the appropriate functions correctly? (My COM knowledge is patchy.)
Blish, this thread holds the key to the solution you are looking for: https://mail.python.org/pipermail/python-win32/2002-March/000260.html
I admit that the post above is not the easiest to find (probably because Google scores it low based on the age of the content?).
Specifically, applying this piece of advice will get things running for you: https://mail.python.org/pipermail/python-win32/2002-March/000265.html
For reference, here is the complete piece of code that does not require you to manually patch dynamic.py (the snippet should run pretty much out of the box):
# gets all files under ROOT_INPUT_PATH with FILE_EXTENSION and tries to extract
# text from them into ROOT_OUTPUT_PATH with the same filename as the input file
# but with INPUT_FILE_EXTENSION replaced by OUTPUT_FILE_EXTENSION
from win32com.client import Dispatch
from win32com.client.dynamic import ERRORS_BAD_CONTEXT
import winerror

# try importing scandir and if found, use it as it's a few orders of magnitude faster than stock os.walk
try:
    from scandir import walk
except ImportError:
    from os import walk

import fnmatch
import sys
import os

ROOT_INPUT_PATH = None
ROOT_OUTPUT_PATH = None
INPUT_FILE_EXTENSION = "*.pdf"
OUTPUT_FILE_EXTENSION = ".txt"

def acrobat_extract_text(f_path, f_path_out, f_basename, f_ext):
    avDoc = Dispatch("AcroExch.AVDoc")  # Connect to Adobe Acrobat
    # Open the input file (as a pdf)
    ret = avDoc.Open(f_path, f_path)
    assert(ret)  # FIXME: Documentation says "-1 if the file was opened successfully, 0 otherwise", but this is a bool in practise?
    pdDoc = avDoc.GetPDDoc()
    dst = os.path.join(f_path_out, ''.join((f_basename, f_ext)))
    # Adobe documentation says "For that reason, you must rely on the documentation to know what functionality is available through the JSObject interface. For details, see the JavaScript for Acrobat API Reference"
    jsObject = pdDoc.GetJSObject()
    # Here you can save as many other types by using, for instance: "com.adobe.acrobat.xml"
    jsObject.SaveAs(dst, "com.adobe.acrobat.accesstext")
    pdDoc.Close()
    avDoc.Close(True)  # We want this to close Acrobat, as otherwise Acrobat is going to refuse processing any further files after a certain threshold of open files is reached (for example 50 PDFs)
    del pdDoc

if __name__ == "__main__":
    assert(5 == len(sys.argv)), sys.argv  # <script name>, <script_file_input_path>, <script_file_input_extension>, <script_file_output_path>, <script_file_output_extension>
    #$ python get.txt.from.multiple.pdf.py 'C:\input' '*.pdf' 'C:\output' '.txt'
    ROOT_INPUT_PATH = sys.argv[1]
    INPUT_FILE_EXTENSION = sys.argv[2]
    ROOT_OUTPUT_PATH = sys.argv[3]
    OUTPUT_FILE_EXTENSION = sys.argv[4]

    # tuples are of schema (path_to_file, filename)
    matching_files = ((os.path.join(_root, filename), os.path.splitext(filename)[0]) for _root, _dirs, _files in walk(ROOT_INPUT_PATH) for filename in fnmatch.filter(_files, INPUT_FILE_EXTENSION))

    # patch ERRORS_BAD_CONTEXT as per https://mail.python.org/pipermail/python-win32/2002-March/000265.html
    global ERRORS_BAD_CONTEXT
    ERRORS_BAD_CONTEXT.append(winerror.E_NOTIMPL)

    for filename_with_path, filename_without_extension in matching_files:
        print "Processing '{}'".format(filename_without_extension)
        acrobat_extract_text(filename_with_path, ROOT_OUTPUT_PATH, filename_without_extension, OUTPUT_FILE_EXTENSION)
I have tested this on WinPython x64 2.7.6.3, Acrobat X Pro
makepy.py is a script that comes with the win32com Python package.
Running it for your installation "wires" Python into the COM/OLE object in Windows. The following is an excerpt of some code I used to talk to Excel and do some stuff in it. This example gets the name of sheet 1 in the current workbook. It automatically runs makepy if it hits an exception:
import win32com;
import win32com.client;
from win32com.client import selecttlb;

def attachExcelCOM():
    makepyExe = r'python C:\Python25\Lib\site-packages\win32com\client\makepy.py';
    typeList = selecttlb.EnumTlbs();
    for tl in typeList:
        if (re.match('^Microsoft.*Excel.*', tl.desc, re.IGNORECASE)):
            makepyCmd = "%s -d \"%s\"" % (makepyExe, tl.desc);
            os.system(makepyCmd);
        # end if
    # end for
# end def

def getSheetName(sheetNum):
    try:
        xl = win32com.client.Dispatch("Excel.Application");
        wb = xl.Workbooks.Item(sheetNum);
    except Exception, detail:
        print 'There was a problem attaching to Excel, refreshing connect config...';
        print Exception, str(detail);
        attachExcelCOM();
        try:
            xl = win32com.client.Dispatch("Excel.Application");
            wb = xl.Workbooks.Item(sheetNum);
        except:
            print 'Could not attach to Excel...';
            sys.exit(-1);
        # end try/except
    # end try/except

    wsName = wb.Name;
    if (wsName == 'PERSONAL.XLS'):
        return( None );
    # end if

    print 'The target worksheet is:';
    print '  ', wsName;
    print 'Is this correct? [Y/N]',;
    answer = string.strip( sys.stdin.readline() );
    answer = answer.upper();
    if (answer != 'Y'):
        print 'Sheet not identified correctly.';
        return(None);
    # end if

    return( (wb, wsName) );
# end def

# -- Main --
sheetInfo = getSheetName(sheetNum);
if (sheetInfo == None):
    print 'Sheet not found';
    sys.exit(-1);
else:
    (wb, wsName) = sheetInfo;
# end if
