I'm using Python to analyze some records in .bib and .ris files. I made one function for each type. The first function is shown below:
def limpiarlineasris(self, data):
    cont = data
    dic = cont.splitlines()
    cont = ""
    con = []
    i = 0
    for a in dic:
        if len(a) != 0:
            con.append(a)
    for a in con:
        cont = cont + a + "\n"
    return cont
That works well and compiles without problems. The problem arises when I write the second function, shown below:
def limpiarlineasbib(self, data):
    cont = data
    dic = cont.splitlines()
    cont = ""
    con = []
    separador = "°-°-°"
    for a in dic:
        if len(a) != 0:
            if a.startswith('#'):
                con.append(separador)
            else:
                con.append(a)
    for a in con:
        cont = cont + a + "\n"
    return cont
Building the first function gives no problem, but when I compile the second one the compiler shows an error without telling me exactly what or where it is, because I am using plyjy (a JAR for creating Jython objects) and the console only shows a PlyJy exception without the line where it occurs. I'm using NetBeans to compile.
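One way to surface the hidden line number (a minimal sketch, not specific to PlyJy: depurar_limpieza and limpiador are hypothetical names, and the sample string is made up) is to call the method under Jython yourself and print the Python traceback before the wrapper swallows it:

import traceback

def depurar_limpieza(objeto, data):
    # Call the method directly and print the full Python traceback,
    # which otherwise gets hidden behind the generic PlyJy exception.
    try:
        return objeto.limpiarlineasbib(data)
    except Exception:
        traceback.print_exc()
        raise

# Hypothetical usage: limpiador is an instance of whatever class
# defines limpiarlineasbib / limpiarlineasris.
# resultado = depurar_limpieza(limpiador, "#comentario\n\n@article{clave}\n")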
I've solved the problem. It was related to my %PATH%.
I have a script which works with an argument. In PowerShell I've tried the command you can see below:
.\dsrf2csv.py C:\Python27\a\DSR_testdata.tsv.gz
You can also see the script below:
def __init__(self, dsrf2csv_arg):
    self.dsrf_filename = dsrf2csv_arg
    dsrf_path, filename = os.path.split(self.dsrf_filename)
    self.report_outfilename = os.path.join(dsrf_path, filename.replace('DSR', 'Report').replace('tsv', 'csv'))
    self.summary_outfilename = os.path.join(dsrf_path, filename.replace('DSR', 'Summary').replace('tsv.gz', 'csv'))
But when I try to run this script, nothing happens. How should I run this script with a file (for example testdata.tsv.gz)?
Note: the script and the file are in the same location.
Full script:
import argparse
import atexit
import collections
import csv
import gzip
import os

SKIP_ROWS = ['HEAD', '#HEAD', '#SY02', '#SY03', '#AS01', '#MW01', '#RU01',
             '#SU03', '#LI01', '#FOOT']

REPORT_HEAD = ['Asset_ID', 'Asset_Title', 'Asset_Artist', 'Asset_ISRC',
               'MW_Asset_ID', 'MW_Title', 'MW_ISWC', 'MW_Custom_ID',
               'MW_Writers', 'Views', 'Owner_name', 'Ownership_Claim',
               'Gross_Revenue', 'Amount_Payable', 'Video_IDs', 'Video_views']

SUMMARY_HEAD = ['SummaryRecordId', 'DistributionChannel',
                'DistributionChannelDPID', 'CommercialModel', 'UseType',
                'Territory', 'ServiceDescription', 'Usages', 'Users',
                'Currency', 'NetRevenue', 'RightsController',
                'RightsControllerPartyId', 'AllocatedUsages', 'AmountPayable',
                'AllocatedNetRevenue']


class DsrfConverter(object):
    """Converts DSRF 3.0 to YouTube CSV."""

    def __init__(self, dsrf2csv_arg):
        """ Creating output file names """
        self.dsrf_filename = dsrf2csv_arg
        dsrf_path, filename = os.path.split(self.dsrf_filename)
        print(dsrf_filename)
        input("Press Enter to continue...")
        self.report_outfilename = os.path.join(dsrf_path, filename.replace(
            'DSR', 'Report').replace('tsv', 'csv'))
        self.summary_outfilename = os.path.join(dsrf_path, filename.replace(
            'DSR', 'Summary').replace('tsv.gz', 'csv'))

    def parse_blocks(self, reader):
        """Generator for parsing all the blocks from the file.

        Args:
            reader: the handler of the input file

        Yields:
            block_lines: A full block as a list of rows.
        """
        block_lines = []
        current_block = None
        for line in reader:
            if line[0] in SKIP_ROWS:
                continue
            # Exit condition
            if line[0] == 'FOOT':
                yield block_lines
                raise StopIteration()
            line_block_number = int(line[1])
            if current_block is None:
                # Initialize
                current_block = line_block_number
            if line_block_number > current_block:
                # End of block, yield and build a new one
                yield block_lines
                block_lines = []
                current_block = line_block_number
            block_lines.append(line)
        # Also return last block
        yield block_lines

    def process_single_block(self, block):
        """Handles a single block in the DSR report.

        Args:
            block: Block as a list of lines.

        Returns:
            (summary_rows, report_row) tuple.
        """
        views = 0
        gross_revenue = 0
        summary_rows = []
        owners_data = {}
        # Create an ordered dictionary with a key for every column.
        report_row_dict = collections.OrderedDict(
            [(column_name.lower(), '') for column_name in REPORT_HEAD])
        for line in block:
            if line[0] == 'SY02':  # Save the financial Summary
                summary_rows.append(line[1:])
                continue
            if line[0] == 'AS01':  # Sound Recording information
                report_row_dict['asset_id'] = line[3]
                report_row_dict['asset_title'] = line[5]
                report_row_dict['asset_artist'] = line[7]
                report_row_dict['asset_isrc'] = line[4]
            if line[0] == 'MW01':  # Composition information
                report_row_dict['mw_asset_id'] = line[2]
                report_row_dict['mw_title'] = line[4]
                report_row_dict['mw_iswc'] = line[3]
                report_row_dict['mw_writers'] = line[6]
            if line[0] == 'RU01':  # Video level information
                report_row_dict['video_ids'] = line[3]
                report_row_dict['video_views'] = line[4]
            if line[0] == 'SU03':  # Usage data of Sound Recording Asset
                # Summing up views and revenues for each sub-period
                views += int(line[5])
                gross_revenue += float(line[6])
                report_row_dict['views'] = views
                report_row_dict['gross_revenue'] = gross_revenue
            if line[0] == 'LI01':  # Ownership information
                # if we already have parsed a LI01 line with that owner
                if line[3] in owners_data:
                    # keep only the latest ownership
                    owners_data[line[3]]['ownership'] = line[6]
                    owners_data[line[3]]['amount_payable'] += float(line[9])
                else:
                    # need to create the entry for that owner
                    data_dict = {'custom_id': line[5],
                                 'ownership': line[6],
                                 'amount_payable': float(line[9])}
                    owners_data[line[3]] = data_dict
        # get rid of owners which do not have an ownership or an amount payable
        owners_to_write = [o for o in owners_data
                           if (owners_data[o]['ownership'] > 0
                               and owners_data[o]['amount_payable'] > 0)]
        report_row_dict['owner_name'] = '|'.join(owners_to_write)
        report_row_dict['mw_custom_id'] = '|'.join(
            [owners_data[o]['custom_id'] for o in owners_to_write])
        report_row_dict['ownership_claim'] = '|'.join(
            [owners_data[o]['ownership'] for o in owners_to_write])
        report_row_dict['amount_payable'] = '|'.join(
            [str(owners_data[o]['amount_payable']) for o in owners_to_write])
        # Sanity check. The number of values must match the number of columns.
        assert len(report_row_dict) == len(REPORT_HEAD), 'Row is wrong size :/'
        return summary_rows, report_row_dict

    def run(self):
        finished = False

        def removeFiles():
            if not finished:
                os.unlink(self.report_outfilename)
                os.unlink(self.summary_outfilename)

        atexit.register(removeFiles)
        with gzip.open(self.dsrf_filename, 'rb') as dsr_file, gzip.open(
                self.report_outfilename, 'wb') as report_file, open(
                self.summary_outfilename, 'wb') as summary_file:
            dsr_reader = csv.reader(dsr_file, delimiter='\t')
            report_writer = csv.writer(report_file)
            summary_writer = csv.writer(summary_file)
            report_writer.writerow(REPORT_HEAD)
            summary_writer.writerow(SUMMARY_HEAD)
            for block in self.parse_blocks(dsr_reader):
                summary_rows, report_row = self.process_single_block(block)
                report_writer.writerow(report_row.values())
                summary_writer.writerows(summary_rows)
        finished = True


if __name__ == '__main__':
    arg_parser = argparse.ArgumentParser(
        description='Converts DDEX DSRF UGC profile reports to Standard CSV.')
    required_args = arg_parser.add_argument_group('Required arguments')
    required_args.add_argument('dsrf2csv_arg', type=str)
    args = arg_parser.parse_args()
    dsrf_converter = DsrfConverter(args.dsrf2csv_arg)
    dsrf_converter.run()
In general, executing a Python script in PowerShell like this (.\script.py) has two requirements:
Add the path to the Python binaries to your %PATH%: $env:Path = $env:Path + ";C:\Path\to\python\binaries\"
Add the extension .py to the PATHEXT environment variable: $env:PATHEXT += ";.PY"
The latter will only apply to the current PowerShell session. If you want it in all future PowerShell sessions, add that line to your PowerShell profile (e.g. notepad $profile).
In your case there is also an issue with the Python script you are trying to execute. def __init__(self) is a constructor for a class, like:
class Foo:
    def __init__(self):
        print "foo"
Did you give us your complete script?
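If the goal is simply to exercise the script while the PATH/PATHEXT setup is being sorted out, a minimal sketch (assuming the file is named dsrf2csv.py and lives in the current working directory) is to drive the class from Python directly:

# Hypothetical driver: relies on dsrf2csv.py being importable from this folder.
from dsrf2csv import DsrfConverter

converter = DsrfConverter(r'C:\Python27\a\DSR_testdata.tsv.gz')
converter.run()

Alternatively, invoking it through the interpreter (python .\dsrf2csv.py C:\Python27\a\DSR_testdata.tsv.gz) sidesteps PATHEXT entirely.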
I have a question about SAP silent logon, which I implemented using win32com this way:
from win32com.client import Dispatch
R3 = Dispatch("SAP.Functions")
R3.Conn.System = 'xxx'
R3.Conn.Client = '100'
# other values needed to pass to R3.Conn
R3.Conn.logon #here is the problem
In VB I can use R3.Conn.Logon(1, True) to log on silently, but in Python Logon does not seem to be a method and does not let me pass parameters to it.
I tried using R3.Conn.Logon(1, True) in Python, but it returned an error:
Logon was not callable.
How should I call silent logon in Python?
Thanks
This works for me.
Still experimenting; I want to add field selection and of course a filter to RFC_READ_TABLE, but the connection works.
from win32com.client import Dispatch

Functions = Dispatch("SAP.Functions")
Functions.Connection.Client = "000"
Functions.Connection.ApplicationServer = "your server"
Functions.Connection.Language = "EN"
Functions.Connection.User = "you"
Functions.Connection.Password = "your pass"
Functions.Connection.SystemNumber = "00"
Functions.Connection.UseSAPLogonIni = False

if (Functions.Connection.Logon(0, True) == True):
    print("Logon OK")
    RfcCallTransaction = Functions.Add("RFC_READ_TABLE")
    strExport1 = RfcCallTransaction.exports("QUERY_TABLE")
    strExport2 = RfcCallTransaction.exports("DELIMITER")
    strExport3 = RfcCallTransaction.exports("ROWSKIPS")
    strExport4 = RfcCallTransaction.exports("ROWCOUNT")
    tblOptions = RfcCallTransaction.Tables("OPTIONS")
    # RETURNED DATA
    tblData = RfcCallTransaction.Tables("DATA")
    tblFields = RfcCallTransaction.Tables("FIELDS")
    strExport1.Value = 'AGR_DEFINE'
    strExport2.Value = ";"
    strExport3.Value = 0
    strExport4.Value = 10
    if RfcCallTransaction.Call == True:
        print("Function call successful")
        # print(tblData.RowCount)
        j = 1
        while j < tblData.RowCount:
            print(tblData(j, "WA"))
            j = j + 1
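Coming back to the original "Logon was not callable" error: if the plain Dispatch (late binding) still refuses to treat Logon as a method, a common pywin32 workaround, offered here as an assumption rather than something verified against SAP.Functions specifically, is to force early binding so the type library's method signatures are generated:

from win32com.client import gencache

# Early binding: generate Python wrappers from the registered COM type library,
# so Logon is exposed as a real method that accepts parameters.
Functions = gencache.EnsureDispatch("SAP.Functions")
Functions.Connection.Client = "000"
# ... set the remaining Connection properties exactly as above ...
if Functions.Connection.Logon(0, True):
    print("Logon OK")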
I have posted the relevant part of my code below. Before that there are just load functions, which I am pretty sure have no errors.
I am receiving the error
IndexError: list index out of range (on the line namestaj["Naziv"] = deon[1])
Does anyone see something out of order?
# load furniture from a txt file
def ucitajNamestaj():
    listaNamestaja = open("namestaj.txt", "r").readlines()
    namestaj = []
    for red in listaNamestaja:
        namestaj.append(stringToNamestaj(red))
    return namestaj

# String to Furniture, dictionary
def stringToNamestaj(red):
    namestaj = {}
    deon = red.strip().split("|")
    namestaj["Sifra"] = deon[0]
    namestaj["Naziv"] = deon[1]
    namestaj["Boja"] = deon[2]
    namestaj["Kolicina"] = int(deon[3])
    namestaj["Cena"] = float(deon[4])
    namestaj["Kategorija"] = deon[5]
    namestaj["Dostupan"] = deon[6]
    return namestaj
A couple of things first: always try to provide an MCVE and make sure you format your code properly on SO, otherwise your question is unreadable.
Now, what's probably happening is that your file has some empty lines and you're not skipping them. Try this:
def ucitajNamestaj():
    listaNamestaja = open("namestaj.txt", "r").readlines()
    namestaj = []
    for red in listaNamestaja:
        if red.strip() == "":
            continue
        namestaj.append(stringToNamestaj(red))
    return namestaj

def stringToNamestaj(red):
    namestaj = {}
    deon = red.strip().split("|")
    namestaj["Sifra"] = deon[0]
    namestaj["Naziv"] = deon[1]
    namestaj["Boja"] = deon[2]
    namestaj["Kolicina"] = int(deon[3])
    namestaj["Cena"] = float(deon[4])
    namestaj["Kategorija"] = deon[5]
    namestaj["Dostupan"] = deon[6]
    return namestaj
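If the file can also contain short or otherwise malformed rows (not only blank lines), a slightly more defensive variant, sketched here under that assumption, skips any line that does not split into the seven expected fields:

def stringToNamestaj(red):
    deon = red.strip().split("|")
    if len(deon) < 7:
        # Malformed row: not enough fields for a furniture record.
        return None
    return {
        "Sifra": deon[0],
        "Naziv": deon[1],
        "Boja": deon[2],
        "Kolicina": int(deon[3]),
        "Cena": float(deon[4]),
        "Kategorija": deon[5],
        "Dostupan": deon[6],
    }

def ucitajNamestaj():
    namestaj = []
    for red in open("namestaj.txt", "r"):
        zapis = stringToNamestaj(red)
        if zapis is not None:  # skip blank and malformed lines
            namestaj.append(zapis)
    return namestaj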
Sorry, my question is: how can I change a file object within a function from a different function?
I've been trying to work out this error in my first Python script for too long now; Dr Google and the forums aren't helping me much, but I'm hoping you can.
I have a looping function that generates a lot of data, and I would like to output it to a text file and create a new text file after the third loop.
I have 2 functions defined, one to create the data hashes, the other to create the new files.
The new files are being created as expected (aaa.txt, baa.txt...etc) but the "hashit" function only ever writes to the first file (aaa.txt) even though the others are being created.
I have tried fo.close() and fo.flush(), as well as referencing fo in the functions, but can't seem to make it work. I've also tried moving the fo.write from the function to the main body.
I have included a cut-down version of the code that I've been using to troubleshoot this issue; the real one has several more loops increasing the string length.
Thanks in advance
import smbpasswd, hashlib

base = '''abcdefghijklmnopqrstuvwxyz '''
# base length 95
print(base)
baselen = len(base)

name = 'aaa.txt'
fo = open(name, "w")
print "Name of the file: ", fo.name
print "Closed or not : ", fo.closed
print "Opening mode : ", fo.mode
print "Softspace flag : ", fo.softspace

pw01 = 0
pw02 = 0
pw03 = 0

def hashit(passwd):
    #2
    # Need to install module
    # sudo apt-get install python-smbpasswd
    hex_dig_lm = smbpasswd.lmhash(passwd)
    hex_dig_ntlm = smbpasswd.nthash(passwd)
    #print '%s:%s' % smbpasswd.hash(passwd)
    hash_md5 = hashlib.md5(passwd)
    hex_dig_md5 = hash_md5.hexdigest()
    print(passwd)
    print(hex_dig_lm)
    print(hex_dig_ntlm)
    print(hex_dig_md5)
    hashstring = passwd + "," + hex_dig_lm + "," + hex_dig_md5 + '\n'
    fo.write(hashstring);

def newfile(name):
    fo.flush()
    fo = open(name, "a")
    print("-------newfile------")
    print "Name of the file: ", fo.name
    print "Closed or not : ", fo.closed
    print('NewFile : ' + name)
    raw_input("\n\nPress the enter key to exit.")

# add 3rd digit
while (pw03 < baselen):
    pwc03 = base[pw03]
    name = pwc03 + 'aa.txt'
    fo.close
    newfile(name);
    pw03 += 1
    while (pw02 < baselen):
        pwc02 = base[pw02]
        pw02 += 1
        while (pw01 < baselen):
            pwc01 = base[pw01]
            pw01 += 1
            passwd = pwc03 + pwc02 + pwc01
            hashit(passwd);
        else:
            pw01 = 0
    else:
        pw02 = 0
else:
    pw03 = 0
In your newfile() function, add this line first:
global fo
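A minimal sketch of what that change looks like (only the relevant lines shown); an alternative that avoids globals is to return the new handle and rebind fo at the call site:

def newfile(name):
    global fo              # rebind the module-level handle, not a new local
    fo.close()
    fo = open(name, "a")
    print "Name of the file: ", fo.name

# Alternative without globals: return the handle instead.
# def newfile(name):
#     return open(name, "a")
# ...
# fo.close()
# fo = newfile(name)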
In some code I pass a dictionary (TankDict) a string from a list. This throws a KeyError, no matter what letter I put in. When I copied and pasted the dictionary out of the context of the program and passed in the same letters from a list, they came out correctly. I have also run type(TankDict) and it comes back as 'dict'.
Here is the dictionary:
TankDict = {'E':0, 'F':1, 'G':2, 'H':3, 'I':4, 'J':5,
            'K':6, 'L':7, 'M':8, 'N':9,
            'O':10, 'P':11, 'Q':12, 'R':13, 'S':14, 'T':15,
            'U':16, 'V':17, 'W':18, 'X':19}
The error:
channelData[1] = tank_address_dict[channelData[1]]
KeyError: 'L'
(tank_address_dict is a function argument into which TankDict is passed)
the contents of channelData: ['447', 'L', '15', 'C']
Can anyone tell me the (probably simple) reason that this happens?
EDIT: Code!
This is the function where the error is:
def getTankID(channel, tank_address_dict, PTM_dict, channel_ref):
    rawChannelData = 'NA'
    for line in channel_ref:
        if str(channel) in line: rawChannelData = line
    if(rawChannelData == 'NA'): return -1;
    channelData = rawChannelData.split(' ')
    channelData.extend(['',''])
    channelData[1] = channelData[1][:-1]
    channelData[3] = channelData[1][-1]
    channelData[1] = channelData[1][:-1]
    channelData[2] = channelData[1][1:]
    channelData[1] = channelData[1][:1]
    print channelData #debug
    print 'L' in tank_address_dict
    print 'E' in tank_address_dict
    print 'O' in tank_address_dict
    print 'U' in tank_address_dict
    print type(tank_address_dict)
    channelData[1] = tank_address_dict[channelData[1]]
    channelData[3] = PTM_dict[channelData[3]]
    return(channelData[1:])
This is the function that calls it:
def runFile(model, datafile, time_scale, max_PEs, tank_address_dict, PMT_dict, channel_ref):
    #add initSerial for ser0-4
    while(True):
        raw_data = datafile.readline() #intake data
        if(raw_data == ''): break #End while loop if the file is done
        data = raw_data.split(' ') #break up the parts of each line
        del data[::2] #delete the human formatting
        data[2] = data[2][:-1] #rm newline (NOTE: file must contain blank line at end!)
        TankID = getTankID(data[0], tank_address_dict, PMT_dict, channel_ref)
        if(TankID == -1):
            print '!---Invalid channel number passed by datafile---!'; break #check for valid TankID
        model[TankID[0]][TankID[1]][TankID[2]] = scale(data[2], (0, max_PEs), (0, 4096))
        createPackets(model)
        #updateModel(ser0,ser1,ser2,ser3,ser4,packet)
        data[2] = data[2]*time_scale #scale time
        time.sleep(data[2]) #wait until the next event
        print data #debug
    if(TankID != -1): print '---File', datafile, 'finished---' #report errors in file run
    else: print '!---File', datafile, 'finished with error---!'
And this is the code that calls that:
import hawc_func
import debug_options
#begin defs
model = hawc_func.createDataStruct() #create the data structure
TankDict = hawc_func.createTankDict() #tank grid coordinate conversion table
PTMDict = hawc_func.createPMTDict() #PMT conversion table
log1 = open('Logs/log1.txt','w') #open a logfile
data = open('Data/event.txt','r') #open data
channel_ref = open('aux_files/channel_map.dat','r')
time_scale = 1 #0-1 number to scale nano seconds? to seconds
#end defs
hawc_func.runFile(model,data,4000,TankDict,PTMDict,time_scale,channel_ref)
#hawc_func.runFile(model,data,TankDict,PTMDict)
#close files
log1.close()
data.close()
#end close files
print '-----Done-----' #confirm tasks finished
tank_address_dict is created through this function, run by the 3rd block of code, then passed on through the other two:
def createTankDict():
    TankDict = {'E':0, 'F':1, 'G':2, 'H':3, 'I':4, 'J':5,
                'K':6, 'L':7, 'M':8, 'N':9,
                'O':10, 'P':11, 'Q':12, 'R':13, 'S':14, 'T':15,
                'U':16, 'V':17, 'W':18, 'X':19}
    return TankDict
You are not passing your arguments correctly.
def runFile(model, datafile, time_scale, max_PEs, tank_address_dict, PMT_dict, channel_ref):
hawc_func.runFile(model,data,4000,TankDict,PTMDict,time_scale,channel_ref)
Here, you have max_PEs = TankDict.
That may not be your only problem. Fix that first, and if you are still having problems, update your post with your fixed code and then tell us what your new error is.
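A sketch of the corrected call (assuming 4000 was meant to be max_PEs and time_scale is the variable defined earlier); passing keyword arguments makes this kind of positional mix-up much harder:

hawc_func.runFile(model, data, time_scale, 4000, TankDict, PTMDict, channel_ref)

# Or, more explicitly:
hawc_func.runFile(model=model, datafile=data, time_scale=time_scale,
                  max_PEs=4000, tank_address_dict=TankDict,
                  PMT_dict=PTMDict, channel_ref=channel_ref)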