I have a Pyramid/Python application with the following view callable:
@view_config(route_name='home_page', renderer='templates/edit.pt')
def home_page(request):
if 'form.submitted' in request.params:
name= request.params['name']
input_file=request.POST['stl'].file
vertices, normals = [],[]
if input_file.read(5) == b'solid':
for line in input_file:
parts = line.split()
if parts[0] == 'vertex':
vertices.append(map(float, parts[1:4]))
elif parts[0] == 'facet':
normals.append(map(float, parts[2:5]))
ordering=[]
N=len(normals)
for i in range(0,N):
ordering.append([3*i,3*i+1,3*i+2])
data=[vertices,ordering]
else:
f=input_file
points=[]
triangles=[]
normals=[]
def unpack (f, sig, l):
s = f.read (l)
fb.append(s)
return struct.unpack(sig, s)
def read_triangle(f):
n = unpack(f,"<3f", 12)
p1 = unpack(f,"<3f", 12)
p2 = unpack(f,"<3f", 12)
p3 = unpack(f,"<3f", 12)
b = unpack(f,"<h", 2)
normals.append(n)
l = len(points)
points.append(p1)
points.append(p2)
points.append(p3)
triangles.append((l, l+1, l+2))
#bytecount.append(b[0])
def read_length(f):
length = struct.unpack("<i", f.read(4))  # little-endian triangle count
return length[0]
def read_header(f):
f.seek(f.tell()+80)
read_header(f)
l = read_length(f)
try:
while True:
read_triangle(f)
#except Exception, e:
#print "Exception",e[0]
#write_as_ascii(outfilename)
data=[points,triangles]
jsdata=json.dumps(data)
renderer_dict = dict(name=name,data=jsdata)
path=shortuuid.uuid()
html_string = render('tutorial:templates/view.pt', renderer_dict, request=request)
s3=boto.connect_s3(aws_access_key_id = 'AKIAIJB6L7Q', aws_secret_access_key = 'sId01dYCMhl0wX' )
bucket=s3.get_bucket('cubes.supercuber.com')
k=Key(bucket)
k.key='%(path)s' % {'path':path}
k.set_contents_from_string(html_string, headers={'Content-Type': 'text/html'})
k.set_acl('public-read')
return HTTPFound(location="http://cubes.supercuber.com/%(path)s" % {'path':path})
return {}
As you can see, the code checks whether an uploaded STL file is ASCII (if it starts with the word "solid") or binary. If it's ASCII, everything works fine and the data variable gets filled with the vertices and ordering. However, if it doesn't start with "solid" and is a binary STL file, the data variable never gets filled. Why is this?
You read the first 5 bytes to check for the filetype, but never reset the file position to the beginning.
Add a call to .seek(0):
f=input_file
f.seek(0)
You can always seek relatively without a call to f.tell():
f.seek(80, 1) # relative seek, you can use `os.SEEK_CUR` as well.
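Putting the two tips together, a minimal sketch of the detection step (parse_ascii_stl and parse_binary_stl are hypothetical placeholders for the two parsing branches in the view above):
def load_stl(f):
    header = f.read(5)          # peek at the first 5 bytes to decide ASCII vs. binary
    f.seek(0)                   # rewind so the parser below sees the whole file
    if header == b'solid':
        return parse_ascii_stl(f)
    f.seek(80, 1)               # relative seek past the 80-byte binary header (os.SEEK_CUR)
    return parse_binary_stl(f)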
This is how the dataset was created
def createDataset1(
outputPath,
imagePathList,
labelList,
lexiconList=None,
validset_percent=10,
testset_percent=0,
random_seed=1111,
checkValid=True,
):
"""
Create LMDB dataset for CRNN training.
ARGS:
outputPath : LMDB output path
imagePathList : list of image path
labelList : list of corresponding groundtruth texts
lexiconList : (optional) list of lexicon lists
checkValid : if true, check the validity of every image
"""
train_path = os.path.join(outputPath, "training", "9M")
valid_path = os.path.join(outputPath, "validation", "9M")
# CAUTION: if train_path (lmdb) already exists, this function add dataset
# into it. so remove former one and re-create lmdb.
if os.path.exists(train_path):
os.system(f"rm -r {train_path}")
if os.path.exists(valid_path):
os.system(f"rm -r {valid_path}")
os.makedirs(train_path, exist_ok=True)
os.makedirs(valid_path, exist_ok=True)
gt_train_path = gt_file.replace(".txt", "_train.txt")
gt_valid_path = gt_file.replace(".txt", "_valid.txt")
data_log = open(gt_train_path, "w", encoding="utf-8")
if testset_percent != 0:
test_path = os.path.join(outputPath, "evaluation", dataset_name)
if os.path.exists(test_path):
os.system(f"rm -r {test_path}")
os.makedirs(test_path, exist_ok=True)
gt_test_path = gtFile.replace(".txt", "_test.txt")
assert(len(imagePathList) == len(labelList))
nSamples = len(imagePathList)
num_valid_dataset = int(nSamples * validset_percent / 100.0)
num_test_dataset = int(nSamples * testset_percent / 100.0)
num_train_dataset = nSamples - num_valid_dataset - num_test_dataset
print("validation datasets: ",num_valid_dataset,"\n", "test datasets: ", num_test_dataset, " \n training datasets: ", num_train_dataset)
env = lmdb.open(outputPath, map_size=1099511627776)
cache = {}
cnt = 1
random.seed(random_seed)
random.shuffle(imagePathList)
for i in tqdm(range(nSamples)):
data_log.write(imagePathList[i])
imagePath = imagePathList[i]
label = labelList[i]
if len(label) == 0:
continue
if not os.path.exists(imagePath):
print('%s does not exist' % imagePath)
continue
with open(imagePath, 'rb') as f:
imageBin = f.read()
if checkValid:
if not checkImageIsValid(imageBin):
print('%s is not a valid image' % imagePath)
continue
embed_vec = fasttext_model[label]
imageKey = 'image-%09d' % cnt
labelKey = 'label-%09d' % cnt
embedKey = 'embed-%09d' % cnt
cache[imageKey] = imageBin
cache[labelKey] = label.encode()
cache[embedKey] = ' '.join(str(v) for v in embed_vec.tolist()).encode()
if lexiconList:
lexiconKey = 'lexicon-%09d' % cnt
cache[lexiconKey] = ' '.join(lexiconList[i])
if cnt % 1000 == 0:
writeCache(env, cache)
cache = {}
print('Written %d / %d' % (cnt, nSamples))
#finish train dataset and start validation dataset
if i + 1 == num_train_dataset:
print(f"# Train dataset: {num_train_dataset} is finished")
cache["num-samples".encode()] = str(num_train_dataset).encode()
writeCache(env, cache)
data_log.close()
#start validation set
env = lmdb.open(valid_path, map_size=30 * 2 ** 30)
cache = {}
cnt = 0
data_log = open(gt_valid_path, "w", encoding="utf-8")
# Finish train/valid dataset and Start test dataset
if (i + 1 == num_train_dataset + num_valid_dataset) and num_test_dataset != 0:
print(f"# Valid dataset: {num_valid_dataset} is finished")
cache["num-samples".encode()] = str(num_valid_dataset).encode()
writeCache(env, cache)
data_log.close()
# start test set
env = lmdb.open(test_path, map_size=30 * 2 ** 30)
cache = {}
cnt = 0 # not 1 at this time
data_log = open(gt_test_path, "w", encoding="utf-8")
cnt += 1
if testset_percent == 0:
cache["num-samples".encode()] = str(num_valid_dataset).encode()
writeCache(env, cache)
print(f"# Valid datast: {num_valid_dataset} is finished")
else:
cache["num-samples".encode()] = str(num_test_dataset).encode()
writeCache(env, cache)
print(f"# Test datast: {num_test_dataset} is finished")
This is how I am trying to retrieve the data:
class LmdbDataset(data.Dataset):
def __init__(self, root, voc_type, max_len, num_samples, transform=None):
super(LmdbDataset, self).__init__()
if global_args.run_on_remote:
dataset_name = os.path.basename(root)
data_cache_url = "/cache/%s" % dataset_name
if not os.path.exists(data_cache_url):
os.makedirs(data_cache_url)
if mox.file.exists(root):
mox.file.copy_parallel(root, data_cache_url)
else:
raise ValueError("%s not exists!" % root)
self.env = lmdb.open(data_cache_url, max_readers=32, readonly=True)
else:
self.env = lmdb.open(root, max_readers=32, readonly=True)
assert self.env is not None, "cannot create lmdb from %s" % root
self.txn = self.env.begin()
self.voc_type = voc_type
self.transform = transform
self.max_len = max_len
# nums = b"num-samples"
# print('NUM SAMPLES ------ \n',nums)
nSamples = self.txn.get('num-samples'.encode())
print("STRING nSamples :", nSamples)
self.nSamples = int(self.txn.get(b"num-samples"))
self.nSamples = min(self.nSamples, num_samples)
assert voc_type in ['LOWERCASE', 'ALLCASES', 'ALLCASES_SYMBOLS']
self.EOS = 'EOS'
self.PADDING = 'PADDING'
self.UNKNOWN = 'UNKNOWN'
self.voc = get_vocabulary(voc_type, EOS=self.EOS, PADDING=self.PADDING, UNKNOWN=self.UNKNOWN)
self.char2id = dict(zip(self.voc, range(len(self.voc))))
self.id2char = dict(zip(range(len(self.voc)), self.voc))
self.rec_num_classes = len(self.voc)
self.lowercase = (voc_type == 'LOWERCASE')
I am getting the error below whenever the code tries to call self.txn.get(b"num-samples"):
Traceback (most recent call last):
File "main.py", line 268, in <module>
main(args)
File "main.py", line 157, in main
train_dataset, train_loader = get_data_lmdb(args.synthetic_train_data_dir, args.voc_type, args.max_len, args.num_train,
File "main.py", line 66, in get_data_lmdb
dataset_list.append(LmdbDataset(data_dir_, voc_type, max_len, num_samples))
File "/Users/SEED/lib/datasets/dataset.py", line 189, in __init__
self.nSamples = int(self.txn.get(b"num-samples"))
TypeError: int() argument must be a string, a bytes-like object or a number, not 'NoneType'
I have tried many different suggestions online and some Stack Overflow threads, but could not figure out what is wrong.
What is causing this error and how can I fix this?
Your code is dense and convoluted, and the flow of the data is hard to follow. It mixes lots of computation with IO, side effects, and even system calls (os.system(f"rm -r {test_path}") is one example; you should use shutil.rmtree instead, as sketched after the list below).
Try breaking up each action that you wish to perform so that it primarily does one specific thing:
logical operations but no side effects
input (read from file or network)
output (write to file, generate a result)
file system operations/cleanup
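For the file system cleanup stage, for example, a minimal sketch of a helper that replaces the os.system(f"rm -r ...") calls mentioned above might look like this:
import os
import shutil

def reset_dir(path):
    # Remove any previous LMDB directory for this split, then recreate it empty.
    if os.path.exists(path):
        shutil.rmtree(path)
    os.makedirs(path, exist_ok=True)

# e.g. reset_dir(train_path); reset_dir(valid_path); reset_dir(test_path)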
At each stage, you should perform verifications, and use the rule of least power. If you expect self.txn to always have 'num-samples', then you should use `self.txn[b'num-samples']` rather than `.get`, which defaults to `None`. This makes it easier to catch errors earlier in the chain.
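A small illustration of that point, with a plain dict standing in for the transaction (note: an actual lmdb Transaction only exposes .get(), so there the early check would be an explicit test for None):
txn = {b"num-samples": b"1000"}  # stand-in for the real transaction object

# .get() hides a missing key until much later (e.g. inside int()):
value = txn.get(b"num-sample")   # typo in the key -> None, no error yet

# Direct indexing fails right at the lookup, which is easier to debug:
value = txn[b"num-sample"]       # -> KeyError: b'num-sample'

# With the real lmdb transaction, an equivalent early check might be:
# value = self.txn.get(b"num-samples")
# if value is None:
#     raise KeyError("num-samples key missing from the LMDB at %s" % root)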
Also, I have no idea what the lmdb module is. Is that a library, or another file in your codebase? You should link to any libraries you are using if they aren't well known. I see several Python packages pertaining to lmdb.
From the code example, I can dissect it for you. Maybe it will help you close the issue.
The log says:
self.nSamples = int(self.txn.get(b"num-samples"))
TypeError: int() argument must be a string, a bytes-like object or a number, not 'NoneType'
self.txn should behave like a dictionary for the get method to work there. As you are trying to get the value, there can be two scenarios:
num-samples can be '' in the data, which means there is no value
there is no key called num-samples in the data
Both of them can give you a NoneType, which cannot be handled by the type conversion (to int). So you have to check whether that field is present in the data or not, by listing the keys stored in the database (see the sketch below) and seeing if that key is there.
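A hedged sketch of that check with the lmdb Python binding (a Transaction object has no .keys() method, but you can walk a cursor over it; lmdb_path is a placeholder for wherever the dataset was written):
import lmdb

env = lmdb.open(lmdb_path, readonly=True, lock=False)  # lmdb_path is hypothetical
with env.begin() as txn:
    stored_keys = [key for key, _ in txn.cursor()]

# If this prints False, the missing key explains the None -> TypeError
print(b"num-samples" in stored_keys)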
Furthermore, if
self.nSamples = int(self.txn.get(b"num-samples"))
is failing and not the above statement
nSamples = self.txn.get('num-samples'.encode())
then you can simply decode the variable and use it there, like:
Samples = nSamples.decode("utf-8")
try:
int_samples = int(Samples)
except (TypeError, ValueError):  # int() raises ValueError for a non-numeric string
print(Samples)
Suppose we have something like:
if True:
r = 0
else:
r = 1
print(r)
Why would we get UnboundLocalError: local variable 'r' referenced before assignment?
The actual code is shown below:
def rasterize_dot_verify_args(callable, parent):
if not hasattr(callable, "__call__"):
raise ValueError()
import inspect
siggy = inspect.signature(callable)
if (len(siggy.parameters) > 1):
raise ValueError()
def rasterize(callable, xparent, make_copy :bool = False):
rasterize_dot_verify_args(callable, xparent)
iparent = xparent
if make_copy:
import copy
iparent = copy.deepcopy(xparent)
if hasattr(iparent, "__iter__"):
in_kids = iter(iparent)
if in_kids != iparent:
lamby = lambda p, *, c=callable: rasterize(c, p)
out_kids = map(lamby, in_kids)
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
r = callable(out_kids) # !!!!!!!!!!!!!!!!!!!!
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
r = iparent # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
return r
import itertools as itts
sixify = lambda obj, *, itts=itts: itts.repeat(obj, 6)
inputs = map(sixify, range(1, 5))
# inputs = (_ for _ in [
# itts.repeat(1, 6),
# itts.repeat(2, 6),
# itts.repeat(3, 6),
# itts.repeat(4, 6)
# ])
print(rasterize(list, inputs))
I'm forced to add a little bit of text here because, "It looks like your post is mostly code; please add some more details."
oh dear.... even more text is needed.
r would not be assigned when hasattr(iparent, "__iter__") is True and in_kids != iparent is False. You should add an else block to the if in_kids != iparent: statement to assign r a value.
A better characterization of your code would be:
if test1:
if test2:
r = something
else:
r = something
Which should make it obvious how you'd get the error you got.
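For completeness, a minimal sketch of the fix described above, with an else on the inner if; falling back to iparent there is an assumption about the behaviour you want:
if hasattr(iparent, "__iter__"):
    in_kids = iter(iparent)
    if in_kids != iparent:
        lamby = lambda p, *, c=callable: rasterize(c, p)
        out_kids = map(lamby, in_kids)
        r = callable(out_kids)
    else:
        # New branch: without it, r is never bound when iparent is already an
        # iterator (iter(x) is x for iterators), and the return below raises
        # UnboundLocalError.
        r = iparent
else:
    r = iparent
return r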
Usually the problem is that TensorArray indexes can only be read once unless clear_after_read is False, but I haven't come across this error before: "Could not write to TensorArray index 0 because it has already been written to."
import tensorflow as tf

def bod(i, centroids, base, seq, thres):
    eu_dist = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(base, centroids[i])), axis=1))
    argmin = tf.cast(tf.argmin(eu_dist), tf.int32)
    default_write = tf.constant([0, 0, 0], tf.int32)
    seq = tf.cond(tf.less(eu_dist[argmin], thres), lambda: seq.write(i, centroids[i, argmin]), lambda: seq.write(i, default_write))
    return i, centroids, base, seq, thres

def find_sequence(centroids, thres=1.0):
    centroids_tf = tf.placeholder(tf.float32, shape=(3, None, 3))
    base_tf = tf.placeholder(tf.float32, shape=(3))
    thres_tf = tf.placeholder(tf.float32)
    i_tf = tf.constant(0, tf.int32)
    seq = tf.TensorArray(tf.float32, size=(tf.shape(centroids_tf)[0]))
    con = lambda i, cen, base, seq, thres_: tf.less(i, tf.shape(centroids_tf)[0])
    i, centroids_out, base_out, part, thres_ = tf.while_loop(con, bod, [i_tf, centroids_tf, base_tf, seq, thres_tf])
    part_seq = part.stack()
    object_seq = []
    with tf.Session() as sess:
        for cent in centroids[0]:
            seq_out = sess.run(part_seq, feed_dict={centroids_tf: centroids[1:], base_tf: cent, thres_tf: thres})
            append = True
            for s in seq_out:
                if s == [0, 0, 0]:
                    append = False
                    break
            if append:
                object_seq.append(seq_out)
    return object_seq
Can anyone help, or tell me how I can get around it?
The following code runs and does what it's supposed to do, but I'm having trouble using the XlsxWriter module in Python to get some of the results into a .xlsx file. The goal is for the output to contain information from the function block_trial, where it tracks each block and gives me the all_powers variable that corresponds with that trial. Installing the module into my user directory goes smoothly, but it won't give me a file containing both sets of information.
At the moment, I'm using:
import xlsxwriter
workbook = xlsxwriter.Workbook('SRT_data.xlsx')
worksheet = workbook.add_worksheet()
# Widen the first column to make the text clearer.
worksheet.set_column('A:A', 20)
# Add a bold format to use to highlight cells.
bold = workbook.add_format({'bold': True})
# Write some simple text.
worksheet.write('A1', 'RT')
workbook.close()
But I can't get any of my data to show up.
import random, math
num_features = 20
stim_to_vect = {}
all_stim = [1,2,3,4,5]
all_features = range(num_features)
zeros=[0 for i in all_stim]
memory=[]
def snoc(xs,x):
new_xs=xs.copy()
new_xs.append(x)
return new_xs
def concat(xss):
new_xs = []
for xs in xss:
new_xs.extend(xs)
return new_xs
def point_wise_mul(xs,ys):
return [x*y for x,y in zip(xs,ys)]
for s in snoc(all_stim, 0):
stim_to_vect[s]= []
for i in all_features:
stim_to_vect[s].append(random.choice([-1, 1]))
def similarity(x,y):
return(math.fsum(point_wise_mul(x,y))/math.sqrt(math.fsum(point_wise_mul(x,x))*math.fsum(point_wise_mul(y,y))))
def echo(probe,power):
echo_vect=[]
for j in all_features:
total=0
for i in range(len(memory)):
total+=math.pow(similarity(probe, memory[i]),power)*memory[i][j]
echo_vect.append(total)
return echo_vect
fixed_seq=[1,5,3,4,2,1,3,5,4,2,5,1]
prev_states={}
prev_states[0]=[]
prev=0
for curr in fixed_seq:
if curr not in prev_states.keys():
prev_states[curr]=[]
prev_states[curr].append(prev)
prev=curr
def update_memory(learning_parameter,event):
memory.append([i if random.random() <= learning_parameter else 0 for i in event])
for i in snoc(all_stim,0):
for j in prev_states[i]:
curr_stim = stim_to_vect[i]
prev_resp = stim_to_vect[j]
curr_resp = stim_to_vect[i]
update_memory(1.0, concat([curr_stim, prev_resp, curr_resp]))
def first_part(x):
return x[:2*num_features-1]
def second_part(x):
return x[2*num_features:]
def compare(curr_stim, prev_resp):
for power in range(1,10):
probe=concat([curr_stim,prev_resp,zeros])
theEcho=echo(probe,power)
if similarity(first_part(probe),first_part(theEcho))>0.97:
curr_resp=second_part(theEcho)
return power,curr_resp
return 10,zeros
def block_trial(sequence):
all_powers=[]
prev_resp = stim_to_vect[0]
for i in sequence:
curr_stim = stim_to_vect[i]
power,curr_resp=compare(curr_stim,prev_resp)
update_memory(0.7,concat([curr_stim,prev_resp,curr_resp]))
all_powers.append(power)
prev_resp=curr_resp
return all_powers
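For reference, a hedged sketch of one way to get the per-block results into the workbook with XlsxWriter; blocks is a hypothetical list of the sequences you actually present, and write_row writes a Python list across a row:
import xlsxwriter

workbook = xlsxwriter.Workbook('SRT_data.xlsx')
worksheet = workbook.add_worksheet()
worksheet.set_column('A:A', 20)
bold = workbook.add_format({'bold': True})

worksheet.write('A1', 'Block', bold)
worksheet.write('B1', 'RT (all_powers)', bold)

blocks = [fixed_seq] * 5  # placeholder: whatever block sequences you actually run
for row, sequence in enumerate(blocks, start=1):
    all_powers = block_trial(sequence)
    worksheet.write(row, 0, row)             # block number in column A
    worksheet.write_row(row, 1, all_powers)  # one power value per column, B onwards
workbook.close()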
I am new to Python and trying to get Kauffman's NK model to work in it. I found the code online and was hoping I could make some changes over time, but I am not able to run the code. It is giving an error on line 31, 'f = open(options.in_filenames)'. I am sure I am missing something really small; any help would be appreciated.
import csv
from numpy import *
import Gnuplot
import time
from optparse import OptionParser
from pylab import *
# set up and read command line options
parser = OptionParser()
parser.add_option("-f", "--file", dest="in_filenames",
help="read data from FILE - enclose comma-separated file list in quotes e.g. \"FILE1, FILE2\"", metavar="FILE")
(options, args) = parser.parse_args()
# set up constants
# column titles
columnvar_titles = (["A", "N", "K"])
series_titles = (["Average Fitness", "Maximum Fitness","Minimum Fitness",
"Average Wait Before Move","Maximum Wait Before Move",
"Minimum Wait Before Move","Average Number of Fitter Neighbours",
"Maximum Number of Fitter Neighbours",
"Minimum Number of Fitter Neighbours"])
f = open(options.in_filenames)
reader = csv.reader(f)
floats = []
options_dict={}
# start from the first line in the file
# read lines until we hit a blank
# lines will be in form "Key: Value"
# so split them and build a dictionary
while (1):
readstring = reader.next()
if len(readstring)==0:
break
dict_entry = readstring[0].split(': ', 1)
options_dict[dict_entry[0]] = dict_entry[1]
#print readstring
#print reader.line_num
#print options_dict
#print len(options_dict)
#print options_dict['Fitness_method']
# after the model parameters, we have blank line(s) before the data headers
# keep skipping blanks, then grab the first non-blank line
# then read the first line into a list of strings.
while (1):
readstring = reader.next()
if len(readstring) > 0:
column_headers = readstring;
break
#print column_headers
# need to check if we have a 'run' column
# single run gui output doesn't produce one, so need to add to column headers
# First six cols are "run, tick, A_size_of, RNGseed, [N & K]_size_of"
# "tick is already contained in the data, but is overwritten with K_size_of
# as it needs to be moved
# set add_run_data - flag to insert corresponding columns into the numeric data
add_run_data = 0
if column_headers[0]!="run":
column_headers[0] = "K_size_of"
column_headers = ["run", "tick", "A_size_of", "RngSeed", "N_size_of"] + column_headers
add_run_data = 1
print "Processing one run GUI output format..."
else:
print "Processing batch mode output..."
#print column_headers
# read lines from the data until we hit a blank.
# if data is numeric, put it into out 2d list of floats
countlines=0
while (1):
try:
readstring=reader.next()
countlines = countlines + 1
if len(readstring)==0:
print "Stopped reading"
break
try:
floats.append(map(float, readstring))
except:
print "Bad data - not adding"
except StopIteration:
#print "Read:", countlines
break
print "Read", countlines, "lines of data from file"
#print floats
xdata=array(floats)
#print xdata
# if we needed to add column headers before, we are dealing with single run
# output. If so, we need to add columns at the left of the data.
# First six cols are "run, tick, A_size_of, RNGseed, [N & K]_size_of"
# We add five (not six) cols, since "tick" is currently in the data already
# Other values are run = 1 (by def), RNGSeed (doesn't matter)
# A, N, K values are taken from the dictionary made from the header data in the file
if add_run_data == 1:
newcol = ones((xdata.shape[0],5), dtype="float")
xdata = concatenate((newcol, xdata), axis=1)
A_val = float(options_dict['A_size_of'])
N_val = float(options_dict['N_size_of'])
K_val = float(options_dict['K_size_of'])
#print A_val, N_val, K_val
xdata[:,1] = xdata[:,5] # ticks - already there but needs to move
xdata[:,2] = A_val
xdata[:,3] = 0 #RNG seed doesn't matter
xdata[:,4] = N_val
xdata[:,5] = K_val #overwrites original tick column
#print column_headers
#print xdata[1]
A_uniques = unique(xdata[:,2])
N_uniques = unique(xdata[:,4])
K_uniques = unique(xdata[:,5])
series_total = len(A_uniques) * len(N_uniques) * len(K_uniques)
# set up an array to hold averages (no columns for run number or rng seed)
# needs to move to handle multiple variables
maxticks = xdata[:,1].max()
#print series_total
#print maxticks
#print xdata.shape[1]-2
averages = zeros((series_total, maxticks, xdata.shape[1]-2), float)
# three loop setup for varying A / N / K values
series_counter = 0
Aseries_name=""
Nseries_name=""
Kseries_name=""
series_keys=[]
for A_value in A_uniques:
if len(A_uniques)>1:
Aseries_name = "A=" + str(A_value) + ", "
dataA = compress(xdata[:,2]==A_value, xdata, axis=0)
for N_value in N_uniques:
#if len(N_uniques)>1:
Nseries_name = "N=" + str(N_value)
dataAN = compress(dataA[:,4]==N_value, dataA, axis = 0)
for K_value in K_uniques:
#if len(K_uniques)>1:
Kseries_name = ", " + "K=" + str(K_value)
dataANK = compress(dataAN[:,5]==K_value, dataAN, axis = 0)
series_keys.append(Aseries_name + Nseries_name + Kseries_name)
# when multiple variables are used, run values continue to count from the
# previous variable value (e.g. A=2 (runs 1-100) A=3 (runs 101-200))
# we need to number the runs in ascending order from 1.
firstrun=dataANK[:,0].min()
lastrun=dataANK[:,0].max()
totalruns = 1 + lastrun - firstrun
#print firstrun, lastrun, totalruns
# for each run, find the last actual tick data
last_tick_array = zeros((totalruns, dataANK.shape[1]), float)
#print last_tick_array.shape
for k in arange(totalruns): # for each run get the data for the last tick
this_run=compress(dataANK[:,0]==k+firstrun, dataANK, axis=0)
last_tick_array[k]=this_run[-1]
#print "Last tick array"
#print last_tick_array[-1]
print "Processing simulation " + str(series_counter+1) + "/" + str(series_total)
for i in arange(maxticks): # for each tick value up to the maximum
# array to hold one tick from each run for averaging
# will contain either actual or extrapolated data
selected_ticks = zeros((totalruns, dataANK.shape[1]), float)
#print selected_ticks
# get dataANK for this tick from all runs. May be empty.
this_tick = compress(dataANK[:,1]==i+1, dataANK, axis=0)
#print "this tick"
#print this_tick
for j in arange(totalruns): # for each run
if (i+1) < last_tick_array[j,1]:# do we have actual data?
#print "Using real data"
# if so, get it
selected_ticks[j] = compress(this_tick[:,0]==j+firstrun, this_tick, axis=0)
#print selected_ticks
else:
# if not, use the last tick we do have
#print "Using last tick"
selected_ticks[j] = last_tick_array[j]
#print "selected_ticks"
#print selected_ticks[0]
averages[series_counter][i][0]=i+1 # tick number
averages[series_counter][i][1]=selected_ticks[:,2].max() #A_size_of
averages[series_counter][i][2]=selected_ticks[:,4].min() #N_size_of
averages[series_counter][i][3]=selected_ticks[:,5].min() #K_size_of
for m in xrange(6,16):
#print m
averages[series_counter][i][m-2]=selected_ticks[:,m].mean()
# increment to fill next index
series_counter = series_counter + 1
# matplotlib plots
print "Plotting graphs..."
matplotlib.use('Agg')
for graph_num in (4,5,6,7,8,9,10,11,12,13):
ylabel(column_headers[graph_num+2])
for ser in range(series_total):
plot(averages[ser][:,0], averages[ser][:,graph_num], label=series_keys[ser])
legend(loc='center right').draw_frame(0)
show()
savefig('nk' + column_headers[graph_num+2] + '.png')
clf()
print "Writing CSV files"
f1 = open("nk_allticks.csv","wt")
csv1 = csv.writer(f1)
f2 = open("nk_finaltick.csv","wt")
csv2 = csv.writer(f2)
column_headers.remove('RngSeed')
# replace spaces for underscores in column headers for better file compatability
for i in range(len(column_headers)):
column_headers[i]=column_headers[i].replace(' ','_')
try:
csv1.writerow(column_headers[1:])
csv2.writerow(column_headers[1:])
for series in range(series_total):
csv1.writerows(averages[series])
csv2.writerow(averages[series][-1])
finally:
f1.close
f2.close
f3 = open("nk_allticks_crosstab.csv","wt")
csv3 = csv.writer(f3)
out_array = zeros((averages[0].shape[0], 1 +(series_total * 10)), float)
out_array[:,0] = averages[0][:,0]
headers=[1000]#[8 * series_total]
headers[0]="tick"
datacol = 1
for column_num in (3, 4,5,6,7,8,9,10,11,12):
for ser in range (series_total):
headers.append((column_headers[column_num+2] + " " + series_keys[ser]).replace(' ','_'))
out_array[:,datacol] = averages[ser][:,column_num+1]
#print averages[ser][0]
datacol = datacol + 1
#print headers
#print out_array.shape
try:
csv3.writerow(headers)
csv3.writerows(out_array)
finally:
f3.close
You need to tell it which file to open. When you run the script, you'd need to do something like:
python myscript.py -f myfile
Otherwise, you have not given it a file. Use the interpreter, it's great for catching these things.
>>> from optparse import OptionParser
>>> parser = OptionParser()
>>> parser.add_option("-f", "--file", dest="in_filenames",
... help="read data from FILE - enclose comma-separated file list in quotes e.g. \"FILE1, FILE2\"", metavar="FILE")
<Option at 0x29f47c8: -f/--file>
>>> (options, args) = parser.parse_args()
>>> options # Notice that in_filenames is None here
<Values at 0x29f6648: {'in_filenames': None}>
>>> import sys
>>> sys.argv
['']
>>> sys.argv = ['','-f','myfile'] # Let's explicitly set the argument
>>> (options, args) = parser.parse_args()
>>> options # Now it works...
<Values at 0x29fd848: {'in_filenames': 'myfile'}>
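If you also want the script itself to stop with a readable message instead of crashing on open(None), a small hedged addition right after parse_args() could use parser.error, which prints the usage text and exits:
(options, args) = parser.parse_args()
if not options.in_filenames:
    parser.error("no input file given, e.g.: python myscript.py -f myfile")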