NK model using Python

I am new to Python and trying to get Kauffman's NK model working in it. I found the code online and was hoping I could make some changes over time, but I am not able to run the code. It gives an error on line 31, 'f = open(options.in_filenames)'. I am sure I am missing something really small; any help would be appreciated.
import csv
from numpy import *
import Gnuplot
import time
from optparse import OptionParser
from pylab import *
# set up and read command line options
parser = OptionParser()
parser.add_option("-f", "--file", dest="in_filenames",
                  help="read data from FILE - enclose comma-separated file list in quotes e.g. \"FILE1, FILE2\"", metavar="FILE")
(options, args) = parser.parse_args()
# set up constants
# column titles
columnvar_titles = (["A", "N", "K"])
series_titles = (["Average Fitness", "Maximum Fitness", "Minimum Fitness",
                  "Average Wait Before Move", "Maximum Wait Before Move",
                  "Minimum Wait Before Move", "Average Number of Fitter Neighbours",
                  "Maximum Number of Fitter Neighbours",
                  "Minimum Number of Fitter Neighbours"])
f = open(options.in_filenames)
reader = csv.reader(f)
floats = []
options_dict={}
# start from the first line in the file
# read lines until we hit a blank
# lines will be in form "Key: Value"
# so split them and build a dictionary
while (1):
    readstring = reader.next()
    if len(readstring) == 0:
        break
    dict_entry = readstring[0].split(': ', 1)
    options_dict[dict_entry[0]] = dict_entry[1]
#print readstring
#print reader.line_num
#print options_dict
#print len(options_dict)
#print options_dict['Fitness_method']
# after the model parameters, we have blank line(s) before the data headers
# keep skipping blanks, then grab the first non-blank line
# then read the first line into a list of strings.
while (1):
    readstring = reader.next()
    if len(readstring) > 0:
        column_headers = readstring
        break
#print column_headers
# need to check if we have a 'run' column
# single run gui output doesn't produce one, so need to add to column headers
# First six cols are "run, tick, A_size_of, RNGseed, [N & K]_size_of"
# "tick is already contained in the data, but is overwritten with K_size_of
# as it needs to be moved
# set add_run_data - flag to insert corresponding columns into the numeric data
add_run_data = 0
if column_headers[0] != "run":
    column_headers[0] = "K_size_of"
    column_headers = ["run", "tick", "A_size_of", "RngSeed", "N_size_of"] + column_headers
    add_run_data = 1
    print "Processing one run GUI output format..."
else:
    print "Processing batch mode output..."
#print column_headers
# read lines from the data until we hit a blank.
# if data is numeric, put it into our 2d list of floats
countlines=0
while (1):
    try:
        readstring = reader.next()
        countlines = countlines + 1
        if len(readstring) == 0:
            print "Stopped reading"
            break
        try:
            floats.append(map(float, readstring))
        except:
            print "Bad data - not adding"
    except StopIteration:
        #print "Read:", countlines
        break
print "Read", countlines, "lines of data from file"
#print floats
xdata=array(floats)
#print xdata
# if we needed to add column headers before, we are dealing with single run
# output. If so, we need to add columns at the left of the data.
# First six cols are "run, tick, A_size_of, RNGseed, [N & K]_size_of"
# We add five (not six) cols, since "tick" is currently in the data already
# Other values are run = 1 (by def), RNGSeed (doesn't matter)
# A, N, K values are taken from the dictionary made from the header data in the file
if add_run_data == 1:
    newcol = ones((xdata.shape[0], 5), dtype="float")
    xdata = concatenate((newcol, xdata), axis=1)
    A_val = float(options_dict['A_size_of'])
    N_val = float(options_dict['N_size_of'])
    K_val = float(options_dict['K_size_of'])
    #print A_val, N_val, K_val
    xdata[:,1] = xdata[:,5] # ticks - already there but needs to move
    xdata[:,2] = A_val
    xdata[:,3] = 0 # RNG seed doesn't matter
    xdata[:,4] = N_val
    xdata[:,5] = K_val # overwrites original tick column
#print column_headers
#print xdata[1]
A_uniques = unique(xdata[:,2])
N_uniques = unique(xdata[:,4])
K_uniques = unique(xdata[:,5])
series_total = len(A_uniques) * len(N_uniques) * len(K_uniques)
# set up an array to hold averages (no columns for run number or rng seed)
# needs to move to handle multiple variables
maxticks = xdata[:,1].max()
#print series_total
#print maxticks
#print xdata.shape[1]-2
averages = zeros((series_total, maxticks, xdata.shape[1]-2), float)
# three loop setup for varying A / N / K values
series_counter = 0
Aseries_name=""
Nseries_name=""
Kseries_name=""
series_keys=[]
for A_value in A_uniques:
    if len(A_uniques) > 1:
        Aseries_name = "A=" + str(A_value) + ", "
    dataA = compress(xdata[:,2]==A_value, xdata, axis=0)
    for N_value in N_uniques:
        #if len(N_uniques)>1:
        Nseries_name = "N=" + str(N_value)
        dataAN = compress(dataA[:,4]==N_value, dataA, axis=0)
        for K_value in K_uniques:
            #if len(K_uniques)>1:
            Kseries_name = ", " + "K=" + str(K_value)
            dataANK = compress(dataAN[:,5]==K_value, dataAN, axis=0)
            series_keys.append(Aseries_name + Nseries_name + Kseries_name)
            # when multiple variables are used, run values continue to count from the
            # previous variable value (e.g. A=2 (runs 1-100), A=3 (runs 101-200))
            # we need to number the runs in ascending order from 1.
            firstrun = dataANK[:,0].min()
            lastrun = dataANK[:,0].max()
            totalruns = 1 + lastrun - firstrun
            #print firstrun, lastrun, totalruns
            # for each run, find the last actual tick data
            last_tick_array = zeros((totalruns, dataANK.shape[1]), float)
            #print last_tick_array.shape
            for k in arange(totalruns): # for each run get the data for the last tick
                this_run = compress(dataANK[:,0]==k+firstrun, dataANK, axis=0)
                last_tick_array[k] = this_run[-1]
            #print "Last tick array"
            #print last_tick_array[-1]
            print "Processing simulation " + str(series_counter+1) + "/" + str(series_total)
            for i in arange(maxticks): # for each tick value up to the maximum
                # array to hold one tick from each run for averaging
                # will contain either actual or extrapolated data
                selected_ticks = zeros((totalruns, dataANK.shape[1]), float)
                #print selected_ticks
                # get dataANK for this tick from all runs. May be empty.
                this_tick = compress(dataANK[:,1]==i+1, dataANK, axis=0)
                #print "this tick"
                #print this_tick
                for j in arange(totalruns): # for each run
                    if (i+1) < last_tick_array[j,1]: # do we have actual data?
                        #print "Using real data"
                        # if so, get it
                        selected_ticks[j] = compress(this_tick[:,0]==j+firstrun, this_tick, axis=0)
                        #print selected_ticks
                    else:
                        # if not, use the last tick we do have
                        #print "Using last tick"
                        selected_ticks[j] = last_tick_array[j]
                #print "selected_ticks"
                #print selected_ticks[0]
                averages[series_counter][i][0] = i+1 # tick number
                averages[series_counter][i][1] = selected_ticks[:,2].max() #A_size_of
                averages[series_counter][i][2] = selected_ticks[:,4].min() #N_size_of
                averages[series_counter][i][3] = selected_ticks[:,5].min() #K_size_of
                for m in xrange(6, 16):
                    #print m
                    averages[series_counter][i][m-2] = selected_ticks[:,m].mean()
            # increment to fill next index
            series_counter = series_counter + 1
# matplotlib plots
print "Plotting graphs..."
matplotlib.use('Agg')
for graph_num in (4, 5, 6, 7, 8, 9, 10, 11, 12, 13):
    ylabel(column_headers[graph_num+2])
    for ser in range(series_total):
        plot(averages[ser][:,0], averages[ser][:,graph_num], label=series_keys[ser])
    legend(loc='center right').draw_frame(0)
    show()
    savefig('nk' + column_headers[graph_num+2] + '.png')
    clf()
print "Writing CSV files"
f1 = open("nk_allticks.csv","wt")
csv1 = csv.writer(f1)
f2 = open("nk_finaltick.csv","wt")
csv2 = csv.writer(f2)
column_headers.remove('RngSeed')
# replace spaces with underscores in column headers for better file compatibility
for i in range(len(column_headers)):
    column_headers[i] = column_headers[i].replace(' ', '_')
try:
    csv1.writerow(column_headers[1:])
    csv2.writerow(column_headers[1:])
    for series in range(series_total):
        csv1.writerows(averages[series])
        csv2.writerow(averages[series][-1])
finally:
    f1.close()
    f2.close()
f3 = open("nk_allticks_crosstab.csv","wt")
csv3 = csv.writer(f3)
out_array = zeros((averages[0].shape[0], 1 +(series_total * 10)), float)
out_array[:,0] = averages[0][:,0]
headers = [1000] #[8 * series_total]
headers[0] = "tick"
datacol = 1
for column_num in (3, 4, 5, 6, 7, 8, 9, 10, 11, 12):
    for ser in range(series_total):
        headers.append((column_headers[column_num+2] + " " + series_keys[ser]).replace(' ', '_'))
        out_array[:,datacol] = averages[ser][:,column_num+1]
        #print averages[ser][0]
        datacol = datacol + 1
#print headers
#print out_array.shape
try:
    csv3.writerow(headers)
    csv3.writerows(out_array)
finally:
    f3.close()

You need to tell it which file to open. When you run the script you'd need to do something like -
python myscript.py -f myfile
Otherwise, you have not given it a file. Use the interpreter, it's great for catching these things.
>>> from optparse import OptionParser
>>> parser = OptionParser()
>>> parser.add_option("-f", "--file", dest="in_filenames",
... help="read data from FILE - enclose comma-separated file list in quotes e.g. \"FILE1, FILE2\"", metavar="FILE")
<Option at 0x29f47c8: -f/--file>
>>> (options, args) = parser.parse_args()
>>> options # Notice that in_filenames is None here
<Values at 0x29f6648: {'in_filenames': None}>
>>> import sys
>>> sys.argv
['']
>>> sys.argv = ['','-f','myfile'] # Let's explicitly set the argument
>>> (options, args) = parser.parse_args()
>>> options # Now it works...
<Values at 0x29fd848: {'in_filenames': 'myfile'}>
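If you want the script itself to fail with a readable message instead of the open() traceback whenever -f is missing, you could add a guard right after parsing. A minimal sketch (the message text is made up):
(options, args) = parser.parse_args()
if options.in_filenames is None:
    parser.error("no input file given - use -f FILE or --file FILE")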

Related

Python 'list.insert()' only saves the last result of calculation loop

I was making my automatic stock strategy yield calculation program with Python. Here's my code:
import FinanceDataReader as fdr
import numpy as np
# ...(more modules for python)

pd.options.display.float_format = '{:.5f}'.format
file_list = os.listdir('/home/sejahui/projects/stock_data_excel')

for i in range(20):
    os.chdir('/home/sejahui/projects/stock_data_excel')
    odd = file_list[i]
    data = pd.read_excel('/home/sejahui/projects/stock_data_excel/'+str(odd))

    def calMACD(data, short=5, long=25, signal=9):
        data.sort_index()
        data['MVA_25'] = data['Close'].ewm(span=long, adjust=False).mean()
        data['MVA_5'] = data['Close'].ewm(span=short, adjust=False).mean()
        data['MACD'] = data['Close'].ewm(span=short, adjust=False).mean() - data['Close'].ewm(span=long, adjust=False).mean()
        data['Signal'] = data['MACD'].ewm(span=signal, adjust=False).mean()
        #data['Buy_sign'] = (data['MACD']-data['Signal']) >= 600
        data['Buy_sign'] = np.where(data['MACD']-data['Signal'] >= 451, 'Buy', 'Sell')
        #data['Target_1'] = (data['Close']-data['Close'].shift(1))/data['Close'].shift(1)*100
        #data['Target_1'] = np.where(data['Buy_sign']=='Buy', (data['Change'])+1, 1)
        #data['Target_2'] = np.where(data['Buy_sign']=='Sell', (data['Change'])+1, 1)
        #data['Real_world'] = 1000000*data['Target_1']
        #data['Real_world_2'] = 1000000*data['Target_2']
        #data['Condition'] = np.where(data['Real_world']<1000000, data['Real_world']-data['Real_world'].shift(-2), 1)
        ##data['Condition_2'] = np.where(data['Real_world']<1000000, data['Target_1'].shift(-2), 1)
        #data['Moneyflow'] =
        #plt.plot(data['Date'], data['Real_world'])
        #data[data.Buy_sign != 'Sell']
        '''
        data['Target_1'] = np.where(data['Buy_sign']=='Buy', data['Change'], 1)
        data['Target_2'] = np.where(data['Buy_sign']=='Sell', data['Change'], 1)
        data['Yield'] = np.where(data['Buy_sign']=='Sell', data['Target_1']/data['Target_2'], 1)
        '''
        '''
        data['Result'] = data['Target_1'].cumprod()
        data['Result_2'] = data['Target_2'].cumprod()
        data['??????'] = data['Result'] - data['Result_2']
        '''
        return data

    Adjusted = calMACD(data)
    Adjusted.drop(['Change'], axis=1, inplace=True)
    Filtered = Adjusted[Adjusted.Buy_sign != 'Sell'].copy()
    #print(Filtered)
    #Filtered = (Adjusted.Buy_sign == 'Buy') #(Adjusted.Condition = 1.0)
    #Master = Adjusted.loc[Adjusted, ['Date','Buy_sign','Target_1','Real_world',]]
    #print(Adjusted)

    def backtester(Filtered):
        Filtered['Change'] = ((Filtered['Close'] - Filtered['Close'].shift(1)) / Filtered['Close'].shift(1))+1
        #data['Target_1'] = np.where(data['Buy_sign']=='Buy', (data['Change'])+1, 1)
        Filtered['Real_world'] = 1000000*Filtered['Change']
        #Filtered['Condition'] = np.where(Filtered['Real_world']<1000000, Filtered['Real_world'].shift(-2)-Filtered['Real_world'], 1)
        Filtered['Condition'] = np.where(Filtered['Real_world']<1000000, Filtered['Change'].shift(-2), 1)
        #Filtered['Target_1'] = np.where(Filtered['Buy_sign']=='Buy', (Filtered['Change'])+1, 1)
        #Filtered['Condition'] = np.where(Filtered['Real_world']<1000000, Filtered['Real_world'].shift(-2)-Filtered['Real_world'], 1)
        return Filtered

    s = backtester(Filtered)
    e = s[s.Condition != 1.00000]
    x = e.dropna()
    y = x['Condition']
    list_1 = []
    write_wb = Workbook()
    write_ws = write_wb.create_sheet('MACD&Signal gap data sheet')
    write_ws = write_wb.active
    write_ws['A1'] = 'Name'
    write_ws['B1'] = 'Profit'
    try:
        print(geometric_mean(y)*1000000*12)
    except StatisticsError as e:
        print('Sell is empty:', odd)
    else:
        d = (geometric_mean(y)*1000000*12)
        print(d, odd)
        list_1.insert(i, d)

print(list_1)
Here's the part I'm having trouble with:
s = backtester(Filtered)
e = s[s.Condition != 1.00000]
x = e.dropna()
y = x['Condition']
list_1 = []
try:
    print(geometric_mean(y)*1000000*12)
except StatisticsError as e:
    print('Sell is empty:', odd)
else:
    d = (geometric_mean(y)*1000000*12)
    print(d)
    list_1.insert(d)
print(list_1)
When I run the code above, the list only saves the last result of the try/except/else block. My intention was to save all the results. What should I change to save all of them?
Here's the output of the list:
[11772769.197974786]
Your problem is that you are using insert instead of append, and that `list_1 = []` sits inside your for loop, so the list is re-created on every iteration and only the last result survives. Note also that insert takes the position you want to insert at as its first argument; calling `list_1.insert(d)` with a single argument actually raises a TypeError rather than defaulting to an index.
To fix it, initialize the list once before the loop and simply use append instead.
else:
    d = (geometric_mean(y)*1000000*12)
    print(d)
    list_1.append(d)
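To see why only the last value survived, here is a minimal standalone sketch (made-up values) contrasting append on a list created once with insert on a list that is re-created every pass, as in the loop in the question:
results = []
for i, value in enumerate((1.5, 2.5, 3.5)):
    results.append(value)            # grows the list on every pass

reset_each_time = None
for i, value in enumerate((1.5, 2.5, 3.5)):
    reset_each_time = []             # re-created every pass, like list_1 inside the loop
    reset_each_time.insert(i, value)

print(results)          # [1.5, 2.5, 3.5]
print(reset_each_time)  # [3.5] - only the final iteration survives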
You want to use append, not insert; see Python Data Structures.
Change list_1.insert(d) to list_1.append(d)
insert expects an index as its first argument (list_1.insert(d) as written raises a TypeError), while append always adds to the end of the list.
Edit: Just noticed your answer is in the question title.

Bitcoin verify a single block in python

Currently I am trying to verify Bitcoin block 77504 on my own, but after reading the Satoshi whitepaper it seems I have more questions than answers about how to do so.
First information from the previous block:
### What we know from last block ###
# height = 77503
# id = 00000000000447829abff59b3208a08ff28b3eb184b1298929abe6dd65c3578a
# version = 1
# timestamp = 1283325019
# bits = 459874456
# nonce = 1839166754
# difficulty = 623.3869598689275
# merkle_root = f18107935e8853011e477244241b5d786966495f8c59be46c92ac323c9cc8cde
# tx_count = 6
# size = 1438
# weight = 5752
Then the information from the block i want to verify
### What we now want to mine ###
# height = 77504
# id = 00000000004582246e63ff7e0760c6f009e5ef5ce1eb5397be6f3eb9d698bda5
# version = 1
# timestamp = 1283326637
# bits = 459874456
# nonce = 191169021
# difficulty = 623.3869598689275
# merkle_root = 59c77dabd9f005c771b23b846c79c7741dc0e70d912f9470eace886b42a0d601
# tx_count = 44
# size = 11052
# weight = 44208
# txids = ["b899c55adb5a9604b72643c0f6cd5bf6c2447bb0fc035c50e13d2e471cbf5aa5","05180e3252c48a54d4d0abe9359621f54f3031fd318a812be96da0f13bfa8bf3","29d641bd4a5d4b01ceee1126af920513d52e088bad500fad1358c96962e25e28","40d52b5aa4be889739410f82f36c71fdda554b999fb14fc12aeab5bb2e6498cb","62d5e84500cc674a5172bea5755a223da974f90f614deb45c160478a8974419c","78de7a104617f58620ae9e7cf58bcd875d8043ee5046d93c9d69224c2ae39a1e","8831ad38deb23e1fbea6d376f1805aec194760b0f334a3c4b623aa0751445c9b","8a6bd0c2d74ea785d886bd6d87b6a4eb4cd35af5fb7ae3a364eb1f76b114c375","90d6da6a4b48e7330ae926cd00623fa8d94fd0a2b9a001475da22cbc49435ff9","d002da9953844c767cf7d42092b81e8c5bb03baf520d79028013fd3400bc8651","d1f8573148126e8d17641276f22ece33b8276311d93794ed2975ebb802b98fc8","d22ed765adba9c7f5fef19ff15cb89559b4148d571fcb40ee2889231ac1b8dea","f32b000adf9ab6d7a66593cb20cba4d3a3e0cbb3453608ce11a780fab532add5","32d2ff811677a8dbed4f317c9fcae4796b491bde944cca4a993734be787b4e79","4b806d44d9aff762601f21ad541c0e99a77d0a14b730774a2d7721dd094d9030","8c5258a8e3f60c9edfa55b86780a9832c8cd5f407dbe25948cd2fd87910ca4c4","bc4fcea23cd93bac13ab75bad8d23576be88a89e72f2c455932f096d6dd2a2da","ca5c53ef34ff5a2f816daf648c8dafb01680502c2c0c98b82b9527392f707e70","f9db6e9a62502dfe8057e7b1c0f3b8f145d354ee4e341233bfe8861fff143822","fc3730bbfa443558c677da6898f106ee7d5516b14e21bf369def7cb6a5bf6b8b","1cfa85d94ebfb9206ad49f421319a6ee99b339e4e8d292b866459bb742731d83","80fa7f38cc02b05b765675adba589d426e6122b1e8158726df0c55cf44c937eb","8c72683585901ff96edd14bde9c87ee91a9d54c187a15aa333e3d6b916399fd2","905e015afa4df7d9dc4a1a80a029e469258045fe9288071b16af49a2f458c2cb","bd8fab0ca0072cd230a4bb0a6efff5964756a023ca53d1f06c3fa22800fe044c","464280d62b8965255c286f1c4c5c457f594db64bdef1c8aaa7ddf776fc4d320e","625b8ec5af9ad2c1506aca8ad61670ce3acf7070fe5aabc2dec06dcda119503a","a2e06f6b0ea68cc2c9bf44d09e54832c830971961ed8ea5ec553918ab7eb48d2","a4de41f56f0970d9b1948f1e386a124860891d790f506c2e3bbe71dd289031d4","11475d2fbbc5e3aee2eff54aa9bf2f83d5f33fffce528cc9804f820e0f6a76e7","5dc019a6397c25d0e7db56f3ed2ccdc1db5642701224d56fb9ad1d1017279e7b","e5d1e0e5a2309cb07ec522a1eb56da5aa5e58ecaea6d49e278a52c1c24230dae","21d192ea46007dbeef7c9673ac158c0f9dbf80e0785380ae562a1fbb10430ae7","8fafe7a8168563c4c186d792b49fc0fa4368c6b2e5a1217f2f98b127ff1cdf87","d2410a45bcc0e4f5b7a8d84e730ffd9744e0dd0d9fb2d7e93fb71e590bf0f1fb","6103334a35171bc5a153b51dd7c94977c62822b1cec2fcac20ea9d0a959129d7","6551831774420989df2d9deeab196e14025f2e5fd502feb86dfc7ccedb917ce0","7c1a188e0c94c7d61aea1ebddb359f508c99fdd0e028887bbf3a3036a1b5bf8a","8b9c989cee69c107697b13aebd677879db48275c089ae206c85eb8db45acf50f","4195c5abf97adb2108de8aeee99cb751e2b4f9698607f60e326b9a67b9127a31","800b308f49fe86ff3323dd6240190212626d052a017dd1cad01540790604c00f","1d2fb37bab59d6f3f83f7596fde128a0b7b0f7ccd8fabc8d2a929923a268a847","8a8149d58791ace6cefd803021b4e870acca5b2c40e2e1415f423e6ec4333e32","7a1eb6b8ee1ff52648cd9a099c7658be53627732b226aa93f56d430c85a52991"]
I have prepared a small script that should calculate it for me, but no matter what, I am not able to reach the target hash 00000000004582246e63ff7e0760c6f009e5ef5ce1eb5397be6f3eb9d698bda5 to verify the mined block. It is also unclear to me where one would add his own Bitcoin wallet to get the reward of the transaction.
from hashlib import sha256
def SHA256(text):
    return sha256(text.encode("ascii")).hexdigest()

def mine(block_number, transactions, previous_hash, prefix_zeros):
    prefix_str = '0'*prefix_zeros
    text = str(block_number) + str(transactions) + str(previous_hash) + str(nonce_to_verify)
    new_hash = SHA256(text)
    if new_hash.startswith(prefix_str):
        print(f"Jipiiii! Successfully mined bitcoins with nonce value:{nonce_to_verify}")
        return new_hash
    else:
        new_hash = None
        return new_hash
### normally this is unknown and would be something like range(0, 100000000000); I just want to verify a block ###
nonce_to_verify = 191169021
### In what format are transactions presented? ###
transactions = ["b899c55adb5a9604b72643c0f6cd5bf6c2447bb0fc035c50e13d2e471cbf5aa5","05180e3252c48a54d4d0abe9359621f54f3031fd318a812be96da0f13bfa8bf3","29d641bd4a5d4b01ceee1126af920513d52e088bad500fad1358c96962e25e28","40d52b5aa4be889739410f82f36c71fdda554b999fb14fc12aeab5bb2e6498cb","62d5e84500cc674a5172bea5755a223da974f90f614deb45c160478a8974419c","78de7a104617f58620ae9e7cf58bcd875d8043ee5046d93c9d69224c2ae39a1e","8831ad38deb23e1fbea6d376f1805aec194760b0f334a3c4b623aa0751445c9b","8a6bd0c2d74ea785d886bd6d87b6a4eb4cd35af5fb7ae3a364eb1f76b114c375","90d6da6a4b48e7330ae926cd00623fa8d94fd0a2b9a001475da22cbc49435ff9","d002da9953844c767cf7d42092b81e8c5bb03baf520d79028013fd3400bc8651","d1f8573148126e8d17641276f22ece33b8276311d93794ed2975ebb802b98fc8","d22ed765adba9c7f5fef19ff15cb89559b4148d571fcb40ee2889231ac1b8dea","f32b000adf9ab6d7a66593cb20cba4d3a3e0cbb3453608ce11a780fab532add5","32d2ff811677a8dbed4f317c9fcae4796b491bde944cca4a993734be787b4e79","4b806d44d9aff762601f21ad541c0e99a77d0a14b730774a2d7721dd094d9030","8c5258a8e3f60c9edfa55b86780a9832c8cd5f407dbe25948cd2fd87910ca4c4","bc4fcea23cd93bac13ab75bad8d23576be88a89e72f2c455932f096d6dd2a2da","ca5c53ef34ff5a2f816daf648c8dafb01680502c2c0c98b82b9527392f707e70","f9db6e9a62502dfe8057e7b1c0f3b8f145d354ee4e341233bfe8861fff143822","fc3730bbfa443558c677da6898f106ee7d5516b14e21bf369def7cb6a5bf6b8b","1cfa85d94ebfb9206ad49f421319a6ee99b339e4e8d292b866459bb742731d83","80fa7f38cc02b05b765675adba589d426e6122b1e8158726df0c55cf44c937eb","8c72683585901ff96edd14bde9c87ee91a9d54c187a15aa333e3d6b916399fd2","905e015afa4df7d9dc4a1a80a029e469258045fe9288071b16af49a2f458c2cb","bd8fab0ca0072cd230a4bb0a6efff5964756a023ca53d1f06c3fa22800fe044c","464280d62b8965255c286f1c4c5c457f594db64bdef1c8aaa7ddf776fc4d320e","625b8ec5af9ad2c1506aca8ad61670ce3acf7070fe5aabc2dec06dcda119503a","a2e06f6b0ea68cc2c9bf44d09e54832c830971961ed8ea5ec553918ab7eb48d2","a4de41f56f0970d9b1948f1e386a124860891d790f506c2e3bbe71dd289031d4","11475d2fbbc5e3aee2eff54aa9bf2f83d5f33fffce528cc9804f820e0f6a76e7","5dc019a6397c25d0e7db56f3ed2ccdc1db5642701224d56fb9ad1d1017279e7b","e5d1e0e5a2309cb07ec522a1eb56da5aa5e58ecaea6d49e278a52c1c24230dae","21d192ea46007dbeef7c9673ac158c0f9dbf80e0785380ae562a1fbb10430ae7","8fafe7a8168563c4c186d792b49fc0fa4368c6b2e5a1217f2f98b127ff1cdf87","d2410a45bcc0e4f5b7a8d84e730ffd9744e0dd0d9fb2d7e93fb71e590bf0f1fb","6103334a35171bc5a153b51dd7c94977c62822b1cec2fcac20ea9d0a959129d7","6551831774420989df2d9deeab196e14025f2e5fd502feb86dfc7ccedb917ce0","7c1a188e0c94c7d61aea1ebddb359f508c99fdd0e028887bbf3a3036a1b5bf8a","8b9c989cee69c107697b13aebd677879db48275c089ae206c85eb8db45acf50f","4195c5abf97adb2108de8aeee99cb751e2b4f9698607f60e326b9a67b9127a31","800b308f49fe86ff3323dd6240190212626d052a017dd1cad01540790604c00f","1d2fb37bab59d6f3f83f7596fde128a0b7b0f7ccd8fabc8d2a929923a268a847","8a8149d58791ace6cefd803021b4e870acca5b2c40e2e1415f423e6ec4333e32","7a1eb6b8ee1ff52648cd9a099c7658be53627732b226aa93f56d430c85a52991"]
### Just a check of leading zeros... but why the difficulty of 623.3869598689275, and how do we get to the 11 zeros? ###
difficulty=11
### Last Block (77503) found ###
lastfoundblock = "00000000000447829abff59b3208a08ff28b3eb184b1298929abe6dd65c3578a"
print("start mining")
new_hash = mine(77504,transactions,lastfoundblock, difficulty)
print("finnished mining.")
print(f"Found block is: {new_hash} should be the same as 00000000004582246e63ff7e0760c6f009e5ef5ce1eb5397be6f3eb9d698bda5")
Help would be appreciated so that I can verify a single block; even pointers in the right direction would help me solve my problem.
As no one was able to answer it, here is the code to verify a block's nonce:
import hashlib, struct, binascii
from time import time
def get_target_str(bits):
    # https://en.bitcoin.it/wiki/Difficulty
    exp = bits >> 24
    mant = bits & 0xffffff
    target_hexstr = '%064x' % (mant * (1 << (8*(exp - 3))))
    print(f'T: {target_hexstr}')
    target_str = bytes.fromhex(target_hexstr)
    return target_str

def verify_nonce(version, prev_block, mrkl_root,
                 timestamp, bits_difficulty, nonce):
    target_str = get_target_str(bits_difficulty)
    header = (struct.pack("<L", version) +
              bytes.fromhex(prev_block)[::-1] +
              bytes.fromhex(mrkl_root)[::-1] +
              struct.pack("<LLL", timestamp, bits_difficulty, nonce))
    hash_result = hashlib.sha256(hashlib.sha256(header).digest()).digest()
    return bytes.hex(hash_result[::-1])
#nonce += 1
test1_version = 0x3fff0000
test1_prev_block = "0000000000000000000140ac4688aea45aacbe7caf6aaca46f16acd93e1064c3"
test1_merkle_root = "422458fced12693312058f6ee4ada19f6df8b29d8cac425c12f4722e0dc4aafd"
test1_timestamp = 0x5E664C76
test1_bits_diff = 0x17110119
test1_nonce1 = 538463288 #(0x20184C38)
test1_block_hash = "0000000000000000000d493c3c1b91c8059c6b0838e7e68fbcf8f8382606b82c"
test1_calc_block_hash = verify_nonce(test1_version,
                                     test1_prev_block,
                                     test1_merkle_root,
                                     test1_timestamp,
                                     test1_bits_diff,
                                     test1_nonce1)
print(f'S: {test1_block_hash}')
print(f'R: {test1_calc_block_hash}')
if test1_block_hash == test1_calc_block_hash:
    print("hashing is correct")
Thanks to https://github.com/razvancazacu/bitcoin-mining-crypto
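As a usage example, the same function should reproduce the hash of block 77504 from the question, assuming the header fields quoted there are accurate:
# check of block 77504, header fields taken from the question above
calc = verify_nonce(1,                       # version
                    "00000000000447829abff59b3208a08ff28b3eb184b1298929abe6dd65c3578a",  # previous block id
                    "59c77dabd9f005c771b23b846c79c7741dc0e70d912f9470eace886b42a0d601",  # merkle root
                    1283326637,              # timestamp
                    459874456,               # bits
                    191169021)               # nonce
print(calc == "00000000004582246e63ff7e0760c6f009e5ef5ce1eb5397be6f3eb9d698bda5")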

How to read large NetCDF data sets without using a for loop - Python

Good morning. I have a problem reading a large NetCDF file in Python that contains meteorological information. I need to traverse it to assemble the information and then insert it into a database, but traversing and assembling takes far too long. I know there must be more efficient ways to perform the same process; currently I access the information through a for loop, as in the code below.
content = nc.Dataset(pathFile+file)
XLONG, XLAT = content.variables["XLONG"], content.variables["XLAT"]
Times = content.variables["Times"]   # times, in b'...' byte-string format
RAINC = content.variables["RAINC"]   # rain
Q2 = content.variables["Q2"]         # specific humidity
T2 = content.variables["T2"]         # temperature
U10 = content.variables["U10"]       # zonal wind
V10 = content.variables["V10"]       # meridional wind
SWDOWN = content.variables["SWDOWN"] # incident radiation
PSFC = content.variables["PSFC"]     # surface pressure
SST = content.variables["SST"]       # sea surface temperature
CLDFRA = content.variables["CLDFRA"] # cloud fraction
for c2 in range(len(XLONG[0])):
    for c3 in range(len(XLONG[0][c2])):
        position += 1
        for hour in range(len(Times)):
            dateH = getDatetimeInit(dateFormatFile.hour) if hour == 0 else getDatetimeForHour(hour, dateFormatFile.hour)
            hourUTC = getHourUTC(hour)
            RAINH = str(RAINC[hour][0][c2][c3])
            Q2H = str(Q2[hour][0][c2][c3])
            T2H = str(convertKelvinToCelsius(T2[hour][0][c2][c3]))
            U10H = str(U10[hour][0][c2][c3])
            V10H = str(V10[hour][0][c2][c3])
            SWDOWNH = str(SWDOWN[hour][0][c2][c3])
            PSFCH = str(PSFC[hour][0][c2][c3])
            SSTH = str(SST[hour][0][c2][c3])
            CLDFRAH = str(CLDFRA[hour][0][c2][c3])
            rowData = [idRun, functions.IDMODEL, idTime, position, dateH.year, dateH.month, dateH.day, dateH.hour, RAINH, Q2H, T2H, U10H, V10H, SWDOWNH, PSFCH, SSTH, CLDFRAH]
            dataProcess.append(rowData)
I would use NumPy. Let us assume you have a netCDF file with 2 variables, "t2" and "slp". Then you could use the following code to vectorize your data:
#!/usr/bin/env ipython
# ---------------------
import numpy as np
from netCDF4 import Dataset
# ---------------------
filein = 'test.nc'
ncin = Dataset(filein);
tair = ncin.variables['t2'][:];
slp = ncin.variables['slp'][:];
ncin.close();
# -------------------------
tairseries = np.reshape(tair,(np.size(tair),1));
slpseries = np.reshape(slp,(np.size(slp),1));
# --------------------------
## if you want characters:
#tairseries = np.array([str(val) for val in tairseries]);
#slpseries = np.array([str(val) for val in slpseries]);
# --------------------------
rowdata = np.concatenate((tairseries,slpseries),axis=1);
# if you want characters, do this in the end:
row_asstrings = [[str(vv) for vv in val] for val in rowdata]
# ---------------------------
Nevertheless, I have a feeling that using strings is not a very good idea. In my example, the conversion from numerical arrays to strings took quite a long time, so I did not implement it before concatenation.
If you also want some time/location information, you can do it like this:
#!/usr/bin/env ipython
# ---------------------
import numpy as np
from netCDF4 import Dataset
# ---------------------
filein = 'test.nc'
ncin = Dataset(filein);
xin = ncin.variables['lon'][:]
yin = ncin.variables['lat'][:]
timein = ncin.variables['time'][:]
tair = ncin.variables['t2'][:];
slp = ncin.variables['slp'][:];
ncin.close();
# -------------------------
tairseries = np.reshape(tair,(np.size(tair),1));
slpseries = np.reshape(slp,(np.size(slp),1));
# --------------------------
## if you want characters:
#tairseries = np.array([str(val) for val in tairseries]);
#slpseries = np.array([str(val) for val in slpseries]);
# --------------------------
rowdata = np.concatenate((tairseries,slpseries),axis=1);
# if you want characters, do this in the end:
#row_asstrings = [[str(vv) for vv in val] for val in rowdata]
# ---------------------------
# =========================================================
nx = np.size(xin); ny = np.size(yin); ntime = np.size(timein);
xm, ym = np.meshgrid(xin, yin);
xmt = np.tile(xm, (ntime, 1, 1)); ymt = np.tile(ym, (ntime, 1, 1))
timem = np.tile(timein[:, np.newaxis, np.newaxis], (1, ny, nx));
# to make sure the array sizes match, use the size of one of the variables
xvec = np.reshape(xmt, (np.size(tair), 1));
yvec = np.reshape(ymt, (np.size(tair), 1));
timevec = np.reshape(timem, (np.size(tair), 1));
rowdata = np.concatenate((xvec, yvec, timevec, tairseries, slpseries), axis=1);
In any case, with variable sizes (744,150,150), it took less than 2 seconds to vectorize 2 variables.
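Since the stated goal in the question is inserting the assembled rows into a database, the flattened rowdata can then be written in one bulk call instead of row by row. A minimal sketch, assuming a hypothetical SQLite table whose five columns match the concatenation above:
import sqlite3

conn = sqlite3.connect('weather.db')  # hypothetical database file
conn.execute('CREATE TABLE IF NOT EXISTS obs (lon REAL, lat REAL, time REAL, t2 REAL, slp REAL)')
# executemany sends the whole vectorized array as one batch
conn.executemany('INSERT INTO obs VALUES (?, ?, ?, ?, ?)', rowdata.tolist())
conn.commit()
conn.close()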

ArcGIS Python Map Book PDF not working blank PDF

The purpose of the code is to make a PDF map book that displays all of the large lakes in North America, but when I run it I get a blank PDF. How can I fix this?
## Import arcpy module
import arcpy
import math
import os
from arcpy import env
arcpy.env.overwriteOutput = True
# Define inputs and outputs - Script arguments
arcpy.env.workspace = r"F:\Geog173\Lab7\Lab7_Data"
Lakes = "NA_Big_Lakes.shp"
Cities = "NA_Cities.shp"
NA = "North_America.shp"
##Python arguments
## Arguments = NA_Big_Lakes.shp NA_Cities.shp New_Lakes.shp Center_Lakes.shp
Lakes= 'NA_Big_Lakes.shp'
NA = 'North_America.shp'
Cities = 'NA_Cities.shp'
##New_Lakes = 'New_Lakes.shp'
##Center_Lakes = 'Center_Lakes.shp'
# Identify the geometry field
desc = arcpy.Describe(Lakes)
shapeName = desc.ShapeFieldName
# Identify the geometry field in Cities shapefile
##desc = arcpy.Describe(Cities)
##shapefieldnameCity = desc.ShapeFieldName
#Get lake cursor
inrows = arcpy.SearchCursor(Lakes)
# Set up variables for output path and PDF file name
outDir = r"F:\Geog173\Lab7\Lab7_Data"
finalMapPDF_filename = outDir + r"\NA_Big_Lake_Mapbook.pdf"
# Check whether the mapbook PDF exists. If it does, delete it.
if os.path.exists(finalMapPDF_filename):
    os.remove(finalMapPDF_filename)
# Create map book PDF
finalMapPDF = arcpy.mapping.PDFDocumentCreate(finalMapPDF_filename)
# Create MapDocument object pointing to specified mxd
mxd = arcpy.mapping.MapDocument(outDir + r"\OriginalMap.mxd")
# Get dataframe
df = arcpy.mapping.ListDataFrames(mxd)[0]
# ----------------------------------------------------------------------------#
# Start appending pages. Title page first.
# ----------------------------------------------------------------------------#
# Find text element with value "test", and replace it with other value
mapText = "A Map Book for North American Large Lakes " + '\n\r' + "Kishore, A., Geog173, Geography, UCLA" + '\n\r' + " Lake number: 18" + '\n\r' + " Total area: 362117 km2" + '\n\r' + " Mean area: 20118 km2"
print mapText
for elm in arcpy.mapping.ListLayoutElements(mxd, "TEXT_ELEMENT"):
    if elm.text == "test":
        elm.text = mapText
arcpy.RefreshTOC()
arcpy.RefreshActiveView()
#df.extent = feature.extent
arcpy.mapping.ExportToPDF(mxd, outDir + r"\TempMapPages.pdf")
# Append multi-page PDF to finalMapPDF
finalMapPDF.appendPages(outDir + r"\TempMapPages.pdf")
#initialize text value, so it can be reused in next iteration
for elm in arcpy.mapping.ListLayoutElements(mxd, "TEXT_ELEMENT"):
    if elm.text == mapText:
        elm.text = "test"
# ----------------------------------------------------------------------------#
# Loop through each lake
# ----------------------------------------------------------------------------#
# Loop through each row/feature
lakecount = 0
for row in inrows:
    lakecount = lakecount + 1
    CITY_NAME = ""
    CNTRY_NAME = ""
    ADMIN_NAME = ""
    POP_CLASS = ""
    DISTANCE = 0
    XY = ""
    #print "shapeName", shapeName
    # Create the geometry object
    feature = row.getValue(shapeName)
    mapText = "Lake FID: " + str(row.FID) + ", Area (km2): " + str(row.Area_km2)
    print mapText
    # Find text element with value "test", and replace it with other value
    for elm in arcpy.mapping.ListLayoutElements(mxd, "TEXT_ELEMENT"):
        if elm.text == "test":
            elm.text = mapText
    arcpy.RefreshTOC()
    arcpy.RefreshActiveView()
    df.extent = feature.extent
    arcpy.mapping.ExportToPDF(mxd, outDir + r"\TempMapPages.pdf")
    # Append multi-page PDF to finalMapPDF
    finalMapPDF.appendPages(outDir + r"\TempMapPages.pdf")
# Set up properties for Adobe Reader and save PDF.
finalMapPDF.updateDocProperties(pdf_open_view="USE_THUMBS",
                                pdf_layout="SINGLE_PAGE")
finalMapPDF.saveAndClose()
# Done. Clean up and let user know the process has finished.
del row, inrows
del mxd, finalMapPDF
print "Map book for lakes in North America is complete!"
First off, you should remove the last lines of your code where you delete the mxd. Run the code again and inspect the MXD: are the data layers drawing properly? I recommend getting the code completely working before adding file cleanup, so you can identify potential errors.
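For example, a quick check (a sketch, assuming the arcpy 10.x mapping module and the same mxd object) to confirm the expected layers are present and visible before exporting:
# list each layer in the map document and whether it is visible
for lyr in arcpy.mapping.ListLayers(mxd):
    print lyr.name, lyr.visible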

Why is my array not being filled in this python script?

I have a pyramid/python application with the following view callable:
@view_config(route_name='home_page', renderer='templates/edit.pt')
def home_page(request):
    if 'form.submitted' in request.params:
        name = request.params['name']
        input_file = request.POST['stl'].file
        vertices, normals = [], []
        if input_file.read(5) == b'solid':
            for line in input_file:
                parts = line.split()
                if parts[0] == 'vertex':
                    vertices.append(map(float, parts[1:4]))
                elif parts[0] == 'facet':
                    normals.append(map(float, parts[2:5]))
            ordering = []
            N = len(normals)
            for i in range(0, N):
                ordering.append([3*i, 3*i+1, 3*i+2])
            data = [vertices, ordering]
        else:
            f = input_file
            points = []
            triangles = []
            normals = []

            def unpack(f, sig, l):
                s = f.read(l)
                fb.append(s)
                return struct.unpack(sig, s)

            def read_triangle(f):
                n = unpack(f, "<3f", 12)
                p1 = unpack(f, "<3f", 12)
                p2 = unpack(f, "<3f", 12)
                p3 = unpack(f, "<3f", 12)
                b = unpack(f, "<h", 2)
                normals.append(n)
                l = len(points)
                points.append(p1)
                points.append(p2)
                points.append(p3)
                triangles.append((l, l+1, l+2))
                #bytecount.append(b[0])

            def read_length(f):
                length = struct.unpack("@i", f.read(4))
                return length[0]

            def read_header(f):
                f.seek(f.tell()+80)

            read_header(f)
            l = read_length(f)
            try:
                while True:
                    read_triangle(f)
            except Exception, e:
                #print "Exception", e[0]
                #write_as_ascii(outfilename)
                pass
            data = [points, triangles]
        jsdata = json.dumps(data)
        renderer_dict = dict(name=name, data=jsdata)
        path = shortuuid.uuid()
        html_string = render('tutorial:templates/view.pt', renderer_dict, request=request)
        s3 = boto.connect_s3(aws_access_key_id='AKIAIJB6L7Q', aws_secret_access_key='sId01dYCMhl0wX')
        bucket = s3.get_bucket('cubes.supercuber.com')
        k = Key(bucket)
        k.key = '%(path)s' % {'path': path}
        k.set_contents_from_string(html_string, headers={'Content-Type': 'text/html'})
        k.set_acl('public-read')
        return HTTPFound(location="http://cubes.supercuber.com/%(path)s" % {'path': path})
    return {}
As you can see, the code checks whether an uploaded STL file is ASCII (if it starts with the word "solid") or binary. If it's ASCII, everything works fine and the data variable gets filled with the vertices and ordering. However, if it doesn't start with "solid" and is a binary STL file, the data variable never gets filled. Why is this?
You read the first 5 bytes to check for the filetype, but never reset the file position to the beginning.
Add a call to .seek(0):
f=input_file
f.seek(0)
You can always seek relatively without a call to f.tell():
f.seek(80, 1) # relative seek, you can use `os.SEEK_CUR` as well.
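A minimal standalone sketch of the failure mode, using made-up bytes rather than a real STL file:
from io import BytesIO

f = BytesIO(b"binary-stl-header" + b"\x00" * 80)
print(f.read(5))  # checks the type marker, but leaves the position at byte 5
# without f.seek(0) the binary parser starts 5 bytes in and reads misaligned data
f.seek(0)
print(f.read(5))  # reads from the true start of the file again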
