I am new to pygame and I am trying to code a game and with it I am using an engine by a youtuber as they used it in their tutorial. Everyhting is working fine but for some reason I have a problem with loading animations. It shows "IndexError: list index out of range" and says that to
animation_id = entity_info[1]
Does anyone know how to fix this? Here is the code to the 2 problems:
global animation_database
animation_database = {}
global animation_higher_database
animation_higher_database = {}
def load_animations(path):
global animation_higher_database, e_colorkey
f = open(path + 'entity_animations.txt','r')
data = f.read()
f.close()
for animation in data.split('\n'):
sections = animation.split(' ')
anim_path = sections[0]
entity_info = anim_path.split('/')
entity_type = entity_info[0]
animation_id = entity_info[1]
timings = sections[1].split(';')
tags = sections[2].split(';')
sequence = []
n = 0
timing: str
for timing in timings:
sequence.append([n, int(timing)])
n += 1
anim = animation_sequence(sequence,path + anim_path,e_colorkey)
if entity_type not in animation_higher_database:
animation_higher_database[entity_type] = {}
animation_higher_database[entity_type][animation_id] = [anim.copy(),tags]
This part is my code:
e.load_animations(r'C:/Users/zo432/PycharmProjects/New game/data/images/entities/')
If you need of the other code just message I can give you.
Related
I am trying to add data that I am reading from a series of JSON files to a Numpy array (or whatever data collection would work best). My idea, is that I want to sort a collection of episodes of a tv show by episode title.
The problem I have encountered, is actually creating the collection from the data.
The intent, is that I want to be able to have a collection of the items found within the for loop [a,b,c,d]; for each episode of the show.
Is a Numpy array the best way to go about making this collection, or should I use something else?
season1 = open('THEJSONFILES\seasonone.json', 'r')
season_array = np.array(['episodeTitle','seasonNum', 'episodeNum', 'plotContents'])
def ReadTheDarnJsonFile(jsonTitle):
seasondata = jsonTitle.read()
seasonobj = j.loads(seasondata)
list = (seasonobj['episodes'])
for i in range(len(list)):
a = str(list[i].get('title'))
b = str(list[i].get('seasonNumber'))
c = str(list[i].get('episodeNumber'))
d = str(list[i].get('plot'))
print(a, b, c, d)
print("----------------")
# np.append(season_array, [a,b,c,d]) this is not correct
ReadTheDarnJsonFile(season1)
print(season_array)
2 notes. First I would avoid using list as a variable name because it is a keyword in python. Second I would recommend using a custom class for your data for maximum readability.
season1 = open('THEJSONFILES\seasonone.json', 'r')
season_array = np.array(['episodeTitle','seasonNum', 'episodeNum', 'plotContents'])
class episode:
def __init__(self,title,seasonNumber,episodeNumber,plot):
self.title = title
self.seasonNumber = seasonNumber
self.episodeNumber = episodeNumber
self.plot = plot
def summary(self):
print("Season "+str(self.seasonNumber)+" Episode "+str(self.episodeNumber))
print(self.title)
print(self.plot)
def ReadTheDarnJsonFile(jsonTitle):
seasondata = jsonTitle.read()
seasonobj = j.loads(seasondata)
episodes = (seasonobj['episodes'])
season_array = []
for i in range(len(episodes)):
a = str(list[i].get('title'))
b = str(list[i].get('seasonNumber'))
c = str(list[i].get('episodeNumber'))
d = str(list[i].get('plot'))
season_array.append(episode(a,b,c,d)) this is not correct
return season_array
season_array = Read
TheDarnJsonFile(season1)
for item in season_array:
item.summary()
Here is what I ended up doing.
import json as j
import pandas as pd
emptyArray = []
season1 = open('THEJSONFILES\seasonone.json', 'r')
season2 = open('THEJSONFILES\seasontwo.json', 'r')
season3 = open('THEJSONFILES\seasonthree.json', 'r')
season4 = open('THEJSONFILES\seasonfour.json', 'r')
season5 = open('THEJSONFILES\seasonfive.json', 'r')
season6 = open('THEJSONFILES\seasonsix.json', 'r')
season7 = open('THEJSONFILES\seasonseven.json', 'r')
columnData = ["episodeTitle", "seasonIndex", "episodeIndex", "plot", "imageURL"]
finalDf = pd.DataFrame
def ReadTheDarnJsonFile(jsonTitle):
df = pd.DataFrame(columns = columnData)
seasonData = jsonTitle.read()
seasonObj = j.loads(seasonData)
currentSeasonList = (seasonObj['episodes'])
for i in range(len(currentSeasonList)):
tempTitle = str(currentSeasonList[i].get('title'))
tempSN = str(currentSeasonList[i].get('seasonNumber'))
tempEN = str(currentSeasonList[i].get('episodeNumber'))
tempPlot = str(currentSeasonList[i].get('plot'))
tempImage = str(currentSeasonList[i].get('image'))
dataObj = pd.Series([tempTitle, tempSN, tempEN, tempPlot, tempImage], index=(df.columns))
df.loc[i] = dataObj
emptyArray.append(df)
ReadTheDarnJsonFile(season1)
ReadTheDarnJsonFile(season2)
ReadTheDarnJsonFile(season3)
ReadTheDarnJsonFile(season4)
ReadTheDarnJsonFile(season5)
ReadTheDarnJsonFile(season6)
ReadTheDarnJsonFile(season7)
finalDf = pd.concat(emptyArray)
print(emptyArray)
holyOutput = finalDf.sort_values(by=['episodeTitle'])
holyOutput.reset_index(inplace=True)
holyOutput.to_json("P:\\ProjectForStarWarsCloneWarsJson\JSON\OutputJsonV2.json")
I was making my automatic stock strategy yield calculation program with Python. Here's my code:
import FinanceDataReader as fdr
import numpy as np
# ...(more modules for python)
pd.options.display.float_format = '{:.5f}'.format
file_list = os.listdir('/home/sejahui/projects/stock_data_excel')
for i in range(20):
os.chdir('/home/sejahui/projects/stock_data_excel')
odd = file_list[i]
data = pd.read_excel('/home/sejahui/projects/stock_data_excel/'+str(odd))
def calMACD(data, short=5, long=25, signal=9):
data.sort_index()
data['MVA_25']=data['Close'].ewm(span=long, adjust=False).mean()
data['MVA_5']=data['Close'].ewm(span=short, adjust=False).mean()
data['MACD']=data['Close'].ewm(span=short, adjust=False).mean() - data['Close'].ewm(span=long, adjust=False).mean()
data['Signal']=data['MACD'].ewm(span=signal, adjust=False).mean( )
#data['Buy_sign']=(data['MACD']-data['Signal']) >=600
data['Buy_sign']=np.where(data['MACD']-data['Signal'] >=451, 'Buy' , 'Sell' )
#data['Target_1']=(data['Close']-data['Close'].shift(1))/data['Close'].shift(1)*100
#data['Target_1']=np.where(data['Buy_sign']=='Buy', (data['Change'])+1,1)
#data['Target_2']=np.where(data['Buy_sign']=='Sell', (data['Change'])+1,1)
#data['Real_world']= 1000000*data['Target_1']
#data['Real_world_2']= 1000000*data['Target_2']
#data['Condition'] = np.where(data['Real_world']<1000000, data['Real_world']-data['Real_world'].shift(-2),1)
##data['Condition_2'] = np.where(data['Real_world']<1000000, data['Target_1'].shift(-2),1)
#data['Moneyflow'] =
#plt.plot(data['Date'], data['Real_world'])
#data[data.Buy_sign !='Sell']
'''
data['Target_1']=np.where(data['Buy_sign']=='Buy', data['Change'],1)
data['Target_2']=np.where(data['Buy_sign']=='Sell', data ['Change'],1)
data['Yield']=np.where(data['Buy_sign']=='Sell', data['Target_1']/data['Target_2'],1 )
'''
'''
data['Result']=data['Target_1'].cumprod()
data['Result_2']=data['Target_2'].cumprod()
data['??????'] = data['Result'] - data['Result_2']
'''
return data
Adjusted = calMACD(data)
Adjusted.drop(['Change'], axis=1, inplace = True)
Filtered = Adjusted[Adjusted.Buy_sign!='Sell'].copy()
#print(Filtered)
#Filtered = (Adjusted.Buy_sign =='Buy') #(Adjusted.Condition = 1.0)
#Master = Adjusted.loc[Adjusted,['Date','Buy_sign','Target_1','Real_world',]]
#print(Adjusted)
def backtester(Filtered):
Filtered['Change'] = ((Filtered['Close'] - Filtered['Close'].shift(1)) / Filtered['Close'].shift(1))+1
#data['Target_1']=np.where(data['Buy_sign']=='Buy', (data['Change'])+1,1)
Filtered['Real_world'] = 1000000*Filtered['Change']
#Filtered['Condition'] = np.where(Filtered['Real_world']<1000000, Filtered['Real_world'].shift(-2)-Filtered['Real_world'],1)
Filtered['Condition'] = np.where(Filtered['Real_world']<1000000, Filtered['Change'].shift(-2),1)
#Filtered['Target_1'] = np.where(Filtered['Buy_sign']=='Buy', (Filtered['Change'])+1,1)
#Filtered['Condition'] = np.where(Filtered['Real_world']<1000000, Filtered['Real_world'].shift(-2)-Filtered['Real_world'],1)
return Filtered
s = backtester(Filtered)
e = s[s.Condition!=1.00000]
x = e.dropna()
y = x['Condition']
list_1 = []
write_wb = Workbook()
write_ws = write_wb.create_sheet('MACD&Signal gap data sheet')
write_ws = write_wb.active
write_ws['A1'] = 'Name'
write_ws['B1'] = 'Profit'
try:
print(geometric_mean(y)*1000000*12)
except StatisticsError as e:
print ('Sell is empty':',odd)
else:
d = (geometric_mean(y)*1000000*12)
print(d,odd)
list_1.insert(i,d)
Print(list_1)
Here's the part where I'm troubling with:
s = backtester(Filtered)
e = s[s.Condition!=1.00000]
x = e.dropna()
y = x['Condition']
list_1 = []
try:
print(geometric_mean(y)*1000000*12)
except StatisticsError as e:
print ('Sell is empty':',odd)
else:
d = (geometric_mean(y)*1000000*12)
print(d)
list_1.insert(d)
print(list_1)
When I initiate the code where I am having problems, list only saves the last result of 'try, except, else' function. My intention was saving all the results. What change should I give to save all the results?
Here's the output of the list:
[11772769.197974786]
Your problem is that you are using insert instead of append and the main difference that insert takes a second argument for the position that you want to insert your element at and when none is provided it is 0 by default so you are consistently inserting at the same index resulting in a list with only the last element at the first position.
To fix that simply use append instead.
else:
d = (geometric_mean(y)*1000000*12)
print(d)
list_1.append(d)
You want to use append, not insert. see Python Data Structures
Change list_1.insert(d) to list_1.append(d)
The insert is defaulting to index 0 and just updating it each time.
Edit: Just noticed your answer is in the question title.
I'm working on a web scraping project, and have all the right code that returns me the json data in the format that I want if I used the #print command below, but when I got to run the same code except through Pandas Dataframe it only returns the first row of Data that I'm looking for. Just running the print, it returns the expected 17 rows of data I'm looking for. Dataframe to CSV gives me the first row only. Totally stumped! So grateful for anyone's help!
for item in response['body']:
DepartureDate = item['legs'][0][0]['departDate']
ReturnDate = item['legs'][1][0]['departDate']
Airline = item['legs'][0][0]['airline']['code']
Origin = item['legs'][0][0]['depart']
Destination = item['legs'][0][0]['destination']
OD = (Origin + Destination)
TrueBaseFare = item['breakdown']['baseFareAmount']
YQYR = item['breakdown']['fuelSurcharge']
TAX = item['breakdown']['totalTax']
TTL = item['breakdown']['totalFareAmount']
MARKEDUPTTL = item['breakdown']['totalCalculatedFareAmount']
MARKUP = ((MARKEDUPTTL - TTL) / (TTL)*100)
FBC = item['fareBasisCode']
#print(DepartureDate,ReturnDate,Airline,OD,TrueBaseFare,YQYR,TAX,TTL,MARKEDUPTTL,MARKUP,FBC)
MI = pd.DataFrame(
{'Dept': [DepartureDate],
'Ret': [ReturnDate],
'AirlineCode': [Airline],
'Routing': [OD],
'RealFare': [TrueBaseFare],
'Fuel': [YQYR],
'Taxes': [TAX],
'RealTotal': [TTL],
'AgencyTotal': [MARKEDUPTTL],
'Margin': [MARKUP],
'FareBasis': [FBC],
})
df = pd.DataFrame(MI)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
df.to_csv('MITest7.csv')
When you print all your values after the cycle, you will see that you get only the last values. To resolve this problem you need to create lists and put there your values.
Try this:
DepartureDate = []
ReturnDate = []
Airline = []
Origin = []
Destination = []
OD = []
TrueBaseFare = []
YQYR = []
TAX = []
TTL = []
MARKEDUPTTL = []
MARKUP = []
FBC = []
for item in response['body']:
DepartureDate.append(item['legs'][0][0]['departDate'])
ReturnDate.append(item['legs'][1][0]['departDate'])
Airline.append(item['legs'][0][0]['airline']['code'])
Origin.append(item['legs'][0][0]['depart'])
Destination.append(item['legs'][0][0]['destination'])
OD.append((Origin[-1] + Destination[-1]))
TrueBaseFare.append(item['breakdown']['baseFareAmount'])
YQYR.append(item['breakdown']['fuelSurcharge'])
TAX.append(item['breakdown']['totalTax'])
TTL.append(item['breakdown']['totalFareAmount'])
MARKEDUPTTL.append(item['breakdown']['totalCalculatedFareAmount'])
MARKUP.append(((MARKEDUPTTL[-1] - TTL[-1]) / (TTL[-1])*100))
FBC.append(item['fareBasisCode'])
I'm writing a Python script that will color in various areas of my city's Census Block Groups (of which there are 18) different colors according to their respective median household incomes on a map that's in the SVG format.
Sounds simple enough, right? Well, I can't figure out how, though I'm making slight progress. What I've tried so far is making a list of each of the block group paths according to how the SVG references them, making a list of the median household incomes, then passing in the code that colors them. However, this just.. doesn't seem to be working, for whatever reason. Can any of you wonderful people help figure out where I'm misfiring?
import csv
from bs4 import BeautifulSoup
icbg = []
reader = csv.reader(open('censusdata.csv'),delimiter=",")
#read and get income
for row in reader:
income = row[6]
income = int(income)
icbg.append(income)
svg = open('NM2.svg','r')
soup = BeautifulSoup(svg,"lxml")
#find CBGs and incomes
path1 = soup.find('path')
path2 = path1.find_next('path')
path3 = path2.find_next('path')
path4 = path3.find_next('path')
path5 = path4.find_next('path')
path6 = path5.find_next('path')
path7 = path6.find_next('path')
path8 = path7.find_next('path')
path9 = path8.find_next('path')
path10 = path9.find_next('path')
path11 = path10.find_next('path')
path12 = path11.find_next('path')
path13 = path12.find_next('path')
path14 = path13.find_next('path')
path15 = path14.find_next('path')
path16 = path15.find_next('path')
path17 = path16.find_next('path')
path18 = path17.find_next('path')
incomep1 = icbg[0]
incomep2 = icbg[1]
incomep3 = icbg[2]
incomep4 = icbg[3]
incomep5 = icbg[4]
incomep6 = icbg[5]
incomep7 = icbg[6]
incomep8 = icbg[7]
incomep9 = icbg[8]
incomep10 = icbg[9]
incomep11 = icbg[10]
incomep12 = icbg[11]
incomep13 = icbg[12]
incomep14 = icbg[13]
incomep15 = icbg[14]
incomep16 = icbg[15]
incomep17 = icbg[16]
incomep18 = icbg[17]
paths = (path1, path2, path3, path4, path5, path6, path7, path8, path9, path10,
path11, path12, path13, path14, path15, path16, path17, path18)
incomes = (incomep1,incomep2,incomep3,incomep4,incomep5,incomep6,incomep7,incomep8,
incomep9,incomep10,incomep11,incomep12,incomep13,incomep14,incomep15,incomep16,incomep17,incomep18)
#set colors
colors = ['fee5d9','fcae91','fb6a4a','de2d26','a50f15']
for p in paths:
for i in range(0,17):
it = incomes[i]
if it > 20000:
color_class = 2
elif it > 25000:
color_class = 1
elif it > 30000:
color_class = 3
elif it > 35000:
color_class = 4
color = colors[color_class]
path_style = "font-size:12px;fill:#%s;fill-rule:nonzero;stroke:#000000;stroke-opacity:1;stroke-width:0.1;stroke-miterlimit:4;stroke-dasharray:none;stroke-linecap:butt;marker-start:none;stroke-linejoin:bevel" % color
p['style'] = path_style
print(soup.prettify())
Running this gives me an SVG file like so: fill:#fb6a4a;fill-rule:nonzero;stroke:#000000;stroke-opacity:1;stroke-width:0.1;stroke-miterlimit:4;stroke-dasharray:none;stroke-linecap:butt;marker-start:none;stroke-linejoin:bevel"> comes up 18 times, meaning for every available path, even though these paths have different incomes.
could the problem be with the way I wrote my comparisons?
From my understanding of what you are trying to do, your problem is that you have 2 for loops instead of one. You should loop through the paths and incomes at the same time. The way you are doing it now is you are looping through all the incomes for each path. The following code simply moves the paths into the same loop as the income so they are looped through at the same time.
for i in range(0,17):
it = incomes[i]
p = paths[i]
if it > 20000:
color_class = 2
elif it > 25000:
color_class = 1
elif it > 30000:
color_class = 3
elif it > 35000:
color_class = 4
color = colors[color_class]
path_style = "font-size:12px;fill:#%s;fill-rule:nonzero;stroke:#000000;stroke-opacity:1;stroke-width:0.1;stroke-miterlimit:4;stroke-dasharray:none;stroke-linecap:butt;marker-start:none;stroke-linejoin:bevel" % color
p['style'] = path_style
The following code runs and does what it's supposed to do but I'm having trouble using the XlxsWriter module in Python in order to get some of the results into a .xlxs file. The goal is for the output to contain information from the function block_trial where it tracks each block and gives me the all_powers variable the corresponds with that trial. Installing the module into my user directory goes smoothly but it won't give me a file that gives me both sets of information.
At the moment, I'm using:
import xlsxwriter
workbook = xlsxwriter.Workbook('SRT_data.xlsx')
worksheet = workbook.add_worksheet()
Widen the first column to make the text clearer.
worksheet.set_column('A:A', 20)
Add a bold format to use to highlight cells.
bold = workbook.add_format({'bold': True})
Write some simple text.
worksheet.write('A1', 'RT')
workbook.close()
But can't get any of my data to show up.
import random, math
num_features = 20
stim_to_vect = {}
all_stim = [1,2,3,4,5]
all_features = range(num_features)
zeros=[0 for i in all_stim]
memory=[]
def snoc(xs,x):
new_xs=xs.copy()
new_xs.append(x)
return new_xs
def concat(xss):
new_xs = []
for xs in xss:
new_xs.extend(xs)
return new_xs
def point_wise_mul(xs,ys):
return [x*y for x,y in zip(xs,ys)]
for s in snoc(all_stim, 0):
stim_to_vect[s]= []
for i in all_features:
stim_to_vect[s].append(random.choice([-1, 1]))
def similarity(x,y):
return(math.fsum(point_wise_mul(x,y))/math.sqrt(math.fsum(point_wise_mul(x,x))*math.fsum(point_wise_mul(y,y))))
def echo(probe,power):
echo_vect=[]
for j in all_features:
total=0
for i in range(len(memory)):
total+=math.pow(similarity(probe, memory[i]),power)*memory[i][j]
echo_vect.append(total)
return echo_vect
fixed_seq=[1,5,3,4,2,1,3,5,4,2,5,1]
prev_states={}
prev_states[0]=[]
prev=0
for curr in fixed_seq:
if curr not in prev_states.keys():
prev_states[curr]=[]
prev_states[curr].append(prev)
prev=curr
def update_memory(learning_parameter,event):
memory.append([i if random.random() <= learning_parameter else 0 for i in event])
for i in snoc(all_stim,0):
for j in prev_states[i]:
curr_stim = stim_to_vect[i]
prev_resp = stim_to_vect[j]
curr_resp = stim_to_vect[i]
update_memory(1.0, concat([curr_stim, prev_resp, curr_resp]))
def first_part(x):
return x[:2*num_features-1]
def second_part(x):
return x[2*num_features:]
def compare(curr_stim, prev_resp):
for power in range(1,10):
probe=concat([curr_stim,prev_resp,zeros])
theEcho=echo(probe,power)
if similarity(first_part(probe),first_part(theEcho))>0.97:
curr_resp=second_part(theEcho)
return power,curr_resp
return 10,zeros
def block_trial(sequence):
all_powers=[]
prev_resp = stim_to_vect[0]
for i in sequence:
curr_stim = stim_to_vect[i]
power,curr_resp=compare(curr_stim,prev_resp)
update_memory(0.7,concat([curr_stim,prev_resp,curr_resp]))
all_powers.append(power)
prev_resp=curr_resp
return all_powers