Extracting Nested List-Dictionaries to Pandas Series in a DataFrame - python

I have a pandas DataFrame that I extracted from a JSON file of breweries I'm interested in. Most of the columns are nested lists of dictionaries. However, two columns, 'hours' and 'memberships', are being problematic.
I'd like to extract the 'hours' column into 7 columns: "Mon_Hours", "Tue_Hours", ... "Sun_Hours".
I have tried and tried to figure this out, but these two columns are proving challenging.
Here is a link to the initial data: https://www.coloradobrewerylist.com/wp-json/cbl_api/v1/locations/?location-type%5Bnin%5D=404,405&page_size=1000&page_token=1
and here is my code:
import requests
import re
import pandas as pd
import numpy as np
import csv
import json
from datetime import datetime
### get the data from the Colorado Brewery list
url = "https://www.coloradobrewerylist.com/wp-json/cbl_api/v1/locations/?location-type%5Bnin%5D=404,405&page_size=1000&page_token=1"
payload={}
headers = {}
response = requests.request("GET", url, headers=headers, data=payload)
data=response.json()
### convert results to table
pd.set_option('display.max_columns', None)
brewdf = pd.DataFrame.from_dict(data['results'])
#brewdf
############################################
#### CLEAN UP NESTED LIST-DICT COLUMNS #####
############################################
## cleanup dogs column
dogs = pd.json_normalize(brewdf['dogs'])
dogs2 = dogs.squeeze()
dogsdf = pd.json_normalize(dogs2)
dogsdf = dogsdf.drop(columns =['id','slug'])
dogsdf = dogsdf.rename(columns={'name':'dogs_allowed'})
#dogsdf
## cleanup parking column
parking = pd.json_normalize(brewdf['parking'])
parking = parking.rename(columns = {0:'Parking1',1:'Parking2',2:'Parking3'})
a = pd.json_normalize(parking['Parking1'])
b = pd.json_normalize(parking['Parking2'])
c = pd.json_normalize(parking['Parking3'])
parkcombo = pd.concat([a,b,c],ignore_index=True, axis=1)
parkcombo = parkcombo.rename(columns = {2:'P1',5:'P2',8:'P3'})
parkcombo['parking_type'] = parkcombo['P1'].map(str) + ',' + parkcombo['P2'].map(str) + ',' + parkcombo['P3'].map(str)
parkcombo['parking_type'] = parkcombo['parking_type'].str.replace(",nan",'')
parkdf = parkcombo['parking_type'].to_frame()
#parkdf
## cleanup food type column
food = pd.json_normalize(brewdf['food_type'])
food
food = food.rename(columns = {0:'Food1',1:'Food2',2:'Food3',3:'Food4',4:'Food5',5:'Food6'})
a = pd.json_normalize(food['Food1'])
b = pd.json_normalize(food['Food2'])
c = pd.json_normalize(food['Food3'])
d = pd.json_normalize(food['Food4'])
e = pd.json_normalize(food['Food5'])
f = pd.json_normalize(food['Food6'])
foodcombo = pd.concat([a,b,c,d,e,f],ignore_index=True, axis =1)
foodcombo
foodcombo = foodcombo.rename(columns = {2:'F1',5:'F2',8:'F3',11:'F4',14:'F5',17:'F6'})
foodcombo['food_type'] = foodcombo['F1'].map(str) + ',' + foodcombo['F2'].map(str) + ',' + foodcombo['F3'].map(str) + ',' + foodcombo['F4'].map(str)+ ',' + foodcombo['F5'].map(str) + ',' + foodcombo['F6'].map(str)
foodcombo['food_type'] = foodcombo['food_type'].str.replace(",nan",'')
fooddf = foodcombo['food_type'].to_frame()
#fooddf
## cleanup patio column
patio = pd.json_normalize(brewdf['patio'])
patio = patio.rename(columns = {0:'P1',1:'P2',2:'P3'})
a = pd.json_normalize(patio['P1'])
b = pd.json_normalize(patio['P2'])
c = pd.json_normalize(patio['P3'])
patiocombo = pd.concat([a,b,c],ignore_index=True, axis =1)
patiocombo
patiocombo = patiocombo.rename(columns = {2:'P1',5:'P2',8:'P3'})
patiocombo['patio_type'] = patiocombo['P1'].map(str) + ',' + patiocombo['P2'].map(str) + ',' + patiocombo['P3'].map(str)
patiocombo['patio_type'] = patiocombo['patio_type'].str.replace(",nan",'')
patiodf = patiocombo['patio_type'].to_frame()
#patiodf
## clean visitor type column
visitor = pd.json_normalize(brewdf['visitors'])
visitor
visitor = visitor.rename(columns = {0:'V1',1:'V2',2:'V3'})
a = pd.json_normalize(visitor['V1'])
b = pd.json_normalize(visitor['V2'])
c = pd.json_normalize(visitor['V3'])
visitorcombo = pd.concat([a,b,c],ignore_index=True, axis =1)
visitorcombo
visitorcombo = visitorcombo.rename(columns = {2:'V1',5:'V2',8:'V3'})
visitorcombo['visitor_type'] = visitorcombo['V1'].map(str) + ',' + visitorcombo['V2'].map(str) + ',' + visitorcombo['V3'].map(str)
visitorcombo['visitor_type'] = visitorcombo['visitor_type'].str.replace(",nan",'')
visitordf = visitorcombo['visitor_type'].to_frame()
#visitordf
## clean tour type column
tour = pd.json_normalize(brewdf['tour_type'])
tour
tour = tour.rename(columns = {0:'T1',1:'T2',2:'T3',3:'T4'})
a = pd.json_normalize(tour['T1'])
b = pd.json_normalize(tour['T2'])
c = pd.json_normalize(tour['T3'])
d = pd.json_normalize(tour['T4'])
tourcombo = pd.concat([a,b,c,d],ignore_index=True, axis =1)
tourcombo
tourcombo = tourcombo.rename(columns = {2:'T1',5:'T2',8:'T3',11:'T4'})
tourcombo['tour_type'] = tourcombo['T1'].map(str) + ',' + tourcombo['T2'].map(str) + ',' + tourcombo['T3'].map(str) + ','+ tourcombo['T4'].map(str)
tourcombo['tour_type'] = tourcombo['tour_type'].str.replace(",nan",'')
tourdf = tourcombo['tour_type'].to_frame()
#tourdf
## clean other drinks column
odrink = pd.json_normalize(brewdf['otherdrinks_type'])
odrink
odrink = odrink.rename(columns = {0:'O1',1:'O2',2:'O3',3:'O4',4:'O5',5:'O6',6:'O7',7:'O8',8:'O9'})
a = pd.json_normalize(odrink['O1'])
b = pd.json_normalize(odrink['O2'])
c = pd.json_normalize(odrink['O3'])
d = pd.json_normalize(odrink['O4'])
e = pd.json_normalize(odrink['O5'])
f = pd.json_normalize(odrink['O6'])
g = pd.json_normalize(odrink['O7'])
h = pd.json_normalize(odrink['O8'])
i = pd.json_normalize(odrink['O9'])
odrinkcombo = pd.concat([a,b,c,d,e,f,g,h,i],ignore_index=True, axis =1)
odrinkcombo
odrinkcombo = odrinkcombo.rename(columns = {2:'O1',5:'O2',8:'O3',11:'O4',14:'O5',17:'O6',20:'O7',23:'O8',26:'O9'})
odrinkcombo['odrink_type'] = odrinkcombo['O1'].map(str) + ',' + odrinkcombo['O2'].map(str) + ',' + odrinkcombo['O3'].map(str) + ','+ odrinkcombo['O4'].map(str) + ','+ odrinkcombo['O5'].map(str)+ ','+ odrinkcombo['O6'].map(str)+ ','+ odrinkcombo['O7'].map(str)+','+ odrinkcombo['O8'].map(str)+','+ odrinkcombo['O9'].map(str)
odrinkcombo['odrink_type'] = odrinkcombo['odrink_type'].str.replace(",nan",'')
odrinkdf = odrinkcombo['odrink_type'].to_frame()
#odrinkdf
## clean to-go column
togo = pd.json_normalize(brewdf['togo_type'])
togo
togo = togo.rename(columns = {0:'TG1',1:'TG2',2:'TG3',3:'TG4',4:'TG5'})
a = pd.json_normalize(togo['TG1'])
b = pd.json_normalize(togo['TG2'])
c = pd.json_normalize(togo['TG3'])
d = pd.json_normalize(togo['TG4'])
e = pd.json_normalize(togo['TG5'])
togocombo = pd.concat([a,b,c,d,e],ignore_index=True, axis =1)
togocombo
togocombo = togocombo.rename(columns = {2:'TG1',5:'TG2',8:'TG3',11:'TG4',14:'TG5'})
togocombo['togo_type'] = togocombo['TG1'].map(str) + ',' + togocombo['TG2'].map(str) + ',' + togocombo['TG3'].map(str) + ','+ togocombo['TG4'].map(str) + ','+ togocombo['TG5'].map(str)
togocombo['togo_type'] = togocombo['togo_type'].str.replace(",nan",'')
togodf = togocombo['togo_type'].to_frame()
#togodf
## clean merch column
merch = pd.json_normalize(brewdf['merch_type'])
merch
merch = merch.rename(columns = {0:'M1',1:'M2',2:'M3',3:'M4',4:'M5',5:'M6',6:'M7',7:'M8',8:'M9',9:'M10',10:'M11',11:'M12'})
a = pd.json_normalize(merch['M1'])
b = pd.json_normalize(merch['M2'])
c = pd.json_normalize(merch['M3'])
d = pd.json_normalize(merch['M4'])
e = pd.json_normalize(merch['M5'])
f = pd.json_normalize(merch['M6'])
g = pd.json_normalize(merch['M7'])
h = pd.json_normalize(merch['M8'])
i = pd.json_normalize(merch['M9'])
j = pd.json_normalize(merch['M10'])
k = pd.json_normalize(merch['M11'])
l = pd.json_normalize(merch['M12'])
merchcombo = pd.concat([a,b,c,d,e,f,g,h,i,j,k,l],ignore_index=True, axis =1)
merchcombo
merchcombo = merchcombo.rename(columns = {2:'M1',5:'M2',8:'M3',11:'M4',14:'M5',17:'M6',20:'M7',23:'M8',26:'M9',29:'M10',32:'M11',35:'M12'})
merchcombo['merch_type'] = (merchcombo['M1'].map(str) + ',' + merchcombo['M2'].map(str) + ',' + merchcombo['M3'].map(str) + ','+ merchcombo['M4'].map(str) + ','
+ merchcombo['M5'].map(str) + ',' + merchcombo['M6'].map(str)+ ',' + merchcombo['M7'].map(str) + ',' + merchcombo['M8'].map(str)
+ ',' + merchcombo['M9'].map(str)+ ',' + merchcombo['M10'].map(str)+ ',' + merchcombo['M11'].map(str)+ ',' + merchcombo['M12'].map(str))
merchcombo['merch_type'] = merchcombo['merch_type'].str.replace(",nan",'')
merchdf = merchcombo['merch_type'].to_frame()
#merchdf
### clean description column
brewdf['description'] = brewdf['description'].str.replace(r'<[^<>]*>', '', regex=True)
#brewdf
### replace nan with null
brewdf = brewdf.replace('nan',np.nan)
brewdf = brewdf.replace('None',np.nan)
brewdf
cleanedbrewdf = brewdf.drop(columns = {'food_type','tour_type','otherdrinks_type','articles','merch_type','togo_type','patio','visitors','parking','dogs'})
mergedbrewdf = pd.concat([cleanedbrewdf,dogsdf,parkdf,fooddf,patiodf,
visitordf,tourdf,odrinkdf,togodf,merchdf,],ignore_index=False,axis=1)
mergedbrewdf
### remove non-existing
finalbrewdf = mergedbrewdf.loc[(mergedbrewdf['lon'].notnull())].copy()
finalbrewdf['lon'] = finalbrewdf['lon'].astype(float)
finalbrewdf['lat'] = finalbrewdf['lat'].astype(float)
finalbrewdf
Can someone please point me in the right direction for the hours and memberships columns? Also, is there a more efficient way to work through these different columns? They have different nested list-dict lengths, which I thought might prevent me from writing a single function.
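One possible direction, sketched here without access to the live feed: if each row of 'hours' is a single dict keyed by day, pd.json_normalize spreads it into one column per day in a single call, and a small helper built on explode/groupby can replace the repeated rename/concat blocks above for the variable-length list-of-dict columns. The 'name' key and the day-keyed shape of 'hours' are assumptions based on the parking/food/patio columns; check the actual JSON.
### sketch: if 'hours' holds dicts like {'mon': '11am-9pm', ..., 'sun': '12pm-6pm'}
hoursdf = pd.json_normalize(brewdf['hours'])
hoursdf.columns = [c[:3].title() + '_Hours' for c in hoursdf.columns]  # Mon_Hours ... Sun_Hours
### generic helper: explode each list so every dict gets its own row,
### pull out one field, then join the values back per original row
def join_names(df, col, key='name'):
    exploded = df[col].explode().dropna()
    names = exploded.apply(lambda d: str(d.get(key)))
    return names.groupby(level=0).agg(','.join).rename(col)
### usage sketch, equivalent to the parking/food blocks above; the same
### helper should work for 'memberships' if its entries are lists of dicts
# parkdf = join_names(brewdf, 'parking').to_frame().rename(columns={'parking': 'parking_type'})
# fooddf = join_names(brewdf, 'food_type').to_frame()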

Related

Creating a df based on total permutations deriving from user-input variables

I would like to pass 'n' cities to travel to, and the corresponding days in each city, to a function that returns a df with all possible permutations of the journey. The kayak_search_url column in the df should contain this string in the first row:
https://www.kayak.com/flights/AMS-WAW,nearby/2023-02-14/WAW-BOG,nearby/2023-02-17/BOG-MIL,nearby/2023-02-20/MIL-SDQ,nearby/2023-02-23/SDQ-AMS,nearby/2023-02-25/?sort=bestflight_a
...but instead contains this string:
https://www.kayak.com/flights/AMS-WAW,nearby/2023-02-14/AMS-BOG,nearby/2023-02-17/AMS-MIL,nearby/2023-02-20/AMS-SDQ,nearby/2023-02-23/AMS,nearby/2023-02-25/?sort=bestflight_a
I can't figure out why the origin code 'AMS' shows up instead of the chain of cities. Here's the code:
# List the cities you want to travel to and from, how long you'd like to stay in each, and the appropriate start/end dates
start_city = 'Amsterdam'
end_city = 'Amsterdam'
start_date = '2023-02-14'
cities = ['Warsaw', 'Bogota', 'Milan', 'Santo Domingo']
days = [3, 3, 3, 2]

def generate_permutations(cities, days, start_city, end_city, start_date):
    city_to_days = dict(zip(cities, days))
    permutations = list(itertools.permutations(cities))
    df = pd.DataFrame(permutations, columns=['city' + str(i) for i in range(1, len(cities) + 1)])
    df['origin'] = start_city
    df['end'] = end_city
    first_column = df.pop('origin')
    df.insert(0, 'origin', first_column)
    st_dt = pd.to_datetime(start_date)
    df = df.assign(flight_dt_1=st_dt)
    for i in range(len(cities)):
        df['flight_dt_' + str(i + 2)] = df['flight_dt_' + str(i + 1)] + df['city' + str(i + 1)].map(city_to_days).map(lambda x: pd.Timedelta(days=x))
    # IATA city code dictionary from iata_code.csv file in repo and create Kayak 'url' column for each permutation
    iata = {'Amsterdam': 'AMS',
            'Warsaw': 'WAW',
            'Bogota': 'BOG',
            'Milan': 'MIL',
            'Santo Domingo': 'SDQ'}
    url = 'https://www.kayak.com/flights/'
    df['kayak_search_url'] = df.apply(lambda x: url + ''.join([iata[x['origin']] + '-' + iata[x['city' + str(i+1)]] +
                                                               ',nearby/' + str(x['flight_dt_' + str(i+1)].strftime("%Y-%m-%d")) + '/'
                                                               for i in range(len(cities))]) + iata[x['end']] + ',nearby/' + str(x['flight_dt_' + str(len(cities) + 1)].strftime("%Y-%m-%d")) +
                                      '/?sort=bestflight_a', axis=1)
    return df
The origin code 'AMS' shows up in every segment because the lambda always uses iata[x['origin']] as the departure airport instead of chaining from the previous destination. Let's break down the desired URL to highlight its structure:
https://www.kayak.com/flights
/AMS-WAW,nearby/2023-02-14
/WAW-BOG,nearby/2023-02-17
/BOG-MIL,nearby/2023-02-20
/MIL-SDQ,nearby/2023-02-23
/SDQ-AMS,nearby/2023-02-25
/?sort=bestflight_a
Obviously only the middle section needs to be generated, as the other parts are static. We can also generate that middle section before constructing the dataframe:
import itertools

import numpy as np
import pandas as pd

def generate_permutations(cities, days, start_city, end_city, start_date):
    iata = {
        "Amsterdam": "AMS",
        "Warsaw": "WAW",
        "Bogota": "BOG",
        "Milan": "MIL",
        "Santo Domingo": "SDQ",
    }
    permutations = [
        (start_city,) + p + (end_city,) for p in itertools.permutations(cities)
    ]
    flight_dates = pd.to_datetime(start_date) + pd.to_timedelta(
        np.array([0] + days).cumsum(),
        unit="D",
    )
    # Generate the URLs
    urls = []
    for p in permutations:
        # The pattern for each segment is
        # START-END,nearby/yyyy-mm-dd
        mid_url = "/".join(
            [
                f"{iata[s]}-{iata[e]},nearby/{fd:%Y-%m-%d}"
                for s, e, fd in zip(p[:-1], p[1:], flight_dates)
            ]
        )
        urls.append(f"https://www.kayak.com/flights/{mid_url}/?sort=bestflight_a")
    # Generate the resulting dataframe
    return (
        pd.DataFrame(
            permutations,
            columns=["origin", *[f"city{i+1}" for i in range(len(cities))], "end"],
        )
        .merge(
            pd.DataFrame(
                flight_dates,
                index=[f"flight_dt_{i+1}" for i in range(len(flight_dates))],
            ).T,
            how="cross",
        )
        .assign(kayak_search_url=urls)
    )
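Called with the inputs from the question, the first row (the identity permutation) reproduces the desired URL:
df = generate_permutations(cities, days, start_city, end_city, start_date)
print(df.loc[0, 'kayak_search_url'])
# https://www.kayak.com/flights/AMS-WAW,nearby/2023-02-14/WAW-BOG,nearby/2023-02-17/BOG-MIL,nearby/2023-02-20/MIL-SDQ,nearby/2023-02-23/SDQ-AMS,nearby/2023-02-25/?sort=bestflight_a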

Python Return Statement Not Providing Expected Results

Just attempting to return values from a defined function. When I call the function first and then attempt to print the returned values, I receive "[variable] not defined". However, if I run print(qb_stat_filler()) it prints the results as a tuple. I need the individual variables returned to use in a separate function.
For example:
print(qb_stat_filler())
outputs: (0, 11, 24, 24.2024, 39.1143, 293.0, 1.9143000000000001, 0.2262, 97.84333355313255)
but when trying
qb_stat_filler()
print(cmp_avg)
print(att_avg)
outputs: NameError: name 'cmp_avg' is not defined
Process finished with exit code 1
I've tried establishing the variables outside of the function, then passing and returning them and that did not work either. Any thoughts?
def qb_stat_filler():
    n_input = input('Player name: ')
    t_input = input('Players team: ')
    loc_input = input('H or #: ')
    o_input = input('Opponent: ')
    # convert index csv to dictionary of player values
    q = pd.read_csv('Models\\QB Indexes\\QBname.csv')
    q = q[['Player', 'Num']]
    qb_dict = dict(q.values)
    name = qb_dict.get('{}'.format(n_input))
    t = pd.read_csv('Models\\QB Indexes\\Tmname.csv')
    t = t[['Tm', 'Num']]
    tm_dict = dict(t.values)
    team = tm_dict.get('{}'.format(t_input))
    loc = 0
    if loc_input == '#':
        loc = 0
    elif loc_input == 'H':
        loc = 1
    z = pd.read_csv('Models\\QB Indexes\\Oppname.csv')
    z = z[['Opp', 'Num']]
    opp_dict = dict(z.values)
    opp = opp_dict.get('{}'.format(o_input))
    # *there are several lines of code here that involve SQL
    # queries and data cleansing*
    cmp_avg = (cmp_match + cmpL4) / 2
    att_avg = (patt_match + pattL4) / 2
    pyds_avg = (py_match + pydsL4) / 2
    ptd_avg = (ptdL4 + ptd_match) / 2
    int_avg = (intL4 + int_match) / 2
    qbr_avg = (qbr_match + qbrL4) / 2
    return (name, team, opp, cmp_avg, att_avg, pyds_avg, ptd_avg,
            int_avg, qbr_avg)

qb_stat_filler()
You might consider:
def qb_stat_filler():
    stats = {}
    ...
    stats['name'] = name
    z = z[['Opp', 'Num']]
    opp_dict = dict(z.values)
    stats['opp'] = opp_dict.get('{}'.format(o_input))
    ...
    stats['cmp_avg'] = (cmp_match + cmpL4) / 2
    stats['att_avg'] = (patt_match + pattL4) / 2
    stats['pyds_avg'] = (py_match + pydsL4) / 2
    stats['ptd_avg'] = (ptdL4 + ptd_match) / 2
    stats['int_avg'] = (intL4 + int_match) / 2
    stats['qbr_avg'] = (qbr_match + qbrL4) / 2
    return stats

...
stats = qb_stat_filler()
print(stats['cmp_avg'])
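The underlying issue is that cmp_avg and the other names are local to the function; calling qb_stat_filler() without capturing the return value simply discards the tuple, so the names never exist at the call site. If you prefer to keep the tuple return, unpack it when you call:
(name, team, opp, cmp_avg, att_avg,
 pyds_avg, ptd_avg, int_avg, qbr_avg) = qb_stat_filler()
print(cmp_avg)
print(att_avg)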

How can I display the max number of losses from this dataframe in Pandas?

I wrote a web scraper which downloads table tennis data. There is info about players, match scores, etc. I would like to display the players who lost the most matches per day. I've created a data frame, and I would like to sum p1_status and p2_status, then display each surname with that player's number of losses.
https://gyazo.com/19c70e071db78071e83045bfcea0e772
Here is my code:
s = Service("D:/setka/chromedriver.exe")
option = webdriver.ChromeOptions()
driver = webdriver.Chrome(service=s)
hall = 10
num = 1
filename = "C:/Users/filip/result2.csv"
f = open(filename, "w")
headers = "p1_surname, p1_name, p1_score, p2_surname, p2_name, p2_score, p1_status, p2_status \n"
f.write(headers)
while hall <= 10:
    for period in [1]:
        url = 'https://tabletennis.setkacup.com/en/schedule?date=2021-12-04&hall=' + \
            str(hall) + '&' + 'period=' + str(period)
        driver.get(url)
        time.sleep(5)
        divs = driver.find_elements(By.CSS_SELECTOR, "div.score-result")
        for div in divs:
            data = div.text.split()
            #print(data)
            if (num % 2) == 0:
                f.write(str(data[0]) + "," + str(data[1]) + "," + str(data[2]) + "," + "\n")
            else:
                f.write(str(data[0]) + "," + str(data[1]) + "," + str(data[2]) + ",")
            num = num + 1
    hall = hall + 1
f.close()

df_results = pd.read_csv('C:/Users/filip/result2.csv', sep=r',',
                         skipinitialspace=True)
df_results.reset_index(drop=True, inplace=True)
df_results.loc[df_results['p1_score'] > df_results['p2_score'], ['p1_status', 'p2_status']] = ['won', 'lost']
df_results.loc[df_results['p1_score'] < df_results['p2_score'], ['p1_status', 'p2_status']] = ['lost', 'won']
df_results.loc[df_results['p1_score'] == df_results['p2_score'], ['p1_status', 'p2_status']] = ['not played', 'not played']
df_results.loc[((df_results['p1_score'] < 3) & (df_results['p1_score'] != 0) & (df_results['p2_score'] < 3) & (df_results['p2_score'] != 0)), ['p1_status', 'p2_status']] = ['inplay', 'inplays']
df_results.loc[df_results['p1_status'] != df_results['p2_status'], ['match_status']] = ['finished']
df_results.loc[df_results['p1_status'] == df_results['p2_status'], ['match_status']] = ['not played']
df_results.loc[((df_results['p1_status'] == 'inplay') & (df_results['p2_status'] == 'inplays')), ['match_status']] = ['inplay']
df_results = df_results.dropna(axis=1)
df_results.head(30)
Split your dataframe into 2 parts (p1_*, p2_*) to count the defeats of each player, then combine them.
Set up an MRE:
df = pd.DataFrame({'p1_surname': list('AABB'), 'p2_surname': list('CDCD'),
                   'p1_status': list('LWWW'), 'p2_status': list('WLLL')})
print(df)
# Output:
  p1_surname p2_surname p1_status p2_status
0          A          C         L         W
1          A          D         W         L
2          B          C         W         L
3          B          D         W         L
>>> pd.concat([
        df.filter(like='p1_').set_index('p1_surname')['p1_status'].eq('L').rename('loses'),
        df.filter(like='p2_').set_index('p2_surname')['p2_status'].eq('L').rename('loses')]) \
    .groupby(level=0).sum().rename_axis('surname').reset_index()
  surname  loses
0       A      1
1       B      0
2       C      1
3       D      2
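To then surface the player with the most losses, as the question asks, assign that result and take the row with the highest count, e.g.:
loses = pd.concat([
    df.filter(like='p1_').set_index('p1_surname')['p1_status'].eq('L').rename('loses'),
    df.filter(like='p2_').set_index('p2_surname')['p2_status'].eq('L').rename('loses')]) \
    .groupby(level=0).sum().rename_axis('surname').reset_index()
print(loses.nlargest(1, 'loses'))
#   surname  loses
# 3       D      2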

Python - Extracting only one result when using an open API

I'm extracting movie data from an open API.
I want to get only the first director and actor, but every person is printed.
This is my code.
url = "http://www.kobis.or.kr/kobisopenapi/webservice/rest/movie/searchMovieInfo.json?key='KeyValue'&movieCd=20177478"
res = requests.get(url)
test = res.text
d = json.loads(test)
movieinfo = d['movieInfoResult']['movieInfo']
moviename = movieinfo['movieNm']
print("movie_name = " + moviename)
moviedt = movieinfo['openDt']
print("movie_dt = " + moviedt)
for b in d["movieInfoResult"]["movieInfo"]["directors"]:
    print("director_name = " + b["peopleNm"])
When I run this code, the result is like this:
movie_name = avengers
movie_dt = 20180425
director_name = Anthony Russo
director_name = Joe Russo
How can I get only one person, like this? I need just the first person:
movie_name = avengers
movie_dt = 20180425
director_name = Anthony Russo
Open API site(korean) - https://www.kobis.or.kr/kobisopenapi/homepg/apiservice/searchServiceInfo.do
You can break out of the for loop after printing, or you can directly access the first value (if you are sure the directors array is not empty):
url = "http://www.kobis.or.kr/kobisopenapi/webservice/rest/movie/searchMovieInfo.json?key='KeyValue'&movieCd=20177478"
res = requests.get(url)
test = res.text
d = json.loads(test)
movieinfo = d['movieInfoResult']['movieInfo']
moviename = movieinfo['movieNm']
print("movie_name = " + moviename)
moviedt = movieinfo['openDt']
print("movie_dt = " + moviedt)
for b in d["movieInfoResult"]["movieInfo"]["directors"]:
    print("director_name = " + b["peopleNm"])
    break
or
url = "http://www.kobis.or.kr/kobisopenapi/webservice/rest/movie/searchMovieInfo.json?key='KeyValue'&movieCd=20177478"
res = requests.get(url)
test = res.text
d = json.loads(test)
movieinfo = d['movieInfoResult']['movieInfo']
moviename = movieinfo['movieNm']
print("movie_name = " + moviename)
moviedt = movieinfo['openDt']
print("movie_dt = " + moviedt)
print("director_name = " + d["movieInfoResult"]["movieInfo"]["directors"][0]["peopleNm"])

How to feed strings into an empty list?

I am trying to store values obtained from Excel sheet cells in a list. The code below collects data from contiguous rows and columns and builds a string from those values. I got as far as building the string, but I don't know how to store the strings in a list. Can anyone help me with this?
for i in range(NR):
    print("This TC checks the output for")
    for j in range(NC):
        inputVariable = str(ws[get_column_letter(ColumnStart+j) + str(rowStart-1)].value)
        c = str((ws.cell(row=(rowStart + i), column=(ColumnStart + j)).value))
        if (ws.cell(row=(rowStart + i), column=(ColumnStart+j)).value) == (ws.cell(row=(MaxValRow), column=(ColumnStart+j)).value):
            b = '(maximum)'
        elif (ws.cell(row=(rowStart + i), column=(ColumnStart+j)).value) == (ws.cell(row=(MinValRow), column=(ColumnStart+j)).value):
            b = '(minimum)'
        else:
            b = '(intermediate)'
        Commentstr = str(j+1) + '. The value of input ' + inputVariable + ' =' + " " + c + b
        # need to create a list here to store the Commentstr for each iteration
NR = no. of rows, NC = no. of columns
my_list = []
for i in range(NR):
    print("This TC checks the output for")
    for j in range(NC):
        inputVariable = str(ws[get_column_letter(ColumnStart+j) + str(rowStart-1)].value)
        c = str((ws.cell(row=(rowStart + i), column=(ColumnStart + j)).value))
        if (ws.cell(row=(rowStart + i), column=(ColumnStart+j)).value) == (ws.cell(row=(MaxValRow), column=(ColumnStart+j)).value):
            b = '(maximum)'
        elif (ws.cell(row=(rowStart + i), column=(ColumnStart+j)).value) == (ws.cell(row=(MinValRow), column=(ColumnStart+j)).value):
            b = '(minimum)'
        else:
            b = '(intermediate)'
        Commentstr = str(j+1) + '. The value of input ' + inputVariable + ' =' + " " + c + b
        my_list.append(Commentstr)  # append grows the list one string per iteration
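For reference, index assignment like my_list[x] = Commentstr only works for slots that already exist, so it fails on a fresh list; append is what creates new slots:
my_list = []
# my_list[0] = 'first'   # would raise IndexError: list assignment index out of range
my_list.append('first')
print(my_list)           # ['first']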
