I am trying to hide the data in a single column. If the column contains a value that is in exceptionList, it should be skipped and we move on to the next one, but somehow I am not able to hide the other values and it throws an error:
if(x in exceptionList):
ValueError: The truth value of a Series is ambiguous. Use a.empty, a.bool(), a.item(), a.any() or a.all().
Here is my code:
data = [['NISAMANEE ROWELL', '9198762345','98 Oxford Ave.Elk Grove Village, IL 60007'], ['ALICE BAISDEN', '8756342865', '94 Valley Rd.Miami Gardens, FL 33056'], ['MARC COGNETTI', '9198762345', '221 Summer CircleGreer, SC 29650'], ['JOHNS HOPKINS HEALTHCARE', '9654987642', '8522 Pendergast AvenueVilla Park, IL 60181']]
df = pd.DataFrame(data, columns = ['Name', 'Number', 'Address'])
df
def title_format(inp):
    return inp.str.title()

def new(x):
    #x = input('Enter your column name')
    #x = x.title()
    x = title_format(x)
    print(x)
    exc_list = ['Mackesson Inc','Care','Healthcare','Henery Schien','Besse','LLC','CandP','INC','LTD','PHARMACY','PHARMACEUTICAL','HOSPITAL','COMPANY','ELECTRONICS','APP','VOLUNTEERS','SPECIALITIES','APPLIANCE','EXPRESS','MAGAZINE','SUPPLY','ENDOSCOPY','NETWORK','SCHOOL','AT&T','SOLUTIONS','SANITATION','SYSTEMS','COMPOUNDING','CLINIC','UTILITIES','DEPARTMENT','CREATIVE','PIN','employment','consultant','units','label','machine','anesthesia','services','medical','community','plaza','tech','bipolar','brand','commerce','testing','inspection','killer','plus','electric','division','diagnostic','materials','imaging','international','district','chamber','city','products','essentials','life','scissor','leasing','units','health','healthcare','surgical','enterprises','print','radiology','water','screens','telecom']
    exceptionList = [z.title() for z in exc_list]
    if(x in exceptionList):
        return x
    else:
        return x.str.replace(x, 'X' * random.randrange(3, 8))
#new(df.Name.astype(str))
new(df['Name'].astype(str))
As far as I understand what you want, I changed several lines in your code:
import pandas as pd
import random
data = [['NISAMANEE ROWELL', '9198762345','98 Oxford Ave.Elk Grove Village, IL 60007'], ['ALICE BAISDEN', '8756342865', '94 Valley Rd.Miami Gardens, FL 33056'], ['MARC COGNETTI', '9198762345', '221 Summer CircleGreer, SC 29650'], ['Healthcare', '9654987642', '8522 Pendergast AvenueVilla Park, IL 60181']]
df = pd.DataFrame(data, columns = ['Name', 'Number', 'Address'])
def title_format(inp):
    return inp.str.title()

def new(x):
    #x = input('Enter your column name')
    #x = x.title()
    x = title_format(x)
    print(x)
    exc_list = ['Mackesson Inc','Care','Healthcare','Henery Schien','Besse','LLC','CandP','INC','LTD','PHARMACY','PHARMACEUTICAL','HOSPITAL','COMPANY','ELECTRONICS','APP','VOLUNTEERS','SPECIALITIES','APPLIANCE','EXPRESS','MAGAZINE','SUPPLY','ENDOSCOPY','NETWORK','SCHOOL','AT&T','SOLUTIONS','SANITATION','SYSTEMS','COMPOUNDING','CLINIC','UTILITIES','DEPARTMENT','CREATIVE','PIN','employment','consultant','units','label','machine','anesthesia','services','medical','community','plaza','tech','bipolar','brand','commerce','testing','inspection','killer','plus','electric','division','diagnostic','materials','imaging','international','district','chamber','city','products','essentials','life','scissor','leasing','units','health','healthcare','surgical','enterprises','print','radiology','water','screens','telecom']
    exceptionList = [z.title() for z in exc_list]
    match = [x1 in exceptionList for x1 in x]
    df.loc[match, 'Name'] = ['X' * random.randrange(3, 8) for a in range(sum(match))]
    # return x
    # else:
    #     return x.str.replace(x, 'X' * random.randrange(3, 8))
#new(df.Name.astype(str))
new(df['Name'].astype(str))
df
Out[1]:
Name Number Address
0 NISAMANEE ROWELL 9198762345 98 Oxford Ave.Elk Grove Village, IL 60007
1 ALICE BAISDEN 8756342865 94 Valley Rd.Miami Gardens, FL 33056
2 MARC COGNETTI 9198762345 221 Summer CircleGreer, SC 29650
3 XXXXXXX 9654987642 8522 Pendergast AvenueVilla Park, IL 60181
A more optimal way to do the same:
exc_list = [x.title() for x in exc_list]
df['Name'] = df['Name'].map(str.title)
df['match'] = [nn in exc_list for nn in df['Name']]
df.loc[df['match'] == True,'Name'] = ['X' * random.randrange(3, 8) for a in range(sum(df['match']))]
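For what it's worth, the same masking can also be done with Series.isin and Series.mask, avoiding the hand-built list of booleans. This is just a sketch that assumes the df and the title-cased exc_list from above:

mask = df['Name'].isin(exc_list)
random_x = pd.Series(['X' * random.randrange(3, 8) for _ in range(len(df))], index=df.index)
df['Name'] = df['Name'].mask(mask, random_x)   # matching names are replaced, the rest stay unchanged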
To hide the first 3 characters:
exc_list = [x.title() for x in exc_list]
df['Name'] = df['Name'].map(str.title)
df['match'] = [nn in exc_list for nn in df['Name']]
df['NameIf'] = list(zip(df['Name'], ['XXX' + s[3:] if len(s) > 3 else 'XXX' for s in df['Name']]))
df['Name'] = [n[0][n[1]] for n in list(zip(df['NameIf'],df['match'].astype(int)))]
df = df.drop(['NameIf', 'match'], axis = 1)
df
To hide the whole row:
exc_list = [x.title() for x in exc_list]
df['Name'] = df['Name'].map(str.title)
df['match'] = [nn in exc_list for nn in df['Name']]
hide_row = {c:'XXX' for c in df.columns}
df[df['match'] != True].merge(pd.DataFrame(hide_row, index = df[df['match'] == True].index), how = 'outer')
Short explanation:
# Step 1. This gives you the DataFrame without the matching rows
df[df['match'] != True]
Out[3]:
Name Number Address match
0 Nisamanee Rowell 9198762345 98 Oxford Ave.Elk Grove Village, IL 60007 False
1 Alice Baisden 8756342865 94 Valley Rd.Miami Gardens, FL 33056 False
2 Marc Cognetti 9198762345 221 Summer CircleGreer, SC 29650 False
# Step 2. The opposite gives you the DataFrame with the matching rows
df[df['match'] == True]
Out[4]:
Name Number Address match
3 Healthcare 9654987642 8522 Pendergast AvenueVilla Park, IL 60181 True
# Step 3. But you take only the index from Step 2 and create a new DataFrame with those indexes and 'XXX' in every column
hide_row = {c:'XXX' for c in df.columns}
pd.DataFrame(hide_row, index = df[df['match'] == True].index)
Out[5]:
Name Number Address match
3 XXX XXX XXX XXX
# Step 4. Then you just merge the two DataFrames from Step 1 and Step 3 by index
df[df['match'] != True].merge(pd.DataFrame(hide_row, index = df[df['match'] == True].index), how = 'outer')
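As a simpler alternative (just a sketch, not part of the original approach), the matching rows can also be overwritten in place with .loc, which avoids the merge entirely:

cols = ['Name', 'Number', 'Address']
df.loc[df['match'] == True, cols] = 'XXX'   # broadcasts 'XXX' across the selected rows and columns
df = df.drop('match', axis=1)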
Just a small change in your code will make it work; mind you, it's not optimal, but it works just fine.
data = [['NISAMANEE ROWELL', '9198762345','98 Oxford Ave.Elk Grove Village, IL 60007'], ['ALICE BAISDEN', '8756342865', '94 Valley Rd.Miami Gardens, FL 33056'], ['MARC COGNETTI', '9198762345', '221 Summer CircleGreer, SC 29650'], ['Healthcare', '9654987642', '8522 Pendergast AvenueVilla Park, IL 60181']]
df = pd.DataFrame(data, columns = ['Name', 'Number', 'Address'])
df
def title_format(inp):
    return inp.title()

def new(x):
    #x = input('Enter your column name')
    #x = x.title()
    x = title_format(x)
    print(x)
    exc_list = ['Mackesson Inc','Care','Healthcare','Henery Schien','Besse','LLC','CandP','INC','LTD','PHARMACY','PHARMACEUTICAL','HOSPITAL','COMPANY','ELECTRONICS','APP','VOLUNTEERS','SPECIALITIES','APPLIANCE','EXPRESS','MAGAZINE','SUPPLY','ENDOSCOPY','NETWORK','SCHOOL','AT&T','SOLUTIONS','SANITATION','SYSTEMS','COMPOUNDING','CLINIC','UTILITIES','DEPARTMENT','CREATIVE','PIN','employment','consultant','units','label','machine','anesthesia','services','medical','community','plaza','tech','bipolar','brand','commerce','testing','inspection','killer','plus','electric','division','diagnostic','materials','imaging','international','district','chamber','city','products','essentials','life','scissor','leasing','units','health','healthcare','surgical','enterprises','print','radiology','water','screens','telecom']
    exceptionList = [z.title() for z in exc_list]
    if(x in exceptionList):
        return x
    else:
        return x.replace(x, 'X' * random.randrange(3, 8))
#new(df.Name.astype(str))
df['Name'] = df['Name'].apply(new)
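For reference, the reason this version no longer raises the ValueError is that df['Name'].apply(new) passes one plain string at a time, so x in exceptionList is an ordinary membership test rather than a comparison against a whole Series. A minimal illustration (just a sketch, reusing the df defined above):

print('Healthcare' in ['Healthcare', 'Clinic'])    # True -- plain string membership
# print(df['Name'] in ['Healthcare', 'Clinic'])    # raises the "truth value of a Series is ambiguous" error
print(df['Name'].isin(['Healthcare', 'Clinic']))   # the elementwise equivalent for a whole column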
Related
Given a small dataset df as follows:
id name address
0 1 ABC tower 北京市朝阳区
1 2 AC park 北京市海淀区
2 3 ZR hospital 上海市黄浦区
3 4 Fengtai library NaN
4 5 Square Point 上海市虹口区
I would like to obtain the longitude and latitude for the address column and append them to the original dataframe. Please note there are NaNs in the address column.
The code below gives me a table with addresses, longitude and latitude, but it ignores the NaN address rows; the code could also be improved:
import pandas as pd
import requests
import json
df = df[df['address'].notna()]
res = []
for addre in df['address']:
    url = "http://restapi.amap.com/v3/geocode/geo?key=f057101329c0200f170be166d9b023a1&address=" + addre
    dat = {
        'count': "1",
    }
    r = requests.post(url, data = json.dumps(dat))
    s = r.json()
    infos = s['geocodes']
    for j in range(0, 10000):
        # print(j)
        try:
            more_infos = infos[j]
            # print(more_infos)
        except:
            continue
        try:
            data = more_infos['location']
            # print(data)
        except:
            continue
        try:
            lon_lat = data.split(',')
            lon = float(lon_lat[0])
            lat = float(lon_lat[1])
        except:
            continue
        res.append([addre, lon, lat])
result = pd.DataFrame(res)
result.columns = ['address', 'longitude', 'latitude']
print(result)
result.to_excel('result.xlsx', index = False)
Out:
address longitude latitude
0 北京市朝阳区 116.601144 39.948574
1 北京市海淀区 116.329519 39.972134
2 上海市黄浦区 121.469240 31.229860
3 上海市虹口区 121.505133 31.264600
But how could I get the final result as follows? Thanks for your kind help in advance.
id name address longitude latitude
0 1 ABC tower 北京市朝阳区 116.601144 39.948574
1 2 AC park 北京市海淀区 116.329519 39.972134
2 3 ZR hospital 上海市黄浦区 121.469240 31.229860
3 4 Fengtai library NaN NaN NaN
4 5 Square Point 上海市虹口区 121.505133 31.264600
Use pd.merge, since result is the longitude & latitude dataframe:
dfn = pd.merge(df, result, on='address', how='left')
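One caveat (not spelled out above): the question's code reassigns df = df[df['address'].notna()], which drops the Fengtai library row before the merge ever happens. A sketch of the intended flow, with hypothetical names df_full and addresses, keeps the original frame intact and filters only for the requests:

df_full = df.copy()                       # keep the original, NaN rows included
addresses = df_full['address'].dropna()   # geocode only the non-NaN addresses
# ... run the geocoding loop over addresses to build result as before ...
dfn = pd.merge(df_full, result, on='address', how='left')   # NaN addresses get NaN lon/lat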
or
for _, row in df.iterrows():
    _id = row['id']
    name = row['name']
    addre = row['address']
    if pd.isna(row['address']):
        res.append([_id, name, addre, None, None])
        continue
    ###### same code ######
    url = '...'
    # ...
    ###### same code ######
    res.append([_id, name, addre, lon, lat])
result = pd.DataFrame(res)
result.columns = ['id', 'name', 'address', 'longitude', 'latitude']
print(result)
result.to_excel('result.xlsx', index = False)
Okay, I need help. I created a function to search a string for a specific word. If the function finds the search_word, it returns that word and the N words that precede it. The function works fine with my test strings, but I cannot figure out how to apply the function to an entire series.
My goal is to create a new column in the data frame that contains the n_words_prior whenever the search_word exists.
n_words_prior = []
test = "New School District, Dale County"
def n_before_string(string, search_word, N):
    global n_words_prior
    n_words_prior = []
    found_word = string.find(search_word)
    if found_word == -1: return ""
    sentence = string[0:found_word]
    n_words_prior = sentence.split()[N:]
    n_words_prior.append(search_word)
    return n_words_prior
The current dataframe looks like this:
data = [['Alabama', 'New School District, Dale County'],
['Alaska', 'Matanuska-Susitna Borough'],
['Arizona', 'Pima County - Tuscon Unified School District']]
df = pd.DataFrame(data, columns = ['State', 'Place'])
The improved function would take the inputs 'Place','County',-1 and create the following result.
improved_function(column, search_word, N)
new_data = [['Alabama', 'New School District, Dale County','Dale County'],
['Alaska', 'Matanuska-Susitna Borough', ''],
['Arizona', 'Pima County - Tuscon Unified School District','Pima County']]
new_df = pd.DataFrame(new_data, columns = ['State', 'Place','Result'])
I thought embedding this function would help, but it has only made things more confusing.
def fast_add(place, search_word):
    df[search_word] = df[place].str.contains(search_word).apply(lambda found: 1 if found == True else 0)
def fun(sentence, search_word, n):
    """Return search_word and the n preceding words from sentence."""
    words = sentence.split()
    for i, word in enumerate(words):
        if word == search_word:
            # max() prevents a negative start index when fewer than n words precede the match
            return ' '.join(words[max(i - n, 0):i + 1])
    return ''
Example:
df['Result'] = df.Place.apply(lambda x: fun(x, 'County', 1))
Result:
State Place Result
0 Alabama New School District, Dale County Dale County
1 Alaska Matanuska-Susitna Borough
2 Arizona Pima County - Tuscon Unified School District Pima County
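If the target is always "the single word immediately before County", a vectorized alternative (just a sketch with a hypothetical regex, not part of the answer above) is str.extract; rows without a match get an empty string, matching the desired output:

df['Result'] = df['Place'].str.extract(r'(\w+ County)', expand=False).fillna('')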
Dataset:
> df
Id Clean_Data
1918916 Luxury Apartments consisting 11 towers Well equipped gymnasium Swimming Pool Toddler Pool Health Club Steam Room Sauna Jacuzzi Pool Table Chess Billiards room Carom Table Tennis indoor games
1495638 near medavakkam junction calm area near global hospital
1050651 No Pre Emi No Booking Amount No Floor Rise Charges No Processing Fee HLPROJECT HIGHLIGHTS
Below is the code which successfully returns the matching words as ngrams from the list of values in Category.py:
df['one_word_tokenized_text'] =df["Clean_Data"].str.split()
df['bigram'] = df['Clean_Data'].apply(lambda row: list(ngrams(word_tokenize(row), 2)))
df['trigram'] = df['Clean_Data'].apply(lambda row: list(ngrams(word_tokenize(row), 3)))
df['four_words'] = df['Clean_Data'].apply(lambda row: list(ngrams(word_tokenize(row), 4)))
token=pd.Series(df["one_word_tokenized_text"])
Lid=pd.Series(df["Id"])
matches= token.apply(lambda x: pd.Series(x).str.extractall("|".join(["({})".format(cat) for cat in Categories.HealthCare])))
match_list= [[m for m in match.values.ravel() if isinstance(m, str)] for match in matches]
match_df = pd.DataFrame({"ID":Lid,"jc1":match_list})
def match_word(feature, row):
    categories = []
    for bigram in row.bigram:
        joined = ' '.join(bigram)
        if joined in feature:
            categories.append(joined)
    for trigram in row.trigram:
        joined = ' '.join(trigram)
        if joined in feature:
            categories.append(joined)
    for fourwords in row.four_words:
        joined = ' '.join(fourwords)
        if joined in feature:
            categories.append(joined)
    return categories
match_df['Health1'] = df.apply(partial(match_word, HealthCare), axis=1)
match_df['HealthCare'] = match_df[match_df.columns[[1,2]]].apply(lambda x: ','.join(x.dropna().astype(str)),axis=1)
Category.py
category = [('steam room','IN','HealthCare'),
('sauna','IN','HealthCare'),
('Jacuzzi','IN','HealthCare'),
('Aerobics','IN','HealthCare'),
('yoga room','IN','HealthCare'),]
HealthCare= [e1 for (e1, rel, e2) in category if e2=='HealthCare']
Output:
ID HealthCare
1918916 Jacuzzi
1495638
1050651 Aerobics, Jacuzzi, yoga room
Here, if I mention the features in the category list in the exact letter case used in the dataset, the code identifies them and returns the values; otherwise it won't.
So I want my code to be case insensitive and to also track "Steam Room" and "Sauna" under the health category. I tried the .lower() function, but I am not sure how to implement it.
Edit 2: only Category.py is updated
Category.py
category = [('steam room','IN','HealthCare'),
('sauna','IN','HealthCare'),
('jacuzzi','IN','HealthCare'),
('aerobics','IN','HealthCare'),
('Yoga room','IN','HealthCare'),
('booking','IN','HealthCare'),
]
category1 = [value[0].capitalize() for index, value in enumerate(category)]
category2 = [value[0].lower() for index, value in enumerate(category)]
test = []
test2 =[]
for index, value in enumerate(category1):
    test.append((value, category[index][1], category[index][2]))

for index, value in enumerate(category2):
    test2.append((value, category[index][1], category[index][2]))
category = category + test + test2
HealthCare = [e1 for (e1, rel, e2) in category if e2=='HealthCare']
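A simpler way to get case-insensitive matching (a sketch, not part of the original answer; HealthCare_lower and match_word_ci are illustrative names) is to lower-case both the category terms and the joined ngrams once, instead of generating capitalized and lower-cased variants of every term:

HealthCare_lower = {e1.lower() for (e1, rel, e2) in category if e2 == 'HealthCare'}

def match_word_ci(feature, row):
    categories = []
    for gram in list(row.bigram) + list(row.trigram) + list(row.four_words):
        joined = ' '.join(gram)
        if joined.lower() in feature:      # compare in lower case
            categories.append(joined)      # keep the original casing from the text
    return categories

match_df['Health1'] = df.apply(partial(match_word_ci, HealthCare_lower), axis=1)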
Your unaltered dataset
import pandas as pd
from nltk import ngrams, word_tokenize
import Categories
from Categories import *
from functools import partial
data = {'Clean_Data':['Luxury Apartments consisting 11 towers Well equipped gymnasium Swimming Pool Toddler Pool Health Club Steam Room Sauna Jacuzzi Pool Table Chess Billiards room Carom Table Tennis indoor games',
'near medavakkam junction calm area near global hospital',
'No Pre Emi No Booking Amount No Floor Rise Charges No Processing Fee HLPROJECT HIGHLIGHTS '],
'Id' : [1918916, 1495638,1050651]}
df = pd.DataFrame(data)
df['one_word_tokenized_text'] =df["Clean_Data"].str.split()
df['bigram'] = df['Clean_Data'].apply(lambda row: list(ngrams(word_tokenize(row), 2)))
df['trigram'] = df['Clean_Data'].apply(lambda row: list(ngrams(word_tokenize(row), 3)))
df['four_words'] = df['Clean_Data'].apply(lambda row: list(ngrams(word_tokenize(row), 4)))
token=pd.Series(df["one_word_tokenized_text"])
Lid=pd.Series(df["Id"])
matches= token.apply(lambda x: pd.Series(x).str.extractall("|".join(["({})".format(cat) for cat in Categories.HealthCare])))
match_list= [[m for m in match.values.ravel() if isinstance(m, str)] for match in matches]
match_df = pd.DataFrame({"ID":Lid,"jc1":match_list})
def match_word(feature, row):
    categories = []
    for bigram in row.bigram:
        joined = ' '.join(bigram)
        if joined in feature:
            categories.append(joined)
    for trigram in row.trigram:
        joined = ' '.join(trigram)
        if joined in feature:
            categories.append(joined)
    for fourwords in row.four_words:
        joined = ' '.join(fourwords)
        if joined in feature:
            categories.append(joined)
    return categories
match_df['Health1'] = df.apply(partial(match_word, HealthCare), axis=1)
match_df['HealthCare'] = match_df[match_df.columns[[1,2]]].apply(lambda x: ','.join(x.dropna().astype(str)), axis=1)
Output
print(match_df)
+---------+------------------+---------+-------------------------------------+
| ID      | jc1              | Health1 | HealthCare                          |
+---------+------------------+---------+-------------------------------------+
| 1918916 | [sauna, jacuzzi] |         | ['sauna', 'jacuzzi'],['steam room'] |
+---------+------------------+---------+-------------------------------------+
| 1495638 |                  |         |                                     |
+---------+------------------+---------+-------------------------------------+
| 1050651 | [Booking]        |         | ['Booking'],[]                      |
+---------+------------------+---------+-------------------------------------+
I am supposed to get certain information from a .txt file and output it. This is the information I need:
State with the maximum population
State with the minimum population
Average state population
State of Texas population
The DATA looks like:
Alabama
AL
4802982
Alaska
AK
721523
Arizona
AZ
6412700
Arkansas
AR
2926229
California
CA
37341989
This is my code that does not really do anything I need it to do:
def main():
    # Open the StateCensus2010.txt file.
    census_file = open('StateCensus2010.txt', 'r')
    # Read the state name
    state_name = census_file.readline()
    while state_name != '':
        state_abv = census_file.readline()
        population = int(census_file.readline())
        state_name = state_name.rstrip('\n')
        state_abv = state_abv.rstrip('\n')
        print('State Name: ', state_name)
        print('State Abv.: ', state_abv)
        print('Population: ', population)
        print()
        state_name = census_file.readline()
    census_file.close()

main()
All I have it doing is reading the state name and abbreviation and converting the population into an int. I don't need it to do any of that, however, and I'm unsure how to do what the assignment is asking. Any hints would definitely be appreciated! I've been trying things for the past few hours to no avail.
Update:
This is my updated code; however, I'm receiving the following error:
Traceback (most recent call last):
File "main.py", line 13, in <module>
if population > max_population:
TypeError: unorderable types: str() > int()
Code:
with open('StateCensus2010.txt', 'r') as census_file:
    while True:
        try:
            state_name = census_file.readline()
            state_abv = census_file.readline()
            population = int(census_file.readline())
        except IOError:
            break

        # data processing here
        max_population = 0
        for population in census_file:
            if population > max_population:
                max_population = population
        print(max_population)
As the data is in a consistent order (state name, state abbreviation, population), you just need to read the lines once and collect all three pieces of information. Below is sample code.
average = 0.0
total = 0.0
state_min = 999999999999
state_max = 0
statename_min = ''
statename_max = ''
texas_population = 0
with open('StateCensus2010.txt', 'r') as file:
    # split on '\n' (newline)
    data = file.read().split('\n')
    # get the length of the data by using the len() method;
    # there are 50 states in the text file and each state has
    # 3 pieces of information stored (state name, state
    # abbreviation, population), so 150 lines / 3 = 50 states
    state_total = len(data) / 3
    # this count is used as an index into the list
    count = 0
    for i in range(int(state_total)):
        statename = data[count]
        state_abv = data[count + 1]
        population = int(data[count + 2])
        print('Statename : ', statename)
        print('State Abv : ', state_abv)
        print('Population: ', population)
        print()
        # sum all states' populations
        total += population
        if population > state_max:
            state_max = population
            statename_max = statename
        if population < state_min:
            state_min = population
            statename_min = statename
        if statename == 'Texas':
            texas_population = population
        # add 3 because we want to jump to the next state:
        # the first three lines are Alabama's info,
        # the next three lines are Alaska's info, and so on
        count += 3

# divide the total population by the number of states
average = total / state_total
print(str(average))
print('Lowest population state :', statename_min)
print('Highest population state :', statename_max)
print('Texas population :', texas_population)
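A more compact way to walk the file three lines at a time (just a sketch under the same assumptions about the file layout, not part of the original answer) is to zip the same iterator with itself:

with open('StateCensus2010.txt', 'r') as f:
    lines = (line.strip() for line in f if line.strip())
    records = [(name, abv, int(pop)) for name, abv, pop in zip(lines, lines, lines)]

largest = max(records, key=lambda r: r[2])    # (name, abv, population) with the biggest population
smallest = min(records, key=lambda r: r[2])
average = sum(r[2] for r in records) / len(records)
texas = next((r[2] for r in records if r[0] == 'Texas'), None)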
This problem is pretty easy using pandas.
Code:
states = []
for line in data:
    states.append(
        dict(state=line.strip(),
             abbrev=next(data).strip(),
             pop=int(next(data)),
             )
    )
df = pd.DataFrame(states)
print(df)
print('\nmax population:\n', df.loc[df['pop'].idxmax()])
print('\nmin population:\n', df.loc[df['pop'].idxmin()])
print('\navg population:\n', df['pop'].mean())
print('\nAZ population:\n', df[df.abbrev == 'AZ'])
Test Data:
from io import StringIO
data = StringIO(u'\n'.join([x.strip() for x in """
Alabama
AL
4802982
Alaska
AK
721523
Arizona
AZ
6412700
Arkansas
AR
2926229
California
CA
37341989
""".split('\n')[1:-1]]))
Results:
abbrev pop state
0 AL 4802982 Alabama
1 AK 721523 Alaska
2 AZ 6412700 Arizona
3 AR 2926229 Arkansas
4 CA 37341989 California
max population:
abbrev CA
pop 37341989
state California
Name: 4, dtype: object
min population:
abbrev AK
pop 721523
state Alaska
Name: 1, dtype: object
avg population:
10441084.6
AZ population:
abbrev pop state
2 AZ 6412700 Arizona
Another pandas solution, from the interpreter:
>>> import pandas as pd
>>>
>>> records = [line.strip() for line in open('./your.txt', 'r')]
>>>
>>> df = pd.DataFrame([records[i:i+3] for i in range(0, len(records), 3)],
... columns=['State', 'Code', 'Pop']).dropna()
>>>
>>> df['Pop'] = df['Pop'].astype(int)
>>>
>>> df
State Code Pop
0 Alabama AL 4802982
1 Alaska AK 721523
2 Arizona AZ 6412700
3 Arkansas AR 2926229
4 California CA 37341989
>>>
>>> df.loc[df['Pop'].idxmax()]
State California
Code CA
Pop 37341989
Name: 4, dtype: object
>>>
>>> df.loc[df['Pop'].idxmin()]
State Alaska
Code AK
Pop 721523
Name: 1, dtype: object
>>>
>>> df['Pop'].mean()
10441084.6
>>>
>>> df.loc[df['Code'] == 'AZ']
State Code Pop
2 Arizona AZ 6412700
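The question also asks for the Texas population; the sample only covers the first five states, but with the full 50-state file the same pattern applies (hypothetical, assuming a 'TX' row exists in the file):

>>> df.loc[df['Code'] == 'TX', 'Pop'].iloc[0]   # Texas population as an int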
Please try this; the earlier code was not Python 3 compatible, it supported only Python 2.7.
def extract_data(state):
    total_population = 0
    for states, stats in state.items():
        population = stats.get('population')
        state_name = stats.get('state_name')
        states = states
        total_population = population + total_population
        if 'highest' not in vars():
            highest = population
            highest_state_name = state_name
            highest_state = states
        if 'lowest' not in vars():
            lowest = population
            lowest_state_name = state_name
            lowest_state = states
        if highest < population:
            highest = population
            highest_state_name = state_name
            highest_state = states
        if lowest > population:
            lowest = population
            lowest_state_name = state_name
            lowest_state = states
    print(highest_state, highest)
    print(lowest_state, lowest)
    print(len(state))
    print(int(total_population / len(state)))
    print(state.get('TX').get('population'))

def main():
    # Open the StateCensus2010.txt file.
    census_file = open('states.txt', 'r')
    # Read the state name
    state_name = census_file.readline()
    state = {}
    while state_name != '':
        state_abv = census_file.readline()
        population = int(census_file.readline())
        state_name = state_name.rstrip('\n')
        state_abv = state_abv.rstrip('\n')
        if state_abv in state:
            state[state_abv].update({'population': population, 'state_name': state_name})
        else:
            state.setdefault(state_abv, {'population': population, 'state_name': state_name})
        state_name = census_file.readline()
    census_file.close()
    return state

state = main()
extract_data(state)
How can I put all the Rows values into cell?
rows = db().select(i.INV_ITEMCODE, n.INV_NAME, orderby=i.INV_ITEMCODE, join=n.on(i.POS_TASKCODE == n.POS_TASKCODE))
for r in rows:
    code = str(r.db_i_item.INV_ITEMCODE)
    desc = str(r.db_i_name.INV_NAME)
    row = [dict(rows=rows)]
    cell = [code, desc]
    row = [dict(cell=cell, id=str(+1))]

records = []
total = []
result = None
result = dict(records=str(total), total='1', row=row, page='1')  # records should get the total cell
return result
The RESULT returns only ONE cell value:
dict: {'records': '[]', 'total': '1', 'page': '1', 'row': [{'cell': ['LUBS001', 'Hav. Fully Synthetic 1L'], 'id': '1'}]}
but the ROWS from the query contain:
Rows: db_i_item.INV_ITEMCODE,db_i_name.INV_NAME
LUBS001,Hav. Fully Synthetic 1L
LUBS002,Hav. Formula 1L
LUBS003,Hav. SF 1L
LUBS004,Hav. Plus 2T 200ML
LUBS005,Havoline Plus 2T 1L
LUBS006,Havoline Super 4T 1L
LUBS007,Havoline EZY 4T 1L
LUBS008,Delo Sports 1L
LUBS009,Delo Gold Multigrade 1L
LUBS010,Delo Gold Monograde 1L
LUBS011,Delo Silver 1L
LUBS012,Super Diesel 1L
LUBS013,Brake Fluid 250ML
LUBS014,Brake Fluid 500ML
LUBS015,Brake Fluid 1L
LUBS016,Texamatic ATF 1L
LUBS020,Coolant
LUBS21,Delo
PET001,DIESEL
PET002,SILVER
PET003,GOLD
PET004,REGULAR
PET005,KEROSENE
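The reason only one cell comes back is that row is reassigned on every pass through the loop, so only the last record survives. A minimal sketch of the fix (same rows query as above, variable names are just illustrative) accumulates one dict per record instead:

row = []
for idx, r in enumerate(rows):
    code = str(r.db_i_item.INV_ITEMCODE)
    desc = str(r.db_i_name.INV_NAME)
    row.append(dict(cell=[code, desc], id=str(idx + 1)))

result = dict(records=str(len(row)), total='1', row=row, page='1')
return result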
got it :D
items = db(q1 & q2 & search).select(i.INV_ITEMCODE, n.INV_NAME, m.INV_KIND, p.INV_PRICE, m.INV_DRTABLE, p.INV_PRICECODE, p.INV_ITEMCODEX, orderby=o)
ri = 0
rows = []
for ri, r in enumerate(items):
    # translate the one-letter kind code into a label
    if r.db_i_matrix.INV_KIND == 'W':
        kind = 'Wet'
    else:
        kind = 'Dry'
    # one cell list per record, with a running row number in front
    cell = [str(ri + 1), str(r.db_i_item.INV_ITEMCODE), str(r.db_i_name.INV_NAME), str(kind), str(r.db_i_price.INV_PRICE),
            str(r.db_i_matrix.INV_DRTABLE), str(r.db_i_price.INV_PRICECODE), str(r.db_i_price.INV_ITEMCODEX)]
    records = ri + 1
    # accumulate the rows instead of overwriting them
    rows += [dict(id=str(ri + 1), cell=cell)]
ikind = dict(records=records, totals='1', rows=rows)