LabelEncoder cannot inverse_transform (unseen labels) after imputing missing values - python

I'm at a beginner-to-intermediate data science level. I want to impute missing values in a dataframe using KNN.
Since the dataframe contains both strings and floats, I need to encode/decode values using LabelEncoder.
My method is as follows:
Replace NaN with a placeholder to be able to encode
Encode the text values and store the encoders in a dictionary
Restore the NaN (previously converted) so they can be imputed with KNN
Impute the values with KNN
Decode the values using the dictionary
Unfortunately, in the last step, imputing values adds new values that cannot be decoded (unseen labels error message).
Could you please explain what I am doing wrong, and ideally help me correct it? Before concluding, I wanted to say that I know there are other tools like OneHotEncoder, but I don't know them well enough, and I find LabelEncoder much more intuitive because you can see the result directly in the dataframe (whereas OneHotEncoder returns an array).
Please find below an example of my method. Thank you very much for your help:
[1]
# Import libraries.
import pandas as pd
import numpy as np
# Initialise data of lists.
data = {'Name':['Jack', np.nan, 'Victoria', 'Nicolas', 'Victor', 'Brad'], 'Age':[59, np.nan, 29, np.nan, 65, 50], 'Car color':['Blue', 'Black', np.nan, 'Black', 'Grey', np.nan], 'Height ':[177, 150, np.nan, 180, 175, 190]}
# Make a DataFrame
df = pd.DataFrame(data)
# Print the output.
df
Output :
Name Age Car color Height
0 Jack 59.0 Blue 177.0
1 NaN NaN Black 150.0
2 Victoria 29.0 NaN NaN
3 Nicolas NaN Black 180.0
4 Victor 65.0 Grey 175.0
5 Brad 50.0 NaN 190.0
[2]
# LabelEncoder does not work with NaN values, so I replace them with value '1000' :
df = df.replace(np.nan, 1000)
# And to avoid errors, the string columns must be cast to str (even the '1000' values) :
df[['Name','Car color']] = df[['Name','Car color']].astype(str)
df
Output
Name Age Car color Height
0 Jack 59.0 Blue 177.0
1 1000 1000.0 Black 150.0
2 Victoria 29.0 1000 1000.0
3 Nicolas 1000.0 Black 180.0
4 Victor 65.0 Grey 175.0
5 Brad 50.0 1000 190.0
[3]
# Import LabelEncoder library :
from sklearn.preprocessing import LabelEncoder
# define labelencoder :
le = LabelEncoder()
# Import defaultdict library to make a dict of labelencoder :
from collections import defaultdict
# Initiate a dict of LabelEncoder values :
encoder_dict = defaultdict(LabelEncoder)
# Make a new dataframe of LabelEncoder values :
df[['Name','Car color']] = df[['Name','Car color']].apply(lambda x: encoder_dict[x.name].fit_transform(x))
# Show output :
df
Output
Name Age Car color Height
0 2 59.0 2 177.0
1 0 1000.0 1 150.0
2 5 29.0 0 1000.0
3 3 1000.0 1 180.0
4 4 65.0 3 175.0
5 1 50.0 0 190.0
[4]
# Convert the 1000 placeholders back to NaN so they can be imputed :
df = df.replace(1000, np.nan)
df
Output
Name Age Car color Height
0 2 59.0 2 177.0
1 0 NaN 1 150.0
2 5 29.0 0 NaN
3 3 NaN 1 180.0
4 4 65.0 3 175.0
5 1 50.0 0 190.0
[5]
# Import KNNImputer to impute the missing values :
from sklearn.impute import KNNImputer
# Define imputer :
imputer = KNNImputer(n_neighbors=2)
# Impute and reassign index/columns :
df = pd.DataFrame(np.round(imputer.fit_transform(df)),columns = df.columns)
df
Output
Name Age Car color Height
0 2.0 59.0 2.0 177.0
1 0.0 47.0 1.0 150.0
2 5.0 29.0 0.0 165.0
3 3.0 44.0 1.0 180.0
4 4.0 65.0 3.0 175.0
5 1.0 50.0 0.0 190.0
[6]
# Decode data :
inverse_transform_lambda = lambda x: encoder_dict[x.name].inverse_transform(x)
# Apply it to df -> THIS IS WHERE ERROR OCCURS :
df[['Name','Car color']].apply(inverse_transform_lambda)
Error message :
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-55-8a5e369215f6> in <module>()
----> 1 df[['Name','Car color']].apply(inverse_transform_lambda)
5 frames
/usr/local/lib/python3.6/dist-packages/pandas/core/frame.py in apply(self, func, axis, broadcast, raw, reduce, result_type, args, **kwds)
6926 kwds=kwds,
6927 )
-> 6928 return op.get_result()
6929
6930 def applymap(self, func):
/usr/local/lib/python3.6/dist-packages/pandas/core/apply.py in get_result(self)
184 return self.apply_raw()
185
--> 186 return self.apply_standard()
187
188 def apply_empty_result(self):
/usr/local/lib/python3.6/dist-packages/pandas/core/apply.py in apply_standard(self)
290
291 # compute the result using the series generator
--> 292 self.apply_series_generator()
293
294 # wrap results
/usr/local/lib/python3.6/dist-packages/pandas/core/apply.py in apply_series_generator(self)
319 try:
320 for i, v in enumerate(series_gen):
--> 321 results[i] = self.f(v)
322 keys.append(v.name)
323 except Exception as e:
<ipython-input-54-f16f4965b2c4> in <lambda>(x)
----> 1 inverse_transform_lambda = lambda x: encoder_dict[x.name].inverse_transform(x)
/usr/local/lib/python3.6/dist-packages/sklearn/preprocessing/_label.py in inverse_transform(self, y)
297 "y contains previously unseen labels: %s" % str(diff))
298 y = np.asarray(y)
--> 299 return self.classes_[y]
300
301 def _more_tags(self):
IndexError: ('arrays used as indices must be of integer (or boolean) type', 'occurred at index Name')

Based on my comment, you should cast the encoded values back to integers before decoding:
# Decode data :
inverse_transform_lambda = lambda x: encoder_dict[x.name].inverse_transform(x.astype(int))
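For context: KNNImputer returns floats (and np.round keeps the float dtype), while LabelEncoder.inverse_transform uses the passed values to index its classes_ array, which only accepts integer (or boolean) indices; hence the IndexError. As a minimal sketch, the full decoding step with that cast applied (same df and encoder_dict as above) would look like:
# Cast the encoded columns back to int, then decode each column with its own encoder :
df[['Name','Car color']] = df[['Name','Car color']].apply(lambda x: encoder_dict[x.name].inverse_transform(x.astype(int)))
df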

Related

how to simultaneously print all the matched values in a new column in pandas

I am a newcomer to pandas. I would like to search for some targets in a particular column and print all the matched targets in a new column. Here is my code.
# coding=utf-8
import pandas as pd
import numpy as np
#########
classes = [('Carbon;Pyruvate;vitamins', 16.7),
('Pyruvate;Carbohydrate;Pentose and glucuronate', 30),
('Lipid;Carbon;Galactose', 40.5),
('Galactose;Pyruvate;Fatty acid', 57),
('Fatty acid;Lipid', 22)]
labels = ['Ko_class','FPKM']
alls = pd.DataFrame.from_records(classes, columns=labels)
target =['Carbon', 'Pyruvate','Galactose']
#####
def match(x):
    for i in target:
        if i in x:
            return i
    else:
        return np.nan
alls['Pathways'] = alls['Ko_class'].apply(match)
Its results are:
Ko_class FPKM Pathways
0 Carbon;Pyruvate;vitamins 16.7 Carbon
1 Pyruvate;Carbohydrate;Pentose and glucuronate 30.0 Pyruvate
2 Lipid;Carbon;Galactose 40.5 Carbon
3 Galactose;Pyruvate;Fatty acid 57.0 Pyruvate
4 Fatty acid;Lipid 22.0 NaN
The expected results are:
Ko_class FPKM Pathways
0 Carbon;Pyruvate;vitamins 16.7 Carbon;Pyruvate
1 Pyruvate;Carbohydrate;Pentose and glucuronate 30.0 Pyruvate
2 Lipid;Carbon;Galactose 40.5 Carbon;Galactose
3 Galactose;Pyruvate;Fatty acid 57.0 Galactose;Pyruvate
4 Fatty acid;Lipid 22.0 NaN
My question is: how can I print all the matched targets in the new column, like "Carbon;Pyruvate", and not only "Carbon"?
Your function returns as soon as the first target matches. Instead, use str.extractall and aggregate the matches back into a string:
regex = '|'.join(target)
alls['Pathways'] = (alls['Ko_class'].str.extractall(f'({regex})')[0]
                    .groupby(level=0).agg(';'.join))
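To see why the groupby(level=0) is needed (a quick illustrative check, not part of the original answer): str.extractall returns one row per match, indexed by the original row number plus a match counter, so the matches have to be re-joined per original row:
# Intermediate result: a Series with a MultiIndex (original row, match number)
matches = alls['Ko_class'].str.extractall(f'({regex})')[0]
print(matches)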
Or with a list comprehension and a set:
S = set(target)
alls['Pathways'] = [';'.join(x for x in s.split(';') if x in S)
                   for s in alls['Ko_class']]
output:
Ko_class FPKM Pathways
0 Carbon;Pyruvate;vitamins 16.7 Carbon;Pyruvate
1 Pyruvate;Carbohydrate;Pentose and glucuronate 30.0 Pyruvate
2 Lipid;Carbon;Galactose 40.5 Carbon;Galactose
3 Galactose;Pyruvate;Fatty acid 57.0 Galactose;Pyruvate
4 Fatty acid;Lipid 22.0 NaN

How do you aggregate multiple columns from large DataFrame using pandas?

I am importing an Excel worksheet using pandas and trying to remove any instance where there is a duplicate area measurement for a given frame. The sheet I'm playing with looks vaguely like the table below, wherein there are n files, a measured area from each frame of an individual file, and the frame number that corresponds to each area measurement.
Filename.0          Area.0  Frame.0  Filename.1          Area.1  Frame.1  ...  Filename.n          Area.n  Frame.n
Exp327_Date_File_0  600     1        Exp327_Date_File_1  830     1        ...  Exp327_Date_File_n  700     1
Exp327_Date_File_0  270     2        Exp327_Date_File_1  730     1        ...  Exp327_Date_File_n  600     2
Exp327_Date_File_0  230     3        Exp327_Date_File_1  630     2        ...  Exp327_Date_File_n  500     3
Exp327_Date_File_0  200     4        Exp327_Date_File_1  530     3        ...  Exp327_Date_File_n  400     4
NaN                 NaN     NaN      Exp327_Date_File_1  430     4        ...  NaN                 NaN     NaN
If I manually go through the excel worksheet and concatenate the filenames into just 3 unique columns containing my entire dataset like so:
Filename            Area    Frame
Exp327_Date_File_0  600     1
Exp327_Date_File_0  270     2
etc...              etc...  etc...
Exp327_Date_File_n  530     4
I have been able to successfully use pandas to remove the duplicates using the following:
df_1 = df.groupby(['Filename', 'Frame Number']).agg({'Area': 'sum'})
However, manually concatenating everything into this format isn't feasible when I have hundreds of File replicates and I will then have to separate everything back out into multiple column-sets (similar to how the data is presented in Table 1). How do I either (1) use pandas to create a new Dataframe with every 3 columns stacked on top of each other which I can then group and aggregate before breaking back up into individual sets of columns based on Filename or (2) loop through the multiple filenames and aggregate any Frames with multiple Areas? I have tried option 2:
(row, col) = df.shape  # shape of the data frame the excel file was read into
for count in range(0, round(col/3)):  # iterate through the data
    aggregation_functions = {'Area.'+str(count): 'sum'}  # add Areas together
    df_2.groupby(['Filename.'+str(count), 'Frame Number.'+str(count)]).agg(aggregation_functions)
However, this just returns the same DataFrame without any of the Areas summed together. Any help would be appreciated and please let me know if my question is unclear
Here is a way to achieve option (1):
import numpy as np
import pandas as pd
# sample data
df = pd.DataFrame({'Filename.0': ['Exp327_Date_File_0', 'Exp327_Date_File_0',
'Exp327_Date_File_0', 'Exp327_Date_File_0',
np.NaN],
'Area.0': [600, 270, 230, 200, np.NaN],
'Frame.0': [1, 2, 3, 4, np.NaN],
'Filename.1': ['Exp327_Date_File_1', 'Exp327_Date_File_1',
'Exp327_Date_File_1', 'Exp327_Date_File_1',
'Exp327_Date_File_1'],
'Area.1': [830, 730, 630, 530, 430],
'Frame.1': [1, 1, 2, 3, 4],
'Filename.2': ['Exp327_Date_File_2', 'Exp327_Date_File_2',
'Exp327_Date_File_2', 'Exp327_Date_File_2',
'Exp327_Date_File_2'],
'Area.2': [700, 600, 500, 400, np.NaN],
'Frame.2': [1, 2, 3, 4, np.NaN]})
# create list of sub-dataframes, each with 3 columns, partitioning the original dataframe
subframes = [df.iloc[:, j:(j + 3)] for j in np.arange(len(df.columns), step=3)]
# set column names to the same values for each subframe
for subframe in subframes:
    subframe.columns = ['Filename', 'Area', 'Frame']
# concatenate the subframes
df_long = pd.concat(subframes)
df_long
Filename Area Frame
0 Exp327_Date_File_0 600.0 1.0
1 Exp327_Date_File_0 270.0 2.0
2 Exp327_Date_File_0 230.0 3.0
3 Exp327_Date_File_0 200.0 4.0
4 NaN NaN NaN
0 Exp327_Date_File_1 830.0 1.0
1 Exp327_Date_File_1 730.0 1.0
2 Exp327_Date_File_1 630.0 2.0
3 Exp327_Date_File_1 530.0 3.0
4 Exp327_Date_File_1 430.0 4.0
0 Exp327_Date_File_2 700.0 1.0
1 Exp327_Date_File_2 600.0 2.0
2 Exp327_Date_File_2 500.0 3.0
3 Exp327_Date_File_2 400.0 4.0
4 Exp327_Date_File_2 NaN NaN
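From here, a possible next step (a sketch, not part of the original answer) is the same groupby aggregation used in the question, applied once to the stacked data, after dropping the rows that came from empty cells:
# Drop the all-NaN rows, then sum duplicate (Filename, Frame) measurements
df_agg = (df_long.dropna(subset=['Filename'])
                 .groupby(['Filename', 'Frame'], as_index=False)
                 .agg({'Area': 'sum'}))
df_agg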

Pandas (Python): add a new column in a data frame which depends on its row value and an aggregated value from another data frame

I am new to Python and pandas, so my question may be silly.
Problem:
So I have two data frames, let's say df1 and df2, where
df1 is like
treatment1 treatment2 value comparision test adjustment statsig p_value
0 Treatment Control 0.795953 Treatment:Control t-test Benjamini-Hochberg False 0.795953
1 Treatment2 Control 0.795953 Treatment2:Control t-test Benjamini-Hochberg False 0.795953
2 Treatment2 Treatment 0.795953 Treatment2:Treatment t-test Benjamini-Hochberg False 0.795953
and df2 is like
group_type metric
0 Treatment 31.0
1 Treatment2 83.0
2 Treatment 51.0
3 Treatment 20.0
4 Control 41.0
.. ... ...
336 Treatment3 35.0
337 Treatment3 9.0
338 Treatment3 35.0
339 Treatment3 9.0
340 Treatment3 35.0
I want to add a column mean_percentage_lift in df1 where
lift_mean_percentage = (mean(treatment1)/mean(treatment2) -1) * 100
where `treatment1` and `treatment2` can be anything in `[Treatment, Control, Treatment2]`
My Approach:
I am using the assign function of the data frame.
df1.assign(mean_percentage_lift = lambda dataframe: lift_mean_percentage(df2, dataframe['treatment1'], dataframe['treatment2']))
where
def lift_mean_percentage(df, treatment1, treatment2):
    treatment1_data = df[df[group_type_col] == treatment1]
    treatment2_data = df[df[group_type_col] == treatment2]
    mean1 = treatment1_data['metric'].mean()
    mean2 = treatment2_data['metric'].mean()
    return (mean1/mean2 - 1) * 100
But I am getting the error "Can only compare identically-labeled Series objects" for the line
treatment1_data = df[df[group_type_col] == treatment1]. Is there something I am doing wrong? Is there an alternative to this?
For dataframe df2:
group_type metric
0 Treatment 31.0
1 Treatment2 83.0
2 Treatment 51.0
3 Treatment 20.0
4 Control 41.0
5 Treatment3 35.0
6 Treatment3 9.0
7 Treatment 35.0
8 Treatment3 9.0
9 Control 5.0
You can try:
def lift_mean_percentage(df, T1, T2):
    treatment1 = df['metric'][df['group_type'] == T1].mean()
    treatment2 = df['metric'][df['group_type'] == T2].mean()
    return (treatment1/treatment2 - 1) * 100
Running:
lift_mean_percentage(df2,'Treatment2','Control')
the result:
260.8695652173913
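The original error comes from passing whole Series (df1['treatment1'] and df1['treatment2']) into the comparison inside the function; applying the function row by row passes scalar labels instead. A possible way to then build the new column (a sketch, not part of the original answer):
# Hypothetical usage: compute the lift for every row of df1
df1['mean_percentage_lift'] = df1.apply(
    lambda row: lift_mean_percentage(df2, row['treatment1'], row['treatment2']),
    axis=1)
df1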

How to aggregate stats in one dataframe based on filtering values in another dataframe?

I have 2 dataframes. rdf is the reference dataframe I am trying to use to define the intervals (Top and Bottom) to calculate an average between (over all of the depths within each interval), while ldf actually contains the values used to run that calculation. rdf defines the top and bottom of each interval an average should be run for, per ID number. There are multiple intervals for each ID.
rdf is formatted as such:
ID Top Bottom
1 2010 3000
1 4300 4500
1 4550 5000
1 7100 7700
2 3200 4100
2 4120 4180
2 4300 5300
2 5500 5520
3 2300 2380
3 3200 4500
ldf is formatted as such:
ID Depth(ft) Value1 Value2 Value3
1 2000 45 .32 423
1 2000.5 43 .33 500
1 2001 40 .12 643
1 2001.5 28 .10 20
1 2002 40 .10 34
1 2002.5 23 .11 60
1 2003 34 .08 900
1 2003.5 54 .04 1002
2 2000 40 .28 560
2 2000 38 .25 654
...
3 2000 43 .30 343
I want to use rdf to define the top and bottom of the interval to calculate the average for Value1, Value2, and Value3. I would also like to have a count documented as well (not all of the values between the intervals necessarily exist, so it could be less than the difference of Bottom - Top). This will then modify rdf to make a new file:
new_rdf is formatted as such:
ID Top Bottom avgValue1 avgValue2 avgValue3 ThicknessCount(ft)
1 2010 3000 54 .14 456 74
1 4300 4500 23 .18 632 124
1 4550 5000 34 .24 780 111
1 7100 7700 54 .19 932 322
2 3200 4100 52 .32 134 532
2 4120 4180 16 .11 111 32
2 4300 5300 63 .29 872 873
2 5500 5520 33 .27 1111 9
3 2300 2380 63 .13 1442 32
3 3200 4500 37 .14 1839 87
I've been going back and forth on the best way to do this. I tried mimicking this time series example: Sum set of values from pandas dataframe within certain time frame
but it doesn't seem translatable:
import pandas as pd
Top = rdf['Top']
Bottom = rdf['Bottom']
Depths = ldf['DEPTH']
def get_depths(x):
    n = ldf[(ldf['DEPTH'] > x['top']) & (ldf['DEPTH'] < x['bottom'])]
    return n['ID'].values[0], n['DEPTH'].sum()
test = pd.DataFrame({'top':Top, 'bottom':Bottom})
test[['ID','Value1']] = test.apply(lambda x : get_depths(x),1).apply(pd.Series)
I get "TypeError: Invalid comparison between dtype=float64 and str"
And it works if I use the samples they made in the post, but it doesn't work with my data. I'm also hoping there's a simpler way to do this.
Edit # 2A:
Note:
Sample DataFrame below is not exactly the same as posted in question
Posting new code here that uses Top and Bottom from rdf to check for DEPTH in ldf and calculate the .mean() for each group using a for-loop. A range_key is created in rdf that is unique to each row, assuming that the DataFrame rdf does not have any duplicates.
# Import libraries
import pandas as pd
import numpy as np
# Create DataFrame
rdf = pd.DataFrame({
'ID': [1,1,1,1,2,2,2,2,3,3],
'Top': [2000,4300,4500,7100,3200,4120,4300,5500,2300,3200],
'Bottom':[2500,4500,5000,7700,4100,4180,5300,5520,2380,4500]
})
ldf = pd.DataFrame({
'ID': [1,1,1,1,1,1,1,1,2,2,3],
'DEPTH': [2000,2000.5,2001,2001.5,4002,4002.5,5003,5003.5,2000,2000,2000],
'Value1':[45,43,40,28,40,23,34,54,40,38,43],
'Value2':[.32,.33,.12,.10,.10,.11,.08,.04,.28,.25,.30],
'Value3':[423,500,643,20,34,60,900,1002,560,654,343]
})
# Create a key for merge later
ldf['range_key'] = np.nan
rdf['range_key'] = np.linspace(1,rdf.shape[0],rdf.shape[0]).astype(int).astype(str)
# Flag each row for a range
for i in range(ldf.shape[0]):
    for j in range(rdf.shape[0]):
        d = ldf['DEPTH'][i]
        if (d >= rdf['Top'][j]) & (d <= rdf['Bottom'][j]):
            rkey = rdf['range_key'][j]
            ldf['range_key'][i] = rkey
            break
ldf['range_key'] = ldf['range_key'].astype(int).astype(str) # Convert to string
# Calculate mean for groups
ldf_mean = ldf.groupby(['ID','range_key']).mean().reset_index()
ldf_mean = ldf_mean.drop(['DEPTH'], axis=1)
# Merge into 'rdf'
new_rdf = rdf.merge(ldf_mean, on=['ID','range_key'], how='left')
new_rdf = new_rdf.drop(['range_key'], axis=1)
new_rdf
Output:
ID Top Bottom Value1 Value2 Value3
0 1 2000 2500 39.0 0.2175 396.5
1 1 4300 4500 NaN NaN NaN
2 1 4500 5000 NaN NaN NaN
3 1 7100 7700 NaN NaN NaN
4 2 3200 4100 NaN NaN NaN
5 2 4120 4180 NaN NaN NaN
6 2 4300 5300 NaN NaN NaN
7 2 5500 5520 NaN NaN NaN
8 3 2300 2380 NaN NaN NaN
9 3 3200 4500 NaN NaN NaN
Edit # 1:
Code below seems to work. Added an if-statement to the return from the code posted in the question above. Not sure if this is what you were looking to get. It calculates the .sum(). The first Top/Bottom range in rdf is changed to a lower range to match the data in ldf.
# Import libraries
import pandas as pd
# Create DataFrame
rdf = pd.DataFrame({
'ID': [1,1,1,1,2,2,2,2,3,3],
'Top': [2000,4300,4500,7100,3200,4120,4300,5500,2300,3200],
'Bottom':[2500,4500,5000,7700,4100,4180,5300,5520,2380,4500]
})
ldf = pd.DataFrame({
'ID': [1,1,1,1,1,1,1,1,2,2,3],
'DEPTH': [2000,2000.5,2001,2001.5,2002,2002.5,2003,2003.5,2000,2000,2000],
'Value1':[45,43,40,28,40,23,34,54,40,38,43],
'Value2':[.32,.33,.12,.10,.10,.11,.08,.04,.28,.25,.30],
'Value3':[423,500,643,20,34,60,900,1002,560,654,343]
})
##### Code from the question (copy-pasted here)
Top = rdf['Top']
Bottom = rdf['Bottom']
Depths = ldf['DEPTH']
def get_depths(x):
    n = ldf[(ldf['DEPTH'] > x['top']) & (ldf['DEPTH'] < x['bottom'])]
    if n.shape[0] > 0:
        return n['ID'].values[0], n['DEPTH'].sum()
test = pd.DataFrame({'top':Top, 'bottom':Bottom})
test[['ID','Value1']] = test.apply(lambda x : get_depths(x),1).apply(pd.Series)
Output:
test
top bottom ID Value1
0 2000 2500 1.0 14014.0
1 4300 4500 NaN NaN
2 4500 5000 NaN NaN
3 7100 7700 NaN NaN
4 3200 4100 NaN NaN
5 4120 4180 NaN NaN
6 4300 5300 NaN NaN
7 5500 5520 NaN NaN
8 2300 2380 NaN NaN
9 3200 4500 NaN NaN
Sample data and imports
import pandas as pd
import numpy as np
import random
# dfr
rdata = {'ID': [1, 1, 1, 1, 2, 2, 2, 2, 3, 3],
'Top': [2010, 4300, 4550, 7100, 3200, 4120, 4300, 5500, 2300, 3200],
'Bottom': [3000, 4500, 5000, 7700, 4100, 4180, 5300, 5520, 2380, 4500]}
dfr = pd.DataFrame(rdata)
# display(dfr.head())
ID Top Bottom
0 1 2010 3000
1 1 4300 4500
2 1 4550 5000
3 1 7100 7700
4 2 3200 4100
# df
np.random.seed(365)
random.seed(365)
rows = 10000
data = {'id': [random.choice([1, 2, 3]) for _ in range(rows)],
'depth': [np.random.randint(2000, 8000) for _ in range(rows)],
'v1': [np.random.randint(40, 50) for _ in range(rows)],
'v2': np.random.rand(rows),
'v3': [np.random.randint(20, 1000) for _ in range(rows)]}
df = pd.DataFrame(data)
df.sort_values(['id', 'depth'], inplace=True)
df.reset_index(drop=True, inplace=True)
# display(df.head())
id depth v1 v2 v3
0 1 2004 48 0.517014 292
1 1 2004 41 0.997347 859
2 1 2006 42 0.278217 851
3 1 2006 49 0.570363 32
4 1 2009 43 0.462985 409
Use each row of dfr to filter and extract stats from df
There are plenty of answers on SO dealing with "TypeError: Invalid comparison between dtype=float64 and str". The numeric columns need to be cleaned of any value that can't be converted to a numeric type.
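For example (a sketch, not part of the original answer; the frame and column names are just illustrative), stray non-numeric entries can be coerced to NaN before filtering:
# Coerce a column that was read in as object/str; anything non-numeric becomes NaN
ldf['DEPTH'] = pd.to_numeric(ldf['DEPTH'], errors='coerce')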
This code deals with using one dataframe to filter and return metrics for another dataframe.
For each row in dfr:
Filter df
Aggregate the mean and count for v1, v2 and v3
.T to transpose the mean and count rows to columns
Convert to a numpy array
Slice the array for the 3 means and append the array to v_mean
Slice the array for the max count and append that value to counts
(the three counts differ only if there are NaNs in the data)
Convert the list of arrays, v_mean, to a dataframe and join it to dfr_new
Add counts as a column in dfr_new
v_mean = list()
counts = list()
for idx, (i, t, b) in dfr.iterrows():  # iterate through each row of dfr
    data = df[['v1', 'v2', 'v3']][(df.id == i) & (df.depth >= t) & (df.depth <= b)].agg(['mean', 'count']).T.to_numpy()  # apply filters and get stats
    v_mean.append(data[:, 0])  # get the 3 means
    counts.append(data[:, 1].max())  # get the max of the 3 counts; each column has a count, and the counts could differ if there are NaNs in the data
# copy dfr to dfr_new
dfr_new = dfr.copy()
# add stats values
dfr_new = dfr_new.join(pd.DataFrame(v_mean, columns=['v1_m', 'v2_m', 'v3_m']))
dfr_new['counts'] = counts
# display(dfr_new)
ID Top Bottom v1_m v2_m v3_m counts
0 1 2010 3000 44.577491 0.496768 502.068266 542.0
1 1 4300 4500 44.555556 0.518066 530.968254 126.0
2 1 4550 5000 44.446281 0.538855 482.818182 242.0
3 1 7100 7700 44.348083 0.489983 506.681416 339.0
4 2 3200 4100 44.804040 0.487011 528.707071 495.0
5 2 4120 4180 45.096774 0.526687 520.967742 31.0
6 2 4300 5300 44.476980 0.529476 523.095764 543.0
7 2 5500 5520 46.000000 0.608876 430.500000 12.0
8 3 2300 2380 44.512195 0.456632 443.195122 41.0
9 3 3200 4500 44.554755 0.516616 501.841499 694.0

Build DataFrame values from multiple dataframes Pandas

I am trying to build a dataframe where the data is grabbed from multiple files. I have created an empty dataframe with the desired shape, but I am having trouble grabbing the data. I found this but when I concat, I am still getting NaN values.
Edit 2: I changed the order of df creation and put the concat inside the for loop, with the same result (for obvious reasons).
import pandas as pd
import os
import glob
def daily_country_framer():
    # create assignments
    country_source = r"C:\Users\USER\PycharmProjects\Corona Stats\Country Series"
    list_of_files = glob.glob(country_source + r"\*.csv")
    latest_file = max(list_of_files, key=os.path.getctime)
    last_frame = pd.read_csv(latest_file)
    date_list = []
    label_list = []
    # build date_list values
    for file in os.listdir(country_source):
        file = file.replace('.csv', '')
        date_list.append(file)
    # build country_list values
    for country in last_frame['Country']:
        label_list.append(country)
    # create dataframe for each file in folder
    for filename in os.listdir(country_source):
        filepath = os.path.join(country_source, filename)
        if not os.path.isfile(filepath):
            continue
        df1 = pd.read_csv(filepath)
        df = pd.DataFrame(index=label_list, columns=date_list)
        df1 = pd.concat([df])
        print(df1)
daily_country_framer()
Two sample dataframes: (notice the different shapes)
Country Confirmed Deaths Recovered
0 World 1595350 95455 353975
1 Afghanistan 484 15 32
2 Albania 409 23 165
3 Algeria 1666 235 347
4 Andorra 583 25 58
.. ... ... ... ...
180 Vietnam 255 0 128
181 West Bank and Gaza 263 1 44
182 Western Sahara 4 0 0
183 Zambia 39 1 24
184 Zimbabwe 11 3 0
[185 rows x 4 columns]
Country Confirmed Deaths Recovered
0 World 1691719 102525 376096
1 Afghanistan 521 15 32
2 Albania 416 23 182
3 Algeria 1761 256 405
4 Andorra 601 26 71
.. ... ... ... ...
181 West Bank and Gaza 267 2 45
182 Western Sahara 4 0 0
183 Yemen 1 0 0
184 Zambia 40 2 25
185 Zimbabwe 13 3 0
[186 rows x 4 columns]
Current output:
01-22-2020 01-23-2020 ... 04-09-2020 04-10-2020
World NaN NaN ... NaN NaN
Afghanistan NaN NaN ... NaN NaN
Albania NaN NaN ... NaN NaN
Algeria NaN NaN ... NaN NaN
Andorra NaN NaN ... NaN NaN
... ... ... ... ... ...
West Bank and Gaza NaN NaN ... NaN NaN
Western Sahara NaN NaN ... NaN NaN
Yemen NaN NaN ... NaN NaN
Zambia NaN NaN ... NaN NaN
Zimbabwe NaN NaN ... NaN NaN
[186 rows x 80 columns]
Desired output: the same shape, but with the NaN replaced by the corresponding values from a target column or a list of all columns (i.e. if ['Confirmed'] then 0, 1, 2, 3, 4; if all columns, then [0,0,0], [1,0,0], [2,0,0])
Your code (with comments inline):
import pandas as pd
import os
import glob
def daily_country_framer():
    # create assignments
    country_source = r"C:\Users\USER\PycharmProjects\Corona Stats\Country Series"
    list_of_files = glob.glob(country_source + r"\*.csv")
    latest_file = max(list_of_files, key=os.path.getctime)
    last_frame = pd.read_csv(latest_file)
    date_list = []
    label_list = []
    # build date_list values
    for file in os.listdir(country_source):
        file = file.replace('.csv', '')
        date_list.append(file)
    # build country_list values
    for country in last_frame['Country']:  # == last_frame['Country'].tolist()
        label_list.append(country)
    # create dataframe for each file in folder
    for filename in os.listdir(country_source):
        filepath = os.path.join(country_source, filename)
        if not os.path.isfile(filepath):
            continue
        df1 = pd.read_csv(filepath)
        # you redefine df1 for every file in the loop. So if there
        # are 10 files, only the last one is actually used anywhere
        # outside this loop.
        df = pd.DataFrame(index=label_list, columns=date_list)
        df1 = pd.concat([df])
        # here you just redefined df1 again as the concatenation of the
        # empty dataframe you just created in the line above.
        print(df1)
daily_country_framer()
So hopefully that illuminates why you were getting the results you were getting. It was doing exactly what you asked it to do.
What you want to do is get a dictionary with dates as keys and the associated dataframe as values, then concatenate that. This can be quite expensive because of some quirks with how pandas does concatenation, but if you concatenate along axis=0, you should be fine.
A better way might be the following:
import pandas as pd
import os
def daily_country_framer(country_source):
    accumulator = {}
    # build date_list values
    for filename in os.listdir(country_source):
        date = filename.replace('.csv', '')
        filepath = os.path.join(country_source, filename)
        accumulator[date] = pd.read_csv(filepath)
    # now we have a dictionary of {date : data} -- perfect!
    df = pd.concat(accumulator)
    return df
daily_country_framer(r"C:\Users\USER\PycharmProjects\Corona Stats\Country Series")
Does that work?
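If the goal is the wide layout from the question (countries as rows, one column per date), a possible follow-up step (a sketch on top of the function above, assuming each CSV has the 'Country' and 'Confirmed' columns shown in the samples) is to turn the concatenation keys into a column and pivot on it:
# Reshape the concatenated long frame into countries x dates for one metric
long_df = daily_country_framer(r"C:\Users\USER\PycharmProjects\Corona Stats\Country Series")
long_df = long_df.reset_index(level=0).rename(columns={'level_0': 'Date'})
wide_df = long_df.pivot_table(index='Country', columns='Date', values='Confirmed')
print(wide_df)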
