Pivot table reindexing in pandas - python

Having a dataframe as below:
df1 = pd.DataFrame({'Name1': ['A','Q','A','B','B','C','C','C','E','E','E'],
                    'Name2': ['B','C','D','C','D','D','A','B','A','B','C'],
                    'Marks2': [10, 20, 6, 50, 88, 23, 140, 9, 60, 65, 70]})
df1
#created a new frame
new=df1.loc[(df1['Marks2'] <= 50)]
new
#created a pivot table
temp=new.pivot_table(index="Name1", columns="Name2", values="Marks2")
temp
I tried to re-index the pivot table.
new_value=['E']
order = new_value+list(temp.index.difference(new_value))
matrix=temp.reindex(index=order, columns=order)
matrix
But the values related to 'E' are not present in the pivot table, even though dataframe df1 does contain values for 'E'. I need to add the values related to 'E' to the pivot_table.
Expected output:

Based on the comments, my understanding of the intended result is:
     E     A     B     C     D
E  NaN  60.0  65.0  70.0   NaN
A  NaN   NaN  10.0   NaN   6.0
C  NaN   NaN   9.0   NaN  23.0
Q  NaN   NaN   NaN  20.0   NaN
Code:
Activate the included #print() statements to see what the individual steps do.
In particular, you may want to adapt the formatting steps at the end to your needs.
import pandas as pd
import numpy as np
df1 = pd.DataFrame({'Name1': ['A','Q','A','B','B','C','C','C','E','E','E'],
                    'Name2': ['B','C','D','C','D','D','A','B','A','B','C'],
                    'Marks2': [10, 20, 6, 50, 88, 23, 140, 9, 60, 65, 70]})
df1['Marks2'] = np.where((df1['Marks2'] >= 50) & (df1['Name1'] != 'E'),
                         np.nan, df1['Marks2'])
#print(df1)
temp=df1.pivot_table(index="Name1", columns="Name2", values="Marks2")
#print(temp)
name1_to_move = 'E'
# build new index with name1_to_move at the start (top in df idx)
idx=temp.index.tolist()
idx.pop(idx.index(name1_to_move))
idx.insert(0, name1_to_move)
# moving the row to top by reindex
temp=temp.reindex(idx)
#print(temp)
temp.insert(loc=0, column=name1_to_move, value=np.nan)
#print(temp)
temp.index.name = None
#print(temp)
temp = temp.rename_axis(None, axis=1)
print(temp)
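As a side note, the pop/insert/column-insert sequence above can also be collapsed into a single reindex call, much like the reindex attempted in the question. A minimal sketch, assuming temp is the result of the pivot_table call above (i.e. before the reordering and formatting steps):
# put 'E' first on both axes; reindexing on a label that is missing
# from the columns creates that column as all-NaN
order_rows = ['E'] + [name for name in temp.index if name != 'E']
order_cols = ['E'] + [name for name in temp.columns if name != 'E']
temp = temp.reindex(index=order_rows, columns=order_cols)
print(temp)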

Related

Assigning values in DataFrame when columns names and values are in single row

I have two dataframes like below,
import numpy as np
import pandas as pd
df1 = pd.DataFrame({1: np.zeros(5), 2: np.zeros(5)}, index=['a','b','c','d','e'])
and
df2 = pd.DataFrame({'category': [1,1,2,2], 'value':[85,46, 39, 22]}, index=[0, 1, 3, 4])
The values from the second dataframe need to be assigned to the first dataframe so that the index and column relationship is maintained. The second dataframe's index is positional (iloc-based), and its category column contains the column names of the first dataframe; value holds the values to be assigned.
Following is my solution, which produces the expected output:
for _category in df2['category'].unique():
    df1.loc[df1.iloc[df2[df2['category'] == _category].index.tolist()].index, _category] = \
        df2[df2['category'] == _category]['value'].values
Is there a pythonic way of doing so without the for loop?
One option is to pivot and update:
df3 = df1.reset_index()
df3.update(df2.pivot(columns='category', values='value'))
df3 = df3.set_index('index').rename_axis(None)
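For intuition, this is what the pivoted df2 looks like before update is applied; update aligns it with the 0-4 RangeIndex created by reset_index and only overwrites the matching, non-NaN cells (intermediate computed from the df2 above):
print(df2.pivot(columns='category', values='value'))
# category     1     2
# 0         85.0   NaN
# 1         46.0   NaN
# 3          NaN  39.0
# 4          NaN  22.0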
Alternatively, pivot df2, reindex it (in two steps: numerically, then by label), and combine_first with df1:
df3 = (df2
       .pivot(columns='category', values='value')
       .reindex(range(max(df2.index) + 1))
       .set_axis(df1.index)
       .combine_first(df1)
      )
output:
1 2
a 85.0 0.0
b 46.0 0.0
c 0.0 0.0
d 0.0 39.0
e 0.0 22.0
Here's one way: replace the 0s in df1 with NaN, pivot df2, and fill the NaNs in df1 from df2:
out = (df1.replace(0, pd.NA).reset_index()
          .fillna(df2.pivot(columns='category', values='value'))
          .set_index('index').rename_axis(None).fillna(0))
Output:
1 2
a 85.0 0.0
b 46.0 0.0
c 0.0 0.0
d 0.0 39.0
e 0.0 22.0
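The reset_index/set_index round trips above are only needed because df1 is labelled a-e while df2 (and hence its pivot) carries integer labels; update and fillna align by label, so one side has to be relabelled first (the combine_first variant does it the other way round with set_axis). A quick check, assuming the frames above:
print(df1.index)                                            # string labels 'a' .. 'e'
print(df2.pivot(columns='category', values='value').index)  # integer labels 0, 1, 3, 4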

insert missing rows in df with dictionary values

Hello I have the following dataframe
df = pd.DataFrame(data={'grade_1': ['A','B','C'],
                        'grade_1_count': [19, 28, 32],
                        'grade_2': ['pass','fail', np.nan],
                        'grade_2_count': [39, 18, np.nan]})
whereby some grades are missing and need to be inserted into the grade_n columns according to the values in this dictionary
grade_dict = {'grade_1': ['A','B','C','D','E','F'],
              'grade_2': ['pass','fail','not present','borderline']}
and the corresponding row value in the _count column should be filled with np.nan
so the expected output is like this
expected_df = pd.DataFrame(data={'grade_1': ['A','B','C','D','E','F'],
                                 'grade_1_count': [19, 28, 32, 0, 0, 0],
                                 'grade_2': ['pass','fail','not present','borderline', np.nan, np.nan],
                                 'grade_2_count': [39, 18, 0, 0, np.nan, np.nan]})
So far I have this rather inelegant code that creates a column containing all the correct categories for the grades, but I cannot reinsert it into the dataframe or fill the count columns with zeros (the np.nans just reflect empty cells caused by coercing columns with different row lengths). I hope that makes sense; any advice would be great. Thanks.
x = []
for k, v in grade_dict.items():
    out = df[k].reindex(grade_dict[k], axis=0, fill_value=0)
    x = pd.concat([out], axis=1)
    x[k] = x.index
    x = x.reset_index(drop=True)
    df[k] = x.fillna(np.nan)
Here is a solution using two consecutive merges:
# set up combinations
from itertools import zip_longest
df2 = pd.DataFrame(list(zip_longest(*grade_dict.values())), columns=grade_dict)
# merge
(df2.merge(df.filter(like='grade_1'),
           on='grade_1', how='left')
    .merge(df.filter(like='grade_2'),
           on='grade_2', how='left')
    .sort_index(axis=1)
)
output:
grade_1 grade_1_count grade_2 grade_2_count
0 A 19.0 pass 39.0
1 B 28.0 fail 18.0
2 C 32.0 not present NaN
3 D NaN borderline NaN
4 E NaN None NaN
5 F NaN None NaN
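For reference, the combination frame df2 built with zip_longest pads the shorter grade_2 list with None, which is what produces the trailing None/NaN rows above (computed from grade_dict):
print(df2)
#   grade_1      grade_2
# 0       A         pass
# 1       B         fail
# 2       C  not present
# 3       D   borderline
# 4       E         None
# 5       F         None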
multiple merges:
df2 = pd.DataFrame(list(zip_longest(*grade_dict.values())), columns=grade_dict)
for col in grade_dict:
    df2 = df2.merge(df.filter(like=col),
                    on=col, how='left')
df2
If you only need to merge on grade_1 without updating the non-NaNs of grade_2, you can cast grade_dict into a df and then use combine_first:
print (df.set_index("grade_1").combine_first(pd.DataFrame(grade_dict.values(),
index=grade_dict.keys()).T.set_index("grade_1"))
.fillna({"grade_1_count": 0}).reset_index())
grade_1 grade_1_count grade_2 grade_2_count
0 A 19.0 pass 39.0
1 B 28.0 fail 18.0
2 C 32.0 not present NaN
3 D 0.0 borderline NaN
4 E 0.0 None NaN
5 F 0.0 None NaN

Merge 3 or more dataframes

I'm trying to merge 3 dataframes by index, so far unsuccessfully.
Here is the code:
import pandas as pd
from functools import reduce
# identifying csvs
x = '/home/'
csvpaths = ("Data1.csv", "Data2.csv", "Data3.csv")
dfs = list()  # an empty list
# creating dataframes based on number of csvs
for i in range(len(csvpaths)):
    dfs.append(pd.read_csv(str(x) + csvpaths[i], index_col=0))
print(dfs[1])
#creating suffix for each dataframe's columns
S = []
for y in csvpaths:
    s = str(y).split('.csv')[0]
    S.append(s)
print(S)
#merging attempt
dfx = lambda a,b: pd.merge(a,b,on='SHIP_ID',suffixes=(S)), dfs
print(dfx)
print(dfx.columns)
If I try to export it as csv I get an error as follows (a similar error occurs when I try to print dfx.columns):
'tuple' object has no attribute 'to_csv'
The output I want is a merge of the 3 dataframes (with the respective suffixes), please help.
[Note: the table below is very simplified; the original tables consist of dozens of columns and thousands of rows, hence the need for a practical merging method.]
Try:
for s, el in zip(suffixes, dfs):
    el.columns = [str(col) + s for col in el.columns]
dfx = pd.concat(dfs, ignore_index=False, sort=False, axis=1)
For the test case I used:
import pandas as pd
dfs = [pd.DataFrame({"x": [1, 2, 7], "y": list("ghi")}),
       pd.DataFrame({"x": [5, 6], "z": [4, 4]}),
       pd.DataFrame({"x": list("acgjksd")})]
suffixes = ["_1", "_2", "_3"]
for s, el in zip(suffixes, dfs):
    el.columns = [str(col) + s for col in el.columns]
>>> pd.concat(dfs, ignore_index=False, sort=False, axis=1)
x_1 y_1 x_2 z_2 x_3
0 1.0 g 5.0 4.0 a
1 2.0 h 6.0 4.0 c
2 7.0 i NaN NaN g
3 NaN NaN NaN NaN j
4 NaN NaN NaN NaN k
5 NaN NaN NaN NaN s
6 NaN NaN NaN NaN d
Edit:
for s, el in zip(suffixes, dfs):
    el.columns = [str(col) + s for col in el.columns]
    el.set_index('ID', inplace=True)
dfx = pd.concat(dfs, ignore_index=False, sort=False, axis=1).reset_index()
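For completeness, here is a sketch of the reduce-based approach the question seems to have been aiming for (using the same test frames and suffixes as above): suffix every column first so nothing collides, then fold the list of frames into one outer merge on the index. It produces the same merged frame as the concat above.
import pandas as pd
from functools import reduce

dfs = [pd.DataFrame({"x": [1, 2, 7], "y": list("ghi")}),
       pd.DataFrame({"x": [5, 6], "z": [4, 4]}),
       pd.DataFrame({"x": list("acgjksd")})]
suffixes = ["_1", "_2", "_3"]

# give each frame's columns their own suffix to avoid collisions
for s, el in zip(suffixes, dfs):
    el.columns = [str(col) + s for col in el.columns]

# fold the list of frames into a single outer merge on the index
dfx = reduce(lambda left, right: pd.merge(left, right,
                                          left_index=True, right_index=True,
                                          how='outer'),
             dfs)
print(dfx)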

Conditionally Set Multiple Column Values with Dynamic Values Using Loc or Apply

I'm still a novice in Pandas and cannot seem to combine these few basic steps.
Goal:
I would like to perform an efficient lookup and replace of multiple columns based on a conditional.
I have a dataframe df and need an index-based lookup from another dataframe, lookup, when the columns lower_limit and upper_limit are both NaN.
I couldn't get merge/join to work because there is a difference between the index names (think C_something, F_something in DataFrame lookup), left out here for simplicity.
Input:
DataFrames:
import pandas as pd; import numpy as np
df = pd.DataFrame([['A', 3, 5],['B', 2, np.NaN],['C', np.NaN, np.NaN],['D', np.NaN, np.NaN]])
df.columns = ['Name','lower_limit','upper_limit']
df = df.set_index('Name')
lookup = pd.DataFrame([['C_Male', 4, 6],['C_Female', 5, 7],['E_Male', 2, 3],['E_Female', 3, 4]])
lookup.columns = ['Name', 'lower', 'upper']
lookup = lookup.set_index('Name')
# index: Name + index_modifier is the lookup index of interest for example
index_modifier = '_Male'
DataFrames visualized:
# df
      lower_limit  upper_limit
Name
A             3.0          5.0
B             2.0          NaN
C             NaN          NaN
D             NaN          NaN

# lookup
          lower  upper
Name
C_Male        4      6
C_Female      5      7
E_Male        2      3
E_Female      3      4
Expected output:
# df
      lower_limit  upper_limit
Name
A             3.0          5.0
B             2.0          NaN   # <-- does not meet the conditional
C             4.0          6.0   # <-- looked up with index_modifier and changed
D             NaN          NaN   # <-- looked up with index_modifier and left unchanged
Broken Code:
I have tried using df.loc() docs and this answer to mask and set values, but cannot seem to get unique values based on that row's index.
Mask and Set Using df.loc
# error: need get index of each row only
df.loc[(df.lower_limit.isnull()) & (df.upper_limit.isnull()), ['lower_limit','upper_limit'] ] = lookup.loc[df.index + index_modifier]
Mask with df.loc and Then Set
ix_of_interest = df.loc[(df.lower_limit.isnull()) & (df.upper_limit.isnull())].index
# only keep index values that are in DataFrame 'lookup'
ix_of_interest = [ix for ix in ix_of_interest if ((ix + index_modifier) in lookup.index)]
lookup_ix = [ix + index_modifier for ix in ix_of_interest]
# error: Not changing values. I think there is a mismatch of bracket depths for one
df.loc[ix_of_interest, ['lower_limit','upper_limit'] ] = lookup.loc[lookup_ix]
I have also tried to use df.apply() to set the values. See this question.
def do_lookup(row):
    # error: 'numpy.float64' object has no attribute 'is_null'
    if row.lower_limit.isnull() and row.upper_limit.isnull():
        if (row.name + index_modifier) in lookup.index:
            return lookup.loc[row.name + index_modifier]
df['lower_limit', 'upper_limit'] = df.apply(do_lookup, axis=1)
or lambda
df['lower_limit', 'upper_limit'] = df.apply(
    lambda x: lookup.loc[x.name + index_modifier].to_list()
              # isnull() or isnan() would be better
              if ((x.lower_limit == np.NaN) and (x.upper_limit == np.NaN))
              # else may not be needed here
              else [np.NaN, np.NaN],
    axis=1)
This seems like it should be a series of simple steps, but I cannot get them to work correctly. Any insight would be greatly appreciated - my rubber ducky is tired and confused.
You can use Series.fillna with DataFrame.add_suffix:
index_modifier = '_Male'
init_index=df.index
df=df.T.add_suffix(index_modifier).T
df['lower_limit'].fillna(lookup['lower'],inplace=True)
df['upper_limit'].fillna(lookup['upper'],inplace=True)
df.index=init_index
print(df)
   lower_limit  upper_limit
A          3.0          5.0
B          2.0          NaN
C          4.0          6.0
D          NaN          NaN
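An alternative sketch that keeps df untransposed, assuming the df, lookup and index_modifier defined in the question: build the lookup labels from df's own index, align lookup to them with reindex, and assign only the rows where both limits are NaN.
mask = df['lower_limit'].isna() & df['upper_limit'].isna()
looked_up = lookup.reindex(df.index + index_modifier)   # labels 'A_Male', 'B_Male', ...
looked_up.index = df.index                              # relabel back to 'A', 'B', ...
df.loc[mask, ['lower_limit', 'upper_limit']] = looked_up.loc[mask, ['lower', 'upper']].values
print(df)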

search for value with in column by multiindex and take value of another column

I have a MultiIndex dataframe df and a second dataframe df1. I would like to look up the "correl" value for "SPX" in df1 and add it to df in the "correl" column:
import pandas as pd
import numpy as np
np.arrays = [['one','one','one','two','two','two'],
             ["DJ30","SPX","Example","Example","Example","Example"]]
df = pd.DataFrame(columns=[])
df = pd.DataFrame(np.random.randn(6, 2),
                  index=pd.MultiIndex.from_tuples(list(zip(*np.arrays))),
                  columns=['correl','beta'])
df['correl'] = ''
df['beta'] = ''
df
df1 = pd.DataFrame([[0.95, 0.7, "SPX"]],
                   columns=['correl', 'beta', 'index'])
df1
I expect:
              correl  whatever
one  DJ30
     SPX        0.95
     Example
two  Example
     Example
     Example
You can reset_index, merge and set_index:
df.reset_index().merge(df1,
                       left_on='level_1',
                       right_on='index',
                       suffixes=('_x', ''),
                       how='left')\
  .set_index(['level_0','level_1'])
Output:
                 correl  beta index
level_0 level_1
one     DJ30        NaN   NaN   NaN
        SPX        0.95   0.7   SPX
        Example     NaN   NaN   NaN
two     Example     NaN   NaN   NaN
        Example     NaN   NaN   NaN
        Example     NaN   NaN   NaN
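Alternatively, you can map the second index level of df directly against df1 (a sketch assuming the df and df1 built above); Index.map with a Series looks up each label and returns NaN where there is no match:
correl_by_name = df1.set_index('index')['correl']               # e.g. 'SPX' -> 0.95
df['correl'] = df.index.get_level_values(1).map(correl_by_name)
print(df)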
