delete all nan values from list in pandas dataframe - python

If a list contains other elements along with NaN, I want to keep the elements and delete only the NaN values, like:
Example 1 ->
index values
0 [nan, 'a', nan, nan]
output should be like
index values
0 ['a']
Example 2 ->
index values
0 [nan, 'a', 'b', 'c']
1 [nan, nan, nan]
output should be like
index values
0 ['a', 'b', 'c']
1 []

This is one approach using df.apply.
import pandas as pd
import numpy as np
df = pd.DataFrame({"a": [[np.nan, np.nan, np.nan, "a", np.nan], [np.nan, np.nan], ["a", "b"]]})
df["a"] = df["a"].apply(lambda x: [i for i in x if str(i) != "nan"])
print(df)
Output:
a
0 [a]
1 []
2 [a, b]
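One caveat: the str(i) != "nan" test would also drop an element that is the literal string "nan". A variant of the same approach using pd.isna, assuming the list elements are scalars:
df["a"] = df["a"].apply(lambda x: [i for i in x if not pd.isna(i)])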

You can use the fact that np.nan == np.nan evaluates to False:
df = pd.DataFrame([[0, [np.nan, 'a', 'b', 'c']],
                   [1, [np.nan, np.nan, np.nan]],
                   [2, [np.nan, 'a', np.nan, np.nan]]],
                  columns=['index', 'values'])
df['values'] = df['values'].apply(lambda x: [i for i in x if i == i])
print(df)
index values
0 0 [a, b, c]
1 1 []
2 2 [a]
lambda is just an anonymous function. You could also use a named function:
def remove_nan(x):
    return [i for i in x if i == i]
df['values'] = df['values'].apply(remove_nan)
Related: Why is NaN not equal to NaN?
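For reference, a quick demonstration of the property the comprehension relies on:
import numpy as np
print(np.nan == np.nan)  # False: NaN is not equal to itself, so it fails i == i
print('a' == 'a')        # True for ordinary values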

df['values'].apply(lambda v: pd.Series(v).dropna().values)
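Note that this returns NumPy arrays rather than Python lists. If lists are wanted, the same idea with tolist():
df['values'] = df['values'].apply(lambda v: pd.Series(v).dropna().tolist())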

You can use pd.Series.map on df['values']; in Python 3 filter returns an iterator, hence the list(...) wrapper:
import pandas as pd
my_filter = lambda x: not pd.isna(x)
df['new_values'] = df['values'].map(lambda x: list(filter(my_filter, x)))


Insert a python list into all rows of a new pd.DataFrame column

I have a python list:
my_list = [1, 'V']
I have a pd.DataFrame:
A B C
0 f v b
1 f i n
2 f i m
I need to create a new column in my dataframe whose value in every row is my_list:
A B C D
0 f v b [1, 'V']
1 f i n [1, 'V']
2 f i m [1, 'V']
As far as I understand, python lists can be cell values, because df.groupby with apply "list" produces them:
df = df.groupby(['A', 'B'], group_keys=True)['C'].apply(list).reset_index(name='H')
A B H
0 f i [n, m]
1 f v [b]
Is it possible without converting my_list's type? What is the easiest way to do that?
I tried:
df['D'] = my_list
df['D'] = pd.Series(my_list)
but they did not meet my expectations
You can try using np.repeat, with its repeats parameter set to the number of rows, which can be read off the shape of the dataframe. Note that calling np.repeat on the bare list would repeat its elements rather than the list itself, so wrap the list in a one-element object array first:
import numpy as np
import pandas as pd
my_list = [1, 'V']
df = pd.DataFrame({'col1': ['f', 'f', 'f'], 'col2': ['v', 'i', 'i'], 'col3': ['b', 'n', 'm']})
# a 1-element object array holding the list, so np.repeat repeats the list itself
wrapped = np.empty(1, dtype=object)
wrapped[0] = my_list
df['new_col'] = np.repeat(wrapped, df.shape[0])
This repeats a reference to my_list once for every row in the DataFrame.
You can do it by building a new array from my_list with repeat and hstack and then forming a new DataFrame. Note that mixing 1 and 'V' in one NumPy array coerces both to strings. The code below has been tested and works fine.
import numpy as np
import pandas as pd
a1 = np.array([['f','v','b'], ['f','i','n'], ['f','i','m']])
a2 = np.array([1, 'V']).repeat(3).reshape(2,3).transpose()
df = pd.DataFrame(np.hstack((a1,a2)))
Edit: another variant that has been tested:
import pandas as pd
import numpy as np
a1 = np.array([['f','v','b'], ['f','i','n'], ['f','i','m']])
a2 = np.squeeze(np.dstack((np.array(1).repeat(3), np.array('V').repeat(3))))
df = pd.DataFrame(np.hstack((a1,a2)))
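For completeness, a plain-Python sketch that keeps the element types intact (1 stays an int). It also shows why df['D'] = my_list failed: pandas tried to align the two list elements against three rows, while here the list is repeated once per row. Note that every row references the same list object, so mutate with care.
import pandas as pd
my_list = [1, 'V']
df = pd.DataFrame({'A': ['f', 'f', 'f'], 'B': ['v', 'i', 'i'], 'C': ['b', 'n', 'm']})
# one reference to my_list per row
df['D'] = [my_list] * len(df)
print(df)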

Compare 2 list columns in pandas and find the diff

DataFrame
import pandas as pd
import numpy as np
df = pd.DataFrame({
    'Id': [1,1,1,1,2,2,3,4,4,4],
    'Col_1': ['AD11','BZ23','CQ45','DL36','LM34','MM23','DL35','AD11','BP23','CQ45'],
    'Col_2': ['AD11',np.nan,np.nan,'DL36',np.nan,np.nan,'DL35',np.nan,np.nan,'CQ45']
}, columns=['Id','Col_1','Col_2'])
Please note that Col_1 & Col_2 have alphanumeric values with more than one character, e.g. 'AD34', 'EC45', etc.
After groupby and reset index
g = df.groupby('Id')[['Col_1','Col_2']].agg(['unique'])
g = g.reset_index(drop=True)
g.columns = [''.join(col).strip() for col in g.columns.values]
I want to:
store values that match in a Match column
store values that do not match in a No_match column
(the expected result was shown as a screenshot)
I tried to use some logic from this post but it doesn't solve my issue.
Is there a better way to do the transformation for my requirement?
Appreciate the help.
First remove the missing values from the lists, then use set.intersection and set.difference:
g = df.groupby('Id')[['Col_1','Col_2']].agg([lambda x: x.dropna().unique().tolist()])
g = g.reset_index(drop=True)
g.columns = [f'{a}_unique' for a, b in g.columns]
z = list(zip(g['Col_1_unique'], g['Col_2_unique']))
g['Match'] = [list(set(a).intersection(b)) for a, b in z]
g['No_Match'] = [list(set(a).difference(b)) for a, b in z]
print (g)
Col_1_unique Col_2_unique Match No_Match
0 [AD11, BZ23, CQ45, DL36] [AD11, DL36] [DL36, AD11] [CQ45, BZ23]
1 [LM34, MM23] [] [] [LM34, MM23]
2 [DL35] [DL35] [DL35] []
3 [AD11, BP23, CQ45] [CQ45] [CQ45] [AD11, BP23]
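Note that sets do not preserve the order of the original lists. If order matters, an order-preserving variant using the same z pairs:
g['Match'] = [[v for v in a if v in set(b)] for a, b in z]
g['No_Match'] = [[v for v in a if v not in set(b)] for a, b in z]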
Here, my simple logic is to compare both lists by value at the same position.
For example, with [a,b,c] & [b,a,c] the match will be [c] only.
Code:
df = pd.DataFrame({
    'Id': [1,1,1,1,2,2,3,4,4,4],
    'Col_1': ['A','B','C','D','L','M','D','A','B','C'],
    'Col_2': ['A','','','D','','','D','', '', 'C']
}, columns=['Id','Col_1','Col_2'])
# In order to compare the lists by value and position, I fill each null value
# with a unique value so that both lists end up the same length
df['Col_2'] = df.apply(lambda x: x.name if x.Col_2 == '' else x.Col_2, axis=1)
g = df.groupby('Id')[['Col_1','Col_2']].agg(['unique'])
g = g.reset_index(drop=True)
g.columns = [''.join(col).strip() for col in g.columns.values]
g['Match'] = g.apply(lambda x: [a for a, b in zip(x.Col_1unique, x.Col_2unique) if a==b], axis=1)
g['Not_Match'] = g.apply(lambda x: [a for a, b in zip(x.Col_1unique, x.Col_2unique) if a!=b], axis=1)
g
Output:
Col_1unique Col_2unique Match Not_Match
0 [A, B, C, D] [A, 1, 2, D] [A, D] [B, C]
1 [L, M] [4, 5] [] [L, M]
2 [D] [D] [D] []
3 [A, B, C] [7, 8, C] [C] [A, B]
Please try the code below; it could be made more efficient, but this is what I tried for the time being:
import pandas as pd
df = pd.DataFrame({
    'Id': [1, 1, 1, 1, 2, 2, 3, 4, 4, 4],
    'Col_1': ['A', 'B', 'C', 'D', 'L', 'M', 'D', 'A', 'B', 'C'],
    'Col_2': ['A', 'nan', 'nan', 'D', 'nan', 'nan', 'D', 'nan', 'nan', 'C']})
print(df)
df['Match'] = ''
df['No-Match'] = ''
for i, row in df.iterrows():
    if row['Col_1'] == row['Col_2']:
        df.at[i, 'Match'] = row['Col_1']
    else:
        df.at[i, 'No-Match'] = row['Col_1']
print(df)
g = df.groupby('Id')[['Id','Col_1','Col_2','Match','No-Match']].agg(['unique'])
g = g.reset_index(drop=True)
g.columns = [''.join(col).strip() for col in g.columns.values]
print(g)
Once you run this, you will get the below output:
Idunique Col_1unique Col_2unique Matchunique No-Matchunique
0 [1] [A, B, C, D] [A, nan, D] [A, D] [B, C]
1 [2] [L, M] [nan] [] [L, M]
2 [3] [D] [D] [D] []
3 [4] [A, B, C] [nan, C] [C] [A, B]

Pandas Multi-index set value based on three different conditions

The objective is to create a new multiindex column based on 3 conditions on the column (B)
Condition for B
if B < -1
    CONDITION_B = 'L'
elif B < 0
    CONDITION_B = 'l'
else
    CONDITION_B = 'g'
Naively, I thought we could simply create two different masks and replace the values as suggested:
# Handle CONDITION_B='l' and CONDITION_B='g'
mask_2 = df.loc[:, idx[:, 'B']] < 0
appenddf_2 = mask_2.replace({True: 'g', False: 'l'}).rename(columns={'A': 'iv'}, level=1)
and then
# CONDITION_B='L'
mask_33 = df.loc[:, idx[:, 'B']] < -1
appenddf_2 = mask_33.replace({True: 'L'}).rename(columns={'A': 'iv'}, level=1)
As expected, this throws an error:
TypeError: sequence item 1: expected str instance, bool found
May I know how to handle the 3 different conditions?
Expected output
ONE TWO
B B
g L
l l
l g
g l
L L
The code to produce the error is
import pandas as pd
import numpy as np
np.random.seed(3)
arrays = [np.hstack([['One']*2, ['Two']*2]) , ['A', 'B', 'A', 'B']]
columns = pd.MultiIndex.from_arrays(arrays)
df = pd.DataFrame(np.random.randn(5, 4), columns=list('ABAB'))
df.columns = columns
idx = pd.IndexSlice
mask_2 = df.loc[:, idx[:, 'B']] < 0
appenddf_2 = mask_2.replace({True: 'g', False: 'l'}).rename(columns={'A': 'iv'}, level=1)
mask_33 = df.loc[:, idx[:, 'B']] < -1
appenddf_2 = mask_33.replace({True: 'L'}).rename(columns={'A': 'iv'}, level=1)
IIUC:
np.select() is ideal in this case:
conditions = [
    df.loc[:, idx[:, 'B']].lt(0) & df.loc[:, idx[:, 'B']].gt(-1),
    df.loc[:, idx[:, 'B']].lt(-1),
    df.loc[:, idx[:, 'B']].ge(0)
]
labels=['l','L','g']
out=pd.DataFrame(np.select(conditions,labels),columns=df.loc[:,idx[:,'B']].columns)
OR
via np.where():
s = np.where(df.loc[:, idx[:, 'B']].lt(0) & df.loc[:, idx[:, 'B']].gt(-1), 'l',
             np.where(df.loc[:, idx[:, 'B']].lt(-1), 'L', 'g'))
out=pd.DataFrame(s,columns=df.loc[:,idx[:,'B']].columns)
output of out:
One Two
B B
0 g L
1 l l
2 l g
3 g l
4 L L
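The three conditions above are mutually exclusive, but note that np.select evaluates the conditions in order and takes the first match. The final ge(0) case can also be expressed through the default parameter; a sketch under the same df and idx:
conditions = [
    df.loc[:, idx[:, 'B']].lt(0) & df.loc[:, idx[:, 'B']].gt(-1),
    df.loc[:, idx[:, 'B']].lt(-1),
]
out = pd.DataFrame(np.select(conditions, ['l', 'L'], default='g'),
                   columns=df.loc[:, idx[:, 'B']].columns)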
I don't fully understand what you want to do but try something like this:
df = pd.DataFrame({'B': [ 0, -1, -2, -2, -1, 0, 0, -1, -1, -2]})
df['ONE'] = np.where(df['B'] < 0, 'l', 'g')
df['TWO'] = np.where(df['B'] < -1, 'L', df['ONE'])
df = df.set_index(['ONE', 'TWO'])
Output result:
>>> df
B
ONE TWO
g g 0
l l -1
L -2
L -2
l -1
g g 0
g 0
l l -1
l -1
L -2

Calculate number of days until first value appears in pandas dataframe

I have the following dataset:
import pandas as pd
from datetime import datetime
import numpy as np
date_rng = pd.date_range(start='2020-07-01', end='2020-07-10', freq='d')
l1 = [np.nan, np.nan, 3, np.nan, np.nan, 4, np.nan, np.nan, 5, np.nan]
l2 = [np.nan, np.nan, np.nan, np.nan, np.nan, 4, np.nan, np.nan, 1, 3]
df = pd.DataFrame({
    'date': date_rng,
    'value': l1,
    'group': 'a'
})
df2 = pd.DataFrame({
    'date': date_rng,
    'value': l2,
    'group': 'b'
})
df = pd.concat([df, df2], ignore_index=True)
df
I would like to count the days until the first value appears for each group. I was able to find the date with the following code, but I still need the number of days for each group.
# first valid value for each group
df.set_index(["date"]).groupby('group')['value'].apply(pd.Series.first_valid_index)
EDIT:
This would be the expected outcome:
columns = ["group", "number_of_days"]
df_features = pd.DataFrame([["a", 3],
                            ["b", 6]],
                           columns=columns)
df_features
Use GroupBy.first to get the first date per group, subtract it with Series.sub, convert the timedeltas to days with Series.dt.days, add 1, and convert to a two-column DataFrame:
s1 = df.groupby('group')['date'].first()
s2 = df.set_index(["date"]).groupby('group')['value'].apply(pd.Series.first_valid_index)
df = s2.sub(s1).dt.days.add(1).reset_index(name='number_of_days')
print (df)
group number_of_days
0 a 3
1 b 6
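An alternative sketch, assuming the dates in each group are consecutive daily values and every group has at least one non-null value: the day count is the position of the first valid entry plus one.
# argmax on the boolean mask gives the 0-based position of the first non-null value
out = (df.groupby('group')['value']
         .apply(lambda s: int(s.notna().to_numpy().argmax()) + 1)
         .reset_index(name='number_of_days'))
print(out)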

Python Pandas concatenate strings and numbers into one string

I am working with a pandas dataframe and trying to concatenate multiple strings and numbers into one string.
This works
df1 = pd.DataFrame({'Col1': ['a', 'b', 'c'], 'Col2': ['a', 'b', 'c']})
df1.apply(lambda x: ', '.join(x), axis=1)
0 a, a
1 b, b
2 c, c
How can I make this work just like df1?
df2 = pd.DataFrame({'Col1': ['a', 'b', 1], 'Col2': ['a', 'b', 1]})
df2.apply(lambda x: ', '.join(x), axis=1)
TypeError: ('sequence item 0: expected str instance, int found', 'occurred at index 2')
Consider the dataframe df
np.random.seed([3,1415])
df = pd.DataFrame(
    np.random.randint(10, size=(3, 3)),
    columns=list('abc')
)
print(df)
a b c
0 0 2 7
1 3 8 7
2 0 6 8
You can use astype(str) ahead of the join
df.astype(str).apply(', '.join, 1)
0 0, 2, 7
1 3, 8, 7
2 0, 6, 8
dtype: object
Using a comprehension
pd.Series([', '.join(l) for l in df.values.astype(str).tolist()], df.index)
0 0, 2, 7
1 3, 8, 7
2 0, 6, 8
dtype: object
In [75]: df2
Out[75]:
Col1 Col2 Col3
0 a a x
1 b b y
2 1 1 2
In [76]: df2.astype(str).add(', ').sum(1).str[:-2]
Out[76]:
0 a, a, x
1 b, b, y
2 1, 1, 2
dtype: object
You have to convert column types to strings.
import pandas as pd
df2 = pd.DataFrame({'Col1': ['a', 'b', 1], 'Col2': ['a', 'b', 1]})
df2.apply(lambda x: ', '.join(x.astype('str')), axis=1)
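Another option, assuming only two columns need joining: Series.str.cat concatenates two string Series element-wise with a separator. A sketch:
import pandas as pd
df2 = pd.DataFrame({'Col1': ['a', 'b', 1], 'Col2': ['a', 'b', 1]})
cols = df2.astype(str)
# element-wise concatenation with ', ' between the two columns
out = cols['Col1'].str.cat(cols['Col2'], sep=', ')
print(out)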
