Insert empty row after every Nth row in pandas dataframe - python

I have a dataframe:
pd.DataFrame(columns=['a','b'],data=[[3,4],
[5,5],[9,3],[1,2],[9,9],[6,5],[6,5],[6,5],[6,5],
[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5]])
I want to insert two empty rows after every third row so the resulting output looks like this:
a b
0 3.0 4.0
1 5.0 5.0
2 9.0 3.0
3 NaN NaN
4 NaN NaN
5 1.0 2.0
6 9.0 9.0
7 6.0 5.0
8 NaN NaN
9 NaN NaN
10 6.0 5.0
11 6.0 5.0
12 6.0 5.0
13 NaN NaN
14 NaN NaN
15 6.0 5.0
16 6.0 5.0
17 6.0 5.0
18 NaN NaN
19 NaN NaN
20 6.0 5.0
21 6.0 5.0
22 6.0 5.0
23 NaN NaN
24 NaN NaN
25 6.0 5.0
26 6.0 5.0
I tried a number of things but didn't get any closer to the desired output.

The following should scale well with the size of the DataFrame since it doesn't iterate over the rows and doesn't create intermediate DataFrames.
import pandas as pd

df = pd.DataFrame(columns=['a','b'],data=[[3,4],
    [5,5],[9,3],[1,2],[9,9],[6,5],[6,5],[6,5],[6,5],
    [6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5]])

def add_empty_rows(df, n_empty, period):
    """ Adds 'n_empty' empty rows every 'period' rows to 'df'.
    Returns a new DataFrame. """
    # make sure that the DataFrame index is a RangeIndex(start=0, stop=len(df))
    # and that the original df object is not mutated
    df = df.reset_index(drop=True)
    # length of the new DataFrame containing the NaN rows
    len_new_index = len(df) + n_empty * (len(df) // period)
    # index of the new DataFrame
    new_index = pd.RangeIndex(len_new_index)
    # add an offset (= number of NaN rows up to that row)
    # to the current df.index to align with new_index
    df.index += n_empty * (df.index
                           .to_series()
                           .groupby(df.index // period)
                           .ngroup())
    # reindex by aligning df.index with new_index.
    # Values of new_index not present in df.index are filled with NaN.
    new_df = df.reindex(new_index)
    return new_df
Tests:
# original df
>>> df
a b
0 3 4
1 5 5
2 9 3
3 1 2
4 9 9
5 6 5
6 6 5
7 6 5
8 6 5
9 6 5
10 6 5
11 6 5
12 6 5
13 6 5
14 6 5
15 6 5
16 6 5
# add 2 empty rows every 3 rows
>>> add_empty_rows(df, 2, 3)
a b
0 3.0 4.0
1 5.0 5.0
2 9.0 3.0
3 NaN NaN
4 NaN NaN
5 1.0 2.0
6 9.0 9.0
7 6.0 5.0
8 NaN NaN
9 NaN NaN
10 6.0 5.0
11 6.0 5.0
12 6.0 5.0
13 NaN NaN
14 NaN NaN
15 6.0 5.0
16 6.0 5.0
17 6.0 5.0
18 NaN NaN
19 NaN NaN
20 6.0 5.0
21 6.0 5.0
22 6.0 5.0
23 NaN NaN
24 NaN NaN
25 6.0 5.0
26 6.0 5.0
# add 5 empty rows every 4 rows
>>> add_empty_rows(df, 5, 4)
a b
0 3.0 4.0
1 5.0 5.0
2 9.0 3.0
3 1.0 2.0
4 NaN NaN
5 NaN NaN
6 NaN NaN
7 NaN NaN
8 NaN NaN
9 9.0 9.0
10 6.0 5.0
11 6.0 5.0
12 6.0 5.0
13 NaN NaN
14 NaN NaN
15 NaN NaN
16 NaN NaN
17 NaN NaN
18 6.0 5.0
19 6.0 5.0
20 6.0 5.0
21 6.0 5.0
22 NaN NaN
23 NaN NaN
24 NaN NaN
25 NaN NaN
26 NaN NaN
27 6.0 5.0
28 6.0 5.0
29 6.0 5.0
30 6.0 5.0
31 NaN NaN
32 NaN NaN
33 NaN NaN
34 NaN NaN
35 NaN NaN
36 6.0 5.0
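Since reset_index guarantees a RangeIndex starting at 0, the ngroup() term above is just integer division of the index by the period, so the same idea can be written a little more compactly. A minimal sketch (the name add_empty_rows_simple is mine, not part of the answer):
def add_empty_rows_simple(df, n_empty, period):
    # work on a copy with a clean RangeIndex, as above
    df = df.reset_index(drop=True)
    # target index, long enough to hold the inserted NaN rows
    new_index = pd.RangeIndex(len(df) + n_empty * (len(df) // period))
    # shift each block of 'period' rows down by the number of NaN rows before it
    df.index += n_empty * (df.index // period)
    # positions of new_index missing from df.index become NaN rows
    return df.reindex(new_index)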

Try this:
import numpy as np

(pd.concat([df, pd.DataFrame([[np.nan]*2],
                             index=[i for i in df.index if i % 3 == 2] * 2,
                             columns=list('ab'))])
   .sort_index()
   .reset_index(drop=True))
Output:
a b
0 3.0 4.0
1 5.0 5.0
2 9.0 3.0
3 NaN NaN
4 NaN NaN
5 1.0 2.0
6 9.0 9.0
7 6.0 5.0
8 NaN NaN
9 NaN NaN
10 6.0 5.0
11 6.0 5.0
12 6.0 5.0
13 NaN NaN
14 NaN NaN
15 6.0 5.0
16 6.0 5.0
17 6.0 5.0
18 NaN NaN
19 NaN NaN
20 6.0 5.0
21 6.0 5.0
22 6.0 5.0
23 NaN NaN
24 NaN NaN
25 6.0 5.0
26 6.0 5.0
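The trick here is to give the NaN rows the same index labels as the rows they should follow, concatenate, and sort on the index. A sketch that builds the intermediate NaN block explicitly (and uses a stable sort so the original rows are guaranteed to stay ahead of the NaN rows sharing their label):
import numpy as np

nan_index = [i for i in df.index if i % 3 == 2] * 2   # each label twice -> two NaN rows
nan_block = pd.DataFrame(np.full((len(nan_index), 2), np.nan),
                         index=nan_index, columns=list('ab'))
result = (pd.concat([df, nan_block])
            .sort_index(kind='stable')
            .reset_index(drop=True))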

You can iterate over the rows and append two empty rows after every third row:
data = [[row.tolist(), [pd.NA]*len(row), [pd.NA]*len(row)]
        if (idx+1) % 3 == 0 else [row.tolist()]
        for idx, row in df.iterrows()]
out = pd.DataFrame([i for lst in data for i in lst], columns=df.columns)
print(data)
[[[3, 4]],
[[5, 5]],
[[9, 3], [<NA>, <NA>], [<NA>, <NA>]],
[[1, 2]],
[[9, 9]],
[[6, 5], [<NA>, <NA>], [<NA>, <NA>]],
[[6, 5]],
[[6, 5]],
[[6, 5], [<NA>, <NA>], [<NA>, <NA>]],
[[6, 5]],
[[6, 5]],
[[6, 5], [<NA>, <NA>], [<NA>, <NA>]],
[[6, 5]],
[[6, 5]],
[[6, 5], [<NA>, <NA>], [<NA>, <NA>]],
[[6, 5]],
[[6, 5]]]
print(out)
a b
0 3 4
1 5 5
2 9 3
3 <NA> <NA>
4 <NA> <NA>
5 1 2
6 9 9
7 6 5
8 <NA> <NA>
9 <NA> <NA>
10 6 5
11 6 5
12 6 5
13 <NA> <NA>
14 <NA> <NA>
15 6 5
16 6 5
17 6 5
18 <NA> <NA>
19 <NA> <NA>
20 6 5
21 6 5
22 6 5
23 <NA> <NA>
24 <NA> <NA>
25 6 5
26 6 5

Related

Convert column vector into multi-column matrix

I have a column vector with, say, 30 values (1-30). I would like to reshape this vector into a matrix with 5 values in the first column, 10 values in the second and 15 values in the third column. How would I implement this using Pandas or NumPy?
import numpy as np
import pandas as pd

# Create data
df = pd.DataFrame(np.linspace(1, 30, 30))
print(df)
1
2
:
28
29
30
In order to get something like this:
# Manipulate the column vector to make columns where the first column has 5
# the second column has 10 and the last column has 15 values
'T1' 'T2' 'T3'
1 6 16
2 7 17
3 8 18
4 9 19
5 10 20
NA 11 21
NA 12 22
NA 13 23
NA 14 24
NA 15 25
NA NA 26
NA NA 27
NA NA 28
NA NA 29
NA NA 30
It took a little time to figure out what series this is, and I found that it's a triangular series, just a modified one.
tri = lambda x:int((0.25+2*x)**0.5-0.5)
This would give results like:
0 1 1 2 2 2 3 3 3 3 4 4 4 4 4 5 5 5 5 5 5 ...
And after the modification:
modtri = lambda x:int((0.25+2*(x//5))**0.5-0.5)
0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 ...
So each occurrence in normal triangular series repeats 5 times.
The above modtri function directly maps the index (starting from 0) to the appropriate group ids, so after that, this does the job:
df[0].groupby(modtri).apply(lambda x: pd.Series(x.values)).unstack().T
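A quick way to sanity-check the mapping (a small sketch, not part of the original answer):
# indices 0-4 map to group 0, 5-14 to group 1, 15-29 to group 2
print([modtri(i) for i in range(30)])
# -> five 0s, ten 1s, fifteen 2s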
Full execution:
import pandas as pd
import numpy as np
df = pd.DataFrame(np.linspace(1,30,30))
N = 5 #the increment value
modtri = lambda x:int((0.25+2*(x//N))**0.5-0.5)
df2 = df[0].groupby(modtri).apply(lambda x: pd.Series(x.values)).unstack().T
df2.rename(columns={0: "T1", 1: "T2",2:"T3"},inplace=True)
print(df2)
Output:
T1 T2 T3
0 1.0 6.0 16.0
1 2.0 7.0 17.0
2 3.0 8.0 18.0
3 4.0 9.0 19.0
4 5.0 10.0 20.0
5 NaN 11.0 21.0
6 NaN 12.0 22.0
7 NaN 13.0 23.0
8 NaN 14.0 24.0
9 NaN 15.0 25.0
10 NaN NaN 26.0
11 NaN NaN 27.0
12 NaN NaN 28.0
13 NaN NaN 29.0
14 NaN NaN 30.0
Try this by slicing with reindexing:
df['T1'] = df[0][0:5]
df['T2'] = df[0][5:15].reset_index(drop=True)
df['T3'] = df[0][15:].reset_index(drop=True)
Original data before operation:
df = pd.DataFrame(np.linspace(1,30,30))
print(df)
0
0 1.0
1 2.0
2 3.0
3 4.0
4 5.0
5 6.0
6 7.0
7 8.0
8 9.0
9 10.0
10 11.0
11 12.0
12 13.0
13 14.0
14 15.0
15 16.0
16 17.0
17 18.0
18 19.0
19 20.0
20 21.0
21 22.0
22 23.0
23 24.0
24 25.0
25 26.0
26 27.0
27 28.0
28 29.0
29 30.0
Running the new code:
df['T1'] = df[0][0:5]
df['T2'] = df[0][5:15].reset_index(drop=True)
df['T3'] = df[0][15:].reset_index(drop=True)
print(df)
0 T1 T2 T3
0 1.0 1.0 6.0 16.0
1 2.0 2.0 7.0 17.0
2 3.0 3.0 8.0 18.0
3 4.0 4.0 9.0 19.0
4 5.0 5.0 10.0 20.0
5 6.0 NaN 11.0 21.0
6 7.0 NaN 12.0 22.0
7 8.0 NaN 13.0 23.0
8 9.0 NaN 14.0 24.0
9 10.0 NaN 15.0 25.0
10 11.0 NaN NaN 26.0
11 12.0 NaN NaN 27.0
12 13.0 NaN NaN 28.0
13 14.0 NaN NaN 29.0
14 15.0 NaN NaN 30.0
15 16.0 NaN NaN NaN
16 17.0 NaN NaN NaN
17 18.0 NaN NaN NaN
18 19.0 NaN NaN NaN
19 20.0 NaN NaN NaN
20 21.0 NaN NaN NaN
21 22.0 NaN NaN NaN
22 23.0 NaN NaN NaN
23 24.0 NaN NaN NaN
24 25.0 NaN NaN NaN
25 26.0 NaN NaN NaN
26 27.0 NaN NaN NaN
27 28.0 NaN NaN NaN
28 29.0 NaN NaN NaN
29 30.0 NaN NaN NaN

Pandas: grab positions in dataframe which indexes are listed in another dataframe

Suppose I have two dataframes, with indexes populated so that the elements in each column are unique (because in the real data they are):
import numpy as np
import pandas as pd

vals = pd.DataFrame(np.random.randint(0, 10, (10, 3)), columns=list('ABC'))
indexes = pd.DataFrame(np.argsort(np.random.randint(0, 10, (10, 3)), axis=0)[:5], columns=list('ABC'))
>>> vals
A B C
0 64 20 48
1 28 60 81
2 5 73 77
3 74 66 86
4 41 39 21
5 65 37 98
6 10 20 73
7 6 70 3
8 36 29 28
9 43 13 12
>>> indexes
A B C
0 4 2 3
1 3 3 8
2 5 1 7
3 9 8 9
4 2 4 0
I would like to retain only those values in vals which indexes are listed in indexes. I don't care about row integrity or NAs, as I'll use the columns as Series later.
This is what I came up with:
vals_indexes = pd.DataFrame()
for i in range(vals.shape[1]):
vals_indexes = pd.concat([vals_indexes, vals.iloc[[e for e in indexes.iloc[:, i] if e in vals.index], i]], axis=1)
>>> vals_indexes
A B C
0 NaN NaN 48.0
1 NaN 60.0 NaN
2 5.0 73.0 NaN
3 74.0 66.0 86.0
4 41.0 39.0 NaN
5 65.0 NaN NaN
7 NaN NaN 3.0
8 NaN 29.0 28.0
9 43.0 NaN 12.0
Which is a bit ugly, but works for me. Question: is there a more effective way to do this?
Use .loc within a loop to replace the unwanted positions with NaN:
for i in vals.columns:
    vals.loc[vals[i].isin(list(indexes[i].unique())), i] = np.nan
print(vals)
A B C
0 NaN 2.0 NaN
1 NaN 5.0 NaN
2 2.0 3.0 NaN
3 NaN NaN NaN
4 NaN NaN 6.0
5 9.0 NaN NaN
6 NaN NaN 4.0
7 NaN 7.0 NaN
8 2.0 NaN NaN
9 NaN NaN NaN
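A vectorized alternative, if the goal is to keep exactly the positions whose index appears in indexes (a sketch, assuming vals and indexes as constructed in the question):
# boolean mask per column: True where the row index is listed in 'indexes'
mask = pd.DataFrame({c: vals.index.isin(indexes[c]) for c in vals.columns},
                    index=vals.index)
# keep the marked positions, everything else becomes NaN
vals_indexes = vals.where(mask)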

Pandas backfill specific value

I have a dataframe as such:
import numpy as np
import pandas as pd

df = pd.DataFrame({'val': [np.nan, np.nan, np.nan, np.nan, 15, 1, 5, 2,
                           np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,
                           2, 23, 5, 12, np.nan, np.nan, 3, 4, 5]})
df['name'] = ['a']*8 + ['b']*15
df
>>>
val name
0 NaN a
1 NaN a
2 NaN a
3 NaN a
4 15.0 a
5 1.0 a
6 5.0 a
7 2.0 a
8 NaN b
9 NaN b
10 NaN b
11 NaN b
12 NaN b
13 NaN b
14 2.0 b
15 23.0 b
16 5.0 b
17 12.0 b
18 NaN b
19 NaN b
20 3.0 b
21 4.0 b
22 5.0 b
For each name I want to backfill the prior 3 NaN spots with -1, so that I end up with:
>>>
val name
0 NaN a
1 -1.0 a
2 -1.0 a
3 -1.0 a
4 15.0 a
5 1.0 a
6 5.0 a
7 2.0 a
8 NaN b
9 NaN b
10 NaN b
11 -1.0 b
12 -1.0 b
13 -1.0 b
14 2.0 b
15 23.0 b
16 5.0 b
17 12.0 b
18 -1.0 b
19 -1.0 b
20 3.0 b
21 4.0 b
22 5.0 b
Note there can be multiple sections with NaN. If a section has less than 3 nans it will fill all of them (it backfills all up to 3).
You can use first_valid_index, which returns the index of the first non-null value in each group, then assign -1 with .loc:
idx = df.groupby('name').val.apply(lambda x: x.first_valid_index())
for x in idx:
    df.loc[x - 3:x - 1, 'val'] = -1
df
Out[51]:
val name
0 NaN a
1 -1.0 a
2 -1.0 a
3 -1.0 a
4 15.0 a
5 1.0 a
6 5.0 a
7 2.0 a
8 NaN b
9 NaN b
10 NaN b
11 -1.0 b
12 -1.0 b
13 -1.0 b
14 2.0 b
15 23.0 b
16 5.0 b
17 12.0 b
Update: the approach above only fills the gap before the first valid value in each group; the following handles every NaN section:
# backfill within each group, at most 3 rows back
s = df.groupby('name').val.bfill(limit=3)
# positions that were NaN in the original but were just filled become -1
s.loc[s.notnull() & df.val.isnull()] = -1
s
Out[59]:
0 NaN
1 -1.0
2 -1.0
3 -1.0
4 15.0
5 1.0
6 5.0
7 2.0
8 NaN
9 NaN
10 NaN
11 -1.0
12 -1.0
13 -1.0
14 2.0
15 23.0
16 5.0
17 12.0
18 NaN
19 -1.0
20 -1.0
21 -1.0
22 3.0
23 4.0
24 5.0
Name: val, dtype: float64
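To write the filled values back into the frame (a small follow-up, assuming s computed as above):
df['val'] = s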

pandas backfill NaN by incrementing the last value

I have a data frame:
A B C
Timestamp
1 NaN NaN NaN
2 NaN NaN NaN
3 NaN NaN 5
4 NaN NaN 4
5 NaN 3 3
6 NaN 2 NaN
7 3 1 NaN
8 2 NaN NaN
9 1 NaN NaN
I would like to backfill it by incrementing the last available value in each column so it looks like this:
A B C
Timestamp
1 9 7 7
2 8 6 6
3 7 5 5
4 6 4 4
5 5 3 3
6 4 2 NaN
7 3 1 NaN
8 2 NaN NaN
9 1 NaN NaN
Let's try this:
df1 = df1[::-1].fillna(method='ffill')
(df1 + (df1 == df1.shift()).cumsum()).sort_index()
Output:
A B C
Timestamp
1 9.0 7.0 7.0
2 8.0 6.0 6.0
3 7.0 5.0 5.0
4 6.0 4.0 4.0
5 5.0 3.0 3.0
6 4.0 2.0 NaN
7 3.0 1.0 NaN
8 2.0 NaN NaN
9 1.0 NaN NaN
You can try this:
def bfill_increment(col):
    # NaN mask, reversed so the count runs from the last valid value upwards
    col_null = col.isnull()[::-1]
    # label each consecutive run of NaNs
    groups = col_null.diff().fillna(0).cumsum()
    # position within each NaN run, added to the backfilled value
    return col_null.groupby(groups).cumsum()[::-1] + col.bfill()

df.apply(bfill_increment)
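For reference, a minimal sketch that rebuilds the example frame from the question (values read off its table) and runs the function above:
import numpy as np
import pandas as pd

df = pd.DataFrame(
    {'A': [np.nan]*6 + [3, 2, 1],
     'B': [np.nan]*4 + [3, 2, 1, np.nan, np.nan],
     'C': [np.nan, np.nan, 5, 4, 3] + [np.nan]*4},
    index=pd.RangeIndex(1, 10, name='Timestamp'))

print(df.apply(bfill_increment))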

Python Pandas Dataframe replace values below threshold

How can I apply a function element-wise to a pandas DataFrame and pass a column-wise calculated value (e.g. quantile of column)? For example, what if I want to replace all elements in a DataFrame (with NaN) where the value is lower than the 80th percentile of the column?
def _deletevalues(x, quantile):
    if x < quantile:
        return np.nan
    else:
        return x

df.applymap(lambda x: _deletevalues(x, x.quantile(0.8)))
Using applymap only allows one to access each value individually, and (of course) throws AttributeError: 'float' object has no attribute 'quantile'.
Thank you in advance.
Use DataFrame.mask:
df = df.mask(df < df.quantile())
print (df)
a b c
0 NaN 7.0 NaN
1 NaN NaN 6.0
2 NaN NaN 5.0
3 8.0 NaN NaN
4 7.0 3.0 5.0
5 6.0 7.0 NaN
6 NaN NaN NaN
7 8.0 4.0 NaN
8 NaN NaN 6.0
9 7.0 7.0 6.0
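Note that df.quantile() defaults to the 0.5 quantile (the median); since the question asks for the 80th percentile, you would pass it explicitly, e.g. a minimal variation on the above:
df = df.mask(df < df.quantile(0.8))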
In [139]: df
Out[139]:
a b c
0 1 7 3
1 1 2 6
2 3 0 5
3 8 2 1
4 7 3 5
5 6 7 2
6 0 2 1
7 8 4 1
8 5 0 6
9 7 7 6
for all columns:
In [145]: df.apply(lambda x: np.where(x < x.quantile(),np.nan,x))
Out[145]:
a b c
0 NaN 7.0 NaN
1 NaN NaN 6.0
2 NaN NaN 5.0
3 8.0 NaN NaN
4 7.0 3.0 5.0
5 6.0 7.0 NaN
6 NaN NaN NaN
7 8.0 4.0 NaN
8 NaN NaN 6.0
9 7.0 7.0 6.0
or
In [149]: df[df < df.quantile()] = np.nan
In [150]: df
Out[150]:
a b c
0 NaN 7.0 NaN
1 NaN NaN 6.0
2 NaN NaN 5.0
3 8.0 NaN NaN
4 7.0 3.0 5.0
5 6.0 7.0 NaN
6 NaN NaN NaN
7 8.0 4.0 NaN
8 NaN NaN 6.0
9 7.0 7.0 6.0
