Python - Find first and last index of consecutive NaN groups in series

I am trying to get a list of tuples with the first and last index of grouped NaNs.
An example input could look like
import pandas as pd
import numpy as np
series = pd.Series([1,2,3,np.nan,np.nan,4,5,6,7,np.nan,8,9,np.nan,np.nan,np.nan])
get_nan_inds(series)
and the output should be
[(3, 5), (9, 10), (12, 15)]
The only similar question I could find doesn't solve my problem.

Alternative solution:
import pandas as pd
import numpy as np
series = pd.Series([1,2,3,np.nan,np.nan,4,5,6,7,np.nan,8,9,np.nan,np.nan,np.nan])
def get_nan_inds(series):
    # Append False at the end so the final transition is captured when the last element is NaN
    is_null_diff = pd.isnull(pd.Series(list(series) + [False])).diff()
    res = [i for i, x in enumerate(list(is_null_diff)) if x == True]
    res = [(a, b) for i, (a, b) in enumerate(zip(res, res[1:])) if i % 2 == 0]
    return res

get_nan_inds(series)

While writing this question I came up with the following function, in case someone else has a similar problem.
def get_nan_inds(series):
    '''Obtain the first and last index of each consecutive NaN group.'''
    series = series.reset_index(drop=True)
    index = series[series.isna()].index.to_numpy()
    if len(index) == 0:
        return []
    indices = np.split(index, np.where(np.diff(index) > 1)[0] + 1)
    return [(ind[0], ind[-1] + 1) for ind in indices]
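For the example series above this reproduces the expected groups (the tuple values may print as NumPy integers):
get_nan_inds(series)
# [(3, 5), (9, 10), (12, 15)]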

Related

find maximums and filter first values

I use argrelextrema to find maximum values in a list:
import pandas as pd
from scipy.signal import argrelextrema
import numpy as np
noise_filter = 3
numbers = pd.Series([101.5, 100, 101.1, 99, 105, 22, 21, 20, 19]).to_numpy()
res = argrelextrema(numbers, np.greater_equal, order=noise_filter, mode='clip')[0]
print(res)
In this example it returns
[0 4]
I want to filter out indexes that are less than noise_filter, so 0 would be removed.
How can I do it inside line
res = argrelextrema(numbers, np.greater_equal, order = noise_filter, mode = 'clip')[0]
so that it looks something like
res = argrelextrema(numbers, np.greater_equal & (index > noise_filter), order = noise_filter, mode = 'clip')[0]
The simple workaround of slicing res
res = res[noise_filter:]
does not suit me.
If you want to filter out everything at or below your noise_filter:
filtered = [n for n in res if n > noise_filter]
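Since argrelextrema returns a tuple of index arrays, res is already a NumPy array, so one option is to apply the same filter with boolean masking:
filtered = res[res > noise_filter]  # keeps only indices greater than noise_filter
print(filtered)  # [4]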

Count matching combinations in a pandas dataframe

I need to find a more efficient solution for the following problem:
Given a dataframe with 4 variables in each row, I need to find the list of 8 elements that includes all the variables per row in a maximum number of rows.
A working, but very slow, solution is to create a second dataframe containing all possible combinations (basically permutations without repetition), then loop through every combination and compare it with the initial dataframe. The number of matching rows is counted and added to the second dataframe.
import numpy as np
import pandas as pd
from itertools import combinations
df = pd.DataFrame(np.random.randint(0,20,size=(100, 4)), columns=list('ABCD'))
df = 'x' + df.astype(str)
listofvalues = df['A'].tolist()
listofvalues.extend(df['B'].tolist())
listofvalues.extend(df['C'].tolist())
listofvalues.extend(df['D'].tolist())
listofvalues = list(dict.fromkeys(listofvalues))
possiblecombinations = list(combinations(listofvalues, 6))
dfcombi = pd.DataFrame(possiblecombinations, columns = ['M','N','O','P','Q','R'])
dfcombi['List'] = dfcombi.M.map(str) + ',' + dfcombi.N.map(str) + ',' + dfcombi.O.map(str) + ',' + dfcombi.P.map(str) + ',' + dfcombi.Q.map(str) + ',' + dfcombi.R.map(str)
dfcombi['Count'] = ''
for x, row in dfcombi.iterrows():
    comparelist = row['List'].split(',')
    pointercounter = df.index[df['A'].isin(comparelist) & df['B'].isin(comparelist) & df['C'].isin(comparelist) & df['D'].isin(comparelist)].tolist()
    dfcombi.loc[x, 'Count'] = len(pointercounter)  # assign back to the frame; writing to `row` would not persist
I assume there must be a way to avoid the for loop and replace it with something vectorized, I just cannot figure out how.
Thanks!
Your code can be rewritten as:
# working with integers is much faster than working with strings
enums, codes = df.stack().factorize()
# encodings of df
s = [set(x) for x in enums.reshape(-1, 4)]
# possible combinations
from itertools import combinations, product
possiblecombinations = np.array([set(x) for x in combinations(range(len(codes)), 6)])
# count the combinations with issubset
ret = [0] * len(possiblecombinations)
for a, (i, b) in product(s, enumerate(possiblecombinations)):
    ret[i] += a.issubset(b)
# the combination with the maximum count
max_combination = possiblecombinations[np.argmax(ret)]
# in codes: {0, 3, 4, 5, 17, 18}
# and in values:
codes[list(max_combination)]
# Index(['x5', 'x15', 'x12', 'x8', 'x0', 'x6'], dtype='object')
All that took about 2 seconds, as opposed to your code, which took around 1.5 minutes.
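For reference, Series.factorize returns an integer-code array together with the unique values; in the snippet above these are bound to enums and codes respectively. A minimal illustration:
codes_arr, uniques = pd.Series(['x5', 'x15', 'x5']).factorize()
# codes_arr -> array([0, 1, 0])
# uniques   -> Index(['x5', 'x15'], dtype='object')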

Collapse sequences of numbers into ranges

Today I'm requesting help with a Python script that I'm writing; I'm using the CSV module to parse a large document with about 1,100 rows, and from each row it's pulling a Case_ID, a unique number that no other row has. For example:
['10215', '10216', '10277', '10278', '10279', '10280', '10281', '10282', '10292', '10293',
'10295', '10296', '10297', '10298', '10299', '10300', '10301', '10302', '10303', '10304',
'10305', '10306', '10307', '10308', '10309', '10310', '10311', '10312', '10313', '10314',
'10315', '10316', '10317', '10318', '10319', '10320', '10321', '10322', '10323', '10324',
'10325', '10326', '10344', '10399', '10400', '10401', '10402', '10403', '10404', '10405',
'10406', '10415', '10416', '10417', '10418', '10430', '10448', '10492', '10493', '10494',
'10495', '10574', '10575', '10576', '10577', '10578', '10579', '10580', '10581', '10582',
'10583', '10584', '10585', '10586', '10587', '10588', '10589', '10590', '10591', '10592',
'10593', '10594', '10595', '10596', '10597', '10598', '10599', '10600', '10601', '10602',
'10603', '10604', '10605', '10606', '10607', '10608', '10609', '10610', '10611', '10612',
'10613', '10614', '10615', '10616', '10617', '10618', '10619', '10620', '10621', '10622',
'10623', '10624', '10625', '10626', '10627', '10628', '10629', '10630', '10631', '10632',
'10633', '10634', '10635', '10636', '10637', '10638', '10639', '10640', '10641', '10642',
'10643', '10644', '10645', '10646', '10647', '10648', '10649', '10650', '10651', '10652',
'10653', '10654', '10655', '10656', '10657', '10658', '10659', '10707', '10708', '10709',
'10710', '10792', '10793', '10794', '10795', '10908', '10936', '10937', '10938', '10939',
'11108', '11109', '11110', '11111', '11112', '11113', '11114', '11115', '11116', '11117',
'11118', '11119', '11120', '11121', '11122', '11123', '11124', '11125', '11126', '11127',
'11128', '11129', '11130', '11131', '11132', '11133', '11134', '11135', '11136', '11137',
'11138', '11139', '11140', '11141', '11142', '11143', '11144', '11145', '11146', '11147',
'11148', '11149', '11150', '11151', '11152', '11153', '11154', '11155', '11194', '11195',
'11196', '11197', '11198', '11199', '11200', '11201', '11202', '11203', '11204', '11205',
'11206', '11207', '11208', '11209', '11210', '11211', '11212', '11213', '11214', '11215',
'11216', '11217', '11218', '11219', '11220', '11221', '11222', '11223', '11224', '11225',
'11226', '11227', '11228', '11229', '11230', '11231', '11232', '11233', '11234', '11235',
'10101', '10102', '10800', '11236']
As you can see, this list is quite an eyeful, so I'd like to include a small function in my script that can reduce all of the sequential runs down to hyphenated bookends, for example 10277-10282.
Thanks to all for any help included! Have a great day.
Doable. Let's see if this can be done with pandas.
import pandas as pd
data = ['10215', '10216', '10277', ...]
# Load data as series.
s = pd.Series(data)
# Find all consecutive rows with a difference of one
# and bin them into groups using `cumsum`.
v = s.astype(int).diff().bfill().ne(1).cumsum()
# Use `groupby` and `apply` to condense the consecutive numbers into ranges.
# This is only done if the group size is >1.
ranges = (
    s.groupby(v).apply(
        lambda x: '-'.join(x.values[[0, -1]]) if len(x) > 1 else x.item()
    ).tolist()
)
print(ranges)
['10215-10216',
'10277-10282',
'10292-10293',
'10295-10326',
'10344',
'10399-10406',
'10415-10418',
'10430',
'10448',
'10492-10495',
'10574-10659',
'10707-10710',
'10792-10795',
'10908',
'10936-10939',
'11108-11155',
'11194-11235',
'10101-10102',
'10800',
'11236']
Your data must be sorted for this to work.
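If it isn't, sort it numerically first, e.g.:
data = sorted(data, key=int)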
You can just use a simple loop here with the following logic:
1. Create a list to store the ranges (ranges).
2. Iterate over the values in your list (l).
3. If ranges is empty, append a list containing the first value in l to ranges.
4. Otherwise, if the difference between the current and previous value is 1, append the current value to the last list in ranges.
5. Otherwise, append a list containing the current value to ranges.
Code:
l = ['10215', '10216', '10277', '10278', '10279', '10280', ...]
ranges = []
for x in l:
    if not ranges:
        ranges.append([x])
    elif int(x) - prev_x == 1:
        ranges[-1].append(x)
    else:
        ranges.append([x])
    prev_x = int(x)
Now you can compute your final ranges by concatenating the first and last element of each list in ranges (if there are at least 2 elements).
final_ranges = ["-".join([r[0], r[-1]] if len(r) > 1 else r) for r in ranges]
print(final_ranges)
#['10215-10216',
# '10277-10282',
# '10292-10293',
# '10295-10326',
# '10344',
# '10399-10406',
# '10415-10418',
# '10430',
# '10448',
# '10492-10495',
# '10574-10659',
# '10707-10710',
# '10792-10795',
# '10908',
# '10936-10939',
# '11108-11155',
# '11194-11235',
# '10101-10102',
# '10800',
# '11236']
This also assumes your data is sorted. You could simplify the code by combining steps 3 and 5, as sketched below.
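A minimal sketch of that simplification, merging the two branches that start a new run (the short-circuit keeps prev_x from being read before it is set):
for x in l:
    if ranges and int(x) - prev_x == 1:
        ranges[-1].append(x)
    else:
        ranges.append([x])  # covers both the empty-list case and the start of a new run
    prev_x = int(x)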
For purely educational purposes (this is much less efficient than the loop above), here's the same thing using map and reduce:
from functools import reduce

def myreducer(ranges, x):
    if not ranges:
        return [[x]]
    elif int(x) - int(ranges[-1][-1]) == 1:
        return ranges[:-1] + [ranges[-1] + [x]]
    else:
        return ranges + [[x]]

final_ranges = map(
    lambda r: "-".join([r[0], r[-1]] if len(r) > 1 else r),
    reduce(myreducer, l, [])
)
There is also the pynumparser package:
import pynumparser
pynumparser.NumberSequence().encode([1, 2, 3, 5, 6, 7, 8, 10])
# result: '1-3,5-8,10'
pynumparser.NumberSequence().parse('1-3,5-8,10')
# result: (1, 2, 3, 5, 6, 7, 8, 10)
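For completeness, the same collapsing can be done with just the standard library; a minimal sketch using itertools.groupby and the classic value-minus-index grouping key (assumes the input is sorted, as above):
from itertools import groupby

def collapse(values):
    nums = [int(v) for v in values]
    out = []
    # within a run of consecutive integers, value - position is constant
    for _, grp in groupby(enumerate(nums), key=lambda t: t[1] - t[0]):
        run = [str(n) for _, n in grp]
        out.append(run[0] if len(run) == 1 else run[0] + '-' + run[-1])
    return out

collapse(['10215', '10216', '10277', '10278', '10279'])
# ['10215-10216', '10277-10279']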

Splitting a dataframe into 10 equal parts and merging 9 parts after picking one at a time in a loop

I need to split a dataframe into 10 parts, then use one part as the test set and merge the remaining 9 to use as the training set. I have come up with the following code, where I am able to split the dataset, and am trying to merge the remaining parts after picking one of the 10.
The first iteration goes fine, but I get the following error in the second iteration.
df = pd.DataFrame(np.random.randn(10, 4), index=list(xrange(10)))
for x in range(3):
    dfList = np.array_split(df, 3)
    testdf = dfList[x]
    dfList.remove(dfList[x])
    print testdf
    traindf = pd.concat(dfList)
    print traindf
    print "================================================"
I don't think you have to split the dataframe into 10 parts, just into 2.
I use this code for splitting a dataframe into a training set and a validation set:
test_index = np.random.choice(df.index, int(len(df.index)/10), replace=False)
test_df = df.loc[test_index]
train_df = df.loc[~df.index.isin(test_index)]
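As an aside, scikit-learn can do the same single split out of the box; a minimal sketch, assuming a 10% test fraction:
from sklearn.model_selection import train_test_split

train_df, test_df = train_test_split(df, test_size=0.1, random_state=0)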
Okay, I got it working this way:
df = pd.DataFrame(np.random.randn(10, 4), index=list(xrange(10)))
dfList = np.array_split(df, 3)
for x in range(3):
    trainList = []
    for y in range(3):
        if y == x:
            testdf = dfList[y]
        else:
            trainList.append(dfList[y])
    traindf = pd.concat(trainList)
    print testdf
    print traindf
    print "================================================"
But a better approach is welcome.
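One small simplification, assuming the same setup as above: build the training set by slicing around the test part instead of mutating the list, which also sidesteps the elementwise DataFrame comparison that list.remove can trigger in the original attempt:
dfList = np.array_split(df, 3)
for x in range(3):
    testdf = dfList[x]
    traindf = pd.concat(dfList[:x] + dfList[x + 1:])  # everything except part x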
You can use the permutation function from numpy.random
import numpy as np
import pandas as pd
import math as mt
l = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
df = pd.DataFrame({'a': l, 'b': l})
# shuffle the dataframe index
shuffled_idx = np.random.permutation(df.index)
# divide the shuffled index into N equal(ish) parts; for this example, let N = 4
N = 4
n = len(shuffled_idx) / N
parts = []
for j in range(N):
    parts.append(shuffled_idx[mt.ceil(j*n): mt.ceil(j*n + n)])
# to show each shuffled part of the data frame
for k in parts:
    print(df.iloc[k])
I wrote a script (find/fork it on GitHub) for splitting a Pandas dataframe randomly. Here's a link to the Pandas merge, join, and concatenate functionality!
The same code, for your reference:
import pandas as pd
import numpy as np
from xlwings import Sheet, Range, Workbook
# path to file
df = pd.read_excel(r"//PATH TO FILE//")
df.columns = [c.replace(' ', "_") for c in df.columns]
x = df.columns[0].encode("utf-8")
# number of parts the data frame or the list needs to be split into
n = 7
seq = list(df[x])
np.random.shuffle(seq)
lists1 = [seq[i:i+n] for i in range(0, len(seq), n)]
listsdf = pd.DataFrame(lists1).reset_index()
dataframesDict = dict()
# calling the xlwings workbook function
Workbook()
for i in range(0, n):
    if Sheet.count() < n:
        Sheet.add()
    dataframesDict[i] = df.loc[df.Column_Name.isin(list(listsdf[listsdf.columns[i+1]]))]
    Range(i, "A1").value = dataframesDict[i]
Looks like you are trying to do a k-fold type of thing, rather than a one-off split. This code should help. The scikit-learn k-fold functionality may also work in your case; that's worth checking out.
# Split dataframe by rows into n roughly equal portions and return a list of them.
def splitDf(df, n):
    splitPoints = list(map(lambda x: int(x * len(df) / n), list(range(1, n))))
    splits = list(np.split(df.sample(frac=1), splitPoints))
    return splits

# Take splits from splitDf, and return the test set (splits[index]) and the training set (the rest).
def makeTrainAndTest(splits, index):
    # index is zero based, so range 0-9 for a 10-fold split
    test = splits[index]
    leftLst = splits[:index]
    rightLst = splits[index+1:]
    train = pd.concat(leftLst + rightLst)
    return train, test
You can then use these functions to make the folds:
df = <my_total_data>
n = 10
splits = splitDf(df, n)
trainTest = []
for i in range(0, n):
    trainTest.append(makeTrainAndTest(splits, i))
# Get test set 2
test2 = trainTest[2][1]
# Get training set zero
train0 = trainTest[0][0]
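For reference, scikit-learn's KFold (mentioned above) does the same bookkeeping; a minimal sketch, assuming a 10-fold split:
from sklearn.model_selection import KFold
import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randn(100, 4))
kf = KFold(n_splits=10, shuffle=True, random_state=0)
for train_idx, test_idx in kf.split(df):
    traindf, testdf = df.iloc[train_idx], df.iloc[test_idx]
    # train / evaluate here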

Pandas optimizing an interpolation/counting algorithm

I have a bunch of data (10M+ records) that breaks down to an identifier, a location and a date. I want to find the number of times that any identifier moved from some location A to some other location B over the entire set of dates. An identifier may not have a location for every date; when an identifier does not have a location recorded, that should be treated as an actual 'unknown' location for that date.
Here is some reproducible fake data...
import numpy as np
import pandas as pd
import datetime
base = datetime.date.today()
num_days = 50
dates = np.array([base - datetime.timedelta(days=x) for x in range(num_days-1, -1, -1)])
ids = np.arange(50)
mi = pd.MultiIndex.from_product([ids, dates])
locations = np.array([chr(x) for x in 97 + np.random.randint(26, size=len(mi))])
s = pd.Series(locations, index=mi)
mask = np.random.rand(len(mi)) > .5
s[mask] = np.nan
s = s.dropna()
My initial thought was to create a dataframe and use boolean masking/vectorized operations to solve this
df = s.unstack(0).fillna('unknown')
Apparently my data is sparse enough to cause a MemoryError (from all the extra entries resulting from unstacking).
My current working solution is the following
def series_fn(s):
    s = s.reindex(pd.date_range(s.index.levels[1].min(), s.index.levels[1].max()), level=-1).fillna('unknown')
    mask_prev = (s != s.shift(-1))[:-1]
    mask_next = (s != s.shift())[1:]
    s_prev = s[:-1][mask_prev]
    s_next = s[1:][mask_next]
    s_tup = pd.Series(list(zip(s_prev, s_next)))
    return s_tup.value_counts()
result_per_id = s.groupby(level=0).apply(series_fn)
result = result_per_id.sum(level=-1)
result looks like
(a, b) 1
(a, c) 5
(a, e) 3
(a, f) 3
(a, g) 3
(a, h) 3
(a, i) 1
(a, j) 1
(a, k) 2
(a, l) 2
...
This is going to take ~5 hours for all my data. Does anyone know any faster ways of doing this?
Thanks!
Hmmm, I guess I should have transposed the data... well, that was a relatively simple fix. Instead of using groupby and apply:
import time

s = s.reorder_levels(['date', 'id'])
s = s.sortlevel(0)
results = []
for i in range(len(s.index.levels[0]) - 1):
    t = time.time()
    s0 = s.loc[s.index.levels[0][i]]
    s1 = s.loc[s.index.levels[0][i+1]]
    df = pd.concat((s0, s1), axis=1)
    # Note: this is slower than the three lines above
    # df = s.loc[s.index.levels[0][0:2], :].unstack(0)
    df = df.fillna('unknown')
    mi = pd.MultiIndex.from_arrays((df.iloc[:, 0], df.iloc[:, 1]))
    s2 = pd.Series(1, mi)
    res = s2.groupby(level=[0, 1]).apply(np.sum)
    results.append(res)
    print(time.time() - t)
results = pd.concat(results, axis=1)
Still unclear on why the commented out section takes about three times as long as the three lines above it.
