Pandas dataframe split by memory usage - python

Is there a way to split up a pandas dataframe into multiple dataframes constrained by memory usage?

def split_dataframe(df, size):
    # average number of bytes per row
    row_size = df.memory_usage().sum() / len(df)
    # maximum number of rows per segment (at least 1, cast to int for slicing)
    row_limit = max(int(size // row_size), 1)
    # number of segments, rounded up
    seg_num = (len(df) + row_limit - 1) // row_limit
    # split df into consecutive row slices
    segments = [df.iloc[i*row_limit : (i+1)*row_limit] for i in range(seg_num)]
    return segments
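For example, a hypothetical call splitting df into segments of at most 1 MiB each:

segments = split_dataframe(df, 1024 * 1024)  # hypothetical 1 MiB memory budget
print(len(segments), [len(s) for s in segments])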

This is easiest if the dataframe's columns have fixed-size datatypes (i.e., not object). Here's an example of how you might go about it.
from __future__ import division  # must come first; only needed on Python 2
import numpy as np
import pandas as pd
df = pd.DataFrame({'a': [1]*100, 'b': [1.1, 2] * 50, 'c': range(100)})
# calculate the number of bytes a row occupies
row_bytes = df.dtypes.apply(lambda x: x.itemsize).sum()
mem_limit = 1024
# the maximum number of rows in a segment
max_rows = mem_limit / row_bytes
# the number of dataframes after splitting, rounded up and cast to int for array_split
n_dfs = int(np.ceil(df.shape[0] / max_rows))
# the index labels of each dataframe segment
df_segments = np.array_split(df.index, n_dfs)
# create a list of dataframes that are each below mem_limit
split_dfs = [df.loc[seg, :] for seg in df_segments]
split_dfs
Also, if you can split by columns instead of rows, pandas has a handy memory_usage method.
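As a minimal sketch of that column-wise approach (assuming no single column by itself exceeds the budget), a hypothetical split_by_columns helper could greedily group columns until their combined memory_usage would cross the limit:

import pandas as pd

def split_by_columns(df, mem_limit):
    # bytes per column, excluding the index
    usage = df.memory_usage(index=False)
    groups, current, current_bytes = [], [], 0
    for col in df.columns:
        # start a new group when adding this column would exceed the budget
        if current and current_bytes + usage[col] > mem_limit:
            groups.append(current)
            current, current_bytes = [], 0
        current.append(col)
        current_bytes += usage[col]
    if current:
        groups.append(current)
    return [df[cols] for cols in groups]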

Related

Convert Array to dataframe with Longitude, Latitude coordinates

Imported Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
I am trying to create a heatmap out of my Strava dataset (which turns out to be a CSV file of 155479 rows with geographical coordinates). I first tried to display the whole dataset with Folium using Python, but Folium seemed to crash when I tried to load the whole dataset (it was working with a sample).
Meanwhile I found this post https://towardsdatascience.com/create-a-heatmap-from-the-logs-of-your-activity-tracker-c9fc7ace1657 whose code does work for displaying the whole dataset.
size_x, size_y = 1000, 1000

df2 = df[(df.lat > LAT_MIN) & (df.lat < LAT_MAX) &
         (df.lon > LON_MIN) & (df.lon < LON_MAX)].copy()
df2['x'] = (size_x * (df2.lon - df2.lon.min())/(df2.lon.max() - df2.lon.min())).astype(int)
df2['y'] = (size_y * (df2.lat - df2.lat.min())/(df2.lat.max() - df2.lat.min())).astype(int)
data = np.zeros((size_x, size_y))
width = 2
df3 = df2[['x', 'y', 'type']].groupby(['x', 'y']).count().reset_index()
for index, row in df3.iterrows():
    x = int(row['x'])
    y = int(row['y'])
    data[y - width:y + width, x - width:x + width] += row['type']
max = len(df2.source.unique()) * 1
and creates a decent heatmap:
data[data > max] = max
data = (data - data.min()) / (data.max() - data.min())
cmap = plt.get_cmap('hot')
data = cmap(data)
However, when I try to convert the resulting array to a DataFrame
df_data = pd.DataFrame(data)
df_data.head()
I don't understand the error below:
ValueError: Must pass 2-d input. shape=(1000, 1000, 4)
The error means that Pandas can't organize your data into a table. By definition, tables have 2 dimensions (rows and columns), but the data you passed has 3 dimensions: 1000, 1000 and 4.
To make it work, you should reshape the data to 2 dimensions.
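The trailing dimension of 4 most likely comes from cmap(data), which maps every scalar to an RGBA 4-tuple. A minimal sketch of two ways back to 2-D, assuming data has shape (1000, 1000, 4):

import numpy as np
import pandas as pd

data = np.zeros((1000, 1000, 4))  # stand-in for the RGBA array
# Option 1: keep a single channel (here the red one) as a 1000x1000 table
df_red = pd.DataFrame(data[:, :, 0])
# Option 2: flatten to one row per pixel, one column per channel
df_pixels = pd.DataFrame(data.reshape(-1, 4), columns=['r', 'g', 'b', 'a'])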

Python's `.loc` is really slow on selecting subsets of Data

I have a large multi-indexed (y,t) single-valued DataFrame df. Currently, I'm selecting a subset via df.loc[(Y,T), :] and creating a dictionary out of it. The following MWE works, but the selection is very slow for large subsets.
import numpy as np
import pandas as pd
# Full DataFrame
y_max = 50
Y_max = range(1, y_max+1)
t_max = 100
T_max = range(1, t_max+1)
idx_max = tuple((y,t) for y in Y_max for t in T_max)
df = pd.DataFrame(np.random.sample(y_max*t_max), index=idx_max, columns=['Value'])
# Create Dictionary of Subset of Data
y1 = 4
yN = 10
Y = range(y1, yN+1)
t1 = 5
tN = 9
T = range(t1, tN+1)
idx_sub = tuple((y,t) for y in Y for t in T)
data_sub = df.loc[(Y,T), :] #This is really slow
dict_sub = dict(zip(idx_sub, data_sub['Value']))
# result, e.g. (y,t) = (5,7)
dict_sub[5,7] == df.loc[(5,7), 'Value']
I was thinking of using df.loc[(y1,t1):(yN,tN), :], but it does not work properly, since the second index is then only bounded in the final year yN.
One idea is to use Index.isin with itertools.product for boolean indexing:
from itertools import product
idx_sub = tuple(product(Y, T))
dict_sub = df.loc[df.index.isin(idx_sub),'Value'].to_dict()
print (dict_sub)
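A possibly faster alternative, assuming you sort the index first, is to slice the MultiIndex directly with pd.IndexSlice, which avoids building the full label product:

idx = pd.IndexSlice
dict_sub = df.sort_index().loc[idx[y1:yN, t1:tN], 'Value'].to_dict()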

Monte Carlo simulation in a DataFrame: problem calculating the next row

I want to build up a DataFrame from scratch, with calculations based on the previous value, for a barrier option. I know that I can use a Monte Carlo simulation to solve it, but it just won't work the way I want it to.
The formula is:
Value in row before * np.exp((r-sigma**2/2)*T/TradingDays+sigma*np.sqrt(T/TradingDays)*z)
The first code I wrote just calculates the first column. I know that I need a second loop but can't really manage it.
The result should be that for each simulation a new value is calculated from the value before, for 500 days, meaning S_1 should run up to S_500, with a total of 1000 simulations. (I need to generate new columns based on the previous value, using the formula.)
Similar to this:
So for the 1st simulation 500 days, for the 2nd simulation 500 days, and so on...
import numpy as np
import pandas as pd
from scipy.stats import norm
import random as rd
import math
simulation = 0
S_0 = 42
T = 2
r = 0.02
sigma = 0.20
TradingDays = 500
df = pd.DataFrame()
for i in range(0, TradingDays):
    z = norm.ppf(rd.random())
    simulation = simulation + 1
    S_1 = S_0*np.exp((r-sigma**2/2)*T/TradingDays+sigma*np.sqrt(T/TradingDays)*z)
    df = df.append({
        'S_1': S_1,
        'S_0': S_0
    }, ignore_index=True)
df = df.round({'Z': 6,
               'S_T': 2
               })
df.index += 1
df.index.name = 'Simulation'
print(df)
I found another possible piece of code here, and it does solve the problem, but only for one row; the next row is just the same calculation: Generate a Dataframe that follow a mathematical function for each column / row
If I just replace its formula with mine, I get the same problem.
replacing:
exp(r - q * sqrt(sigma))*T+ (np.random.randn(nrows) * sqrt(deltaT)))
with:
exp((r-sigma**2/2)*T/nrows+sigma*np.sqrt(T/nrows)*z))
import numpy as np
import pandas as pd
from scipy.stats import norm
import random as rd
import math
S_0 = 42
T = 2
r = 0.02
sigma = 0.20
TradingDays = 50
Simulation = 100
df = pd.DataFrame({'s0': [S_0] * Simulation})
for i in range(1, TradingDays):
    z = norm.ppf(rd.random())
    df[f's{i}'] = df.iloc[:, -1] * np.exp((r-sigma**2/2)*T/TradingDays+sigma*np.sqrt(T/TradingDays)*z)
print(df)
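Note that z in this loop is a single scalar per column, so every simulation gets the same draw for day i, which is why each row repeats the same calculation. A minimal sketch of a fix, assuming one independent draw per simulation per day, would replace the scalar with a vector:

for i in range(1, TradingDays):
    z = norm.ppf(np.random.rand(Simulation))  # one independent draw per simulation
    df[f's{i}'] = df.iloc[:, -1] * np.exp((r - sigma**2/2)*T/TradingDays + sigma*np.sqrt(T/TradingDays)*z)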
I would prefer to work with the last piece of code and solve the problem based on it.
How about just overwriting the value of S_0 with the new value of S_1 while you loop, and keeping all the values in a list?
Like this:
import numpy as np
import pandas as pd
import random
from scipy.stats import norm
S_0 = 42
T = 2
r = 0.02
sigma = 0.20
trading_days = 50
output = []
for i in range(trading_days):
    z = norm.ppf(random.random())
    value = S_0*np.exp((r - sigma**2 / 2) * T / trading_days + sigma * np.sqrt(T/trading_days) * z)
    output.append(value)
    S_0 = value
df = pd.DataFrame({'simulation': output})
Perhaps I'm missing something, but I don't see the need for a second loop.
Also, this eliminates calling df.append() in a loop, which should be avoided. (See here)
Solution based on the answer of bartaelterman, thank you very much!
import numpy as np
import pandas as pd
from scipy.stats import norm
import random as rd
import math
# dividing the list into chunks to later append it to the dataframe in the right order
def chunk_list(lst, chunk_size):
    for i in range(0, len(lst), chunk_size):
        yield lst[i:i + chunk_size]

def blackscholes():
    # note: np.sqrt(T) instead of the hardcoded np.sqrt(2), which only matched because T = 2
    d1 = (math.log(S_0/K)+(r+sigma**2/2)*T)/(sigma*np.sqrt(T))
    d2 = (math.log(S_0/K)+(r-sigma**2/2)*T)/(sigma*np.sqrt(T))
    preis_call_option = S_0*norm.cdf(d1)-K*np.exp(-r*T)*norm.cdf(d2)
    return preis_call_option

K = 40
S_0 = 42
T = 2
r = 0.02
sigma = 0.2
U = 38
simulation = 10000
trading_days = 500
trading_days = trading_days - 1
# creating 2 lists for the first and second loop
loop_simulation = []
loop_trading_days = []
# the first loop calculates the first column in a list
for j in range(0, simulation):
    print("Progressbar_1_2 {:2.2%}".format(j / simulation), end="\n\r")
    S_Tag_new = 0
    NORM_S_INV = norm.ppf(rd.random())
    S_Tag = S_0*np.exp((r-sigma**2/2)*T/trading_days+sigma*np.sqrt(T/trading_days)*NORM_S_INV)
    S_Tag_new = S_Tag
    loop_simulation.append(S_Tag)
    # the second loop calculates the rows for the columns in a list
    for i in range(0, trading_days):
        NORM_S_INV = norm.ppf(rd.random())
        S_Tag = S_Tag_new*np.exp((r-sigma**2/2)*T/trading_days+sigma*np.sqrt(T/trading_days)*NORM_S_INV)
        loop_trading_days.append(S_Tag)
        S_Tag_new = S_Tag
# values from the second loop are divided into chunks of trading_days per simulation
loop_trading_days_chunked = list(chunk_list(loop_trading_days, trading_days))
# first dataframe with just the first result of each simulation from the first loop
df1 = pd.DataFrame({'S_Tag 1': loop_simulation})
# appending the chunked list from the second loop to a second dataframe
df2 = pd.DataFrame(loop_trading_days_chunked)
# merging both dataframes into one
df3 = pd.concat([df1, df2], axis=1)
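As a side note, the two loops can also be replaced entirely with vectorized NumPy, which is typically much faster for 10000 simulations; a minimal sketch, assuming one independent standard normal draw per simulation and per day:

import numpy as np
import pandas as pd

S_0, T, r, sigma = 42, 2, 0.02, 0.20
simulation, trading_days = 10000, 500
dt = T / trading_days
# one standard normal draw per simulation and per day
z = np.random.standard_normal((simulation, trading_days))
# daily growth factors, accumulated multiplicatively along each path
steps = np.exp((r - sigma**2 / 2) * dt + sigma * np.sqrt(dt) * z)
paths = S_0 * np.cumprod(steps, axis=1)
df = pd.DataFrame(paths, columns=[f'S_{i}' for i in range(1, trading_days + 1)])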

python: increase performance of finding the best timeshift for a correlation between each X column and y

I have a dataframe X with several columns and a dataframe y with only one column (a series). The rows in X represent timesteps, and I want to find the interval I need to shift each column of X by to obtain the highest correlation with y. I wrote a function that loops over all columns, then loops over all timesteps and correlates the X column with y. If the R² is better than before, I store the timestep. However, with over 300 columns this routine really takes some time, and I need to increase the performance. Is there a nice way to simplify this code?
(In the example I used the iris data set which is of course not a timeseries...)
from sklearn import datasets
import pandas as pd
import numpy as np
from copy import deepcopy

def get_best_shift(dfX, dfy, ti=60, maxt=1440):
    """
    Determines the best correlation for the last maxt minutes based on a
    timestep of ti minutes. Creates a dataframe with the shifted variables
    based on the best match (strongest correlation).
    """
    df_out = deepcopy(dfX)
    for xcol in dfX:
        bestshift = 0
        Rmax = 0
        for ishift in range(0, int(maxt / ti)):
            # x truncated at the end, y truncated at the start by ishift rows
            xvals = dfX[xcol].iloc[0:(dfX.shape[0] - ishift)].values
            yvals = np.array([val[0] for val in dfy.iloc[ishift:dfy.shape[0]].values])
            # drop pairs where either value is NaN
            selector = np.array([str(val) != "nan" for val in (xvals*yvals)], dtype=bool)
            xvals = xvals[selector]
            yvals = yvals[selector]
            R = np.corrcoef(xvals, yvals)[0][1]
            if R ** 2 > Rmax:
                Rmax = R ** 2
                bestshift = ishift
        # pad the shifted column with zeros and rename it with the best shift
        df_out[xcol] = list(np.zeros(bestshift)) + list(dfX[xcol].iloc[0:dfX.shape[0] - bestshift].values)
        df_out = df_out.rename(columns={xcol: ''.join([str(xcol), '_t-', str(bestshift)])})
    return df_out
iris = datasets.load_iris()
X = pd.DataFrame(iris.data)
y = pd.DataFrame(iris.target)
df = get_best_shift(X,y)
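One way to speed this up considerably is to let pandas do the shifting and the NaN handling instead of rebuilding arrays in Python loops. A minimal sketch of the same search, as a hypothetical get_best_shift_fast variant, assuming dfy has a single column and the correlations are defined (non-constant columns):

import pandas as pd

def get_best_shift_fast(dfX, dfy, ti=60, maxt=1440):
    y = dfy.iloc[:, 0]
    df_out = pd.DataFrame(index=dfX.index)
    for xcol in dfX:
        # Series.corr aligns the shifted y and skips NaN pairs automatically
        r2 = {ishift: dfX[xcol].corr(y.shift(-ishift)) ** 2
              for ishift in range(int(maxt / ti))}
        bestshift = max(r2, key=r2.get)
        # shift the column down by bestshift rows, padding the head with zeros
        df_out[f'{xcol}_t-{bestshift}'] = dfX[xcol].shift(bestshift).fillna(0)
    return df_out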

Appending function created column to an existing data frame

I currently have a dataframe as below:
and wish to add a column, E, that is calculated based on the following function.
import numpy as np

def geometric_brownian_motion(T=1, N=100, mu=0.1, sigma=0.01, S0=20):
    dt = float(T)/N
    t = np.linspace(0, T, N)
    W = np.random.standard_normal(size=N)
    W = np.cumsum(W)*np.sqrt(dt)  # standard brownian motion
    X = (mu - 0.5*sigma**2)*t + sigma*W
    S = S0*np.exp(X)  # geometric brownian motion
    return S
(originating from here)
How do I create a time series for all of the dates contained within the dataframe and append it?
The function input parameters are as follows:
T = (#days between df row 1 and df last)/365
N = # rows in data frame
S0 = 100
As I understand it, the essence of the question is how to apply some method to every row, taking into account the fact that calculating a new value needs an index from the dataframe:
I suggest extracting the index as a separate column and using apply as usual.
from functools import partial
df['index'] = df.index
T = # precalculate T here
N = df.shape[0]
applying_method = partial(geometric_brownian_motion, T=T, N=N, S0=100)
df['E'] = df.apply(lambda row: applying_method(*row), axis=1)
Or, if you rename the columns of the dataframe according to your function arguments:
df['E'] = df.apply(lambda row: applying_method(**row), axis=1)
Hope that helps.
