I have N samples, each of which contains n values. For each sample, I plot a histogram of its n values, giving N histograms in total. Now I would like a plot that shows the mean of these histograms, with the 5-95% quantile region shaded. An example of such a plot would look like this (please don't mind the dashed and black lines; only the shaded area matters).
So far, I have plotted all the N histograms on top of each other.
I realized that the mean of these histograms is just the histogram of all N × n values pooled together. Sample code for this:
import numpy as np
import matplotlib.pyplot as plt

samples = []
N = 20
n = 250
for i in range(N):
    samples.append(np.random.normal(loc=np.random.rand(1,)[0] / 5 - 0.1, scale=1., size=n))

values_all = None
for i in range(len(samples)):
    values = samples[i]
    weights = np.ones_like(values) / float(len(values))
    plt.hist(values, range=[-4, 4], density=False, histtype='step', color='red', bins=15, weights=weights)
    if values_all is None:
        values_all = values
    else:
        values_all = np.concatenate([values_all, values], axis=0)

weights = np.ones_like(values_all) / float(len(values_all))
plt.hist(values_all, range=[-4, 4], density=False, histtype='step', color='black', bins=15, weights=weights)
plt.show()
Any suggestions on how to find and plot the 5-95% quantiles would be appreciated.
You should collect the probabilities for each bin across the histograms and compute the quantiles over them. Here is sample code:
import numpy as np
import matplotlib.pyplot as plt

# generate data
samples = []
N = 20
n = 250
for i in range(N):
    samples.append(np.random.normal(loc=np.random.rand(1,)[0] / 5 - 0.1, scale=1., size=n))

prob_all = None
for i in range(len(samples)):
    values = samples[i]
    weights = np.ones_like(values) / float(len(values))
    n, bins, patches = plt.hist(values, range=[-4, 4], density=False, histtype='step',
                                color='red', bins=15, weights=weights, alpha=0.5)
    # concatenate the bin probabilities, one column per histogram
    if prob_all is None:
        prob_all = n.reshape(-1, 1)
    else:
        prob_all = np.concatenate([prob_all, n.reshape(-1, 1)], axis=1)
plt.close()  # close the figure with the individual histograms

# find the quantiles for each bin
quant = np.quantile(prob_all, [0.05, 0.5, 0.95], axis=1)

# plot a histogram outline from bin edges and per-bin heights
def plt_hist(bins, quant, clr, alph, lw):
    # horizontal segment on top of each bin
    for j in range(len(n)):
        plt.plot([bins[j], bins[j + 1]], [quant[j], quant[j]], color=clr, linewidth=lw, alpha=alph)
    # vertical segments at the outer edges
    plt.plot([bins[0], bins[0]], [0., quant[0]], color=clr, linewidth=lw, alpha=alph)
    plt.plot([bins[len(n)], bins[len(n)]], [quant[len(n) - 1], 0.], color=clr, linewidth=lw, alpha=alph)
    # vertical segments between neighboring bins
    for j in range(len(n) - 1):
        plt.plot([bins[j + 1], bins[j + 1]], [quant[j], quant[j + 1]], color=clr, linewidth=lw, alpha=alph)

fig, ax = plt.subplots()
ax.set_ylim([0., 0.3])
ax.set_xlim([-4.5, 4.5])
# plot the 50% quantile (the median)
plt_hist(bins, quant[1], clr='blue', alph=1., lw=1.)
# shade between the 5% and 95% quantiles
for i in range(len(n)):
    x = np.arange(bins[i], bins[i + 1], 0.0001)
    y1 = quant[0, i]
    y2 = quant[2, i]
    ax.fill_between(x, y1, y2, facecolor='red', alpha=0.4)
# borders for the shading
plt_hist(bins, quant[0], clr='black', alph=.2, lw=1.)
plt_hist(bins, quant[2], clr='black', alph=.2, lw=1.)
plt.show()
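As an aside, on newer matplotlib versions (3.4+, which provide plt.stairs) the manual plt_hist helper and the per-bin shading loop can be replaced by two calls. This is only a sketch, reusing bins (16 edges) and quant (3 rows of 15 bin heights) from the code above:

fig, ax = plt.subplots()
ax.stairs(quant[1], bins, color='blue')  # median histogram outline
# repeat each row's last value so step='post' also fills the final bin
ax.fill_between(bins, np.append(quant[0], quant[0, -1]),
                np.append(quant[2], quant[2, -1]),
                step='post', facecolor='red', alpha=0.4)
ax.set_xlim(-4.5, 4.5)
ax.set_ylim(0., 0.3)
plt.show()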
I am using "plt.subplots(2, 2, sharex=True, sharey=True)" to draw a 2×2 grid of subplots. Each subplot has two Y axes and contains a normal distribution curve over a histogram. Note that I deliberately set "sharex=True, sharey=True" here in order to make all subplots share the same X axis and Y axis.
After running my code, everything is fine except the second, third, and fourth subplots, where the normal distribution curve doesn't fit the histogram very well (please see the figure here).
I did some googling but failed to get this issue solved. However, if I set "sharex=True, sharey=False" in my code, then the figure looks correct, but all subplots use their own Y-axis, which isn't what I want. Please see the figure here.
I hope this issue can be fixed by the experts on Stack Overflow. Many thanks in advance!
Below is my code:
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm

def align_yaxis(ax1, v1, ax2, v2):
    # adjust ax2 ylimit so that v2 in ax2 is aligned to v1 in ax1
    _, y1 = ax1.transData.transform((0, v1))
    _, y2 = ax2.transData.transform((0, v2))
    inv = ax2.transData.inverted()
    _, dy = inv.transform((0, 0)) - inv.transform((0, y1 - y2))
    miny, maxy = ax2.get_ylim()
    ax2.set_ylim(miny + dy, maxy + dy)

def drawSingle(myax, mydf, title, offset):
    num_bins = 200
    xs = mydf["gap"]
    x = np.linspace(-1, 1, 1000)
    mu = np.mean(x)
    sigma = np.std(xs)
    n, bins, patches = myax.hist(xs, num_bins, alpha=0.8, facecolor='blue', density=False)
    myax.set_ylabel('frequency', color="black", fontsize=12, weight="bold")
    myax.set_xlabel('X', fontsize=12, weight="bold", horizontalalignment='center')
    ax_twin = myax.twinx()
    y_normcurve = norm.pdf(bins, mu, sigma)
    ax_twin.plot(bins, y_normcurve, 'r--')
    align_yaxis(myax, 0, ax_twin, 0)
    peakpoint = norm.pdf(mu, loc=mu, scale=sigma)
    plt.vlines(mu, 0, peakpoint, 'y', '--', label='example')
    ax_twin.set_ylabel("probability density", color="black", fontsize=12, weight="bold")

def drawSubplots(mydf1, mydf2, mydf3, mydf4, pos1, pos2, pos3, pos4, title, filename):
    plt.rcParams['figure.figsize'] = (18, 15)
    my_x_ticks = np.arange(-0.8, 0.8, 0.1)
    rows, cols = 2, 2
    fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
    drawSingle(ax[0][0], mydf1, "Subplot1", pos1)
    drawSingle(ax[0][1], mydf2, "Subplot2", pos2)
    drawSingle(ax[1][0], mydf3, "Subplot3", pos3)
    drawSingle(ax[1][1], mydf4, "Subplot4", pos4)
    plt.text(-1, -1, title, horizontalalignment='center', fontsize=18)
    plt.show()

# df1..df4 are DataFrames with a "gap" column (defined elsewhere)
drawSubplots(df1, df2, df3, df4, 3.2, 3.1, 2.7, 2.85, "test9", "test9")
Here is an attempt to:
have the left y-axes show the "frequency" (which is rather uninformative given the current bin widths) and be shared among the 4 subplots
have the right y-axes be a "probability density"; note how the top of all Gaussians is around y=0.02 (the twin axes can only be set at the end, because the shared y-axes can still be updated by later subplots)
have the histogram and the normal curve aligned
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from scipy.stats import norm

def drawSingle(myax, mydf, title):
    num_bins = 200
    xs = mydf["gap"]
    x = np.linspace(-1, 1, 1000)
    mu = np.mean(x)
    sigma = np.std(xs)
    n, bins, patches = myax.hist(xs, num_bins, alpha=0.8, facecolor='blue', density=False)
    myax.set_ylabel('frequency', color="black", fontsize=12, weight="bold")
    myax.set_xlabel('X', fontsize=12, weight="bold", horizontalalignment='center')
    normalization_factor = len(xs) * (bins[1] - bins[0])
    y_normcurve = norm.pdf(x, mu, sigma) * normalization_factor
    myax.plot(x, y_normcurve, 'r--')
    myax.vlines(mu, 0, y_normcurve.max(), 'y', '--', color='lime', label='example')
    return normalization_factor

def drawSubplots(mydf1, mydf2, mydf3, mydf4, title):
    plt.rcParams['figure.figsize'] = (18, 15)
    fig, ax = plt.subplots(nrows=2, ncols=2, sharex=True, sharey=True)
    dfs = [mydf1, mydf2, mydf3, mydf4]
    norm_factors = [drawSingle(ax_i, df, title)
                    for ax_i, df, title in zip(ax.ravel(), dfs, ["Subplot1", "Subplot2", "Subplot3", "Subplot4"])]
    for ax_i, norm_factor in zip(ax.ravel(), norm_factors):
        ax_twin = ax_i.twinx()
        ymax = ax_i.get_ylim()[1]
        ax_twin.set_ylim(0, ymax / norm_factor)
    plt.suptitle(title, fontsize=18)
    plt.tight_layout()
    plt.show()

df1, df2, df3, df4 = [pd.DataFrame({"gap": np.random.normal(0, 0.2, n)}) for n in [6000, 4000, 1800, 1200]]
drawSubplots(df1, df2, df3, df4, "Title")
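As a follow-up note on why dividing by normalization_factor works: for a histogram of raw counts, each bar height equals density × len(xs) × bin_width. A quick numpy-only sanity check of that identity (a small sketch with made-up data, independent of the code above):

import numpy as np

xs = np.random.normal(0, 0.2, 6000)               # hypothetical sample
counts, bin_edges = np.histogram(xs, bins=200)
density, _ = np.histogram(xs, bins=200, density=True)
factor = len(xs) * (bin_edges[1] - bin_edges[0])  # same as normalization_factor
print(np.allclose(counts, density * factor))      # True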
Many thanks JohanC, you are amazing.
Based on your code, I just added a few lines within the drawSubplots function in order to shade 95% of the Gaussian curve's area between the lower bound and upper bound for each subplot. Here is my attempt. It seems that ax_twin.fill_between doesn't work as expected here: as you can see from the figure, the shaded area falls outside the Gaussian curve. What I want is to shade only the area under the Gaussian curve between the lower bound and upper bound. If you don't mind, could you please point out my mistake? Thank you very much!
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import math
from scipy.stats import norm

def align_yaxis(ax1, v1, ax2, v2):
    # adjust ax2 ylimit so that v2 in ax2 is aligned to v1 in ax1
    _, y1 = ax1.transData.transform((0, v1))
    _, y2 = ax2.transData.transform((0, v2))
    inv = ax2.transData.inverted()
    _, dy = inv.transform((0, 0)) - inv.transform((0, y1 - y2))
    miny, maxy = ax2.get_ylim()
    ax2.set_ylim(miny + dy, maxy + dy)

def drawSingle(myax, mydf, title):
    num_bins = 200
    xs = mydf["gap"]
    x = np.linspace(-1, 1, 1000)
    mu = np.mean(xs)
    sigma = np.std(xs)
    n, bins, patches = myax.hist(xs, num_bins, alpha=0.8, facecolor='blue', density=False)
    myax.set_ylabel('Frequency', color="black", fontsize=12, weight="bold")
    myax.set_xlabel(title, fontsize=12, weight="bold", horizontalalignment='center')
    normalization_factor = len(xs) * (bins[1] - bins[0])
    y_normcurve = norm.pdf(x, mu, sigma) * normalization_factor
    myax.plot(x, y_normcurve, 'r--')
    myax.vlines(mu, 0, y_normcurve.max(), 'y', '--', color='lime', label='example')
    plt.xlim(-0.8, 0.8)
    my_x_ticks = np.arange(-0.8, 0.8, 0.1)
    plt.xticks(my_x_ticks)
    return normalization_factor, mu, sigma

def drawSubplots(mydf1, mydf2, mydf3, mydf4, title):
    plt.rcParams['figure.figsize'] = (18, 15)
    norm_factors = []
    mus = []
    sigmas = []
    my_x_ticks = np.arange(-0.8, 0.8, 0.1)
    rows, cols = 2, 2
    fig, ax = plt.subplots(nrows=rows, ncols=cols, sharex=True, sharey=True)
    dfs = [mydf1, mydf2, mydf3, mydf4]
    for ax_i, df, title in zip(ax.ravel(), dfs, ["Subplot1", "Subplot2", "Subplot3", "Subplot4"]):
        norm_factor, mu, sigma = drawSingle(ax_i, df, title)
        norm_factors.append(norm_factor)
        mus.append(mu)
        sigmas.append(sigma)
    for ax_i, norm_factor, mu, sigma in zip(ax.ravel(), norm_factors, mus, sigmas):
        ax_twin = ax_i.twinx()
        xmax = ax_i.get_xlim()[1]
        ax_twin.set_ylim(0, xmax / norm_factor)
        ax_twin.set_ylabel("probability density", color="black", fontsize=12, weight="bold")
        CI_95_lower = mu - (1.96 * sigma)
        CI_95_upper = mu + (1.96 * sigma)
        px_shaded = np.arange(CI_95_lower, CI_95_upper, 0.1)
        ax_twin.fill_between(px_shaded, norm.pdf(px_shaded, loc=mu, scale=sigma) * norm_factor,
                             alpha=0.75, color='pink')
        area_shaded_95_CI = norm.cdf(x=CI_95_upper, loc=mu, scale=sigma) - norm.cdf(x=CI_95_lower, loc=mu, scale=sigma)
        ax_twin.text(-0.06, 0.01, str(round(area_shaded_95_CI * 100, 1)) + "%", fontsize=20)
        ax_twin.annotate(s=f'lower bound= {CI_95_lower:.3f}',
                         xy=(CI_95_lower, norm.pdf(CI_95_lower, loc=mu, scale=sigma)),
                         xytext=(-0.75, 0.01), weight='bold', color='blue',
                         arrowprops=dict(arrowstyle='-|>', connectionstyle='arc3', color='green'),
                         fontsize=12)
        ax_twin.annotate(s=f'upper bound= {CI_95_upper:.3f}',
                         xy=(CI_95_upper, norm.pdf(CI_95_upper, loc=mu, scale=sigma)),
                         xytext=(0.28, 0.01), weight='bold', color='blue',
                         arrowprops=dict(arrowstyle='-|>', connectionstyle='arc3', color='green'),
                         fontsize=12)
        ax_twin.text(0.05, 0.03, r"$\mu=" + f'{mu:.6f}' + r", \sigma=" + f'{sigma:.6f}' + "$" + ", confidence interval=95%",
                     horizontalalignment='center', fontsize=15)
    plt.suptitle(title, fontsize=18)
    plt.tight_layout()
    plt.show()

df1, df2, df3, df4 = [pd.DataFrame({"gap": np.random.normal(0, 0.2, n)}) for n in [6000, 4000, 1800, 1200]]
drawSubplots(df1, df2, df3, df4, "Title")
I want to build a graph that looks like this: for each point I have a single value, and there is a maximum that reaches the border.
All I can find is how to get a hexbin in a scatter plot with seaborn or similar. Any ideas? Is there a ready-made solution, or would I need to code my way through it?
You could use tripcolor to show 6 shaded triangles. Scaling the outer vectors can adapt the triangles to show the desired proportions.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.tri as tri

proportions = [0.6, 0.75, 0.8, 0.9, 0.7, 0.8]
labels = ['alpha', 'beta', 'gamma', 'delta', 'epsilon', 'zeta']
N = len(proportions)
proportions = np.append(proportions, 1)
theta = np.linspace(0, 2 * np.pi, N, endpoint=False)
x = np.append(np.sin(theta), 0)
y = np.append(np.cos(theta), 0)
triangles = [[N, i, (i + 1) % N] for i in range(N)]
triang_backgr = tri.Triangulation(x, y, triangles)
triang_foregr = tri.Triangulation(x * proportions, y * proportions, triangles)
cmap = plt.cm.rainbow_r  # or plt.cm.hsv ?
colors = np.linspace(0, 1, N + 1)
plt.tripcolor(triang_backgr, colors, cmap=cmap, shading='gouraud', alpha=0.4)
plt.tripcolor(triang_foregr, colors, cmap=cmap, shading='gouraud', alpha=0.8)
plt.triplot(triang_backgr, color='white', lw=2)
for label, color, xi, yi in zip(labels, colors, x, y):
    plt.text(xi * 1.05, yi * 1.05, label,  # color=cmap(color),
             ha='left' if xi > 0.1 else 'right' if xi < -0.1 else 'center',
             va='bottom' if yi > 0.1 else 'top' if yi < -0.1 else 'center')
plt.axis('off')
plt.gca().set_aspect('equal')
plt.show()
The code allows for different numbers of triangles. Here are examples with 5 or 6 triangles:
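For instance, a 5-triangle version only needs different input lists; rerunning the script above with these two lines changed is enough, since everything else derives from N = len(proportions):

proportions = [0.6, 0.75, 0.8, 0.9, 0.7]
labels = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']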
I have a heavily right-skewed histogram and would like to calculate the probabilities for a range of lifetime values (the area under the curve of the PDF), for instance the probability that the lifetime value lies in (0, 0.01).
The DataFrame consists of LTV, calculated as cumulative revenue / cumulative installs:
df['LTV'] is
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0.208125,0.0558879,0.608348,0.212553,0.0865896,
0.728542,0,0.609512,0,0,0,0,0,0,0,0.0801339,0.140657,0.0194118,0,0,0.0634682,
0.339545,0.875902,0.8325,0.0260526,0.0711905,0.169894,0.202969,0.0761538,0,0.342055,
0.42781,0,0,0.192115,0,0,0,0,0,0,0,0,0,0,0,1.6473,0,0.232329,0,2.21329,0.748,0.0424286,
0.455439,0.210282,5.56453,0.427959,0,0.352059,0,0,0.567059,0,0,0,0.384462,1.29476,
0.0103125,0,0.0126923,1.03356,0,0,0.289785,0,0)
I have tried scikit-learn's KernelDensity; however, after fitting it to the data, it does not capture the over-represented zeros.
import gc
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import KernelDensity

def plot_prob_density(df_lunch, field, x_start, x_end):
    plt.figure(figsize=(10, 7))
    unit = 0
    x = np.linspace(df_lunch.min() - unit, df_lunch.max() + unit, 1000)[:, np.newaxis]
    # Plot the data using a normalized histogram
    plt.hist(df_lunch, bins=200, density=True, label='LTV', color='blue', alpha=0.2)
    # Do kernel density estimation
    kd_lunch = KernelDensity(kernel='gaussian', bandwidth=0.00187).fit(df_lunch)
    # Plot the estimated density
    kd_vals_lunch = np.exp(kd_lunch.score_samples(x))
    plt.plot(x, kd_vals_lunch, color='orange')
    plt.axvline(x=x_start, color='red', linestyle='dashed')
    plt.axvline(x=x_end, color='red', linestyle='dashed')
    # Show the plots
    plt.xlabel(field, fontsize=15)
    plt.ylabel('Probability Density', fontsize=15)
    plt.legend(fontsize=15)
    plt.show()
    gc.collect()
    return kd_lunch

kd_lunch = plot_prob_density(final_df['LTV'].values.reshape(-1, 1), 'LTV', x_start=0, x_end=0.01)
Then finding the probabilities like this:
def get_probability(start_value, end_value, eval_points, kd):
    # Number of evaluation points
    N = eval_points
    step = (end_value - start_value) / (N - 1)  # Step size
    x = np.linspace(start_value, end_value, N)[:, np.newaxis]  # Generate values in the range
    kd_vals = np.exp(kd.score_samples(x))  # Get PDF values for each x
    probability = np.sum(kd_vals * step)  # Approximate the integral of the PDF
    return probability.round(4)

print('Probability of LTV 0-3 tips during LUNCH time: {}\n'
      .format(get_probability(start_value=0,
                              end_value=0.01,
                              eval_points=100,
                              kd=kd_lunch)))
However, this method does not yield the appropriate PDF values we were aiming for.
Any suggestions for alternative methods would be appreciated.
I have used a more or less similar script for my work; here it is, in case it is helpful for you.
import gc
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn.neighbors import KernelDensity
from scipy import stats

data1 = beta_95[0]

def plot_prob_density(data1, x_start, x_end):
    plt.figure(figsize=(4, 3.5))
    unit = 1.5
    x = np.linspace(-20, 20, 1000)[:, np.newaxis]
    # Plot the data using a normalized histogram
    plt.hist(data1, bins=np.linspace(-20, 20, 40), density=True, color='r', alpha=0.4)
    # Do kernel density estimation
    kd_data1 = KernelDensity(kernel='gaussian', bandwidth=1.8).fit(data1)
    # Plot the estimated density
    kd_vals_data1 = np.exp(kd_data1.score_samples(x))
    plt.plot(x, kd_vals_data1, color='r', label='$N_a$', linewidth=2)
    plt.axvline(x=9.95, color='green', linestyle='dashed', linewidth=2.0, label='$β_o$')
    plt.axvline(x=1.9, color='black', linestyle='dashed', linewidth=2.0, label='$β_b$')
    plt.axvline(x=x_end, color='red', linestyle='dashed', linewidth=2, label='$β_{95\%}$')
    # Show the plots
    plt.xlabel('Beta', fontsize=10)
    plt.ylabel('Probability Density', fontsize=10)
    plt.title('02 hours window', fontsize=12)
    plt.xlim(-20, 20)
    plt.ylim(0, 0.3)
    plt.yticks([0, 0.1, 0.2, 0.3])
    plt.legend(fontsize=12, loc='upper left', frameon=False)
    plt.show()
    gc.collect()
    return kd_data1

def get_probability(start_value, end_value, eval_points, kd):
    # Number of evaluation points
    N = eval_points
    step = (end_value - start_value) / (N - 1)  # Step size
    x = np.linspace(start_value, end_value, N)[:, np.newaxis]  # Generate values in the range
    kd_vals = np.exp(kd.score_samples(x))  # Get PDF values for each x
    probability = np.sum(kd_vals * step)  # Approximate the integral of the PDF
    return probability.round(4)

data1 = np.array(data1).reshape(-1, 1)
kd_data1 = plot_prob_density(data1, x_start=3.0, x_end=13)
print('Beta-95%: {}\n'
      .format(get_probability(start_value=-10,
                              end_value=13,
                              eval_points=1000,
                              kd=kd_data1)))
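As an aside, if switching libraries is an option, scipy's gaussian_kde can integrate the fitted density exactly via its integrate_box_1d method, which avoids the manual Riemann sum. A minimal sketch with made-up data:

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
data = rng.normal(5, 3, 1000)          # hypothetical 1-D sample
kde = stats.gaussian_kde(data)         # fit a Gaussian KDE to the raw values
prob = kde.integrate_box_1d(-10, 13)   # exact integral of the KDE over [-10, 13]
print(round(float(prob), 4))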
The purpose of this code is to demonstrate the CLT (Central Limit Theorem).
If I do the following:
num_samples = 10000
sample_means = np.empty(num_samples)
for i in range(num_samples):
    mean = np.mean(st.bernoulli.rvs(p=0.5, size=100))
    sample_means[i] = mean

sample_demeaned = np.subtract(sample_means, 0.5)
denominator = np.divide(0.5, np.sqrt(100))
z_ed = np.divide(sample_demeaned, denominator)
plt.hist(z_ed, bins=40, edgecolor='k', density=True)
x = np.linspace(st.norm.ppf(0.001), st.norm.ppf(0.999), 10000)
y = st.norm.pdf(x)
plt.plot(x, y, color='red')
I get:
However, if I try to do it with a for loop for different sample sizes:
num_samples = 10000
sample_sizes = np.array([5, 20, 75, 100])
sample_std_means = np.empty(shape=(num_samples, len(sample_sizes)))
for col, size in enumerate(sample_sizes):
    sample_means = np.empty(num_samples)
    for i in range(num_samples):
        mean = np.mean(st.bernoulli.rvs(p=0.5, size=size))
        sample_means[i] = mean
    sample_demeaned = np.subtract(sample_means, 0.5)
    denominator = np.divide(0.5, np.sqrt(size))
    z_ed = np.divide(sample_demeaned, denominator)
    sample_std_means[:, col] = sample_means
And then plot each of them in a 2x2 grid:
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(10, 7))
x = np.linspace(st.norm.ppf(0.001), st.norm.ppf(0.999), 10000)
y = st.norm.pdf(x)
for i, ax in enumerate(axes.flatten()):
    ax.hist(sample_std_means[i], bins=40, edgecolor='k', color='midnightblue')
    ax.set_ylabel('Density')
    ax.set_xlabel(f'n = {sample_sizes[i]}')
    ax.plot(x, y, color='red')
    ax.set_xlim((-3, 3))
plt.show()
I get the following image:
I cannot debug the discrepancy here. Any help is highly appreciated.
Please note that scipy.stats and numpy have been imported as st and np respectively in both code blocks.
First, note that one of numpy's strong points is that it allows operations mixing arrays and single numbers. This is called broadcasting. So, for example, sample_demeaned = np.subtract(sample_means, 0.5) can be written more concisely as sample_demeaned = sample_means - 0.5.
Several things are going wrong:
sample_std_means[:, col] = sample_means should use the just-calculated z_ed instead of sample_means.
ax.hist(sample_std_means[i], ...) uses the i'th row of the array, which only contains 4 elements. You'd want sample_std_means[:, i] to take the i'th column.
The pdf is drawn in its normalized form (with an area below the curve equal to one). However, the histogram's height is proportional to the number of samples; its total area is num_samples * bin_width, where the histogram's default bin width is the length from the first to the last element divided by the number of bins. To get the pdf and the histogram at similar sizes, either the histogram should be normalized (using density=True) or the pdf should be multiplied by the expected area of the histogram.
import numpy as np
import scipy.stats as st
import matplotlib.pyplot as plt

num_samples = 10000
sample_sizes = np.array([5, 20, 75, 100])
sample_std_means = np.empty(shape=(num_samples, len(sample_sizes)))
for col, size in enumerate(sample_sizes):
    sample_means = np.empty(num_samples)
    for i in range(num_samples):
        sample_means[i] = np.mean(st.bernoulli.rvs(p=0.5, size=size))
    sample_demeaned = sample_means - 0.5
    z_ed = sample_demeaned / (0.5 / np.sqrt(size))
    sample_std_means[:, col] = z_ed

fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(10, 7))
x = np.linspace(st.norm.ppf(0.001), st.norm.ppf(0.999), 1000)
y = st.norm.pdf(x)
for i, ax in enumerate(axes.flatten()):
    ax.hist(sample_std_means[:, i], bins=40, edgecolor='k', color='midnightblue', density=True)
    ax.set_ylabel('Density')
    ax.set_xlabel(f'n = {sample_sizes[i]}')
    # bin_width = (sample_std_means[:, i].max() - sample_std_means[:, i].min()) / 40
    # ax.plot(x, y * num_samples * bin_width, color='red')
    ax.plot(x, y, color='red')
    ax.set_xlim((-3, 3))
plt.show()
Now note the weird empty bars in the histograms. A histogram works best for continuous distributions, but the mean of n Bernoulli trials can take at most n+1 different values. If all trials were True, the mean would be n/n = 1; if all were False, it would be 0. Combined, the possible means are 0, 1/n, 2/n, ..., 1. The histogram of such a discrete distribution should take these values into account when placing the boundaries between the bins.
The following code creates a scatter plot, using the position of the means and a random y-value to visualize how many there are per x. Also, the position of the bin boundaries is calculated and visualized by dotted vertical lines.
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(10, 7))
for i, ax in enumerate(axes.flatten()):
    ax.scatter(sample_std_means[:, i], np.random.uniform(0, 1, num_samples), color='r', alpha=0.5, lw=0, s=1)
    # there are n+1 possible mean values for n bernoulli trials
    # n+2 boundaries will be needed to separate the bins
    bins = np.arange(-1, sample_sizes[i] + 1) / sample_sizes[i]
    bins += (bins[1] - bins[0]) / 2  # shift half a bin
    bins -= 0.5  # subtract the mean
    bins /= (0.5 / np.sqrt(sample_sizes[i]))  # correction factor
    for b in bins:
        ax.axvline(b, color='g', ls=':')
    ax.set_xlabel(f'n = {sample_sizes[i]}')
    ax.set_xlim((-3, 3))
And here are the histograms using these bins:
ax.hist(sample_std_means[:, i], bins=bins, edgecolor='k', color='midnightblue', density=True)
I made a function which can plot statistics for large arrays (10**8 elements) in less than 2 seconds. How can I scale the Y-axis to make the area under the graph equal to 1?
import numpy as np
import vaex
import matplotlib.pyplot as plt

def dis(inp):
    if getattr(inp, "numpy", None) is not None:  # e.g. a tensor exposing .numpy()
        inp1d = np.reshape(inp.numpy(), [-1])
    else:
        inp1d = np.reshape(inp, [-1])
    bin_count = 64
    df = vaex.from_arrays(x=inp1d)
    x_min, x_max = df.minmax(df.x)
    bins = df.count(binby=df.x, shape=bin_count, limits='minmax', edges=True)
    bins[-2] += bins[-1]  # fold the overflow count into the last bin
    bins[-1] = bins[-2]   # duplicate the last value for the final step
    bins = bins[2:]       # drop the null/underflow entries
    hist_height = np.max(bins)
    edges = np.linspace(x_min, x_max, bin_count + 1)
    mean = df.mean(df.x)
    std = df.std(df.x)
    # vertical lines at the mean and at +/- 1, 2, 3 standard deviations
    for i, v in enumerate([x * std + mean for x in range(-3, 4)]):
        if i == 3:
            plt.plot([v, v], [0, hist_height * 1.02], color='#34A853', linewidth=1)
        else:
            plt.plot([v, v], [0, hist_height * 0.97], color='#34A853', linewidth=0.5)
    plt.step(edges, bins, where='post', color='#4285F4', linewidth=1)
    plt.show()
    print(f'{str(inp.shape) + " " if getattr(inp, "shape", None) is not None and inp.ndim > 1 else ""}'
          f'{len(inp1d):,}\nmean: {mean}\nstd: {std}\nmin: {x_min}\nmax: {x_max}')

x = np.random.normal(0, 1, (10**8, ))
Complete answer, in case somebody wants to know how to plot big-data statistics:
import numpy as np
import vaex
import matplotlib.pyplot as plt

def dis(inp):
    if getattr(inp, "numpy", None) is not None:  # e.g. a tensor exposing .numpy()
        inp1d = np.reshape(inp.numpy(), [-1])
    else:
        inp1d = np.reshape(inp, [-1])
    bin_count = 64
    df = vaex.from_arrays(x=inp1d)
    x_min, x_max = df.minmax(df.x)
    bins = df.count(binby=df.x, shape=bin_count, limits='minmax', edges=True)
    bins[-2] += bins[-1]  # fold the overflow count into the last bin
    bins = bins[2:-1]     # drop the null/underflow/overflow entries
    mean = df.mean(df.x)
    std = df.std(df.x)
    # Scale AUC to 1 (do this before duplicating the bin values below,
    # otherwise the area would be counted twice)
    step = (x_max - x_min) / bin_count
    population = np.sum(bins)
    surface = population * step
    bins = bins / surface
    hist_height = np.max(bins)
    # duplicate each edge/height pair so the plot has flat bin tops
    edges = np.linspace(x_min, x_max, bin_count + 1)
    left, right = edges[:-1], edges[1:]
    edges = np.reshape(np.array([left, right]).T, [-1])
    bins = np.reshape(np.array([bins, bins]).T, [-1])
    for i, v in enumerate([x * std + mean for x in range(-3, 4)]):
        if i == 3:
            plt.plot([v, v], [0, hist_height * 1.02], color='#34A853', linewidth=1)
        else:
            plt.plot([v, v], [0, hist_height * 0.97], color='#34A853', linewidth=0.5)
    plt.fill_between(edges, bins, step="pre", alpha=0.3)
    plt.plot(edges, bins, color='#4285F4', linewidth=1)
    plt.show()
    print(f'{str(inp.shape) + " " if getattr(inp, "shape", None) is not None and inp.ndim > 1 else ""}'
          f'{len(inp1d):,}\nmean: {mean}\nstd: {std}\nmin: {x_min}\nmax: {x_max}')
The idea is to normalise your data set, i.e. to divide the height of each column by the AUC (area under the curve) of your histogram.
Before plt.step(...), write:
step = (x_max - x_min) / bin_count
population = np.sum(bins)
surface = population * step
bins = bins / surface
Hope that helps.
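For reference, the same normalization can be sanity-checked with plain numpy (a minimal sketch with made-up data, independent of vaex): after dividing by the AUC, the heights times the bin width must sum to 1.

import numpy as np

data = np.random.normal(0, 1, 10_000)           # hypothetical sample
counts, edges = np.histogram(data, bins=64)
step = edges[1] - edges[0]
heights = counts / (np.sum(counts) * step)      # divide each column by the AUC
print(np.isclose(np.sum(heights * step), 1.0))  # True: the area is now 1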