Plotly: How to find and annotate the intersection point between two lines? - python

I am trying to find and annotate the intersection point of two lines using Plotly. I know that in matplotlib you can use plt.plot(*intersection.xy, 'ko') to mark the intersection point, but how can this be done in Plotly, if it can be done at all?
import numpy as np
import pandas as pd
import plotly.express as px

test = pd.merge(ps, uts, on='Time')
print(test)
time = test['Time']
omfr = test['Orifice Mass Flow Rate']
qab = test['Qab']
fig = px.line(test, x=time, y=[omfr, qab])
fig.update_layout(title='Pipe stress and UTS (MPa)',
                  xaxis_title="Time (s)",
                  yaxis_title="Pipe stress and UTS (MPa)",
                  hovermode='x')
Output of the code:
Intersection point:

Annotating and showing the intersection is easy; finding it is the hard part. My suggestion in that regard builds directly on the contributions in the post How do I compute the intersection point of two lines. I'll add a few more lines on the details when I find the time. For now, the complete code snippet at the end of my answer will produce the following figure using this dataset:
Data
   x  y1     y2
0  1   1  11.00
1  2   8  14.59
2  3  27  21.21
3  4  64  31.11
Plot
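The core idea, for a single pair of line segments, is to write each segment in parametric form and solve a small linear system, keeping the solution only if both parameters lie between 0 and 1; the complete code at the end vectorizes this over all candidate segment pairs. Here is a minimal illustrative sketch of that single step (the segment_intersection helper exists only for this illustration):

import numpy as np

def segment_intersection(p1, p2, p3, p4):
    # Solve p1 + t*(p2 - p1) == p3 + s*(p4 - p3) for (t, s)
    p1, p2, p3, p4 = map(np.asarray, (p1, p2, p3, p4))
    M = np.column_stack((p2 - p1, -(p4 - p3)))
    try:
        t, s = np.linalg.solve(M, p3 - p1)
    except np.linalg.LinAlgError:       # parallel segments
        return None
    if 0 <= t <= 1 and 0 <= s <= 1:     # intersection lies on both segments
        return p1 + t * (p2 - p1)
    return None

# the segments between x = 2 and x = 3 of y1 and y2 in the dataset above
print(segment_intersection((2, 8), (3, 27), (2, 14.59), (3, 21.21)))
# -> approximately [ 2.53 18.11 ]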
Edit - Annotation
If you'd like to change the text annotation, just change
text="intersect"
... to something like:
text = 'lines intersect at x = ' + str(round(x[0], 2)) + ' and y = ' + str(round(y[0], 2))
Result:
Complete code
import pandas as pd
import plotly.graph_objects as go
import numpy as np
# import dash

# sample dataframe
df = pd.DataFrame()
df['x'] = np.arange(4) + 1
df['y1'] = df['x']**3
df['y2'] = [10 + val**2.2 for val in df['x']]

# intersection stuff
def _rect_inter_inner(x1, x2):
    n1 = x1.shape[0] - 1
    n2 = x2.shape[0] - 1
    X1 = np.c_[x1[:-1], x1[1:]]
    X2 = np.c_[x2[:-1], x2[1:]]
    S1 = np.tile(X1.min(axis=1), (n2, 1)).T
    S2 = np.tile(X2.max(axis=1), (n1, 1))
    S3 = np.tile(X1.max(axis=1), (n2, 1)).T
    S4 = np.tile(X2.min(axis=1), (n1, 1))
    return S1, S2, S3, S4

def _rectangle_intersection_(x1, y1, x2, y2):
    S1, S2, S3, S4 = _rect_inter_inner(x1, x2)
    S5, S6, S7, S8 = _rect_inter_inner(y1, y2)
    C1 = np.less_equal(S1, S2)
    C2 = np.greater_equal(S3, S4)
    C3 = np.less_equal(S5, S6)
    C4 = np.greater_equal(S7, S8)
    ii, jj = np.nonzero(C1 & C2 & C3 & C4)
    return ii, jj

def intersection(x1, y1, x2, y2):
    ii, jj = _rectangle_intersection_(x1, y1, x2, y2)
    n = len(ii)
    dxy1 = np.diff(np.c_[x1, y1], axis=0)
    dxy2 = np.diff(np.c_[x2, y2], axis=0)
    T = np.zeros((4, n))
    AA = np.zeros((4, 4, n))
    AA[0:2, 2, :] = -1
    AA[2:4, 3, :] = -1
    AA[0::2, 0, :] = dxy1[ii, :].T
    AA[1::2, 1, :] = dxy2[jj, :].T
    BB = np.zeros((4, n))
    BB[0, :] = -x1[ii].ravel()
    BB[1, :] = -x2[jj].ravel()
    BB[2, :] = -y1[ii].ravel()
    BB[3, :] = -y2[jj].ravel()
    for i in range(n):
        try:
            T[:, i] = np.linalg.solve(AA[:, :, i], BB[:, i])
        except:
            T[:, i] = np.NaN
    in_range = (T[0, :] >= 0) & (T[1, :] >= 0) & (T[0, :] <= 1) & (T[1, :] <= 1)
    xy0 = T[2:, in_range]
    xy0 = xy0.T
    return xy0[:, 0], xy0[:, 1]

# plotly figure
x, y = intersection(np.array(df['x'].values.astype('float')), np.array(df['y1'].values.astype('float')),
                    np.array(df['x'].values.astype('float')), np.array(df['y2'].values.astype('float')))
fig = go.Figure(data=go.Scatter(x=df['x'], y=df['y1'], mode='lines'))
fig.add_traces(go.Scatter(x=df['x'], y=df['y2'], mode='lines'))
fig.add_traces(go.Scatter(x=x, y=y,
                          mode='markers',
                          marker=dict(line=dict(color='black', width=2),
                                      symbol='diamond',
                                      size=14,
                                      color='rgba(255, 255, 0, 0.6)'),
                          name='intersect'))
fig.add_annotation(x=x[0], y=y[0],
                   # text="intersect",
                   text='lines intersect at x = ' + str(round(x[0], 2)) + ' and y = ' + str(round(y[0], 2)),
                   font=dict(family="sans serif",
                             size=18,
                             color="black"),
                   ax=0,
                   ay=-100,
                   showarrow=True,
                   arrowhead=1)
fig.show()
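For completeness: since the question's intersection.xy comes from shapely, a shorter alternative is to let shapely find the point and only use Plotly for the drawing. This is an untested sketch that assumes shapely is installed and that the two traces cross exactly once, so the result is a single Point:

from shapely.geometry import LineString

line1 = LineString(np.column_stack((df['x'], df['y1'])))
line2 = LineString(np.column_stack((df['x'], df['y2'])))
pt = line1.intersection(line2)   # a Point if the lines cross exactly once
fig.add_annotation(x=pt.x, y=pt.y,
                   text='lines intersect at x = ' + str(round(pt.x, 2)) + ' and y = ' + str(round(pt.y, 2)),
                   showarrow=True, arrowhead=1)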

Related

Is there a way to make a bit of gap between x-axis and the graph? (Python, bqplot)

I created a random number graph and I want more of a gap between the x axis and the graph because it is hard to read. Does anyone have an idea how to do that? Thank you so much. Here is my example code.
from bqplot import Axis, LinearScale, OrdinalScale, Scatter, Figure, Tooltip, DateScale, Lines, Bars
import bql
import pandas as pd
from numpy import nan, isnan
from numpy.random import randint
import numpy as np

x_sc_bar_fsa = OrdinalScale()
y_sc_bar_fsa = LinearScale()
tt_bar_fsa = Tooltip(fields=['x', 'y'], formats=['', '.2f'])
bar_fsa = Bars(stroke='white', scales={'x': x_sc_bar_fsa, 'y': y_sc_bar_fsa}, padding=0.5,
               tooltip=tt_bar_fsa, unhovered_style={'opacity': 0.9}, type='grouped')
ax_x_bar_fsa = Axis(scale=x_sc_bar_fsa)
ax_y_bar_fsa = Axis(scale=y_sc_bar_fsa, orientation='vertical', tick_format='0.2f')

# Final output
fig_bar_fsa = Figure(title='Random Number Graph for Testing', marks=[], axes=[ax_x_bar_fsa, ax_y_bar_fsa], padding_x=0)

def plot_bars_fsa(x_data, y_data, label_data, colors=['darkblue', 'royalblue', 'darkgreen', 'darkred', 'red']):
    if (len(x_data) == 0) | (len(y_data) == 0) | (len(label_data) == 0):
        fig_bar_fsa.marks = []  # empty list if x-data, y-data or label data is missing (i.e. len == 0)
    else:  # otherwise display
        bar_fsa.x = x_data
        bar_fsa.y = y_data
        y_sc_bar_fsa.min = 0
        y_sc_bar_fsa.max = np.nanmax([np.nanmax(x) for x in y_data]) * 1.1
        bar_fsa.labels = label_data
        bar_fsa.colors = colors  # defined above
        ax_x_bar_fsa.tick_rotate = -90  # angle of the tick labels on the graph
        ax_x_bar_fsa.tick_style = {'text-anchor': 'end'}
        fig_bar_fsa.marks = [bar_fsa]  # link marks to the bar chart

example_df = pd.DataFrame(data=randint(0, 100, size=(10, 5)),
                          columns=['column_' + str(i) for i in range(5)])
example_df.index = ['namenumber1253', 'namenumber26675', 'namenumber339', 'namenumber2556', 'namenumber3109',
                    'namenumber5894', 'namenumber1355', 'namenumber685890', 'namenumber397', 'namenumber85']

plot_bars_fsa(example_df.index.values,
              [example_df[x].values for x in ['column_0', 'column_1', 'column_2', 'column_3', 'column_4']],
              ['column_0', 'column_1', 'column_2', 'column_3', 'column_4'])
fig_bar_fsa
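Not an answer as such, but judging from the code above there are two knobs that might give the bars some breathing room; this is an untested sketch, and the fig_margin attribute name should be verified against the bqplot docs:

# Option 1: let the y scale start below zero so the bars do not sit directly
# on the x axis line (plot_bars_fsa currently pins y_sc_bar_fsa.min to 0):
y_sc_bar_fsa.min = -5

# Option 2: give the figure more room below the plot area for the rotated
# tick labels via the figure margin (attribute name assumed):
fig_bar_fsa.fig_margin = dict(top=60, bottom=120, left=60, right=60)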

How to draw proper chart of distributional tree?

I am using Python with matplotlib and need to visualize the distribution percentages of sub-groups of a data set.
imagine this tree:
Data   --- group1 (40%)
       --- group2 (25%)
       --- group3 (35%)

group1 --- A (25%)
       --- B (25%)
       --- c (50%)
and it can go on; each group can have several sub-groups, and the same applies to each sub-group.
How can I plot a proper chart for this info?
I created a minimal reproducible example that I think fits your description, but please let me know if that is not what you need.
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
data = pd.DataFrame()
n_rows = 100
data['group'] = np.random.choice(['1', '2', '3'], n_rows)
data['subgroup'] = np.random.choice(['A', 'B', 'C'], n_rows)
For instance, we could get the following counts for the subgroups.
In [1]: data.groupby(['group'])['subgroup'].value_counts()
Out[1]:
group  subgroup
1      A           17
       C           16
       B            5
2      A           23
       C           10
       B            7
3      C            8
       A            7
       B            7
Name: subgroup, dtype: int64
I created a function that computes the necessary counts given an ordering of the columns (e.g. ['group', 'subgroup']) and incrementally plots the bars with the corresponding percentages.
import matplotlib.pyplot as plt
import matplotlib.cm

def plot_tree(data, ordering, axis=False):
    """
    Plots a sequence of bar plots reflecting how the data
    is distributed at different levels. The order of the
    levels is given by the ordering parameter.

    Parameters
    ----------
    data: pandas DataFrame
    ordering: list
        Names of the columns to be plotted. They should be
        ordered top down, from the larger to the smaller group.
    axis: boolean
        Whether to plot the axis.

    Returns
    -------
    fig: matplotlib figure object.
        The final tree plot.
    """
    # Frame set-up
    fig, ax = plt.subplots(figsize=(9.2, 3*len(ordering)))
    ax.set_xticks(np.arange(-1, len(ordering)) + 0.5)
    ax.set_xticklabels(['All'] + ordering, fontsize=18)
    if not axis:
        plt.axis('off')
    counts = [data.shape[0]]

    # Get colormap
    labels = ['All']
    for o in reversed(ordering):
        labels.extend(data[o].unique().tolist())
    # Pastel is nice but has few colors. Change for a larger map if needed
    cmap = matplotlib.cm.get_cmap('Pastel1', len(labels))
    colors = dict(zip(labels, [cmap(i) for i in range(len(labels))]))

    # Group the counts
    counts = data.groupby(ordering).size().reset_index(name='c_' + ordering[-1])
    for i, o in enumerate(ordering[:-1], 1):
        if ordering[:i]:
            counts['c_' + o] = counts.groupby(ordering[:i]).transform('sum')['c_' + ordering[-1]]

    # Calculate percentages
    counts['p_' + ordering[0]] = counts['c_' + ordering[0]]/data.shape[0]
    for i, o in enumerate(ordering[1:], 1):
        counts['p_' + o] = counts['c_' + o]/counts['c_' + ordering[i-1]]

    # Plot first bar - all data
    ax.bar(-1, data.shape[0], width=1, label='All', color=colors['All'], align="edge")
    ax.annotate('All -- 100%', (-0.9, 0.5), fontsize=12)

    comb = 1  # keeps track of the number of possible combinations at each level
    for bar, col in enumerate(ordering):
        labels = sorted(data[col].unique())*comb
        comb *= len(data[col].unique())
        # Get only the relevant counts at this level
        local_counts = counts[ordering[:bar+1] +
                              ['c_' + o for o in ordering[:bar+1]] +
                              ['p_' + o for o in ordering[:bar+1]]].drop_duplicates()
        sizes = local_counts['c_' + col]
        percs = local_counts['p_' + col]
        bottom = 0  # start from 0
        for size, perc, label in zip(sizes, percs, labels):
            ax.bar(bar, size, width=1, bottom=bottom, label=label, color=colors[label], align="edge")
            ax.annotate('{} -- {:.0%}'.format(label, perc), (bar+0.1, bottom+0.5), fontsize=12)
            bottom += size  # stack the bars
    ax.legend(colors)
    return fig
With the data shown above we would get the following.
fig = plot_tree(data, ['group', 'subgroup'], axis=True)
Have you tried a stacked bar graph?
https://matplotlib.org/gallery/lines_bars_and_markers/bar_stacked.html#sphx-glr-gallery-lines-bars-and-markers-bar-stacked-py
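For reference, here is a minimal stacked-bar sketch along the lines of that gallery example, using made-up percentages shaped like the question's tree (illustrative only):

import matplotlib.pyplot as plt

groups = ['group1', 'group2', 'group3']
shares = {'A': [25, 30, 20], 'B': [25, 40, 30], 'C': [50, 30, 50]}  # made-up numbers

fig, ax = plt.subplots()
bottom = [0, 0, 0]
for label, vals in shares.items():
    ax.bar(groups, vals, bottom=bottom, label=label)  # stack each subgroup on the previous one
    bottom = [b + v for b, v in zip(bottom, vals)]
ax.set_ylabel('share of group (%)')
ax.legend()
plt.show()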

How to increase the impact of an explanatory variable on Y as we step forward in time?

The question:
I'm building a model on three time series where Y is the dependent variable and X1 and X2 are the explanatory variables. Let's say there is strong reason to believe that the impact of X1 on Y increases relative to X2 as time goes by. How can you account for this in a multiple regression model?
(I'll show some code snippets as my question progresses, and you'll find a complete code section at the end.)
The details - a visual approach:
Here are three synthetic series where it seems that the impact of X1 on Y is very strong at the end of the period:
A basic model could be:
model = smf.ols(formula='Y ~ X1 + X2')
And if you plot the fitted values against the observed Y values, you'd get this:
Sticking to a visual evaluation of the model, it seems to perform OK for the majority of the period, but very poorly after August sets in.
How can I account for this in a multiple regression model? With help from this post, I've tried to introduce an interaction term with both a linear and a squared timestep in these models:
mod_timestep = Y ~ X1 + X2:timestep
mod_timestep2 = Y ~ X1 + X2:timestep2
By the way, these are the timesteps:
Results:
It seems that both approaches perform a bit better at the end, but considerably worse at the beginning.
Any other suggestions? I know there's a multitude of possibilities with lagged terms of the dependent variable and other models such as ARIMA or GARCH. But for a number of reasons I'd like to stay within the boundaries of multiple linear regression and, if possible, avoid lagged terms.
Here's the whole thing for an easy copy&paste:
#%%
# imports
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib.dates as mdates
import numpy as np
import statsmodels.api as sm
import statsmodels.formula.api as smf
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)

###############################################################################
# Synthetic data and plot
###############################################################################

# Function to build synthetic data
def sample():
    np.random.seed(26)
    date = pd.to_datetime("1st of Dec, 1999")
    nPeriod = 250
    dates = date + pd.to_timedelta(np.arange(nPeriod), 'D')
    # ppt = np.random.rand(1900)
    Y = np.random.normal(loc=0.0, scale=1.0, size=nPeriod).cumsum()
    X1 = np.random.normal(loc=0.0, scale=1.0, size=nPeriod).cumsum()
    X2 = np.random.normal(loc=0.0, scale=1.0, size=nPeriod).cumsum()
    df = pd.DataFrame({'Y': Y,
                       'X1': X1,
                       'X2': X2}, index=dates)
    # Adjust level of series
    df = df + 100
    # A subset
    df = df.tail(50)
    return df

# Function to make a couple of plots
def plot1(df, names, colors):
    # Plot
    fig, ax = plt.subplots(1)
    ax.set_facecolor('white')
    # Plot series
    counter = 0
    for name in names:
        print(name)
        ax.plot(df.index, df[name], lw=0.5, color=colors[counter])
        counter = counter + 1
    fig = ax.get_figure()
    # Assign months to the x axis
    locator = mdates.MonthLocator()  # every month
    # Specify the x format
    fmt = mdates.DateFormatter('%b')
    X = plt.gca().xaxis
    X.set_major_locator(locator)
    X.set_major_formatter(fmt)
    ax.legend(loc='upper left', fontsize='x-small')
    fig.show()

# Build sample data
df = sample()

# Plot of input variables
plot1(df=df, names=['Y', 'X1', 'X2'], colors=['red', 'blue', 'green'])

###############################################################################
# Models
###############################################################################

# Add timesteps to original df
timestep = pd.Series(np.arange(1, len(df)+1), index=df.index)
timestep2 = timestep**2
newcols2 = list(df)
df = pd.concat([df, timestep, timestep2], axis=1)
newcols2.extend(['timestep', 'timestep2'])
df.columns = newcols2

def add_models_to_df(df, models, modelNames):
    df_temp = df.copy()
    counter = 0
    for model in models:
        df_temp[modelNames[counter]] = smf.ols(formula=model, data=df).fit().fittedvalues
        counter = counter + 1
    return df_temp

# Models
df_models = add_models_to_df(df, models=['Y ~ X1 + X2', 'Y ~ X1 + X2:timestep', 'Y ~ X1 + X2:timestep2'],
                             modelNames=['mod_regular', 'mod_timestep', 'mod_timestep2'])

# Plots of models
plot1(df=df_models,
      names=['Y', 'mod_regular', 'mod_timestep', 'mod_timestep2'],
      colors=['red', 'black', 'green', 'grey'])
Edit 1 - screenshot from the suggestion:
Using the other options from the link you provided seems to be the better approach.
Using your functions with Y ~ X1 + X2*timestep and Y ~ X1 + X2*timestep2 will at least "catch" the increase in Y at the beginning, the decrease in the middle of the period, and the sudden increase at the end.
I can't post images yet, so you'll have to try it yourself; a sketch of the call follows below.
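In terms of the question's own helpers, that amounts to something like this (the model names are just placeholders):

df_models = add_models_to_df(df,
                             models=['Y ~ X1 + X2*timestep', 'Y ~ X1 + X2*timestep2'],
                             modelNames=['mod_full_interact', 'mod_full_interact2'])
plot1(df=df_models,
      names=['Y', 'mod_full_interact', 'mod_full_interact2'],
      colors=['red', 'green', 'grey'])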

Adding a 45 degree line to a time series stock data plot

I guess this is supposed to be simple... but I can't seem to make it work.
I have some stock data
import pandas as pd
import numpy as np
df = pd.DataFrame(index=pd.date_range(start = "06/01/2018", end = "08/01/2018"),
data = np.random.rand(62)*100)
I am doing some analysis on it, which results in my drawing some lines on the graph. I want to plot a 45 degree line somewhere on the graph as a reference for the lines I drew.
What I have tried is
x = df.tail(len(df)/20).index
x = x.reset_index()
x_first_val = df.loc[x.loc[0].date].adj_close
in order to get a starting point and then use slope = 1 to calculate the y values... but this all sounds wrong.
Any ideas?
Here is a possibility:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

df = pd.DataFrame(index=pd.date_range(start="06/01/2018", end="08/01/2018"),
                  data=np.random.rand(62)*100,
                  columns=['data'])

# Get values for the time:
index_range = df.index[('2018-06-18' < df.index) & (df.index < '2018-07-21')]
# get the timestamps in nanoseconds (since epoch)
timestamps_ns = index_range.astype(np.int64)
# convert it to a relative number of days (could also be seconds, for example)
time_day = (timestamps_ns - timestamps_ns[0]) / 1e9 / 60 / 60 / 24

# Define y-data for a line:
slope = 3  # unit: "something" per day
something = time_day * slope
trendline = pd.Series(something, index=index_range)

# Graph:
df.plot(label='data', alpha=0.8)
trendline.plot(label='some trend')
plt.legend(); plt.ylabel('something');
which gives:
edit - first answer, using dayofyear instead of the timestamps:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

df = pd.DataFrame(index=pd.date_range(start="06/01/2018", end="08/01/2018"),
                  data=np.random.rand(62)*100,
                  columns=['data'])

# Define data for a line:
slope = 3  # unit: "something" per day
index_range = df.index[('2018-06-18' < df.index) & (df.index < '2018-07-21')]
dayofyear = index_range.dayofyear  # it will not work around the new year...
dayofyear = dayofyear - dayofyear[0]
something = dayofyear * slope
trendline = pd.Series(something, index=index_range)

# Graph:
df.plot(label='data', alpha=0.8)
trendline.plot(label='some trend')
plt.legend(); plt.ylabel('something');
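(Side note on the question's actual title: for a literal 45 degree reference, i.e. one unit of y per day, you would simply set slope = 1 in either snippet above; how steep the line actually looks on screen additionally depends on the axes' units and aspect ratio.)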

matplotlib: how to define lines with text and mark lines joints?

I am trying to recreate the following:
Any comments will be appreciated. I want to imitate this picture, but I have 3 problems:
1. How do I find the joints of two lines and the turning points of a line? Can these specific points be calculated analytically, or can matplotlib find them for me?
2. How can I draw the dashed vertical line segment just below the joint of two lines?
3. How do I attach text to the segments of the lines? Can matplotlib determine a convenient location for text attached to the lines, or do I have to pick the location myself?
For example, I can only draw something like the plot below, which falls far short of what is required. Please help me improve my picture.
My own picture, which needs improvement:
Code so far:
# -*- coding: utf-8 -*
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import math
from pylab import *
c = 2.998*10**10
hp = 6.626*10**-27
hb = 1.055*10**-27
kb = 1.381*10**-16
g = 6.673*10**-8
me = 9.109*10**-28
mp = 1.673*10**-24
q = 4.803*10**-10
sigT = 6.652*10**-25
p = 2.5
E52 = 1000
epsB_r = 0.01
epse_r = 0.1
D28 = 1
n1 = 1.0
nu15 = 2*10**(-3)
r014 = 1
g42 = 5
delt12 =1
g4 = g42*10**2
E0 = E52*10**52
eta = g4
N0 = E0/(g4*mp*c**2)
p_td = 24*3600
p_txd = 3**(1./3)*2**(-4./3)*10**(52./3)*pi**(-1./3)*mp**(-1./3)*c**(-5./3)/p_td
txd = p_txd*n1**(-1./3)*eta**(-8./3)*E52**(1./3)
p_Fmax_r1 = 2**(1./2)*3**(-1)*pi**(-1./2)*me*mp**(1./2)*c**3*sigT*q**(-1)*p_txd**(-3./2)*10**(-56)
Fmax_r1 = lambda t : p_Fmax_r1*N0*eta**6*E52**(-1./2)*n1*epsB_r**(1./2)*D28**(-2)*t**(3./2)
p_Fmax_r2 = 2**(1./2)*3**(-1)*pi**(-1./2)*me*mp**(1./2)*c**3*sigT*q**(-1)*p_txd**(34./35)*10**(-56)
Fmax_r2 = lambda t : p_Fmax_r2*N0*epsB_r**(1./2)*D28**(-2)*t**(-34./35)*eta**(-62./105)*n1**(37./210)*E52**(34./105)
p_nuc_r1 = 2**(-13./2)*3**2*me*mp**(-3./2)*c**(-2)*sigT**(-2)*pi**(-1./2)*q*p_td**(-2)
p_nuc_r2 = 2**(-13./2)*3**2*pi**(-1./2)*me*mp**(-3./2)*c**(-2)*sigT**(-2)*q*p_txd**(-74./35)*p_td**(-2)
nuc_r1 = lambda t : p_nuc_r1*eta**(-4)*epsB_r**(-3./2)*n1**(-3./2)*t**(-2)
nuc_r2 = lambda t : p_nuc_r2*eta**(172./105)*t**(4./35)*n1**(-167./210)*epsB_r**(-3./2)*E52**(-74./105)
p_num_r1 = 2**(11./2)*7**(-2)*mp**(5./2)*me**(-3)*pi**(-1./2)*q*p_txd**(-6)
p_num_r2 = 2**(11./2)*7**(-2)*mp**(5./2)*me**(-3)*pi**(-1./2)*q*p_txd**(54./35)
num_r1 = lambda t : p_num_r1*eta**18*((p-2)/(p-1))**2*epse_r**2*epsB_r**(1./2)*n1**(5./2)*t**6*E52**(-2)
num_r2 = lambda t : p_num_r2*((p-2)/(p-1))**2*n1**(-1./70)*eta**(-74./35)*E52**(18./35)*t**(-54./35)*epse_r**2*epsB_r**(1./2)
def num_r_(t):
    return num_r1(t) if t < txd else num_r2(t)
num_r = np.vectorize(num_r_)

def nuc_r_(t):
    return nuc_r1(t) if t < txd else nuc_r2(t)
nuc_r = np.vectorize(nuc_r_)

def Fmax_r_(t):
    return Fmax_r1(t) if t < txd else Fmax_r2(t)
Fmax_r = np.vectorize(Fmax_r_)
i= np.arange(-5,-2,0.05)
t = 10**i
dnum = [math.log10(mmm) for mmm in num_r(t)]
dnuc = [math.log10(j) for j in nuc_r(t)]
nu_obs = [math.log(nu15*10**15,10) for a in i]
plt.figure('God Bless: Observable Limit')
plt.title(r'$\nu_{obs}$ and $\nu_c$ and $\nu_m$''\nComparison')
plt.xlabel('Time: log t')
plt.ylabel(r'log $\nu$')
plt.axvline(math.log10(txd))
plt.plot(i,nu_obs,'--',linewidth=2,label=r'$\nu_{obs}$')
plt.plot(i,dnum,'-.',linewidth=2,label=r'$\nu_m$')
plt.plot(i,dnuc,linewidth=2,label=r'$\nu_c$')
plt.legend()  # call legend() before savefig() so it appears in the saved file
plt.savefig("test4.eps", dpi=120, bbox_inches='tight')
plt.show()
I just found a solution; I'm not certain whether there is a better one.
I took reference from here: Annotate some points
My solution goes like this:
1. We can calculate the coordinates of the joint point of the lines analytically.
2. If we want to plot a segment of a vertical line, i.e. the segment below the joint point, we can choose two points and draw a short line between them. That does work!
3. For the illustrative text, we can simply pick a suitable location and attach the text to that place.
I added these lines:
plot([math.log10(txd), math.log10(txd)], [4, math.log10(nuc_r(txd))],
     color='blue', linewidth=2.5, linestyle="--")
scatter([math.log10(txd)], [math.log10(nuc_r(txd))], 50, color='blue')
annotate(r'$sin(\frac{2\pi}{3})=\frac{\sqrt{3}}{2}$',
         xy=(math.log10(txd), math.log10(nuc_r(txd))), xycoords='data',
         xytext=(+10, +30), textcoords='offset points', fontsize=16,
         arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=.2"))
And got the result as :
A Better One
