I have a subplot bar chart coded as:
axes = df.plot.bar(yerr=df1, figsize=(10, 8), legend=False,
                   title='Bar chart', grid=1, subplots=True, layout=(5, 1), xticks=None)
Is there an easy way to modify the code so that the numerical values from dataframe df appear on top of each bar?
UPDATE: with the code below I still get no values:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

df = pd.DataFrame(np.zeros((5, 3)))
df.index = ['[1,3)', '[3, 4)', '[4, 5)', '[5, 6)', '[7]']
df.columns = ['cat1', 'cat2', 'cat3']
df.iloc[0, :] = np.array([0.4, 0.3, 0.2])
df.iloc[1, :] = np.array([0, 0.1, 0.9])
df.iloc[2, :] = np.array([0.3, 0.1, 0.3])
df.iloc[3, :] = np.array([0, 0, 0.2])
df.iloc[4, :] = np.array([0.0, 0, 0.9])

se_df = pd.DataFrame(np.zeros((5, 3)))
se_df.index = ['[1,3)', '[3, 4)', '[4, 5)', '[5, 6)', '[7]']
se_df.columns = ['cat1', 'cat2', 'cat3']
se_df.iloc[0, :] = np.array([0.1, 0.2, 0.002])
se_df.iloc[1, :] = np.array([0.003, 0.02, 0.008])
se_df.iloc[2, :] = np.array([0.006, 0.03, 0.0002])
se_df.iloc[3, :] = np.array([0.001, 0, 0.0001])
se_df.iloc[4, :] = np.array([0.0001, 0, 0.0002])

df1 = df.transpose()
se_df1 = se_df.transpose()

axes = df1.plot.bar(yerr=se_df1, figsize=(10, 8), legend=False,
                    title='Title', grid=1, subplots=True, layout=(5, 1), xticks=None)

for n, i in enumerate(axes, 1):
    for rec, label in zip(i.patches, df.loc[:, n].astype(str)):
        height = rec.get_height()
        i.text(rec.get_x() + rec.get_width() / 2, height - 5, label,
               ha='center', va='bottom', color='w', weight='bold')
plt.tight_layout()
Could you please indicate what I am doing wrong?
Update with your new code:
df = pd.DataFrame(np.zeros((5, 3)))
df.index = ['[1,3)', '[3, 4)', '[4, 5)', '[5, 6)', '[7]']
df.columns = ['cat1', 'cat2', 'cat3']
df.iloc[0, :] = np.array([0.4, 0.3, 0.2])
df.iloc[1, :] = np.array([0, 0.1, 0.9])
df.iloc[2, :] = np.array([0.3, 0.1, 0.3])
df.iloc[3, :] = np.array([0, 0, 0.2])
df.iloc[4, :] = np.array([0.0, 0, 0.9])

se_df = pd.DataFrame(np.zeros((5, 3)))
se_df.index = ['[1,3)', '[3, 4)', '[4, 5)', '[5, 6)', '[7]']
se_df.columns = ['cat1', 'cat2', 'cat3']
se_df.iloc[0, :] = np.array([0.1, 0.2, 0.002])
se_df.iloc[1, :] = np.array([0.003, 0.02, 0.008])
se_df.iloc[2, :] = np.array([0.006, 0.03, 0.0002])
se_df.iloc[3, :] = np.array([0.001, 0, 0.0001])
se_df.iloc[4, :] = np.array([0.0001, 0, 0.0002])

df1 = df.transpose()
se_df1 = se_df.transpose()

naxes = df1.plot.bar(yerr=se_df1, figsize=(10, 8), legend=False,
                     title='Title', grid=1, subplots=True, layout=(5, 1), xticks=None)

for n, i in enumerate(naxes, 1):
    for rec, label in zip(i[0].patches, df1.iloc[:, n-1].astype(str)):
        height = rec.get_height()
        i[0].text(rec.get_x() + rec.get_width() / 2, height * .8, label,
                  ha='center', va='bottom', color='w', weight='bold')
plt.tight_layout()
Let's use:
np.random.seed(20)
df = pd.DataFrame({'Chart': [1, 2, 3, 4]*3, 'x': [1, 2, 3]*4, 'y': np.random.randint(0, 50, 12)})
df_chart = df.set_index(['x', 'Chart'])['y'].unstack()

naxes = df_chart.plot.bar(subplots=True, figsize=(15, 10), grid=1,
                          yerr=df.groupby(['Chart'])['y'].transform('std'))

for n, i in enumerate(naxes, 1):
    for rec, label in zip(i.patches, df_chart.loc[:, n].astype(str)):
        height = rec.get_height()
        i.text(rec.get_x() + rec.get_width() / 2, height - 5, label,
               ha='center', va='bottom', color='w', weight='bold')
plt.tight_layout()
Output:
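Side note (my own addition, not part of the original answer): if you are on matplotlib 3.4 or newer, Axes.bar_label can place the bar values for you and avoids the manual text placement. A minimal sketch, reusing the naxes array from the answer above:

from matplotlib.container import BarContainer

for ax in naxes:
    for container in ax.containers:
        if isinstance(container, BarContainer):   # skip error-bar containers, label only the bars
            ax.bar_label(container, fmt='%.1f', padding=2)
plt.tight_layout()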
To make the graph more readable, I would like to remove the trailing decimals from the x values between 0.1 and 20, and display the values between 0 and 0.09 as powers of ten.
So instead of 0.10 it should show 0.1, instead of 2.00 simply 2, and instead of 0.01 a power such as 10^-2, for example.
Do you have any idea how to do this?
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter
x = sorted([0.09, 20, 0.6, 6, 0.05, 0.5, 0.4, 4, 0.3, 3, 2, 0.1, 0.2,
            0.9, 0.8, 10, 0.7, 1, 0.08, 0.07, 0.06, 0.04, 0.03, 0.02, 0.01, 0.005])
y = list(np.arange(0.0, 2.6, 0.1))
plt.plot(x, y)
plt.xscale('log', base=10)
ax = plt.gca()
ax.xaxis.set_major_formatter(ScalarFormatter())
ax.set_xticks(x)
plt.xticks(fontsize=6)
plt.grid()
plt.show()
You can take advantage of the labels argument of the plt.xticks() function, like this:
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter
fig = plt.figure(figsize=(10, 7))
x = sorted([0.09, 20, 0.6, 6, 0.05, 0.5, 0.4, 4, 0.3, 3, 2, 0.1, 0.2,
            0.9, 0.8, 10, 0.7, 1, 0.08, 0.07, 0.06, 0.04, 0.03, 0.02, 0.01, 0.005])
y = list(np.arange(0.0, 2.6, 0.1))
plt.plot(x, y)
plt.xscale('log')
ax = plt.gca()
ax.xaxis.set_major_formatter(ScalarFormatter())
plt.xticks(ticks=x, labels=[str(el) for el in x], fontsize=6)
plt.grid()
plt.show()
The output looks like this:
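An alternative sketch (my own, not from the original answers): a custom FuncFormatter lets you drop trailing zeros above 0.1 and switch to scientific notation below it, which matches the formatting the question asks for.

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter, NullFormatter

def fmt_tick(val, pos):
    # values below 0.1 as powers of ten, everything else as a plain number
    if 0 < val < 0.1:
        return f'{val:.0e}'   # e.g. 0.01 -> '1e-02'
    return f'{val:g}'         # e.g. 0.10 -> '0.1', 2.00 -> '2'

x = sorted([0.09, 20, 0.6, 6, 0.05, 0.5, 0.4, 4, 0.3, 3, 2, 0.1, 0.2,
            0.9, 0.8, 10, 0.7, 1, 0.08, 0.07, 0.06, 0.04, 0.03, 0.02, 0.01, 0.005])
y = list(np.arange(0.0, 2.6, 0.1))

plt.plot(x, y)
plt.xscale('log')
ax = plt.gca()
ax.set_xticks(x)
ax.xaxis.set_major_formatter(FuncFormatter(fmt_tick))
ax.xaxis.set_minor_formatter(NullFormatter())  # hide minor tick labels on the log axis
plt.xticks(fontsize=6)
plt.grid()
plt.show()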
Consider the following toy code:
import matplotlib.pyplot as plt
fig = plt.figure()
plt.grid()
ax1 = fig.add_subplot(111)
X1 =[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
X2 = [1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000]
Y = [0.7, 0.6, 0.5, 0.4, 0.4, 0.3, 0.7, 0.8, 0.9, 0.2]
ax1.plot(X1,Y)
ax1.set_ylim([0, 1.0])
ax2 = ax1.twiny()
ax2.set_xlim(ax1.get_xlim())
ax2.set_xticks(X2)
ax1.set_xlabel('X1 Label')
ax1.set_ylabel('Y Label')
ax2.set_xlabel('X2 Label')
plt.show()
Which gives the following plot:
As one can see, the X1 axis has a padding at the end (where the 10 is located). However, the second axis X2 stops at the end of the actual graph, which is not desired.
Basically, I want the 10000 value aligned with the 10, ending in the same place with that padding.
The answer in this similar question does not help me because I am already doing what it was suggested (adding the set_xlim for the twiny() axis).
Also (albeit not as important as the original question), I would like to know why Matplotlib decided to draw every tick on the X2 axis instead of skipping some of them, as it does on the X1 axis.
You can connect() the bottom and top axes by registering an 'xlim_changed' callback on ax1 that triggers ax2.set_xlim().
Update: To restore padding on the axes limits, you can use something like this lim() function (default 5%):
import matplotlib.pyplot as plt

fig, ax1 = plt.subplots()
X1 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
X2 = [1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000]
Y = [0.7, 0.6, 0.5, 0.4, 0.4, 0.3, 0.7, 0.8, 0.9, 0.2]

ax1.plot(X1, Y)
ax1.set_ylim([0, 1.0])

# add axes margins by `pad` percent
def lim(x, pad=5):
    xmin, xmax = min(x), max(x)
    margin = (xmax - xmin) * pad / 100
    return xmin - margin, xmax + margin

ax2 = ax1.twiny()
ax1.callbacks.connect('xlim_changed', lambda ax1: ax2.set_xlim(*lim(X2)))
ax1.set_xlim(*lim(X1))  # trigger the xlim_changed callback
The two axes are completely separate, and X2 will not automatically align with the X1 axis when you change the ticks on an empty plot on your secondary axis.
If you plot a second line against X2 instead, they will align:
import matplotlib.pyplot as plt
fig = plt.figure()
plt.grid()
ax1 = fig.add_subplot(111)
X1 =[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
X2 = [1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000]
Y = [0.7, 0.6, 0.5, 0.4, 0.4, 0.3, 0.7, 0.8, 0.9, 0.2]
ax1.plot(X1,Y)
ax1.set_ylim([0, 1.0])
ax2 = ax1.twiny()
#ax2.set_xlim(ax1.get_xlim())
#ax2.set_xticks(X2)
ax2.plot(X2,Y) # Plot a second line on top of the previous one
ax1.set_xlabel('X1 Label')
ax1.set_ylabel('Y Label')
ax2.set_xlabel('X2 Label')
plt.show()
You could also do the following: use the correct xlim for X2 and remove the margin from X1 so it matches X2. You could probably add some margin to X2 instead somehow, but I'm not sure how to do that without plotting anything on X2.
import matplotlib.pyplot as plt
fig = plt.figure()
plt.grid()
ax1 = fig.add_subplot(111)
X1 =[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
X2 = [1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000]
Y = [0.7, 0.6, 0.5, 0.4, 0.4, 0.3, 0.7, 0.8, 0.9, 0.2]
ax1.plot(X1,Y)
ax1.set_ylim([0, 1.0])
ax2 = ax1.twiny()
ax2.set_xlim(X2[0], X2[-1]) # Correct xlim for secondary axis
ax2.set_xticks(X2)
ax1.margins(0) # Remove margin from first axis to match X2
ax1.set_xlabel('X1 Label')
ax1.set_ylabel('Y Label')
ax2.set_xlabel('X2 Label')
plt.show()
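Regarding adding a margin to X2 instead (my own sketch, not part of the original answer): since the twin axis has no data of its own, you can pad both axes by the same fraction of their data span, replacing the ax2.set_xlim(...) and ax1.margins(0) lines above:

pad = 0.05  # 5% padding on each side, matplotlib's default data margin

span = X2[-1] - X2[0]
ax2.set_xlim(X2[0] - pad * span, X2[-1] + pad * span)
ax1.margins(x=pad)  # same fractional padding on X1, so the ticks of both axes stay aligned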
A half-open interval of the form [0,0.5) can be created using the following code:
rv = np.linspace(0., 0.5, nr, endpoint=False)
where nr is the number of points in the interval.
Question: How do I use linspace to create an open interval of the form (a,b) or a half-open interval of the form (a,b]?
Probably the simplest way (since this functionality isn't built into np.linspace()) is to just slice what you want.
Let's say you're interested in the interval [0,1] with a spacing of 0.1.
>>> import numpy as np
>>> np.linspace(0, 1, 11) # [0,1]
array([0. , 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1. ])
>>> np.linspace(0, 1, 11-1, endpoint=False) # [0,1)
array([0. , 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
>>> np.linspace(0, 1, 11)[:-1] # [0,1) again
array([0. , 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
>>> np.linspace(0, 1, 11)[1:] # (0,1]
array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1. ])
>>> np.linspace(0, 1, 11)[1:-1] # (0,1)
array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
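An aside (my own addition, not part of the original answer): if you start from a step size h instead of a point count, you can compute the count for the closed interval and slice the same way:

>>> a, b, h = 0.0, 1.0, 0.1
>>> n = int(round((b - a) / h)) + 1   # number of points in the closed interval [a, b]
>>> np.linspace(a, b, n)[1:-1]        # (a, b)
array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])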
I'm new to this, but I have started to learn about neural networks.
I want to make a simple NN with Python and NumPy. I watched a tutorial on YouTube about it and did everything the same way, but I get an error:
output = sigmoid(np.dot(input_layer, weights))
ValueError: shapes (13,3) and (13,1) not aligned: 3 (dim 1) != 13 (dim 0)
I know that my output array should be a 1D array, but for some reason I can't get that.
What am I doing wrong?
import numpy as np
import pandas as pd

df = pd.DataFrame({'input 1': [0.5, 0.3, 0, 0.1, 0.4, -0.4, 0.4, -0.1, -0.6, 0.2, 0.6, 0, 0.2],
                   'input 2': [0.3, 0.5, -0.4, -0.2, 0.9, 0, 0.35, -0.4, -0.9, 0.4, 0.3, -0.1, 0.1],
                   'input 3': [0, 0.4, 0, -0.1, 0.4, -0.2, 0.4, -0.3, -0.1, 0.1, 0.3, 0, 0.5],
                   'result': [1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1]})
print(df)

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

features = np.array(df.iloc[:, :-1])
results = np.array(df.iloc[:, -1:]).T

np.random.seed(10)
weights = 2 * np.random.random((13, 1)) - 1
print('These are my random weights:\n')
print(weights)

for iteration in range(1):
    input_layer = features
    output = sigmoid(np.dot(input_layer, weights))

print('\nOutput result:\n', output)
I have managed to find the solution:
import numpy as np
import pandas as pd

df = pd.DataFrame({'input 1': [0.5, 0.3, 0, 0.1, 0.4, -0.4, 0.4, 0.1, -0.6, 0.2, 0.6, 0, 0.2, 0.2, -0.1, -0.1, 0, 0.4, -0.2, -0.4],
                   'input 2': [0.3, 0.6, -0.4, -0.2, 0.9, 0, 0.35, -0.4, -0.9, 0.4, 0.3, -0.1, 0.1, 0.3, 0.1, 0.1, 0.3, 0.1, 0.3, 0.3],
                   'input 3': [0, 0.4, 0, -0.1, 0.4, -0.2, 0.7, -0.3, -0.1, 0.1, 0.3, 0, 0.5, 0.4, -0.31, 0.1, 0.3, 0.1, 0.1, 0.2],
                   'result': [1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0]})
print(df)

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def sigmoid_derivate(x):
    return x * (1 - x)

features = df.iloc[:, :-1].to_numpy()
results = df.iloc[:, -1:].to_numpy()

np.random.seed(1)
weights = 2 * np.random.random((3, 1)) - 1
print('These are my random weights:\n')
print(weights)

for iteration in range(100000):
    input_layer = features
    outputs = sigmoid(np.dot(input_layer, weights))
    error = results - outputs
    adjustments = error * sigmoid_derivate(outputs)
    weights += np.dot(input_layer.T, adjustments)

df['output prediction'] = outputs.round(0)
print(df)
I have written code for a neural network that uses the sigmoid function. I made it with NumPy and Python.
The code works, but now I want to tune it to improve accuracy. How can I tune my NN? Do I need to add some parameters, or add hidden layers to it?
Is that even possible?
This is the code that I have:
import itertools

import numpy as np
import pandas as pd

df = pd.DataFrame({'input 1': [0.5, 0.3, 0, 0.1, 0.4, -0.4, 0.4, 0.1, -0.6, 0.2, 0.6, 0, 0.2, 0.2, -0.1, -0.1, 0, 0.4, -0.2, -0.4],
                   'input 2': [0.3, 0.6, -0.4, -0.2, 0.9, 0, 0.35, -0.4, -0.9, 0.4, 0.3, -0.1, 0.1, 0.3, 0.1, 0.1, 0.3, 0.1, 0.3, 0.3],
                   'input 3': [0, 0.4, 0, -0.1, 0.4, -0.2, 0.7, -0.3, -0.1, 0.1, 0.3, 0, 0.5, 0.4, -0.31, 0.1, 0.3, 0.1, 0.1, 0.2],
                   'result': [1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0]})
print(df)

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def sigmoid_derivate(x):
    return x * (1 - x)

features = df.iloc[:, :-1].to_numpy()
results = df.iloc[:, -1:].to_numpy()

np.random.seed(1)
weights = 2 * np.random.random((3, 1)) - 1
print('These are my random weights:\n')
print(weights)

for iteration in range(100000):
    input_layer = features
    outputs = sigmoid(np.dot(input_layer, weights))
    error = results - outputs
    adjustments = error * sigmoid_derivate(outputs)
    weights += np.dot(input_layer.T, adjustments)

outputs = outputs.round(0).tolist()
outputs = list(itertools.chain(*outputs))
outputs.insert(0, 'None')

df['output prediction'] = outputs
print(df)

df1 = df.tail(len(df)-1)
#print(df1)

acc = 0
for i, j in zip(df1['result'], df1['output prediction']):
    if i == j:
        acc += 1
accuracy = round(acc * 100 / len(df1), 2)
print(accuracy)
I think it should be added below the part where I define the weights, but I'm not sure.
Thanks for your help!
import numpy as np
import pandas as pd

df = pd.DataFrame({'input 1': [0.5, 0.3, 0, 0.1, 0.4, -0.4, 0.4, 0.1, -0.6, 0.2, 0.6, 0, 0.2, 0.2, -0.1, -0.1, 0, 0.4, -0.2, -0.4],
                   'input 2': [0.3, 0.6, -0.4, -0.2, 0.9, 0, 0.35, -0.4, -0.9, 0.4, 0.3, -0.1, 0.1, 0.3, 0.1, 0.1, 0.3, 0.1, 0.3, 0.3],
                   'input 3': [0, 0.4, 0, -0.1, 0.4, -0.2, 0.7, -0.3, -0.1, 0.1, 0.3, 0, 0.5, 0.4, -0.31, 0.1, 0.3, 0.1, 0.1, 0.2],
                   'result': [1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0]})
print(df)

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def sigmoid_derivate(x):
    return x * (1 - x)

alpha = 0.1  # define the learning rate alpha

features = np.array(df.iloc[:, :-1])
results = np.array(df.iloc[:, -1:])

np.random.seed(1)
weight0 = 2 * np.random.random((3, 10)) - 1   # 3 - number of features; 10 - number of nodes in hidden layer
weight1 = 2 * np.random.random((10, 4)) - 1   # 10 - number of nodes in hidden layer; 4 - number of nodes in output layer
weight2 = 2 * np.random.random((4, 1)) - 1    # 4 - number of nodes in output layer; 1 - number of labels
# you can change the layers' node counts, but adjacent shapes must still allow the dot product,
# for example (320, 160) and (160, 40)

for iteration in range(1000):
    l0 = features
    l1 = sigmoid(np.dot(l0, weight0))
    l2 = sigmoid(np.dot(l1, weight1))
    l3 = sigmoid(np.dot(l2, weight2))

    l3_error = results - l3
    print("Error after " + str(iteration) + " iterations: " + str(np.mean(np.abs(l3_error))))

    l3_delta = l3_error * sigmoid_derivate(l3)
    l2_error = l3_delta.dot(weight2.T)
    l2_delta = l2_error * sigmoid_derivate(l2)
    l1_error = l2_delta.dot(weight1.T)
    l1_delta = l1_error * sigmoid_derivate(l1)

    weight2 += alpha * l2.T.dot(l3_delta)
    weight1 += alpha * l1.T.dot(l2_delta)
    weight0 += alpha * l0.T.dot(l1_delta)
Here is your code with one input layer, one hidden layer, and one output layer.
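As a follow-up (my own sketch, not part of the original answer), you can reuse the accuracy check from the question on this deeper network by comparing the rounded final-layer outputs l3 against results after the training loop:

predictions = l3.round(0)                          # threshold the sigmoid outputs at 0.5
accuracy = (predictions == results).mean() * 100   # fraction of correct predictions, as a percentage
print('Accuracy: {:.2f}%'.format(accuracy))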