Why doesn't my handmade numpy neural network learn? - python

As an exercise I was building a neural network in numpy from scratch.
For simplicity I wanted to use it to solve the XOR problem. I derived all the equations and put everything together, but it looks like my network doesn't learn. I've spent some time trying to spot the mistake, but without success. Maybe you can spot something I'm missing here?
X = [(0,0), (1,0), (0,1), (1,1)]
Y = [0, 1, 1, 0]

w1 = 2 * np.random.random(size=(2,3)) - 1
w2 = 2 * np.random.random(size=(3,1)) - 1
b1 = 2 * np.random.random(size=(1,3)) - 1
b2 = 2 * np.random.random(size=(1,1)) - 1

def sigmoid(x):
    return 1./(1 + np.exp(-x))

def dsigmoid(y):
    return y*(1-y)

N = 1000
error = np.zeros((N,1))
for n in range(N):
    Dw_1 = np.zeros((2,3))
    Dw_2 = np.zeros((3,1))
    Db_1 = np.zeros((1,3))
    Db_2 = np.zeros((1,1))

    for i in range(len(X)): # iterate over all examples
        x = np.array(X[i])
        y = np.array(Y[i])

        # Forward pass, 1st layer
        act1 = np.dot(w1.T, x) + b1
        lay1 = sigmoid(act1)
        # Forward pass, 2nd layer
        act2 = np.dot(w2.T, lay1.T) + b2
        lay2 = sigmoid(act2)

        # Computing error
        E = 0.5*(lay2 - y)**2
        error[n] += E[0]

        # Backprop, 2nd layer
        delta_l2 = (y-lay2) * dsigmoid(lay2)
        corr_w2 = (delta_l2 * lay1).T
        corr_b2 = delta_l2 * 1
        # Backprop, 1st layer
        delta_l1 = np.dot(w2, delta_l2) * dsigmoid(lay1).T
        corr_w1 = np.outer(x, delta_l1)
        corr_b1 = (delta_l1 * 1).T

        Dw_2 += corr_w2
        Dw_1 += corr_w1
        Db_2 += corr_b2
        Db_1 += corr_b1

        if n % 1000 == 0:
            print y, lay2,
    if n % 1000 == 0:
        print

    w2 = w2 - eta * Dw_2
    b2 = b2 - eta * Db_2
    w1 = w1 - eta * Dw_1
    b1 = b1 - eta * Db_1
    error[n] /= len(X)

There were a few small mistakes in it; I hope this helps. The key one is the sign of the error term: with updates of the form w = w - eta * Dw, the gradients have to be built from (output - y), not (y - output).
import numpy as np
import matplotlib.pyplot as plt

X = [(0, 0), (1, 0), (0, 1), (1, 1)]
Y = [0, 1, 1, 0]
eta = 0.7

w1 = 2 * np.random.random(size=(2, 3)) - 1
w2 = 2 * np.random.random(size=(3, 1)) - 1
b1 = 2 * np.random.random(size=(1, 3)) - 1
b2 = 2 * np.random.random(size=(1, 1)) - 1

def sigmoid(x):
    return 1. / (1 + np.exp(-x))

def dsigmoid(y):
    return y * (1 - y)

N = 2000
error = []
for n in range(N):
    Dw_1 = np.zeros((2, 3))
    Dw_2 = np.zeros((3, 1))
    Db_1 = np.zeros((1, 3))
    Db_2 = np.zeros((1, 1))
    tmp_error = 0

    for i in range(len(X)):  # iterate over all examples
        x = np.array(X[i]).reshape(1, 2)
        y = np.array(Y[i])

        # Forward pass: x is (1, 2), layer1 is (1, 3), output is (1, 1)
        layer1 = sigmoid(np.dot(x, w1) + b1)
        output = sigmoid(np.dot(layer1, w2) + b2)

        tmp_error += np.mean(np.abs(output - y))

        # Gradients; note (output - y) matches the subtraction in the update below
        d_w2 = np.dot(layer1.T, ((output - y) * dsigmoid(output)))
        d_b2 = np.dot(1, ((output - y) * dsigmoid(output)))
        d_w1 = np.dot(x.T, (np.dot((output - y) * dsigmoid(output), w2.T) * dsigmoid(layer1)))
        d_b1 = np.dot(1, (np.dot((output - y) * dsigmoid(output), w2.T) * dsigmoid(layer1)))

        Dw_2 += d_w2
        Dw_1 += d_w1
        Db_1 += d_b1
        Db_2 += d_b2

    # one gradient-descent step per epoch with the accumulated gradients
    w2 = w2 - eta * Dw_2
    w1 = w1 - eta * Dw_1
    b1 = b1 - eta * Db_1
    b2 = b2 - eta * Db_2
    error.append(tmp_error)

error = np.array(error)
print(error.shape)
plt.plot(error)
plt.show()
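As a quick sanity check after training (using the variables from the corrected code above), you can print the network's output for all four XOR patterns; the outputs should move toward the targets as training succeeds:

for x, y in zip(X, Y):
    hidden = sigmoid(np.dot(np.array(x).reshape(1, 2), w1) + b1)
    out = sigmoid(np.dot(hidden, w2) + b2)
    print(x, y, float(out[0, 0]))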

Related

Neural net decision boundaries are perpendicular to true boundaries

I have built a small neural net taking in two inputs, with two neurons in the hidden layer and one neuron in the output layer. The results are "mirror images", i.e. the decision boundaries are perpendicular to the true boundaries. Does anyone know how this might be happening, or what mistake I may have made?
[plots omitted: linear data, nonlinear data]
import random, numpy, math

lr = 0.1 #learning rate
dt = '4' #data type 1: linear 2: curve 3: box 4: XORish
epochs = 100000
tda = 50 #training data amount

def step(x): #step function
    if x > 0:
        x = 1
    else:
        x = 0
    return x

def error(truth, output):
    return 0.5 * (truth - output)**2

def sig(x): #sigmoid activation
    return 1/(1+numpy.exp(-x))

#weights
w = [random.random(),random.random(),random.random(),random.random(),random.random(),random.random()]
#biases
b = [random.random(),random.random(),random.random()]

def Net(x, y, t): # t is truth (or target)
    h1 = x*w[0]+y*w[1]+b[0] #summation in h1, first neuron in hidden layer
    h1out = sig(h1) #sigmoid activation
    h2 = x*w[2]+y*w[3]+b[1]
    h2out = sig(h2)
    z = h1out*w[4]+h2out*w[5]+b[2] #z is output neuron
    zout = sig(z)
    e = error(t, zout) # e is error
    #backpropagation, partial differentiations to find error at each weight and bias
    e5 = (zout-t) * (zout * (1 - zout)) * h1out #e5 is error at weight 5 etc
    e6 = (zout-t) * (zout * (1 - zout)) * h2out
    e1 = (zout-t) * (zout * (1 - zout)) * w[4] * (h1out * (1 - h1out)) * x
    e2 = (zout-t) * (zout * (1 - zout)) * w[4] * (h1out * (1 - h1out)) * y
    e3 = (zout-t) * (zout * (1 - zout)) * w[5] * (h2out * (1 - h2out)) * x
    e4 = (zout-t) * (zout * (1 - zout)) * w[5] * (h2out * (1 - h2out)) * y
    be3 = (zout-t) * (zout * (1 - zout))
    be1 = (zout-t) * (zout * (1 - zout)) * w[4] * (h1out * (1 - h1out))
    be2 = (zout-t) * (zout * (1 - zout)) * w[5] * (h2out * (1 - h2out))
    #updating weights and biases
    w[0] = w[0] - (e1 * lr)
    w[1] = w[1] - (e2 * lr)
    w[2] = w[2] - (e3 * lr)
    w[3] = w[3] - (e4 * lr)
    w[4] = w[4] - (e5 * lr)
    w[5] = w[5] - (e6 * lr)
    b[2] = b[2] - (be3 * lr)
    b[0] = b[0] - (be1 * lr)
    b[1] = b[1] - (be2 * lr)

train_data = []
while len(train_data)<tda: #makes training data
    x = random.randrange(100)
    y = random.randrange(100)
    if dt == '1':
        if x+y>100:
            truth = 1
        else:
            truth = 0
    elif dt == '2':
        if x*y>1000:
            truth = 1
        else:
            truth = 0
    elif dt == '3':
        if x>50 or y>50:
            truth = 1
        else:
            truth = 0
    elif dt == '4':
        if x+y>60 and x+y<140:
            truth = 1
        else:
            truth = 0
    if [x,y,truth] not in train_data:
        train_data.append([x,y,truth])

for n in range(epochs): #EPOCHS
    for i in train_data:
        Net(i[0], i[1], i[2])
    if n%1000 == 0 and n != 0:
        print (n, 'epochs')
    if n%5000 == 0:
        #every 5000 epochs the net is fed new test data and results are plotted
        test_data = []
        while len(test_data)<(50): #makes test data
            x = random.randrange(100)
            y = random.randrange(100)
            if [x, y] not in test_data and [x, y, 0] not in train_data and [x, y, 1] not in train_data:
                test_data.append([x, y])
        #classifying the test data with the net
        for i in test_data:
            x = i[0]
            y = i[1]
            h1 = x*w[0]+y*w[1]+b[0]
            h1out = sig(h1)
            h2 = x*w[2]+y*w[3]+b[1]
            h2out = sig(h2)
            z = h1out*w[4]+h2out*w[5]+b[2]
            zout = step(z)
            i.append(zout)
        print (test_data)
        print (w, b)
        #plotting results
        import matplotlib.pyplot as plt
        x0 = []
        y0 = []
        x1 = []
        y1 = []
        xt0 = []
        yt0 = []
        xt1 = []
        yt1 = []
        for i in train_data:
            if i[2] == 0:
                x0.append(i[0])
                y0.append(i[1])
            else:
                x1.append(i[0])
                y1.append(i[1])
        for i in test_data:
            if i[2] == 0:
                xt0.append(i[0])
                yt0.append(i[1])
            else:
                xt1.append(i[0])
                yt1.append(i[1])
        plt.clf()
        plt.scatter(xt0, yt0, 30, color = 'red')
        plt.scatter(xt1, yt1, 30, color = 'blue')
        plt.scatter(x0, y0, 10, color = 'orange')
        plt.scatter(x1, y1, 10, color = 'green')
        plt.title(str(tda)+" training data, "+str(epochs)+" epochs \n Red/Orange are small things, Blue/Green are big things \n Orange/Green are training data, Red/Blue are test data")
        plt.xlabel("Width")
        plt.ylabel("Height")
        plt.savefig('plot.png')
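One thing that may be worth checking (an observation about the code above, not a confirmed fix for the mirror-image behaviour): the raw inputs run from 0 to 100, so the sums fed into sig() sit far out in the flat tails of the sigmoid where the gradient is tiny. A common remedy is to scale the inputs into [0, 1] before both training and testing, e.g.:

def scale(v):
    return v / 100.0  # inputs come from random.randrange(100)

Net(scale(i[0]), scale(i[1]), i[2])  # in the training loop; scale the test inputs the same way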

Python neural network does not train

I have a simple neural network with 2 input neurons, 3 hidden neurons and 1 output neuron. The hidden layer has a bias.
I'm not using matrix operations for the feed-forward and backpropagation. When I run the training function on a simple linear dataset, the error rises and the prediction result is wrong.
import random
from math import exp,pow,tanh

def random_weight():
    return random.random()

def sigmoid(x):
    return 1.0 / (1.0 + exp(-x))

def sigmoid_drv(x):
    return sigmoid(x)*(1.0-sigmoid(x))

w11_I = random_weight()
w12_I = random_weight()
w21_I = random_weight()
w22_I = random_weight()
w31_I = random_weight()
w32_I = random_weight()
w11_II = random_weight()
w12_II = random_weight()
w13_II = random_weight()
b_I = 1

activation = sigmoid
activation_drv = sigmoid_drv

def predict(x1,x2):
    global w11_I,w12_I,w21_I,w22_I,w31_I,w32_I,w11_II,w12_II,w13_II,b_I
    a1_I = w11_I*x1 + w12_I*x2 + b_I
    z1_I = activation(a1_I)
    a2_I = w21_I*x1 + w22_I*x2 + b_I
    z2_I = activation(a2_I)
    a3_I = w31_I*x1 + w32_I*x2 + b_I
    z3_I = activation(a3_I)
    a1_II = w11_II*z1_I + w12_II*z2_I + w13_II*z3_I
    z1_II = activation(a1_II)
    return a1_I, z1_I, a2_I, z2_I, a3_I, z3_I, a1_II, z1_II

def train(x1,x2,y,alpha):
    global w11_I,w12_I,w21_I,w22_I,w31_I,w32_I,w11_II,w12_II,w13_II,b_I
    a1_I, z1_I, a2_I, z2_I, a3_I, z3_I, a1_II, z1_II = predict(x1,x2)
    error = 0.5 * pow(y-z1_II,2)
    delta = y-z1_II * activation_drv(a1_II)
    w11_II += delta * z1_I * alpha
    w12_II += delta * z2_I * alpha
    w13_II += delta * z3_I * alpha
    w11_I += delta * w11_II * activation_drv(a1_I) * x1 * alpha
    w12_I += delta * w11_II * activation_drv(a1_I) * x2 * alpha
    w21_I += delta * w12_II * activation_drv(a2_I) * x1 * alpha
    w22_I += delta * w12_II * activation_drv(a2_I) * x2 * alpha
    w31_I += delta * w13_II * activation_drv(a3_I) * x1 * alpha
    w32_I += delta * w13_II * activation_drv(a3_I) * x2 * alpha
    b_I += (delta * w11_II * activation_drv(a1_I) + delta * w12_II * activation_drv(a2_I) + delta * w13_II * activation_drv(a3_I)) * alpha
    return error

data = [
    [0,0,0],
    [0,1,1],
    [1,0,1],
    [1,1,1],
]
for i in range(0,10):
    err = 0
    dt = data[::]
    random.shuffle(dt)
    for j in dt:
        err += train(j[0],j[1],j[2],0.01)
    print(err)
print("-"*30)
for j in data:
    _, _, _, _, _, _, _, res = predict(j[0],j[1])
    print(j[0],",",j[1],"=",res)
For example the result of the code is:
0.363894453262
0.366966815948
0.366406041572
0.369982058232
0.36988850637
0.375869833099
0.378106172616
0.380456639936
0.37901554717
0.383723920259
------------------------------
(0, ',', 0, '=', 0.8439871540493414)
(0, ',', 1, '=', 0.861714406183168)
(1, ',', 0, '=', 0.8515477541104413)
(1, ',', 1, '=', 0.8676931366534011)
---------------- UPDATE ----------------
I changed the code to this:
import random
from math import exp,pow

def random_weight():
    return random.random()

def sigmoid(x):
    return 1.0 / (1.0 + exp(-x))

def sigmoid_drv(x):
    return sigmoid(x)*(1.0-sigmoid(x))

w11_I = random_weight()
w12_I = random_weight()
w21_I = random_weight()
w22_I = random_weight()
w31_I = random_weight()
w32_I = random_weight()
w11_II = random_weight()
w12_II = random_weight()
w13_II = random_weight()
b_I = random_weight()

activation = sigmoid
activation_drv = sigmoid_drv

def predict(x1,x2):
    global w11_I,w12_I,w21_I,w22_I,w31_I,w32_I,w11_II,w12_II,w13_II,b_I
    a1_I = w11_I*x1 + w12_I*x2 + b_I
    z1_I = activation(a1_I)
    a2_I = w21_I*x1 + w22_I*x2 + b_I
    z2_I = activation(a2_I)
    a3_I = w31_I*x1 + w32_I*x2 + b_I
    z3_I = activation(a3_I)
    a1_II = w11_II*z1_I + w12_II*z2_I + w13_II*z3_I
    z1_II = activation(a1_II)
    return a1_I, z1_I, a2_I, z2_I, a3_I, z3_I, a1_II, z1_II

def train(x1,x2,y,alpha):
    global w11_I,w12_I,w21_I,w22_I,w31_I,w32_I,w11_II,w12_II,w13_II,b_I
    a1_I, z1_I, a2_I, z2_I, a3_I, z3_I, a1_II, z1_II = predict(x1,x2)
    error = 0.5 * pow(z1_II-y,2)
    delta = z1_II-y * activation_drv(a1_II)
    d_w11_II = delta * z1_I * alpha
    d_w12_II = delta * z2_I * alpha
    d_w13_II = delta * z3_I * alpha
    d_w11_I = delta * w11_II * activation_drv(a1_I) * x1 * alpha
    d_w12_I = delta * w11_II * activation_drv(a1_I) * x2 * alpha
    d_w21_I = delta * w12_II * activation_drv(a2_I) * x1 * alpha
    d_w22_I = delta * w12_II * activation_drv(a2_I) * x2 * alpha
    d_w31_I = delta * w13_II * activation_drv(a3_I) * x1 * alpha
    d_w32_I = delta * w13_II * activation_drv(a3_I) * x2 * alpha
    d_b_I = (delta * w11_II * activation_drv(a1_I) + delta * w12_II * activation_drv(a2_I) + delta * w13_II * activation_drv(a3_I)) * alpha
    w11_II -= d_w11_II
    w12_II -= d_w12_II
    w13_II -= d_w13_II
    w11_I -= d_w11_I
    w12_I -= d_w12_I
    w21_I -= d_w21_I
    w22_I -= d_w22_I
    w31_I -= d_w31_I
    w32_I -= d_w32_I
    b_I -= d_b_I
    return error

data = [
    [0,0,0],
    [0,1,0],
    [1,0,0],
    [1,1,1],
]
for i in range(0,10):
    err = 0
    dt = data[::]
    random.shuffle(dt)
    for j in dt:
        err += train(j[0],j[1],j[2],0.01)
    print(err)
print("-"*30)
for j in data:
    _, _, _, _, _, _, _, res = predict(j[0],j[1])
    print(j[0],",",j[1],"=",res)
I subtract the weight deltas from the weights now. The error of the network decreases, but the prediction is still wrong.
The result of the above code:
0.7793443881847488
0.7577581315356949
0.7432698222320477
0.7316129719356839
0.7160385688813552
0.6943522088277978
0.6862277294774705
0.6656984495700775
0.6584361784187711
0.6410006126876817
------------------------------
0 , 0 = 0.6049212721996029
0 , 1 = 0.6227402202339664
1 , 0 = 0.6139758543180651
1 , 1 = 0.6293581473456563
One possible error is in the calculation of delta:
delta = z1_II-y * activation_drv(a1_II)
Add parentheses and change this to:
delta = (z1_II-y) * activation_drv(a1_II)
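The parentheses matter because * binds more tightly than -, so the original line computes z1_II - (y * activation_drv(a1_II)) rather than the intended error term. A quick illustration:

>>> 5 - 2 * 3        # parsed as 5 - (2 * 3)
-1
>>> (5 - 2) * 3
9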
I found the problem: the sigmoid activation was not a good fit for this network. I changed it to tanh and the prediction results are correct now.
The final code:
import random
from math import exp,pow

class ANN:
    def random_weight(self):
        return random.random()

    def sigmoid(self,x):
        return 1.0 / (1.0 + exp(-x))

    def sigmoid_drv(self,x):
        return self.sigmoid(x)*(1.0-self.sigmoid(x))

    def tanh(self, x):
        return (exp(x) - exp(-x)) / (exp(x) + exp(-x))

    def tanh_drv(self,x):
        return 1 - pow(self.tanh(x),2)

    def __init__(self):
        self.w11_I = self.random_weight()
        self.w12_I = self.random_weight()
        self.w21_I = self.random_weight()
        self.w22_I = self.random_weight()
        self.w31_I = self.random_weight()
        self.w32_I = self.random_weight()
        self.w11_II = self.random_weight()
        self.w12_II = self.random_weight()
        self.w13_II = self.random_weight()
        self.b_I = self.random_weight()
        self.activation = self.tanh
        self.activation_drv = self.tanh_drv

    def predict(self,x1,x2):
        a1_I = self.w11_I*x1 + self.w12_I*x2 + self.b_I
        z1_I = self.activation(a1_I)
        a2_I = self.w21_I*x1 + self.w22_I*x2 + self.b_I
        z2_I = self.activation(a2_I)
        a3_I = self.w31_I*x1 + self.w32_I*x2 + self.b_I
        z3_I = self.activation(a3_I)
        a1_II = self.w11_II*z1_I + self.w12_II*z2_I + self.w13_II*z3_I
        z1_II = self.activation(a1_II)
        return a1_I, z1_I, a2_I, z2_I, a3_I, z3_I, a1_II, z1_II

    def train(self,x1,x2,y,alpha):
        a1_I, z1_I, a2_I, z2_I, a3_I, z3_I, a1_II, z1_II = self.predict(x1,x2)
        error = 0.5 * pow(z1_II-y,2)
        delta = (z1_II-y) * self.activation_drv(a1_II)
        d_w11_II = delta * z1_I * alpha
        d_w12_II = delta * z2_I * alpha
        d_w13_II = delta * z3_I * alpha
        d_w11_I = delta * self.w11_II * self.activation_drv(a1_I) * x1 * alpha
        d_w12_I = delta * self.w11_II * self.activation_drv(a1_I) * x2 * alpha
        d_w21_I = delta * self.w12_II * self.activation_drv(a2_I) * x1 * alpha
        d_w22_I = delta * self.w12_II * self.activation_drv(a2_I) * x2 * alpha
        d_w31_I = delta * self.w13_II * self.activation_drv(a3_I) * x1 * alpha
        d_w32_I = delta * self.w13_II * self.activation_drv(a3_I) * x2 * alpha
        d_b_I = (delta * self.w11_II * self.activation_drv(a1_I) + delta * self.w12_II * self.activation_drv(a2_I) + delta * self.w13_II * self.activation_drv(a3_I)) * alpha
        self.w11_II -= d_w11_II
        self.w12_II -= d_w12_II
        self.w13_II -= d_w13_II
        self.w11_I -= d_w11_I
        self.w12_I -= d_w12_I
        self.w21_I -= d_w21_I
        self.w22_I -= d_w22_I
        self.w31_I -= d_w31_I
        self.w32_I -= d_w32_I
        self.b_I -= d_b_I
        return error

model = ANN()
data = [
    [0,0,0],
    [0,1,0],
    [1,0,0],
    [1,1,1],
]
for i in range(0,200):
    err = 0
    dt = data[::]
    random.shuffle(dt)
    for j in dt:
        err += model.train(j[0],j[1],j[2],0.1)
    print(err)
print("-"*30)
for j in data:
    _, _, _, _, _, _, _, res = model.predict(j[0],j[1])
    print(j[0],",",j[1],"=",res)
Result of code :
...
0.1978539306282795
0.19794670251861882
0.19745074826953185
0.19529942727878868
0.19779970636626873
0.19661596298810918
------------------------------
0 , 0 = -0.24217968147818447
0 , 1 = 0.236033934015224
1 , 0 = 0.24457439328909888
1 , 1 = 0.5919949310028919

Unsupported operand type(s) for *: 'int' and 'NoneType'

The exact error reads as:
Traceback (most recent call last):
File "C:/Users/Name/Downloads/MachineLearning.py", line 73, in <module>
class d0:
File "C:/Users/Name/Downloads/MachineLearning.py", line 83, in d0
z = hypothesis1(W0,W1,W2,W3,Z0,Z1,Z2,Z3)
File "C:/Users/Name/Downloads/MachineLearning.py", line 41, in hypothesis1
W0 * Z0 + W1 * Z1 + W2 * Z2 + W3 * Z3
TypeError: unsupported operand type(s) for *: 'int' and 'NoneType'
And here is the code that causes the error:
x0 = 1
z0 = 1
x1 = int(input("Enter 0 or 1: "))
x2 = int(input("Enter 0 or 1: "))
y = ""
learning_rate = 0.1
w1 = 1
w2 = 1
w3 = 1
w4 = 1
w5 = 1
w6 = 1
w7 = 1
w8 = 1
w9 = 1
v1 = 1
v2 = 1
v3 = 1
v4 = 1

if x1 == 0 and x2 == 0:
    y = 0
elif x1 == 1 and x2 == 0:
    y = 1
elif x1 == 0 and x2 == 1:
    y = 1
elif x1 == 1 and x2 == 1:
    y = 0
else:
    y = 0

def hypothesis(W0,W1,W2,X0,X1,X2):
    W0 * X0 + W1 * X1 + W2 * X2

def hypothesis1(W0,W1,W2,W3,Z0,Z1,Z2,Z3):
    (W0 * Z0) + (W1 * Z1) + (W2 * Z2) + (W3 * Z3)

class z1:
    W0 = w1
    W1 = w4
    W2 = w7
    X0 = x0
    X1 = x1
    X2 = x2
    z = hypothesis(W0,W1,W2,X0,X1,X2)

class z2:
    W0 = w2
    W1 = w5
    W2 = w8
    X0 = x0
    X1 = x1
    X2 = x2
    z = hypothesis(W0,W1,W2,X0,X1,X2)

class z3:
    W0 = w3
    W1 = w6
    W2 = w9
    X0 = x0
    X1 = x1
    X2 = x2
    z = hypothesis(W0,W1,W2,X0,X1,X2)

class d0:
    W0 = v1
    W1 = v2
    W2 = v3
    W3 = v4
    Z0 = z0
    Z1 = z1.z
    Z2 = z2.z
    Z3 = z3.z
    z = hypothesis1(W0,W1,W2,W3,Z0,Z1,Z2,Z3)

print(z1.z)
The error only seems to occur with the class d0. Can someone please explain why it is happening and how I could fix it? I have tried completely commenting it out, but it still comes back with an error like the one above. When I try to print the other classes' z values, though, they come back as None.
The functions
def hypothesis(W0,W1,W2,X0,X1,X2):
    W0 * X0 + W1 * X1 + W2 * X2

def hypothesis1(W0,W1,W2,W3,Z0,Z1,Z2,Z3):
    (W0 * Z0) + (W1 * Z1) + (W2 * Z2) + (W3 * Z3)
currently do not return anything explicitly, so they implicitly return None. Since you assign this return value to z in classes z1, z2 and z3, when you later try to access these in class d0 via z1.z, for example, you get a None value and your multiplication in hypothesis1 will raise the error you see.
Instead, you probably mean to return the result of the calculations, i.e.:
def hypothesis(W0,W1,W2,X0,X1,X2):
    return W0 * X0 + W1 * X1 + W2 * X2

def hypothesis1(W0,W1,W2,W3,Z0,Z1,Z2,Z3):
    return (W0 * Z0) + (W1 * Z1) + (W2 * Z2) + (W3 * Z3)
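As a quick generic illustration (not code from the question): a function whose body only evaluates an expression discards the result and returns None, so arithmetic on its return value fails in exactly this way:

def f(x):
    x * 2            # computed, then thrown away

print(f(3))          # None
print(3 * f(3))      # TypeError: unsupported operand type(s) for *: 'int' and 'NoneType'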

I need help figuring out how to get the code to give a triple gaussian distribution

So the purpose of my code is to use inputted data points to produce a Gaussian distribution plot. I figured out how to make it work with a double Gaussian, but I'm having a lot of trouble adding a third. I'm not quite sure what I'm doing wrong. One of the errors I keep getting is an IndexError saying that the list index is out of range. I would appreciate any help with this.
Here's my code:
from pylab import *
import numpy as np
from numpy import loadtxt
from scipy.optimize import leastsq
from scipy.optimize import least_squares
from scipy.stats import iqr
import math
import matplotlib.pyplot as plt
import sys

matplotlib.rcParams['mathtext.default'] = 'regular'

fitfunc_triple = lambda p, x: np.abs(p[0]) * exp(-0.5 * ((x - p[1]) / p[2]) ** 2) + np.abs(p[3]) * exp(
    -0.5 * ((x - p[4]) / p[5]) ** 2) + np.abs(p[6]) * exp(-0.5 * ((x - p[7])/ p[8] **2 ))
fitfunc_double = lambda p, x: np.abs(p[0]) * exp(-0.5 * ((x - p[1]) / p[2]) ** 2) + np.abs(p[3]) * exp(
    -0.5 * ((x - p[4]) / p[5]) ** 2)
fitfunc_single = lambda p, x: np.abs(p[0]) * exp(-0.5 * ((x - p[1]) / p[2]) ** 2)
errfunc = lambda p, x, y: (y - fitfunc(p, x))

dataR = np.loadtxt("/Users/Safi/Library/Preferences/PyCharmCE2018.1/scratches/rspecial1385.a2261.dat5", skiprows=0)
RA = dataR[:, 0]
DEC = dataR[:, 1]
VELR = dataR[:, 2]
REDSH = dataR[:, 3]
RADD = dataR[:, 4]
sl = 3E5
zbar = np.mean(REDSH)
vc = zbar * sl
VEL = vc + sl * ((REDSH - zbar) / (1 + zbar))
wdith = 200
iters = 10
sig2 = 500
binN = int(math.ceil((np.max(VEL) - np.min(VEL)) / wdith))
sys.stdout = open(str(wdith) + "_1sigma_" + str(iters) + "_sig2_" + str(sig2) + ".txt", "w")
plt.figure(1)
y, x, _ = plt.hist(VEL, binN, alpha=0.5, label='data')
x = (x[1:] + x[:-1]) / 2  # for len(x)==len(y)
data = np.vstack((x, y)).T
xdata = data[:, 0]
ydata = data[:, 1]
yerr = ydata ** 0.5
init = [10, 69500, 1200, 5, 68000, sig2]
bds = ([0, 66000, 800, 0, 66000, sig2], [50, 70000, 1750, 20, 70000, sig2 + 0.01])

def index_outlier(data):
    inter_quart = iqr(data) * 1.5
    bd2 = np.percentile(data, 75) + inter_quart
    bd1 = np.percentile(data, 25) - inter_quart
    index = []
    for i in [i for i, x in enumerate(data) if x < bd1 or x > bd2]:
        index.append(i)
    return (index)

#### Bootstrapping Estimation Function ####
def fit_bootstrap(fitfunc, datax, datay, init, bds, sigma, iterations=iters):
    errfunc = lambda p, x, y: (y - fitfunc(p, x))
    # Fit first time
    pfit = least_squares(errfunc, init, bounds=bds, args=(datax, datay), max_nfev=10000)
    model = fitfunc(pfit.x, datax)
    residuals = pfit.fun
    # Random data sets are generated and fitted
    ps = []
    for i in range(iterations):
        randomdataY = []
        for k in range(len(sigma)):
            randomDelta = np.random.normal(0., sigma[k], 1)
            randomdataY.append(datay[k] + randomDelta)
        out = np.concatenate(randomdataY)
        randomfit = least_squares(errfunc, init, bounds=bds, args=(datax, out))
        ps.append(randomfit.x)
    # Removing outliers
    # Finding outliers and indexing them
    master_list = []
    indexed = []
    for k in range(len(ps[0])):  # 0-6
        it = []
        for i in range(len(ps)):  # 0-1000
            it.append(ps[i][k])
        master_list.append(it)
        # indexed.append(index_outlier(master_list[k]))
    # # List of outlier indicies
    # flat_list=[item for sublist in indexed for item in sublist]
    # no_dups= list(set(flat_list))
    # # Removing bad fits
    # for k in range(len(master_list)):
    #     for i in sorted(no_dups,reverse=True):
    #         del master_list[k][i]
    pfit_bootstrap = []
    perr_bootstrap = []
    for i in master_list:
        pfit_bootstrap.append(np.median(i))
        perr_pos = np.round(np.percentile(i, 84) - np.median(i), 4)
        perr_neg = np.round(np.median(i) - np.percentile(i, 16), 4)
        perr_bootstrap.append(str('[+') + str(perr_pos) + str(',-') + str(perr_neg) + str(']'))
    return (pfit_bootstrap, perr_bootstrap, residuals, pfit.nfev, master_list)

pfit, perr, residuals, nfev, master_list = fit_bootstrap(fitfunc_double, xdata, ydata, init, bds, yerr)
pfit1, perr1, residuals1, nfev1, master_list1 = fit_bootstrap(fitfunc_single, xdata, ydata, init, bds, yerr)
more_data = np.linspace(np.min(xdata), np.max(xdata), 1000)
real_func = fitfunc_double(pfit, more_data)
real_func1 = fitfunc_single(pfit1, more_data)

######## Saving Coefficients #########
A1 = pfit[0]
m1 = pfit[1]
s1 = pfit[2]
A2 = pfit[3]
m2 = pfit[4]
s2 = pfit[5]
A3 = pfit[6]
m3 = pfit[7]
s3 = pfit[8]
pecp = VEL - vc
m1p = m1 - vc
m2p = m2 - vc
m3p = m3 - vc
xdatap = xdata - vc
plt.figure(6)
plt.hist(pecp, binN, alpha=.5, label='data', color='skyblue')
xhmax = np.amax(pecp + 1500)
xhmin = np.amin(pecp - 1500)
xh = np.linspace(xhmin, xhmax, 50)
# yh1=(mlab.normpdf(xh, c[1], c[2]))
yh1 = np.abs(A1) * exp(-0.5 * (((xh - m1p) / (s1)) ** 2))
yh2 = np.abs(A2) * exp(-0.5 * (((xh - m2p) / (s2)) ** 2))
yh3 = np.abs(A3) * exp(-0.5 * (((xh - m3p) / (s3)) ** 2))
plt.plot(xh, yh1, color='b', linewidth=2)
plt.plot(xh, yh2, color='r', linewidth=2)
plt.plot(xh, yh3, color='g', linewidth=2)
plt.plot(xh, yh1 + yh2 + yh3, color='purple', linewidth=3)
# plt.errorbar(xdatap,y,xerr=wdith/2,ls='none', yerr=yerr,color='k',linewidth=2)
# plt.plot(xdatap, ydata,'.',color='k')
plt.ylim(0, np.max(ydata) + 2)
plt.xlabel('Peculiar Velocity (km/s)')
plt.ylabel('N$_{gal}$')
plt.text(-4800, 15, '$\mu_{2}$-$\mu_{1}$ = ' + str(int(m2 - m1)) + ' km/s')
plt.savefig(str(wdith) + "_1sigma_" + str(iters) + "_sig2_" + str(sig2) + "_hist.ps")
divi = -1800
memlow = np.array([[0 for x in range(2)] for y in range(1)])
memhigh = np.array([[0 for x in range(2)] for y in range(1)])
j = 0
k = 0
plt.show()
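A likely source of the IndexError, judging from the code above rather than from running it: pfit comes from fit_bootstrap(fitfunc_double, ...) with a 6-element init, so it only has indices 0-5 and A3 = pfit[6] is out of range. A genuine three-component fit needs a model, starting values and bounds with 9 entries each. Here is a minimal sketch with hypothetical numbers (note the third term squares the whole (x - p[7]) / p[8] quotient, unlike the fitfunc_triple above):

import numpy as np
from scipy.optimize import least_squares

# three-component Gaussian model: (amplitude, mean, sigma) per component
fitfunc_triple = lambda p, x: (
    np.abs(p[0]) * np.exp(-0.5 * ((x - p[1]) / p[2]) ** 2)
    + np.abs(p[3]) * np.exp(-0.5 * ((x - p[4]) / p[5]) ** 2)
    + np.abs(p[6]) * np.exp(-0.5 * ((x - p[7]) / p[8]) ** 2))
errfunc3 = lambda p, x, y: y - fitfunc_triple(p, x)

# hypothetical starting values and bounds -- 9 entries each, adjust to your data
init3 = [10, 69500, 1200, 5, 68000, 500, 5, 70500, 500]
bds3 = ([0, 66000, 800, 0, 66000, 300, 0, 66000, 300],
        [50, 70000, 1750, 20, 70000, 800, 20, 72000, 800])

# xdata, ydata as built from the histogram above
# pfit3 = least_squares(errfunc3, init3, bounds=bds3, args=(xdata, ydata), max_nfev=10000)
# A1, m1, s1, A2, m2, s2, A3, m3, s3 = pfit3.x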

Calculate nonlinear model of vibration control

I am studying automatic control and have a Matlab/Simulink model of nonlinear vibration. I am trying to solve the same problem using Python instead.
Here is a schema. The black frame is the part that I already have. I also have a model of the MR damper ("Model tłumnika MR", i.e. a magnetorheological damper). I just don't know how to connect them together.
Any suggestions on how to solve this, including modifying the model, are welcome.
Code for part 1 (the black frame) [just pseudocode]:
# NB: the imports and example constants below are assumptions added so the
# pseudocode runs; k, m, t, Ampituda and freqs were not defined in the original.
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt

def rms(v):  # root mean square
    return np.sqrt(np.mean(np.square(v)))

def transmissibility(_in, _out):
    # Transmissibility (T) = output/input
    return rms(_out) / rms(_in)

def q(ss, t, Ampituda, freqs):
    y2 = []
    for f in freqs:
        u = Ampituda * np.sin(2 * np.pi * f * t)  # input vector
        t1, y1, x1 = signal.lsim(ss, u, t)        # evaluate the model in the time domain
        y2.append(transmissibility(u, y1))
    return y2

# example values (assumed, not from the original question)
m = 100.0        # mass [kg]
k = 1.5e4        # stiffness [N/m]
Ampituda = 0.01  # excitation amplitude
freqs = np.arange(0.5, 20, 0.5)  # excitation frequencies [Hz]
t = np.arange(0, 10, 0.001)      # time vector [s]

vec_c = np.arange(1000, 6000, 1000)
for c in vec_c:
    A = [[0, 1], [-(k/m), -(c/m)]]
    B = [[-c/m], [(k/m) - (c/m)**2]]
    C = [1, 0]
    D = 0
    ss = signal.StateSpace(A, B, C, D)  # state-space model
    y2 = q(ss, t, Ampituda, freqs)
    plt.plot(freqs, y2, label=r'$c = {} \frac{}{} $'.format(c, "{Ns}", "{m}"), linewidth=2.0)
plt.legend()
Code for the MR damper (Model tłumnika MR) [copy-paste example]:
from scipy import signal
import numpy as np

def I(to_intagrate, dt, y0=0):
    # simple cumulative (Euler) integration
    i = [y0]
    for v in to_intagrate:
        i.append(i[-1] + v * dt)
    del i[0]
    return i

def MR(v, i, dt):
    """ v -- velocity (z' - w') vector
        i -- current in amperes
        returns the force generated by the damper, in N
        Slide 18
        http://home.agh.edu.pl/~jastrzeb/images/SSD/SSD_2DOF_v1.pdf
    """
    b1 = 3415.7
    b2 = 93.324
    b3 = 74.487
    F0 = b1 * (i**2) + b2 * i + b3
    b4 = 2534.1
    b5 = 19.55
    b6 = 643.1
    C1 = b4 * (i**2) + b5*i + b6
    beta = 50
    p1 = 4
    p2 = 0.2
    x = I(v, dt)
    Ft = []
    for x1, v1 in zip(x, v):
        part1 = F0 * np.tanh(beta * (v1 + (p1 * x1)))
        part2 = C1 * (v1 + (p2 * x1))
        Ft.append(part1 + part2)
    return Ft, x

import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (20, 16)
plt.rcParams['font.size'] = 18

for f in [2, 5]:  # selected frequencies
    # f = 5
    i = 0.2
    # x_sin: vector of x values for a sinusoidal excitation, 201 points
    # per sine period. Operating point starts at -0.2
    x_sin = np.linspace(-np.pi/2, (np.pi * f) - np.pi/2, num=201 * f)
    u = np.sin(x_sin) * 0.2  # rescale the amplitude
    dt = 1/(f*201)
    ft, x = MR(u, i, dt)  # force
    plt.plot(u, ft, label='Freq = {}Hz, I={}A'.format(f, i))

plt.legend()
plt.grid()
plt.show()
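As for connecting the two parts, here is one possible approach as a rough 1-DOF sketch (my own assumption, not the original 2-DOF Simulink model): drop the constant damping c from the state-space model and integrate the equation of motion step by step, computing the damper force from the MR formulas above at every step.

import numpy as np

# assumed example parameters (not from the original model)
m, k = 100.0, 1.5e4      # mass [kg], stiffness [N/m]
i_current = 0.2          # damper current [A]
b1, b2, b3 = 3415.7, 93.324, 74.487
b4, b5, b6 = 2534.1, 19.55, 643.1
beta, p1, p2 = 50, 4, 0.2
F0 = b1 * i_current**2 + b2 * i_current + b3
C1 = b4 * i_current**2 + b5 * i_current + b6

dt = 1e-4
t = np.arange(0, 5, dt)
w = 0.01 * np.sin(2 * np.pi * 2 * t)   # base excitation, assumed 2 Hz, 1 cm amplitude
wd = np.gradient(w, dt)                # base velocity

z, zd = 0.0, 0.0                       # mass displacement and velocity
x_mr = 0.0                             # internal displacement state of the damper
z_hist = np.empty_like(t)
for n in range(len(t)):
    v_rel = wd[n] - zd                 # relative velocity across the damper
    x_mr += v_rel * dt
    F_mr = F0 * np.tanh(beta * (v_rel + p1 * x_mr)) + C1 * (v_rel + p2 * x_mr)
    zdd = (-k * (z - w[n]) + F_mr) / m  # equation of motion of the sprung mass
    zd += zdd * dt
    z += zd * dt
    z_hist[n] = z

# the transmissibility() helper from part 1 can then compare output to input:
# T = transmissibility(w, z_hist)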
