from pybrain.structure import FeedForwardNetwork
from pybrain.structure import LinearLayer, SigmoidLayer
from pybrain.structure import FullConnection
from pybrain.datasets import SupervisedDataSet
import numpy as np
X = np.loadtxt('xdatanorm.txt', dtype=float)
y = np.loadtxt('ydatanorm.txt', dtype=float)
n = FeedForwardNetwork()
inLayer = LinearLayer(35)
hiddenLayer = SigmoidLayer(18)
outLayer = LinearLayer(1)
n.addInputModule(inLayer)
n.addModule(hiddenLayer)
n.addOutputModule(outLayer)
in_to_hidden = FullConnection(inLayer, hiddenLayer)
hidden_to_out = FullConnection(hiddenLayer, outLayer)
n.addConnection(in_to_hidden)
n.addConnection(hidden_to_out)
n.sortModules()
DS = SupervisedDataSet(35,1)
DS.addSample(X, y)  # this line raises the broadcast ValueError quoted below
I'm starting to use pybrain to get a neural network working on my diffusion energy data, but I don't know how to build a dataset from my X and y values. X has 35 inputs, y has 1 output, and there are 148 samples. With this code I get the error: "ValueError: could not broadcast input array from shape (148,35) into shape (35)"
I need to know how to properly prepare a dataset for pybrain.
I believe the .addSample() method expects one sample at a time. Rather than using .addSample(), try:
assert X.shape[0] == y.shape[0]
DS.setField('input', X)
DS.setField('target', y.reshape(-1, 1))  # target field is 2-D: (n_samples, n_outputs)
The assert is recommended because, unlike .addSample(), the .setField() method does not verify array dimensions.
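Alternatively, if you prefer .addSample(), it can be called once per row; a minimal sketch of the same idea:
for xi, yi in zip(X, y):
    DS.addSample(xi, yi)  # one 35-dim input row and its single target value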
See the PyBrain dataset tutorial for more info.
This post is directed at users who have worked with the deepxde library. Could you kindly help me find a solution to my problem?
I tried the DeepONet operator-learning example with the dataset generated from a GRF, from this tutorial website link. Now I want to apply the trained model to a known pair of functions such as cos(x) and sin(x). I'll generate my input field data as x = np.arange(0, 4*np.pi, 0.1) and u = np.cos(x), and then I want to plot the antiderivative of u predicted by the model and compare it with the exact antiderivative.
What additional code should I implement in order to do so?
The following code runs well on my Colab notebook after installing the deepxde library with pip. I just want to extend it and check whether the trained model works on my custom functions, for example cos(x) and sin(x). Thanks for the help!
import deepxde as dde
import matplotlib.pyplot as plt
import numpy as np
# Load dataset
d = np.load("antiderivative_aligned_train.npz", allow_pickle=True)
X_train = (d["X"][0].astype(np.float32), d["X"][1].astype(np.float32))
y_train = d["y"].astype(np.float32)
d = np.load("antiderivative_aligned_test.npz", allow_pickle=True)
X_test = (d["X"][0].astype(np.float32), d["X"][1].astype(np.float32))
y_test = d["y"].astype(np.float32)
data = dde.data.TripleCartesianProd(
    X_train=X_train, y_train=y_train, X_test=X_test, y_test=y_test
)
# Choose a network
m = 100
dim_x = 1
net = dde.nn.DeepONetCartesianProd(
    [m, 40, 40],
    [dim_x, 40, 40],
    "relu",
    "Glorot normal",
)
# Define a Model
model = dde.Model(data, net)
# Compile and Train
model.compile("adam", lr=0.001, metrics=["mean l2 relative error"])
losshistory, train_state = model.train(iterations=10000)
# Plot the loss trajectory
dde.utils.plot_loss_history(losshistory)
plt.show()
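One way to try the trained model on a custom function, sketched under a few assumptions: the tutorial's antiderivative data samples each input function at m = 100 equally spaced sensors on [0, 1], model.predict accepts a (branch, trunk) tuple for the Cartesian-product format, and the sensor grid, evaluation points, and variable names below are illustrative choices of mine rather than fixed by the dataset.
# Sketch: evaluate the trained DeepONet on u(x) = cos(x) (see assumptions above)
x_sensors = np.linspace(0, 1, m).astype(np.float32)          # branch sensor locations, assumed on [0, 1]
u_custom = np.cos(x_sensors)[None, :]                         # branch input, shape (1, m)
y_eval = np.linspace(0, 1, 200)[:, None].astype(np.float32)   # trunk evaluation points, shape (200, 1)
G_pred = model.predict((u_custom, y_eval))                    # predicted antiderivative, shape (1, 200)
plt.plot(y_eval[:, 0], G_pred[0], label="DeepONet prediction")
plt.plot(y_eval[:, 0], np.sin(y_eval[:, 0]), label="exact antiderivative sin(y)")
plt.legend()
plt.show()
Note that the model in this example is trained on functions defined over [0, 1], so evaluating u on x = np.arange(0, 4*np.pi, 0.1) directly would not match the 100 sensor locations the branch network expects; the custom function has to be resampled on the training sensor grid.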
I'm trying to create a keras model with multiple input branches, but keras doesn't like that the inputs have different sizes.
Here is a minimal example:
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers
inputA = layers.Input(shape=(2,))
xA = layers.Dense(8, activation='relu')(inputA)
inputB = layers.Input(shape=(3,))
xB = layers.Dense(8, activation='relu')(inputB)
merged = layers.Concatenate()([xA, xB])
output = layers.Dense(8, activation='linear')(merged)
model = keras.Model(inputs=[inputA, inputB], outputs=output)
a = np.array([1, 2])
b = np.array([3, 4, 5])
model.predict([a, b])
Which results in the error:
ValueError: Data cardinality is ambiguous:
x sizes: 2, 3
Please provide data which shares the same first dimension.
Is there a better way to do this in keras? I've read the other questions referencing the same error, but I'm not really understanding what I need to change.
You need to pass the arrays in the correct format: (n_batch, n_features). A simple reshape is sufficient to add the batch dimension:
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers
inputA = layers.Input(shape=(2,))
xA = layers.Dense(8, activation='relu')(inputA)
inputB = layers.Input(shape=(3,))
xB = layers.Dense(8, activation='relu')(inputB)
merged = layers.Concatenate()([xA, xB])
output = layers.Dense(8, activation='linear')(merged)
model = keras.Model(inputs=[inputA, inputB], outputs=output)
a = np.array([1, 2]).reshape(1, -1)     # shape (1, 2): one sample, two features
b = np.array([3, 4, 5]).reshape(1, -1)  # shape (1, 3): one sample, three features
model.predict([a, b])
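Equivalently, the leading batch dimension can be added with indexing or np.expand_dims; a small sketch (the (1, 8) output shape assumes the model built above):
a = np.array([1, 2])[None, :]                    # shape (1, 2)
b = np.expand_dims(np.array([3, 4, 5]), axis=0)  # shape (1, 3)
pred = model.predict([a, b])                     # pred has shape (1, 8)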
I'm trying to train a dynamic rnn estimator, but can't seem to get the regressor to identify the correct shape of my data.
import random
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn import DynamicRnnEstimator
from tensorflow.contrib.learn.python.learn.estimators.constants import (
    ProblemType,
)
from tensorflow.contrib.learn.python.learn.estimators.rnn_common import (
    PredictionType,
)
from tensorflow.contrib.layers import real_valued_column
X = np.random.uniform(size=(1000, 10))
M = X.shape[0]
N = X.shape[1]
y = np.random.uniform(size=1000)
seq_feat_cols = [real_valued_column(column_name='X', dimension=N)]
rnn = DynamicRnnEstimator(ProblemType.LINEAR_REGRESSION,
                          PredictionType.SINGLE_VALUE,
                          sequence_feature_columns=seq_feat_cols)
def get_batch():
    period_steps = 20
    start = random.randint(0, (M - 1) - period_steps - 1)
    end = start + period_steps
    x_tf = tf.expand_dims(X[start:end], axis=0)
    return {'X': x_tf}, tf.constant(y[start:end])
rnn.fit(input_fn=get_batch, steps=10)
This is yielding:
ValueError: Provided a prefix or suffix of None: 1 and None
I've tried extending the dimension on both sides of my ndarray to no avail; any suggestions would be greatly appreciated!
That ValueError looks like it's because num_units wasn't provided to the constructor of DynamicRnnEstimator. Some other issues:
The input_fn you specify will only be run once! So it should build a TensorFlow graph which either iterates over a dataset or has random TensorFlow operations.
It looks like you have one label per timestep, in which case I think you need MULTIPLE_VALUE rather than SINGLE_VALUE for the prediction type.
The Estimator expects a batch dimension (it can be of size one).
Putting all of that together:
import random
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn import DynamicRnnEstimator
from tensorflow.contrib.learn.python.learn.estimators.constants import (
    ProblemType,
)
from tensorflow.contrib.learn.python.learn.estimators.rnn_common import (
    PredictionType,
)
from tensorflow.contrib.layers import real_valued_column
X = np.random.uniform(size=(1000, 10))
M = X.shape[0]
N = X.shape[1]
y = np.random.uniform(size=1000)
seq_feat_cols = [real_valued_column('X')]
rnn = DynamicRnnEstimator(ProblemType.LINEAR_REGRESSION,
                          PredictionType.MULTIPLE_VALUE,
                          num_units=5,
                          sequence_feature_columns=seq_feat_cols)
def get_batch():
    period_steps = 20
    start = tf.random_uniform(
        shape=(),
        minval=0,
        maxval=(M - 1) - period_steps - 1,
        dtype=tf.int32)
    end = start + period_steps
    x_sliced = tf.constant(X)[None, start:end, :]
    y_sliced = tf.constant(y)[None, start:end]
    x_sliced.set_shape((1, period_steps, N))
    y_sliced.set_shape((1, period_steps))
    return {'X': x_sliced}, y_sliced
rnn.fit(input_fn=get_batch, steps=10)
I am getting an AssertionError:
shapes do not match along axis 0: (107, 13); (0, 535)
while running the following code for classification.
n_samples=535, n_features=13, n_classes=7
Any help would be much appreciated.
import theanets
from sklearn.metrics import confusion_matrix
import scipy.io
import numpy
X = scipy.io.loadmat('berlinFeaturesCAFE.mat')
X_F = X['featureContainer']
X_F_A = numpy.require(X_F, dtype=numpy.float32, requirements=None)
y = scipy.io.loadmat('convertLabel.mat')
y_F = y['xdNew']
cut = int(len(X_F_A) * 0.8) # training / validation split
train = X_F_A[:cut], y_F[:cut]
valid = X_F_A[cut:], y_F[cut:]
net = theanets.Classifier([13, 7])
# Train the model using SGD with momentum.
net.train(train, valid, algo='sgd', learning_rate=1e-4, momentum=0.9)
# Show confusion matrices on the training/validation splits.
for label, (X, y) in (('training:', train), ('validation:', valid)):
    print(label)
    print(confusion_matrix(y, net.predict(X)))
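A guess at the cause, based on the shapes in the error message: (107, 13) is the validation slice of X_F_A, while (0, 535) suggests y_F was loaded with shape (1, 535), so slicing it row-wise with y_F[cut:] yields an empty array. If that is the case, flattening the labels (and casting them to integers, which theanets classifiers expect) before the split would be a fix; the .mat layout here is my assumption, not something stated in the post:
y_F = y['xdNew'].ravel().astype('int32')  # assumed shape (1, 535) -> (535,), integer class labels
cut = int(len(X_F_A) * 0.8)
train = X_F_A[:cut], y_F[:cut]
valid = X_F_A[cut:], y_F[cut:]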
I am making use of pybrain to build a network that has 6 input dimensions and one real valued output dimension. The code I use is shown below:
network = buildNetwork(train.indim, 4, train.outdim)
trainer = BackpropTrainer( network, train)
trainer.trainOnDataset(train, 8000)
print 'MSE train', trainer.testOnData(train, verbose = True)
Here, train is of type Dataset.
I want to get the predictions made in trainer.testOnData() as a numpy array. I am able to view the predicted results along with the error, but I want them as an array. Is there any way this can be done?
Use the activate function of your network:
numpy.array([network.activate(x) for x, _ in train])
Complete example:
from datasets import XORDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised import BackpropTrainer
import numpy
d = XORDataSet()
n = buildNetwork(d.indim, 4, d.outdim, bias=True)
t = BackpropTrainer(n, learningrate=0.01, momentum=0.99, verbose=True)
t.trainOnDataset(d, 1000)
t.testOnData(verbose=True)
print numpy.array([n.activate(x) for x, _ in d])
(Only works in the directory pybrain/examples/supervised/backprop of pybrain because the XORDataSet is required.)
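If the examples directory is not at hand, an equivalent XOR dataset can be built by hand with SupervisedDataSet so the example is self-contained (a small sketch: drop the "from datasets import XORDataSet" import and replace d = XORDataSet() with this):
from pybrain.datasets import SupervisedDataSet

d = SupervisedDataSet(2, 1)   # 2 inputs, 1 target
d.addSample((0, 0), (0,))
d.addSample((0, 1), (1,))
d.addSample((1, 0), (1,))
d.addSample((1, 1), (0,))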