Animate Self-Organizing Map in TensorFlow - python
I found this very helpful blog post on implementing self-organizing maps with TensorFlow. I tried running it on the scikit-learn iris data set and got the result shown in the image below. To see how the SOM evolves, I would like to animate my plot, and this is where I got stuck. I found a basic animation example:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
fig2 = plt.figure()
x = np.arange(-9, 10)
y = np.arange(-9, 10).reshape(-1, 1)
base = np.hypot(x, y)
ims = []
for add in np.arange(15):
    ims.append((plt.pcolor(x, y, base + add, norm=plt.Normalize(0, 30)),))
im_ani = animation.ArtistAnimation(fig2, ims, interval=50, repeat_delay=3000, blit=True)
plt.show()
To animate the SOM I have to edit the train function in som.py, because the training loop is encapsulated there. It looks like this:
def train(self, input_vects):
    """
    Trains the SOM.
    'input_vects' should be an iterable of 1-D NumPy arrays with
    dimensionality as provided during initialization of this SOM.
    Current weightage vectors for all neurons(initially random) are
    taken as starting conditions for training.
    """
    #fig2 = plt.figure()
    #Training iterations
    for iter_no in tqdm(range(self._n_iterations)):
        #Train with each vector one by one
        for input_vect in input_vects:
            self._sess.run(self._training_op,
                           feed_dict={self._vect_input: input_vect,
                                      self._iter_input: iter_no})

    #Store a centroid grid for easy retrieval later on
    centroid_grid = [[] for i in range(self._m)]
    self._weightages = list(self._sess.run(self._weightage_vects))
    self._locations = list(self._sess.run(self._location_vects))
    for i, loc in enumerate(self._locations):
        centroid_grid[loc[0]].append(self._weightages[i])
    #im_ani = animation.ArtistAnimation(fig2, centroid_grid, interval=50, repeat_delay=3000, blit=True)
    self._centroid_grid = centroid_grid

    self._trained = True
    #plt.show()
The commented-out lines are my attempt at implementing the animation, but it doesn't work: in the basic example the ims list contains matplotlib artist objects, whereas in the train function the list is a 4-D NumPy array of raw values, which ArtistAnimation can't use directly.
To sum it up: how can I animate my plot? Thanks for your help in advance.
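For reference, this is roughly the direction I was imagining: keep one snapshot of the centroid grid per iteration inside train(), and only build the matplotlib artists after training is done. This is just an untested sketch, not working code, and the self._frames attribute is a name I made up:

def train(self, input_vects):
    # untested sketch: same loop as in train() above, but keep one snapshot per iteration
    self._frames = []  # made-up attribute: one (m, n, dim) array per iteration
    for iter_no in tqdm(range(self._n_iterations)):
        for input_vect in input_vects:
            self._sess.run(self._training_op,
                           feed_dict={self._vect_input: input_vect,
                                      self._iter_input: iter_no})
        # rebuild the centroid grid for this iteration and store a copy
        centroid_grid = [[] for i in range(self._m)]
        self._weightages = list(self._sess.run(self._weightage_vects))
        self._locations = list(self._sess.run(self._location_vects))
        for i, loc in enumerate(self._locations):
            centroid_grid[loc[0]].append(self._weightages[i])
        # clip to [0, 1] so imshow can later treat the dim=4 case as RGBA
        self._frames.append(np.clip(np.array(centroid_grid), 0.0, 1.0))
    self._centroid_grid = centroid_grid
    self._trained = True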
Here is my full code:
som.py
import tensorflow as tf
import numpy as np
from tqdm import tqdm
import matplotlib.animation as animation
from matplotlib import pyplot as plt
import time
class SOM(object):
    """
    2-D Self-Organizing Map with Gaussian Neighbourhood function
    and linearly decreasing learning rate.
    """

    #To check if the SOM has been trained
    _trained = False
    def __init__(self, m, n, dim, n_iterations=100, alpha=None, sigma=None):
        """
        Initializes all necessary components of the TensorFlow
        Graph.
        m X n are the dimensions of the SOM. 'n_iterations' should
        be an integer denoting the number of iterations undergone
        while training.
        'dim' is the dimensionality of the training inputs.
        'alpha' is a number denoting the initial time(iteration no)-based
        learning rate. Default value is 0.3
        'sigma' is the initial neighbourhood value, denoting
        the radius of influence of the BMU while training. By default, it's
        taken to be half of max(m, n).
        """
        #Assign required variables first
        self._m = m
        self._n = n
        if alpha is None:
            alpha = 0.3
        else:
            alpha = float(alpha)
        if sigma is None:
            sigma = max(m, n) / 2.0
        else:
            sigma = float(sigma)
        self._n_iterations = abs(int(n_iterations))

        ##INITIALIZE GRAPH
        self._graph = tf.Graph()

        ##POPULATE GRAPH WITH NECESSARY COMPONENTS
        with self._graph.as_default():

            ##VARIABLES AND CONSTANT OPS FOR DATA STORAGE

            #Randomly initialized weightage vectors for all neurons,
            #stored together as a matrix Variable of size [m*n, dim]
            self._weightage_vects = tf.Variable(tf.random_normal(
                [m*n, dim]))

            #Matrix of size [m*n, 2] for SOM grid locations
            #of neurons
            self._location_vects = tf.constant(np.array(
                list(self._neuron_locations(m, n))))

            ##PLACEHOLDERS FOR TRAINING INPUTS
            #We need to assign them as attributes to self, since they
            #will be fed in during training

            #The training vector
            self._vect_input = tf.placeholder("float", [dim])
            #Iteration number
            self._iter_input = tf.placeholder("float")

            ##CONSTRUCT TRAINING OP PIECE BY PIECE
            #Only the final, 'root' training op needs to be assigned as
            #an attribute to self, since all the rest will be executed
            #automatically during training

            #To compute the Best Matching Unit given a vector
            #Basically calculates the Euclidean distance between every
            #neuron's weightage vector and the input, and returns the
            #index of the neuron which gives the least value
            bmu_index = tf.argmin(tf.sqrt(tf.reduce_sum(
                tf.pow(tf.subtract(self._weightage_vects, tf.stack(
                    [self._vect_input for i in range(m*n)])), 2), 1)), 0)

            #This will extract the location of the BMU based on the BMU's
            #index
            slice_input = tf.pad(tf.reshape(bmu_index, [1]),
                                 np.array([[0, 1]]))
            bmu_loc = tf.reshape(tf.slice(self._location_vects, slice_input,
                                          tf.constant(np.array([1, 2]))),
                                 [2])

            #To compute the alpha and sigma values based on iteration
            #number
            learning_rate_op = tf.subtract(1.0, tf.div(self._iter_input,
                                                       self._n_iterations))
            _alpha_op = tf.multiply(alpha, learning_rate_op)
            _sigma_op = tf.multiply(sigma, learning_rate_op)

            #Construct the op that will generate a vector with learning
            #rates for all neurons, based on iteration number and location
            #wrt BMU.
            bmu_distance_squares = tf.reduce_sum(tf.pow(tf.subtract(
                self._location_vects, tf.stack(
                    [bmu_loc for i in range(m*n)])), 2), 1)
            neighbourhood_func = tf.exp(tf.negative(tf.div(tf.cast(
                bmu_distance_squares, "float32"), tf.pow(_sigma_op, 2))))
            learning_rate_op = tf.multiply(_alpha_op, neighbourhood_func)

            #Finally, the op that will use learning_rate_op to update
            #the weightage vectors of all neurons based on a particular
            #input
            learning_rate_multiplier = tf.stack([tf.tile(tf.slice(
                learning_rate_op, np.array([i]), np.array([1])), [dim])
                for i in range(m*n)])
            weightage_delta = tf.multiply(
                learning_rate_multiplier,
                tf.subtract(tf.stack([self._vect_input for i in range(m*n)]),
                            self._weightage_vects))
            new_weightages_op = tf.add(self._weightage_vects,
                                       weightage_delta)
            self._training_op = tf.assign(self._weightage_vects,
                                          new_weightages_op)

            ##INITIALIZE SESSION
            self._sess = tf.Session()

            ##INITIALIZE VARIABLES
            init_op = tf.global_variables_initializer()
            self._sess.run(init_op)
    def _neuron_locations(self, m, n):
        """
        Yields one by one the 2-D locations of the individual neurons
        in the SOM.
        """
        #Nested iterations over both dimensions
        #to generate all 2-D locations in the map
        for i in range(m):
            for j in range(n):
                yield np.array([i, j])
    def train(self, input_vects):
        """
        Trains the SOM.
        'input_vects' should be an iterable of 1-D NumPy arrays with
        dimensionality as provided during initialization of this SOM.
        Current weightage vectors for all neurons(initially random) are
        taken as starting conditions for training.
        """
        #fig2 = plt.figure()
        #Training iterations
        for iter_no in tqdm(range(self._n_iterations)):
            #Train with each vector one by one
            for input_vect in input_vects:
                self._sess.run(self._training_op,
                               feed_dict={self._vect_input: input_vect,
                                          self._iter_input: iter_no})

        #Store a centroid grid for easy retrieval later on
        centroid_grid = [[] for i in range(self._m)]
        self._weightages = list(self._sess.run(self._weightage_vects))
        self._locations = list(self._sess.run(self._location_vects))
        for i, loc in enumerate(self._locations):
            centroid_grid[loc[0]].append(self._weightages[i])
        #im_ani = animation.ArtistAnimation(fig2, centroid_grid, interval=50, repeat_delay=3000, blit=True)
        self._centroid_grid = centroid_grid
        #print(centroid_grid)

        self._trained = True
        #plt.show()
    def get_centroids(self):
        """
        Returns a list of 'm' lists, with each inner list containing
        the 'n' corresponding centroid locations as 1-D NumPy arrays.
        """
        if not self._trained:
            raise ValueError("SOM not trained yet")
        return self._centroid_grid
    def map_vects(self, input_vects):
        """
        Maps each input vector to the relevant neuron in the SOM
        grid.
        'input_vects' should be an iterable of 1-D NumPy arrays with
        dimensionality as provided during initialization of this SOM.
        Returns a list of 1-D NumPy arrays containing (row, column)
        info for each input vector(in the same order), corresponding
        to mapped neuron.
        """
        if not self._trained:
            raise ValueError("SOM not trained yet")

        to_return = [self._locations[min([i for i in range(len(self._weightages))],
                                         key=lambda x: np.linalg.norm(vect - self._weightages[x]))]
                     for vect in input_vects]

        return to_return
usage.py
from matplotlib import pyplot as plt
import matplotlib.animation as animation
import numpy as np
from som import SOM
from sklearn.datasets import load_iris
data = load_iris()
flower_data = data['data']
normed_flower_data = flower_data / flower_data.max(axis=0)
target_int = data['target']
target_names = data['target_names']
targets = [target_names[i] for i in target_int]
#Train a 25x25 SOM with 100 iterations
som = SOM(25, 25, 4, 100) # My parameters
som.train(normed_flower_data)
#Get output grid
image_grid = som.get_centroids()
#Map colours to their closest neurons
mapped = som.map_vects(normed_flower_data)
#Plot
plt.imshow(image_grid)
plt.title('SOM')
for i, m in enumerate(mapped):
    plt.text(m[1], m[0], targets[i], ha='center', va='center',
             bbox=dict(facecolor='white', alpha=0.5, lw=0))
plt.show()
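If train() collected per-iteration snapshots as in the sketch further up (the made-up som._frames attribute), I imagine the usage script could then build the animation roughly like this; again an untested sketch rather than working code:

fig = plt.figure()
ims = []
for frame in som._frames:
    # each frame is an (m, n, dim) array; with dim=4 imshow interprets it as RGBA
    # ArtistAnimation expects a sequence of artist lists, so wrap each image in a list
    ims.append([plt.imshow(frame, animated=True)])
im_ani = animation.ArtistAnimation(fig, ims, interval=50, repeat_delay=3000, blit=True)
plt.show()

Is collecting the frames during training and converting them to imshow artists afterwards the right approach, or is there a more idiomatic way to animate the SOM?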