I am trying to introduce myself to MCMC sampling with emcee. I want to simply draw samples from a Maxwell-Boltzmann distribution using the example code on GitHub, https://github.com/dfm/emcee/blob/master/examples/quickstart.py.
The example code is really excellent, but when I change the distribution from a Gaussian to a Maxwellian, I receive the error, TypeError: lnprob() takes exactly 2 arguments (3 given)
However, as far as I can tell, it is never called anywhere without the appropriate parameters. I need some guidance on how to define a Maxwellian curve and fit it into this example code.
Here is what I have:
from __future__ import print_function
import numpy as np
import emcee
try:
    xrange
except NameError:
    xrange = range

def lnprob(x, a, icov):
    pi = np.pi
    return np.sqrt(2/pi)*x**2*np.exp(-x**2/(2.*a**2))/a**3
ndim = 2
means = np.random.rand(ndim)
cov = 0.5-np.random.rand(ndim**2).reshape((ndim, ndim))
cov = np.triu(cov)
cov += cov.T - np.diag(cov.diagonal())
cov = np.dot(cov,cov)
icov = np.linalg.inv(cov)
nwalkers = 50
p0 = [np.random.rand(ndim) for i in xrange(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=[means, icov])
pos, prob, state = sampler.run_mcmc(p0, 5000)
sampler.reset()
sampler.run_mcmc(pos, 100000, rstate0=state)
Thanks
I think there are a couple of problems here. The main one is that emcee wants you to give it the natural logarithm of the probability distribution function that you want to sample. So, rather than having:
def lnprob(x, a, icov):
    pi = np.pi
    return np.sqrt(2/pi)*x**2*np.exp(-x**2/(2.*a**2))/a**3
you would instead want, e.g.
def lnprob(x, a):
    pi = np.pi
    if x < 0:
        return -np.inf
    else:
        return 0.5*np.log(2./pi) + 2.*np.log(x) - (x**2/(2.*a**2)) - 3.*np.log(a)
where the if...else... statement is to explicitly say that negative values of x have zero probability (or -infinity in log-space).
You also shouldn't have to calculate icov and pass it to lnprob as that's only needed for the Gaussian case in the example you link to.
When you call:
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=[means, icov])
the args value should just be any additional arguments that your lnprob function requires, so in your case this would be the value of a that you want to set your Maxwell-Boltzmann distribution up with. This should be a single value rather than the two randomly initialised values you set when creating means.
Overall, the following should work for you:
from __future__ import print_function
import emcee
import numpy as np
from numpy import pi as pi
# define the natural log of the Maxwell-Boltzmann distribution
def lnprob(x, a):
    if x < 0:
        return -np.inf
    else:
        return 0.5*np.log(2./pi) + 2.*np.log(x) - (x**2/(2.*a**2)) - 3.*np.log(a)
# choose a value of 'a' for the distributions
a = 5. # I'm choosing 5!
# choose the number of walkers
nwalkers = 50
# set some initial points at which to calculate the lnprob
p0 = [np.random.rand(1) for i in range(nwalkers)]
# initialise the sampler
sampler = emcee.EnsembleSampler(nwalkers, 1, lnprob, args=[a])
# Run 5000 steps as a burn-in.
pos, prob, state = sampler.run_mcmc(p0, 5000)
# Reset the chain to remove the burn-in samples.
sampler.reset()
# Starting from the final position in the burn-in chain, sample for 100000 steps.
sampler.run_mcmc(pos, 100000, rstate0=state)
# lets check the samples look right
mbmean = 2.*a*np.sqrt(2./pi) # mean of Maxwell-Boltzmann distribution
print("Sample mean = {}, analytical mean = {}".format(np.mean(sampler.flatchain[:,0]), mbmean))
mbstd = np.sqrt(a**2*(3*np.pi-8.)/np.pi) # std. dev. of M-B distribution
print("Sample standard deviation = {}, analytical = {}".format(np.std(sampler.flatchain[:,0]), mbstd))
Related
I am trying to fit a complex conductivity model (the Drude-Smith-Anderson model) using lmfit.minimize. In that fitting, I want constraints on my parameters c and c1 such that 0<c<1, -1<c1<0 and 0<1+c1-c<1. So, I am using the following code:
#reference: Juluri B.K. "Fitting Complex Metal Dielectric Functions with Differential Evolution Method". http://juluribk.com/?p=1597.
#reference: https://lmfit.github.io/lmfit-py/fitting.html
#import libraries (numdifftools needs to be installed but doesn't need to be imported)
import matplotlib.pyplot as plt
import numpy as np
import lmfit as lmf
import math as mt
#define the complex conductivity model
def model(params, w):
    sigma0 = params["sigma0"].value
    tau = params["tau"].value
    c = params["c"].value
    d = params["d"].value
    c1 = params["c1"].value
    druidanderson = (sigma0/(1 - 1j*2*mt.pi*w*tau))*(1 + c1/(1 - 1j*2*mt.pi*w*tau)) - sigma0*c/(1 - 1j*2*mt.pi*w*d*tau)
    return druidanderson

#defining the complex residues (chi squared is sum of squares of residues)
def complex_residuals(params, w, exp_data):
    delta = model(params, w)
    residual = (abs((delta.real - exp_data.real) / exp_data.real)
                + abs((delta.imag - exp_data.imag) / exp_data.imag))
    return residual
# importing data from CSV file
importpath = input("Path of CSV file: ") #Asking the location of where your data file is kept (give input in form of path\name.csv)
frequency = np.genfromtxt(rf"{importpath}",delimiter=",", usecols=(0)) #path to be changed to the file from which data is taken
conductivity = np.genfromtxt(rf"{importpath}",delimiter=",", usecols=(1)) + 1j*np.genfromtxt(rf"{importpath}",delimiter=",", usecols=(2)) #path to be changed to the file from which data is taken
frequency = frequency[np.logical_not(np.isnan(frequency))]
conductivity = conductivity[np.logical_not(np.isnan(conductivity))]
w_for_fit = frequency
eps_for_fit = conductivity
#defining the bounds and initial guesses for the fitting parameters
params = lmf.Parameters()
params.add("sigma0", value = float(input("Guess for \u03C3\u2080: ")), min =10 , max = 5000) #bounds have to be changed manually
params.add("tau", value = float(input("Guess for \u03C4: ")), min = 0.0001, max =10) #bounds have to be changed manually
params.add("c1", value = float(input("Guess for c1: ")), min = -1 , max = 0) #bounds have to be changed manually
params.add("constraint", value = float(input("Guess for constraint: ")), min = 0, max=1)
params.add("c", expr="1+c1-constraint", min = 0, max = 1) #bounds have to be changed manually
params.add("d", value = float(input("Guess for \u03C4_1/\u03C4: ")),min = 100, max = 100000) #bounds have to be changed manually
# minimizing the chi square
minimizer_results = lmf.minimize(complex_residuals, params, args=(w_for_fit, eps_for_fit),
                                 method='differential_evolution', strategy='best1bin',
                                 popsize=50, tol=0.01, mutation=(0, 1), recombination=0.9,
                                 seed=None, callback=None, disp=True, polish=True, init='latinhypercube')
lmf.printfuncs.report_fit(minimizer_results, show_correl=False)
As a result for the fit, I get the following output:
sigma0: 3489.38961 (init = 1000)
tau: 1.2456e-04 (init = 0.01)
c1: -0.99816132 (init = -1)
constraint: 0.98138820 (init = 1)
c: 0.00000000 == '1+c1-constraint'
d: 7333.82306 (init = 1000)
These values don't make sense: c should equal 1 + c1 - constraint = -0.97954952, which is negative and therefore outside its allowed range, yet c is reported as 0. How do I fix this issue?
Your code is not runnable. The use of input() is sort of stunning - please do not do that. Write code that is pleasant to read and separates I/O from logic.
To make a floating point residual from a complex array, use complex_array.view(float)
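As a sketch of what that could look like with the function names from the question (just the idea, not a drop-in fix):
def complex_residuals(params, w, exp_data):
    # complex difference between model and data
    delta = model(params, w) - exp_data
    # view the complex array as interleaved (real, imag) floats,
    # giving a 1-D float residual that leastsq-style fitters can use
    return delta.view(float)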
Guessing any parameter value to be at or very close to its limit (here, c) is a very bad idea, likely to make the fit harder.
More to your question, you defined c as "evaluate 1+c1-constraint and then apply the bounds min=0, max=1". That is literally, precisely, and exactly what your
params.add("c", expr="1+c1-constraint", min = 0, max = 1)
means: calculate c as 1+c1-constraint, and then apply the bounds [0, 1]. The code is doing exactly what you told it to do.
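If the intention is "let constraint vary inside [0, 1] and always report c = 1 + c1 - constraint", then one option (a sketch, with illustrative starting values) is to leave the bounds off the derived parameter so its value is not silently clipped:
params.add("c1", value=-0.5, min=-1, max=0)
params.add("constraint", value=0.5, min=0, max=1)
# derived parameter: no min/max here, so the reported value really is
# 1 + c1 - constraint rather than a clipped copy of it
params.add("c", expr="1 + c1 - constraint")
Note that with c1 in (-1, 0) and constraint in (0, 1) this does not by itself guarantee 0 < c < 1, so that condition still needs to be checked on the fitted result.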
Unless you know what you are doing (I suspect maybe not ;)), I would strongly advise doing a fit with the default leastsq method before trying to use differential_evolution. It turns out that differential_evolution is not a very good global fitting method (shgo is generally better, though no "global" solver should be considered as very reliable). But, unless you know that you need such a method, you probably do not.
I would also strongly advise you to plot your data and some models evaluated with what you think are reasonable parameters.
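For example, something along these lines (a minimal sketch reusing model, w_for_fit and eps_for_fit from the question, with made-up trial parameter values):
import matplotlib.pyplot as plt

guess = lmf.Parameters()
guess.add("sigma0", value=1000)
guess.add("tau", value=0.01)
guess.add("c1", value=-0.5)
guess.add("c", value=0.25)
guess.add("d", value=1000)

trial = model(guess, w_for_fit)
plt.plot(w_for_fit, eps_for_fit.real, "o", label="data (real)")
plt.plot(w_for_fit, eps_for_fit.imag, "s", label="data (imag)")
plt.plot(w_for_fit, trial.real, "-", label="model (real)")
plt.plot(w_for_fit, trial.imag, "--", label="model (imag)")
plt.xlabel("frequency")
plt.ylabel("conductivity")
plt.legend()
plt.show()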
I often have to normalize the result of a circular convolution, so I 'borrowed' and modified the following routine:
import numpy as np

def fft_normalize(x):  # normalize a vector x in the complex domain
    c = np.fft.rfft(x)
    # look at the real and imaginary parts as if they were real
    ri = np.vstack([c.real, c.imag])
    # normalize the magnitude of each complex/real pair
    norm = np.linalg.norm(ri, axis=0)
    if np.any(norm == 0): norm[norm == 0] = np.float64(1e-308)  # !fixme
    ri = np.divide(ri, norm)
    c_proj = ri[0, :] + 1j*ri[1, :]
    rv = np.fft.irfft(c_proj, n=x.shape[-1])
    return rv
def fft_convolution(a, b):
    return np.fft.irfft(np.fft.rfft(a) * np.fft.rfft(b))
so that I do this:
fft_normalize(fft_convolution(a,b))
I see in the NumPy docs that there is a 'norm' option. Is this equivalent to what I'm doing? And if yes, which option should I use?
def fft_convolution2(a, b):
    return np.fft.irfft(np.fft.rfft(a) * np.fft.rfft(b), norm='ortho')
When I test it, the results still look better when I use fft_normalize().
Second, I had to add this line, but it does not seem right. Any ideas?
if np.any(norm==0): norm[norm == 0] = np.float64(1e-308) #!fixme
As a side note, in case you know: the NumPy docs mention that NumPy promotes float32 to float64, and that scipy.fftpack does not. Would fftpack therefore be faster? SciPy says fftpack is obsolete, and I can't find this information for the new scipy.fft.
@Cris, are you suggesting I do it this way:
def fft_normalize(x):  # normalize a vector x in the complex domain
    c = np.fft.rfft(x)
    ri = np.vstack([c.real, c.imag])
    norm = np.abs(c)
    if np.any(norm == 0): norm[norm == 0] = MIN  # !fixme
    ri = np.divide(ri, norm)
    c_proj = ri[0, :] + 1j*ri[1, :]
    rv = np.fft.irfft(c_proj, n=x.shape[-1])
    return rv
The norm argument to the FFT functions in NumPy determines whether the transform result is multiplied by 1, 1/N or 1/sqrt(N), with N the number of samples in the array. Normally, the inverse transform is normalized by dividing by N, and the forward transform is not. Specifying "ortho" causes both transforms to be normalized by 1/sqrt(N). Specifying "forward" causes only the forward transform to be normalized by 1/N.
These normalizations are very different from the one you apply, where you normalize each element in the frequency domain.
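A quick numerical check makes the difference concrete (a sketch reusing the fft_normalize and fft_convolution functions from the question):
import numpy as np

a = np.random.rand(8)
b = np.random.rand(8)

plain = np.fft.irfft(np.fft.rfft(a) * np.fft.rfft(b))
ortho = np.fft.irfft(np.fft.rfft(a) * np.fft.rfft(b), norm='ortho')

# norm='ortho' only rescales the whole result by a constant factor (sqrt(N))
print(np.allclose(ortho, plain * np.sqrt(8)))   # True
# fft_normalize rescales each frequency bin to unit magnitude instead,
# so it is not a constant multiple of the plain circular convolution
print(np.allclose(fft_normalize(fft_convolution(a, b)), plain))   # False in general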
I'm trying to use the hpd function but I keep getting an attribute error.
import pymc3 as pm
pm.__version__
>>'3.11.4'
pm.stats.hpd()
>>AttributeError: module 'pymc3.stats' has no attribute 'hpd'
The complete code (properly passing arguments):
import numpy as np
from scipy import stats

def posterior_grid_approx(grid_points=100, success=6, tosses=9):
    """
    """
    # define grid
    p_grid = np.linspace(0, 1, grid_points)
    # define prior
    prior = np.repeat(5, grid_points)  # uniform
    #prior = (p_grid >= 0.5).astype(int)  # truncated
    #prior = np.exp(- 5 * abs(p_grid - 0.5))  # double exp
    # compute likelihood at each point in the grid
    likelihood = stats.binom.pmf(success, tosses, p_grid)
    # compute product of likelihood and prior
    unstd_posterior = likelihood * prior
    # standardize the posterior, so it sums to 1
    posterior = unstd_posterior / unstd_posterior.sum()
    return p_grid, posterior
p_grid, posterior = posterior_grid_approx(grid_points=100, success=6, tosses=9)
samples = np.random.choice(p_grid, p=posterior, size=int(1e4), replace=True)
pm.stats.hpd(samples, alpha=0.5)
>> AttributeError: module 'pymc3.stats' has no attribute 'hpd'
I tried both pm.hpd and pm.stats.hpd with no success.
It looks like this was moved to ArviZ, and the code below seems to work:
import arviz as az
# Calculate HPD credible interval of 95%
ci_95 = az.hdi(posterior_draws, hdi_prob=0.95)
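For the samples array in the question, the old pm.stats.hpd(samples, alpha=0.5) call (where alpha was, as far as I recall, the excluded probability) would correspond to a 50% interval:
import arviz as az

ci_50 = az.hdi(samples, hdi_prob=0.5)
print(ci_50)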
I am trying to run a code that outputs a Gaussian distribution by integrating the 1-D Gaussian distribution equation using Monte Carlo integration. I am trying to use the mcint module. I defined the Gaussian equation and the sampler function that the mcint module uses. I am not sure what the 'measure' part in the mcint function does or what it should be set to. Does anyone know what measure is supposed to be, and how do I know what to set it to?
from matplotlib import pyplot as mp
import numpy as np
import mcint
import random
#f equation
def gaussian(x, x0, sig0, time, var):
    [velocity, diffussion_coeffient] = var
    mu = x0 + (velocity*time)
    sig = sig0 + np.sqrt(2.0*diffussion_coeffient*time)
    return (1/(np.sqrt(2.0*np.pi*(sig**2.0))))*(np.exp((-(x - mu)**2.0)/(2.0*(sig**2.0))))

#random variables that are generated during the integration
def sampler(varinterval):
    while True:
        velocity = random.uniform(varinterval[0][0], varinterval[0][1])
        diffussion_coeffient = random.uniform(varinterval[1][0], varinterval[1][1])
        yield (velocity, diffussion_coeffient)

if __name__ == "__main__":
    x0 = 0
    #ranges for integration
    velocitymin = -3.0
    velocitymax = 3.0
    diffussion_coeffientmin = 0.01
    diffussion_coeffientmax = 0.89
    varinterval = [[velocitymin, velocitymax], [diffussion_coeffientmin, diffussion_coeffientmax]]
    time = 1
    sig0 = 0.05
    x = np.linspace(-20, 20, 120)
    res = []
    for i in np.linspace(-10, 10, 120):
        result, error = mcint.integrate(lambda v: gaussian(i, x0, sig0, time, v),
                                        sampler(varinterval), measure=1, n=1000)
        res.append(result)
    mp.plot(x, res)
    mp.show()
Is this the module you are talking about? If that's the case, the whole source is only 17 lines long (at the time of writing). The relevant line is the last one, which reads:
return (measure*sample_mean, measure*math.sqrt(sample_var/n))
As you can see, the measure argument (whose default value is unity) is used to scale the values returned by the integrate method.
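In the usual Monte Carlo estimate (integral ≈ measure × sample mean of the integrand), measure should therefore be the size of the region your sampler draws from uniformly. For the rectangle of velocities and diffusion coefficients in the question, that would be (a sketch, assuming the uniform sampler shown above):
measure = (velocitymax - velocitymin) * (diffussion_coeffientmax - diffussion_coeffientmin)
result, error = mcint.integrate(lambda v: gaussian(i, x0, sig0, time, v),
                                sampler(varinterval), measure=measure, n=1000)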
I am working to learn PyMC3 and having some trouble. Since there are limited tutorials for PyMC3, I am working from Bayesian Methods for Hackers. I'm trying to port the PyMC2 code to PyMC3 in the Bayesian A/B testing example, with no success. From what I can see, the model isn't taking the observations into account at all.
I've had to make a few changes from the example, as pyMC 3 is quite different, so what should look like this:
import pymc as pm
# The parameters are the bounds of the Uniform.
p = pm.Uniform('p', lower=0, upper=1)
# set constants
p_true = 0.05 # remember, this is unknown.
N = 1500
# sample N Bernoulli random variables from Ber(0.05).
# each random variable has a 0.05 chance of being a 1.
# this is the data-generation step
occurrences = pm.rbernoulli(p_true, N)
print occurrences # Remember: Python treats True == 1, and False == 0
print occurrences.sum()
# Occurrences.mean is equal to n/N.
print "What is the observed frequency in Group A? %.4f" % occurrences.mean()
print "Does this equal the true frequency? %s" % (occurrences.mean() == p_true)
# include the observations, which are Bernoulli
obs = pm.Bernoulli("obs", p, value=occurrences, observed=True)
# To be explained in chapter 3
mcmc = pm.MCMC([p, obs])
mcmc.sample(18000, 1000)
figsize(12.5, 4)
plt.title("Posterior distribution of $p_A$, the true effectiveness of site A")
plt.vlines(p_true, 0, 90, linestyle="--", label="true $p_A$ (unknown)")
plt.hist(mcmc.trace("p")[:], bins=25, histtype="stepfilled", normed=True)
plt.legend()
instead looks like:
import pymc as pm
import random
import numpy as np
import matplotlib.pyplot as plt
with pm.Model() as model:
    # Prior is uniform: all cases are equally likely
    p = pm.Uniform('p', lower=0, upper=1)
    # set constants
    p_true = 0.05  # remember, this is unknown.
    N = 1500
    # sample N Bernoulli random variables from Ber(0.05).
    # each random variable has a 0.05 chance of being a 1.
    # this is the data-generation step
    occurrences = []  # pm.rbernoulli(p_true, N)
    for i in xrange(N):
        occurrences.append((random.uniform(0.0, 1.0) <= p_true))
    occurrences = np.array(occurrences)
    obs = pm.Bernoulli('obs', p_true, observed=occurrences)
    start = pm.find_MAP()
    step = pm.Metropolis()
    trace = pm.sample(18000, step, start)
    pm.traceplot(trace);
    plt.show()
Apologies for the lengthy post but in my adaptation there have been a number of small changes, e.g. manually generating the observations because pm.rbernoulli no longer exists. I'm also not sure if I should be finding the start prior to running the trace. How should I change my implementation to correctly run?
You were indeed close. However, this line:
obs = pm.Bernoulli('obs', p_true, observed=occurrences)
is wrong, as you are just setting a constant value for p (p_true == 0.05). Thus, your random variable p, defined above to have a uniform prior, is not constrained by the likelihood, and your plot shows that you are just sampling from the prior. If you replace p_true with p in your code it should work. Here is the fixed version:
import pymc as pm
import random
import numpy as np
import matplotlib.pyplot as plt
with pm.Model() as model:
    # Prior is uniform: all cases are equally likely
    p = pm.Uniform('p', lower=0, upper=1)
    # set constants
    p_true = 0.05  # remember, this is unknown.
    N = 1500
    # sample N Bernoulli random variables from Ber(0.05).
    # each random variable has a 0.05 chance of being a 1.
    # this is the data-generation step
    occurrences = []  # pm.rbernoulli(p_true, N)
    for i in xrange(N):
        occurrences.append((random.uniform(0.0, 1.0) <= p_true))
    occurrences = np.array(occurrences)
    obs = pm.Bernoulli('obs', p, observed=occurrences)
    start = pm.find_MAP()
    step = pm.Metropolis()
    trace = pm.sample(18000, step, start)
    pm.traceplot(trace);
This worked for me. I generated the observations before initiating the model.
true_p_A = 0.05
true_p_B = 0.04
N_A = 1500
N_B = 750
obs_A = np.random.binomial(1, true_p_A, size=N_A)
obs_B = np.random.binomial(1, true_p_B, size=N_B)
with pm.Model() as ab_model:
    p_A = pm.Uniform('p_A', 0, 1)
    p_B = pm.Uniform('p_B', 0, 1)
    delta = pm.Deterministic('delta', p_A - p_B)
    obs_A = pm.Bernoulli('obs_A', p_A, observed=obs_A)
    obs_B = pm.Bernoulli('obs_B', p_B, observed=obs_B)
with ab_model:
    trace = pm.sample(2000)
pm.traceplot(trace)
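Once the sampling has finished, the delta deterministic gives a convenient summary of the A/B comparison; for example (a sketch, using the trace from the block above):
# fraction of posterior samples in which site A outperforms site B
print("P(p_A > p_B) =", np.mean(trace['delta'] > 0))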
You were very close - you just need to unindent the last two lines, which produce the traceplot. You can think of plotting the traceplot as a diagnostic that should occur after you finish sampling. The following works for me:
import pymc as pm
import random
import numpy as np
import matplotlib.pyplot as plt
with pm.Model() as model:
    # Prior is uniform: all cases are equally likely
    p = pm.Uniform('p', lower=0, upper=1)
    # set constants
    p_true = 0.05  # remember, this is unknown.
    N = 1500
    # sample N Bernoulli random variables from Ber(0.05).
    # each random variable has a 0.05 chance of being a 1.
    # this is the data-generation step
    occurrences = []  # pm.rbernoulli(p_true, N)
    for i in xrange(N):
        occurrences.append((random.uniform(0.0, 1.0) <= p_true))
    occurrences = np.array(occurrences)
    obs = pm.Bernoulli('obs', p_true, observed=occurrences)
    start = pm.find_MAP()
    step = pm.Metropolis()
    trace = pm.sample(18000, step, start)

#Now plot
pm.traceplot(trace)
plt.show()