I am implementing a Personalized Mixture of Multivariate Gaussian Regressions in pymc3 and running into an issue with empty components. After referring to the related PyMC3 mixture model example, I tried implementing the model using univariate normals instead, but I've had some issues there as well.
I've tried several strategies to constrain each component to be non-empty, but each has failed. These are shown in the code below. My specific question is: What is the best way to constrain all components to be non-empty in a mixture of multivariate Gaussians using pymc3?
Note that attempt #1 in the code below comes from the Mixture Model in PyMC3 Example and does not work here.
You can replicate the synthetic data I am using with the function in this gist.
import pymc3 as pm
import numpy as np
import theano
import theano.tensor as T
from scipy import stats
# Extract problem dimensions.
N = X.shape[0] # number of samples
F = X.shape[1] # number of features
pids = I[:, 0].astype(np.int) # primary entity ids
uniq_pids = np.unique(pids) # array of unique primary entity ids
n_pe = len(uniq_pids) # number of primary entities
with pm.Model() as gmreg:
# Init hyperparameters.
a0 = 1
b0 = 1
mu0 = pm.constant(np.zeros(F))
alpha = pm.constant(np.ones(K))
coeff_precisions = pm.constant(1 / X.var(0))
# Init parameters.
# Dirichlet shape parameter, prior on indicators.
pi = pm.Dirichlet(
'pi', a=alpha, shape=K)
# ATTEMPT 1: Make probability of membership for each cluster >= 0.1
# ================================================================
pi_min_potential = pm.Potential(
'pi_min_potential', T.switch(T.min(pi) < .1, -np.inf, 0))
# ================================================================
# The multinomial (and by extension, the Categorical), is a symmetric
# distribution. Using this as a prior for the indicator variables Z
# makes the likelihood invariant under the many possible permutations of
# the indices. This invariance is inherited in posterior inference.
# This invariance model implies unidentifiability and induces label
# switching during inference.
# Resolve by ordering the components to have increasing weights.
# This does not deal with the parameter identifiability issue.
order_pi_potential = pm.Potential(
'order_pi_potential',
T.sum([T.switch(pi[k] - pi[k-1] < 0, -np.inf, 0)
for k in range(1, K)]))
# Indicators, specifying which cluster each primary entity belongs to.
# These are draws from Multinomial with 1 trial.
init_pi = stats.dirichlet.rvs(alpha.eval())[0]
test_Z = np.random.multinomial(n=1, pvals=init_pi, size=n_pe)
as_cat = np.nonzero(test_Z)[1]
Z = pm.Categorical(
'Z', p=pi, shape=n_pe, testval=as_cat)
# ATTEMPT 2: Give infinite negative likelihood to the case
# where any of the clusters have no users assigned.
# ================================================================
# sizes = [T.eq(Z, k).nonzero()[0].shape[0] for k in range(K)]
# nonempty_potential = pm.Potential(
# 'comp_nonempty_potential',
# np.sum([T.switch(sizes[k] < 1, -np.inf, 0) for k in range(K)]))
# ================================================================
# ATTEMPT 3: Add same sample to each cluster, each has at least 1.
# ================================================================
# shared_X = X.mean(0)[None, :]
# shared_y = y.mean().reshape(1)
# X = T.concatenate((shared_X.repeat(K).reshape(K, F), X))
# y = T.concatenate((shared_y.repeat(K), y))
# Add range(K) on to the beginning to include shared instance.
# Z_expanded = Z[pids]
# Z_with_shared = T.concatenate((range(K), Z_expanded))
# pid_idx = pm.Deterministic('pid_idx', Z_with_shared)
# ================================================================
# Expand user cluster indicators to each observation for each user.
pid_idx = pm.Deterministic('pid_idx', Z[pids])
# Construct masks for each component.
masks = [T.eq(pid_idx, k).nonzero() for k in range(K)]
comp_sizes = [masks[k][0].shape[0] for k in range(K)]
# Component regression precision parameters.
beta = pm.Gamma(
'beta', alpha=a0, beta=b0, shape=(K,),
testval=np.random.gamma(a0, b0, size=K))
# Regression coefficient matrix, with coeffs for each component.
W = pm.MvNormal(
'W', mu=mu0, tau=T.diag(coeff_precisions), shape=(K, F),
testval=np.random.randn(K, F) * std)
# The mean of the observations is the result of a regression, with
# coefficients determined by the cluster the sample belongs to.
# Now we have K different multivariate normal distributions.
X = T.cast(X, 'float64')
y = T.cast(y, 'float64')
comps = []
for k in range(K):
mask_k = masks[k]
X_k = X[mask_k]
y_k = y[mask_k]
n_k = comp_sizes[k]
precision_matrix = beta[k] * T.eye(n_k)
comp_k = pm.MvNormal(
'comp_%d' % k,
mu=T.dot(X_k, W[k]), tau=precision_matrix,
observed=y_k)
comps.append(comp_k)
The first two approaches fail to ensure non-empty clusters; attempting to sample results in a LinAlgError:
with gmreg:
step1 = pm.Metropolis(vars=[pi, beta, W])
step2 = pm.ElemwiseCategoricalStep(vars=[Z], values=np.arange(K))
tr = pm.sample(100, step=[step1, step2])
...:
Failed to compute determinant []
---------------------------------------------------------------------------
LinAlgError Traceback (most recent call last)
<ipython-input-2-c7df53f4c6a5> in <module>()
2 step1 = pm.Metropolis(vars=[pi, beta, W])
3 step2 = pm.ElemwiseCategoricalStep(vars=[Z], values=np.arange(K))
----> 4 tr = pm.sample(100, step=[step1, step2])
5
/home/mack/anaconda/lib/python2.7/site-packages/pymc3/sampling.pyc in sample(draws, step, start, trace, chain, njobs, tune, progressbar, model, random_seed)
155 sample_args = [draws, step, start, trace, chain,
156 tune, progressbar, model, random_seed]
--> 157 return sample_func(*sample_args)
158
159
/home/mack/anaconda/lib/python2.7/site-packages/pymc3/sampling.pyc in _sample(draws, step, start, trace, chain, tune, progressbar, model, random_seed)
164 progress = progress_bar(draws)
165 try:
--> 166 for i, strace in enumerate(sampling):
167 if progressbar:
168 progress.update(i)
/home/mack/anaconda/lib/python2.7/site-packages/pymc3/sampling.pyc in _iter_sample(draws, step, start, trace, chain, tune, model, random_seed)
246 if i == tune:
247 step = stop_tuning(step)
--> 248 point = step.step(point)
249 strace.record(point)
250 yield strace
/home/mack/anaconda/lib/python2.7/site-packages/pymc3/step_methods/compound.pyc in step(self, point)
12 def step(self, point):
13 for method in self.methods:
---> 14 point = method.step(point)
15 return point
/home/mack/anaconda/lib/python2.7/site-packages/pymc3/step_methods/arraystep.pyc in step(self, point)
87 inputs += [point]
88
---> 89 apoint = self.astep(bij.map(point), *inputs)
90 return bij.rmap(apoint)
91
/home/mack/anaconda/lib/python2.7/site-packages/pymc3/step_methods/gibbs.pyc in astep(self, q, logp)
38
39 def astep(self, q, logp):
---> 40 p = array([logp(v * self.sh) for v in self.values])
41 return categorical(p, self.var.dshape)
42
/home/mack/anaconda/lib/python2.7/site-packages/pymc3/blocking.pyc in __call__(self, x)
117
118 def __call__(self, x):
--> 119 return self.fa(self.fb(x))
/home/mack/anaconda/lib/python2.7/site-packages/pymc3/model.pyc in __call__(self, *args, **kwargs)
423 def __call__(self, *args, **kwargs):
424 point = Point(model=self.model, *args, **kwargs)
--> 425 return self.f(**point)
426
427 compilef = fastfn
/home/mack/anaconda/lib/python2.7/site-packages/theano/compile/function_module.pyc in __call__(self, *args, **kwargs)
604 self.fn.nodes[self.fn.position_of_error],
605 self.fn.thunks[self.fn.position_of_error],
--> 606 storage_map=self.fn.storage_map)
607 else:
608 # For the c linker We don't have access from
/home/mack/anaconda/lib/python2.7/site-packages/theano/compile/function_module.pyc in __call__(self, *args, **kwargs)
593 t0_fn = time.time()
594 try:
--> 595 outputs = self.fn()
596 except Exception:
597 if hasattr(self.fn, 'position_of_error'):
/home/mack/anaconda/lib/python2.7/site-packages/theano/gof/op.pyc in rval(p, i, o, n)
766 # default arguments are stored in the closure of `rval`
767 def rval(p=p, i=node_input_storage, o=node_output_storage, n=node):
--> 768 r = p(n, [x[0] for x in i], o)
769 for o in node.outputs:
770 compute_map[o][0] = True
/home/mack/anaconda/lib/python2.7/site-packages/theano/tensor/nlinalg.pyc in perform(self, node, (x,), (z,))
267 def perform(self, node, (x,), (z, )):
268 try:
--> 269 z[0] = numpy.asarray(numpy.linalg.det(x), dtype=x.dtype)
270 except Exception:
271 print 'Failed to compute determinant', x
/home/mack/anaconda/lib/python2.7/site-packages/numpy/linalg/linalg.pyc in det(a)
1769 """
1770 a = asarray(a)
-> 1771 _assertNoEmpty2d(a)
1772 _assertRankAtLeast2(a)
1773 _assertNdSquareness(a)
/home/mack/anaconda/lib/python2.7/site-packages/numpy/linalg/linalg.pyc in _assertNoEmpty2d(*arrays)
220 for a in arrays:
221 if a.size == 0 and product(a.shape[-2:]) == 0:
--> 222 raise LinAlgError("Arrays cannot be empty")
223
224
LinAlgError: Arrays cannot be empty
Apply node that caused the error: Det(Elemwise{Mul}[(0, 1)].0)
Inputs types: [TensorType(float64, matrix)]
Inputs shapes: [(0, 0)]
Inputs strides: [(8, 8)]
Inputs values: [array([], shape=(0, 0), dtype=float64)]
Backtrace when the node is created:
File "/home/mack/anaconda/lib/python2.7/site-packages/pymc3/distributions/multivariate.py", line 66, in logp
result = k * T.log(2 * np.pi) + T.log(1./det(tau))
HINT: Use the Theano flag 'exception_verbosity=high' for a debugprint and storage map footprint of this apply node.
...which indicates the component is empty, since the precision matrix has shape (0, 0).
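For intuition, here is a minimal numpy-only sketch (not part of the model, toy values only) of how an empty component ends up handing a (0, 0) precision matrix to that component's MvNormal logp:
import numpy as np
# Toy assignments: observations belong to components 0 and 1; component 2 is empty.
pid_idx = np.array([0, 0, 1, 1])
k = 2
mask_k = np.nonzero(pid_idx == k)[0]   # empty index array
n_k = mask_k.shape[0]                  # 0
precision_matrix = 1.0 * np.eye(n_k)   # shape (0, 0); det() of this is what fails
print(precision_matrix.shape)          # (0, 0)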
The third method actually resolves the empty component issue but gives very strange inference behavior. I selected a burn-in based on traceplots and thinned to every 10th sample. The samples are still highly autocorrelated but much better than without thinning. At this point, I summed the Z values across the samples, and this is what I get:
In [3]: with gmreg:
step1 = pm.Metropolis(vars=[pi, beta, W])
step2 = pm.ElemwiseCategoricalStep(vars=[Z], values=np.arange(K))
tr = pm.sample(1000, step=[step1, step2])
...:
[-----------------100%-----------------] 1000 of 1000 complete in 258.8 sec
...
In [24]: zvals = tr[300::10]['Z']
In [25]: np.array([np.bincount(zvals[:, n]) for n in range(nusers)])
Out[25]:
array([[ 0, 0, 70],
[ 0, 0, 70],
[ 0, 0, 70],
[ 0, 0, 70],
[ 0, 0, 70],
[ 0, 0, 70],
[ 0, 0, 70],
[ 0, 0, 70],
[ 0, 0, 70],
[ 0, 0, 70],
[ 0, 0, 70],
[ 0, 0, 70],
[ 0, 0, 70],
[ 0, 0, 70],
[ 0, 0, 70],
[ 0, 0, 70],
[ 0, 0, 70],
[ 0, 0, 70],
[ 0, 0, 70],
[ 0, 0, 70]])
So for some reason, all of the users are being assigned to the last cluster for every sample.
I have run into a similar problem. Something like this worked for a mixture of multivariate Gaussians model. I can't say whether it's the best approach, but it's the best solution I've found.
pm.Potential('pi_min_potential', T.switch(
T.all(
[pi[i, 0] < 0.1 for i in range(K)]), -np.inf, 0))
The key here is that you need to account for each component weight that falls below your cutoff. Further, you should adjust the shape of your pi distribution, as mentioned in the comments. This will affect your indexing in the T.switch call (on the pi[i,0]).
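For concreteness, here is a minimal sketch of this kind of potential written against a plain 1-d pi of shape (K,); K and the 0.1 cutoff are placeholder values, and the indexing would change to pi[i, 0] if pi is given a 2-d shape as discussed above:
import numpy as np
import pymc3 as pm
import theano.tensor as T

K = 3  # hypothetical number of components

with pm.Model() as sketch:
    pi = pm.Dirichlet('pi', a=np.ones(K), shape=K)
    # One switch per component, so every weight below the cutoff contributes
    # its own -inf penalty instead of only checking the single smallest weight.
    pi_min_potential = pm.Potential(
        'pi_min_potential',
        T.sum([T.switch(pi[i] < 0.1, -np.inf, 0) for i in range(K)]))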
Related
I have this code to estimate a model using a Tobit regression in Python. The code is organized in three parts: data definition, the estimator builder, and estimation.
import numpy as np
from scipy.optimize import minimize
# define the dependent variable and independent variables
X = data.iloc[:, 1:]
y = data.iloc[:, 0]
# Add a column of ones to the independent variables for the constant term
X = np.c_[np.ones(X.shape[0]), X]
# Define the likelihood function for the Tobit model
def likelihood(params, y, X, lower, upper):
beta = params[:-1]
sigma = params[-1]
mu = X @ beta  # linear predictor
prob = (1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-0.5 * ((y - mu) / sigma)**2))
prob[y < lower] = 0
prob[y > upper] = 0
return -np.log(prob).sum()
# Set the initial values for the parameters and the lower and upper bounds for censoring
params_init = np.random.normal(size=X.shape[1] + 1)
bounds = [(None, None) for i in range(X.shape[1])] + [(1e-10, None)]
# Perform the MLE estimation
res = minimize(likelihood, params_init, args=(y, X, 0, 100), bounds=bounds, method='L-BFGS-B')
# Extract the estimated parameters and their standard errors
params = res.x
stderr = np.sqrt(np.diag(res.hess_inv))
# Print the results
print(f'Coefficients: {params[:-1]}')
print(f'Standard Errors: {stderr[:-1]}')
print(f'Sigma: {params[-1]:.4f}')
Why am I getting this error message?
Thank you.
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-245-5f39f416cc07> in <module>
31 # Extract the estimated parameters and their standard errors
32 params = res.x
---> 33 stderr = np.sqrt(np.diag(res.hess_inv))
34
35 # Print the results
/opt/anaconda3/lib/python3.8/site-packages/numpy/core/overrides.py in diag(*args, **kwargs)
/opt/anaconda3/lib/python3.8/site-packages/numpy/lib/twodim_base.py in diag(v, k)
307 return diagonal(v, k)
308 else:
--> 309 raise ValueError("Input must be 1- or 2-d.")
310
311
ValueError: Input must be 1- or 2-d.
EDIT: If you want to look at the type of data I'm dealing with, you can simulate it using these lines of code I just wrote:
data = pd.DataFrame()
# Append 'interview probabilities' for individuals with and without disabilities
interview_prob_disabled = np.random.normal(38.63, 28.72, 619)
interview_prob_enabled = np.random.normal(44.27, 28.19, 542)
interview_prob = np.append(interview_prob_disabled, interview_prob_enabled)
# Clip the variable so it is not negative and does not exceed 100, then round so it is not a float
interview_prob = np.clip(interview_prob, 0, 100)
interview_prob = np.round(interview_prob)
# Add the 'interview probabilities' variable to the dataframe
data['Interview Probabilities'] = interview_prob
# Add other variables such as age, gender, employment status, education, etc.
data['Age'] = np.random.randint(18, 65, size=len(interview_prob))
data['Gender'] = np.random.choice(['Male', 'Female'], size=len(interview_prob))
data['Employment Status'] = np.random.choice(['Employed', 'Unemployed', 'Retired'], size=len(interview_prob))
data['Education Level'] = np.random.choice(['High School', 'College', 'Vocational', 'Graduate School'], size=len(interview_prob))
# Add a 'disability status' variable as a dummy
data['Disability Status'] = np.append(np.repeat('Disabled', 619), np.repeat('Non-disabled', 542))
# Categorical variables
data['Gender'] = data['Gender'].map({'Male': 0, 'Female': 1})
data['Employment Status'] = data['Employment Status'].map({'Employed': 0, 'Unemployed': 1, 'Retired': 2})
data['Education Level'] = data['Education Level'].map({'High School': 0, 'College': 1, 'Vocational': 2, 'Graduate School': 3})
data['Disability Status'] = data['Disability Status'].map({'Disabled': 1, 'Non-disabled': 0})
# Print the df
data
The problem is that your solver, L-BFGS-B, yields a LbfgsInvHessProduct object (a linear operator) from .hess_inv instead of a numpy array (which a solver like BFGS would give).
One solution to your problem would be to use res.hess_inv.todense() instead.
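A minimal sketch of that fix, keeping the rest of your script unchanged (res is the result object returned by the minimize call above):
# L-BFGS-B returns the inverse Hessian as a LbfgsInvHessProduct linear operator,
# so convert it to a dense array before taking the diagonal.
hess_inv_dense = res.hess_inv.todense()
stderr = np.sqrt(np.diag(hess_inv_dense))
print(f'Standard Errors: {stderr[:-1]}')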
My code below is raising the error "AssertionError: Total area is zero in defuzzification!". I'm honestly trying to understand what is wrong, but I'm at a dead end; if anyone has a solution it would be appreciated. The gist of the code below is to use fuzzy logic in combination with VADER to classify whether a text is negative or positive.
x_p = np.arange(0, 1, 0.1)
x_n = np.arange(0, 1, 0.1)
x_op = np.arange(0, 10, 1)
p_lo = fuzz.trimf(x_p, [0, 0, 0.5])
p_md = fuzz.trimf(x_p, [0, 0.5, 1])
p_hi = fuzz.trimf(x_p, [0.5, 1, 1])
n_lo = fuzz.trimf(x_n, [0, 0, 0.5])
n_md = fuzz.trimf(x_n, [0, 0.5, 1])
n_hi = fuzz.trimf(x_n, [0.5, 1, 1])
op_Neg = fuzz.trimf(x_op, [0, 0, 5]) # Scale : Neg Neu Pos
op_Neu = fuzz.trimf(x_op, [0, 5, 10])
op_Pos = fuzz.trimf(x_op, [5, 10, 10])
sid = SentimentIntensityAnalyzer()
sentiment_val=[]
sentiment_doc=[]
for j in range(doclen):
sentiment_doc.append(senti[j])
ss = sid.polarity_scores(tweets[j])
posscore=ss['pos']
negscore=ss['neg']
neuscore=ss['neu']
compoundscore=ss['compound']
print(str(j+1)+" {:-<65} {}".format(tweets[j], str(ss)))
print("\nPositive Score for each tweet :")
if (posscore==1):
posscore=0.9
else:
posscore=round(posscore,1)
print(posscore)
print("\nNegative Score for each tweet :")
if (negscore==1):
negscore=0.9
else:
negscore=round(negscore,1)
print(negscore)
# We need the activation of our fuzzy membership functions at these values.
p_level_lo = fuzz.interp_membership(x_p, p_lo, posscore)
p_level_md = fuzz.interp_membership(x_p, p_md, posscore)
p_level_hi = fuzz.interp_membership(x_p, p_hi, posscore)
n_level_lo = fuzz.interp_membership(x_n, n_lo, negscore)
n_level_md = fuzz.interp_membership(x_n, n_md, negscore)
n_level_hi = fuzz.interp_membership(x_n, n_hi, negscore)
# Now we take our rules and apply them. Rule 1 concerns bad food OR nice.
# The OR operator means we take the maximum of these two.
active_rule1 = np.fmin(p_level_lo, n_level_lo)
active_rule2 = np.fmin(p_level_md, n_level_lo)
active_rule3 = np.fmin(p_level_hi, n_level_lo)
active_rule4 = np.fmin(p_level_lo, n_level_md)
active_rule5 = np.fmin(p_level_md, n_level_md)
active_rule6 = np.fmin(p_level_hi, n_level_md)
active_rule7 = np.fmin(p_level_lo, n_level_hi)
active_rule8 = np.fmin(p_level_md, n_level_hi)
active_rule9 = np.fmin(p_level_hi, n_level_hi)
# Now we apply this by clipping the top off the corresponding output
# membership function with `np.fmin`
n1=np.fmax(active_rule4,active_rule7)
n2=np.fmax(n1,active_rule8)
op_activation_lo = np.fmin(n2,op_Neg)
neu1=np.fmax(active_rule1,active_rule5)
neu2=np.fmax(neu1,active_rule9)
op_activation_md = np.fmin(neu2,op_Neu)
p1=np.fmax(active_rule2,active_rule3)
p2=np.fmax(p1,active_rule6)
op_activation_hi = np.fmin(p2,op_Pos)
op0 = np.zeros_like(x_op)
# Aggregate all three output membership functions together
aggregated = np.fmax(op_activation_lo,
np.fmax(op_activation_md, op_activation_hi))
# Calculate defuzzified result
op = fuzz.defuzz(x_op, aggregated, 'centroid')
output=round(op,2)
op_activation = fuzz.interp_membership(x_op, aggregated, op) # for plot
if 0<(output)<3.33: # R
print("\nOutput after Defuzzification: Negative")
sentiment.append("Negative")
sentiment_val.append('0')
elif 3.34<(output)<10:
print("\nOutput after Defuzzification: Positive")
sentiment.append("Positive")
sentiment_val.append('1')
print("Doc sentiment: " +str(senti[j])+"\n")
The traceback is the following:
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
/var/folders/1c/pf8ljm0n5d7_w36ty_m7hyhw0000gn/T/ipykernel_1538/2987240111.py in <module>
151
152 # Calculate defuzzified result
--> 153 op = fuzz.defuzz(x_op, aggregated, 'centroid')
154 output=round(op,2)
155
~/opt/anaconda3/lib/python3.9/site-packages/skfuzzy/defuzzify/defuzz.py in defuzz(x, mfx, mode)
246 if 'centroid' in mode or 'bisector' in mode:
247 zero_truth_degree = mfx.sum() == 0 # Approximation of total area
--> 248 assert not zero_truth_degree, 'Total area is zero in defuzzification!'
249
250 if 'centroid' in mode:
AssertionError: Total area is zero in defuzzification!
I am trying to use the fmin_l_bfgs_b function in Python to maximize the log-likelihood function below:
def loglik(x0):
p = np.zeros((NCS,1)) #vector to hold the probabilities for each observation
data['v'] = (data.iloc[:, [3,4]]).dot(x0)  # calculate deterministic utility
for i in range(NCS):
vv = data.v[(data.idcase == i + 1)]
vy = data.v[(data.idcase == i + 1) & (data.depvar == 1)]
p[i][0] = np.maximum(np.exp(vy)/ sum(np.exp(vv)),0.00000001)
#print("p", p)
ll = -sum(np.log(p)) #Negative since neg of ll is minimized
return ll
The input data being used is:
data = pd.read_csv("drive/My Drive/example_data.csv") #read data
data.iloc[:, [3,4]] = data.iloc[:, [3,4]]/100 #scale costs
B = np.zeros((1,2)) #give starting values of beta; 1xK vector; 2alternatives so 1x2 vector
NCS = data['idcase'].nunique() # number of choice situations in the dataset
x0 = B.T
estimation
optim2 = fmin_l_bfgs_b(loglik, x0, fprime=None, args=(), approx_grad=0, bounds=None, m=10, factr=10000000.0, pgtol=1e-05, epsilon=1e-08,iprint=0, maxfun=15000, maxiter=15000, disp=None, callback=None)
However, I keep getting this:
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-77-2821f2269a8c> in <module>()
83 print('which is the same as maximizing the log-likelihood.')
84
---> 85 optim2 = fmin_l_bfgs_b(loglik, x0, fprime=None, args=(), approx_grad=0, bounds=None, m=10, factr=10000000.0, pgtol=1e-05, epsilon=1e-08, iprint=0, maxfun=15000, maxiter=15000, disp=None, callback=None)
86
87 print(optim2)
4 frames
/usr/local/lib/python3.6/dist-packages/scipy/optimize/optimize.py in __call__(self, x, *args)
64 self.x = numpy.asarray(x).copy()
65 fg = self.fun(x, *args)
---> 66 self.jac = fg[1]
67 return fg[0]
68
IndexError: index 1 is out of bounds for axis 0 with size 1
Can someone kindly advise me as to what to do? I am quite new to using numerical optimization methods.
Thanks
I am struggling to implement a model where the concentration parameter of the Dirichlet variable depends on another variable.
The situation is the following:
A system fails due to faulty components (there are three components, only one fails at each test/observation).
The probability of failure of the components is dependent on the temperature.
Here is a (commented) short implementation of the situation:
import numpy as np
import pymc3 as pm
import theano.tensor as tt
# Temperature data : 3 cold temperatures and 3 warm temperatures
T_data = np.array([10, 12, 14, 80, 90, 95])
# Data of failures of 3 components : [0,0,1] means component 3 failed
F_data = np.array([[0, 0, 1], \
[0, 0, 1], \
[0, 0, 1], \
[1, 0, 0], \
[1, 0, 0], \
[1, 0, 0]])
n_component = 3
# When temperature is cold : Component 1 fails
# When temperature is warm : Component 3 fails
# Component 2 never fails
# Number of observations :
n_obs = len(F_data)
# The number of failures can be modeled as a Multinomial F ~ M(n_obs, p) with parameters
# - n_test : number of tests (Fixed)
# - p : probability of failure of each component (shape (n_obs, 3))
# The probability of failure of components follows a Dirichlet distribution p ~ Dir(alpha) with parameters:
# - alpha : concentration (shape (n_obs, 3))
# The Dirichlet distributions ensures the probabilities sum to 1
# The alpha parameters (and thus the probability of failures) depend on the temperature: alpha ~ a + b * T
# - a : bias term (shape (1,3))
# - b : describes temperature dependency of alpha (shape (1,3))
# The prior on "a" is a normal distributions with mean 1/2 and std 0.001
# a ~ N(1/2, 0.001)
# The prior on "b" is a normal distribution zith mean 0 and std 0.001
# b ~ N(0, 0.001)
# Coding it all with pymc3
with pm.Model() as model:
a = pm.Normal('a', 1/2, 1/(0.001**2), shape = n_component)
b = pm.Normal('b', 0, 1/(0.001**2), shape = n_component)
# I generate 3 alphas values (corresponding to the 3 components) for each of the 6 temperatures
# I tried different ways to compute alpha but nothing worked out
alphas = pm.Deterministic('alphas', a + b * tt.stack([T_data, T_data, T_data], axis=1))
#alphas = pm.Deterministic('alphas', a + b[None, :] * T_data[:, None])
#alphas = pm.Deterministic('alphas', a + tt.outer(T_data,b))
# I think I should get 3 probabilities (corresponding to the 3 components) for each of the 6 temperatures
#p = pm.Dirichlet('p', alphas, shape = n_component)
p = pm.Dirichlet('p', alphas, shape = (n_obs,n_component))
# Multinomial is observed and take values from F_data
F = pm.Multinomial('F', 1, p, observed = F_data)
with model:
trace = pm.sample(5000)
I get the following error in the sample function:
RemoteTraceback Traceback (most recent call last)
RemoteTraceback:
"""
Traceback (most recent call last):
File "/anaconda3/lib/python3.6/site-packages/pymc3/parallel_sampling.py", line 73, in run
self._start_loop()
File "/anaconda3/lib/python3.6/site-packages/pymc3/parallel_sampling.py", line 113, in _start_loop
point, stats = self._compute_point()
File "/anaconda3/lib/python3.6/site-packages/pymc3/parallel_sampling.py", line 139, in _compute_point
point, stats = self._step_method.step(self._point)
File "/anaconda3/lib/python3.6/site-packages/pymc3/step_methods/arraystep.py", line 247, in step
apoint, stats = self.astep(array)
File "/anaconda3/lib/python3.6/site-packages/pymc3/step_methods/hmc/base_hmc.py", line 117, in astep
'might be misspecified.' % start.energy)
ValueError: Bad initial energy: inf. The model might be misspecified.
"""
The above exception was the direct cause of the following exception:
ValueError Traceback (most recent call last)
ValueError: Bad initial energy: inf. The model might be misspecified.
The above exception was the direct cause of the following exception:
RuntimeError Traceback (most recent call last)
<ipython-input-5-121fdd564b02> in <module>()
1 with model:
2 #start = pm.find_MAP()
----> 3 trace = pm.sample(5000)
/anaconda3/lib/python3.6/site-packages/pymc3/sampling.py in sample(draws, step, init, n_init, start, trace, chain_idx, chains, cores, tune, nuts_kwargs, step_kwargs, progressbar, model, random_seed, live_plot, discard_tuned_samples, live_plot_kwargs, compute_convergence_checks, use_mmap, **kwargs)
438 _print_step_hierarchy(step)
439 try:
--> 440 trace = _mp_sample(**sample_args)
441 except pickle.PickleError:
442 _log.warning("Could not pickle model, sampling singlethreaded.")
/anaconda3/lib/python3.6/site-packages/pymc3/sampling.py in _mp_sample(draws, tune, step, chains, cores, chain, random_seed, start, progressbar, trace, model, use_mmap, **kwargs)
988 try:
989 with sampler:
--> 990 for draw in sampler:
991 trace = traces[draw.chain - chain]
992 if trace.supports_sampler_stats and draw.stats is not None:
/anaconda3/lib/python3.6/site-packages/pymc3/parallel_sampling.py in __iter__(self)
303
304 while self._active:
--> 305 draw = ProcessAdapter.recv_draw(self._active)
306 proc, is_last, draw, tuning, stats, warns = draw
307 if self._progress is not None:
/anaconda3/lib/python3.6/site-packages/pymc3/parallel_sampling.py in recv_draw(processes, timeout)
221 if msg[0] == 'error':
222 old = msg[1]
--> 223 six.raise_from(RuntimeError('Chain %s failed.' % proc.chain), old)
224 elif msg[0] == 'writing_done':
225 proc._readable = True
/anaconda3/lib/python3.6/site-packages/six.py in raise_from(value, from_value)
RuntimeError: Chain 1 failed.
Any suggestions?
Misspecified model: the alphas are taking on nonpositive values under your current parameterization, whereas the Dirichlet distribution requires them to be positive.
In Dirichlet-Multinomial regression, one uses an exponential link function to mediate between the range of the linear model and the domain of the Dirichlet-Multinomial, namely,
alpha = exp(beta*X)
There are details on this in the MGLM package documentation.
Dirichlet-Multinomial Regression Model
If we implement this model we can achieve decent model convergence and sampling.
import numpy as np
import pymc3 as pm
import theano
import theano.tensor as tt
from sklearn.preprocessing import scale
T_data = np.array([10,12,14,80,90,95])
# standardize the data for better sampling
T_data_z = scale(T_data)
# transform to theano tensor, so it works with tt.outer
T_data_z = theano.shared(T_data_z)
F_data = np.array([
[0,0,1],
[0,0,1],
[0,0,1],
[1,0,0],
[1,0,0],
[1,0,0],
])
# N = num_obs, K = num_components
N, K = F_data.shape
with pm.Model() as dmr_model:
a = pm.Normal('a', mu=0, sd=1, shape=K)
b = pm.Normal('b', mu=0, sd=1, shape=K)
alpha = pm.Deterministic('alpha', pm.math.exp(a + tt.outer(T_data_z, b)))
p = pm.Dirichlet('p', a=alpha, shape=(N, K))
F = pm.Multinomial('F', 1, p, observed=F_data)
trace = pm.sample(5000, tune=10000, target_accept=0.9)
Model Outcomes
The sampling in this model isn't perfect. For example, there are still a number of divergences even with the increased target acceptance rate and additional tuning.
There were 501 divergences after tuning. Increase target_accept or reparameterize.
There were 477 divergences after tuning. Increase target_accept or reparameterize.
The acceptance probability does not match the target. It is 0.5858954056820339, but should be close to 0.8. Try to increase the number of tuning steps.
The number of effective samples is smaller than 10% for some parameters.
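If it helps, here is a rough way (a sketch, not part of the original output) to count the post-tuning divergent transitions directly from the PyMC3 trace:
# Count the divergent transitions recorded by NUTS during sampling.
divergent = trace.get_sampler_stats('diverging')
print('Number of divergent transitions: %d' % divergent.nonzero()[0].size)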
Trace Plots
We can see the traces for a and b look good, and the mean locations make sense given the data.
Pair Plot
While correlation is less of a problem for NUTS, having uncorrelated posterior sampling is ideal. For the most part we're seeing low correlation, with some slight structure within the a components.
Posterior Plots
Finally, we can look at the posterior plots of p and confirm they make sense with the data.
Alternative Model
The advantage of the Dirichlet-Multinomial is handling overdispersion. It might be worth trying the simpler Multinomial Logistic Regression / Softmax Regression, since it runs significantly faster and doesn't exhibit any of the sampling problems coming up in the DMR model.
In the end, you could run both and perform model comparison to see if the Dirichlet-Multinomial really is adding explanatory value.
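As a rough sketch of that comparison (assuming the DMR trace above and the trace_sm produced by the softmax model defined below), one option is to compute WAIC for each model; how the scale is reported depends on your PyMC3/ArviZ version:
# Hypothetical comparison once both traces exist.
waic_dmr = pm.waic(trace, dmr_model)
waic_sm = pm.waic(trace_sm, softmax_model)
print(waic_dmr)
print(waic_sm)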
Model
with pm.Model() as softmax_model:
a = pm.Normal('a', mu=0, sd=1, shape=K)
b = pm.Normal('b', mu=0, sd=1, shape=K)
p = pm.Deterministic('p', tt.nnet.softmax(a + tt.outer(T_data_z, b)))
F = pm.Multinomial('F', 1, p, observed = F_data)
trace_sm = pm.sample(5000, tune=10000)
Posterior Plots
I am trying to learn PyMC3 and want to make a simple mixture of Gaussians example. I found this example and want to convert it to pymc3, but I'm currently getting an error when trying to plot the traceplot.
n1 = 500
n2 = 200
n = n1+n2
mean1 = 21.8
mean2 = 42.0
precision = 0.1
sigma = np.sqrt(1 / precision)
# precision = 1/sigma^2
print "sigma1: %s" % sigma1
print "sigma2: %s" % sigma2
data1 = np.random.normal(mean1,sigma,n1)
data2 = np.random.normal(mean2,sigma,n2)
data = np.concatenate([data1 , data2])
#np.random.shuffle(data)
fig = plt.figure(figsize=(7, 7))
ax = fig.add_subplot(111, xlabel='x', ylabel='y', title='mixture of 2 gaussians')
ax.plot(range(0,n1+n2), data, 'x', label='data')
plt.legend(loc=0)
with pm.Model() as model:
#priors
p = pm.Uniform( "p", 0 , 1) #this is the fraction that come from mean1 vs mean2
ber = pm.Bernoulli( "ber", p = p) # produces 1 with proportion p.
precision = pm.Gamma('precision', alpha=0.1, beta=0.1)
mean1 = pm.Normal( "mean1", 0, 0.01 ) #better to use normals versus Uniforms (unless you are certain the value is truncated at 0 and 200)
mean2 = pm.Normal( "mean2", 0, 0.01 )
mean = pm.Deterministic('mean', ber*mean1 + (1-ber)*mean2)
process = pm.Normal('process', mu=mean, tau=precision, observed=data)
# inference
step = pm.Metropolis()
trace = pm.sample(10000, step)
pm.traceplot(trace)
Error:
sigma1: 3.16227766017
sigma2: 1.69030850946
[-----------------100%-----------------] 10000 of 10000 complete in 4.4 sec
---------------------------------------------------------------------------
LinAlgError Traceback (most recent call last)
<ipython-input-10-eb728824de83> in <module>()
44 step = pm.Metropolis()
45 trace = pm.sample(10000, step)
---> 46 pm.traceplot(trace)
/usr/lib/python2.7/site-packages/pymc-3.0-py2.7.egg/pymc/plots.pyc in traceplot(trace, vars, figsize, lines, combined, grid)
70 ax[i, 0].set_xlim(mind - .5, maxd + .5)
71 else:
---> 72 kdeplot_op(ax[i, 0], d)
73 ax[i, 0].set_title(str(v))
74 ax[i, 0].grid(grid)
/usr/lib/python2.7/site-packages/pymc-3.0-py2.7.egg/pymc/plots.pyc in kdeplot_op(ax, data)
94 for i in range(data.shape[1]):
95 d = data[:, i]
---> 96 density = kde.gaussian_kde(d)
97 l = np.min(d)
98 u = np.max(d)
/usr/lib64/python2.7/site-packages/scipy/stats/kde.pyc in __init__(self, dataset, bw_method)
186
187 self.d, self.n = self.dataset.shape
--> 188 self.set_bandwidth(bw_method=bw_method)
189
190 def evaluate(self, points):
/usr/lib64/python2.7/site-packages/scipy/stats/kde.pyc in set_bandwidth(self, bw_method)
496 raise ValueError(msg)
497
--> 498 self._compute_covariance()
499
500 def _compute_covariance(self):
/usr/lib64/python2.7/site-packages/scipy/stats/kde.pyc in _compute_covariance(self)
507 self._data_covariance = atleast_2d(np.cov(self.dataset, rowvar=1,
508 bias=False))
--> 509 self._data_inv_cov = linalg.inv(self._data_covariance)
510
511 self.covariance = self._data_covariance * self.factor**2
/usr/lib64/python2.7/site-packages/scipy/linalg/basic.pyc in inv(a, overwrite_a, check_finite)
381 inv_a, info = getri(lu, piv, lwork=lwork, overwrite_lu=1)
382 if info > 0:
--> 383 raise LinAlgError("singular matrix")
384 if info < 0:
385 raise ValueError('illegal value in %d-th argument of internal '
LinAlgError: singular matrix
Thanks to Fonnesbeck for answering this on the github issue tracker:
https://github.com/pymc-devs/pymc3/issues/452
Here is the updated code:
with pm.Model() as model:
#priors
p = pm.Uniform( "p", 0 , 1) #this is the fraction that come from mean1 vs mean2
ber = pm.Bernoulli( "ber", p = p, shape=len(data)) # produces 1 with proportion p.
sigma = pm.Uniform('sigma', 0, 100)
precision = sigma**-2
mean = pm.Normal( "mean", 0, 0.01, shape=2 )
mu = pm.Deterministic('mu', mean[ber])
process = pm.Normal('process', mu=mu, tau=precision, observed=data)
with model:
step1 = pm.Metropolis([p, sigma, mean])
step2 = pm.BinaryMetropolis([ber])
trace = pm.sample(10000, [step1, step2])
You need to use BinaryMetropolis when inferring a Bernoulli random variable.
And an even simpler and quicker version is as follows:
with pm.Model() as model2:
p = pm.Beta( "p", 1., 1.)
means = pm.Uniform('mean', 15, 60, shape=2)
sigma = pm.Uniform('sigma', 0, 20, testval=5)
process = pm.NormalMixture('obs', tt.stack([p, 1-p]), means, sd=sigma, observed=data)
with model2:
step = pm.Metropolis()
trace = pm.sample(10000, step=step)
I know this issue is old, but I am trying different examples of PyMC3 usage to get used to modeling in PyMC3. The answer as given above does not work in the current version 1.0 of PyMC3 (it does not distinguish the two means correctly). The minimum changes I had to make in order to get it to work were the following:
1)
# mean = pm.Normal("mean", 0, 0.01, shape=2 )
mean = pm.Uniform('mean', 15, 60, shape=2)
2)
# step2 = pm.BinaryMetropolis([ber])
step2 = pm.ElemwiseCategorical(vars=[ber], values=[0, 1])
Just in case anybody else is having a similar problem.
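For reference, a sketch of the earlier answer's model with those two changes applied (everything else, including the data and the sampling setup, is unchanged from the code above):
with pm.Model() as model:
    # priors
    p = pm.Uniform("p", 0, 1)  # fraction coming from mean1 vs mean2
    ber = pm.Bernoulli("ber", p=p, shape=len(data))
    sigma = pm.Uniform('sigma', 0, 100)
    precision = sigma**-2
    # Change 1: bounded uniform prior on the two component means
    mean = pm.Uniform('mean', 15, 60, shape=2)
    mu = pm.Deterministic('mu', mean[ber])
    process = pm.Normal('process', mu=mu, tau=precision, observed=data)

with model:
    step1 = pm.Metropolis([p, sigma, mean])
    # Change 2: ElemwiseCategorical instead of BinaryMetropolis for the indicators
    step2 = pm.ElemwiseCategorical(vars=[ber], values=[0, 1])
    trace = pm.sample(10000, [step1, step2])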