Add a mean-log-likelihood method to improve the ACT estimation

Merged Gregory Ashton requested to merge add-mean-log-like-to-ptemcee into master
All threads resolved!
1 file  +35  −25
@@ -146,7 +146,7 @@ class Ptemcee(MCMCSampler):
         nsamples=5000,
         burn_in_nact=50,
         burn_in_fixed_discard=100,
-        mean_logl_frac=0.01,
+        mean_logl_frac=0.001,
         thin_by_nact=0.5,
         autocorr_tol=50,
         autocorr_c=5,
@@ -475,7 +475,6 @@ class Ptemcee(MCMCSampler):
             frac=self.convergence_inputs.mean_logl_frac,
             discard=self.convergence_inputs.burn_in_fixed_discard
         )
-        print("discard={}".format(self.discard))

         (
             stop,
@@ -599,7 +598,7 @@ class Ptemcee(MCMCSampler):
             plot_mean_log_likelihood(
                 self.mean_log_likelihood,
                 self.outdir,
-                self.label
+                self.label,
             )
@@ -615,9 +614,8 @@ def get_min_iteration_to_check(iteration, mean_log_likelihood, frac=0.01, discard
     if np.sum(idxs) > 0:
         mean_logl_min_it = np.min(np.arange(len(idxs))[idxs])
-        print(discard, mean_logl_min_it)
         min_it = np.max([discard, mean_logl_min_it])
-    if min_it < iteration:
+    if iteration > min_it + 50:
         return min_it
     else:
         return iteration
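For context, the intent of this change is to start the ACT estimation only once the zero-temperature mean log-likelihood has settled onto its plateau, and to require the run to be at least 50 iterations past that point before the reduced window is used. The sketch below is illustrative only: the mask construction for idxs and the name min_iteration_sketch are assumptions, since the diff shows just the tail of the function.

import numpy as np

def min_iteration_sketch(iteration, mean_log_likelihood, frac=0.01, discard=0):
    # mean_log_likelihood has shape (ntemps, nsteps); only the T=0 chain is used
    zero_temp = mean_log_likelihood[0, :]
    maxl = np.max(zero_temp)
    # Assumed mask: steps whose mean log-likelihood is within `frac` of the plateau
    idxs = (maxl - zero_temp) < frac * np.abs(maxl)
    mean_logl_min_it = 0
    if np.sum(idxs) > 0:
        mean_logl_min_it = np.min(np.arange(len(idxs))[idxs])
    min_it = np.max([discard, mean_logl_min_it])
    # Only trust the reduced window once the run is well past it
    if iteration > min_it + 50:
        return min_it
    return iteration

# Toy input: mean log-likelihood climbing to a plateau near -10
logl = np.concatenate([np.linspace(-100.0, -10.0, 200), np.full(800, -10.0)])
print(min_iteration_sketch(1000, logl[None, :], frac=0.001, discard=100))  # -> 199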
@@ -659,22 +657,11 @@ def check_iteration(
     nsamples_effective: int
         The effective number of samples after burning and thinning
     """
-    import emcee
     ci = convergence_inputs
     nwalkers, nsteps, ndim = samples.shape
-    # Compute ACT tau for 0-temperature chains
-    tau_array = np.zeros((nwalkers, ndim))
-    for ii in range(nwalkers):
-        for jj, key in enumerate(search_parameter_keys):
-            if ci.ignore_keys_for_tau and ci.ignore_keys_for_tau in key:
-                continue
-            try:
-                tau_array[ii, jj] = emcee.autocorr.integrated_time(
-                    samples[ii, :, jj], c=ci.autocorr_c, tol=0)[0]
-            except emcee.autocorr.AutocorrError:
-                tau_array[ii, jj] = np.inf
+    tau_array = calculate_tau_array(samples, search_parameter_keys, ci)

     # Maximum over parameters, mean over walkers
     tau = np.max(np.mean(tau_array, axis=0))
@@ -693,7 +680,7 @@ def check_iteration(
     if np.isnan(tau) or np.isinf(tau):
         print_progress(
             iteration, sampler, time_per_check, np.nan, np.nan,
-            np.nan, np.nan, False, convergence_inputs, np.nan,
+            np.nan, np.nan, False, convergence_inputs, Q,
         )
         return False, np.nan, np.nan, np.nan, np.nan
@@ -758,13 +745,16 @@ def check_iteration(
 def get_Q_convergence(samples):
     nwalkers, nsteps, ndim = samples.shape
-    W = np.mean(np.var(samples, axis=1), axis=0)
-    per_walker_mean = np.mean(samples, axis=1)
-    mean = np.mean(per_walker_mean, axis=0)
-    B = nsteps / (nwalkers - 1.) * np.sum((per_walker_mean - mean)**2, axis=0)
-    Vhat = (nsteps - 1) / nsteps * W + (nwalkers + 1) / (nwalkers * nsteps) * B
-    Q_per_dim = np.sqrt(Vhat / W)
-    return np.max(Q_per_dim)
+    if nsteps > 1:
+        W = np.mean(np.var(samples, axis=1), axis=0)
+        per_walker_mean = np.mean(samples, axis=1)
+        mean = np.mean(per_walker_mean, axis=0)
+        B = nsteps / (nwalkers - 1.) * np.sum((per_walker_mean - mean)**2, axis=0)
+        Vhat = (nsteps - 1) / nsteps * W + (nwalkers + 1) / (nwalkers * nsteps) * B
+        Q_per_dim = np.sqrt(Vhat / W)
+        return np.max(Q_per_dim)
+    else:
+        return np.inf


 def print_progress(
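The guard added here returns Q = inf until each walker has more than one step. For reference, the statistic itself has the form of the Gelman-Rubin potential scale reduction factor: W is the mean within-walker variance, B the between-walker variance of the per-walker means, and Q_per_dim = sqrt(Vhat / W). Below is a standalone sketch on synthetic chains (the name gelman_rubin_Q, the shapes, and the values are illustrative only), showing that well-mixed walkers give Q close to 1 while offset walkers do not.

import numpy as np

def gelman_rubin_Q(samples):
    # Same form as get_Q_convergence above: per-dimension sqrt(Vhat / W),
    # reduced to the worst (largest) value over dimensions
    nwalkers, nsteps, ndim = samples.shape
    if nsteps > 1:
        W = np.mean(np.var(samples, axis=1), axis=0)
        per_walker_mean = np.mean(samples, axis=1)
        mean = np.mean(per_walker_mean, axis=0)
        B = nsteps / (nwalkers - 1.) * np.sum((per_walker_mean - mean)**2, axis=0)
        Vhat = (nsteps - 1) / nsteps * W + (nwalkers + 1) / (nwalkers * nsteps) * B
        return np.max(np.sqrt(Vhat / W))
    else:
        return np.inf

rng = np.random.default_rng(42)
mixed = rng.normal(size=(10, 500, 3))          # all walkers sample the same distribution
offset = mixed + rng.normal(size=(10, 1, 3))   # each walker shifted: not yet converged
print(gelman_rubin_Q(mixed))    # close to 1
print(gelman_rubin_Q(offset))   # noticeably above 1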
@@ -835,6 +825,24 @@ def print_progress(
     )


+def calculate_tau_array(samples, search_parameter_keys, ci):
+    """ Compute ACT tau for 0-temperature chains """
+    import emcee
+    nwalkers, nsteps, ndim = samples.shape
+    tau_array = np.zeros((nwalkers, ndim)) + np.inf
+    if nsteps > 1:
+        for ii in range(nwalkers):
+            for jj, key in enumerate(search_parameter_keys):
+                if ci.ignore_keys_for_tau and ci.ignore_keys_for_tau in key:
+                    continue
+                try:
+                    tau_array[ii, jj] = emcee.autocorr.integrated_time(
+                        samples[ii, :, jj], c=ci.autocorr_c, tol=0)[0]
+                except emcee.autocorr.AutocorrError:
+                    tau_array[ii, jj] = np.inf
+    return tau_array
+
+
 def checkpoint(
     iteration,
     outdir,
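The factored-out helper wraps emcee's integrated autocorrelation-time estimator, one (walker, parameter) pair at a time, with tol=0 so that short chains do not raise. Below is a self-contained sketch of the same calculation on synthetic AR(1) chains (the 0.9 coefficient and the array sizes are arbitrary), followed by the reduction check_iteration applies afterwards.

import emcee
import numpy as np

rng = np.random.default_rng(0)
nwalkers, nsteps, ndim = 8, 2000, 2

# Synthetic correlated chains: AR(1) noise with coefficient 0.9
samples = np.zeros((nwalkers, nsteps, ndim))
for step in range(1, nsteps):
    samples[:, step] = 0.9 * samples[:, step - 1] + rng.normal(size=(nwalkers, ndim))

tau_array = np.zeros((nwalkers, ndim)) + np.inf
for ii in range(nwalkers):
    for jj in range(ndim):
        try:
            # tol=0 disables emcee's "chain too short" error, as in the helper above
            tau_array[ii, jj] = emcee.autocorr.integrated_time(
                samples[ii, :, jj], c=5, tol=0)[0]
        except emcee.autocorr.AutocorrError:
            tau_array[ii, jj] = np.inf

# Same reduction as check_iteration: maximum over parameters, mean over walkers
tau = np.max(np.mean(tau_array, axis=0))
print(tau)  # roughly (1 + 0.9) / (1 - 0.9) = 19 for this AR(1) process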
@@ -955,6 +963,8 @@ def plot_mean_log_likelihood(mean_log_likelihood, outdir, label):
     fig, ax = plt.subplots()
     idxs = np.arange(nsteps)
     ax.plot(idxs, mean_log_likelihood.T)
+    ax.set(xlabel="Iteration", ylabel=r"$\langle\log\mathcal{L}\rangle$")
+    fig.tight_layout()
     fig.savefig("{}/{}_checkpoint_meanloglike.png".format(outdir, label))
     plt.close(fig)