Add a mean-log-likelihood method to improve the ACT estimation

Merged: Gregory Ashton requested to merge add-mean-log-like-to-ptemcee into master
1 file changed: +40 −15
@@ -506,6 +506,7 @@ class Ptemcee(MCMCSampler):
                 self.tau_list,
                 self.tau_list_n,
                 self.Q_list,
+                self.mean_log_likelihood,
             )

             if stop:
@@ -638,6 +639,7 @@ def check_iteration(
     tau_list,
     tau_list_n,
     Q_list,
+    mean_log_likelihood,
 ):
     """ Per-iteration logic to calculate the convergence check
@@ -686,7 +688,7 @@ def check_iteration(
     if np.isnan(tau) or np.isinf(tau):
         print_progress(
             iteration, sampler, time_per_check, np.nan, np.nan,
-            np.nan, np.nan, False, convergence_inputs, Q,
+            np.nan, np.nan, np.nan, False, convergence_inputs, Q,
         )
         return False, np.nan, np.nan, np.nan, np.nan
@@ -704,25 +706,40 @@ def check_iteration(
     logger.debug("Convergence: Q<Q_tol={}, nsamples<nsamples_effective={}"
                  .format(Q < ci.Q_tol, ci.nsamples < nsamples_effective))

     # Calculate change in tau from previous iterations
     GRAD_WINDOW_LENGTH = 11
     nsteps_to_check = ci.autocorr_tau * np.max([2 * GRAD_WINDOW_LENGTH, tau_int])
     lower_tau_index = np.max([0, len(tau_list) - nsteps_to_check])
     check_taus = np.array(tau_list[lower_tau_index:])
     if not np.any(np.isnan(check_taus)) and check_taus.shape[0] > GRAD_WINDOW_LENGTH:
-        # Estimate the maximum gradient
-        grad = np.max(scipy.signal.savgol_filter(
-            check_taus, axis=0, window_length=GRAD_WINDOW_LENGTH, polyorder=2, deriv=1))
+        gradient_tau = get_max_gradient(
+            check_taus, axis=0, window_length=GRAD_WINDOW_LENGTH)

-        if grad < ci.gradient_tau:
-            logger.debug("tau usable as grad < gradient_tau={}".format(ci.gradient_tau))
+        if gradient_tau < ci.gradient_tau:
+            logger.debug("tau usable as {} < gradient_tau={}"
+                         .format(gradient_tau, ci.gradient_tau))
             tau_usable = True
         else:
-            logger.debug("tau not usable as grad > gradient_tau={}".format(ci.gradient_tau))
+            logger.debug("tau not usable as {} > gradient_tau={}"
+                         .format(gradient_tau, ci.gradient_tau))
             tau_usable = False
+
+        check_mean_log_likelihood = mean_log_likelihood[:, -nsteps_to_check:]
+        gradient_mean_logl = get_max_gradient(
+            check_mean_log_likelihood, axis=1, window_length=GRAD_WINDOW_LENGTH,
+            smooth=True)
+        if gradient_mean_logl < ci.gradient_tau:
+            logger.debug("tau usable as {} < gradient_tau={}"
+                         .format(gradient_mean_logl, ci.gradient_tau))
+            tau_usable = True
+        else:
+            logger.debug("tau not usable as {} > gradient_tau={}"
+                         .format(gradient_mean_logl, ci.gradient_tau))
+            tau_usable = False
     else:
         logger.debug("ACT is nan")
-        grad = np.nan
+        gradient_tau = np.nan
+        gradient_mean_logl = np.nan
         tau_usable = False

     if nsteps < tau_int * ci.autocorr_tol:
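Taken together, the new logic only declares tau usable once both the recent tau history and the per-temperature mean log-likelihood have stopped drifting. A minimal, self-contained sketch of that check on synthetic data; the threshold value and the histories below are illustrative, not bilby defaults:

import numpy as np
import scipy.signal

GRAD_WINDOW_LENGTH = 11

def get_max_gradient(x, axis=0, window_length=GRAD_WINDOW_LENGTH, polyorder=2, smooth=False):
    # Same shape as the helper added in this MR: optionally pre-smooth, then
    # take the largest first derivative estimated by a Savitzky-Golay filter.
    if smooth:
        x = scipy.signal.savgol_filter(
            x, axis=axis, window_length=window_length, polyorder=3)
    return np.max(scipy.signal.savgol_filter(
        x, axis=axis, window_length=window_length, polyorder=polyorder, deriv=1))

# Synthetic histories: tau estimates that have plateaued, and per-temperature
# mean log-likelihoods (ntemps x nsteps) that are still rising, i.e. the
# sampler is still burning in even though tau alone looks converged.
rng = np.random.default_rng(42)
steps = np.arange(200)
check_taus = 50 - 40 * np.exp(-steps / 20) + rng.normal(0, 0.1, 200)
check_mean_log_likelihood = -100 + 0.3 * steps + rng.normal(0, 0.2, (4, 200))

gradient_tau = get_max_gradient(check_taus[-50:], axis=0)
gradient_mean_logl = get_max_gradient(
    check_mean_log_likelihood[:, -50:], axis=1, smooth=True)

threshold = 0.1  # stands in for ci.gradient_tau; illustrative value
tau_usable = gradient_tau < threshold and gradient_mean_logl < threshold
print(gradient_tau, gradient_mean_logl, tau_usable)  # rising logL blocks convergence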
@@ -740,7 +757,8 @@ def check_iteration(
         nsamples_effective,
         samples_per_check,
         tau_int,
-        grad,
+        gradient_tau,
+        gradient_mean_logl,
         tau_usable,
         convergence_inputs,
         Q
@@ -749,6 +767,15 @@ def check_iteration(
     return stop, nburn, thin, tau_int, nsamples_effective


+def get_max_gradient(x, axis=0, window_length=11, polyorder=2, smooth=False):
+    if smooth:
+        x = scipy.signal.savgol_filter(
+            x, axis=axis, window_length=window_length, polyorder=3)
+    return np.max(scipy.signal.savgol_filter(
+        x, axis=axis, window_length=window_length, polyorder=polyorder,
+        deriv=1))
+
+
 def get_Q_convergence(samples):
     nwalkers, nsteps, ndim = samples.shape
     if nsteps > 1:
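A side note on the get_max_gradient helper added above (not part of the diff): with deriv=1 and the implicit delta=1.0, scipy.signal.savgol_filter returns the local polynomial's slope per checkpoint, which is far less noise-sensitive than raw differencing. A quick comparison on a flat, noisy series:

import numpy as np
import scipy.signal

# A flat but noisy series: raw point-to-point differences look large, while
# the Savitzky-Golay slope estimate (deriv=1) stays much closer to zero.
rng = np.random.default_rng(0)
y = 30.0 + rng.normal(0, 0.5, 100)

raw_max_diff = np.max(np.abs(np.diff(y)))
savgol_max_grad = np.max(np.abs(scipy.signal.savgol_filter(
    y, window_length=11, polyorder=2, deriv=1)))

print("max |diff|: {:0.2f}, max |savgol slope|: {:0.2f}".format(
    raw_max_diff, savgol_max_grad))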
@@ -770,7 +797,8 @@ def print_progress(
     nsamples_effective,
     samples_per_check,
     tau_int,
-    grad,
+    gradient_tau,
+    gradient_mean_logl,
     tau_usable,
     convergence_inputs,
     Q,
@@ -794,10 +822,7 @@ def print_progress(
     sampling_time = datetime.timedelta(seconds=np.sum(time_per_check))

-    if grad < convergence_inputs.gradient_tau:
-        tau_str = "{}({:0.2f}<{})".format(tau_int, grad, convergence_inputs.gradient_tau)
-    else:
-        tau_str = "{}({:0.2f}>{})".format(tau_int, grad, convergence_inputs.gradient_tau)
+    tau_str = "{}(+{:0.2f},+{:0.2f})".format(tau_int, gradient_tau, gradient_mean_logl)

     if tau_usable:
         tau_str = "={}".format(tau_str)