Commit e2a37ebb authored by Michael Williams

Merge changes from master and move test

parents 1980ba15 762568df
Pipeline #192292 failed in 61 minutes and 5 seconds
......@@ -43,7 +43,7 @@ basic-3.7:
# test example on python 3.7
python-3.7:
stage: test
-  image: bilbydev/v2-dockerfile-test-suite-python37
+  image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
script:
- python -m pip install .
......@@ -69,7 +69,7 @@ python-3.7:
# test example on python 3.8
python-3.8:
stage: test
-  image: bilbydev/v2-dockerfile-test-suite-python38
+  image: quay.io/bilbydev/v2-dockerfile-test-suite-python38
script:
- python -m pip install .
......@@ -78,7 +78,7 @@ python-3.8:
# test example on python 3.6
python-3.6:
stage: test
-  image: bilbydev/v2-dockerfile-test-suite-python36
+  image: quay.io/bilbydev/v2-dockerfile-test-suite-python36
script:
- python -m pip install .
......@@ -87,7 +87,7 @@ python-3.6:
# test samplers on python 3.7
python-3.7-samplers:
stage: test
-  image: bilbydev/v2-dockerfile-test-suite-python37
+  image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
script:
- python -m pip install .
......@@ -97,7 +97,7 @@ python-3.7-samplers:
# test samplers on python 3.6
python-3.6-samplers:
stage: test
-  image: bilbydev/v2-dockerfile-test-suite-python36
+  image: quay.io/bilbydev/v2-dockerfile-test-suite-python36
script:
- python -m pip install .
......@@ -106,7 +106,7 @@ python-3.6-samplers:
# Test containers are up to date
containers:
stage: test
-  image: bilbydev/v2-dockerfile-test-suite-python37
+  image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
script:
- cd containers
- python write_dockerfiles.py
......@@ -117,7 +117,7 @@ containers:
# Tests run at a fixed schedule rather than on push
scheduled-python-3.7:
stage: test
-  image: bilbydev/v2-dockerfile-test-suite-python37
+  image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
only:
- schedules
script:
......@@ -129,7 +129,7 @@ scheduled-python-3.7:
plotting:
stage: test
-  image: bilbydev/bilby-test-suite-python37
+  image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
only:
- schedules
script:
......@@ -140,7 +140,7 @@ plotting:
authors:
stage: test
-  image: bilbydev/bilby-test-suite-python37
+  image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
script:
- python test/check_author_list.py
......@@ -162,7 +162,7 @@ pages:
deploy_release:
stage: deploy
-  image: bilbydev/v2-dockerfile-test-suite-python37
+  image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
variables:
TWINE_USERNAME: $PYPI_USERNAME
TWINE_PASSWORD: $PYPI_PASSWORD
......@@ -177,7 +177,7 @@ deploy_release:
precommits-py3.7:
stage: test
-  image: bilbydev/v2-dockerfile-test-suite-python37
+  image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
script:
- source activate python37
- mkdir -p .pip37
......
# All notable changes will be documented in this file
## [1.0.4] 2020-11-23
Version 1.0.4 release of bilby
### Added
- Added a chirp-mass and mass-ratio prior that is uniform in component masses (!891)
### Changes
- Fixed an issue in the CI
## [1.0.3] 2020-10-23
Version 1.0.3 release of bilby
......
......@@ -16,7 +16,6 @@ https://lscsoft.docs.ligo.org/bilby/installation.html.
"""
from __future__ import absolute_import
import sys
from . import core, gw, hyper
......
from __future__ import absolute_import
from . import grid, likelihood, prior, result, sampler, series, utils
from __future__ import division
import numpy as np
import os
import json
......
from __future__ import division, print_function
import copy
import numpy as np
......
......@@ -370,8 +370,8 @@ class SymmetricLogUniform(Prior):
class Cosine(Prior):
-    def __init__(self, name=None, latex_label=None, unit=None,
-                 minimum=-np.pi / 2, maximum=np.pi / 2, boundary=None):
+    def __init__(self, minimum=-np.pi / 2, maximum=np.pi / 2, name=None,
+                 latex_label=None, unit=None, boundary=None):
"""Cosine prior with bounds
Parameters
......@@ -389,8 +389,8 @@ class Cosine(Prior):
boundary: str
See superclass
"""
-        super(Cosine, self).__init__(name=name, latex_label=latex_label, unit=unit,
-                                     minimum=minimum, maximum=maximum, boundary=boundary)
+        super(Cosine, self).__init__(minimum=minimum, maximum=maximum, name=name,
+                                     latex_label=latex_label, unit=unit, boundary=boundary)
def rescale(self, val):
"""
......@@ -425,8 +425,8 @@ class Cosine(Prior):
class Sine(Prior):
-    def __init__(self, name=None, latex_label=None, unit=None, minimum=0,
-                 maximum=np.pi, boundary=None):
+    def __init__(self, minimum=0, maximum=np.pi, name=None,
+                 latex_label=None, unit=None, boundary=None):
"""Sine prior with bounds
Parameters
......@@ -444,8 +444,8 @@ class Sine(Prior):
boundary: str
See superclass
"""
-        super(Sine, self).__init__(name=name, latex_label=latex_label, unit=unit,
-                                   minimum=minimum, maximum=maximum, boundary=boundary)
+        super(Sine, self).__init__(minimum=minimum, maximum=maximum, name=name,
+                                   latex_label=latex_label, unit=unit, boundary=boundary)
def rescale(self, val):
"""
......
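A quick usage sketch of the reordered signatures above (illustrative, not part of the diff): the bounds now lead the argument list, so they can be passed positionally; keyword use is unchanged.

import numpy as np
from bilby.core.prior import Cosine, Sine

# Bounds first, so positional construction maps to (minimum, maximum)
dec = Cosine(-np.pi / 2, np.pi / 2, name="dec")
theta = Sine(0, np.pi, name="theta_jn")

# Keyword form still works and is unambiguous across versions
psi = Sine(minimum=0, maximum=np.pi, name="psi", boundary="periodic")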
......@@ -3,7 +3,6 @@ from io import open as ioopen
import json
import os
-from future.utils import iteritems
from matplotlib.cbook import flatten
import numpy as np
......@@ -185,7 +184,7 @@ class PriorDict(dict):
def from_dictionary(self, dictionary):
eval_dict = dict(inf=np.inf)
-        for key, val in iteritems(dictionary):
+        for key, val in dictionary.items():
if isinstance(val, Prior):
continue
elif isinstance(val, (int, float)):
......@@ -391,6 +390,7 @@ class PriorDict(dict):
samples = self.sample_subset(keys=keys, size=sampling_chunk)
keep = np.atleast_1d(self.evaluate_constraints(samples))
if len(keep) == 1:
+            self._cached_normalizations[keys] = 1
return 1
all_samples = {key: np.array([]) for key in keys}
while np.count_nonzero(keep) < min_accept:
......
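The added line memoizes the trivial case where every prior sample satisfies the constraints, so the normalization is exactly 1. A minimal standalone sketch of the caching pattern, with hypothetical names, assuming keys is hashable (e.g. a tuple of parameter names):

_cached_normalizations = {}

def normalize_constraint_factor(keys, estimator):
    # Cache the (possibly expensive) Monte Carlo estimate per key tuple,
    # so repeated prob/ln_prob calls reuse the stored value.
    if keys not in _cached_normalizations:
        _cached_normalizations[keys] = estimator(keys)
    return _cached_normalizations[keys]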
from __future__ import division
import inspect
import os
from collections import OrderedDict, namedtuple
......@@ -238,8 +236,9 @@ class Result(object):
sampler_kwargs=None, injection_parameters=None,
meta_data=None, posterior=None, samples=None,
nested_samples=None, log_evidence=np.nan,
-                 log_evidence_err=np.nan, log_noise_evidence=np.nan,
-                 log_bayes_factor=np.nan, log_likelihood_evaluations=None,
+                 log_evidence_err=np.nan, information_gain=np.nan,
+                 log_noise_evidence=np.nan, log_bayes_factor=np.nan,
+                 log_likelihood_evaluations=None,
log_prior_evaluations=None, sampling_time=None, nburn=None,
num_likelihood_evaluations=None, walkers=None,
max_autocorrelation_time=None, use_ratio=None,
......@@ -269,6 +268,8 @@ class Result(object):
An array of the output posterior samples and the unweighted samples
log_evidence, log_evidence_err, log_noise_evidence, log_bayes_factor: float
Natural log evidences
+        information_gain: float
+            The Kullback-Leibler divergence
log_likelihood_evaluations: array_like
The evaluations of the likelihood for each sample point
num_likelihood_evaluations: int
......@@ -321,6 +322,7 @@ class Result(object):
self.use_ratio = use_ratio
self.log_evidence = log_evidence
self.log_evidence_err = log_evidence_err
+        self.information_gain = information_gain
self.log_noise_evidence = log_noise_evidence
self.log_bayes_factor = log_bayes_factor
self.log_likelihood_evaluations = log_likelihood_evaluations
......@@ -573,7 +575,7 @@ class Result(object):
'log_noise_evidence', 'log_bayes_factor', 'priors', 'posterior',
'injection_parameters', 'meta_data', 'search_parameter_keys',
'fixed_parameter_keys', 'constraint_parameter_keys',
-            'sampling_time', 'sampler_kwargs', 'use_ratio',
+            'sampling_time', 'sampler_kwargs', 'use_ratio', 'information_gain',
'log_likelihood_evaluations', 'log_prior_evaluations',
'num_likelihood_evaluations', 'samples', 'nested_samples',
'walkers', 'nburn', 'parameter_labels', 'parameter_labels_with_unit',
......
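For context on the new information_gain attribute: nested samplers report the information H (the Kullback-Leibler divergence from prior to posterior, in nats), and the log-evidence error is commonly estimated as sqrt(H / nlive), exactly as the cpnest diff below does with out.NS.state.info. A small illustrative sketch (example numbers only):

import numpy as np

def log_evidence_error(information_gain, nlive):
    # Standard nested-sampling error estimate: sqrt(H / nlive)
    return np.sqrt(information_gain / nlive)

# e.g. H = 20 nats with 1000 live points gives ~0.14 in log-evidence
print(log_evidence_error(20.0, 1000))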
from __future__ import absolute_import
import datetime
import distutils.dir_util
import numpy as np
......
from __future__ import absolute_import
import array
import copy
......@@ -89,8 +88,8 @@ class Cpnest(NestedSampler):
prior_samples = self.priors.sample()
self._update_bounds()
point = LivePoint(
-                self.names, array.array(
-                    'f', [prior_samples[name] for name in self.names]))
+                self.names, array.array('d', [prior_samples[name] for name in self.names])
+            )
return point
self._resolve_proposal_functions()
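The 'f' to 'd' change above matters because array.array('f') stores single-precision C floats and silently truncates double-precision parameter values. A quick self-contained demonstration:

import array

x = 1.2345678901234567
print(array.array('f', [x])[0])  # ~1.2345678806304932, precision lost
print(array.array('d', [x])[0])  # 1.2345678901234567, preserved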
......@@ -132,6 +131,7 @@ class Cpnest(NestedSampler):
self.result.nested_samples['weights'] = np.exp(log_weights)
self.result.log_evidence = out.NS.state.logZ
self.result.log_evidence_err = np.sqrt(out.NS.state.info / out.NS.state.nlive)
+        self.result.information_gain = out.NS.state.info
return self.result
def _verify_kwargs_against_default_kwargs(self):
......
from __future__ import absolute_import
import os
import dill as pickle
......
......@@ -6,7 +6,7 @@ import pickle
import signal
import time
-import tqdm
+from tqdm.auto import tqdm
import matplotlib.pyplot as plt
import numpy as np
from pandas import DataFrame
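tqdm.auto selects the appropriate progress-bar frontend (notebook widget vs. plain terminal bar) at import time, which is why the bare import tqdm is replaced here. A minimal sketch of the usage this enables:

import sys
from tqdm.auto import tqdm  # picks notebook or console renderer automatically

pbar = tqdm(total=100, file=sys.stdout)
for _ in range(100):
    pbar.update(1)
pbar.close()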
......@@ -224,7 +224,7 @@ class Dynesty(NestedSampler):
self.kwargs['update_interval'] = int(0.6 * self.kwargs['nlive'])
if self.kwargs['print_func'] is None:
self.kwargs['print_func'] = self._print_func
-        self.pbar = tqdm.tqdm(file=sys.stdout)
+        self.pbar = tqdm(file=sys.stdout)
Sampler._verify_kwargs_against_default_kwargs(self)
def _print_func(self, results, niter, ncall=None, dlogz=None, *args, **kwargs):
......@@ -401,6 +401,7 @@ class Dynesty(NestedSampler):
sorted_samples=self.result.samples)
self.result.log_evidence = out.logz[-1]
self.result.log_evidence_err = out.logzerr[-1]
+        self.result.information_gain = out.information[-1]
def _run_nested_wrapper(self, kwargs):
""" Wrapper function to run_nested
......@@ -612,7 +613,7 @@ class Dynesty(NestedSampler):
fig = dyplot.traceplot(self.sampler.results, labels=labels)[0]
fig.tight_layout()
fig.savefig(filename)
-        except (RuntimeError, np.linalg.linalg.LinAlgError, ValueError) as e:
+        except (RuntimeError, np.linalg.linalg.LinAlgError, ValueError, OverflowError, Exception) as e:
logger.warning(e)
logger.warning('Failed to create dynesty state plot at checkpoint')
finally:
......@@ -690,6 +691,16 @@ class Dynesty(NestedSampler):
"""
return self.priors.rescale(self._search_parameter_keys, theta)
+    def calc_likelihood_count(self):
+        if self.likelihood_benchmark:
+            if hasattr(self, 'sampler'):
+                self.result.num_likelihood_evaluations = \
+                    getattr(self.sampler, 'ncall', 0)
+            else:
+                self.result.num_likelihood_evaluations = 0
+        else:
+            return None
def sample_rwalk_bilby(args):
""" Modified bilby-implemented version of dynesty.sampling.sample_rwalk """
......
from __future__ import absolute_import, print_function
from collections import namedtuple
import os
import signal
......@@ -12,8 +10,7 @@ from pandas import DataFrame
from distutils.version import LooseVersion
import dill as pickle
-from ..utils import (
-    logger, get_progress_bar, check_directory_exists_and_if_not_mkdir)
+from ..utils import logger, check_directory_exists_and_if_not_mkdir
from .base_sampler import MCMCSampler, SamplerError
......@@ -353,7 +350,7 @@ class Emcee(MCMCSampler):
self.pos0 = self.sampler.chain[:, -1, :]
def run_sampler(self):
-        tqdm = get_progress_bar()
+        from tqdm.auto import tqdm
sampler_function_kwargs = self.sampler_function_kwargs
iterations = sampler_function_kwargs.pop('iterations')
iterations -= self._previous_iterations
......
from __future__ import absolute_import
import numpy as np
from .base_sampler import Sampler
......
from __future__ import absolute_import, print_function
-from ..utils import logger, get_progress_bar
+from ..utils import logger
import numpy as np
import os
from .emcee import Emcee
......@@ -141,7 +140,7 @@ class Kombine(Emcee):
logger.info("Kombine auto-burnin complete. Removing {} samples from chains".format(self.nburn))
self._set_pos0_for_resume()
-        tqdm = get_progress_bar()
+        from tqdm.auto import tqdm
sampler_function_kwargs = self.sampler_function_kwargs
iterations = sampler_function_kwargs.pop('iterations')
iterations -= self._previous_iterations
......
from __future__ import absolute_import
import numpy as np
from pandas import DataFrame
......@@ -74,6 +73,7 @@ class Nestle(NestedSampler):
sorted_samples=self.result.samples)
self.result.log_evidence = out.logz
self.result.log_evidence_err = out.logzerr
+        self.result.information_gain = out.h
self.calc_likelihood_count()
return self.result
......
from __future__ import absolute_import
import numpy as np
......
from __future__ import absolute_import, division, print_function
import os
import datetime
......@@ -8,10 +7,12 @@ import sys
import time
import dill
from collections import namedtuple
+import logging
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
+import scipy.signal
from ..utils import logger, check_directory_exists_and_if_not_mkdir
from .base_sampler import SamplerError, MCMCSampler
......@@ -23,10 +24,14 @@ ConvergenceInputs = namedtuple(
"autocorr_c",
"autocorr_tol",
"autocorr_tau",
"gradient_tau",
"gradient_mean_log_posterior",
"Q_tol",
"safety",
"burn_in_nact",
"burn_in_fixed_discard",
"mean_logl_frac",
"thin_by_nact",
"frac_threshold",
"nsamples",
"ignore_keys_for_tau",
"min_tau",
......@@ -53,6 +58,10 @@ class Ptemcee(MCMCSampler):
The number of burn-in autocorrelation times to discard and the thin-by
factor. Increasing burn_in_nact increases the time required for burn-in.
Increasing thin_by_nact increases the time required to obtain nsamples.
+    burn_in_fixed_discard: int (0)
+        A fixed number of samples to discard for burn-in
+    mean_logl_frac: float, (0.01)
+        The maximum fractional change in the mean log-likelihood to accept
autocorr_tol: int, (50)
The minimum number of autocorrelation times needed to trust the
estimate of the autocorrelation time.
......@@ -62,14 +71,18 @@ class Ptemcee(MCMCSampler):
        A multiplicative factor for the estimated autocorrelation. Useful for
cases where non-convergence can be observed by eye but the automated
tools are failing.
-    autocorr_tau:
+    autocorr_tau: int, (1)
The number of autocorrelation times to use in assessing if the
autocorrelation time is stable.
frac_threshold: float, (0.01)
The maximum fractional change in the autocorrelation for the last
autocorr_tau steps. If the fractional change exceeds this value,
sampling will continue until the estimate of the autocorrelation time
can be trusted.
+    gradient_tau: float, (0.1)
+        The maximum (smoothed) local gradient of the ACT estimate to allow.
+        This ensures the ACT estimate is stable before finishing sampling.
+    gradient_mean_log_posterior: float, (0.1)
+        The maximum (smoothed) local gradient of the mean log-posterior to
+        allow. This ensures the mean log-posterior is stable before finishing
+        sampling.
+    Q_tol: float (1.02)
+        The maximum between-chain to within-chain tolerance allowed (akin to
+        the Gelman-Rubin statistic; see the sketch after this docstring).
min_tau: int, (1)
A minimum tau (autocorrelation time) to accept.
check_point_deltaT: float, (600)
......@@ -79,7 +92,7 @@ class Ptemcee(MCMCSampler):
exit_code: int, (77)
The code on which the sampler exits.
store_walkers: bool (False)
-        If true, store the unthinned, unburnt chaines in the result. Note, this
+        If true, store the unthinned, unburnt chains in the result. Note, this
is not recommended for cases where tau is large.
ignore_keys_for_tau: str
A pattern used to ignore keys in estimating the autocorrelation time.
......@@ -90,6 +103,12 @@ class Ptemcee(MCMCSampler):
The walkers are then initialized from the range of values obtained.
If a list, for the keys in the list the optimization step is applied,
otherwise the initial points are drawn from the prior.
+    niterations_per_check: int (5)
+        The number of iteration steps to take before checking ACT. This
+        effectively pre-thins the chains. Larger values reduce the per-eval
+        timing due to improved efficiency. But, if it is made too large the
+        pre-thinning may be overly aggressive, effectively wasting compute
+        time. If you see tau=1, then niterations_per_check is likely too large.
Other Parameters
......@@ -98,7 +117,7 @@ class Ptemcee(MCMCSampler):
The number of walkers
nsteps: int, (100)
The number of steps to take
-    ntemps: int (2)
+    ntemps: int (10)
The number of temperatures used by ptemcee
Tmax: float
The maximum temperature
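The new convergence controls combine a Gelman-Rubin-style between/within-chain ratio (Q_tol) with smoothed local gradients of the ACT and mean log-posterior series (gradient_tau, gradient_mean_log_posterior). A rough, hypothetical sketch of both checks; bilby's actual implementation differs in detail (e.g. it smooths with scipy.signal):

import numpy as np

def between_within_ratio(chains):
    # Gelman-Rubin-style statistic for one parameter;
    # chains has shape (nwalkers, nsteps).
    nsteps = chains.shape[1]
    within = np.mean(np.var(chains, axis=1, ddof=1))
    between = np.var(np.mean(chains, axis=1), ddof=1)
    var_hat = (nsteps - 1) / nsteps * within + between
    return np.sqrt(var_hat / within)  # keep sampling while > Q_tol

def smoothed_local_gradient(series, window=11):
    # Moving-average smooth a series (e.g. the per-check ACT estimates or
    # the mean log-posterior), then return the latest local slope, to be
    # compared against gradient_tau / gradient_mean_log_posterior.
    kernel = np.ones(window) / window
    smoothed = np.convolve(series, kernel, mode="valid")
    return np.gradient(smoothed)[-1]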
......@@ -107,15 +126,15 @@ class Ptemcee(MCMCSampler):
# Arguments used by ptemcee
default_kwargs = dict(
-        ntemps=20,
-        nwalkers=200,
+        ntemps=10,
+        nwalkers=100,
Tmax=None,
betas=None,
a=2.0,
adaptation_lag=10000,
adaptation_time=100,
random=None,
-        adapt=True,
+        adapt=False,
swap_ratios=False,
)
......@@ -130,13 +149,17 @@ class Ptemcee(MCMCSampler):
skip_import_verification=False,
resume=True,
nsamples=5000,
-        burn_in_nact=10,
+        burn_in_nact=50,
+        burn_in_fixed_discard=0,
+        mean_logl_frac=0.01,
thin_by_nact=0.5,
autocorr_tol=50,
autocorr_c=5,
safety=1,
-        autocorr_tau=5,
        frac_threshold=0.01,
+        autocorr_tau=1,
+        gradient_tau=0.1,
+        gradient_mean_log_posterior=0.1,
+        Q_tol=1.02,
min_tau=1,
check_point_deltaT=600,
threads=1,
......@@ -145,7 +168,8 @@ class Ptemcee(MCMCSampler):
store_walkers=False,
ignore_keys_for_tau=None,
pos0="prior",
-        niterations_per_check=10,
+        niterations_per_check=5,
+        log10beta_min=None,
**kwargs
):
super(Ptemcee, self).__init__(
......@@ -184,14 +208,19 @@ class Ptemcee(MCMCSampler):
autocorr_tau=autocorr_tau,
safety=safety,
burn_in_nact=burn_in_nact,
+            burn_in_fixed_discard=burn_in_fixed_discard,
+            mean_logl_frac=mean_logl_frac,
thin_by_nact=thin_by_nact,
frac_threshold=frac_threshold,
+            gradient_tau=gradient_tau,
+            gradient_mean_log_posterior=gradient_mean_log_posterior,
+            Q_tol=Q_tol,
nsamples=nsamples,
ignore_keys_for_tau=ignore_keys_for_tau,
min_tau=min_tau,
+            niterations_per_check=niterations_per_check,
)
self.convergence_inputs = ConvergenceInputs(**convergence_inputs_dict)
logger.info("Using convergence inputs: {}".format(self.convergence_inputs))
# Check if threads was given as an equivalent arg
if threads == 1:
......@@ -206,6 +235,23 @@ class Ptemcee(MCMCSampler):
self.store_walkers = store_walkers
self.pos0 = pos0
+        self._periodic = [
+            self.priors[key].boundary == "periodic" for key in self.search_parameter_keys
+        ]
+        self.priors.sample()
+        self._minima = np.array([
+            self.priors[key].minimum for key in self.search_parameter_keys
+        ])
+        self._range = np.array([
+            self.priors[key].maximum for key in self.search_parameter_keys
+        ]) - self._minima
+        self.log10beta_min = log10beta_min
+        if self.log10beta_min is not None:
+            betas = np.logspace(0, self.log10beta_min, self.ntemps)
+            logger.warning("Using betas {}".format(betas))
+            self.kwargs["betas"] = betas
@property
def sampler_function_kwargs(self):
""" Kwargs passed to samper.sampler() """
......@@ -322,7 +368,7 @@ class Ptemcee(MCMCSampler):
return pos0
def setup_sampler(self):
""" Either initialize the sampelr or read in the resume file """
""" Either initialize the sampler or read in the resume file """
import ptemcee