Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (118)
Showing changes with 902 additions and 155 deletions
[run]
omit =
test/example_test.py
test/gw_example_test.py
test/noise_realisation_test.py
test/other_test.py
test/integration/example_test.py
test/integration/noise_realisation_test.py
test/integration/other_test.py
@@ -43,7 +43,7 @@ basic-3.7:
# test example on python 3.7
python-3.7:
stage: test
image: bilbydev/v2-dockerfile-test-suite-python37
image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
script:
- python -m pip install .
@@ -69,7 +69,7 @@ python-3.7:
# test example on python 3.8
python-3.8:
stage: test
image: bilbydev/v2-dockerfile-test-suite-python38
image: quay.io/bilbydev/v2-dockerfile-test-suite-python38
script:
- python -m pip install .
@@ -78,7 +78,7 @@ python-3.8:
# test example on python 3.6
python-3.6:
stage: test
image: bilbydev/v2-dockerfile-test-suite-python36
image: quay.io/bilbydev/v2-dockerfile-test-suite-python36
script:
- python -m pip install .
@@ -87,46 +87,62 @@ python-3.6:
# test samplers on python 3.7
python-3.7-samplers:
stage: test
image: bilbydev/v2-dockerfile-test-suite-python37
image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
script:
- python -m pip install .
- pytest test/sampler_test.py --durations 10
- pytest test/sample_from_the_prior_test.py
- pytest test/integration/sampler_run_test.py --durations 10
- pytest test/integration/sample_from_the_prior_test.py
# test samplers on python 3.6
python-3.6-samplers:
stage: test
image: bilbydev/v2-dockerfile-test-suite-python36
image: quay.io/bilbydev/v2-dockerfile-test-suite-python36
script:
- python -m pip install .
- pytest test/sampler_test.py
- pytest test/integration/sampler_run_test.py
# Test containers are up to date
containers:
stage: test
image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
script:
- cd containers
- python write_dockerfiles.py
# Fail if differences exist. If this fails, you may need to run
# write_dockerfiles.py and commit the changes.
- git diff --exit-code
# Tests run at a fixed schedule rather than on push
scheduled-python-3.7:
stage: test
image: bilbydev/v2-dockerfile-test-suite-python37
image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
only:
- schedules
script:
- python -m pip install .
# Run tests which are only done on schedule
- pytest test/example_test.py
- pytest test/gw_example_test.py
- pytest test/sample_from_the_prior_test.py
- pytest test/integration/example_test.py
- pytest test/integration/sample_from_the_prior_test.py
plotting:
stage: test
image: bilbydev/bilby-test-suite-python37
image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
only:
- schedules
script:
- python -m pip install .
- python -m pip install ligo.skymap
- pytest test/gw_plot_test.py
- pytest test/gw/plot_test.py
authors:
stage: test
image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
script:
- python test/check_author_list.py
pages:
stage: deploy
@@ -146,7 +162,7 @@ pages:
deploy_release:
stage: deploy
image: bilbydev/v2-dockerfile-test-suite-python37
image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
variables:
TWINE_USERNAME: $PYPI_USERNAME
TWINE_PASSWORD: $PYPI_PASSWORD
@@ -161,7 +177,7 @@ deploy_release:
precommits-py3.7:
stage: test
image: bilbydev/v2-dockerfile-test-suite-python37
image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
script:
- source activate python37
- mkdir -p .pip37
......
# Authors
This file lists, in first-name alphabetical order, all authors who have
contributed (either directly through code or indirectly). If your name is
not listed here, please contact anyone on this list and raise your concern.
Abhirup Ghosh
Aditya Vijaykumar
Andrew Kim
Andrew Miller
Antoni Ramos-Buades
Avi Vajpeyi
Bruce Edelman
Carl-Johan Haster
Cecilio Garcia-Quiros
Charlie Hoy
Christopher Berry
Christos Karathanasis
Colm Talbot
Daniel Williams
David Keitel
Duncan Macleod
Eric Thrane
Ethan Payne
Francisco Javier Hernandez
Gregory Ashton
Hector Estelles
Ignacio Magaña Hernandez
Isobel Marguarethe Romero-Shaw
Jade Powell
James A Clark
John Veitch
Katerina Chatziioannou
Kaylee de Soto
Khun Sang Phukon
Kshipraa Athar
Liting Xiao
Maite Mateu-Lucena
Marc Arene
Marcus Edward Lower
Margaret Millhouse
Marta Colleoni
Matthew Carney
Matthew David Pitkin
Michael Puerrer
Michael Williams
Monica Rizzo
Moritz Huebner
Nicola De Lillo
Nikhil Sarin
Nirban Bose
Paul Easter
Paul Lasky
Philip Relton
Rhys Green
Roberto Cotesta
Rory Smith
S. H. Oh
Sacha Husa
Scott Coughlin
Serguei Ossokine
Shanika Galaudage
Sharan Banagiri
Shichao Wu
Simon Stevenson
Soichiro Morisaki
Sumeet Kulkarni
Sylvia Biscoveanu
Tathagata Ghosh
Virginia d'Emilio
Vivien Raymond
# All notable changes will be documented in this file
## [1.0.4] 2020-11-23
Version 1.0.4 release of bilby
### Added
- Added a chirp-mass and mass-ratio prior that is uniform in the component masses (!891)
### Changes
- Fixed issue in the CI
## [1.0.3] 2020-10-23
Version 1.0.3 release of bilby
### Added
- SlabSpikePrior and examples (!857)
- Authors file (!885)
- CDF function to conditional priors (!882)
- Waveform plot in visualising_the_results.ipynb (!817)
- Addition of dnest4 sampler (!849, !883)
- Loaded modules added to meta-data (!881)
### Changes
- Constraint to Uniform priors in ROQ tutorial (!884)
- Fix to CDF and PDF for SymmetricLogUniform prior (!876)
- Fix bug in evidence combination (!880)
- Typo fixes (!878, !887, !879)
- Minor bug fixes (!888)
## [1.0.2] 2020-09-14
Version 1.0.2 release of bilby
### Added
- Template for the docker files (!783)
- New delta_phase parameter (!850)
- Normalization factor to time-domain waveform plot (!867)
- JSON encoding for int and float types (!866)
- Various minor formatting additions (!870)
### Changes
- Switched to the conda-forge version of multinest and ultranest (!783)
- Updates KAGRA - K1 interferometer information (!861)
- Restructures to tests to be uniform across project (!834)
- Fix to distance and phase marginalization method (!875)
- Fixed round-off of in-plane spin samples with vectorisation (!864)
- Fix to reference distance and interpolant behavior (!858)
- Fix to constraint prior sampling method (!863)
- Clean up of code (!854)
- Various minor bug, test and plotting fixes (!859, !874, !872, !865)
## [1.0.1] 2020-08-29
Version 1.0.1 release of bilby
### Added
- Added an rcparams configuration for plotting (!832)
- Added `chi_1` and `chi_2` parameters to default latex label dictionary (!841)
- Allow output merged result file to be gzip or saved as a HDF5 file (!802)
### Changes
- Fixed first value in EOS cumulative integral (!860)
- Fixed saving the number of likelihood evaluations (!848)
- Likelihood condition is now strictly increasing (!846)
- Fixed a minor issue with conditional priors that could cause unexpected behaviour in edge cases (!838)
- Fixed `__repr__` method in the `FromFile` prior (!836)
- Fixed an issue that caused problems for some users when plotting with a latex backend (!816)
- Fixed bug that occurred when min/max of interpolated priors was changed (!815)
- Fixed time domain waveform epoch (!736)
- Fixed time keeping in multinest (!830)
- Now checks if marginalised priors were defined before marginalising (!829)
- Fixed an issue with multivariate Gaussian prior (!822)
- Various minor code improvements (!836, !839)
- Various minor bug fixes and improvements to the documentation (!820, !823, !837)
- Various testing improvements (!833, !847, !855, !852)
## [1.0.0] 2020-07-06
Version 1.0 release of bilby
......
@@ -4,3 +4,4 @@ from .conditional import *
from .dict import *
from .interpolated import *
from .joint import *
from .slabspike import *
@@ -337,7 +337,8 @@ class SymmetricLogUniform(Prior):
-------
float: Prior probability of val
"""
return (np.nan_to_num(0.5 / np.abs(val) / np.log(self.maximum / self.minimum)) *
val = np.abs(val)
return (np.nan_to_num(0.5 / val / np.log(self.maximum / self.minimum)) *
self.is_in_prior_range(val))
def ln_prob(self, val):
@@ -354,11 +355,23 @@ class SymmetricLogUniform(Prior):
"""
return np.nan_to_num(- np.log(2 * np.abs(val)) - np.log(np.log(self.maximum / self.minimum)))
def cdf(self, val):
val = np.atleast_1d(val)
norm = 0.5 / np.log(self.maximum / self.minimum)
cdf = np.zeros((len(val)))
lower_indices = np.where(np.logical_and(-self.maximum <= val, val <= -self.minimum))[0]
upper_indices = np.where(np.logical_and(self.minimum <= val, val <= self.maximum))[0]
cdf[lower_indices] = -norm * np.log(-val[lower_indices] / self.maximum)
cdf[np.where(np.logical_and(-self.minimum < val, val < self.minimum))] = 0.5
cdf[upper_indices] = 0.5 + norm * np.log(val[upper_indices] / self.minimum)
cdf[np.where(self.maximum < val)] = 1
return cdf
class Cosine(Prior):
def __init__(self, name=None, latex_label=None, unit=None,
minimum=-np.pi / 2, maximum=np.pi / 2, boundary=None):
def __init__(self, minimum=-np.pi / 2, maximum=np.pi / 2, name=None,
latex_label=None, unit=None, boundary=None):
"""Cosine prior with bounds
Parameters
@@ -376,8 +389,8 @@ class Cosine(Prior):
boundary: str
See superclass
"""
super(Cosine, self).__init__(name=name, latex_label=latex_label, unit=unit,
minimum=minimum, maximum=maximum, boundary=boundary)
super(Cosine, self).__init__(minimum=minimum, maximum=maximum, name=name,
latex_label=latex_label, unit=unit, boundary=boundary)
def rescale(self, val):
"""
@@ -412,8 +425,8 @@ class Cosine(Prior):
class Sine(Prior):
def __init__(self, name=None, latex_label=None, unit=None, minimum=0,
maximum=np.pi, boundary=None):
def __init__(self, minimum=0, maximum=np.pi, name=None,
latex_label=None, unit=None, boundary=None):
"""Sine prior with bounds
Parameters
@@ -431,8 +444,8 @@ class Sine(Prior):
boundary: str
See superclass
"""
super(Sine, self).__init__(name=name, latex_label=latex_label, unit=unit,
minimum=minimum, maximum=maximum, boundary=boundary)
super(Sine, self).__init__(minimum=minimum, maximum=maximum, name=name,
latex_label=latex_label, unit=unit, boundary=boundary)
def rescale(self, val):
"""
@@ -605,6 +618,7 @@ class TruncatedGaussian(Prior):
/ self.sigma / self.normalisation * self.is_in_prior_range(val)
def cdf(self, val):
val = np.atleast_1d(val)
_cdf = (erf((val - self.mu) / 2 ** 0.5 / self.sigma) - erf(
(self.minimum - self.mu) / 2 ** 0.5 / self.sigma)) / 2 / self.normalisation
_cdf[val > self.maximum] = 1
......
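The piecewise CDF added to SymmetricLogUniform above can be sanity-checked numerically. A minimal sketch, assuming a bilby install exposing the class from bilby.core.prior (the numbers are illustrative):

import numpy as np
from bilby.core.prior import SymmetricLogUniform

# Support is [-maximum, -minimum] U [minimum, maximum] = [-10, -1] U [1, 10]
prior = SymmetricLogUniform(name="x", minimum=1, maximum=10)

print(prior.cdf(-10.0))  # ~0.0: start of the negative branch
print(prior.cdf(-1.0))   # ~0.5: half the mass sits below the gap
print(prior.cdf(0.5))    # ~0.5: flat plateau across the excluded gap
print(prior.cdf(10.0))   # ~1.0: end of the positive branch

# The CDF is consistent with the PDF: the total probability integrates to 1
grid = np.linspace(-10, 10, 100000)
print(np.trapz(prior.prob(grid), grid))  # ~1.0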
@@ -444,9 +444,6 @@ class Constraint(Prior):
def prob(self, val):
return (val > self.minimum) & (val < self.maximum)
def ln_prob(self, val):
return np.log((val > self.minimum) & (val < self.maximum))
class PriorException(Exception):
""" General base class for all prior exceptions """
@@ -110,9 +110,41 @@ def conditional_prior_factory(prior_class):
return super(ConditionalPrior, self).prob(val)
def ln_prob(self, val, **required_variables):
"""Return the natural log prior probability of val.
Parameters
----------
val: Union[float, int, array_like]
See superclass
required_variables:
Any required variables that this prior depends on
Returns
-------
float: Natural log prior probability of val
"""
self.update_conditions(**required_variables)
return super(ConditionalPrior, self).ln_prob(val)
def cdf(self, val, **required_variables):
"""Return the cdf of val.
Parameters
----------
val: Union[float, int, array_like]
See superclass
required_variables:
Any required variables that this prior depends on
Returns
-------
float: CDF of val
"""
self.update_conditions(**required_variables)
return super(ConditionalPrior, self).cdf(val)
def update_conditions(self, **required_variables):
"""
This method updates the conditional parameters (depending on the parent class
@@ -129,7 +161,7 @@ def conditional_prior_factory(prior_class):
"""
if sorted(list(required_variables)) == sorted(self.required_variables):
parameters = self.condition_func(self.reference_params, **required_variables)
parameters = self.condition_func(self.reference_params.copy(), **required_variables)
for key, value in parameters.items():
setattr(self, key, value)
elif len(required_variables) == 0:
......
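The new ln_prob and cdf pass-throughs mean a conditional prior can be conditioned and evaluated in a single call. A minimal sketch, where the condition function and parameter names are hypothetical:

from bilby.core.prior import ConditionalUniform

def condition_func(reference_params, a):
    # Hypothetical condition: b is uniform on [0, a]
    return dict(minimum=reference_params["minimum"], maximum=a)

b_prior = ConditionalUniform(
    condition_func=condition_func, minimum=0, maximum=1, name="b"
)
print(b_prior.cdf(0.25, a=0.5))      # 0.5, i.e. halfway through [0, 0.5]
print(b_prior.ln_prob(0.25, a=0.5))  # log(1 / 0.5) = log(2)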
@@ -365,23 +365,21 @@ class PriorDict(dict):
return sample
else:
needed = np.prod(size)
constraint_keys = list()
for ii, key in enumerate(keys[-1::-1]):
for key in keys.copy():
if isinstance(self[key], Constraint):
constraint_keys.append(-ii - 1)
for ii in constraint_keys[-1::-1]:
del keys[ii]
del keys[keys.index(key)]
all_samples = {key: np.array([]) for key in keys}
_first_key = list(all_samples.keys())[0]
while len(all_samples[_first_key]) < needed:
samples = self.sample_subset(keys=keys, size=needed)
keep = np.array(self.evaluate_constraints(samples), dtype=bool)
for key in samples:
all_samples[key] = np.hstack(
[all_samples[key], samples[key][keep].flatten()])
all_samples = {key: np.reshape(all_samples[key][:needed], size)
for key in all_samples
if not isinstance(self[key], Constraint)}
for key in keys:
all_samples[key] = np.hstack([
all_samples[key], samples[key][keep].flatten()
])
all_samples = {
key: np.reshape(all_samples[key][:needed], size) for key in keys
}
return all_samples
def normalize_constraint_factor(self, keys):
......
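The rewritten rejection loop above repeatedly draws from the non-Constraint keys until enough samples pass evaluate_constraints. A minimal usage sketch; the conversion function and key names here are hypothetical:

import numpy as np
from bilby.core.prior import Constraint, PriorDict, Uniform

def convert(parameters):
    # Hypothetical conversion: compute the constrained quantity a + b
    parameters["a_plus_b"] = parameters["a"] + parameters["b"]
    return parameters

priors = PriorDict(
    dictionary=dict(
        a=Uniform(0, 1, name="a"),
        b=Uniform(0, 1, name="b"),
        a_plus_b=Constraint(minimum=0, maximum=1, name="a_plus_b"),
    ),
    conversion_function=convert,
)

samples = priors.sample(1000)
print(sorted(samples))  # ['a', 'b']: Constraint keys are never returned
print(np.all(samples["a"] + samples["b"] < 1))  # True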
import numpy as np
from bilby.core.prior.base import Prior
from bilby.core.utils import logger
class SlabSpikePrior(Prior):
def __init__(self, slab, spike_location=None, spike_height=0):
"""'Slab-and-spike' prior, see e.g. https://arxiv.org/abs/1812.07259
This prior is composed of a `slab`, i.e. any common prior distribution,
and a Dirac spike at a fixed location. This can effectively be used
to emulate sampling in the number of dimensions (similar to
reversible-jump MCMC).
`SymmetricLogUniform` and `FermiDirac` are currently not supported.
Parameters
----------
slab: Prior
Any instance of a bilby prior class. All general prior attributes
from the slab are copied into the SlabSpikePrior.
Note that this hasn't been tested for conditional priors.
spike_location: float, optional
Location of the Dirac spike. Must be between minimum and maximum
of the slab. Defaults to the minimum of the slab
spike_height: float, optional
Relative weight of the spike compared to the slab. Must be
between 0 and 1. Defaults to 0, i.e. the prior is just the slab.
"""
self.slab = slab
super().__init__(name=self.slab.name, latex_label=self.slab.latex_label, unit=self.slab.unit,
minimum=self.slab.minimum, maximum=self.slab.maximum,
check_range_nonzero=self.slab.check_range_nonzero, boundary=self.slab.boundary)
self.spike_location = spike_location
self.spike_height = spike_height
try:
self.inverse_cdf_below_spike = self._find_inverse_cdf_fraction_before_spike()
except Exception as e:
logger.warning("Disregard the following warning when running tests:\n {}".format(e))
@property
def spike_location(self):
return self._spike_loc
@spike_location.setter
def spike_location(self, spike_loc):
if spike_loc is None:
spike_loc = self.minimum
if not self.minimum <= spike_loc <= self.maximum:
raise ValueError("Spike location {} not within prior domain ".format(spike_loc))
self._spike_loc = spike_loc
@property
def spike_height(self):
return self._spike_height
@spike_height.setter
def spike_height(self, spike_height):
if 0 <= spike_height <= 1:
self._spike_height = spike_height
else:
raise ValueError("Spike height must be between 0 and 1, but is {}".format(spike_height))
@property
def slab_fraction(self):
""" Relative prior weight of the slab. """
return 1 - self.spike_height
def _find_inverse_cdf_fraction_before_spike(self):
return float(self.slab.cdf(self.spike_location)) * self.slab_fraction
def rescale(self, val):
"""
'Rescale' a sample from the unit line element to the prior.
Parameters
----------
val: Union[float, int, array_like]
A random number between 0 and 1
Returns
-------
array_like: Associated prior value with input value.
"""
val = np.atleast_1d(val)
lower_indices = np.where(val < self.inverse_cdf_below_spike)[0]
intermediate_indices = np.where(np.logical_and(
self.inverse_cdf_below_spike <= val,
val <= self.inverse_cdf_below_spike + self.spike_height))[0]
higher_indices = np.where(val > self.inverse_cdf_below_spike + self.spike_height)[0]
res = np.zeros(len(val))
res[lower_indices] = self._contracted_rescale(val[lower_indices])
res[intermediate_indices] = self.spike_location
res[higher_indices] = self._contracted_rescale(val[higher_indices] - self.spike_height)
return res
def _contracted_rescale(self, val):
"""
Contracted version of the rescale function that implements the `rescale` function
on the pure slab part of the prior.
Parameters
----------
val: Union[float, int, array_like]
A random number between 0 and self.slab_fraction
Returns
-------
array_like: Associated prior value with input value.
"""
return self.slab.rescale(val / self.slab_fraction)
def prob(self, val):
"""Return the prior probability of val.
Returns np.inf for the spike location
Parameters
----------
val: Union[float, int, array_like]
Returns
-------
array_like: Prior probability of val
"""
res = self.slab.prob(val) * self.slab_fraction
res = np.atleast_1d(res)
res[np.where(val == self.spike_location)] = np.inf
return res
def ln_prob(self, val):
"""Return the Log prior probability of val.
Returns np.inf for the spike location
Parameters
----------
val: Union[float, int, array_like]
Returns
-------
array_like: Prior probability of val
"""
res = self.slab.ln_prob(val) + np.log(self.slab_fraction)
res = np.atleast_1d(res)
res[np.where(val == self.spike_location)] = np.inf
return res
def cdf(self, val):
""" Return the CDF of the prior.
This calls to the slab CDF and adds a discrete step
at the spike location.
Parameters
----------
val: Union[float, int, array_like]
Returns
-------
array_like: CDF value of val
"""
res = self.slab.cdf(val) * self.slab_fraction
res = np.atleast_1d(res)
indices_above_spike = np.where(val > self.spike_location)[0]
res[indices_above_spike] += self.spike_height
return res
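A short sketch of how the new SlabSpikePrior behaves, assuming it is exposed from bilby.core.prior (the numbers are illustrative):

import numpy as np
from bilby.core.prior import SlabSpikePrior, Uniform

# Put 30% of the prior mass in a Dirac spike at x = 0.5
prior = SlabSpikePrior(
    slab=Uniform(0, 1, name="x"), spike_location=0.5, spike_height=0.3
)

draws = prior.rescale(np.random.uniform(size=100000))
print(np.mean(draws == 0.5))  # ~0.3: the spike weight
print(prior.prob(0.5))        # inf exactly at the spike
print(prior.cdf(np.array([0.49, 0.51])))  # jumps by ~0.3 across the spike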
@@ -574,9 +574,10 @@ class Result(object):
'injection_parameters', 'meta_data', 'search_parameter_keys',
'fixed_parameter_keys', 'constraint_parameter_keys',
'sampling_time', 'sampler_kwargs', 'use_ratio',
'log_likelihood_evaluations', 'log_prior_evaluations', 'samples',
'nested_samples', 'walkers', 'nburn', 'parameter_labels',
'parameter_labels_with_unit', 'version']
'log_likelihood_evaluations', 'log_prior_evaluations',
'num_likelihood_evaluations', 'samples', 'nested_samples',
'walkers', 'nburn', 'parameter_labels', 'parameter_labels_with_unit',
'version']
dictionary = OrderedDict()
for attr in save_attrs:
try:
@@ -704,16 +705,20 @@ class Result(object):
"""
latex_labels = []
for k in keys:
if k in self.search_parameter_keys:
idx = self.search_parameter_keys.index(k)
latex_labels.append(self.parameter_labels_with_unit[idx])
elif k in self.parameter_labels:
latex_labels.append(k)
for key in keys:
if key in self.search_parameter_keys:
idx = self.search_parameter_keys.index(key)
label = self.parameter_labels_with_unit[idx]
elif key in self.parameter_labels:
label = key
else:
label = None
logger.debug(
'key {} not a parameter label or latex label'.format(k))
latex_labels.append(' '.join(k.split('_')))
'key {} not a parameter label or latex label'.format(key)
)
if label is None:
label = key.replace("_", " ")
latex_labels.append(label)
return latex_labels
@property
@@ -1639,24 +1644,26 @@ class ResultList(list):
The result object with the combined evidences.
"""
self.check_nested_samples()
if result.use_ratio:
log_bayes_factors = np.array([res.log_bayes_factor for res in self])
result.log_bayes_factor = logsumexp(log_bayes_factors, b=1. / len(self))
result.log_evidence = result.log_bayes_factor + result.log_noise_evidence
result_weights = np.exp(log_bayes_factors - np.max(log_bayes_factors))
else:
log_evidences = np.array([res.log_evidence for res in self])
result.log_evidence = logsumexp(log_evidences, b=1. / len(self))
result_weights = np.exp(log_evidences - np.max(log_evidences))
# Combine evidences
log_evidences = np.array([res.log_evidence for res in self])
result.log_evidence = logsumexp(log_evidences, b=1. / len(self))
result.log_bayes_factor = result.log_evidence - result.log_noise_evidence
# Propagate uncertainty in combined evidence
log_errs = [res.log_evidence_err for res in self if np.isfinite(res.log_evidence_err)]
if len(log_errs) > 0:
result.log_evidence_err = 0.5 * logsumexp(2 * np.array(log_errs), b=1. / len(self))
else:
result.log_evidence_err = np.nan
# Combine posteriors with a weighting
result_weights = np.exp(log_evidences - np.max(log_evidences))
posteriors = list()
for res, frac in zip(self, result_weights):
selected_samples = (np.random.uniform(size=len(res.posterior)) < frac)
posteriors.append(res.posterior[selected_samples])
# remove original nested_samples
result.nested_samples = None
result.sampler_kwargs = None
......
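For reference, the combination above averages the evidences themselves, not the log evidences; logsumexp with b=1/N is a numerically stable way to do that. A small standalone sketch of the same operation:

import numpy as np
from scipy.special import logsumexp

log_evidences = np.array([-10.0, -10.5, -9.8])

# logsumexp(x, b=1/N) = log( (1/N) * sum_i exp(x_i) ), i.e. the log of
# the arithmetic mean of the evidences
combined = logsumexp(log_evidences, b=1.0 / len(log_evidences))
print(combined)
print(np.log(np.mean(np.exp(log_evidences))))  # identical, but less stable

# Posterior samples are then kept with probability proportional to each
# run's evidence relative to the best-supported run
weights = np.exp(log_evidences - np.max(log_evidences))
print(weights)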
@@ -3,7 +3,8 @@ import sys
import datetime
from collections import OrderedDict
from ..utils import command_line_args, logger
import bilby
from ..utils import command_line_args, logger, loaded_modules_dict
from ..prior import PriorDict, DeltaFunction
from .base_sampler import Sampler, SamplingMarginalisedParameterError
@@ -20,13 +21,14 @@ from .pymc3 import Pymc3
from .pymultinest import Pymultinest
from .ultranest import Ultranest
from .fake_sampler import FakeSampler
from .dnest4 import DNest4
from . import proposal
IMPLEMENTED_SAMPLERS = {
'cpnest': Cpnest, 'dynamic_dynesty': DynamicDynesty, 'dynesty': Dynesty,
'emcee': Emcee, 'kombine': Kombine, 'nestle': Nestle, 'ptemcee': Ptemcee,
'ptmcmcsampler': PTMCMCSampler, 'pymc3': Pymc3, 'pymultinest': Pymultinest,
'pypolychord': PyPolyChord, 'ultranest': Ultranest,
'cpnest': Cpnest, 'dnest4': DNest4, 'dynamic_dynesty': DynamicDynesty,
'dynesty': Dynesty, 'emcee': Emcee, 'kombine': Kombine, 'nestle': Nestle,
'ptemcee': Ptemcee, 'ptmcmcsampler': PTMCMCSampler, 'pymc3': Pymc3,
'pymultinest': Pymultinest, 'pypolychord': PyPolyChord, 'ultranest': Ultranest,
'fake_sampler': FakeSampler}
if command_line_args.sampler_help:
@@ -107,7 +109,7 @@ def run_sampler(likelihood, priors=None, label='label', outdir='outdir',
Returns
-------
result
result: bilby.core.result.Result
An object containing the results
"""
@@ -140,6 +142,7 @@ def run_sampler(likelihood, priors=None, label='label', outdir='outdir',
if meta_data is None:
meta_data = dict()
meta_data['likelihood'] = likelihood.meta_data
meta_data["loaded_modules"] = loaded_modules_dict()
if command_line_args.bilby_zero_likelihood_mode:
from bilby.core.likelihood import ZeroLikelihood
......
from __future__ import absolute_import
import datetime
import distutils.dir_util
import numpy as np
import os
import tempfile
from pandas import DataFrame
from ..utils import logger, command_line_args, Counter
from ..utils import logger, check_directory_exists_and_if_not_mkdir, command_line_args, Counter
from ..prior import Prior, PriorDict, DeltaFunction, Constraint
from ..result import Result, read_in_result
@@ -541,7 +544,8 @@ class Sampler(object):
class NestedSampler(Sampler):
npoints_equiv_kwargs = ['nlive', 'nlives', 'n_live_points', 'npoints', 'npoint', 'Nlive', 'num_live_points']
npoints_equiv_kwargs = ['nlive', 'nlives', 'n_live_points', 'npoints',
'npoint', 'Nlive', 'num_live_points', 'num_particles']
walks_equiv_kwargs = ['walks', 'steps', 'nmcmc']
def reorder_loglikelihoods(self, unsorted_loglikelihoods, unsorted_samples,
@@ -601,6 +605,27 @@ class NestedSampler(Sampler):
else:
return np.nan_to_num(-np.inf)
def _setup_run_directory(self):
"""
If using a temporary directory, the output directory is moved to the
temporary directory.
Used for Dnest4, Pymultinest, and Ultranest.
"""
if self.use_temporary_directory:
temporary_outputfiles_basename = tempfile.TemporaryDirectory().name
self.temporary_outputfiles_basename = temporary_outputfiles_basename
if os.path.exists(self.outputfiles_basename):
distutils.dir_util.copy_tree(self.outputfiles_basename, self.temporary_outputfiles_basename)
check_directory_exists_and_if_not_mkdir(temporary_outputfiles_basename)
self.kwargs["outputfiles_basename"] = self.temporary_outputfiles_basename
logger.info("Using temporary file {}".format(temporary_outputfiles_basename))
else:
check_directory_exists_and_if_not_mkdir(self.outputfiles_basename)
self.kwargs["outputfiles_basename"] = self.outputfiles_basename
logger.info("Using output file {}".format(self.outputfiles_basename))
class MCMCSampler(Sampler):
nwalkers_equiv_kwargs = ['nwalker', 'nwalkers', 'draws', 'Niter']
......
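The _setup_run_directory helper added above centralises logic previously duplicated in the Pymultinest and Ultranest wrappers (their copies are removed further down). In isolation, the temporary-directory branch behaves roughly like this sketch (paths are illustrative):

import distutils.dir_util
import os
import tempfile

outputfiles_basename = "outdir/pm_label/"

# TemporaryDirectory().name yields a fresh path, but the object may be
# garbage collected (deleting the directory), hence the explicit mkdir
tmp = tempfile.TemporaryDirectory().name
if os.path.exists(outputfiles_basename):
    # Resuming: seed the temporary directory with any existing output
    distutils.dir_util.copy_tree(outputfiles_basename, tmp)
os.makedirs(tmp, exist_ok=True)
# The sampler writes into tmp; on completion or interruption the wrapper
# moves tmp back onto outputfiles_basename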
from __future__ import absolute_import
import array
import copy
import numpy as np
@@ -88,8 +89,8 @@ class Cpnest(NestedSampler):
prior_samples = self.priors.sample()
self._update_bounds()
point = LivePoint(
self.names, [prior_samples[name]
for name in self.names])
self.names, array.array(
'f', [prior_samples[name] for name in self.names]))
return point
self._resolve_proposal_functions()
......
import os
import shutil
import distutils.dir_util
import signal
import time
import datetime
import sys
import numpy as np
import pandas as pd
from ..utils import check_directory_exists_and_if_not_mkdir, logger
from .base_sampler import NestedSampler
class _DNest4Model(object):
def __init__(self, log_likelihood_func, from_prior_func, widths, centers, highs, lows):
"""Initialize the DNest4 model.
Args:
log_likelihood_func: function
The loglikelihood function to use during the Nested Sampling run.
from_prior_func: function
The function to use when randomly selecting parameter vectors from the prior space.
widths: array_like
The approximate widths of the prior distributions.
centers: array_like
The approximate center points of the prior distributions.
highs: array_like
The maximum values allowed by the prior distributions.
lows: array_like
The minimum values allowed by the prior distributions.
"""
self._log_likelihood = log_likelihood_func
self._from_prior = from_prior_func
self._widths = widths
self._centers = centers
self._highs = highs
self._lows = lows
self._n_dim = len(widths)
return
def log_likelihood(self, coords):
"""The model's log_likelihood function"""
return self._log_likelihood(coords)
def from_prior(self):
"""The model's function to select random points from the prior space."""
return self._from_prior()
def perturb(self, coords):
"""The perturb function to perform Monte Carlo trial moves."""
idx = np.random.randint(self._n_dim)
coords[idx] += (self._widths[idx] * (np.random.uniform(size=1) - 0.5))
cw = self._widths[idx]
cc = self._centers[idx]
coords[idx] = self.wrap(coords[idx], (cc - 0.5 * cw), cc + 0.5 * cw)
return 0.0
@staticmethod
def wrap(x, minimum, maximum):
if maximum <= minimum:
raise ValueError("maximum {} <= minimum {}, when trying to wrap coordinates".format(maximum, minimum))
return (x - minimum) % (maximum - minimum) + minimum
class DNest4(NestedSampler):
"""
Bilby wrapper of DNest4
Parameters
----------
TBD
Other Parameters
----------------
num_particles: int
The number of points to use in the Nested Sampling active population.
max_num_levels: int
The max number of diffusive likelihood levels that DNest4 should initialize
during the Diffusive Nested Sampling run.
backend: str
The python DNest4 backend for storing the output.
Options are: 'memory' and 'csv'. If 'memory' the
DNest4 outputs are stored in memory during the run. If 'csv' the
DNest4 outputs are written out to files with a CSV format during
the run.
CSV backend may not be functional right now (October 2020)
num_steps: int
The number of MCMC iterations to run
new_level_interval: int
The number of moves to run before creating a new diffusive likelihood level
lam: float
Set the backtracking scale length
beta: float
Set the strength of effect to force the histogram to equal bin counts
seed: int
Set the seed for the C++ random number generator
verbose: bool
If True, prints information during run
"""
default_kwargs = dict(max_num_levels=20, num_steps=500,
new_level_interval=10000, num_per_step=10000,
thread_steps=1, num_particles=1000, lam=10.0,
beta=100, seed=None, verbose=True, outputfiles_basename=None,
backend='memory')
def __init__(self, likelihood, priors, outdir="outdir", label="label", use_ratio=False, plot=False,
exit_code=77, skip_import_verification=False, temporary_directory=True, **kwargs):
super(DNest4, self).__init__(
likelihood=likelihood, priors=priors, outdir=outdir, label=label,
use_ratio=use_ratio, plot=plot, skip_import_verification=skip_import_verification,
exit_code=exit_code, **kwargs)
self.num_particles = self.kwargs["num_particles"]
self.max_num_levels = self.kwargs["max_num_levels"]
self._verbose = self.kwargs["verbose"]
self._backend = self.kwargs["backend"]
self.use_temporary_directory = temporary_directory
self.start_time = np.nan
self.sampler = None
self._information = np.nan
self._last_live_sample_info = np.nan
self._outputfiles_basename = None
self._temporary_outputfiles_basename = None
signal.signal(signal.SIGTERM, self.write_current_state_and_exit)
signal.signal(signal.SIGINT, self.write_current_state_and_exit)
signal.signal(signal.SIGALRM, self.write_current_state_and_exit)
# Get the estimates of the prior distributions' widths and centers.
widths = []
centers = []
highs = []
lows = []
samples = self.priors.sample(size=10000)
for key in self.search_parameter_keys:
pts = samples[key]
low = pts.min()
high = pts.max()
width = high - low
center = (high + low) / 2.0
widths.append(width)
centers.append(center)
highs.append(high)
lows.append(low)
self._widths = np.array(widths)
self._centers = np.array(centers)
self._highs = np.array(highs)
self._lows = np.array(lows)
self._dnest4_model = _DNest4Model(self.log_likelihood, self.get_random_draw_from_prior, self._widths,
self._centers, self._highs, self._lows)
def _set_backend(self):
import dnest4
if self._backend == 'csv':
return dnest4.backends.CSVBackend("{}/dnest4{}/".format(self.outdir, self.label), sep=" ")
else:
return dnest4.backends.MemoryBackend()
def _set_dnest4_kwargs(self):
dnest4_keys = ["num_steps", "new_level_interval", "lam", "beta", "seed"]
self.dnest4_kwargs = {key: self.kwargs[key] for key in dnest4_keys}
def run_sampler(self):
import dnest4
self._set_dnest4_kwargs()
backend = self._set_backend()
self._verify_kwargs_against_default_kwargs()
self._setup_run_directory()
self._check_and_load_sampling_time_file()
self.start_time = time.time()
self.sampler = dnest4.DNest4Sampler(self._dnest4_model, backend=backend)
out = self.sampler.sample(self.max_num_levels,
num_particles=self.num_particles,
**self.dnest4_kwargs)
for i, sample in enumerate(out):
if self._verbose and ((i + 1) % 100 == 0):
stats = self.sampler.postprocess()
logger.info("Iteration: {0} log(Z): {1}".format(i + 1, stats['log_Z']))
self._calculate_and_save_sampling_time()
self._clean_up_run_directory()
stats = self.sampler.postprocess(resample=1)
self.result.log_evidence = stats['log_Z']
self._information = stats['H']
self.result.log_evidence_err = np.sqrt(self._information / self.num_particles)
if self._backend == 'memory':
self._last_live_sample_info = pd.DataFrame(self.sampler.backend.sample_info[-1])
self.result.log_likelihood_evaluations = self._last_live_sample_info['log_likelihood']
self.result.samples = np.array(self.sampler.backend.posterior_samples)
else:
sample_info_path = './' + self.kwargs["outputfiles_basename"] + '/sample_info.txt'
sample_info = np.genfromtxt(sample_info_path, comments='#', names=True)
self.result.log_likelihood_evaluations = sample_info['log_likelihood']
self.result.samples = np.array(self.sampler.backend.posterior_samples)
self.result.sampler_output = out
self.result.outputfiles_basename = self.outputfiles_basename
self.result.sampling_time = datetime.timedelta(seconds=self.total_sampling_time)
self.calc_likelihood_count()
return self.result
def _translate_kwargs(self, kwargs):
if 'num_steps' not in kwargs:
for equiv in self.walks_equiv_kwargs:
if equiv in kwargs:
kwargs['num_steps'] = kwargs.pop(equiv)
def _verify_kwargs_against_default_kwargs(self):
self.outputfiles_basename = self.kwargs.pop("outputfiles_basename", None)
super(DNest4, self)._verify_kwargs_against_default_kwargs()
def _check_and_load_sampling_time_file(self):
self.time_file_path = self.kwargs["outputfiles_basename"] + '/sampling_time.dat'
if os.path.exists(self.time_file_path):
with open(self.time_file_path, 'r') as time_file:
self.total_sampling_time = float(time_file.readline())
else:
self.total_sampling_time = 0
def _calculate_and_save_sampling_time(self):
current_time = time.time()
new_sampling_time = current_time - self.start_time
self.total_sampling_time += new_sampling_time
with open(self.time_file_path, 'w') as time_file:
time_file.write(str(self.total_sampling_time))
self.start_time = current_time
def _clean_up_run_directory(self):
if self.use_temporary_directory:
self._move_temporary_directory_to_proper_path()
self.kwargs["outputfiles_basename"] = self.outputfiles_basename
@property
def outputfiles_basename(self):
return self._outputfiles_basename
@outputfiles_basename.setter
def outputfiles_basename(self, outputfiles_basename):
if outputfiles_basename is None:
outputfiles_basename = "{}/dnest4{}/".format(self.outdir, self.label)
if not outputfiles_basename.endswith("/"):
outputfiles_basename += "/"
check_directory_exists_and_if_not_mkdir(self.outdir)
self._outputfiles_basename = outputfiles_basename
@property
def temporary_outputfiles_basename(self):
return self._temporary_outputfiles_basename
@temporary_outputfiles_basename.setter
def temporary_outputfiles_basename(self, temporary_outputfiles_basename):
if not temporary_outputfiles_basename.endswith("/"):
temporary_outputfiles_basename = "{}/".format(
temporary_outputfiles_basename
)
self._temporary_outputfiles_basename = temporary_outputfiles_basename
if os.path.exists(self.outputfiles_basename):
shutil.copytree(
self.outputfiles_basename, self.temporary_outputfiles_basename
)
def write_current_state_and_exit(self, signum=None, frame=None):
""" Write current state and exit on exit_code """
logger.info(
"Run interrupted by signal {}: checkpoint and exit on {}".format(
signum, self.exit_code
)
)
self._calculate_and_save_sampling_time()
if self.use_temporary_directory:
self._move_temporary_directory_to_proper_path()
sys.exit(self.exit_code)
def _move_temporary_directory_to_proper_path(self):
"""
Move the temporary directory back to the proper path.
Anything in the proper path at this point is removed, including links.
"""
self._copy_temporary_directory_contents_to_proper_path()
shutil.rmtree(self.temporary_outputfiles_basename)
def _copy_temporary_directory_contents_to_proper_path(self):
"""
Copy the temporary directory contents back to the proper path.
Do not delete the temporary directory.
"""
logger.info(
"Overwriting {} with {}".format(
self.outputfiles_basename, self.temporary_outputfiles_basename
)
)
if self.outputfiles_basename.endswith('/'):
outputfiles_basename_stripped = self.outputfiles_basename[:-1]
else:
outputfiles_basename_stripped = self.outputfiles_basename
distutils.dir_util.copy_tree(self.temporary_outputfiles_basename, outputfiles_basename_stripped)
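With the wrapper above registered in IMPLEMENTED_SAMPLERS, DNest4 is selected like any other sampler. A hedged sketch; the model, data, and settings are placeholders, and the dnest4 package must be installed:

import bilby
import numpy as np

def model(x, mu):
    # Placeholder constant-mean model
    return mu * np.ones_like(x)

x = np.linspace(0, 1, 100)
y = np.random.normal(0.5, 0.1, len(x))
likelihood = bilby.core.likelihood.GaussianLikelihood(x=x, y=y, func=model, sigma=0.1)
priors = dict(mu=bilby.core.prior.Uniform(0, 1, name="mu"))

result = bilby.run_sampler(
    likelihood=likelihood,
    priors=priors,
    sampler="dnest4",
    num_steps=500,       # see default_kwargs above
    num_particles=1000,
    backend="memory",    # the CSV backend may not be functional
    outdir="outdir",
    label="dnest4_demo",
)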
from __future__ import absolute_import
import datetime
import dill
import os
@@ -326,10 +324,6 @@ class Dynesty(NestedSampler):
def run_sampler(self):
import dynesty
logger.info("Using dynesty version {}".format(dynesty.__version__))
if self.kwargs['live_points'] is None:
self.kwargs['live_points'] = (
self.get_initial_points_from_prior(
self.kwargs['nlive']))
if self.kwargs.get("sample", "rwalk") == "rwalk":
logger.info(
@@ -351,10 +345,21 @@
self._setup_pool()
self.sampler = dynesty.NestedSampler(
loglikelihood=_log_likelihood_wrapper,
prior_transform=_prior_transform_wrapper,
ndim=self.ndim, **self.sampler_init_kwargs)
if self.resume:
self.resume = self.read_saved_state(continuing=True)
if self.resume:
logger.info('Resume file successfully loaded.')
else:
if self.kwargs['live_points'] is None:
self.kwargs['live_points'] = (
self.get_initial_points_from_prior(self.kwargs['nlive'])
)
self.sampler = dynesty.NestedSampler(
loglikelihood=_log_likelihood_wrapper,
prior_transform=_prior_transform_wrapper,
ndim=self.ndim, **self.sampler_init_kwargs
)
if self.check_point:
out = self._run_external_sampler_with_checkpointing()
@@ -424,10 +429,6 @@
def _run_external_sampler_with_checkpointing(self):
logger.debug("Running sampler with checkpointing")
if self.resume:
resume_file_loaded = self.read_saved_state(continuing=True)
if resume_file_loaded:
logger.info('Resume file successfully loaded.')
old_ncall = self.sampler.ncall
sampler_kwargs = self.sampler_function_kwargs.copy()
@@ -611,7 +612,7 @@
fig = dyplot.traceplot(self.sampler.results, labels=labels)[0]
fig.tight_layout()
fig.savefig(filename)
except (RuntimeError, np.linalg.linalg.LinAlgError, ValueError) as e:
except (RuntimeError, np.linalg.linalg.LinAlgError, ValueError, OverflowError, Exception) as e:
logger.warning(e)
logger.warning('Failed to create dynesty state plot at checkpoint')
finally:
@@ -689,6 +690,16 @@
"""
return self.priors.rescale(self._search_parameter_keys, theta)
def calc_likelihood_count(self):
if self.likelihood_benchmark:
if hasattr(self, 'sampler'):
self.result.num_likelihood_evaluations = \
getattr(self.sampler, 'ncall', 0)
else:
self.result.num_likelihood_evaluations = 0
else:
return None
def sample_rwalk_bilby(args):
""" Modified bilby-implemented version of dynesty.sampling.sample_rwalk """
@@ -756,7 +767,7 @@
# Check proposed point.
v_prop = prior_transform(np.array(u_prop))
logl_prop = loglikelihood(np.array(v_prop))
if logl_prop >= loglstar:
if logl_prop > loglstar:
u = u_prop
v = v_prop
logl = logl_prop
......
@@ -13,7 +13,7 @@ import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from ..utils import logger
from ..utils import logger, check_directory_exists_and_if_not_mkdir
from .base_sampler import SamplerError, MCMCSampler
@@ -517,6 +517,7 @@ class Ptemcee(MCMCSampler):
sys.exit(self.exit_code)
def write_current_state(self, plot=True):
check_directory_exists_and_if_not_mkdir(self.outdir)
checkpoint(
self.iteration,
self.outdir,
......
import importlib
import os
import tempfile
import shutil
import distutils.dir_util
import signal
import time
import datetime
import sys
import numpy as np
@@ -175,7 +175,7 @@ class Pymultinest(NestedSampler):
self._calculate_and_save_sampling_time()
if self.use_temporary_directory:
self._move_temporary_directory_to_proper_path()
os._exit(self.exit_code)
sys.exit(self.exit_code)
def _copy_temporary_directory_contents_to_proper_path(self):
"""
@@ -239,26 +239,6 @@ class Pymultinest(NestedSampler):
self.result.sampling_time = datetime.timedelta(seconds=self.total_sampling_time)
return self.result
def _setup_run_directory(self):
"""
If using a temporary directory, the output directory is moved to the
temporary directory.
"""
if self.use_temporary_directory:
temporary_outputfiles_basename = tempfile.TemporaryDirectory().name
self.temporary_outputfiles_basename = temporary_outputfiles_basename
if os.path.exists(self.outputfiles_basename):
distutils.dir_util.copy_tree(self.outputfiles_basename, self.temporary_outputfiles_basename)
check_directory_exists_and_if_not_mkdir(temporary_outputfiles_basename)
self.kwargs["outputfiles_basename"] = self.temporary_outputfiles_basename
logger.info("Using temporary file {}".format(temporary_outputfiles_basename))
else:
check_directory_exists_and_if_not_mkdir(self.outputfiles_basename)
self.kwargs["outputfiles_basename"] = self.outputfiles_basename
logger.info("Using output file {}".format(self.outputfiles_basename))
def _check_and_load_sampling_time_file(self):
self.time_file_path = self.kwargs["outputfiles_basename"] + '/sampling_time.dat'
if os.path.exists(self.time_file_path):
......
@@ -6,7 +6,6 @@ import inspect
import os
import shutil
import signal
import tempfile
import time
import numpy as np
@@ -62,7 +61,7 @@ class Ultranest(NestedSampler):
log_interval=None,
dlogz=None,
max_iters=None,
update_interval_iter_fraction=0.2,
update_interval_volume_fraction=0.2,
viz_callback=None,
dKL=0.5,
frac_remain=0.01,
@@ -233,7 +232,7 @@ class Ultranest(NestedSampler):
]
else:
keys = [
"update_interval_iter_fraction",
"update_interval_volume_fraction",
"update_interval_ncall",
"log_interval",
"show_status",
@@ -287,6 +286,7 @@ class Ultranest(NestedSampler):
stepsampler = self.kwargs.pop("step_sampler", None)
self._setup_run_directory()
self.kwargs["log_dir"] = self.kwargs.pop("outputfiles_basename")
self._check_and_load_sampling_time_file()
# use reactive nested sampler when no live points are given
@@ -326,30 +326,6 @@ class Ultranest(NestedSampler):
return self.result
def _setup_run_directory(self):
"""
If using a temporary directory, the output directory is moved to the
temporary directory and symlinked back.
"""
if self.use_temporary_directory:
temporary_outputfiles_basename = tempfile.TemporaryDirectory().name
self.temporary_outputfiles_basename = temporary_outputfiles_basename
if os.path.exists(self.outputfiles_basename):
distutils.dir_util.copy_tree(
self.outputfiles_basename, self.temporary_outputfiles_basename
)
check_directory_exists_and_if_not_mkdir(temporary_outputfiles_basename)
self.kwargs["log_dir"] = self.temporary_outputfiles_basename
logger.info(
"Using temporary file {}".format(temporary_outputfiles_basename)
)
else:
check_directory_exists_and_if_not_mkdir(self.outputfiles_basename)
self.kwargs["log_dir"] = self.outputfiles_basename
logger.info("Using output file {}".format(self.outputfiles_basename))
def _clean_up_run_directory(self):
if self.use_temporary_directory:
self._move_temporary_directory_to_proper_path()
......
@@ -4,6 +4,7 @@ from distutils.spawn import find_executable
import logging
import os
import shutil
import sys
from math import fmod
import argparse
import inspect
@@ -19,6 +20,7 @@ import numpy as np
from scipy.interpolate import interp2d
from scipy.special import logsumexp
import pandas as pd
import matplotlib.pyplot as plt
logger = logging.getLogger('bilby')
@@ -976,6 +978,10 @@ class BilbyJsonEncoder(json.JSONEncoder):
def default(self, obj):
from .prior import MultivariateGaussianDist, Prior, PriorDict
from ..gw.prior import HealPixMapPriorDist
if isinstance(obj, np.integer):
return int(obj)
if isinstance(obj, np.floating):
return float(obj)
if isinstance(obj, PriorDict):
return {'__prior_dict__': True, 'content': obj._get_json_dict()}
if isinstance(obj, (MultivariateGaussianDist, HealPixMapPriorDist, Prior)):
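The two new branches make numpy scalar types JSON-serialisable. A quick sketch:

import json
import numpy as np
from bilby.core.utils import BilbyJsonEncoder

payload = {"count": np.int64(3), "snr": np.float32(8.5)}
# Without the encoder this raises TypeError; with it, numpy scalars are
# cast to plain Python int/float before serialisation
print(json.dumps(payload, cls=BilbyJsonEncoder))  # {"count": 3, "snr": 8.5}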
@@ -1160,27 +1166,67 @@ def safe_file_dump(data, filename, module):
def latex_plot_format(func):
"""
Wrap a plotting function to set rcParams so that text renders nicely with
latex and Computer Modern Roman font.
Wrap the plotting function to set rcParams dependent on environment variables.
The rcParams can be set directly from the environment variable `BILBY_STYLE`,
which may point to a matplotlib style file. If `BILBY_STYLE=default` (any
case), a built-in default setup is used; this is the default behaviour. To
avoid setting any rcParams, set `BILBY_STYLE=none`. Occasionally, issues
arise with the latex `mathdefault` command; a fix is to define this command
in the rcParams, and the environment variable `BILBY_MATHDEFAULT` turns this
fix on or off: `BILBY_MATHDEFAULT=1` enables it, and any other choice
(including undefined) disables it. Additionally, BILBY_STYLE and
BILBY_MATHDEFAULT can be passed as arguments to any latex_plot_format-wrapped
plotting function and will be applied directly.
"""
@functools.wraps(func)
def wrapper_decorator(*args, **kwargs):
from matplotlib import rcParams
_old_tex = rcParams["text.usetex"]
_old_serif = rcParams["font.serif"]
_old_family = rcParams["font.family"]
if find_executable("latex"):
rcParams["text.usetex"] = True
if "BILBY_STYLE" in kwargs:
bilby_style = kwargs.pop("BILBY_STYLE")
else:
bilby_style = os.environ.get("BILBY_STYLE", "default")
if "BILBY_MATHDEFAULT" in kwargs:
bilby_mathdefault = kwargs.pop("BILBY_MATHDEFAULT")
else:
bilby_mathdefault = int(os.environ.get("BILBY_MATHDEFAULT", "0"))
if bilby_mathdefault == 1:
logger.debug("Setting mathdefault in the rcParams")
rcParams['text.latex.preamble'] = r'\newcommand{\mathdefault}[1][]{}'
logger.debug("Using BILBY_STYLE={}".format(bilby_style))
if bilby_style.lower() == "none":
return func(*args, **kwargs)
elif os.path.isfile(bilby_style):
plt.style.use(bilby_style)
return func(*args, **kwargs)
elif bilby_style in plt.style.available:
plt.style.use(bilby_style)
return func(*args, **kwargs)
elif bilby_style.lower() == "default":
_old_tex = rcParams["text.usetex"]
_old_serif = rcParams["font.serif"]
_old_family = rcParams["font.family"]
if find_executable("latex"):
rcParams["text.usetex"] = True
else:
rcParams["text.usetex"] = False
rcParams["font.serif"] = "Computer Modern Roman"
rcParams["font.family"] = "serif"
rcParams["text.usetex"] = _old_tex
rcParams["font.serif"] = _old_serif
rcParams["font.family"] = _old_family
return func(*args, **kwargs)
else:
rcParams["text.usetex"] = False
rcParams["font.serif"] = "Computer Modern Roman"
rcParams["font.family"] = "serif"
value = func(*args, **kwargs)
rcParams["text.usetex"] = _old_tex
rcParams["font.serif"] = _old_serif
rcParams["font.family"] = _old_family
return value
logger.debug(
"Environment variable BILBY_STYLE={} not used"
.format(bilby_style)
)
return func(*args, **kwargs)
return wrapper_decorator
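In practice the behaviour documented above is driven by environment variables, so no code changes are needed. A short sketch (the style name is illustrative, and `result` stands for any bilby Result with wrapped plotting methods):

import os

# Use a named matplotlib style and enable the mathdefault workaround
os.environ["BILBY_STYLE"] = "seaborn"  # any plt.style.available entry or a style file path
os.environ["BILBY_MATHDEFAULT"] = "1"

import bilby
# result.plot_corner()                    # picks up the environment settings
# result.plot_corner(BILBY_STYLE="none")  # per-call override via kwargs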
@@ -1224,6 +1270,15 @@ def get_function_path(func):
return func
def loaded_modules_dict():
module_names = sys.modules.keys()
vdict = {}
for key in module_names:
if "." not in key:
vdict[key] = str(getattr(sys.modules[key], "__version__", "N/A"))
return vdict
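This helper snapshots the versions of every top-level module currently imported; it feeds the new "loaded_modules" entry in the run meta-data. A quick sketch of the output shape (version strings are illustrative):

from bilby.core.utils import loaded_modules_dict

versions = loaded_modules_dict()
# Keys are top-level names only ('numpy', never 'numpy.linalg'); values are
# each module's __version__ string, or 'N/A' when undefined, e.g.
# {'sys': 'N/A', 'numpy': '1.19.4', 'bilby': '1.0.4', ...}
print(versions.get("numpy"))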
class IllegalDurationAndSamplingFrequencyException(Exception):
pass
......