Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • john-veitch/bilby
  • duncanmmacleod/bilby
  • colm.talbot/bilby
  • lscsoft/bilby
  • matthew-pitkin/bilby
  • salvatore-vitale/tupak
  • charlie.hoy/bilby
  • bfarr/bilby
  • virginia.demilio/bilby
  • vivien/bilby
  • eric-howell/bilby
  • sebastian-khan/bilby
  • rhys.green/bilby
  • moritz.huebner/bilby
  • joseph.mills/bilby
  • scott.coughlin/bilby
  • matthew.carney/bilby
  • hyungwon.lee/bilby
  • monica.rizzo/bilby
  • christopher-berry/bilby
  • lindsay.demarchi/bilby
  • kaushik.rao/bilby
  • charles.kimball/bilby
  • andrew.matas/bilby
  • juan.calderonbustillo/bilby
  • patrick-meyers/bilby
  • hannah.middleton/bilby
  • eve.chase/bilby
  • grant.meadors/bilby
  • khun.phukon/bilby
  • sumeet.kulkarni/bilby
  • daniel.reardon/bilby
  • cjhaster/bilby
  • sylvia.biscoveanu/bilby
  • james-clark/bilby
  • meg.millhouse/bilby
  • joshua.willis/bilby
  • nikhil.sarin/bilby
  • paul.easter/bilby
  • youngmin/bilby
  • daniel-williams/bilby
  • shanika.galaudage/bilby
  • bruce.edelman/bilby
  • avi.vajpeyi/bilby
  • isobel.romero-shaw/bilby
  • andrew.kim/bilby
  • dominika.zieba/bilby
  • jonathan.davies/bilby
  • marc.arene/bilby
  • srishti.tiwari/bilby-tidal-heating-eccentric
  • aditya.vijaykumar/bilby
  • michael.williams/bilby
  • cecilio.garcia-quiros/bilby
  • rory-smith/bilby
  • maite.mateu-lucena/bilby
  • wushichao/bilby
  • kaylee.desoto/bilby
  • brandon.piotrzkowski/bilby
  • rossella.gamba/bilby
  • hunter.gabbard/bilby
  • deep.chatterjee/bilby
  • tathagata.ghosh/bilby
  • arunava.mukherjee/bilby
  • philip.relton/bilby
  • reed.essick/bilby
  • pawan.gupta/bilby
  • francisco.hernandez/bilby
  • rhiannon.udall/bilby
  • leo.tsukada/bilby
  • will-farr/bilby
  • vijay.varma/bilby
  • jeremy.baier/bilby
  • joshua.brandt/bilby
  • ethan.payne/bilby
  • ka-lok.lo/bilby
  • antoni.ramos-buades/bilby
  • oliviastephany.wilk/bilby
  • jack.heinzel/bilby
  • samson.leong/bilby-psi4
  • viviana.caceres/bilby
  • nadia.qutob/bilby
  • michael-coughlin/bilby
  • hemantakumar.phurailatpam/bilby
  • boris.goncharov/bilby
  • sama.al-shammari/bilby
  • siqi.zhong/bilby
  • jocelyn-read/bilby
  • marc.penuliar/bilby
  • stephanie.letourneau/bilby
  • alexandresebastien.goettel/bilby
  • alec.gunny/bilby
  • serguei.ossokine/bilby
  • pratyusava.baral/bilby
  • sophie.hourihane/bilby
  • eunsub/bilby
  • james.hart/bilby
  • pratyusava.baral/bilby-tg
  • zhaozc/bilby
  • pratyusava.baral/bilby_SoG
  • tomasz.baka/bilby
  • nicogerardo.bers/bilby
  • soumen.roy/bilby
  • isaac.mcmahon/healpix-redundancy
  • asamakai.baker/bilby-frequency-dependent-antenna-pattern-functions
  • anna.puecher/bilby
  • pratyusava.baral/bilby-x-g
  • thibeau.wouters/bilby
  • christian.adamcewicz/bilby
  • raffi.enficiaud/bilby
109 results
Show changes
Showing 3769 additions and 1929 deletions.
import numpy as np
from .base import Prior, PriorException
from .interpolated import Interped
from .analytical import DeltaFunction, PowerLaw, Uniform, LogUniform, \
......@@ -76,7 +74,9 @@ def conditional_prior_factory(prior_class):
float: See superclass
"""
self.least_recently_sampled = self.rescale(np.random.uniform(0, 1, size), **required_variables)
from ..utils.random import rng
self.least_recently_sampled = self.rescale(rng.uniform(0, 1, size), **required_variables)
return self.least_recently_sampled
def rescale(self, val, **required_variables):
......@@ -371,7 +371,8 @@ class DirichletElement(ConditionalBeta):
self._required_variables = [
label + str(ii) for ii in range(order)
]
self.__class__.__name__ = 'Dirichlet'
self.__class__.__name__ = 'DirichletElement'
self.__class__.__qualname__ = 'DirichletElement'
def dirichlet_condition(self, reference_parms, **kwargs):
remaining = 1 - sum(
......
This diff is collapsed.
......@@ -162,11 +162,11 @@ class Interped(Prior):
self._initialize_attributes()
def _initialize_attributes(self):
from scipy.integrate import cumtrapz
from scipy.integrate import cumulative_trapezoid
if np.trapz(self._yy, self.xx) != 1:
logger.debug('Supplied PDF for {} is not normalised, normalising.'.format(self.name))
self._yy /= np.trapz(self._yy, self.xx)
self.YY = cumtrapz(self._yy, self.xx, initial=0)
self.YY = cumulative_trapezoid(self._yy, self.xx, initial=0)
# Need last element of cumulative distribution to be exactly one.
self.YY[-1] = 1
self.probability_density = interp1d(x=self.xx, y=self._yy, bounds_error=False, fill_value=0)
......
This diff is collapsed.
from numbers import Number
import numpy as np
from .base import Prior
......@@ -84,6 +85,7 @@ class SlabSpikePrior(Prior):
=======
array_like: Associated prior value with input value.
"""
original_is_number = isinstance(val, Number)
val = np.atleast_1d(val)
lower_indices = np.where(val < self.inverse_cdf_below_spike)[0]
......@@ -96,6 +98,12 @@ class SlabSpikePrior(Prior):
res[lower_indices] = self._contracted_rescale(val[lower_indices])
res[intermediate_indices] = self.spike_location
res[higher_indices] = self._contracted_rescale(val[higher_indices] - self.spike_height)
if original_is_number:
try:
res = res[0]
except (KeyError, TypeError):
logger.warning("Based on inputs, a number should be output\
but this could not be accessed from what was computed")
return res
def _contracted_rescale(self, val):
......@@ -126,9 +134,16 @@ class SlabSpikePrior(Prior):
=======
array_like: Prior probability of val
"""
original_is_number = isinstance(val, Number)
res = self.slab.prob(val) * self.slab_fraction
res = np.atleast_1d(res)
res[np.where(val == self.spike_location)] = np.inf
if original_is_number:
try:
res = res[0]
except (KeyError, TypeError):
logger.warning("Based on inputs, a number should be output\
but this could not be accessed from what was computed")
return res
def ln_prob(self, val):
......@@ -143,9 +158,16 @@ class SlabSpikePrior(Prior):
=======
array_like: Prior probability of val
"""
original_is_number = isinstance(val, Number)
res = self.slab.ln_prob(val) + np.log(self.slab_fraction)
res = np.atleast_1d(res)
res[np.where(val == self.spike_location)] = np.inf
if original_is_number:
try:
res = res[0]
except (KeyError, TypeError):
logger.warning("Based on inputs, a number should be output\
but this could not be accessed from what was computed")
return res
def cdf(self, val):
......
This diff is collapsed.
import datetime
import inspect
import sys
import datetime
from collections import OrderedDict
import bilby
from ..utils import command_line_args, logger, loaded_modules_dict
from ..prior import PriorDict, DeltaFunction
from .base_sampler import Sampler, SamplingMarginalisedParameterError
from .cpnest import Cpnest
from .dynamic_dynesty import DynamicDynesty
from .dynesty import Dynesty
from .emcee import Emcee
from .kombine import Kombine
from .nessai import Nessai
from .nestle import Nestle
from .polychord import PyPolyChord
from .ptemcee import Ptemcee
from .ptmcmc import PTMCMCSampler
from .pymc3 import Pymc3
from .pymultinest import Pymultinest
from .ultranest import Ultranest
from .fake_sampler import FakeSampler
from .dnest4 import DNest4
from .zeus import Zeus
from bilby.bilby_mcmc import Bilby_MCMC
from ..prior import DeltaFunction, PriorDict
from ..utils import (
command_line_args,
env_package_list,
get_entry_points,
loaded_modules_dict,
logger,
)
from . import proposal
from .base_sampler import Sampler, SamplingMarginalisedParameterError
class ImplementedSamplers:
    """Dictionary-like registry of the implemented samplers.

    Samplers are discovered through the ``bilby.samplers`` entry-point
    group, which covers both natively supported samplers and third-party
    plugins. The class is a singleton: every instantiation returns the
    same shared object.
    """

    _instance = None
    _samplers = get_entry_points("bilby.samplers")

    def keys(self):
        """Iterate over the available sampler names.

        Names are reduced to their simplest form: the ``bilby.`` prefix is
        dropped from native samplers unless a plugin with the identical
        short name exists, in which case the prefixed name is kept to
        disambiguate the two.
        """
        registered = self._samplers.keys()
        simplified = []
        for full_name in registered:
            short_name = full_name.replace("bilby.", "")
            # Keep the prefixed name only when the short name would clash.
            simplified.append(full_name if short_name in registered else short_name)
        return iter(simplified)

    def values(self):
        """Iterate over the sampler entry points.

        Note: each class needs to be loaded with :code:`.load()` before
        being called.
        """
        yield from self._samplers.values()

    def items(self):
        """Iterate over ``(sampler name, entry point)`` pairs.

        Note: each class needs to be loaded with :code:`.load()` before
        being called.
        """
        yield from zip(self.keys(), self.values())

    def valid_keys(self):
        """Iterate over every valid key, including ``bilby.<sampler name>``."""
        full = set(self._samplers.keys())
        short = {name.replace("bilby.", "") for name in full}
        return iter(full | short)

    def __getitem__(self, key):
        # Accept either the plain name or the bilby.-prefixed variant.
        for candidate in (key, f"bilby.{key}"):
            if candidate in self._samplers:
                return self._samplers[candidate]
        raise ValueError(
            f"Sampler {key} is not implemented! "
            f"Available samplers are: {list(self.keys())}"
        )

    def __contains__(self, value):
        return value in set(self.valid_keys())

    def __new__(cls):
        # Lazily create the single shared instance on first construction.
        existing = cls._instance
        if existing is None:
            existing = super().__new__(cls)
            cls._instance = existing
        return existing
IMPLEMENTED_SAMPLERS = ImplementedSamplers()
def get_implemented_samplers():
    """Return the names of every implemented sampler.

    This includes the natively supported samplers (e.g. dynesty) together
    with any additional samplers made available via sampler plugins.

    Returns
    -------
    list
        The list of implemented samplers.
    """
    return [name for name in IMPLEMENTED_SAMPLERS.keys()]
def get_sampler_class(sampler):
    """Resolve a sampler name to its class.

    Works for natively supported samplers (e.g. dynesty) and for any
    samplers provided through sampler plugins.

    Parameters
    ----------
    sampler : str
        The name of the sampler (case-insensitive).

    Returns
    -------
    Sampler
        The sampler class.

    Raises
    ------
    ValueError
        Raised if the sampler is not implemented.
    """
    # Lookup is case-insensitive; the entry point must be loaded to get
    # the actual class object.
    entry_point = IMPLEMENTED_SAMPLERS[sampler.lower()]
    return entry_point.load()
# Mapping from sampler name (lowercase) to its Sampler class.
IMPLEMENTED_SAMPLERS = {
    "bilby_mcmc": Bilby_MCMC,
    "cpnest": Cpnest,
    "dnest4": DNest4,
    "dynamic_dynesty": DynamicDynesty,
    "dynesty": Dynesty,
    "emcee": Emcee,
    "kombine": Kombine,
    "nessai": Nessai,
    "nestle": Nestle,
    "ptemcee": Ptemcee,
    "ptmcmcsampler": PTMCMCSampler,
    "pymc3": Pymc3,
    "pymultinest": Pymultinest,
    "pypolychord": PyPolyChord,
    "ultranest": Ultranest,
    "zeus": Zeus,
    "fake_sampler": FakeSampler,
}
if command_line_args.sampler_help:
sampler = command_line_args.sampler_help
if sampler in IMPLEMENTED_SAMPLERS:
sampler_class = IMPLEMENTED_SAMPLERS[sampler]
print('Help for sampler "{}":'.format(sampler))
sampler_class = IMPLEMENTED_SAMPLERS[sampler].load()
print(f'Help for sampler "{sampler}":')
print(sampler_class.__doc__)
else:
if sampler == "None":
......@@ -59,8 +134,8 @@ if command_line_args.sampler_help:
"the name of the sampler"
)
else:
print("Requested sampler {} not implemented".format(sampler))
print("Available samplers = {}".format(IMPLEMENTED_SAMPLERS))
print(f"Requested sampler {sampler} not implemented")
print(f"Available samplers = {get_implemented_samplers()}")
sys.exit()
......@@ -82,7 +157,7 @@ def run_sampler(
gzip=False,
result_class=None,
npool=1,
**kwargs
**kwargs,
):
"""
The primary interface to easy parameter estimation
......@@ -145,9 +220,7 @@ def run_sampler(
An object containing the results
"""
logger.info(
"Running for label '{}', output will be saved to '{}'".format(label, outdir)
)
logger.info(f"Running for label '{label}', output will be saved to '{outdir}'")
if clean:
command_line_args.clean = clean
......@@ -161,12 +234,12 @@ def run_sampler(
_check_marginalized_parameters_not_sampled(likelihood, priors)
if type(priors) in [dict, OrderedDict]:
if type(priors) == dict:
priors = PriorDict(priors)
elif isinstance(priors, PriorDict):
pass
else:
raise ValueError("Input priors not understood")
raise ValueError("Input priors not understood should be dict or PriorDict")
priors.fill_priors(likelihood, default_priors_file=default_priors_file)
......@@ -175,8 +248,9 @@ def run_sampler(
meta_data = dict()
likelihood.label = label
likelihood.outdir = outdir
meta_data['likelihood'] = likelihood.meta_data
meta_data["likelihood"] = likelihood.meta_data
meta_data["loaded_modules"] = loaded_modules_dict()
meta_data["environment_packages"] = env_package_list(as_dataframe=True)
if command_line_args.bilby_zero_likelihood_mode:
from bilby.core.likelihood import ZeroLikelihood
......@@ -186,24 +260,20 @@ def run_sampler(
if isinstance(sampler, Sampler):
pass
elif isinstance(sampler, str):
if sampler.lower() in IMPLEMENTED_SAMPLERS:
sampler_class = IMPLEMENTED_SAMPLERS[sampler.lower()]
sampler = sampler_class(
likelihood,
priors=priors,
outdir=outdir,
label=label,
injection_parameters=injection_parameters,
meta_data=meta_data,
use_ratio=use_ratio,
plot=plot,
result_class=result_class,
npool=npool,
**kwargs
)
else:
print(IMPLEMENTED_SAMPLERS)
raise ValueError("Sampler {} not yet implemented".format(sampler))
sampler_class = get_sampler_class(sampler)
sampler = sampler_class(
likelihood,
priors=priors,
outdir=outdir,
label=label,
injection_parameters=injection_parameters,
meta_data=meta_data,
use_ratio=use_ratio,
plot=plot,
result_class=result_class,
npool=npool,
**kwargs,
)
elif inspect.isclass(sampler):
sampler = sampler.__init__(
likelihood,
......@@ -215,12 +285,12 @@ def run_sampler(
injection_parameters=injection_parameters,
meta_data=meta_data,
npool=npool,
**kwargs
**kwargs,
)
else:
raise ValueError(
"Provided sampler should be a Sampler object or name of a known "
"sampler: {}.".format(", ".join(IMPLEMENTED_SAMPLERS.keys()))
f"sampler: {get_implemented_samplers()}."
)
if sampler.cached_result:
......@@ -241,44 +311,46 @@ def run_sampler(
elif isinstance(result.sampling_time, (float, int)):
result.sampling_time = datetime.timedelta(result.sampling_time)
logger.info('Sampling time: {}'.format(result.sampling_time))
logger.info(f"Sampling time: {result.sampling_time}")
# Convert sampling time into seconds
result.sampling_time = result.sampling_time.total_seconds()
if sampler.use_ratio:
result.log_noise_evidence = likelihood.noise_log_likelihood()
result.log_bayes_factor = result.log_evidence
result.log_evidence = \
result.log_bayes_factor + result.log_noise_evidence
result.log_evidence = result.log_bayes_factor + result.log_noise_evidence
else:
result.log_noise_evidence = likelihood.noise_log_likelihood()
result.log_bayes_factor = \
result.log_evidence - result.log_noise_evidence
result.log_bayes_factor = result.log_evidence - result.log_noise_evidence
if None not in [result.injection_parameters, conversion_function]:
result.injection_parameters = conversion_function(
result.injection_parameters)
result.injection_parameters
)
# Initial save of the sampler in case of failure in samples_to_posterior
if save:
result.save_to_file(extension=save, gzip=gzip)
result.save_to_file(extension=save, gzip=gzip, outdir=outdir)
if None not in [result.injection_parameters, conversion_function]:
result.injection_parameters = conversion_function(result.injection_parameters)
# Check if the posterior has already been created
if getattr(result, "_posterior", None) is None:
result.samples_to_posterior(likelihood=likelihood, priors=result.priors,
conversion_function=conversion_function,
npool=npool)
result.samples_to_posterior(
likelihood=likelihood,
priors=result.priors,
conversion_function=conversion_function,
npool=npool,
)
if save:
# The overwrite here ensures we overwrite the initially stored data
result.save_to_file(overwrite=True, extension=save, gzip=gzip)
result.save_to_file(overwrite=True, extension=save, gzip=gzip, outdir=outdir)
if plot:
result.plot_corner()
logger.info("Summary of results:\n{}".format(result))
logger.info(f"Summary of results:\n{result}")
return result
......@@ -287,7 +359,5 @@ def _check_marginalized_parameters_not_sampled(likelihood, priors):
if key in priors:
if not isinstance(priors[key], (float, DeltaFunction)):
raise SamplingMarginalisedParameterError(
"Likelihood is {} marginalized but you are trying to sample in {}. ".format(
key, key
)
f"Likelihood is {key} marginalized but you are trying to sample in {key}. "
)
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
import numpy as np
from .base_sampler import Sampler
from ..result import read_in_result
from .base_sampler import Sampler
class FakeSampler(Sampler):
......@@ -17,17 +16,40 @@ class FakeSampler(Sampler):
sample_file: str
A string pointing to the posterior data file to be loaded.
"""
default_kwargs = dict(verbose=True, logl_args=None, logl_kwargs=None,
print_progress=True)
def __init__(self, likelihood, priors, sample_file, outdir='outdir',
label='label', use_ratio=False, plot=False,
injection_parameters=None, meta_data=None, result_class=None,
**kwargs):
super(FakeSampler, self).__init__(likelihood=likelihood, priors=priors, outdir=outdir, label=label,
use_ratio=False, plot=False, skip_import_verification=True,
injection_parameters=None, meta_data=None, result_class=None,
**kwargs)
sampler_name = "fake_sampler"
default_kwargs = dict(
verbose=True, logl_args=None, logl_kwargs=None, print_progress=True
)
def __init__(
self,
likelihood,
priors,
sample_file,
outdir="outdir",
label="label",
use_ratio=False,
plot=False,
injection_parameters=None,
meta_data=None,
result_class=None,
**kwargs
):
super(FakeSampler, self).__init__(
likelihood=likelihood,
priors=priors,
outdir=outdir,
label=label,
use_ratio=False,
plot=False,
skip_import_verification=True,
injection_parameters=None,
meta_data=None,
result_class=None,
**kwargs
)
self._read_parameter_list_from_file(sample_file)
self.result.outdir = outdir
self.result.label = label
......@@ -41,7 +63,7 @@ class FakeSampler(Sampler):
def run_sampler(self):
"""Compute the likelihood for the list of parameter space points."""
self.sampler = 'fake_sampler'
self.sampler = "fake_sampler"
# Flushes the output to force a line break
if self.kwargs["verbose"]:
......@@ -59,8 +81,12 @@ class FakeSampler(Sampler):
likelihood_ratios.append(logl)
if self.kwargs["verbose"]:
print(self.likelihood.parameters['log_likelihood'], likelihood_ratios[-1],
self.likelihood.parameters['log_likelihood'] - likelihood_ratios[-1])
print(
self.likelihood.parameters["log_likelihood"],
likelihood_ratios[-1],
self.likelihood.parameters["log_likelihood"]
- likelihood_ratios[-1],
)
self.result.log_likelihood_evaluations = np.array(likelihood_ratios)
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.