Compare revisions

Changes are shown as if the source revision was being merged into the target revision.
Commits on Source (26)
Showing with 1150 additions and 1529 deletions
......@@ -30,6 +30,7 @@ Isobel Marguarethe Romero-Shaw
Jade Powell
James A Clark
John Veitch
Joshua Brandt
Katerina Chatziioannou
Kaylee de Soto
Khun Sang Phukon
......
......@@ -40,7 +40,6 @@ class DeltaFunction(Prior):
=======
float: Rescaled probability, equivalent to peak
"""
self.test_valid_for_rescaling(val)
return self.peak * val ** 0
def prob(self, val):
......@@ -105,7 +104,6 @@ class PowerLaw(Prior):
=======
Union[float, array_like]: Rescaled probability
"""
self.test_valid_for_rescaling(val)
if self.alpha == -1:
return self.minimum * np.exp(val * np.log(self.maximum / self.minimum))
else:
......@@ -206,7 +204,6 @@ class Uniform(Prior):
=======
Union[float, array_like]: Rescaled probability
"""
self.test_valid_for_rescaling(val)
return self.minimum + val * (self.maximum - self.minimum)
def prob(self, val):
......@@ -314,7 +311,6 @@ class SymmetricLogUniform(Prior):
=======
Union[float, array_like]: Rescaled probability
"""
self.test_valid_for_rescaling(val)
if isinstance(val, (float, int)):
if val < 0.5:
return -self.maximum * np.exp(-2 * val * np.log(self.maximum / self.minimum))
......@@ -401,7 +397,6 @@ class Cosine(Prior):
This maps to the inverse CDF. This has been analytically solved for this case.
"""
self.test_valid_for_rescaling(val)
norm = 1 / (np.sin(self.maximum) - np.sin(self.minimum))
return np.arcsin(val / norm + np.sin(self.minimum))
......@@ -456,7 +451,6 @@ class Sine(Prior):
This maps to the inverse CDF. This has been analytically solved for this case.
"""
self.test_valid_for_rescaling(val)
norm = 1 / (np.cos(self.minimum) - np.cos(self.maximum))
return np.arccos(np.cos(self.minimum) - val / norm)
......@@ -515,7 +509,6 @@ class Gaussian(Prior):
This maps to the inverse CDF. This has been analytically solved for this case.
"""
self.test_valid_for_rescaling(val)
return self.mu + erfinv(2 * val - 1) * 2 ** 0.5 * self.sigma
def prob(self, val):
......@@ -602,7 +595,6 @@ class TruncatedGaussian(Prior):
This maps to the inverse CDF. This has been analytically solved for this case.
"""
self.test_valid_for_rescaling(val)
return erfinv(2 * val * self.normalisation + erf(
(self.minimum - self.mu) / 2 ** 0.5 / self.sigma)) * 2 ** 0.5 * self.sigma + self.mu
......@@ -695,7 +687,6 @@ class LogNormal(Prior):
This maps to the inverse CDF. This has been analytically solved for this case.
"""
self.test_valid_for_rescaling(val)
return np.exp(self.mu + np.sqrt(2 * self.sigma ** 2) * erfinv(2 * val - 1))
def prob(self, val):
......@@ -790,7 +781,6 @@ class Exponential(Prior):
This maps to the inverse CDF. This has been analytically solved for this case.
"""
self.test_valid_for_rescaling(val)
return -self.mu * log1p(-val)
def prob(self, val):
......@@ -887,7 +877,6 @@ class StudentT(Prior):
This maps to the inverse CDF. This has been analytically solved for this case.
"""
self.test_valid_for_rescaling(val)
if isinstance(val, (float, int)):
if val == 0:
rescaled = -np.inf
......@@ -977,7 +966,6 @@ class Beta(Prior):
This maps to the inverse CDF. This has been analytically solved for this case.
"""
self.test_valid_for_rescaling(val)
return btdtri(self.alpha, self.beta, val) * (self.maximum - self.minimum) + self.minimum
def prob(self, val):
......@@ -1070,7 +1058,6 @@ class Logistic(Prior):
This maps to the inverse CDF. This has been analytically solved for this case.
"""
self.test_valid_for_rescaling(val)
if isinstance(val, (float, int)):
if val == 0:
rescaled = -np.inf
......@@ -1151,7 +1138,6 @@ class Cauchy(Prior):
This maps to the inverse CDF. This has been analytically solved for this case.
"""
self.test_valid_for_rescaling(val)
rescaled = self.alpha + self.beta * np.tan(np.pi * (val - 0.5))
if isinstance(val, (float, int)):
if val == 1:
......@@ -1233,7 +1219,6 @@ class Gamma(Prior):
This maps to the inverse CDF. This has been analytically solved for this case.
"""
self.test_valid_for_rescaling(val)
return gammaincinv(self.k, val) * self.theta
def prob(self, val):
......@@ -1385,8 +1370,6 @@ class FermiDirac(Prior):
.. [1] M. Pitkin, M. Isi, J. Veitch & G. Woan, `arXiv:1705.08978v1
<https://arxiv.org/abs/1705.08978v1>`_, 2017.
"""
self.test_valid_for_rescaling(val)
inv = (-np.exp(-1. * self.r) + (1. + np.exp(self.r)) ** -val +
np.exp(-1. * self.r) * (1. + np.exp(self.r)) ** -val)
......
......@@ -202,23 +202,6 @@ class Prior(object):
"""
return (val >= self.minimum) & (val <= self.maximum)
@staticmethod
def test_valid_for_rescaling(val):
"""Test if 0 < val < 1
Parameters
==========
val: Union[float, int, array_like]
Raises
=======
ValueError: If val is not between 0 and 1
"""
valarray = np.atleast_1d(val)
tests = (valarray < 0) + (valarray > 1)
if np.any(tests):
raise ValueError("Number to be rescaled should be in [0, 1]")
def __repr__(self):
"""Overrides the special method __repr__.
......
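The removals above are behavioural: Prior.rescale no longer validates its input, so values outside [0, 1] extrapolate instead of raising. A minimal sketch, assuming the Uniform prior shown above:

from bilby.core.prior import Uniform

prior = Uniform(minimum=0, maximum=10)
prior.rescale(0.5)  # 5.0, unchanged
prior.rescale(1.5)  # 15.0; previously raised ValueError("Number to be rescaled should be in [0, 1]")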
......@@ -86,7 +86,6 @@ class Interped(Prior):
This maps to the inverse CDF. This is done using interpolation.
"""
self.test_valid_for_rescaling(val)
rescaled = self.inverse_cumulative_distribution(val)
if rescaled.shape == ():
rescaled = float(rescaled)
......
......@@ -172,7 +172,7 @@ class BaseJointPriorDist(object):
raise ValueError("Array is the wrong shape")
# check sample(s) is within bounds
outbounds = np.ones(samp.shape[0], dtype=np.bool)
outbounds = np.ones(samp.shape[0], dtype=bool)
for s, bound in zip(samp.T, self.bounds.values()):
outbounds = (s < bound[0]) | (s > bound[1])
if np.any(outbounds):
......@@ -630,7 +630,7 @@ class MultivariateGaussianDist(BaseJointPriorDist):
elif isinstance(self.__dict__[key], (np.ndarray, list)):
thisarr = np.asarray(self.__dict__[key])
otherarr = np.asarray(other.__dict__[key])
if thisarr.dtype == np.float and otherarr.dtype == np.float:
if thisarr.dtype == float and otherarr.dtype == float:
fin1 = np.isfinite(np.asarray(self.__dict__[key]))
fin2 = np.isfinite(np.asarray(other.__dict__[key]))
if not np.array_equal(fin1, fin2):
......@@ -710,7 +710,6 @@ class JointPrior(Prior):
A sample from the prior parameter.
"""
self.test_valid_for_rescaling(val)
self.dist.rescale_parameters[self.name] = val
if self.dist.filled_rescale():
......
......@@ -446,6 +446,8 @@ class Sampler(object):
==========
theta: array_like
Parameter values at which to evaluate likelihood
warning: bool
Whether or not to print a warning
Returns
=======
......@@ -453,14 +455,19 @@ class Sampler(object):
True if the likelihood and prior are finite, false otherwise
"""
bad_values = [np.inf, np.nan_to_num(np.inf), np.nan]
if abs(self.log_prior(theta)) in bad_values:
log_p = self.log_prior(theta)
log_l = self.log_likelihood(theta)
return \
self._check_bad_value(val=log_p, warning=warning, theta=theta, label='prior') and \
self._check_bad_value(val=log_l, warning=warning, theta=theta, label='likelihood')
@staticmethod
def _check_bad_value(val, warning, theta, label):
val = np.abs(val)
bad_values = [np.inf, np.nan_to_num(np.inf)]
if val in bad_values or np.isnan(val):
if warning:
logger.warning('Prior draw {} has inf prior'.format(theta))
return False
if abs(self.log_likelihood(theta)) in bad_values:
if warning:
logger.warning('Prior draw {} has inf likelihood'.format(theta))
logger.warning(f'Prior draw {theta} has inf {label}')
return False
return True
......
......@@ -375,7 +375,6 @@ class Dynesty(NestedSampler):
dill.dump(out, file)
self._generate_result(out)
self.calc_likelihood_count()
self.result.sampling_time = self.sampling_time
if self.plot:
......@@ -385,7 +384,9 @@ class Dynesty(NestedSampler):
def _generate_result(self, out):
import dynesty
weights = np.exp(out['logwt'] - out['logz'][-1])
from scipy.special import logsumexp
logwts = out["logwt"]
weights = np.exp(logwts - out['logz'][-1])
nested_samples = DataFrame(
out.samples, columns=self.search_parameter_keys)
nested_samples['weights'] = weights
......@@ -398,6 +399,16 @@ class Dynesty(NestedSampler):
self.result.log_evidence = out.logz[-1]
self.result.log_evidence_err = out.logzerr[-1]
self.result.information_gain = out.information[-1]
self.result.num_likelihood_evaluations = getattr(self.sampler, 'ncall', 0)
logneff = logsumexp(logwts) * 2 - logsumexp(logwts * 2)
neffsamples = int(np.exp(logneff))
self.result.meta_data["run_statistics"] = dict(
nlikelihood=self.result.num_likelihood_evaluations,
neffsamples=neffsamples,
sampling_time_s=self.sampling_time.seconds,
ncores=self.kwargs.get("queue_size", 1)
)
def _run_nested_wrapper(self, kwargs):
""" Wrapper function to run_nested
......@@ -703,16 +714,6 @@ class Dynesty(NestedSampler):
"""
return self.priors.rescale(self._search_parameter_keys, theta)
def calc_likelihood_count(self):
if self.likelihood_benchmark:
if hasattr(self, 'sampler'):
self.result.num_likelihood_evaluations = \
getattr(self.sampler, 'ncall', 0)
else:
self.result.num_likelihood_evaluations = 0
else:
return None
def sample_rwalk_bilby(args):
""" Modified bilby-implemented version of dynesty.sampling.sample_rwalk """
......
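The logneff expression added to _generate_result above is the Kish effective sample size, n_eff = (sum(w))**2 / sum(w**2), evaluated in log space for numerical stability. A self-contained check of the identity (plain numpy/scipy, no bilby required):

import numpy as np
from scipy.special import logsumexp

logwts = np.random.uniform(-10, 0, 1000)  # stand-in for out["logwt"]
weights = np.exp(logwts)
neff_direct = np.sum(weights) ** 2 / np.sum(weights ** 2)  # direct Kish estimate
logneff = logsumexp(logwts) * 2 - logsumexp(logwts * 2)  # log-space form used above
assert np.isclose(np.exp(logneff), neff_direct)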
......@@ -92,13 +92,15 @@ class Ptemcee(MCMCSampler):
is not recommended for cases where tau is large.
ignore_keys_for_tau: str
A pattern used to ignore keys in estimating the autocorrelation time.
pos0: str, list ("prior")
pos0: str, list, np.ndarray
If a string, one of "prior" or "minimize". For "prior", the initial
positions of the sampler are drawn from the prior. If "minimize",
a scipy.optimize step is applied to all parameters a number of times.
The walkers are then initialized from the range of values obtained.
If a list, for the keys in the list the optimization step is applied,
otherwise the initial points are drawn from the prior.
otherwise the initial points are drawn from the prior. If a numpy array,
the shape should be (ntemps, nwalkers, ndim).
niterations_per_check: int (5)
The number of iteration steps to take before checking ACT. This
effectively pre-thins the chains. Larger values reduce the per-eval
......@@ -363,6 +365,17 @@ class Ptemcee(MCMCSampler):
)
return pos0
def get_pos0_from_array(self):
if self.pos0.shape != (self.ntemps, self.nwalkers, self.ndim):
raise ValueError(
"Shape of starting array should be (ntemps, nwalkers, ndim). "
"In this case that is ({}, {}, {}), got {}".format(
self.ntemps, self.nwalkers, self.ndim, self.pos0.shape
)
)
else:
return self.pos0
def setup_sampler(self):
""" Either initialize the sampler or read in the resume file """
import ptemcee
......@@ -446,6 +459,8 @@ class Ptemcee(MCMCSampler):
return self.get_pos0_from_minimize()
elif isinstance(self.pos0, list):
return self.get_pos0_from_minimize(minimize_list=self.pos0)
elif isinstance(self.pos0, np.ndarray):
return self.get_pos0_from_array()
else:
raise SamplerError("pos0={} not implemented".format(self.pos0))
......
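With get_pos0_from_array wired in above, ptemcee runs can be seeded with explicit walker positions. A usage sketch under stated assumptions; the toy likelihood is illustrative and not part of this MR:

import numpy as np
import bilby

class UnitGaussianLikelihood(bilby.Likelihood):
    # toy likelihood: a standard normal in the single parameter x
    def __init__(self):
        super().__init__(parameters={"x": None})

    def log_likelihood(self):
        return -0.5 * self.parameters["x"] ** 2

ntemps, nwalkers, ndim = 2, 100, 1  # pos0 must match these settings
pos0 = np.random.uniform(0, 1, (ntemps, nwalkers, ndim))
result = bilby.run_sampler(
    likelihood=UnitGaussianLikelihood(),
    priors=dict(x=bilby.core.prior.Uniform(0, 1, "x")),
    sampler="ptemcee",
    ntemps=ntemps,
    nwalkers=nwalkers,
    pos0=pos0,  # a wrong shape raises the ValueError added above
)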
This diff is collapsed.
from .calculus import *
from .cmd import *
from .colors import *
from .constants import *
from .conversion import *
from .counter import *
from .docs import *
from .introspection import *
from .io import *
from .logger import *
from .plotting import *
from .progress import *
from .samples import *
from .series import *
# Instantiate the default argument parser at runtime
command_line_args, command_line_parser = set_up_command_line_arguments()
# Instantiate the default logging
setup_logger(print_version=False, log_level=command_line_args.log_level)
from numbers import Number
import numpy as np
from scipy.interpolate import interp2d
from scipy.special import logsumexp
from .logger import logger
def derivatives(vals, func, releps=1e-3, abseps=None, mineps=1e-9, reltol=1e-3,
epsscale=0.5, nonfixedidx=None):
"""
Calculate the partial derivatives of a function at a set of values. The
derivatives are calculated using the central difference, using an iterative
method to check that the values converge as step size decreases.
Parameters
==========
vals: array_like
A set of values, that are passed to a function, at which to calculate
the gradient of that function
func:
A function that takes in an array of values.
releps: float, array_like, 1e-3
The initial relative step size for calculating the derivative.
abseps: float, array_like, None
The initial absolute step size for calculating the derivative.
This overrides `releps` if set; if it is None then `releps` is used.
mineps: float, 1e-9
The minimum relative step size at which to stop iterations if no
convergence is achieved.
reltol: float, 1e-3
The relative tolerance between successive derivative estimates at which
to accept convergence.
epsscale: float, 0.5
The factor by which releps is scaled in each iteration.
nonfixedidx: array_like, None
An array of indices in `vals` that are _not_ fixed values and therefore
can have derivatives taken. If `None` then derivatives of all values
are calculated.
Returns
=======
grads: array_like
An array of gradients for each non-fixed value.
"""
if nonfixedidx is None:
nonfixedidx = range(len(vals))
if len(nonfixedidx) > len(vals):
raise ValueError("To many non-fixed values")
if max(nonfixedidx) >= len(vals) or min(nonfixedidx) < 0:
raise ValueError("Non-fixed indexes contain non-existant indices")
grads = np.zeros(len(nonfixedidx))
# maximum number of times the gradient can change sign
flipflopmax = 10.
# set steps
if abseps is None:
if isinstance(releps, float):
eps = np.abs(vals) * releps
eps[eps == 0.] = releps # if any values are zero set eps to releps
teps = releps * np.ones(len(vals))
elif isinstance(releps, (list, np.ndarray)):
if len(releps) != len(vals):
raise ValueError("Problem with input relative step sizes")
eps = np.multiply(np.abs(vals), releps)
eps[eps == 0.] = np.array(releps)[eps == 0.]
teps = releps
else:
raise RuntimeError("Relative step sizes are not a recognised type!")
else:
if isinstance(abseps, float):
eps = abseps * np.ones(len(vals))
elif isinstance(abseps, (list, np.ndarray)):
if len(abseps) != len(vals):
raise ValueError("Problem with input absolute step sizes")
eps = np.array(abseps)
else:
raise RuntimeError("Absolute step sizes are not a recognised type!")
teps = eps
# for each value in vals calculate the gradient
count = 0
for i in nonfixedidx:
# initial parameter diffs
leps = eps[i]
cureps = teps[i]
flipflop = 0
# get central finite difference
fvals = np.copy(vals)
bvals = np.copy(vals)
# central difference
fvals[i] += 0.5 * leps # change forwards distance to half eps
bvals[i] -= 0.5 * leps # change backwards distance to half eps
cdiff = (func(fvals) - func(bvals)) / leps
while 1:
fvals[i] -= 0.5 * leps # remove old step
bvals[i] += 0.5 * leps
# change the difference by a factor of two
cureps *= epsscale
if cureps < mineps or flipflop > flipflopmax:
# if no convergence set flat derivative (TODO: check if there is a better thing to do instead)
logger.warning("Derivative calculation did not converge: setting flat derivative.")
grads[count] = 0.
break
leps *= epsscale
# central difference
fvals[i] += 0.5 * leps # change forwards distance to half eps
bvals[i] -= 0.5 * leps # change backwards distance to half eps
cdiffnew = (func(fvals) - func(bvals)) / leps
if cdiffnew == cdiff:
grads[count] = cdiff
break
# check whether previous diff and current diff are the same within reltol
rat = (cdiff / cdiffnew)
if np.isfinite(rat) and rat > 0.:
# gradient has not changed sign
if np.abs(1. - rat) < reltol:
grads[count] = cdiffnew
break
else:
cdiff = cdiffnew
continue
else:
cdiff = cdiffnew
flipflop += 1
continue
count += 1
return grads
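A minimal usage sketch for derivatives, checked against the analytic partials of f(x, y) = x**2 + y**3 (assuming the star-import above re-exports it as bilby.core.utils.derivatives):

from bilby.core.utils import derivatives

grads = derivatives(vals=[1.0, 2.0], func=lambda v: v[0] ** 2 + v[1] ** 3)
print(grads)  # approximately [2., 12.], since df/dx = 2x and df/dy = 3y**2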
def logtrapzexp(lnf, dx):
"""
Perform trapezium rule integration for the logarithm of a function on a regular grid.
Parameters
==========
lnf: array_like
A :class:`numpy.ndarray` of values that are the natural logarithm of a function
dx: Union[array_like, float]
A :class:`numpy.ndarray` of steps sizes between values in the function, or a
single step size value.
Returns
=======
The natural logarithm of the area under the function.
"""
return np.log(dx / 2.) + logsumexp([logsumexp(lnf[:-1]), logsumexp(lnf[1:])])
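For a scalar step size, logtrapzexp agrees with numpy's trapezium rule applied to exp(lnf); a quick check:

import numpy as np
from bilby.core.utils import logtrapzexp

x = np.linspace(0, 1, 101)
lnf = -x ** 2  # natural log of the integrand
dx = x[1] - x[0]
assert np.isclose(logtrapzexp(lnf, dx), np.log(np.trapz(np.exp(lnf), dx=dx)))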
class UnsortedInterp2d(interp2d):
def __call__(self, x, y, dx=0, dy=0, assume_sorted=False):
""" Modified version of the interp2d call method.
This avoids the outer product that is done when two numpy
arrays are passed.
Parameters
==========
x: See superclass
y: See superclass
dx: See superclass
dy: See superclass
assume_sorted: bool, optional
This is just a placeholder to prevent a warning.
Overwriting this will not do anything.
Returns
=======
array_like: See superclass
"""
from scipy.interpolate.dfitpack import bispeu
x, y = self._sanitize_inputs(x, y)
out_of_bounds_x = (x < self.x_min) | (x > self.x_max)
out_of_bounds_y = (y < self.y_min) | (y > self.y_max)
bad = out_of_bounds_x | out_of_bounds_y
if isinstance(x, Number) and isinstance(y, Number):
if bad:
output = self.fill_value
ier = 0
else:
output, ier = bispeu(*self.tck, x, y)
output = float(output)
else:
output = np.empty_like(x)
output[bad] = self.fill_value
if np.any(~bad):
output[~bad], ier = bispeu(*self.tck, x[~bad], y[~bad])
else:
ier = 0
if ier == 10:
raise ValueError("Invalid input data")
elif ier:
raise TypeError("An error occurred")
return output
@staticmethod
def _sanitize_inputs(x, y):
if isinstance(x, np.ndarray) and x.size == 1:
x = float(x)
if isinstance(y, np.ndarray) and y.size == 1:
y = float(y)
if isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
if x.shape != y.shape:
raise ValueError(
"UnsortedInterp2d received unequally shaped arrays"
)
elif isinstance(x, np.ndarray) and not isinstance(y, np.ndarray):
y = y * np.ones_like(x)
elif not isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
x = x * np.ones_like(y)
return x, y
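A sketch of the behaviour this subclass changes, assuming a scipy version that still ships interp2d: two equal-length arrays give pointwise values rather than the outer-product grid scipy would normally return.

import numpy as np
from bilby.core.utils import UnsortedInterp2d

xx = np.linspace(0, 1, 10)
yy = np.linspace(0, 1, 10)
zz = np.outer(xx, yy)
interp = UnsortedInterp2d(xx, yy, zz)
print(interp(np.array([0.2, 0.5]), np.array([0.3, 0.7])))  # shape (2,): one value per (x, y) pair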
import argparse
import logging
import os
import subprocess
from .logger import logger
def set_up_command_line_arguments():
""" Sets up command line arguments that can be used to modify how scripts are run.
Returns
=======
command_line_args, command_line_parser: tuple
The command_line_args is a Namespace of the command line arguments while
the command_line_parser can be given to a new `argparse.ArgumentParser`
as a parent object from which to inherit.
Notes
=====
The command line arguments are passed initially at runtime, but this parser
does not have a `--help` option (i.e., the command line options are
available for any script which includes `import bilby`, but no help command
is available). This is done to avoid conflicts with child argparse routines
(see the example below).
Examples
========
In the following example we demonstrate how to setup a custom command line for a
project which uses bilby.
.. code-block:: python
# Here we import bilby, which initialises and parses the default command-line args
>>> import bilby
# The command line arguments can then be accessed via
>>> bilby.core.utils.command_line_args
Namespace(clean=False, log_level=20, quiet=False)
# Next, we import argparse and define a new argparse object
>>> import argparse
>>> parser = argparse.ArgumentParser(parents=[bilby.core.utils.command_line_parser])
>>> parser.add_argument('--argument', type=int, default=1)
>>> args = parser.parse_args()
>>> print(args)
Namespace(clean=False, log_level=20, quiet=False, argument=1)
Placing these lines into a script, you'll be able to pass in the usual bilby default
arguments, in addition to `--argument`. To see a list of all options, call the script
with `--help`.
"""
try:
parser = argparse.ArgumentParser(
description="Command line interface for bilby scripts",
add_help=False, allow_abbrev=False)
except TypeError:
parser = argparse.ArgumentParser(
description="Command line interface for bilby scripts",
add_help=False)
parser.add_argument("-v", "--verbose", action="store_true",
help=("Increase output verbosity [logging.DEBUG]." +
" Overridden by script level settings"))
parser.add_argument("-q", "--quiet", action="store_true",
help=("Decrease output verbosity [logging.WARNING]." +
" Overridden by script level settings"))
parser.add_argument("-c", "--clean", action="store_true",
help="Force clean data, never use cached data")
parser.add_argument("-u", "--use-cached", action="store_true",
help="Force cached data and do not check its validity")
parser.add_argument("--sampler-help", nargs='?', default=False,
const='None', help="Print help for given sampler")
parser.add_argument("--bilby-test-mode", action="store_true",
help=("Used for testing only: don't run full PE, but"
" just check nothing breaks"))
parser.add_argument("--bilby-zero-likelihood-mode", action="store_true",
help=("Used for testing only: don't run full PE, but"
" just check nothing breaks"))
args, unknown_args = parser.parse_known_args()
if args.quiet:
args.log_level = logging.WARNING
elif args.verbose:
args.log_level = logging.DEBUG
else:
args.log_level = logging.INFO
return args, parser
def run_commandline(cl, log_level=20, raise_error=True, return_output=True):
"""Run a string cmd as a subprocess, check for errors and return output.
Parameters
==========
cl: str
Command to run
log_level: int
See https://docs.python.org/2/library/logging.html#logging-levels,
default is '20' (INFO)
raise_error: bool
If True (default), raise an error if the command fails
return_output: bool
If True (default), capture and return the output of the command
"""
logger.log(log_level, 'Now executing: ' + cl)
if return_output:
try:
out = subprocess.check_output(
cl, stderr=subprocess.STDOUT, shell=True,
universal_newlines=True)
except subprocess.CalledProcessError as e:
logger.log(log_level, 'Execution failed: {}'.format(e.output))
if raise_error:
raise
else:
out = 0
os.system('\n')
return out
else:
process = subprocess.Popen(cl, shell=True)
process.communicate()
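A minimal usage sketch:

from bilby.core.utils import run_commandline

out = run_commandline("echo hello")  # logs the command, returns "hello\n"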
class tcolors:
KEY = '\033[93m'
VALUE = '\033[91m'
HIGHLIGHT = '\033[95m'
END = '\033[0m'
# Constants: values taken from LAL 505df9dd2e69b4812f1e8eee3a6d468ba7f80674
speed_of_light = 299792458.0 # m/s
parsec = 3.085677581491367e+16 # m
solar_mass = 1.9884099021470415e+30 # kg
radius_of_earth = 6378136.6 # m
gravitational_constant = 6.6743e-11 # m^3 kg^-1 s^-2
import warnings
from math import fmod
import numpy as np
def ra_dec_to_theta_phi(ra, dec, gmst):
""" Convert from RA and DEC to polar coordinates on celestial sphere
Parameters
==========
ra: float
right ascension in radians
dec: float
declination in radians
gmst: float
Greenwich mean sidereal time of arrival of the signal in radians
Returns
=======
float: zenith angle in radians
float: azimuthal angle in radians
"""
phi = ra - gmst
theta = np.pi / 2 - dec
return theta, phi
def theta_phi_to_ra_dec(theta, phi, gmst):
ra = phi + gmst
dec = np.pi / 2 - theta
return ra, dec
def gps_time_to_gmst(gps_time):
"""
Convert gps time to Greenwich mean sidereal time in radians
This method assumes a constant rotation rate of earth since 00:00:00, 1 Jan. 2000
A correction has been applied to give the exact correct value for 00:00:00, 1 Jan. 2018
Error accumulates at a rate of ~0.0001 radians/decade.
Parameters
-------
gps_time: float
gps time
Returns
-------
float: Greenwich mean sidereal time in radians
"""
warnings.warn(
"Function gps_time_to_gmst deprecated, use "
"lal.GreenwichMeanSiderealTime(time) instead",
DeprecationWarning)
omega_earth = 2 * np.pi * (1 / 365.2425 + 1) / 86400.
gps_2000 = 630720013.
gmst_2000 = (6 + 39. / 60 + 51.251406103947375 / 3600) * np.pi / 12
correction_2018 = -0.00017782487379358614
sidereal_time = omega_earth * (gps_time - gps_2000) + gmst_2000 + correction_2018
gmst = fmod(sidereal_time, 2 * np.pi)
return gmst
def spherical_to_cartesian(radius, theta, phi):
""" Convert from spherical coordinates to cartesian.
Parameters
==========
radius: float
radial coordinate
theta: float
axial coordinate
phi: float
azimuthal coordinate
Returns
=======
list: cartesian vector
"""
return [radius * np.sin(theta) * np.cos(phi), radius * np.sin(theta) * np.sin(phi), radius * np.cos(theta)]
import multiprocessing
class Counter(object):
"""
General class to count the number of times a function is called; returns
the total number of function calls
Parameters
==========
initval: int, 0
The number to start counting from
"""
def __init__(self, initval=0):
self.val = multiprocessing.RawValue('i', initval)
self.lock = multiprocessing.Lock()
def increment(self):
with self.lock:
self.val.value += 1
@property
def value(self):
return self.val.value
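A minimal sketch of Counter shared across a pool, assuming a fork-based multiprocessing start method so that the shared value is inherited by the workers:

import multiprocessing
from bilby.core.utils import Counter

counter = Counter()

def work(_):
    counter.increment()

if __name__ == "__main__":
    with multiprocessing.Pool(4) as pool:
        pool.map(work, range(100))
    print(counter.value)  # 100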
def docstring(docstr, sep="\n"):
"""
Decorator: Append to a function's docstring.
This is required for, e.g., :code:`classmethods`, as their :code:`__doc__`
can't be changed after creation.
Parameters
==========
docstr: str
The docstring
sep: str
Separation character for appending the existing docstring.
"""
def _decorator(func):
if func.__doc__ is None:
func.__doc__ = docstr
else:
func.__doc__ = sep.join([func.__doc__, docstr])
return func
return _decorator
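A sketch of the decorator on a classmethod, where __doc__ cannot be assigned after class creation:

from bilby.core.utils import docstring

class Example(object):
    @classmethod
    @docstring("Extra notes appended at definition time.")
    def build(cls):
        """Construct an Example."""
        return cls()

print(Example.build.__doc__)
# Construct an Example.
# Extra notes appended at definition time.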
import inspect
import types
def infer_parameters_from_function(func):
""" Infers the arguments of a function
(except the first arg which is assumed to be the dep. variable).
Throws out `*args` and `**kwargs` type arguments
Can deal with type hinting!
Parameters
==========
func: function or method
The function or method for which the parameters should be inferred.
Returns
=======
list: A list of strings with the parameters
Raises
======
ValueError
If the object passed to the function is neither a function nor a method.
Notes
=====
In order to handle methods the `type` of the function is checked, and
if a method has been passed the first *two* arguments are removed rather than just the first one.
This allows the reference to the instance (conventionally named `self`)
to be removed.
"""
if isinstance(func, types.MethodType):
return infer_args_from_function_except_n_args(func=func, n=2)
elif isinstance(func, types.FunctionType):
return _infer_args_from_function_except_for_first_arg(func=func)
else:
raise ValueError("This doesn't look like a function.")
def infer_args_from_method(method):
""" Infers all arguments of a method except for `self`
Throws out `*args` and `**kwargs` type arguments.
Can deal with type hinting!
Returns
=======
list: A list of strings with the parameters
"""
return infer_args_from_function_except_n_args(func=method, n=1)
def infer_args_from_function_except_n_args(func, n=1):
""" Inspects a function to find its arguments, and ignoring the
first n of these, returns a list of arguments from the function's
signature.
Parameters
==========
func : function or method
The function from which the arguments should be inferred.
n : int
The number of arguments which should be ignored, starting at the beginning.
Returns
=======
parameters: list
A list of parameters of the function, omitting the first `n`.
Extended Summary
================
This function is intended to allow the handling of named arguments
in both functions and methods; this is important, since the first
argument of an instance method will be the instance.
See Also
========
infer_args_from_method: Provides the arguments for a method
infer_args_from_function: Provides the arguments for a function
infer_args_from_function_except_first_arg: Provides all but first argument of a function or method.
Examples
========
.. code-block:: python
>>> def hello(a, b, c, d):
>>> pass
>>>
>>> infer_args_from_function_except_n_args(hello, 2)
['c', 'd']
"""
try:
parameters = inspect.getfullargspec(func).args
except AttributeError:
parameters = inspect.getargspec(func).args
del parameters[:n]
return parameters
def _infer_args_from_function_except_for_first_arg(func):
return infer_args_from_function_except_n_args(func=func, n=1)
def get_dict_with_properties(obj):
property_names = [p for p in dir(obj.__class__)
if isinstance(getattr(obj.__class__, p), property)]
dict_with_properties = obj.__dict__.copy()
for key in property_names:
dict_with_properties[key] = getattr(obj, key)
return dict_with_properties
def get_function_path(func):
if hasattr(func, "__module__") and hasattr(func, "__name__"):
return "{}.{}".format(func.__module__, func.__name__)
else:
return func
class PropertyAccessor(object):
"""
Generic descriptor class that allows handy access of properties without long
boilerplate code. The properties of Interferometer are defined as instances
of this class.
This avoids lengthy code like
.. code-block:: python
@property
def length(self):
return self.geometry.length
@length.setter
def length(self, length):
self.geometry.length = length
in the Interferometer class
"""
def __init__(self, container_instance_name, property_name):
self.property_name = property_name
self.container_instance_name = container_instance_name
def __get__(self, instance, owner):
return getattr(getattr(instance, self.container_instance_name), self.property_name)
def __set__(self, instance, value):
setattr(getattr(instance, self.container_instance_name), self.property_name, value)
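A minimal sketch of PropertyAccessor delegating an attribute to a contained object; the Geometry and Detector classes here are illustrative:

from bilby.core.utils import PropertyAccessor

class Geometry(object):
    def __init__(self, length):
        self.length = length

class Detector(object):
    length = PropertyAccessor("geometry", "length")  # delegates to self.geometry.length

    def __init__(self, length):
        self.geometry = Geometry(length)

det = Detector(4000)
print(det.length)  # 4000, read through det.geometry
det.length = 3000  # written through to det.geometry.length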
import inspect
import json
import os
import shutil
from importlib import import_module
from pathlib import Path
import numpy as np
import pandas as pd
from .logger import logger
from .introspection import infer_args_from_method
def check_directory_exists_and_if_not_mkdir(directory):
""" Checks if the given directory exists and creates it if it does not exist
Parameters
==========
directory: str
Name of the directory
"""
Path(directory).mkdir(parents=True, exist_ok=True)
class BilbyJsonEncoder(json.JSONEncoder):
def default(self, obj):
from ..prior import MultivariateGaussianDist, Prior, PriorDict
from ...gw.prior import HealPixMapPriorDist
if isinstance(obj, np.integer):
return int(obj)
if isinstance(obj, np.floating):
return float(obj)
if isinstance(obj, PriorDict):
return {'__prior_dict__': True, 'content': obj._get_json_dict()}
if isinstance(obj, (MultivariateGaussianDist, HealPixMapPriorDist, Prior)):
return {'__prior__': True, '__module__': obj.__module__,
'__name__': obj.__class__.__name__,
'kwargs': dict(obj.get_instantiation_dict())}
try:
from astropy import cosmology as cosmo, units
if isinstance(obj, cosmo.FLRW):
return encode_astropy_cosmology(obj)
if isinstance(obj, units.Quantity):
return encode_astropy_quantity(obj)
if isinstance(obj, units.PrefixUnit):
return str(obj)
except ImportError:
logger.debug("Cannot import astropy, cannot write cosmological priors")
if isinstance(obj, np.ndarray):
return {'__array__': True, 'content': obj.tolist()}
if isinstance(obj, complex):
return {'__complex__': True, 'real': obj.real, 'imag': obj.imag}
if isinstance(obj, pd.DataFrame):
return {'__dataframe__': True, 'content': obj.to_dict(orient='list')}
if isinstance(obj, pd.Series):
return {'__series__': True, 'content': obj.to_dict()}
if inspect.isfunction(obj):
return {"__function__": True, "__module__": obj.__module__, "__name__": obj.__name__}
if inspect.isclass(obj):
return {"__class__": True, "__module__": obj.__module__, "__name__": obj.__name__}
return json.JSONEncoder.default(self, obj)
def encode_astropy_cosmology(obj):
cls_name = obj.__class__.__name__
dct = {key: getattr(obj, key) for
key in infer_args_from_method(obj.__init__)}
dct['__cosmology__'] = True
dct['__name__'] = cls_name
return dct
def encode_astropy_quantity(dct):
dct = dict(__astropy_quantity__=True, value=dct.value, unit=str(dct.unit))
if isinstance(dct['value'], np.ndarray):
dct['value'] = list(dct['value'])
return dct
def decode_astropy_cosmology(dct):
try:
from astropy import cosmology as cosmo
cosmo_cls = getattr(cosmo, dct['__name__'])
del dct['__cosmology__'], dct['__name__']
return cosmo_cls(**dct)
except ImportError:
logger.debug("Cannot import astropy, cosmological priors may not be "
"properly loaded.")
return dct
def decode_astropy_quantity(dct):
try:
from astropy import units
if dct['value'] is None:
return None
else:
del dct['__astropy_quantity__']
return units.Quantity(**dct)
except ImportError:
logger.debug("Cannot import astropy, cosmological priors may not be "
"properly loaded.")
return dct
def load_json(filename, gzip):
if gzip or os.path.splitext(filename)[1].lstrip('.') == 'gz':
import gzip
with gzip.GzipFile(filename, 'r') as file:
json_str = file.read().decode('utf-8')
dictionary = json.loads(json_str, object_hook=decode_bilby_json)
else:
with open(filename, 'r') as file:
dictionary = json.load(file, object_hook=decode_bilby_json)
return dictionary
def decode_bilby_json(dct):
if dct.get("__prior_dict__", False):
cls = getattr(import_module(dct['__module__']), dct['__name__'])
obj = cls._get_from_json_dict(dct)
return obj
if dct.get("__prior__", False):
cls = getattr(import_module(dct['__module__']), dct['__name__'])
obj = cls(**dct['kwargs'])
return obj
if dct.get("__cosmology__", False):
return decode_astropy_cosmology(dct)
if dct.get("__astropy_quantity__", False):
return decode_astropy_quantity(dct)
if dct.get("__array__", False):
return np.asarray(dct["content"])
if dct.get("__complex__", False):
return complex(dct["real"], dct["imag"])
if dct.get("__dataframe__", False):
return pd.DataFrame(dct['content'])
if dct.get("__series__", False):
return pd.Series(dct['content'])
if dct.get("__function__", False) or dct.get("__class__", False):
default = ".".join([dct["__module__"], dct["__name__"]])
return getattr(import_module(dct["__module__"]), dct["__name__"], default)
return dct
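A round-trip sketch combining the encoder and the decoding hook:

import json
import numpy as np
from bilby.core.utils import BilbyJsonEncoder, decode_bilby_json

data = {"samples": np.arange(3), "z": 1 + 2j}
encoded = json.dumps(data, cls=BilbyJsonEncoder)
decoded = json.loads(encoded, object_hook=decode_bilby_json)
print(decoded["samples"], decoded["z"])  # [0 1 2] (1+2j)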
def recursively_decode_bilby_json(dct):
"""
Recursively call `decode_bilby_json`
Parameters
----------
dct: dict
The dictionary to decode
Returns
-------
dct: dict
The original dictionary with all the elements decoded if possible
"""
dct = decode_bilby_json(dct)
if isinstance(dct, dict):
for key in dct:
if isinstance(dct[key], dict):
dct[key] = recursively_decode_bilby_json(dct[key])
return dct
def decode_from_hdf5(item):
"""
Decode an item from HDF5 format to python type.
This currently just converts __none__ to None and some arrays to lists
.. versionadded:: 1.0.0
Parameters
----------
item: object
Item to be decoded
Returns
-------
output: object
Converted input item
"""
if isinstance(item, str) and item == "__none__":
output = None
elif isinstance(item, bytes) and item == b"__none__":
output = None
elif isinstance(item, (bytes, bytearray)):
output = item.decode()
elif isinstance(item, np.ndarray):
if item.size == 0:
output = item
elif "|S" in str(item.dtype) or isinstance(item[0], bytes):
output = [it.decode() for it in item]
else:
output = item
elif isinstance(item, np.bool_):
output = bool(item)
else:
output = item
return output
def encode_for_hdf5(item):
"""
Encode an item to a HDF5 savable format.
.. versionadded:: 1.1.0
Parameters
----------
item: object
Object to be encoded, specific options are provided for Bilby types
Returns
-------
output: object
Input item converted into HDF5 savable format
"""
from ..prior.dict import PriorDict
if isinstance(item, np.int_):
item = int(item)
elif isinstance(item, np.float_):
item = float(item)
elif isinstance(item, np.complex_):
item = complex(item)
if isinstance(item, (np.ndarray, int, float, complex, str, bytes)):
output = item
elif item is None:
output = "__none__"
elif isinstance(item, list):
if len(item) == 0:
output = item
elif isinstance(item[0], (str, bytes)) or item[0] is None:
output = list()
for value in item:
if isinstance(value, str):
output.append(value.encode("utf-8"))
elif isinstance(value, bytes):
output.append(value)
else:
output.append(b"__none__")
elif isinstance(item[0], (int, float, complex)):
output = np.array(item)
elif isinstance(item, PriorDict):
output = json.dumps(item._get_json_dict())
elif isinstance(item, pd.DataFrame):
output = item.to_dict(orient="list")
elif isinstance(item, pd.Series):
output = item.to_dict()
elif inspect.isfunction(item) or inspect.isclass(item):
output = dict(__module__=item.__module__, __name__=item.__name__, __class__=True)
elif isinstance(item, dict):
output = item.copy()
elif isinstance(item, tuple):
output = {str(ii): elem for ii, elem in enumerate(item)}
else:
raise ValueError(f'Cannot save {type(item)} type')
return output
def recursively_load_dict_contents_from_group(h5file, path):
"""
Recursively load a HDF5 file into a dictionary
.. versionadded:: 1.1.0
Parameters
----------
h5file: h5py.File
Open h5py file object
path: str
Path within the HDF5 file
Returns
-------
output: dict
The contents of the HDF5 file unpacked into the dictionary.
"""
import h5py
output = dict()
for key, item in h5file[path].items():
if isinstance(item, h5py.Dataset):
output[key] = decode_from_hdf5(item[()])
elif isinstance(item, h5py.Group):
output[key] = recursively_load_dict_contents_from_group(h5file, path + key + '/')
return output
def recursively_save_dict_contents_to_group(h5file, path, dic):
"""
Recursively save a dictionary to a HDF5 group
.. versionadded:: 1.1.0
Parameters
----------
h5file: h5py.File
Open HDF5 file
path: str
Path inside the HDF5 file
dic: dict
The dictionary containing the data
"""
for key, item in dic.items():
item = encode_for_hdf5(item)
if isinstance(item, dict):
recursively_save_dict_contents_to_group(h5file, path + key + '/', item)
else:
h5file[path + key] = item
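A round-trip sketch for the HDF5 helpers (requires h5py; the file name is illustrative):

import h5py
import numpy as np
from bilby.core.utils import (
    recursively_load_dict_contents_from_group,
    recursively_save_dict_contents_to_group,
)

data = {"meta": {"label": "run1", "seed": None}, "samples": np.arange(5)}
with h5py.File("example.h5", "w") as f:
    recursively_save_dict_contents_to_group(f, "/", data)
with h5py.File("example.h5", "r") as f:
    loaded = recursively_load_dict_contents_from_group(f, "/")
print(loaded["meta"]["seed"])  # None, decoded back from "__none__"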
def safe_file_dump(data, filename, module):
""" Safely dump data to a .pickle file
Parameters
==========
data:
data to dump
filename: str
The file to dump to
module: pickle, dill
The python module to use
"""
temp_filename = filename + ".temp"
with open(temp_filename, "wb") as file:
module.dump(data, file)
shutil.move(temp_filename, filename)
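A usage sketch; the module argument is the serialiser itself:

import pickle
from bilby.core.utils import safe_file_dump

safe_file_dump({"a": 1}, "state.pickle", pickle)  # writes state.pickle.temp, then moves it into place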
def move_old_file(filename, overwrite=False):
""" Moves or removes an old file.
Parameters
==========
filename: str
Name of the file to be moved
overwrite: bool, optional
Whether or not to remove the file or to change the name
to filename + '.old'
"""
if os.path.isfile(filename):
if overwrite:
logger.debug('Removing existing file {}'.format(filename))
os.remove(filename)
else:
logger.debug(
'Renaming existing file {} to {}.old'.format(filename,
filename))
shutil.move(filename, filename + '.old')
logger.debug("Saving result to {}".format(filename))
def safe_save_figure(fig, filename, **kwargs):
check_directory_exists_and_if_not_mkdir(os.path.dirname(filename))
from matplotlib import rcParams
try:
fig.savefig(fname=filename, **kwargs)
except RuntimeError:
logger.debug(
"Failed to save plot with tex labels turning off tex."
)
rcParams["text.usetex"] = False
fig.savefig(fname=filename, **kwargs)
import logging
import os
from pathlib import Path
import sys
logger = logging.getLogger('bilby')
def setup_logger(outdir='.', label=None, log_level='INFO', print_version=False):
""" Setup logging output: call at the start of the script to use
Parameters
==========
outdir, label: str
If supplied, write the logging output to outdir/label.log
log_level: str, optional
['debug', 'info', 'warning']
Either a string from the list above, or an integer as specified
in https://docs.python.org/2/library/logging.html#logging-levels
print_version: bool
If true, print version information
"""
if type(log_level) is str:
try:
level = getattr(logging, log_level.upper())
except AttributeError:
raise ValueError('log_level {} not understood'.format(log_level))
else:
level = int(log_level)
logger = logging.getLogger('bilby')
logger.propagate = False
logger.setLevel(level)
if any([type(h) == logging.StreamHandler for h in logger.handlers]) is False:
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(logging.Formatter(
'%(asctime)s %(name)s %(levelname)-8s: %(message)s', datefmt='%H:%M'))
stream_handler.setLevel(level)
logger.addHandler(stream_handler)
if any([type(h) == logging.FileHandler for h in logger.handlers]) is False:
if label:
Path(outdir).mkdir(parents=True, exist_ok=True)
log_file = '{}/{}.log'.format(outdir, label)
file_handler = logging.FileHandler(log_file)
file_handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)-8s: %(message)s', datefmt='%H:%M'))
file_handler.setLevel(level)
logger.addHandler(file_handler)
for handler in logger.handlers:
handler.setLevel(level)
if print_version:
version = get_version_information()
logger.info('Running bilby version: {}'.format(version))
def get_version_information():
version_file = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(__file__))), '.version')
try:
with open(version_file, 'r') as f:
return f.readline().rstrip()
except EnvironmentError:
print("No version information file '.version' found")
def loaded_modules_dict():
module_names = list(sys.modules.keys())
vdict = {}
for key in module_names:
if "." not in key:
vdict[key] = str(getattr(sys.modules[key], "__version__", "N/A"))
return vdict