Compare revisions

Changes are shown as if the source revision was being merged into the target revision.
Commits on Source (26)
Showing with 1669 additions and 101 deletions
......@@ -89,3 +89,5 @@ Vivien Raymond
Ka-Lok Lo
Isaac Legred
Marc Penuliar
Andrew Fowlie
Martin White
# All notable changes will be documented in this file
## [2.1.0] 2023-04-12
Version 2.1.0 release of Bilby
Minor feature improvements and bug fixes
### Additions
- Additional parameterizations for equation-of-state inference (!1083, !1240)
- Add Fisher matrix posterior estimator (!1242)
### Changes
- Improvements to the bilby-mcmc sampler including a Fisher Information Matrix proposal (!1242)
- Optimize spline interpolation of calibration uncertainties (!1241)
- Update LIGO India coordinates record to public DCC (!1246)
- Make logger disabling work in redundancy test (!1245)
- Make sure nested samples are a data frame (!1244)
- Minor improvements to the result methods including moving to top level imports (!1243)
- Fix a bug in the slabspike prior (!1235)
- Reduce verbosity when setting strain data (!1233)
- Fix issue with cached results class (!1223)
### Deprecated
- Reading/writing ROQ weights to json (!1232)
## [2.0.2] 2023-03-21
Version 2.0.2 release of Bilby
......
......@@ -23,6 +23,7 @@ from . import core, gw, hyper
from .core import utils, likelihood, prior, result, sampler
from .core.sampler import run_sampler
from .core.likelihood import Likelihood
from .core.result import read_in_result, read_in_result_list
try:
from ._version import version as __version__
......
......@@ -157,7 +157,7 @@ class Chain(object):
@property
def minimum_index(self):
"""This calculated a minimum index from which to discard samples
"""This calculates a minimum index from which to discard samples
A number of methods are provided for the calculation. A subset is
switched off (by `if False` statements) for future development
......@@ -342,7 +342,12 @@ class Chain(object):
@property
def nsamples(self):
nuseable_steps = self.position - self.minimum_index
return int(nuseable_steps / (self.thin_by_nact * self.tau))
n_independent_samples = nuseable_steps / self.tau
nsamples = int(n_independent_samples / self.thin_by_nact)
if nuseable_steps >= nsamples:
return nsamples
else:
return 0
@property
def nsamples_last(self):
......
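The new `nsamples` logic above converts raw chain length into an effective sample count by dividing out the autocorrelation time and the thinning factor. A minimal standalone sketch, with made-up numbers for `tau` and `thin_by_nact`:

```python
# Effective-sample-size bookkeeping, mirroring Chain.nsamples above
# (all values here are illustrative, not taken from a real chain).
position = 10_000      # current chain length
minimum_index = 1_000  # burn-in to discard
tau = 25.0             # estimated autocorrelation time
thin_by_nact = 1.0     # thinning, in units of tau

nuseable_steps = position - minimum_index
n_independent_samples = nuseable_steps / tau
nsamples = int(n_independent_samples / thin_by_nact)

# Guard mirroring the diff: only report nsamples when it does not
# exceed the number of usable steps (e.g. if tau is badly estimated).
print(nsamples if nuseable_steps >= nsamples else 0)  # -> 360
```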
......@@ -6,6 +6,7 @@ import numpy as np
from scipy.spatial.distance import jensenshannon
from scipy.stats import gaussian_kde
from ..core.fisher import FisherMatrixPosteriorEstimator
from ..core.prior import PriorDict
from ..core.sampler.base_sampler import SamplerError
from ..core.utils import logger, reflect
......@@ -61,6 +62,9 @@ class BaseProposal(object):
self.parameters = [p for p in self.parameters if p in subset]
self._str_attrs.append("parameters")
if len(self.parameters) == 0:
raise ValueError("Proposal requested with zero parameters")
self.ndim = len(self.parameters)
self.prior_boundary_dict = {key: priors[key].boundary for key in priors}
......@@ -129,10 +133,16 @@ class BaseProposal(object):
val_normalised_reflected = reflect(np.array(val_normalised))
return minimum + width * val_normalised_reflected
def __call__(self, chain):
sample, log_factor = self.propose(chain)
def __call__(self, chain, likelihood=None, priors=None):
if getattr(self, "needs_likelihood_and_priors", False):
sample, log_factor = self.propose(chain, likelihood, priors)
else:
sample, log_factor = self.propose(chain)
if log_factor == 0:
sample = self.apply_boundaries(sample)
return sample, log_factor
@abstractmethod
......@@ -459,7 +469,7 @@ class DensityEstimateProposal(BaseProposal):
# Print a log message
took = time.time() - start
logger.info(
logger.debug(
f"{self.density_name} construction at {self.steps_since_refit} finished"
f" for length {chain.position} chain, took {took:0.2f}s."
f" Current accept-ratio={self.acceptance_ratio:0.2f}"
......@@ -480,7 +490,7 @@ class DensityEstimateProposal(BaseProposal):
fail_parameters.append(key)
if len(fail_parameters) > 0:
logger.info(
logger.debug(
f"{self.density_name} construction failed verification and is discarded"
)
self.density = current_density
......@@ -493,7 +503,10 @@ class DensityEstimateProposal(BaseProposal):
# Check if we refit
testA = self.steps_since_refit >= self.next_refit_time
if testA:
self.refit(chain)
try:
self.refit(chain)
except Exception as e:
logger.warning(f"Failed to refit chain due to error {e}")
# If KDE is yet to be fitted, use the fallback
if self.trained is False:
......@@ -656,7 +669,7 @@ class NormalizingFlowProposal(DensityEstimateProposal):
return np.power(max_js, 2)
def train(self, chain):
logger.info("Starting NF training")
logger.debug("Starting NF training")
import torch
......@@ -687,14 +700,14 @@ class NormalizingFlowProposal(DensityEstimateProposal):
validation_samples, training_samples_draw
)
if max_js_bits < max_js_threshold:
logger.info(
logger.debug(
f"Training complete after {epoch} steps, "
f"max_js_bits={max_js_bits:0.5f}<{max_js_threshold}"
)
break
took = time.time() - start
logger.info(
logger.debug(
f"Flow training step ({self.steps_since_refit}) finished"
f" for length {chain.position} chain, took {took:0.2f}s."
f" Current accept-ratio={self.acceptance_ratio:0.2f}"
......@@ -715,7 +728,10 @@ class NormalizingFlowProposal(DensityEstimateProposal):
# Check if we retrain the NF
testA = self.steps_since_refit >= self.next_refit_time
if testA:
self.train(chain)
try:
self.train(chain)
except Exception as e:
logger.warning(f"Failed to retrain chain due to error {e}")
if self.trained is False:
return self.fallback.propose(chain)
......@@ -772,6 +788,64 @@ class FixedJumpProposal(BaseProposal):
return self.scale * np.random.normal()
class FisherMatrixProposal(AdaptiveGaussianProposal):
"""Fisher Matrix Proposals
Uses a finite-differencing approach motivated by BayesWave (see, e.g.
https://arxiv.org/abs/1410.3835). The inverse Fisher Information Matrix
is calculated from the current sample, then proposals are drawn from a
multivariate Gaussian and scaled by an adaptive parameter.
"""
needs_likelihood_and_priors = True
def __init__(
self,
priors,
subset=None,
weight=1,
update_interval=100,
scale_init=1e0,
fd_eps=1e-6,
adapt=False,
):
super(FisherMatrixProposal, self).__init__(
priors, weight, subset, scale_init=scale_init
)
self.update_interval = update_interval
self.steps_since_update = update_interval
self.adapt = adapt
self.mean = np.zeros(len(self.parameters))
self.fd_eps = fd_eps
def propose(self, chain, likelihood, priors):
sample = chain.current_sample
if self.adapt:
self.update_scale(chain)
if self.steps_since_update >= self.update_interval:
fmp = FisherMatrixPosteriorEstimator(
likelihood, priors, parameters=self.parameters, fd_eps=self.fd_eps
)
try:
self.iFIM = fmp.calculate_iFIM(sample.dict)
except (RuntimeError, ValueError) as e:
logger.warning(f"FisherMatrixProposal failed with {e}")
if hasattr(self, "iFIM") is False:
# No past iFIM exists, return sample
return sample, 0
self.steps_since_update = 0
jump = self.scale * np.random.multivariate_normal(
self.mean, self.iFIM, check_valid="ignore"
)
for key, val in zip(self.parameters, jump):
sample[key] += val
log_factor = 0
self.steps_since_update += 1
return sample, log_factor
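In outline, the proposal draws a correlated Gaussian jump whose covariance is set by the inverse Fisher matrix. A minimal sketch with a made-up 2x2 `iFIM` and parameter values:

```python
import numpy as np

# Fisher-matrix jump in miniature: sample from N(0, scale**2 * iFIM)
# and add it to the current point (iFIM and the sample are illustrative).
rng = np.random.default_rng(42)
iFIM = np.array([[0.04, 0.01],
                 [0.01, 0.09]])
current = {"mass_1": 36.0, "mass_2": 29.0}
scale = 1.0

jump = scale * rng.multivariate_normal(np.zeros(2), iFIM)
proposed = {key: val + dval for (key, val), dval in zip(current.items(), jump)}
print(proposed)  # symmetric proposal, hence log_factor = 0 above
```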
class BaseGravitationalWaveTransientProposal(BaseProposal):
def __init__(self, priors, weight=1):
super(BaseGravitationalWaveTransientProposal, self).__init__(
......@@ -985,7 +1059,7 @@ def get_default_ensemble_proposal_cycle(priors):
def get_proposal_cycle(string, priors, L1steps=1, warn=True):
big_weight = 10
small_weight = 5
tiny_weight = 0.1
tiny_weight = 0.5
if "gwA" in string:
# Parameters for learning proposals
......@@ -1009,15 +1083,15 @@ def get_proposal_cycle(string, priors, L1steps=1, warn=True):
if priors.intrinsic:
intrinsic = PARAMETER_SETS["intrinsic"]
plist += [
AdaptiveGaussianProposal(priors, weight=big_weight, subset=intrinsic),
AdaptiveGaussianProposal(priors, weight=small_weight, subset=intrinsic),
DifferentialEvolutionProposal(
priors, weight=big_weight, subset=intrinsic
priors, weight=small_weight, subset=intrinsic
),
KDEProposal(
priors, weight=big_weight, subset=intrinsic, **learning_kwargs
priors, weight=small_weight, subset=intrinsic, **learning_kwargs
),
GMMProposal(
priors, weight=big_weight, subset=intrinsic, **learning_kwargs
priors, weight=small_weight, subset=intrinsic, **learning_kwargs
),
]
......@@ -1026,13 +1100,13 @@ def get_proposal_cycle(string, priors, L1steps=1, warn=True):
plist += [
AdaptiveGaussianProposal(priors, weight=small_weight, subset=extrinsic),
DifferentialEvolutionProposal(
priors, weight=big_weight, subset=extrinsic
priors, weight=small_weight, subset=extrinsic
),
KDEProposal(
priors, weight=big_weight, subset=extrinsic, **learning_kwargs
priors, weight=small_weight, subset=extrinsic, **learning_kwargs
),
GMMProposal(
priors, weight=big_weight, subset=extrinsic, **learning_kwargs
priors, weight=small_weight, subset=extrinsic, **learning_kwargs
),
]
......@@ -1043,6 +1117,11 @@ def get_proposal_cycle(string, priors, L1steps=1, warn=True):
GMMProposal(
priors, weight=small_weight, subset=mass, **learning_kwargs
),
FisherMatrixProposal(
priors,
weight=small_weight,
subset=mass,
),
]
if priors.spin:
......@@ -1052,13 +1131,23 @@ def get_proposal_cycle(string, priors, L1steps=1, warn=True):
GMMProposal(
priors, weight=small_weight, subset=spin, **learning_kwargs
),
FisherMatrixProposal(
priors,
weight=big_weight,
subset=spin,
),
]
if priors.precession:
measured_spin = ["chi_1", "chi_2", "a_1", "a_2", "chi_1_in_plane"]
if priors.measured_spin:
measured_spin = PARAMETER_SETS["measured_spin"]
plist += [
AdaptiveGaussianProposal(
priors, weight=small_weight, subset=measured_spin
),
FisherMatrixProposal(
priors,
weight=small_weight,
subset=measured_spin,
),
]
if priors.mass and priors.spin:
......@@ -1086,6 +1175,21 @@ def get_proposal_cycle(string, priors, L1steps=1, warn=True):
CorrelatedPolarisationPhaseJump(priors, weight=tiny_weight),
PhasePolarisationReversalProposal(priors, weight=tiny_weight),
]
if priors.sky:
sky = PARAMETER_SETS["sky"]
plist += [
FisherMatrixProposal(
priors,
weight=small_weight,
subset=sky,
),
GMMProposal(
priors,
weight=small_weight,
subset=sky,
**learning_kwargs,
),
]
for key in ["time_jitter", "psi", "phi_12", "tilt_2", "lambda_1", "lambda_2"]:
if key in priors.non_fixed_keys:
plist.append(PriorProposal(priors, subset=[key], weight=tiny_weight))
......@@ -1101,6 +1205,7 @@ def get_proposal_cycle(string, priors, L1steps=1, warn=True):
DifferentialEvolutionProposal(priors, weight=big_weight),
UniformProposal(priors, weight=tiny_weight),
KDEProposal(priors, weight=big_weight, scale_fits=L1steps),
FisherMatrixProposal(priors, weight=big_weight),
]
if GMMProposal.check_dependencies(warn=warn):
plist.append(GMMProposal(priors, weight=big_weight, scale_fits=L1steps))
......@@ -1120,6 +1225,7 @@ def remove_proposals_using_string(plist, string):
GM=GMMProposal,
PR=PriorProposal,
UN=UniformProposal,
FM=FisherMatrixProposal,
)
for element in string.split("no")[1:]:
......
......@@ -146,6 +146,7 @@ class Bilby_MCMC(MCMCSampler):
L1steps=100,
L2steps=3,
printdt=60,
check_point_delta_t=1800,
min_tau=1,
proposal_cycle="default",
stop_after_convergence=False,
......@@ -165,7 +166,6 @@ class Bilby_MCMC(MCMCSampler):
use_ratio=False,
skip_import_verification=True,
check_point_plot=True,
check_point_delta_t=1800,
diagnostic=False,
resume=True,
exit_code=130,
......@@ -202,9 +202,9 @@ class Bilby_MCMC(MCMCSampler):
self.initial_sample_dict = self.kwargs["initial_sample_dict"]
self.printdt = self.kwargs["printdt"]
self.check_point_delta_t = self.kwargs["check_point_delta_t"]
check_directory_exists_and_if_not_mkdir(self.outdir)
self.resume = resume
self.check_point_delta_t = check_point_delta_t
self.resume_file = "{}/{}_resume.pickle".format(self.outdir, self.label)
self.verify_configuration()
......@@ -224,6 +224,10 @@ class Bilby_MCMC(MCMCSampler):
for equiv in self.npool_equiv_kwargs:
if equiv in kwargs:
kwargs["npool"] = kwargs.pop(equiv)
if "check_point_delta_t" not in kwargs:
for equiv in self.check_point_equiv_kwargs:
if equiv in kwargs:
kwargs["check_point_delta_t"] = kwargs.pop(equiv)
@property
def target_nsamples(self):
......@@ -315,8 +319,8 @@ class Bilby_MCMC(MCMCSampler):
self._steps_since_last_print = 0
self._time_since_last_print = 0
logger.info(f"Drawing {self.target_nsamples} samples")
logger.info(f"Checkpoint every {self.check_point_delta_t}s")
logger.info(f"Print update every {self.printdt}s")
logger.info(f"Checkpoint every check_point_delta_t={self.check_point_delta_t}s")
logger.info(f"Print update every printdt={self.printdt}s")
while True:
t0 = datetime.datetime.now()
......@@ -390,7 +394,6 @@ class Bilby_MCMC(MCMCSampler):
)
raise ResumeError(msg)
self.ptsampler.set_convergence_inputs(self.convergence_inputs)
self.ptsampler.proposal_cycle = self.proposal_cycle
self.ptsampler.pt_rejection_sample = self.pt_rejection_sample
logger.info(
......@@ -734,13 +737,19 @@ class BilbyPTMCMCSampler(object):
@property
def samples(self):
cached_samples = getattr(self, "_cached_samples", (False,))
if cached_samples[0] == self.position:
return cached_samples[1]
sample_list = []
for sampler in self.zerotemp_sampler_list:
sample_list.append(sampler.samples)
if self.pt_rejection_sample:
for sampler in self.tempered_sampler_list:
sample_list.append(sampler.samples)
return pd.concat(sample_list)
samples = pd.concat(sample_list, ignore_index=True)
self._cached_samples = (self.position, samples)
return samples
@property
def position(self):
......@@ -783,7 +792,7 @@ class BilbyPTMCMCSampler(object):
@staticmethod
def _get_sample_to_swap(sampler):
if sampler.chain.converged is False:
if not (sampler.chain.converged and sampler.stop_after_convergence):
v = sampler.chain[-1]
else:
v = sampler.chain.random_sample
......@@ -897,7 +906,7 @@ class BilbyPTMCMCSampler(object):
ln_z, ln_z_err = self.compute_evidence_per_ensemble(method, kwargs)
self.ln_z_dict[key] = ln_z
self.ln_z_err_dict[key] = ln_z_err
logger.info(
logger.debug(
f"Log-evidence of {ln_z:0.2f}+/-{ln_z_err:0.2f} calculated using {key} method"
)
......@@ -1141,7 +1150,9 @@ class BilbyMCMCSampler(object):
if initial_sample_dict is not None:
initial_sample.update(initial_sample_dict)
logger.info(f"Using initial sample {initial_sample}")
if self.beta == 1:
logger.info(f"Using initial sample {initial_sample}")
initial_sample = Sample(initial_sample)
initial_sample[LOGLKEY] = self.log_likelihood(initial_sample)
initial_sample[LOGPKEY] = self.log_prior(initial_sample)
......@@ -1216,7 +1227,11 @@ class BilbyMCMCSampler(object):
while internal_steps < self.chain.L1steps:
internal_steps += 1
proposal = self.proposal_cycle.get_proposal()
prop, log_factor = proposal(self.chain)
prop, log_factor = proposal(
self.chain,
likelihood=_sampling_convenience_dump.likelihood,
priors=_sampling_convenience_dump.priors,
)
logp = self.log_prior(prop)
if np.isinf(logp) or np.isnan(logp):
......
from . import grid, likelihood, prior, result, sampler, series, utils
from . import grid, likelihood, prior, result, sampler, series, utils, fisher
import numpy as np
import scipy.linalg
import pandas as pd
from scipy.optimize import minimize
class FisherMatrixPosteriorEstimator(object):
def __init__(self, likelihood, priors, parameters=None, fd_eps=1e-6, n_prior_samples=100):
""" A class to estimate posteriors using the Fisher Matrix approach
Parameters
----------
likelihood: bilby.core.likelihood.Likelihood
A bilby likelihood object
priors: bilby.core.prior.PriorDict
A bilby prior object
parameters: list
Names of parameters to sample in
fd_eps: float
A parameter to control the size of perturbation used when finite
differencing the likelihood
n_prior_samples: int
The number of prior samples to draw and use to attempt estimation
of the maximum likelihood sample.
"""
self.likelihood = likelihood
if parameters is None:
self.parameter_names = priors.non_fixed_keys
else:
self.parameter_names = parameters
self.fd_eps = fd_eps
self.n_prior_samples = n_prior_samples
self.N = len(self.parameter_names)
self.prior_samples = [
priors.sample_subset(self.parameter_names) for _ in range(n_prior_samples)
]
self.prior_bounds = [(priors[key].minimum, priors[key].maximum) for key in self.parameter_names]
self.prior_width_dict = {}
for key in self.parameter_names:
width = priors[key].width
if np.isnan(width):
raise ValueError(f"Prior width is ill-formed for {key}")
self.prior_width_dict[key] = width
def log_likelihood(self, sample):
self.likelihood.parameters.update(sample)
return self.likelihood.log_likelihood()
def calculate_iFIM(self, sample):
FIM = self.calculate_FIM(sample)
iFIM = scipy.linalg.inv(FIM)
# Ensure iFIM is positive definite
min_eig = np.min(np.real(np.linalg.eigvals(iFIM)))
if min_eig < 0:
iFIM -= 10 * min_eig * np.eye(*iFIM.shape)
return iFIM
def sample_array(self, sample, n=1):
if sample == "maxL":
sample = self.get_maximum_likelihood_sample()
self.mean = np.array(list(sample.values()))
self.iFIM = self.calculate_iFIM(sample)
return np.random.multivariate_normal(self.mean, self.iFIM, n)
def sample_dataframe(self, sample, n=1):
samples = self.sample_array(sample, n)
return pd.DataFrame(samples, columns=self.parameter_names)
def calculate_FIM(self, sample):
FIM = np.zeros((self.N, self.N))
for ii, ii_key in enumerate(self.parameter_names):
for jj, jj_key in enumerate(self.parameter_names):
FIM[ii, jj] = -self.get_second_order_derivative(sample, ii_key, jj_key)
return FIM
def get_second_order_derivative(self, sample, ii, jj):
if ii == jj:
return self.get_finite_difference_xx(sample, ii)
else:
return self.get_finite_difference_xy(sample, ii, jj)
def get_finite_difference_xx(self, sample, ii):
# Sample grid
p = self.shift_sample_x(sample, ii, 1)
m = self.shift_sample_x(sample, ii, -1)
dx = .5 * (p[ii] - m[ii])
loglp = self.log_likelihood(p)
logl = self.log_likelihood(sample)
loglm = self.log_likelihood(m)
return (loglp - 2 * logl + loglm) / dx ** 2
def get_finite_difference_xy(self, sample, ii, jj):
# Sample grid
pp = self.shift_sample_xy(sample, ii, 1, jj, 1)
pm = self.shift_sample_xy(sample, ii, 1, jj, -1)
mp = self.shift_sample_xy(sample, ii, -1, jj, 1)
mm = self.shift_sample_xy(sample, ii, -1, jj, -1)
dx = .5 * (pp[ii] - mm[ii])
dy = .5 * (pp[jj] - mm[jj])
loglpp = self.log_likelihood(pp)
loglpm = self.log_likelihood(pm)
loglmp = self.log_likelihood(mp)
loglmm = self.log_likelihood(mm)
return (loglpp - loglpm - loglmp + loglmm) / (4 * dx * dy)
def shift_sample_x(self, sample, x_key, x_coef):
vx = sample[x_key]
dvx = self.fd_eps * self.prior_width_dict[x_key]
shift_sample = sample.copy()
shift_sample[x_key] = vx + x_coef * dvx
return shift_sample
def shift_sample_xy(self, sample, x_key, x_coef, y_key, y_coef):
vx = sample[x_key]
vy = sample[y_key]
dvx = self.fd_eps * self.prior_width_dict[x_key]
dvy = self.fd_eps * self.prior_width_dict[y_key]
shift_sample = sample.copy()
shift_sample[x_key] = vx + x_coef * dvx
shift_sample[y_key] = vy + y_coef * dvy
return shift_sample
def get_maximum_likelihood_sample(self, initial_sample=None):
""" A method to attempt optimization of the maximum likelihood
This uses a simple scipy optimization approach, starting from a number
of draws from the prior to avoid problems with local optimization.
Note: this approach works well in low dimensions when the posterior is
narrow relative to the prior. If the number of dimensions is large, or
the posterior is wide relative to the prior, the method can fail to find
the global maximum.
"""
minlogL = np.inf
for i in range(self.n_prior_samples):
initial_sample = self.prior_samples[i]
x0 = list(initial_sample.values())
def neg_log_like(x, self, T=1):
sample = {key: val for key, val in zip(self.parameter_names, x)}
return - 1 / T * self.log_likelihood(sample)
out = minimize(
neg_log_like,
x0,
args=(self, 1),
bounds=self.prior_bounds,
method="L-BFGS-B",
)
if out.fun < minlogL:
minlogL = out.fun
minout = out
return {key: val for key, val in zip(self.parameter_names, minout.x)}
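A quick sanity check of the central finite differencing used in `calculate_FIM`: for a one-dimensional Gaussian log-likelihood of width sigma, the Fisher matrix is 1/sigma**2, so its inverse recovers the variance. This is a standalone sketch, not part of the class:

```python
import numpy as np

# Central second difference of a Gaussian log-likelihood; the step h
# plays the role of fd_eps times the prior width in the class above.
sigma = 0.5

def log_likelihood(x):
    return -0.5 * (x / sigma) ** 2

x0, h = 0.1, 1e-4
d2 = (log_likelihood(x0 + h) - 2 * log_likelihood(x0)
      + log_likelihood(x0 - h)) / h ** 2
FIM = -d2
print(1.0 / FIM)  # ~0.25, i.e. sigma**2
```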
......@@ -1517,3 +1517,107 @@ class Categorical(Prior):
idxs = np.isin(val, self.categories)
probs[idxs] = self.lnp
return probs
class Triangular(Prior):
"""
Define a new prior class which draws from a triangular distribution.
For distribution details see: wikipedia.org/wiki/Triangular_distribution
Here, minimum <= mode <= maximum,
where the mode has the highest pdf value.
"""
def __init__(self, mode, minimum, maximum, name=None, latex_label=None, unit=None):
super(Triangular, self).__init__(
name=name,
latex_label=latex_label,
unit=unit,
minimum=minimum,
maximum=maximum,
)
self.mode = mode
self.fractional_mode = (self.mode - self.minimum) / (
self.maximum - self.minimum
)
self.scale = self.maximum - self.minimum
self.rescaled_minimum = self.minimum - (self.minimum == self.mode) * self.scale
self.rescaled_maximum = self.maximum + (self.maximum == self.mode) * self.scale
def rescale(self, val):
"""
'Rescale' a sample from the standard uniform distribution to the
triangular distribution, i.e. apply the inverse CDF, which is known
analytically in this case.
Parameters
==========
val: Union[float, int, array_like]
Uniform probability
Returns
=======
Union[float, array_like]: Rescaled probability
"""
below_mode = (val * self.scale * (self.mode - self.minimum)) ** 0.5
above_mode = ((1 - val) * self.scale * (self.maximum - self.mode)) ** 0.5
return (self.minimum + below_mode) * (val < self.fractional_mode) + (
self.maximum - above_mode
) * (val >= self.fractional_mode)
def prob(self, val):
"""
Return the prior probability of val
Parameters
==========
val: Union[float, int, array_like]
Returns
=======
float: Prior probability of val
"""
between_minimum_and_mode = (
(val < self.mode)
* (self.minimum <= val)
* (val - self.rescaled_minimum)
/ (self.mode - self.rescaled_minimum)
)
between_mode_and_maximum = (
(self.mode <= val)
* (val <= self.maximum)
* (self.rescaled_maximum - val)
/ (self.rescaled_maximum - self.mode)
)
return 2.0 * (between_minimum_and_mode + between_mode_and_maximum) / self.scale
def cdf(self, val):
"""
Return the prior cumulative probability at val
Parameters
==========
val: Union[float, int, array_like]
Returns
=======
float: prior cumulative probability at val
"""
return (
+ (val > self.mode)
+ (val > self.minimum)
* (val <= self.maximum)
/ (self.scale)
* (
(val > self.mode)
* (self.rescaled_maximum - val) ** 2.0
/ (self.mode - self.rescaled_maximum)
+ (val <= self.mode)
* (val - self.rescaled_minimum) ** 2.0
/ (self.mode - self.rescaled_minimum)
)
)
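The analytic inverse CDF above can be cross-checked against scipy's triangular distribution (a sketch, assuming scipy is available; the minimum, mode, and maximum values are arbitrary):

```python
import numpy as np
from scipy.stats import triang

# Compare the closed-form rescale above with scipy.stats.triang.ppf.
minimum, mode, maximum = 0.0, 1.0, 4.0
scale = maximum - minimum
fractional_mode = (mode - minimum) / scale

u = np.linspace(0.01, 0.99, 5)
below = minimum + np.sqrt(u * scale * (mode - minimum))
above = maximum - np.sqrt((1 - u) * scale * (maximum - mode))
rescaled = np.where(u < fractional_mode, below, above)

reference = triang(c=fractional_mode, loc=minimum, scale=scale).ppf(u)
print(np.allclose(rescaled, reference))  # True
```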
from numbers import Number
import numpy as np
from .base import Prior
......@@ -84,6 +85,7 @@ class SlabSpikePrior(Prior):
=======
array_like: Associated prior value with input value.
"""
original_is_number = isinstance(val, Number)
val = np.atleast_1d(val)
lower_indices = np.where(val < self.inverse_cdf_below_spike)[0]
......@@ -96,6 +98,12 @@ class SlabSpikePrior(Prior):
res[lower_indices] = self._contracted_rescale(val[lower_indices])
res[intermediate_indices] = self.spike_location
res[higher_indices] = self._contracted_rescale(val[higher_indices] - self.spike_height)
if original_is_number:
try:
res = res[0]
except (KeyError, TypeError):
logger.warning("Based on inputs, a number should be output\
but this could not be accessed from what was computed")
return res
def _contracted_rescale(self, val):
......@@ -126,9 +134,16 @@ class SlabSpikePrior(Prior):
=======
array_like: Prior probability of val
"""
original_is_number = isinstance(val, Number)
res = self.slab.prob(val) * self.slab_fraction
res = np.atleast_1d(res)
res[np.where(val == self.spike_location)] = np.inf
if original_is_number:
try:
res = res[0]
except (KeyError, TypeError):
logger.warning("Based on inputs, a number should be output\
but this could not be accessed from what was computed")
return res
def ln_prob(self, val):
......@@ -143,9 +158,16 @@ class SlabSpikePrior(Prior):
=======
array_like: Prior probability of val
"""
original_is_number = isinstance(val, Number)
res = self.slab.ln_prob(val) + np.log(self.slab_fraction)
res = np.atleast_1d(res)
res[np.where(val == self.spike_location)] = np.inf
if original_is_number:
try:
res = res[0]
except (KeyError, TypeError):
logger.warning("Based on inputs, a number should be output\
but this could not be accessed from what was computed")
return res
def cdf(self, val):
......
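The three changes above all follow the same scalar-in/scalar-out pattern: promote the input to an array for vectorised work, then unwrap before returning if the caller passed a plain number. A minimal standalone sketch of that pattern (the doubling is a stand-in computation):

```python
from numbers import Number
import numpy as np

def demo_prob(val):
    # Remember whether the caller passed a scalar, work on an array,
    # then restore the scalar type on the way out.
    original_is_number = isinstance(val, Number)
    res = np.atleast_1d(np.asarray(val, dtype=float) * 2.0)
    if original_is_number:
        res = res[0]
    return res

print(demo_prob(1.5))               # 3.0, a plain float
print(demo_prob(np.array([1, 2])))  # array([2., 4.])
```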
......@@ -76,7 +76,7 @@ def _determine_file_name(filename, outdir, label, extension, gzip):
return result_file_name(outdir, label, extension, gzip)
def read_in_result(filename=None, outdir=None, label=None, extension='json', gzip=False):
def read_in_result(filename=None, outdir=None, label=None, extension='json', gzip=False, result_class=None):
""" Reads in a stored bilby result object
Parameters
......@@ -86,21 +86,29 @@ def read_in_result(filename=None, outdir=None, label=None, extension='json', gzi
outdir, label, extension: str
Name of the output directory, label and extension used for the default
naming scheme.
result_class: bilby.core.result.Result, or child of
The result class to use. By default, `bilby.core.result.Result` is used,
but any class inheriting from it can be given to provide additional
methods.
"""
filename = _determine_file_name(filename, outdir, label, extension, gzip)
if result_class is None:
result_class = Result
elif not issubclass(result_class, Result):
raise ValueError(f"Input result_class={result_class} not understood")
# Get the actual extension (may differ from the default extension if the filename is given)
extension = os.path.splitext(filename)[1].lstrip('.')
if extension == 'gz': # gzipped file
extension = os.path.splitext(os.path.splitext(filename)[0])[1].lstrip('.')
if 'json' in extension:
result = Result.from_json(filename=filename)
result = result_class.from_json(filename=filename)
elif ('hdf5' in extension) or ('h5' in extension):
result = Result.from_hdf5(filename=filename)
result = result_class.from_hdf5(filename=filename)
elif ("pkl" in extension) or ("pickle" in extension):
result = Result.from_pickle(filename=filename)
result = result_class.from_pickle(filename=filename)
elif extension is None:
raise ValueError("No filetype extension provided")
else:
......@@ -108,6 +116,34 @@ def read_in_result(filename=None, outdir=None, label=None, extension='json', gzi
return result
def read_in_result_list(filename_list, invalid="warning"):
""" Read in a set of results
Parameters
==========
filename_list: list
A list of filename paths
invalid: str (ignore, warning, error)
Behaviour if a file in filename_list is not a valid bilby result
Returns
=======
result_list: ResultList
A list of results
"""
results_list = []
for filename in filename_list:
try:
results_list.append(read_in_result(filename=filename))
except Exception as e:
msg = f"Failed to read in file {filename} due to exception {e}"
if invalid == "error":
raise ResultListError(msg)
elif invalid == "warning":
logger.warning(msg)
return ResultList(results_list)
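Hypothetical usage of the new reader (the file paths are made up; they would be outputs of equivalent runs on the same data):

```python
import bilby

paths = ["outdir/run1_result.json", "outdir/run2_result.json"]
# Warn about, rather than fail on, files that are not valid results.
results = bilby.read_in_result_list(paths, invalid="warning")
combined = results.combine()  # a single Result built from the valid inputs
```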
def get_weights_for_reweighting(
result, new_likelihood=None, new_prior=None, old_likelihood=None,
old_prior=None, resume_file=None, n_checkpoint=5000, npool=1):
......@@ -423,6 +459,8 @@ class Result(object):
self.injection_parameters = injection_parameters
self.posterior = posterior
self.samples = samples
if isinstance(nested_samples, dict):
nested_samples = pd.DataFrame(nested_samples)
self.nested_samples = nested_samples
self.walkers = walkers
self.nburn = nburn
......@@ -1696,7 +1734,7 @@ class Result(object):
class ResultList(list):
def __init__(self, results=None):
def __init__(self, results=None, consistency_level="warning"):
""" A class to store a list of :class:`bilby.core.result.Result` objects
from equivalent runs on the same data. This provides methods for
outputting combined results.
......@@ -1705,8 +1743,15 @@ class ResultList(list):
==========
results: list
A list of `:class:`bilby.core.result.Result`.
consistency_level: str, [ignore, warning, error]
If warning, print a warning if inconsistencies are discovered
between the results. If error, raise an error if inconsistencies
are discovered between the results before combining. If ignore, do
nothing.
"""
super(ResultList, self).__init__()
self.consistency_level = consistency_level
for result in results:
self.append(result)
......@@ -1737,9 +1782,11 @@ class ResultList(list):
----------
shuffle: bool
If true, shuffle the samples when combining, otherwise they are concatenated.
consistency_level: str, [warning, error]
If warning, print a warning if inconsistencies are discovered between the results before combining.
If error, raise an error if inconsistencies are discovered between the results before combining.
consistency_level: str, [ignore, warning, error]
Overrides the class-level consistency_level. If warning, print a
warning if inconsistencies are discovered between the results. If
error, raise an error if inconsistencies are discovered between
the results before combining. If ignore, do nothing.
Returns
=======
......@@ -1884,6 +1931,8 @@ class ResultList(list):
raise ResultListError(msg)
elif self.consistency_level == "warning":
logger.warning(msg)
elif self.consistency_level == "ignore":
pass
else:
raise ValueError(f"Input consistency_level {self.consistency_level} not understood")
......
......@@ -202,6 +202,7 @@ class Sampler(object):
If a specific sampler does not have a sampling seed option, then it should be
left as None.
"""
check_point_equiv_kwargs = ["check_point_deltaT", "check_point_delta_t"]
def __init__(
self,
......@@ -253,7 +254,7 @@ class Sampler(object):
self.kwargs = kwargs
self._check_cached_result()
self._check_cached_result(result_class)
self._log_summary_for_sampler()
......@@ -631,7 +632,7 @@ class Sampler(object):
"""
raise ValueError("Method not yet implemented")
def _check_cached_result(self):
def _check_cached_result(self, result_class=None):
"""Check if the cached data file exists and can be used"""
if command_line_args.clean:
......@@ -640,7 +641,9 @@ class Sampler(object):
return
try:
self.cached_result = read_in_result(outdir=self.outdir, label=self.label)
self.cached_result = read_in_result(
outdir=self.outdir, label=self.label, result_class=result_class
)
except IOError:
self.cached_result = None
......
This diff is collapsed.
......@@ -181,6 +181,29 @@ class CubicSpline(Recalibrate):
self.maximum_frequency = maximum_frequency
self._log_spline_points = np.linspace(
np.log10(minimum_frequency), np.log10(maximum_frequency), n_points)
self._delta_log_spline_points = self._log_spline_points[1] - self._log_spline_points[0]
# Precompute matrix converting values at nodes to spline coefficients.
# The algorithm for interpolation is described in
# https://dcc.ligo.org/LIGO-T2300140, and the matrix calculated here is
# to solve Eq. (9) in the note.
tmp1 = np.zeros(shape=(n_points, n_points))
tmp1[0, 0] = -1
tmp1[0, 1] = 2
tmp1[0, 2] = -1
tmp1[-1, -3] = -1
tmp1[-1, -2] = 2
tmp1[-1, -1] = -1
for i in range(1, n_points - 1):
tmp1[i, i - 1] = 1 / 6
tmp1[i, i] = 2 / 3
tmp1[i, i + 1] = 1 / 6
tmp2 = np.zeros(shape=(n_points, n_points))
for i in range(1, n_points - 1):
tmp2[i, i - 1] = 1
tmp2[i, i] = -2
tmp2[i, i + 1] = 1
self._nodes_to_spline_coefficients = np.linalg.solve(tmp1, tmp2)
@property
def log_spline_points(self):
......@@ -208,18 +231,26 @@ class CubicSpline(Recalibrate):
calibration_factor : array-like
The factor to multiply the strain by.
"""
log10f_per_deltalog10f = (
np.log10(frequency_array) - self.log_spline_points[0]
) / self._delta_log_spline_points
previous_nodes = np.clip(np.floor(log10f_per_deltalog10f).astype(int), a_min=0, a_max=self.n_points - 2)
next_nodes = previous_nodes + 1
b = log10f_per_deltalog10f - previous_nodes
a = 1 - b
c = (a**3 - a) / 6
d = (b**3 - b) / 6
self.set_calibration_parameters(**params)
amplitude_parameters = [self.params['amplitude_{}'.format(ii)]
for ii in range(self.n_points)]
delta_amplitude = interp1d(
self.log_spline_points, amplitude_parameters, kind='cubic',
bounds_error=False, fill_value=0)(np.log10(frequency_array))
phase_parameters = [
self.params['phase_{}'.format(ii)] for ii in range(self.n_points)]
delta_phase = interp1d(
self.log_spline_points, phase_parameters, kind='cubic',
bounds_error=False, fill_value=0)(np.log10(frequency_array))
amplitude_parameters = np.array([self.params['amplitude_{}'.format(ii)] for ii in range(self.n_points)])
_spline_coefficients = self._nodes_to_spline_coefficients.dot(amplitude_parameters)
delta_amplitude = a * amplitude_parameters[previous_nodes] + b * amplitude_parameters[next_nodes] + \
c * _spline_coefficients[previous_nodes] + d * _spline_coefficients[next_nodes]
phase_parameters = np.array([self.params['phase_{}'.format(ii)] for ii in range(self.n_points)])
_spline_coefficients = self._nodes_to_spline_coefficients.dot(phase_parameters)
delta_phase = a * phase_parameters[previous_nodes] + b * phase_parameters[next_nodes] + \
c * _spline_coefficients[previous_nodes] + d * _spline_coefficients[next_nodes]
calibration_factor = (1 + delta_amplitude) * (2 + 1j * delta_phase) / (2 - 1j * delta_phase)
......
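The rewrite above trades scipy's `interp1d` for a one-off linear solve: the tridiagonal system (Eq. (9) of LIGO-T2300140) maps node values to scaled second derivatives, after which each evaluation only needs the four cubic weights a, b, c, d. A standalone sketch on uniform nodes, using y = x**2, which a cubic spline reproduces exactly:

```python
import numpy as np

# Build the same node-to-coefficient matrices as the diff above.
n = 8
nodes = np.linspace(0.0, 1.0, n)
h = nodes[1] - nodes[0]
y = nodes ** 2

tmp1 = np.zeros((n, n))
tmp1[0, :3] = [-1, 2, -1]
tmp1[-1, -3:] = [-1, 2, -1]
tmp2 = np.zeros((n, n))
for i in range(1, n - 1):
    tmp1[i, i - 1: i + 2] = [1 / 6, 2 / 3, 1 / 6]
    tmp2[i, i - 1: i + 2] = [1, -2, 1]
coeffs = np.linalg.solve(tmp1, tmp2).dot(y)  # ~ h**2 * y'' at the nodes

# Evaluate the spline at one point with the a, b, c, d weights.
x = 0.37
j = min(int((x - nodes[0]) / h), n - 2)  # previous node, as in the diff
b = (x - nodes[j]) / h
a = 1 - b
c, d = (a ** 3 - a) / 6, (b ** 3 - b) / 6
s = a * y[j] + b * y[j + 1] + c * coeffs[j] + d * coeffs[j + 1]
print(np.isclose(s, x ** 2))  # True
```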
# LIGO India Aundha at Aplus sensitivity.
# LIGO-T2000158
# Coordinates taken from https://dcc.ligo.org/LIGO-T2000158/public
# https://dcc.ligo.org/LIGO-T2000012/public
name = 'A1'
power_spectral_density = PowerSpectralDensity(asd_file='Aplus_asd.txt')
......
......@@ -216,9 +216,9 @@ class InterferometerStrainData(object):
if self._frequency_domain_strain is not None:
return self._frequency_domain_strain * self.frequency_mask
elif self._time_domain_strain is not None:
logger.info("Generating frequency domain strain from given time "
"domain strain.")
logger.info("Applying a tukey window with alpha={}, roll off={}".format(
logger.debug("Generating frequency domain strain from given time "
"domain strain.")
logger.debug("Applying a tukey window with alpha={}, roll off={}".format(
self.alpha, self.roll_off))
# self.low_pass_filter()
window = self.time_domain_window()
......
......@@ -944,19 +944,30 @@ class ROQGravitationalWaveTransient(GravitationalWaveTransient):
self.weights[ifo.name + '_quadratic'].append(
np.dot(quadratic_matrix_single, multibanded_inverse_psd[ifo.name]))
def save_weights(self, filename, format='npz'):
def save_weights(self, filename, format='hdf5'):
"""
Save ROQ weights into a single file. format should be json, npz, or hdf5. For weights from multiple bases, hdf5
is only the possible option.
Save ROQ weights into a single file. format should be npz or hdf5.
For weights from multiple bases, hdf5 is the only possible option.
Support for the json format is deprecated as of :code:`v2.1` and will be
removed in :code:`v2.2`; another format should be used instead.
Parameters
==========
filename : str
The name of the file to save the weights to.
format : str
The format to save the data to; this should be one of
:code:`"hdf5"`, :code:`"npz"`, or the deprecated :code:`"json"`, default=:code:`"hdf5"`.
"""
if format not in ['json', 'npz', 'hdf5']:
raise IOError(f"Format {format} not recognized.")
if format == "json":
import warnings
warnings.warn(
"json format for ROQ weights is deprecated, use hdf5 instead.",
DeprecationWarning
)
if format not in filename:
filename += "." + format
logger.info(f"Saving ROQ weights to {filename}")
......@@ -1001,19 +1012,35 @@ class ROQGravitationalWaveTransient(GravitationalWaveTransient):
def load_weights(self, filename, format=None):
"""
Load ROQ weights. format should be json, npz, or hdf5. json or npz file is assumed to contain weights from a
single basis
Load ROQ weights. format should be json, npz, or hdf5.
A json or npz file is assumed to contain weights from a single basis.
Support for the json format is deprecated as of :code:`v2.1` and will be
removed in :code:`v2.2`; another format should be used instead.
Parameters
==========
filename : str
The name of the file to load the weights from.
format : str
The format the weights are stored in; this should be one of
:code:`"hdf5"`, :code:`"npz"`, or :code:`"json"`. If None, the format
is inferred from the filename extension.
Returns
=======
weights: dict
Dictionary containing the ROQ weights.
"""
if format is None:
format = filename.split(".")[-1]
if format not in ["json", "npz", "hdf5"]:
raise IOError(f"Format {format} not recognized.")
if format == "json":
import warnings
warnings.warn(
"json format for ROQ weights is deprecated, use hdf5 instead.",
DeprecationWarning
)
logger.info(f"Loading ROQ weights from {filename}")
if format == "json" or format == "npz":
# Old file format assumed to contain only a single basis
......
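The deprecation mechanics in both methods follow the standard pattern: accept the old format, but emit a `DeprecationWarning` on use. A minimal standalone sketch of that pattern (the function is illustrative, not the bilby implementation):

```python
import warnings

def resolve_filename(filename, format="hdf5"):
    # Reject unknown formats, warn on the deprecated one, and append
    # the extension if the caller omitted it, as in save_weights above.
    if format not in ["json", "npz", "hdf5"]:
        raise IOError(f"Format {format} not recognized.")
    if format == "json":
        warnings.warn(
            "json format for ROQ weights is deprecated, use hdf5 instead.",
            DeprecationWarning,
        )
    if format not in filename:
        filename += "." + format
    return filename

print(resolve_filename("roq_weights"))  # 'roq_weights.hdf5'
```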
......@@ -752,8 +752,21 @@ class CBCPriorDict(ConditionalPriorDict):
return None
def is_nonempty_intersection(self, pset):
""" Check if keys in self exist in the PARAMETER_SETS pset """
if len(PARAMETER_SETS[pset].intersection(self.non_fixed_keys)) > 0:
""" Check if keys in self exist in the parameter set
Parameters
----------
pset: str, set
Either a string referencing a parameter set in PARAMETER_SETS or
a set of keys
"""
if isinstance(pset, str) and pset in PARAMETER_SETS:
check_set = PARAMETER_SETS[pset]
elif isinstance(pset, set):
check_set = pset
else:
raise ValueError(f"pset {pset} not understood")
if len(check_set.intersection(self.non_fixed_keys)) > 0:
return True
else:
return False
......@@ -768,6 +781,11 @@ class CBCPriorDict(ConditionalPriorDict):
""" Return true if priors include any precession parameters """
return self.is_nonempty_intersection("precession_only")
@property
def measured_spin(self):
""" Return true if priors include any measured_spin parameters """
return self.is_nonempty_intersection("measured_spin")
@property
def intrinsic(self):
""" Return true if priors include any intrinsic parameters """
......@@ -778,6 +796,16 @@ class CBCPriorDict(ConditionalPriorDict):
""" Return true if priors include any extrinsic parameters """
return self.is_nonempty_intersection("extrinsic")
@property
def sky(self):
""" Return true if priors include any extrinsic parameters """
return self.is_nonempty_intersection("sky")
@property
def distance_inclination(self):
""" Return true if priors include any extrinsic parameters """
return self.is_nonempty_intersection("distance_inclination")
@property
def mass(self):
""" Return true if priors include any mass parameters """
......@@ -1003,8 +1031,8 @@ class BNSPriorDict(CBCPriorDict):
def test_redundancy(self, key, disable_logging=False):
logger.disabled = disable_logging
logger.info("Performing redundancy check using BBHPriorDict(self).test_redundancy")
logger.disabled = False
bbh_redundancy = BBHPriorDict(self).test_redundancy(key)
logger.disabled = False
if bbh_redundancy:
return True
......
......@@ -1081,10 +1081,21 @@ extrinsic = {
"cos_theta_jn", "geocent_time", "time_jitter", "ra", "dec",
"H1_time", "L1_time", "V1_time",
}
sky = {
"azimuth", "zenith", "ra", "dec",
}
distance_inclination = {
"luminosity_distance", "redshift", "theta_jn", "cos_theta_jn",
}
measured_spin = {
"chi_1", "chi_2", "a_1", "a_2", "chi_1_in_plane"
}
PARAMETER_SETS = dict(
spin=spin, mass=mass, phase=phase, extrinsic=extrinsic,
tidal=tidal, primary_spin_and_q=primary_spin_and_q,
intrinsic=spin.union(mass).union(phase).union(tidal),
precession_only=precession_only,
sky=sky, distance_inclination=distance_inclination,
measured_spin=measured_spin,
)
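The new CBCPriorDict properties above reduce to a set intersection between a named parameter set and the non-fixed keys of the prior dictionary. A standalone sketch with a made-up key list:

```python
# The sky property returns True when any sampled key is a sky parameter,
# which is what triggers the sky-specific proposals in the MCMC cycle.
sky = {"azimuth", "zenith", "ra", "dec"}
non_fixed_keys = ["chirp_mass", "mass_ratio", "ra", "dec", "theta_jn"]

def is_nonempty_intersection(check_set, keys):
    return len(check_set.intersection(keys)) > 0

print(is_nonempty_intersection(sky, non_fixed_keys))  # True
```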
......@@ -791,6 +791,160 @@ def lalsim_SimInspiralWaveformParamsInsertTidalLambda2(
waveform_dictionary, lambda_2)
def lalsim_SimNeutronStarEOS4ParamSDGammaCheck(g0, g1, g2, g3):
from lalsimulation import SimNeutronStarEOS4ParamSDGammaCheck
try:
g0 = float(g0)
g1 = float(g1)
g2 = float(g2)
g3 = float(g3)
except ValueError:
raise ValueError("Unable to convert EOS spectral parameters to floats")
except TypeError:
raise TypeError("Unable to convert EOS spectral parameters to floats")
return SimNeutronStarEOS4ParamSDGammaCheck(g0, g1, g2, g3)
def lalsim_SimNeutronStarEOS4ParameterSpectralDecomposition(g0, g1, g2, g3):
from lalsimulation import SimNeutronStarEOS4ParameterSpectralDecomposition
try:
g0 = float(g0)
g1 = float(g1)
g2 = float(g2)
g3 = float(g3)
except ValueError:
raise ValueError("Unable to convert EOS spectral parameters to floats")
except TypeError:
raise TypeError("Unable to convert EOS spectral parameters to floats")
return SimNeutronStarEOS4ParameterSpectralDecomposition(g0, g1, g2, g3)
def lalsim_SimNeutronStarEOS4ParamSDViableFamilyCheck(g0, g1, g2, g3):
from lalsimulation import SimNeutronStarEOS4ParamSDViableFamilyCheck
try:
g0 = float(g0)
g1 = float(g1)
g2 = float(g2)
g3 = float(g3)
except ValueError:
raise ValueError("Unable to convert EOS spectral parameters to floats")
except TypeError:
raise TypeError("Unable to convert EOS spectral parameters to floats")
return SimNeutronStarEOS4ParamSDViableFamilyCheck(g0, g1, g2, g3)
def lalsim_SimNeutronStarEOS3PieceDynamicPolytrope(g0, log10p1_si, g1, log10p2_si, g2):
from lalsimulation import SimNeutronStarEOS3PieceDynamicPolytrope
try:
g0 = float(g0)
g1 = float(g1)
g2 = float(g2)
log10p1_si = float(log10p1_si)
log10p2_si = float(log10p2_si)
except ValueError:
raise ValueError("Unable to convert EOS polytrope parameters to floats")
except TypeError:
raise TypeError("Unable to convert EOS polytrope parameters to floats")
return SimNeutronStarEOS3PieceDynamicPolytrope(g0, log10p1_si, g1, log10p2_si, g2)
def lalsim_SimNeutronStarEOS3PieceCausalAnalytic(v1, log10p1_si, v2, log10p2_si, v3):
from lalsimulation import SimNeutronStarEOS3PieceCausalAnalytic
try:
v1 = float(v1)
v2 = float(v2)
v3 = float(v3)
log10p1_si = float(log10p1_si)
log10p2_si = float(log10p2_si)
except ValueError:
raise ValueError("Unable to convert EOS causal parameters to floats")
except TypeError:
raise TypeError("Unable to convert EOS causal parameters to floats")
return SimNeutronStarEOS3PieceCausalAnalytic(v1, log10p1_si, v2, log10p2_si, v3)
def lalsim_SimNeutronStarEOS3PDViableFamilyCheck(p0, log10p1_si, p1, log10p2_si, p2, causal):
from lalsimulation import SimNeutronStarEOS3PDViableFamilyCheck
try:
p0 = float(p0)
p1 = float(p1)
p2 = float(p2)
log10p1_si = float(log10p1_si)
log10p2_si = float(log10p2_si)
causal = int(causal)
except ValueError:
raise ValueError("Unable to convert EOS parameters to floats or int")
except TypeError:
raise TypeError("Unable to convert EOS parameters to floats or int")
return SimNeutronStarEOS3PDViableFamilyCheck(p0, log10p1_si, p1, log10p2_si, p2, causal)
def lalsim_CreateSimNeutronStarFamily(eos):
from lalsimulation import CreateSimNeutronStarFamily
return CreateSimNeutronStarFamily(eos)
def lalsim_SimNeutronStarEOSMaxPseudoEnthalpy(eos):
from lalsimulation import SimNeutronStarEOSMaxPseudoEnthalpy
return SimNeutronStarEOSMaxPseudoEnthalpy(eos)
def lalsim_SimNeutronStarEOSSpeedOfSoundGeometerized(max_pseudo_enthalpy, eos):
from lalsimulation import SimNeutronStarEOSSpeedOfSoundGeometerized
try:
max_pseudo_enthalpy = float(max_pseudo_enthalpy)
except ValueError:
raise ValueError("Unable to convert max_pseudo_enthalpy to float.")
except TypeError:
raise TypeError("Unable to convert max_pseudo_enthalpy to float.")
return SimNeutronStarEOSSpeedOfSoundGeometerized(max_pseudo_enthalpy, eos)
def lalsim_SimNeutronStarFamMinimumMass(fam):
from lalsimulation import SimNeutronStarFamMinimumMass
return SimNeutronStarFamMinimumMass(fam)
def lalsim_SimNeutronStarMaximumMass(fam):
from lalsimulation import SimNeutronStarMaximumMass
return SimNeutronStarMaximumMass(fam)
def lalsim_SimNeutronStarRadius(mass_in_SI, fam):
from lalsimulation import SimNeutronStarRadius
try:
mass_in_SI = float(mass_in_SI)
except ValueError:
raise ValueError("Unable to convert mass_in_SI to float.")
except TypeError:
raise TypeError("Unable to convert mass_in_SI to float.")
return SimNeutronStarRadius(mass_in_SI, fam)
def lalsim_SimNeutronStarLoveNumberK2(mass_in_SI, fam):
from lalsimulation import SimNeutronStarLoveNumberK2
try:
mass_in_SI = float(mass_in_SI)
except ValueError:
raise ValueError("Unable to convert mass_in_SI to float.")
except TypeError:
raise TypeError("Unable to convert mass_in_SI to float.")
return SimNeutronStarLoveNumberK2(mass_in_SI, fam)
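The wrappers above all repeat one coercion idiom: lalsimulation functions require plain Python floats (or ints), so anything that cannot be converted is rejected with a clear error before the call. A standalone sketch of that idiom (the helper is illustrative, not part of bilby):

```python
def coerce_floats(*values, what="EOS parameters"):
    # float() raises ValueError for bad strings and TypeError for
    # non-numeric objects; re-raise either with a uniform message.
    try:
        return tuple(float(v) for v in values)
    except (ValueError, TypeError) as e:
        raise type(e)(f"Unable to convert {what} to floats")

print(coerce_floats(1, "2.5", 3.0))  # (1.0, 2.5, 3.0)
```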
def spline_angle_xform(delta_psi):
"""
Returns the angle in degrees corresponding to the spline
......