Compare revisions
Commits on Source (26)
Showing with 1669 additions and 101 deletions
......@@ -89,3 +89,5 @@ Vivien Raymond
Ka-Lok Lo
Isaac Legred
Marc Penuliar
Andrew Fowlie
Martin White
# All notable changes will be documented in this file
## [2.1.0] 2023-04-12
Version 2.1.0 release of Bilby
Minor feature improvements and bug fixes
### Additions
- Additional parameterizations for equation-of-state inference (!1083, !1240)
- Add Fisher matrix posterior estimator (!1242)
### Changes
- Improvements to the bilby-mcmc sampler including a Fisher Information Matrix proposal (!1242)
- Optimize spline interpolation of calibration uncertainties (!1241)
- Update LIGO India coordinates record to public DCC (!1246)
- Make logger disabling work in redundancy test (!1245)
- Make sure nested samples are data frame (!1244)
- Minor improvements to the result methods including moving to top level imports (!1243)
- Fix a bug in the slabspike prior (!1235)
- Reduce verbosity when setting strain data (!1233)
- Fix issue with cached results class (!1223)
### Deprecated
- Reading/writing ROQ weights to json (!1232)
## [2.0.2] 2023-03-21
Version 2.0.2 release of Bilby
......
......@@ -23,6 +23,7 @@ from . import core, gw, hyper
from .core import utils, likelihood, prior, result, sampler
from .core.sampler import run_sampler
from .core.likelihood import Likelihood
from .core.result import read_in_result, read_in_result_list
try:
from ._version import version as __version__
......
......@@ -157,7 +157,7 @@ class Chain(object):
@property
def minimum_index(self):
"""This calculated a minimum index from which to discard samples
"""This calculates a minimum index from which to discard samples
A number of methods are provided for the calculation. A subset is
switched off (by `if False` statements) for future development
......@@ -342,7 +342,12 @@ class Chain(object):
@property
def nsamples(self):
nuseable_steps = self.position - self.minimum_index
return int(nuseable_steps / (self.thin_by_nact * self.tau))
n_independent_samples = nuseable_steps / self.tau
nsamples = int(n_independent_samples / self.thin_by_nact)
if nuseable_steps >= nsamples:
return nsamples
else:
return 0
@property
def nsamples_last(self):
......
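To make the sample-count arithmetic above concrete, here is a minimal sketch with made-up numbers for `position`, `minimum_index`, `tau`, and `thin_by_nact` (the attributes the property reads from the chain):

```python
# Hypothetical chain state mirroring Chain.nsamples above
position = 10_000        # total steps taken so far
minimum_index = 2_000    # burn-in steps to discard
tau = 40.0               # current autocorrelation-time estimate
thin_by_nact = 1.0       # thinning factor in units of tau

nuseable_steps = position - minimum_index             # 8000
n_independent_samples = nuseable_steps / tau          # 200.0
nsamples = int(n_independent_samples / thin_by_nact)  # 200
print(nsamples if nuseable_steps >= nsamples else 0)  # 200
```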
......@@ -6,6 +6,7 @@ import numpy as np
from scipy.spatial.distance import jensenshannon
from scipy.stats import gaussian_kde
from ..core.fisher import FisherMatrixPosteriorEstimator
from ..core.prior import PriorDict
from ..core.sampler.base_sampler import SamplerError
from ..core.utils import logger, reflect
......@@ -61,6 +62,9 @@ class BaseProposal(object):
self.parameters = [p for p in self.parameters if p in subset]
self._str_attrs.append("parameters")
if len(self.parameters) == 0:
raise ValueError("Proposal requested with zero parameters")
self.ndim = len(self.parameters)
self.prior_boundary_dict = {key: priors[key].boundary for key in priors}
......@@ -129,10 +133,16 @@ class BaseProposal(object):
val_normalised_reflected = reflect(np.array(val_normalised))
return minimum + width * val_normalised_reflected
def __call__(self, chain):
sample, log_factor = self.propose(chain)
def __call__(self, chain, likelihood=None, priors=None):
if getattr(self, "needs_likelihood_and_priors", False):
sample, log_factor = self.propose(chain, likelihood, priors)
else:
sample, log_factor = self.propose(chain)
if log_factor == 0:
sample = self.apply_boundaries(sample)
return sample, log_factor
@abstractmethod
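The `__call__` change above dispatches on a class attribute: a proposal that declares `needs_likelihood_and_priors = True` receives the likelihood and priors, while every other proposal keeps the two-argument `propose` signature. A standalone sketch of the pattern (the class names here are illustrative, not bilby's):

```python
class BaseProposalSketch:
    needs_likelihood_and_priors = False

    def __call__(self, chain, likelihood=None, priors=None):
        # Only opt-in proposals see the likelihood and priors
        if getattr(self, "needs_likelihood_and_priors", False):
            return self.propose(chain, likelihood, priors)
        return self.propose(chain)


class SimpleProposal(BaseProposalSketch):
    def propose(self, chain):
        return chain, 0


class LikelihoodAwareProposal(BaseProposalSketch):
    needs_likelihood_and_priors = True

    def propose(self, chain, likelihood, priors):
        return chain, 0
```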
......@@ -459,7 +469,7 @@ class DensityEstimateProposal(BaseProposal):
# Print a log message
took = time.time() - start
logger.info(
logger.debug(
f"{self.density_name} construction at {self.steps_since_refit} finished"
f" for length {chain.position} chain, took {took:0.2f}s."
f" Current accept-ratio={self.acceptance_ratio:0.2f}"
......@@ -480,7 +490,7 @@ class DensityEstimateProposal(BaseProposal):
fail_parameters.append(key)
if len(fail_parameters) > 0:
logger.info(
logger.debug(
f"{self.density_name} construction failed verification and is discarded"
)
self.density = current_density
......@@ -493,7 +503,10 @@ class DensityEstimateProposal(BaseProposal):
# Check if we refit
testA = self.steps_since_refit >= self.next_refit_time
if testA:
self.refit(chain)
try:
self.refit(chain)
except Exception as e:
logger.warning(f"Failed to refit chain due to error {e}")
# If KDE is yet to be fitted, use the fallback
if self.trained is False:
......@@ -656,7 +669,7 @@ class NormalizingFlowProposal(DensityEstimateProposal):
return np.power(max_js, 2)
def train(self, chain):
logger.info("Starting NF training")
logger.debug("Starting NF training")
import torch
......@@ -687,14 +700,14 @@ class NormalizingFlowProposal(DensityEstimateProposal):
validation_samples, training_samples_draw
)
if max_js_bits < max_js_threshold:
logger.info(
logger.debug(
f"Training complete after {epoch} steps, "
f"max_js_bits={max_js_bits:0.5f}<{max_js_threshold}"
)
break
took = time.time() - start
logger.info(
logger.debug(
f"Flow training step ({self.steps_since_refit}) finished"
f" for length {chain.position} chain, took {took:0.2f}s."
f" Current accept-ratio={self.acceptance_ratio:0.2f}"
......@@ -715,7 +728,10 @@ class NormalizingFlowProposal(DensityEstimateProposal):
# Check if we retrain the NF
testA = self.steps_since_refit >= self.next_refit_time
if testA:
self.train(chain)
try:
self.train(chain)
except Exception as e:
logger.warning(f"Failed to retrain chain due to error {e}")
if self.trained is False:
return self.fallback.propose(chain)
......@@ -772,6 +788,64 @@ class FixedJumpProposal(BaseProposal):
return self.scale * np.random.normal()
class FisherMatrixProposal(AdaptiveGaussianProposal):
needs_likelihood_and_priors = True
"""Fisher Matrix Proposals
Uses a finite differencing approach motivated by BayesWave (see, e.g.
https://arxiv.org/abs/1410.3835). The inverse Fisher Information Matrix
is calculated from the current sample, then proposals are drawn from a
multivariate Gaussian and scaled by an adaptive parameter.
"""
def __init__(
self,
priors,
subset=None,
weight=1,
update_interval=100,
scale_init=1e0,
fd_eps=1e-6,
adapt=False,
):
super(FisherMatrixProposal, self).__init__(
priors, weight, subset, scale_init=scale_init
)
self.update_interval = update_interval
self.steps_since_update = update_interval
self.adapt = adapt
self.mean = np.zeros(len(self.parameters))
self.fd_eps = fd_eps
def propose(self, chain, likelihood, priors):
sample = chain.current_sample
if self.adapt:
self.update_scale(chain)
if self.steps_since_update >= self.update_interval:
fmp = FisherMatrixPosteriorEstimator(
likelihood, priors, parameters=self.parameters, fd_eps=self.fd_eps
)
try:
self.iFIM = fmp.calculate_iFIM(sample.dict)
except (RuntimeError, ValueError) as e:
logger.warning(f"FisherMatrixProposal failed with {e}")
if hasattr(self, "iFIM") is False:
# No past iFIM exists, return sample
return sample, 0
self.steps_since_update = 0
jump = self.scale * np.random.multivariate_normal(
self.mean, self.iFIM, check_valid="ignore"
)
for key, val in zip(self.parameters, jump):
sample[key] += val
log_factor = 0
self.steps_since_update += 1
return sample, log_factor
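The jump drawn by `FisherMatrixProposal.propose` is a zero-mean multivariate-normal draw with the cached inverse Fisher matrix as covariance, scaled by the adaptive `scale`. A self-contained sketch of that draw with a made-up 2x2 `iFIM` and illustrative parameter values:

```python
import numpy as np

scale = 1.0
mean = np.zeros(2)
# Hypothetical inverse Fisher matrix for two correlated parameters
iFIM = np.array([[0.04, 0.01],
                 [0.01, 0.09]])
jump = scale * np.random.multivariate_normal(mean, iFIM, check_valid="ignore")

sample = {"chirp_mass": 30.0, "mass_ratio": 0.8}  # illustrative values
for key, val in zip(sample, jump):
    sample[key] += val
```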
class BaseGravitationalWaveTransientProposal(BaseProposal):
def __init__(self, priors, weight=1):
super(BaseGravitationalWaveTransientProposal, self).__init__(
......@@ -985,7 +1059,7 @@ def get_default_ensemble_proposal_cycle(priors):
def get_proposal_cycle(string, priors, L1steps=1, warn=True):
big_weight = 10
small_weight = 5
tiny_weight = 0.1
tiny_weight = 0.5
if "gwA" in string:
# Parameters for learning proposals
......@@ -1009,15 +1083,15 @@ def get_proposal_cycle(string, priors, L1steps=1, warn=True):
if priors.intrinsic:
intrinsic = PARAMETER_SETS["intrinsic"]
plist += [
AdaptiveGaussianProposal(priors, weight=big_weight, subset=intrinsic),
AdaptiveGaussianProposal(priors, weight=small_weight, subset=intrinsic),
DifferentialEvolutionProposal(
priors, weight=big_weight, subset=intrinsic
priors, weight=small_weight, subset=intrinsic
),
KDEProposal(
priors, weight=big_weight, subset=intrinsic, **learning_kwargs
priors, weight=small_weight, subset=intrinsic, **learning_kwargs
),
GMMProposal(
priors, weight=big_weight, subset=intrinsic, **learning_kwargs
priors, weight=small_weight, subset=intrinsic, **learning_kwargs
),
]
......@@ -1026,13 +1100,13 @@ def get_proposal_cycle(string, priors, L1steps=1, warn=True):
plist += [
AdaptiveGaussianProposal(priors, weight=small_weight, subset=extrinsic),
DifferentialEvolutionProposal(
priors, weight=big_weight, subset=extrinsic
priors, weight=small_weight, subset=extrinsic
),
KDEProposal(
priors, weight=big_weight, subset=extrinsic, **learning_kwargs
priors, weight=small_weight, subset=extrinsic, **learning_kwargs
),
GMMProposal(
priors, weight=big_weight, subset=extrinsic, **learning_kwargs
priors, weight=small_weight, subset=extrinsic, **learning_kwargs
),
]
......@@ -1043,6 +1117,11 @@ def get_proposal_cycle(string, priors, L1steps=1, warn=True):
GMMProposal(
priors, weight=small_weight, subset=mass, **learning_kwargs
),
FisherMatrixProposal(
priors,
weight=small_weight,
subset=mass,
),
]
if priors.spin:
......@@ -1052,13 +1131,23 @@ def get_proposal_cycle(string, priors, L1steps=1, warn=True):
GMMProposal(
priors, weight=small_weight, subset=spin, **learning_kwargs
),
FisherMatrixProposal(
priors,
weight=big_weight,
subset=spin,
),
]
if priors.precession:
measured_spin = ["chi_1", "chi_2", "a_1", "a_2", "chi_1_in_plane"]
if priors.measured_spin:
measured_spin = PARAMETER_SETS["measured_spin"]
plist += [
AdaptiveGaussianProposal(
priors, weight=small_weight, subset=measured_spin
),
FisherMatrixProposal(
priors,
weight=small_weight,
subset=measured_spin,
),
]
if priors.mass and priors.spin:
......@@ -1086,6 +1175,21 @@ def get_proposal_cycle(string, priors, L1steps=1, warn=True):
CorrelatedPolarisationPhaseJump(priors, weight=tiny_weight),
PhasePolarisationReversalProposal(priors, weight=tiny_weight),
]
if priors.sky:
sky = PARAMETER_SETS["sky"]
plist += [
FisherMatrixProposal(
priors,
weight=small_weight,
subset=sky,
),
GMMProposal(
priors,
weight=small_weight,
subset=sky,
**learning_kwargs,
),
]
for key in ["time_jitter", "psi", "phi_12", "tilt_2", "lambda_1", "lambda_2"]:
if key in priors.non_fixed_keys:
plist.append(PriorProposal(priors, subset=[key], weight=tiny_weight))
......@@ -1101,6 +1205,7 @@ def get_proposal_cycle(string, priors, L1steps=1, warn=True):
DifferentialEvolutionProposal(priors, weight=big_weight),
UniformProposal(priors, weight=tiny_weight),
KDEProposal(priors, weight=big_weight, scale_fits=L1steps),
FisherMatrixProposal(priors, weight=big_weight),
]
if GMMProposal.check_dependencies(warn=warn):
plist.append(GMMProposal(priors, weight=big_weight, scale_fits=L1steps))
......@@ -1120,6 +1225,7 @@ def remove_proposals_using_string(plist, string):
GM=GMMProposal,
PR=PriorProposal,
UN=UniformProposal,
FM=FisherMatrixProposal,
)
for element in string.split("no")[1:]:
......
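Based on the mapping added above, `remove_proposals_using_string` strips proposals by two-letter code: each token after a `no` in the cycle string selects a class to drop (so, assuming the naming convention, a string like `default_noFM_noGM` would remove the Fisher matrix and GMM proposals). A standalone sketch of just the string parsing:

```python
# Sketch of the "no<CODE>" parsing; the cycle-string convention is assumed
codes = {"FM": "FisherMatrixProposal", "GM": "GMMProposal"}
string = "default_noFM_noGM"
removed = [codes[element[:2]] for element in string.split("no")[1:]]
print(removed)  # ['FisherMatrixProposal', 'GMMProposal']
```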
......@@ -146,6 +146,7 @@ class Bilby_MCMC(MCMCSampler):
L1steps=100,
L2steps=3,
printdt=60,
check_point_delta_t=1800,
min_tau=1,
proposal_cycle="default",
stop_after_convergence=False,
......@@ -165,7 +166,6 @@ class Bilby_MCMC(MCMCSampler):
use_ratio=False,
skip_import_verification=True,
check_point_plot=True,
check_point_delta_t=1800,
diagnostic=False,
resume=True,
exit_code=130,
......@@ -202,9 +202,9 @@ class Bilby_MCMC(MCMCSampler):
self.initial_sample_dict = self.kwargs["initial_sample_dict"]
self.printdt = self.kwargs["printdt"]
self.check_point_delta_t = self.kwargs["check_point_delta_t"]
check_directory_exists_and_if_not_mkdir(self.outdir)
self.resume = resume
self.check_point_delta_t = check_point_delta_t
self.resume_file = "{}/{}_resume.pickle".format(self.outdir, self.label)
self.verify_configuration()
......@@ -224,6 +224,10 @@ class Bilby_MCMC(MCMCSampler):
for equiv in self.npool_equiv_kwargs:
if equiv in kwargs:
kwargs["npool"] = kwargs.pop(equiv)
if "check_point_delta_t" not in kwargs:
for equiv in self.check_point_equiv_kwargs:
if equiv in kwargs:
kwargs["check_point_delta_t"] = kwargs.pop(equiv)
@property
def target_nsamples(self):
......@@ -315,8 +319,8 @@ class Bilby_MCMC(MCMCSampler):
self._steps_since_last_print = 0
self._time_since_last_print = 0
logger.info(f"Drawing {self.target_nsamples} samples")
logger.info(f"Checkpoint every {self.check_point_delta_t}s")
logger.info(f"Print update every {self.printdt}s")
logger.info(f"Checkpoint every check_point_delta_t={self.check_point_delta_t}s")
logger.info(f"Print update every printdt={self.printdt}s")
while True:
t0 = datetime.datetime.now()
......@@ -390,7 +394,6 @@ class Bilby_MCMC(MCMCSampler):
)
raise ResumeError(msg)
self.ptsampler.set_convergence_inputs(self.convergence_inputs)
self.ptsampler.proposal_cycle = self.proposal_cycle
self.ptsampler.pt_rejection_sample = self.pt_rejection_sample
logger.info(
......@@ -734,13 +737,19 @@ class BilbyPTMCMCSampler(object):
@property
def samples(self):
cached_samples = getattr(self, "_cached_samples", (False,))
if cached_samples[0] == self.position:
return cached_samples[1]
sample_list = []
for sampler in self.zerotemp_sampler_list:
sample_list.append(sampler.samples)
if self.pt_rejection_sample:
for sampler in self.tempered_sampler_list:
sample_list.append(sampler.samples)
return pd.concat(sample_list)
samples = pd.concat(sample_list, ignore_index=True)
self._cached_samples = (self.position, samples)
return samples
@property
def position(self):
......@@ -783,7 +792,7 @@ class BilbyPTMCMCSampler(object):
@staticmethod
def _get_sample_to_swap(sampler):
if sampler.chain.converged is False:
if not (sampler.chain.converged and sampler.stop_after_convergence):
v = sampler.chain[-1]
else:
v = sampler.chain.random_sample
......@@ -897,7 +906,7 @@ class BilbyPTMCMCSampler(object):
ln_z, ln_z_err = self.compute_evidence_per_ensemble(method, kwargs)
self.ln_z_dict[key] = ln_z
self.ln_z_err_dict[key] = ln_z_err
logger.info(
logger.debug(
f"Log-evidence of {ln_z:0.2f}+/-{ln_z_err:0.2f} calculated using {key} method"
)
......@@ -1141,7 +1150,9 @@ class BilbyMCMCSampler(object):
if initial_sample_dict is not None:
initial_sample.update(initial_sample_dict)
logger.info(f"Using initial sample {initial_sample}")
if self.beta == 1:
logger.info(f"Using initial sample {initial_sample}")
initial_sample = Sample(initial_sample)
initial_sample[LOGLKEY] = self.log_likelihood(initial_sample)
initial_sample[LOGPKEY] = self.log_prior(initial_sample)
......@@ -1216,7 +1227,11 @@ class BilbyMCMCSampler(object):
while internal_steps < self.chain.L1steps:
internal_steps += 1
proposal = self.proposal_cycle.get_proposal()
prop, log_factor = proposal(self.chain)
prop, log_factor = proposal(
self.chain,
likelihood=_sampling_convenience_dump.likelihood,
priors=_sampling_convenience_dump.priors,
)
logp = self.log_prior(prop)
if np.isinf(logp) or np.isnan(logp):
......
from . import grid, likelihood, prior, result, sampler, series, utils
from . import grid, likelihood, prior, result, sampler, series, utils, fisher
import numpy as np
import scipy.linalg
import pandas as pd
from scipy.optimize import minimize
class FisherMatrixPosteriorEstimator(object):
def __init__(self, likelihood, priors, parameters=None, fd_eps=1e-6, n_prior_samples=100):
""" A class to estimate posteriors using the Fisher Matrix approach
Parameters
----------
likelihood: bilby.core.likelihood.Likelihood
A bilby likelihood object
priors: bilby.core.prior.PriorDict
A bilby prior object
parameters: list
Names of parameters to sample in
fd_eps: float
A parameter to control the size of perturbation used when finite
differencing the likelihood
n_prior_samples: int
The number of prior samples to draw and use to attempt estimation
of the maximum likelihood sample.
"""
self.likelihood = likelihood
if parameters is None:
self.parameter_names = priors.non_fixed_keys
else:
self.parameter_names = parameters
self.fd_eps = fd_eps
self.n_prior_samples = n_prior_samples
self.N = len(self.parameter_names)
self.prior_samples = [
priors.sample_subset(self.parameter_names) for _ in range(n_prior_samples)
]
self.prior_bounds = [(priors[key].minimum, priors[key].maximum) for key in self.parameter_names]
self.prior_width_dict = {}
for key in self.parameter_names:
width = priors[key].width
if np.isnan(width):
raise ValueError(f"Prior width is ill-formed for {key}")
self.prior_width_dict[key] = width
def log_likelihood(self, sample):
self.likelihood.parameters.update(sample)
return self.likelihood.log_likelihood()
def calculate_iFIM(self, sample):
FIM = self.calculate_FIM(sample)
iFIM = scipy.linalg.inv(FIM)
# Ensure iFIM is positive definite
min_eig = np.min(np.real(np.linalg.eigvals(iFIM)))
if min_eig < 0:
iFIM -= 10 * min_eig * np.eye(*iFIM.shape)
return iFIM
def sample_array(self, sample, n=1):
if sample == "maxL":
sample = self.get_maximum_likelihood_sample()
self.mean = np.array(list(sample.values()))
self.iFIM = self.calculate_iFIM(sample)
return np.random.multivariate_normal(self.mean, self.iFIM, n)
def sample_dataframe(self, sample, n=1):
samples = self.sample_array(sample, n)
return pd.DataFrame(samples, columns=self.parameter_names)
def calculate_FIM(self, sample):
FIM = np.zeros((self.N, self.N))
for ii, ii_key in enumerate(self.parameter_names):
for jj, jj_key in enumerate(self.parameter_names):
FIM[ii, jj] = -self.get_second_order_derivative(sample, ii_key, jj_key)
return FIM
def get_second_order_derivative(self, sample, ii, jj):
if ii == jj:
return self.get_finite_difference_xx(sample, ii)
else:
return self.get_finite_difference_xy(sample, ii, jj)
def get_finite_difference_xx(self, sample, ii):
# Sample grid
p = self.shift_sample_x(sample, ii, 1)
m = self.shift_sample_x(sample, ii, -1)
dx = .5 * (p[ii] - m[ii])
loglp = self.log_likelihood(p)
logl = self.log_likelihood(sample)
loglm = self.log_likelihood(m)
return (loglp - 2 * logl + loglm) / dx ** 2
def get_finite_difference_xy(self, sample, ii, jj):
# Sample grid
pp = self.shift_sample_xy(sample, ii, 1, jj, 1)
pm = self.shift_sample_xy(sample, ii, 1, jj, -1)
mp = self.shift_sample_xy(sample, ii, -1, jj, 1)
mm = self.shift_sample_xy(sample, ii, -1, jj, -1)
dx = .5 * (pp[ii] - mm[ii])
dy = .5 * (pp[jj] - mm[jj])
loglpp = self.log_likelihood(pp)
loglpm = self.log_likelihood(pm)
loglmp = self.log_likelihood(mp)
loglmm = self.log_likelihood(mm)
return (loglpp - loglpm - loglmp + loglmm) / (4 * dx * dy)
def shift_sample_x(self, sample, x_key, x_coef):
vx = sample[x_key]
dvx = self.fd_eps * self.prior_width_dict[x_key]
shift_sample = sample.copy()
shift_sample[x_key] = vx + x_coef * dvx
return shift_sample
def shift_sample_xy(self, sample, x_key, x_coef, y_key, y_coef):
vx = sample[x_key]
vy = sample[y_key]
dvx = self.fd_eps * self.prior_width_dict[x_key]
dvy = self.fd_eps * self.prior_width_dict[y_key]
shift_sample = sample.copy()
shift_sample[x_key] = vx + x_coef * dvx
shift_sample[y_key] = vy + y_coef * dvy
return shift_sample
def get_maximum_likelihood_sample(self, initial_sample=None):
""" A method to attempt optimization of the maximum likelihood
This uses a simple scipy optimization approach, starting from a number
of draws from the prior to avoid problems with local optimization.
Note: this approach works well in low-dimensional problems when the
posterior is narrow relative to the prior. But if the number of dimensions
is large or the posterior is wide relative to the prior, the method can
fail to find the global maximum.
"""
minlogL = np.inf
for i in range(self.n_prior_samples):
initial_sample = self.prior_samples[i]
x0 = list(initial_sample.values())
def neg_log_like(x, self, T=1):
sample = {key: val for key, val in zip(self.parameter_names, x)}
return - 1 / T * self.log_likelihood(sample)
out = minimize(
neg_log_like,
x0,
args=(self, 1),
bounds=self.prior_bounds,
method="L-BFGS-B",
)
if out.fun < minlogL:
minlogL = out.fun
minout = out
return {key: val for key, val in zip(self.parameter_names, minout.x)}
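A minimal usage sketch of the estimator, assuming a standard bilby installation (the toy likelihood and parameter names are illustrative; `bilby.core.fisher` is exposed by the `__init__` change in this diff):

```python
import bilby

class ToyGaussianLikelihood(bilby.core.likelihood.Likelihood):
    """Analytic 2D Gaussian likelihood, for illustration only."""
    def __init__(self):
        super().__init__(parameters={"x": None, "y": None})

    def log_likelihood(self):
        x, y = self.parameters["x"], self.parameters["y"]
        return -0.5 * ((x - 1.0) ** 2 / 0.1 ** 2 + (y + 2.0) ** 2 / 0.2 ** 2)

priors = bilby.core.prior.PriorDict(dict(
    x=bilby.core.prior.Uniform(0, 2, "x"),
    y=bilby.core.prior.Uniform(-3, -1, "y"),
))
fmp = bilby.core.fisher.FisherMatrixPosteriorEstimator(ToyGaussianLikelihood(), priors)
# Optimize for the maximum-likelihood point, then draw Gaussian samples around it
df = fmp.sample_dataframe("maxL", n=1000)
print(df.mean())  # should sit near x=1, y=-2
```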
......@@ -1517,3 +1517,107 @@ class Categorical(Prior):
idxs = np.isin(val, self.categories)
probs[idxs] = self.lnp
return probs
class Triangular(Prior):
"""
Define a new prior class which draws from a triangular distribution.
For distribution details see: wikipedia.org/wiki/Triangular_distribution
Here, minimum <= mode <= maximum,
where the mode has the highest pdf value.
"""
def __init__(self, mode, minimum, maximum, name=None, latex_label=None, unit=None):
super(Triangular, self).__init__(
name=name,
latex_label=latex_label,
unit=unit,
minimum=minimum,
maximum=maximum,
)
self.mode = mode
self.fractional_mode = (self.mode - self.minimum) / (
self.maximum - self.minimum
)
self.scale = self.maximum - self.minimum
self.rescaled_minimum = self.minimum - (self.minimum == self.mode) * self.scale
self.rescaled_maximum = self.maximum + (self.maximum == self.mode) * self.scale
def rescale(self, val):
"""
'Rescale' a sample from standard uniform to a triangular distribution.
This maps through the inverse CDF, which has been solved analytically for this case.
Parameters
==========
val: Union[float, int, array_like]
Uniform probability
Returns
=======
Union[float, array_like]: Sample drawn from the triangular distribution
"""
below_mode = (val * self.scale * (self.mode - self.minimum)) ** 0.5
above_mode = ((1 - val) * self.scale * (self.maximum - self.mode)) ** 0.5
return (self.minimum + below_mode) * (val < self.fractional_mode) + (
self.maximum - above_mode
) * (val >= self.fractional_mode)
def prob(self, val):
"""
Return the prior probability of val
Parameters
==========
val: Union[float, int, array_like]
Returns
=======
float: Prior probability of val
"""
between_minimum_and_mode = (
(val < self.mode)
* (self.minimum <= val)
* (val - self.rescaled_minimum)
/ (self.mode - self.rescaled_minimum)
)
between_mode_and_maximum = (
(self.mode <= val)
* (val <= self.maximum)
* (self.rescaled_maximum - val)
/ (self.rescaled_maximum - self.mode)
)
return 2.0 * (between_minimum_and_mode + between_mode_and_maximum) / self.scale
def cdf(self, val):
"""
Return the prior cumulative probability at val
Parameters
==========
val: Union[float, int, array_like]
Returns
=======
float: prior cumulative probability at val
"""
return (
+ (val > self.mode)
+ (val > self.minimum)
* (val <= self.maximum)
/ (self.scale)
* (
(val > self.mode)
* (self.rescaled_maximum - val) ** 2.0
/ (self.mode - self.rescaled_maximum)
+ (val <= self.mode)
* (val - self.rescaled_minimum) ** 2.0
/ (self.mode - self.rescaled_minimum)
)
)
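A short usage sketch of the new prior (assuming the class is exported alongside the other analytical priors):

```python
import numpy as np
from bilby.core.prior import Triangular

prior = Triangular(mode=1.0, minimum=0.0, maximum=2.0, name="x")
samples = prior.rescale(np.random.uniform(0, 1, 1000))  # inverse-CDF sampling
print(prior.prob(1.0))  # peak density 2/(maximum - minimum) = 1.0
print(prior.cdf(1.0))   # 0.5, since the mode is central here
```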
from numbers import Number
import numpy as np
from .base import Prior
from ..utils import logger
......@@ -84,6 +85,7 @@ class SlabSpikePrior(Prior):
=======
array_like: Associated prior value with input value.
"""
original_is_number = isinstance(val, Number)
val = np.atleast_1d(val)
lower_indices = np.where(val < self.inverse_cdf_below_spike)[0]
......@@ -96,6 +98,12 @@ class SlabSpikePrior(Prior):
res[lower_indices] = self._contracted_rescale(val[lower_indices])
res[intermediate_indices] = self.spike_location
res[higher_indices] = self._contracted_rescale(val[higher_indices] - self.spike_height)
if original_is_number:
try:
res = res[0]
except (KeyError, TypeError):
logger.warning("Based on inputs, a number should be output\
but this could not be accessed from what was computed")
return res
def _contracted_rescale(self, val):
......@@ -126,9 +134,16 @@ class SlabSpikePrior(Prior):
=======
array_like: Prior probability of val
"""
original_is_number = isinstance(val, Number)
res = self.slab.prob(val) * self.slab_fraction
res = np.atleast_1d(res)
res[np.where(val == self.spike_location)] = np.inf
if original_is_number:
try:
res = res[0]
except (KeyError, TypeError):
logger.warning("Based on inputs, a number should be output\
but this could not be accessed from what was computed")
return res
def ln_prob(self, val):
......@@ -143,9 +158,16 @@ class SlabSpikePrior(Prior):
=======
array_like: Prior probability of val
"""
original_is_number = isinstance(val, Number)
res = self.slab.ln_prob(val) + np.log(self.slab_fraction)
res = np.atleast_1d(res)
res[np.where(val == self.spike_location)] = np.inf
if original_is_number:
try:
res = res[0]
except (KeyError, TypeError):
logger.warning("Based on inputs, a number should be output\
but this could not be accessed from what was computed")
return res
def cdf(self, val):
......
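All three hunks above apply the same scalar-in, scalar-out pattern: record whether the input was a plain number, compute on an array, then unwrap. A standalone sketch of the pattern:

```python
from numbers import Number
import numpy as np

def prob_like(val):
    original_is_number = isinstance(val, Number)
    res = np.atleast_1d(np.asarray(val, dtype=float) * 2.0)  # stand-in computation
    if original_is_number:
        res = res[0]  # unwrap to a scalar for scalar input
    return res

print(prob_like(1.5))         # 3.0 (scalar in, scalar out)
print(prob_like([1.0, 2.0]))  # [2. 4.] (array in, array out)
```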
......@@ -76,7 +76,7 @@ def _determine_file_name(filename, outdir, label, extension, gzip):
return result_file_name(outdir, label, extension, gzip)
def read_in_result(filename=None, outdir=None, label=None, extension='json', gzip=False):
def read_in_result(filename=None, outdir=None, label=None, extension='json', gzip=False, result_class=None):
""" Reads in a stored bilby result object
Parameters
......@@ -86,21 +86,29 @@ def read_in_result(filename=None, outdir=None, label=None, extension='json', gzi
outdir, label, extension: str
Name of the output directory, label and extension used for the default
naming scheme.
result_class: bilby.core.result.Result, or child of
The result class to use. By default, `bilby.core.result.Result` is used,
but objects which inherit from this class can be given providing
additional methods.
"""
filename = _determine_file_name(filename, outdir, label, extension, gzip)
if result_class is None:
result_class = Result
elif not issubclass(result_class, Result):
raise ValueError(f"Input result_class={result_class} not understood")
# Get the actual extension (may differ from the default extension if the filename is given)
extension = os.path.splitext(filename)[1].lstrip('.')
if extension == 'gz': # gzipped file
extension = os.path.splitext(os.path.splitext(filename)[0])[1].lstrip('.')
if 'json' in extension:
result = Result.from_json(filename=filename)
result = result_class.from_json(filename=filename)
elif ('hdf5' in extension) or ('h5' in extension):
result = Result.from_hdf5(filename=filename)
result = result_class.from_hdf5(filename=filename)
elif ("pkl" in extension) or ("pickle" in extension):
result = Result.from_pickle(filename=filename)
result = result_class.from_pickle(filename=filename)
elif extension is None:
raise ValueError("No filetype extension provided")
else:
......@@ -108,6 +116,34 @@ def read_in_result(filename=None, outdir=None, label=None, extension='json', gzi
return result
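A usage sketch of the new `result_class` argument (the subclass and file path are hypothetical):

```python
from bilby.core.result import Result, read_in_result

class MyResult(Result):
    """Hypothetical Result subclass adding a convenience method."""
    def n_posterior_samples(self):
        return len(self.posterior)

# Returns a MyResult instance instead of a plain Result
result = read_in_result("outdir/label_result.json", result_class=MyResult)
```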
def read_in_result_list(filename_list, invalid="warning"):
""" Read in a set of results
Parameters
==========
filename_list: list
A list of filename paths
invalid: str (ignore, warning, error)
Behaviour if a file in filename_list is not a valid bilby result
Returns
-------
result_list: ResultList
A list of results
"""
results_list = []
for filename in filename_list:
try:
results_list.append(read_in_result(filename=filename))
except Exception as e:
msg = f"Failed to read in file {filename} due to exception {e}"
if invalid == "error":
raise ResultListError(msg)
elif invalid == "warning":
logger.warning(msg)
return ResultList(results_list)
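And a sketch of `read_in_result_list` (file paths hypothetical; `invalid="warning"` logs and skips unreadable files rather than raising):

```python
from bilby.core.result import read_in_result_list

results = read_in_result_list(
    ["outdir/run1_result.json", "outdir/run2_result.json"],
    invalid="warning",
)
combined = results.combine()  # ResultList provides combination methods
```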
def get_weights_for_reweighting(
result, new_likelihood=None, new_prior=None, old_likelihood=None,
old_prior=None, resume_file=None, n_checkpoint=5000, npool=1):
......@@ -423,6 +459,8 @@ class Result(object):
self.injection_parameters = injection_parameters
self.posterior = posterior
self.samples = samples
if isinstance(nested_samples, dict):
nested_samples = pd.DataFrame(nested_samples)
self.nested_samples = nested_samples
self.walkers = walkers
self.nburn = nburn
......@@ -1696,7 +1734,7 @@ class Result(object):
class ResultList(list):
def __init__(self, results=None):
def __init__(self, results=None, consistency_level="warning"):
""" A class to store a list of :class:`bilby.core.result.Result` objects
from equivalent runs on the same data. This provides methods for
outputting combined results.
......@@ -1705,8 +1743,15 @@ class ResultList(list):
==========
results: list
A list of `:class:`bilby.core.result.Result`.
consistency_level: str, [ignore, warning, error]
If warning, print a warning if inconsistencies are discovered
between the results. If error, raise an error if inconsistencies
are discovered between the results before combining. If ignore, do
nothing.
"""
super(ResultList, self).__init__()
self.consistency_level = consistency_level
for result in results:
self.append(result)
......@@ -1737,9 +1782,11 @@ class ResultList(list):
----------
shuffle: bool
If true, shuffle the samples when combining, otherwise they are concatenated.
consistency_level: str, [warning, error]
If warning, print a warning if inconsistencies are discovered between the results before combining.
If error, raise an error if inconsistencies are discovered between the results before combining.
consistency_level: str, [ignore, warning, error]
Overwrite the class level consistency_level. If warning, print a
warning if inconsistencies are discovered between the results. If
error, raise an error if inconsistencies are discovered between
the results before combining. If ignore, do nothing.
Returns
-------
......@@ -1884,6 +1931,8 @@ class ResultList(list):
raise ResultListError(msg)
elif self.consistency_level == "warning":
logger.warning(msg)
elif self.consistency_level == "ignore":
pass
else:
raise ValueError(f"Input consistency_level {self.consistency_level} not understood")
......
......@@ -202,6 +202,7 @@ class Sampler(object):
If a specific sampler does not have a sampling seed option, then it should be
left as None.
"""
check_point_equiv_kwargs = ["check_point_deltaT", "check_point_delta_t"]
def __init__(
self,
......@@ -253,7 +254,7 @@ class Sampler(object):
self.kwargs = kwargs
self._check_cached_result()
self._check_cached_result(result_class)
self._log_summary_for_sampler()
......@@ -631,7 +632,7 @@ class Sampler(object):
"""
raise ValueError("Method not yet implemented")
def _check_cached_result(self):
def _check_cached_result(self, result_class=None):
"""Check if the cached data file exists and can be used"""
if command_line_args.clean:
......@@ -640,7 +641,9 @@ class Sampler(object):
return
try:
self.cached_result = read_in_result(outdir=self.outdir, label=self.label)
self.cached_result = read_in_result(
outdir=self.outdir, label=self.label, result_class=result_class
)
except IOError:
self.cached_result = None
......
......@@ -10,12 +10,27 @@ import pickle
import numpy as np
from pandas import DataFrame, Series
from scipy.stats import norm
from .utils import (lalsim_SimNeutronStarEOS4ParamSDGammaCheck,
lalsim_SimNeutronStarEOS4ParameterSpectralDecomposition,
lalsim_SimNeutronStarEOS4ParamSDViableFamilyCheck,
lalsim_SimNeutronStarEOS3PieceDynamicPolytrope,
lalsim_SimNeutronStarEOS3PieceCausalAnalytic,
lalsim_SimNeutronStarEOS3PDViableFamilyCheck,
lalsim_CreateSimNeutronStarFamily,
lalsim_SimNeutronStarEOSMaxPseudoEnthalpy,
lalsim_SimNeutronStarEOSSpeedOfSoundGeometerized,
lalsim_SimNeutronStarFamMinimumMass,
lalsim_SimNeutronStarMaximumMass,
lalsim_SimNeutronStarRadius,
lalsim_SimNeutronStarLoveNumberK2)
from ..core.likelihood import MarginalizedLikelihoodReconstructionError
from ..core.utils import logger, solar_mass, command_line_args, safe_file_dump
from ..core.utils import logger, solar_mass, gravitational_constant, speed_of_light, command_line_args, safe_file_dump
from ..core.prior import DeltaFunction
from .utils import lalsim_SimInspiralTransformPrecessingNewInitialConditions
from .eos.eos import SpectralDecompositionEOS, EOSFamily, IntegrateTOV
from .eos.eos import IntegrateTOV
from .cosmology import get_cosmology, z_at_value
......@@ -260,9 +275,7 @@ def convert_to_lal_binary_black_hole_parameters(parameters):
converted_parameters["delta_phase"]
- np.sign(np.cos(converted_parameters["theta_jn"]))
* converted_parameters["psi"],
2 * np.pi
)
2 * np.pi)
added_keys = [key for key in converted_parameters.keys()
if key not in original_keys]
......@@ -279,9 +292,11 @@ def convert_to_lal_binary_neutron_star_parameters(parameters):
Mass: mass_1, mass_2
Spin: a_1, a_2, tilt_1, tilt_2, phi_12, phi_jl
Extrinsic: luminosity_distance, theta_jn, phase, ra, dec, geocent_time, psi
Tidal: lambda_1, lambda_2, lambda_tilde, delta_lambda_tilde
This involves popping a lot of things from parameters.
The keys in added_keys should be popped after evaluating the waveform.
For details on tidal parameters see https://arxiv.org/pdf/1402.5156.pdf.
Parameters
==========
......@@ -301,7 +316,9 @@ def convert_to_lal_binary_neutron_star_parameters(parameters):
convert_to_lal_binary_black_hole_parameters(converted_parameters)
if not any([key in converted_parameters for key in
['lambda_1', 'lambda_2', 'lambda_tilde', 'delta_lambda_tilde', 'eos_spectral_gamma_0']]):
['lambda_1', 'lambda_2',
'lambda_tilde', 'delta_lambda_tilde', 'lambda_symmetric',
'eos_polytrope_gamma_0', 'eos_spectral_pca_gamma_0', 'eos_v1']]):
converted_parameters['lambda_1'] = 0
converted_parameters['lambda_2'] = 0
added_keys = added_keys + ['lambda_1', 'lambda_2']
......@@ -328,31 +345,233 @@ def convert_to_lal_binary_neutron_star_parameters(parameters):
converted_parameters['lambda_1']\
* converted_parameters['mass_1']**5\
/ converted_parameters['mass_2']**5
elif 'eos_spectral_gamma_0' in converted_parameters.keys(): # FIXME: This is a clunky way to do this
# Pick out the eos parameters from dict of parameters and sort them
eos_parameter_keys = sorted([key for key in original_keys if 'eos_spectral_gamma_' in key])
gammas = [converted_parameters[key] for key in eos_parameter_keys]
eos = SpectralDecompositionEOS(gammas, sampling_flag=True, e0=1.2856e14, p0=5.3716e32)
if eos.warning_flag:
converted_parameters['lambda_1'] = 0.0
converted_parameters['lambda_2'] = 0.0
converted_parameters['eos_check'] = False
elif eos_family_physical_check(eos):
converted_parameters['lambda_1'] = 0.0
converted_parameters['lambda_2'] = 0.0
converted_parameters['eos_check'] = False
else:
fam = EOSFamily(eos)
if (converted_parameters['mass_1'] <= fam.maximum_mass and
converted_parameters['mass_2'] <= fam.maximum_mass):
converted_parameters['lambda_1'] = fam.lambda_from_mass(converted_parameters['mass_1'])
converted_parameters['lambda_2'] = fam.lambda_from_mass(converted_parameters['mass_2'])
elif 'eos_spectral_pca_gamma_0' in converted_parameters.keys(): # FIXME: This is a clunky way to do this
converted_parameters = generate_source_frame_parameters(converted_parameters)
float_eos_params = {}
max_len = 1
eos_keys = ['eos_spectral_pca_gamma_0',
'eos_spectral_pca_gamma_1',
'eos_spectral_pca_gamma_2',
'eos_spectral_pca_gamma_3',
'mass_1_source', 'mass_2_source']
for key in eos_keys:
try:
if (len(converted_parameters[key]) > max_len):
max_len = len(converted_parameters[key])
except TypeError:
float_eos_params[key] = converted_parameters[key]
if len(float_eos_params) == len(eos_keys): # case where all eos params are floats (pinned)
g0, g1, g2, g3 = spectral_pca_to_spectral(
converted_parameters['eos_spectral_pca_gamma_0'],
converted_parameters['eos_spectral_pca_gamma_1'],
converted_parameters['eos_spectral_pca_gamma_2'],
converted_parameters['eos_spectral_pca_gamma_3'])
converted_parameters['lambda_1'], converted_parameters['lambda_2'], converted_parameters['eos_check'] = \
spectral_params_to_lambda_1_lambda_2(
g0, g1, g2, g3, converted_parameters['mass_1_source'], converted_parameters['mass_2_source'])
elif len(float_eos_params) < len(eos_keys): # case where some or none of the eos params are floats (pinned)
for key in float_eos_params.keys():
converted_parameters[key] = np.ones(max_len) * converted_parameters[key]
g0pca = converted_parameters['eos_spectral_pca_gamma_0']
g1pca = converted_parameters['eos_spectral_pca_gamma_1']
g2pca = converted_parameters['eos_spectral_pca_gamma_2']
g3pca = converted_parameters['eos_spectral_pca_gamma_3']
m1s = converted_parameters['mass_1_source']
m2s = converted_parameters['mass_2_source']
all_lambda_1 = np.empty(0)
all_lambda_2 = np.empty(0)
all_eos_check = np.empty(0, dtype=bool)
for (g_0pca, g_1pca, g_2pca, g_3pca, m1_s, m2_s) in zip(g0pca, g1pca, g2pca, g3pca, m1s, m2s):
g_0, g_1, g_2, g_3 = spectral_pca_to_spectral(g_0pca, g_1pca, g_2pca, g_3pca)
lambda_1, lambda_2, eos_check = \
spectral_params_to_lambda_1_lambda_2(g_0, g_1, g_2, g_3, m1_s, m2_s)
all_lambda_1 = np.append(all_lambda_1, lambda_1)
all_lambda_2 = np.append(all_lambda_2, lambda_2)
all_eos_check = np.append(all_eos_check, eos_check)
converted_parameters['lambda_1'] = all_lambda_1
converted_parameters['lambda_2'] = all_lambda_2
converted_parameters['eos_check'] = all_eos_check
for key in float_eos_params.keys():
converted_parameters[key] = float_eos_params[key]
elif 'eos_polytrope_gamma_0' in converted_parameters.keys() and 'eos_polytrope_log10_pressure_1' in converted_parameters.keys():
converted_parameters = generate_source_frame_parameters(converted_parameters)
float_eos_params = {}
max_len = 1
eos_keys = ['eos_polytrope_gamma_0',
'eos_polytrope_gamma_1',
'eos_polytrope_gamma_2',
'eos_polytrope_log10_pressure_1',
'eos_polytrope_log10_pressure_2',
'mass_1_source', 'mass_2_source']
for key in eos_keys:
try:
if (len(converted_parameters[key]) > max_len):
max_len = len(converted_parameters[key])
except TypeError:
float_eos_params[key] = converted_parameters[key]
if len(float_eos_params) == len(eos_keys): # case where all eos params are floats (pinned)
converted_parameters['lambda_1'], converted_parameters['lambda_2'], converted_parameters['eos_check'] = \
polytrope_or_causal_params_to_lambda_1_lambda_2(
converted_parameters['eos_polytrope_gamma_0'],
converted_parameters['eos_polytrope_log10_pressure_1'],
converted_parameters['eos_polytrope_gamma_1'],
converted_parameters['eos_polytrope_log10_pressure_2'],
converted_parameters['eos_polytrope_gamma_2'],
converted_parameters['mass_1_source'],
converted_parameters['mass_2_source'],
causal=0)
elif len(float_eos_params) < len(eos_keys): # case where some or none are floats (pinned)
for key in float_eos_params.keys():
converted_parameters[key] = np.ones(max_len) * converted_parameters[key]
pg0 = converted_parameters['eos_polytrope_gamma_0']
pg1 = converted_parameters['eos_polytrope_gamma_1']
pg2 = converted_parameters['eos_polytrope_gamma_2']
logp1 = converted_parameters['eos_polytrope_log10_pressure_1']
logp2 = converted_parameters['eos_polytrope_log10_pressure_2']
m1s = converted_parameters['mass_1_source']
m2s = converted_parameters['mass_2_source']
all_lambda_1 = np.empty(0)
all_lambda_2 = np.empty(0)
all_eos_check = np.empty(0, dtype=bool)
for (pg_0, pg_1, pg_2, logp_1, logp_2, m1_s, m2_s) in zip(pg0, pg1, pg2, logp1, logp2, m1s, m2s):
lambda_1, lambda_2, eos_check = \
polytrope_or_causal_params_to_lambda_1_lambda_2(
pg_0, logp_1, pg_1, logp_2, pg_2, m1_s, m2_s, causal=0)
all_lambda_1 = np.append(all_lambda_1, lambda_1)
all_lambda_2 = np.append(all_lambda_2, lambda_2)
all_eos_check = np.append(all_eos_check, eos_check)
converted_parameters['lambda_1'] = all_lambda_1
converted_parameters['lambda_2'] = all_lambda_2
converted_parameters['eos_check'] = all_eos_check
for key in float_eos_params.keys():
converted_parameters[key] = float_eos_params[key]
elif 'eos_polytrope_gamma_0' in converted_parameters.keys() and 'eos_polytrope_scaled_pressure_ratio' in converted_parameters.keys():
converted_parameters = generate_source_frame_parameters(converted_parameters)
float_eos_params = {}
max_len = 1
eos_keys = ['eos_polytrope_gamma_0',
'eos_polytrope_gamma_1',
'eos_polytrope_gamma_2',
'eos_polytrope_scaled_pressure_ratio',
'eos_polytrope_scaled_pressure_2',
'mass_1_source', 'mass_2_source']
for key in eos_keys:
try:
if (len(converted_parameters[key]) > max_len):
max_len = len(converted_parameters[key])
except TypeError:
float_eos_params[key] = converted_parameters[key]
if len(float_eos_params) == len(eos_keys): # case where all eos params are floats (pinned)
logp1, logp2 = log_pressure_reparameterization_conversion(
converted_parameters['eos_polytrope_scaled_pressure_ratio'],
converted_parameters['eos_polytrope_scaled_pressure_2'])
converted_parameters['lambda_1'], converted_parameters['lambda_2'], converted_parameters['eos_check'] = \
polytrope_or_causal_params_to_lambda_1_lambda_2(
converted_parameters['eos_polytrope_gamma_0'],
logp1,
converted_parameters['eos_polytrope_gamma_1'],
logp2,
converted_parameters['eos_polytrope_gamma_2'],
converted_parameters['mass_1_source'],
converted_parameters['mass_2_source'],
causal=0)
elif len(float_eos_params) < len(eos_keys): # case where some or none are floats (pinned)
for key in float_eos_params.keys():
converted_parameters[key] = np.ones(max_len) * converted_parameters[key]
pg0 = converted_parameters['eos_polytrope_gamma_0']
pg1 = converted_parameters['eos_polytrope_gamma_1']
pg2 = converted_parameters['eos_polytrope_gamma_2']
scaledratio = converted_parameters['eos_polytrope_scaled_pressure_ratio']
scaled_p2 = converted_parameters['eos_polytrope_scaled_pressure_2']
m1s = converted_parameters['mass_1_source']
m2s = converted_parameters['mass_2_source']
all_lambda_1 = np.empty(0)
all_lambda_2 = np.empty(0)
all_eos_check = np.empty(0, dtype=bool)
for (pg_0, pg_1, pg_2, scaled_ratio, scaled_p_2, m1_s,
m2_s) in zip(pg0, pg1, pg2, scaledratio, scaled_p2, m1s, m2s):
logp_1, logp_2 = log_pressure_reparameterization_conversion(scaled_ratio, scaled_p_2)
lambda_1, lambda_2, eos_check = \
polytrope_or_causal_params_to_lambda_1_lambda_2(
pg_0, logp_1, pg_1, logp_2, pg_2, m1_s, m2_s, causal=0)
all_lambda_1 = np.append(all_lambda_1, lambda_1)
all_lambda_2 = np.append(all_lambda_2, lambda_2)
all_eos_check = np.append(all_eos_check, eos_check)
converted_parameters['lambda_1'] = all_lambda_1
converted_parameters['lambda_2'] = all_lambda_2
converted_parameters['eos_check'] = all_eos_check
for key in float_eos_params.keys():
converted_parameters[key] = float_eos_params[key]
elif 'eos_v1' in converted_parameters.keys():
converted_parameters = generate_source_frame_parameters(converted_parameters)
float_eos_params = {}
max_len = 1
eos_keys = ['eos_v1',
'eos_v2',
'eos_v3',
'eos_log10_pressure1_cgs',
'eos_log10_pressure2_cgs',
'mass_1_source', 'mass_2_source']
for key in eos_keys:
try:
if (len(converted_parameters[key]) > max_len):
max_len = len(converted_parameters[key])
except TypeError:
float_eos_params[key] = converted_parameters[key]
if len(float_eos_params) == len(eos_keys): # case where all eos params are floats (pinned)
converted_parameters['lambda_1'], converted_parameters['lambda_2'], converted_parameters['eos_check'] = \
polytrope_or_causal_params_to_lambda_1_lambda_2(
converted_parameters['eos_v1'],
converted_parameters['eos_log10_pressure1_cgs'],
converted_parameters['eos_v2'],
converted_parameters['eos_log10_pressure2_cgs'],
converted_parameters['eos_v3'],
converted_parameters['mass_1_source'],
converted_parameters['mass_2_source'],
causal=1)
elif len(float_eos_params) < len(eos_keys): # case where some or none are floats (pinned)
for key in float_eos_params.keys():
converted_parameters[key] = np.ones(max_len) * converted_parameters[key]
v1 = converted_parameters['eos_v1']
v2 = converted_parameters['eos_v2']
v3 = converted_parameters['eos_v3']
logp1 = converted_parameters['eos_log10_pressure1_cgs']
logp2 = converted_parameters['eos_log10_pressure2_cgs']
m1s = converted_parameters['mass_1_source']
m2s = converted_parameters['mass_2_source']
all_lambda_1 = np.empty(0)
all_lambda_2 = np.empty(0)
all_eos_check = np.empty(0, dtype=bool)
for (v_1, v_2, v_3, logp_1, logp_2, m1_s, m2_s) in zip(v1, v2, v3, logp1, logp2, m1s, m2s):
lambda_1, lambda_2, eos_check = \
polytrope_or_causal_params_to_lambda_1_lambda_2(
v_1, logp_1, v_2, logp_2, v_3, m1_s, m2_s, causal=1)
all_lambda_1 = np.append(all_lambda_1, lambda_1)
all_lambda_2 = np.append(all_lambda_2, lambda_2)
all_eos_check = np.append(all_eos_check, eos_check)
converted_parameters['lambda_1'] = all_lambda_1
converted_parameters['lambda_2'] = all_lambda_2
converted_parameters['eos_check'] = all_eos_check
for key in float_eos_params.keys():
converted_parameters[key] = float_eos_params[key]
elif 'lambda_symmetric' in converted_parameters.keys():
if 'lambda_antisymmetric' in converted_parameters.keys():
converted_parameters['lambda_1'], converted_parameters['lambda_2'] =\
lambda_symmetric_lambda_antisymmetric_to_lambda_1_lambda_2(
converted_parameters['lambda_symmetric'],
converted_parameters['lambda_antisymmetric'])
elif 'mass_ratio' in converted_parameters.keys():
if 'binary_love_uniform' in converted_parameters.keys():
converted_parameters['lambda_1'], converted_parameters['lambda_2'] =\
binary_love_lambda_symmetric_to_lambda_1_lambda_2_manual_marginalisation(
converted_parameters['binary_love_uniform'],
converted_parameters['lambda_symmetric'],
converted_parameters['mass_ratio'])
else:
converted_parameters['lambda_1'] = 0.0
converted_parameters['lambda_2'] = 0.0
converted_parameters['eos_check'] = False
converted_parameters['lambda_1'], converted_parameters['lambda_2'] =\
binary_love_lambda_symmetric_to_lambda_1_lambda_2_automatic_marginalisation(
converted_parameters['lambda_symmetric'],
converted_parameters['mass_ratio'])
added_keys = [key for key in converted_parameters.keys()
if key not in original_keys]
......@@ -360,6 +579,233 @@ def convert_to_lal_binary_neutron_star_parameters(parameters):
return converted_parameters, added_keys
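Each EOS branch above uses the same float-versus-array dispatch: scalar ("pinned") parameters are collected into `float_eos_params`, array-valued parameters set `max_len`, and in the mixed case the scalars are broadcast before the per-sample loop. A standalone sketch of that pattern with made-up keys:

```python
import numpy as np

params = {"gamma_0": 1.0, "mass_1_source": np.array([1.3, 1.4, 1.5])}
keys = ["gamma_0", "mass_1_source"]

float_params, max_len = {}, 1
for key in keys:
    try:
        max_len = max(max_len, len(params[key]))
    except TypeError:  # scalars have no len()
        float_params[key] = params[key]

if len(float_params) < len(keys):  # mixed case: broadcast the pinned scalars
    for key in float_params:
        params[key] = np.ones(max_len) * params[key]
print(params["gamma_0"])  # [1. 1. 1.]
```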
def log_pressure_reparameterization_conversion(scaled_pressure_ratio, scaled_pressure_2):
'''
Converts the reparameterization joining pressures from
(scaled_pressure_ratio, scaled_pressure_2) to (log10_pressure_1, log10_pressure_2).
This reparameterization with a triangular prior (with mode = max) on scaled_pressure_2
and a uniform prior on scaled_pressure_ratio
mimics identical uniform priors on log10_pressure_1 and log10_pressure_2
where samples with log10_pressure_2 > log10_pressure_1 are rejected.
This reparameterization allows for a faster initialization.
A minimum log10_pressure of 33 (in cgs units) is chosen to be slightly higher than the low-density crust EOS
that is stitched to the dynamic polytrope EOS model in LALSimulation.
Parameters
----------
scaled_pressure_ratio, scaled_pressure_2: float
reparameterizations of the dividing pressures
Returns
-------
log10_pressure_1, log10_pressure_2: float
joining pressures in the original parameterization
'''
minimum_pressure = 33.
log10_pressure_1 = (scaled_pressure_ratio * scaled_pressure_2) + minimum_pressure
log10_pressure_2 = minimum_pressure + scaled_pressure_2
return log10_pressure_1, log10_pressure_2
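A quick numeric check of the mapping (pure arithmetic on the function as written); note that with scaled_pressure_ratio in [0, 1], log10_pressure_1 can never exceed log10_pressure_2:

```python
log10_p1, log10_p2 = log_pressure_reparameterization_conversion(0.5, 2.0)
print(log10_p1, log10_p2)  # 34.0 35.0
```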
def spectral_pca_to_spectral(gamma_pca_0, gamma_pca_1, gamma_pca_2, gamma_pca_3):
'''
Change of basis on parameter space
from an efficient space to sample in (sample space)
to the space used in spectral eos model (model space).
Uses principal component analysis (PCA) to sample the spectral gamma parameters more efficiently.
See arxiv:2001.01747 for the PCA conversion transformation matrix, mean, and standard deviation.
(Note that this transformation matrix is the inverse of the one referenced,
so that it performs the inverse transformation.)
Parameters
----------
gamma_pca_0, gamma_pca_1, gamma_pca_2, gamma_pca_3: float
Spectral gamma PCA parameters to be converted to spectral gamma parameters.
Returns
-------
converted_gamma_parameters: np.array()
array of gamma_0, gamma_1, gamma_2, gamma_3 in model space
'''
sampled_pca_gammas = np.array([gamma_pca_0, gamma_pca_1, gamma_pca_2, gamma_pca_3])
transformation_matrix = np.array(
[
[0.43801, -0.76705, 0.45143, 0.12646],
[-0.53573, 0.17169, 0.67968, 0.47070],
[0.52660, 0.31255, -0.19454, 0.76626],
[-0.49379, -0.53336, -0.54444, 0.41868]
]
)
model_space_mean = np.array([0.89421, 0.33878, -0.07894, 0.00393])
model_space_standard_deviation = np.array([0.35700, 0.25769, 0.05452, 0.00312])
converted_gamma_parameters = \
model_space_mean + model_space_standard_deviation * np.dot(transformation_matrix, sampled_pca_gammas)
return converted_gamma_parameters
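A sanity check on the transform: with all PCA coordinates at zero the dot product vanishes, so the function returns the model-space mean.

```python
gammas = spectral_pca_to_spectral(0.0, 0.0, 0.0, 0.0)
print(gammas)  # ~[0.89421, 0.33878, -0.07894, 0.00393]
```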
def spectral_params_to_lambda_1_lambda_2(gamma_0, gamma_1, gamma_2, gamma_3, mass_1_source, mass_2_source):
'''
Converts from the 4 spectral decomposition parameters and the source masses
to the tidal deformability parameters.
Parameters
----------
gamma_0, gamma_1, gamma_2, gamma_3: float
sampled spectral decomposition parameters
mass_1_source, mass_2_source: float
sampled component mass parameters converted to source frame in solar masses
Returns
-------
lambda_1, lambda_2: float
component tidal deformability parameters
eos_check: bool
whether or not the equation of state is viable /
if eos_check = False, lambdas are 0 and the sample is rejected.
'''
eos_check = True
if lalsim_SimNeutronStarEOS4ParamSDGammaCheck(gamma_0, gamma_1, gamma_2, gamma_3) != 0:
lambda_1 = 0.0
lambda_2 = 0.0
eos_check = False
else:
eos = lalsim_SimNeutronStarEOS4ParameterSpectralDecomposition(gamma_0, gamma_1, gamma_2, gamma_3)
if lalsim_SimNeutronStarEOS4ParamSDViableFamilyCheck(gamma_0, gamma_1, gamma_2, gamma_3) != 0:
lambda_1 = 0.0
lambda_2 = 0.0
eos_check = False
else:
lambda_1, lambda_2, eos_check = neutron_star_family_physical_check(eos, mass_1_source, mass_2_source)
return lambda_1, lambda_2, eos_check
def polytrope_or_causal_params_to_lambda_1_lambda_2(
param1, log10_pressure1_cgs, param2, log10_pressure2_cgs, param3, mass_1_source, mass_2_source, causal):
"""
Converts parameters from sampled dynamic piecewise polytrope parameters
to component tidal deformability parameters.
Enforces log10_pressure1_cgs < log10_pressure2_cgs.
Checks number of points in the equation of state for viability.
Note that subtracting 1 from the log10 pressure in cgs converts it to
log10 pressure in SI units.
Parameters
----------
param1, param2, param3: float
either the sampled adiabatic indices in piecewise polytrope model
or the sampled causal model params v1, v2, v3
log10_pressure1_cgs, log10_pressure2_cgs: float
dividing pressures in piecewise polytrope model or causal model
mass_1_source, mass_2_source: float
source frame component mass parameters in Msuns
causal: bool
whether or not to use causal polytrope model
1 - causal; 0 - not causal
Returns
-------
lambda_1: float
tidal deformability parameter associated with mass 1
lambda_2: float
tidal deformability parameter associated with mass 2
eos_check: bool
whether eos is valid or not
"""
eos_check = True
if log10_pressure1_cgs >= log10_pressure2_cgs:
lambda_1 = 0.0
lambda_2 = 0.0
eos_check = False
else:
if causal == 0:
eos = lalsim_SimNeutronStarEOS3PieceDynamicPolytrope(
param1, log10_pressure1_cgs - 1., param2, log10_pressure2_cgs - 1., param3)
else:
eos = lalsim_SimNeutronStarEOS3PieceCausalAnalytic(
param1, log10_pressure1_cgs - 1., param2, log10_pressure2_cgs - 1., param3)
if lalsim_SimNeutronStarEOS3PDViableFamilyCheck(
param1, log10_pressure1_cgs - 1., param2, log10_pressure2_cgs - 1., param3, causal) != 0:
lambda_1 = 0.0
lambda_2 = 0.0
eos_check = False
else:
lambda_1, lambda_2, eos_check = neutron_star_family_physical_check(eos, mass_1_source, mass_2_source)
return lambda_1, lambda_2, eos_check
def neutron_star_family_physical_check(eos, mass_1_source, mass_2_source):
"""
Takes in a lalsim eos object. Performs causal and max/min mass eos checks.
Calculates component lambdas if eos object passes causality.
Returns lambda = 0 if not.
Parameters
----------
eos: lalsim swig-wrapped eos object
the neutron star equation of state
mass_1_source, mass_2_source: float
source frame component masses 1 and 2 in solar masses
Returns
-------
lambda_1, lambda_2: float
component tidal deformability parameters
eos_check: bool
whether or not the equation of state is physically allowed
"""
eos_check = True
family = lalsim_CreateSimNeutronStarFamily(eos)
max_pseudo_enthalpy = lalsim_SimNeutronStarEOSMaxPseudoEnthalpy(eos)
max_speed_of_sound = lalsim_SimNeutronStarEOSSpeedOfSoundGeometerized(max_pseudo_enthalpy, eos)
min_mass = lalsim_SimNeutronStarFamMinimumMass(family) / solar_mass
max_mass = lalsim_SimNeutronStarMaximumMass(family) / solar_mass
if max_speed_of_sound <= 1.1 and min_mass <= mass_1_source <= max_mass and min_mass <= mass_2_source <= max_mass:
lambda_1 = lambda_from_mass_and_family(mass_1_source, family)
lambda_2 = lambda_from_mass_and_family(mass_2_source, family)
else:
lambda_1 = 0.0
lambda_2 = 0.0
eos_check = False
return lambda_1, lambda_2, eos_check
def lambda_from_mass_and_family(mass_i, family):
"""
Convert from equation of state model parameters to
component tidal parameters.
Parameters
----------
family: lalsim family object
EOS family of type lalsimulation.SimNeutronStarFamily.
mass_i: float
Component mass of the neutron star in solar masses.
Returns
-------
lambda_i: float
component tidal deformability parameter
"""
radius = lalsim_SimNeutronStarRadius(mass_i * solar_mass, family)
love_number_k2 = lalsim_SimNeutronStarLoveNumberK2(mass_i * solar_mass, family)
mass_geometrized = mass_i * solar_mass * gravitational_constant / speed_of_light ** 2.
compactness = mass_geometrized / radius
lambda_i = (2. / 3.) * love_number_k2 / compactness ** 5.
return lambda_i
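A worked example of the compactness arithmetic above, for an assumed 1.4 Msun star of 12 km radius with k2 = 0.10 (all numbers illustrative):

```python
import scipy.constants as const

solar_mass = 1.98892e30  # kg, approximately the value bilby uses
mass_geom = 1.4 * solar_mass * const.G / const.c ** 2  # ~2068 m
compactness = mass_geom / 12.0e3                        # ~0.172
lambda_i = (2. / 3.) * 0.10 / compactness ** 5.
print(round(lambda_i))  # ~439, a plausible neutron-star tidal deformability
```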
def eos_family_physical_check(eos):
"""
Function that determines if the EoS family contains
......@@ -692,6 +1138,7 @@ def lambda_1_lambda_2_to_lambda_tilde(lambda_1, lambda_2, mass_1, mass_2):
=======
lambda_tilde: float
Dominant tidal term.
"""
eta = component_masses_to_symmetric_mass_ratio(mass_1, mass_2)
lambda_plus = lambda_1 + lambda_2
......@@ -730,9 +1177,8 @@ def lambda_1_lambda_2_to_delta_lambda_tilde(lambda_1, lambda_2, mass_1, mass_2):
lambda_minus = lambda_1 - lambda_2
delta_lambda_tilde = 1 / 2 * (
(1 - 4 * eta) ** 0.5 * (1 - 13272 / 1319 * eta + 8944 / 1319 * eta**2) *
lambda_plus + (1 - 15910 / 1319 * eta + 32850 / 1319 * eta**2 +
3380 / 1319 * eta**3) * lambda_minus)
lambda_plus + (1 - 15910 / 1319 * eta + 32850 / 1319 * eta ** 2 +
3380 / 1319 * eta ** 3) * lambda_minus)
return delta_lambda_tilde
......@@ -760,6 +1206,7 @@ def lambda_tilde_delta_lambda_tilde_to_lambda_1_lambda_2(
Tidal parameter of more massive neutron star.
lambda_2: float
Tidal parameter of less massive neutron star.
"""
eta = component_masses_to_symmetric_mass_ratio(mass_1, mass_2)
coefficient_1 = (1 + 7 * eta - 31 * eta**2)
......@@ -778,6 +1225,7 @@ def lambda_tilde_delta_lambda_tilde_to_lambda_1_lambda_2(
2 * delta_lambda_tilde * (coefficient_1 + coefficient_2)) \
/ ((coefficient_1 - coefficient_2) * (coefficient_3 + coefficient_4) -
(coefficient_1 + coefficient_2) * (coefficient_3 - coefficient_4))
return lambda_1, lambda_2
......@@ -814,6 +1262,372 @@ def lambda_tilde_to_lambda_1_lambda_2(
return lambda_1, lambda_2
def lambda_1_lambda_2_to_lambda_symmetric(lambda_1, lambda_2):
"""
Convert from individual tidal parameters to symmetric tidal term.
See, e.g., Yagi & Yunes, https://arxiv.org/abs/1512.02639
Parameters
==========
lambda_1: float
Tidal parameter of more massive neutron star.
lambda_2: float
Tidal parameter of less massive neutron star.
Returns
======
lambda_symmetric: float
Symmetric tidal parameter.
"""
lambda_symmetric = (lambda_2 + lambda_1) / 2.
return lambda_symmetric
def lambda_1_lambda_2_to_lambda_antisymmetric(lambda_1, lambda_2):
"""
Convert from individual tidal parameters to antisymmetric tidal term.
See, e.g., Yagi & Yunes, https://arxiv.org/abs/1512.02639
Parameters
==========
lambda_1: float
Tidal parameter of more massive neutron star.
lambda_2: float
Tidal parameter of less massive neutron star.
Returns
======
lambda_antisymmetric: float
Antisymmetric tidal parameter.
"""
lambda_antisymmetric = (lambda_2 - lambda_1) / 2.
return lambda_antisymmetric
def lambda_symmetric_lambda_antisymmetric_to_lambda_1(lambda_symmetric, lambda_antisymmetric):
"""
Convert from symmetric and antisymmetric tidal terms to lambda_1
See, e.g., Yagi & Yunes, https://arxiv.org/abs/1512.02639
Parameters
==========
lambda_symmetric: float
Symmetric tidal parameter.
lambda_antisymmetric: float
Antisymmetric tidal parameter.
Returns
======
lambda_1: float
Tidal parameter of more massive neutron star.
"""
lambda_1 = lambda_symmetric - lambda_antisymmetric
return lambda_1
def lambda_symmetric_lambda_antisymmetric_to_lambda_2(lambda_symmetric, lambda_antisymmetric):
"""
Convert from symmetric and antisymmetric tidal terms to lambda_2
See, e.g., Yagi & Yunes, https://arxiv.org/abs/1512.02639
Parameters
==========
lambda_symmetric: float
Symmetric tidal parameter.
lambda_antisymmetric: float
Antisymmetric tidal parameter.
Returns
======
lambda_2: float
Tidal parameter of less massive neutron star.
"""
lambda_2 = lambda_symmetric + lambda_antisymmetric
return lambda_2
def lambda_symmetric_lambda_antisymmetric_to_lambda_1_lambda_2(lambda_symmetric, lambda_antisymmetric):
"""
Convert from symmetric and antisymmetric tidal terms to lambda_1 and lambda_2
See, e.g., Yagi & Yunes, https://arxiv.org/abs/1512.02639
Parameters
==========
lambda_symmetric: float
Symmetric tidal parameter.
lambda_antisymmetric: float
Antisymmetric tidal parameter.
Returns
======
lambda_1: float
Tidal parameter of more massive neutron star.
lambda_2: float
Tidal parameter of less massive neutron star.
"""
lambda_1 = lambda_symmetric_lambda_antisymmetric_to_lambda_1(lambda_symmetric, lambda_antisymmetric)
lambda_2 = lambda_symmetric_lambda_antisymmetric_to_lambda_2(lambda_symmetric, lambda_antisymmetric)
return lambda_1, lambda_2
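# A short round-trip sketch of the four helpers above (illustrative values):
# lambda_s = (250 + 600) / 2 = 425 and lambda_a = (600 - 250) / 2 = 175, and
# the inverse map recovers the inputs exactly.
_lam_s = lambda_1_lambda_2_to_lambda_symmetric(250.0, 600.0)
_lam_a = lambda_1_lambda_2_to_lambda_antisymmetric(250.0, 600.0)
assert lambda_symmetric_lambda_antisymmetric_to_lambda_1_lambda_2(_lam_s, _lam_a) == (250.0, 600.0)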
def binary_love_fit_lambda_symmetric_mass_ratio_to_lambda_antisymmetric(lambda_symmetric, mass_ratio):
"""
Convert from symmetric tidal terms and mass ratio to antisymmetric tidal terms
using BinaryLove relations.
This function does only the fit itself, and doesn't account for marginalisation
over the uncertainties in the fit.
See Yagi & Yunes, https://arxiv.org/abs/1512.02639
and Chatziioannou, Haster, Zimmerman, https://arxiv.org/abs/1804.03221.
The fit follows Eqs. (1)-(2) of the CHZ paper; the marginalisation over the
fit uncertainties (also implemented in LALInference through the function
LALInferenceBinaryLove) is handled by the functions below.
Parameters
==========
lambda_symmetric: float
Symmetric tidal parameter.
mass_ratio: float
Mass ratio (mass_2/mass_1, with mass_2 < mass_1) of the binary
Returns
======
lambda_antisymmetric: float
Antisymmetric tidal parameter.
"""
lambda_symmetric_m1o5 = np.power(lambda_symmetric, -1. / 5.)
lambda_symmetric_m2o5 = lambda_symmetric_m1o5 * lambda_symmetric_m1o5
lambda_symmetric_m3o5 = lambda_symmetric_m2o5 * lambda_symmetric_m1o5
q = mass_ratio
q2 = np.square(mass_ratio)
# Eqn.2 from CHZ, incorporating the dependence on mass ratio
n_polytropic = 0.743 # average polytropic index for the EoSs included in the fit
q_for_Fnofq = np.power(q, 10. / (3. - n_polytropic))
Fnofq = (1. - q_for_Fnofq) / (1. + q_for_Fnofq)
# b_ij and c_ij coefficients are given in Table I of CHZ
b11 = -27.7408
b12 = 8.42358
b21 = 122.686
b22 = -19.7551
b31 = -175.496
b32 = 133.708
c11 = -25.5593
c12 = 5.58527
c21 = 92.0337
c22 = 26.8586
c31 = -70.247
c32 = -56.3076
# Eqn 1 from CHZ, giving the lambda_antisymmetric_fitOnly
# not yet accounting for the uncertainty in the fit
numerator = 1.0 + \
(b11 * q * lambda_symmetric_m1o5) + (b12 * q2 * lambda_symmetric_m1o5) + \
(b21 * q * lambda_symmetric_m2o5) + (b22 * q2 * lambda_symmetric_m2o5) + \
(b31 * q * lambda_symmetric_m3o5) + (b32 * q2 * lambda_symmetric_m3o5)
denominator = 1.0 + \
(c11 * q * lambda_symmetric_m1o5) + (c12 * q2 * lambda_symmetric_m1o5) + \
(c21 * q * lambda_symmetric_m2o5) + (c22 * q2 * lambda_symmetric_m2o5) + \
(c31 * q * lambda_symmetric_m3o5) + (c32 * q2 * lambda_symmetric_m3o5)
lambda_antisymmetric_fitOnly = Fnofq * lambda_symmetric * numerator / denominator
return lambda_antisymmetric_fitOnly
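# Hypothetical usage of the fit-only relation above (input values are
# illustrative); combining the result with lambda_symmetric gives fit-only
# component tidal parameters, with no uncertainty marginalisation applied.
_lambda_a_fit = binary_love_fit_lambda_symmetric_mass_ratio_to_lambda_antisymmetric(300.0, 0.9)
_lambda_1_fit = 300.0 - _lambda_a_fit  # more massive star: smaller lambda
_lambda_2_fit = 300.0 + _lambda_a_fit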
def binary_love_lambda_symmetric_to_lambda_1_lambda_2_manual_marginalisation(binary_love_uniform,
lambda_symmetric, mass_ratio):
"""
Convert from symmetric tidal terms to lambda_1 and lambda_2
using BinaryLove relations
See Yagi & Yunes, https://arxiv.org/abs/1512.02639
and Chatziioannou, Haster, Zimmerman, https://arxiv.org/abs/1804.03221
This function will use the implementation from the CHZ paper, which
marginalises over the uncertainties in the BinaryLove fits.
This is also implemented in LALInference through the function
LALInferenceBinaryLove.
Parameters
==========
binary_love_uniform: float (defined in the range [0,1])
Uniformly distributed variable used in BinaryLove uncertainty marginalisation
lambda_symmetric: float
Symmetric tidal parameter.
mass_ratio: float
Mass ratio (mass_2/mass_1, with mass_2 < mass_1) of the binary
Returns
======
lambda_1: float
Tidal parameter of more massive neutron star.
lambda_2: float
Tidal parameter of less massive neutron star.
"""
lambda_antisymmetric_fitOnly = binary_love_fit_lambda_symmetric_mass_ratio_to_lambda_antisymmetric(lambda_symmetric,
mass_ratio)
lambda_symmetric_sqrt = np.sqrt(lambda_symmetric)
q = mass_ratio
q2 = np.square(mass_ratio)
# mu_i and sigma_i coefficients are given in Table II of CHZ
mu_1 = 137.1252739
mu_2 = -32.8026613
mu_3 = 0.5168637
mu_4 = -11.2765281
mu_5 = 14.9499544
mu_6 = -4.6638851
sigma_1 = -0.0000739
sigma_2 = 0.0103778
sigma_3 = 0.4581717
sigma_4 = -0.8341913
sigma_5 = -201.4323962
sigma_6 = 273.9268276
sigma_7 = -71.2342246
# Eqn 6 from CHZ, correction on fit for lambdaA caused by
# uncertainty in the mean of the lambdaS residual fit,
# using coefficients mu_1, mu_2 and mu_3 from Table II of CHZ
lambda_antisymmetric_lambda_symmetric_meanCorr = \
(mu_1 / (lambda_symmetric * lambda_symmetric)) + \
(mu_2 / lambda_symmetric) + mu_3
# Eqn 8 from CHZ, correction on fit for lambdaA caused by
# uncertainty in the standard deviation of lambdaS residual fit,
# using coefficients sigma_1, sigma_2, sigma_3 and sigma_4 from Table II
lambda_antisymmetric_lambda_symmetric_stdCorr = \
(sigma_1 * lambda_symmetric * lambda_symmetric_sqrt) + \
(sigma_2 * lambda_symmetric) + \
(sigma_3 * lambda_symmetric_sqrt) + sigma_4
# Eqn 7, correction on fit for lambdaA caused by
# uncertainty in the mean of the q residual fit,
# using coefficients mu_4, mu_5 and mu_6 from Table II
lambda_antisymmetric_mass_ratio_meanCorr = \
(mu_4 * q2) + (mu_5 * q) + mu_6
# Eqn 9 from CHZ, correction on fit for lambdaA caused by
# uncertainty in the standard deviation of the q residual fit,
# using coefficients sigma_5, sigma_6 and sigma_7 from Table II
lambda_antisymmetric_mass_ratio_stdCorr = \
(sigma_5 * q2) + (sigma_6 * q) + sigma_7
# Eqn 4 from CHZ, averaging the corrections from the
# mean of the residual fits
lambda_antisymmetric_meanCorr = \
(lambda_antisymmetric_lambda_symmetric_meanCorr +
lambda_antisymmetric_mass_ratio_meanCorr) / 2.
# Eqn 5 from CHZ, averaging the corrections from the
# standard deviations of the residual fits
lambda_antisymmetric_stdCorr = \
np.sqrt(np.square(lambda_antisymmetric_lambda_symmetric_stdCorr) +
np.square(lambda_antisymmetric_mass_ratio_stdCorr))
# Draw a correction on the fit from a
# Gaussian distribution with width lambda_antisymmetric_stdCorr
# this is done by sampling a percent point function (inverse cdf)
# through a U{0,1} variable called binary_love_uniform
lambda_antisymmetric_scatter = norm.ppf(binary_love_uniform, loc=0.,
scale=lambda_antisymmetric_stdCorr)
# Add the correction of the residual mean
# and the Gaussian scatter to the lambda_antisymmetric_fitOnly value
lambda_antisymmetric = lambda_antisymmetric_fitOnly + \
(lambda_antisymmetric_meanCorr + lambda_antisymmetric_scatter)
lambda_1 = lambda_symmetric_lambda_antisymmetric_to_lambda_1(lambda_symmetric, lambda_antisymmetric)
lambda_2 = lambda_symmetric_lambda_antisymmetric_to_lambda_2(lambda_symmetric, lambda_antisymmetric)
# The BinaryLove model is only physically valid where
# lambda_2 > lambda_1 as it assumes mass_1 > mass_2
# It also assumes both lambda1 and lambda2 to be positive
# This is an explicit feature of the "raw" model fit,
# but since this implementation also incorporates
# marginalisation over the fit uncertainty, there can
# be instances where those assumptions are randomly broken.
# For those cases, set lambda_1 and lambda_2 to negative
# values, which in turn will cause (effectively all) the
# waveform models in LALSimulation to fail, thus setting
# logL = -infinity
# if np.greater(lambda_1, lambda_2) or np.less(lambda_1, 0) or np.less(lambda_2, 0):
# lambda_1 = -np.inf
# lambda_2 = -np.inf
# For now set this through an explicit constraint prior instead
return lambda_1, lambda_2
def binary_love_lambda_symmetric_to_lambda_1_lambda_2_automatic_marginalisation(lambda_symmetric, mass_ratio):
"""
Convert from symmetric tidal terms to lambda_1 and lambda_2
using BinaryLove relations
See Yagi & Yunes, https://arxiv.org/abs/1512.02639
and Chatziioannou, Haster, Zimmerman, https://arxiv.org/abs/1804.03221
This function will use the implementation from the CHZ paper, which
marginalises over the uncertainties in the BinaryLove fits.
This is also implemented in LALInference through the function
LALInferenceBinaryLove.
This function should be used when the BinaryLove marginalisation wasn't
explicitly active in the sampling stage. It will draw a random number in U[0,1]
here instead.
Parameters
==========
lambda_symmetric: float
Symmetric tidal parameter.
mass_ratio: float
Mass ratio (mass_2/mass_1, with mass_2 < mass_1) of the binary
Returns
======
lambda_1: float
Tidal parameter of more massive neutron star.
lambda_2: float
Tidal parameter of less massive neutron star.
"""
# np.shape rather than len so that scalar lambda_symmetric inputs are also handled
binary_love_uniform = np.random.uniform(0, 1, np.shape(lambda_symmetric))
lambda_1, lambda_2 = binary_love_lambda_symmetric_to_lambda_1_lambda_2_manual_marginalisation(
binary_love_uniform, lambda_symmetric, mass_ratio)
return lambda_1, lambda_2
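# A usage sketch of the two marginalisation entry points above (illustrative
# array inputs, since one uniform deviate is drawn per posterior sample):
import numpy as np

_lambda_s = np.array([300.0, 400.0])
_mass_ratio = np.array([0.9, 0.8])
_uniform = np.random.uniform(0, 1, len(_lambda_s))
_l1, _l2 = binary_love_lambda_symmetric_to_lambda_1_lambda_2_manual_marginalisation(
    _uniform, _lambda_s, _mass_ratio)
# or, letting the function draw the uniform variable internally:
_l1, _l2 = binary_love_lambda_symmetric_to_lambda_1_lambda_2_automatic_marginalisation(
    _lambda_s, _mass_ratio)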
def _generate_all_cbc_parameters(sample, defaults, base_conversion,
likelihood=None, priors=None, npool=1):
"""Generate all cbc parameters, helper function for BBH/BNS"""
......
......@@ -181,6 +181,29 @@ class CubicSpline(Recalibrate):
self.maximum_frequency = maximum_frequency
self._log_spline_points = np.linspace(
np.log10(minimum_frequency), np.log10(maximum_frequency), n_points)
self._delta_log_spline_points = self._log_spline_points[1] - self._log_spline_points[0]
# Precompute matrix converting values at nodes to spline coefficients.
# The algorithm for interpolation is described in
# https://dcc.ligo.org/LIGO-T2300140, and the matrix calculated here is
# to solve Eq. (9) in the note.
tmp1 = np.zeros(shape=(n_points, n_points))
tmp1[0, 0] = -1
tmp1[0, 1] = 2
tmp1[0, 2] = -1
tmp1[-1, -3] = -1
tmp1[-1, -2] = 2
tmp1[-1, -1] = -1
for i in range(1, n_points - 1):
tmp1[i, i - 1] = 1 / 6
tmp1[i, i] = 2 / 3
tmp1[i, i + 1] = 1 / 6
tmp2 = np.zeros(shape=(n_points, n_points))
for i in range(1, n_points - 1):
tmp2[i, i - 1] = 1
tmp2[i, i] = -2
tmp2[i, i + 1] = 1
self._nodes_to_spline_coefficients = np.linalg.solve(tmp1, tmp2)
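# A standalone numeric check of the matrix construction above, assuming the
# boundary rows encode not-a-knot conditions: the interpolant built from
# solve(tmp1, tmp2) @ y then matches scipy's CubicSpline (whose default
# boundary condition is not-a-knot) on arbitrary node values.
import numpy as np
from scipy.interpolate import CubicSpline as _ScipyCubicSpline

_n = 8
_y = np.sin(np.arange(_n))  # arbitrary node values
_tmp1 = np.zeros((_n, _n))
_tmp2 = np.zeros((_n, _n))
_tmp1[0, :3] = [-1, 2, -1]
_tmp1[-1, -3:] = [-1, 2, -1]
for _i in range(1, _n - 1):
    _tmp1[_i, _i - 1:_i + 2] = [1 / 6, 2 / 3, 1 / 6]
    _tmp2[_i, _i - 1:_i + 2] = [1, -2, 1]
_m = np.linalg.solve(_tmp1, _tmp2) @ _y  # second derivatives at the nodes
_t = np.linspace(0, _n - 1, 101)
_j = np.clip(np.floor(_t).astype(int), 0, _n - 2)
_b = _t - _j
_a = 1 - _b
_s = (_a * _y[_j] + _b * _y[_j + 1]
      + (_a ** 3 - _a) / 6 * _m[_j] + (_b ** 3 - _b) / 6 * _m[_j + 1])
assert np.allclose(_s, _ScipyCubicSpline(np.arange(_n), _y)(_t))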
@property
def log_spline_points(self):
......@@ -208,18 +231,26 @@ class CubicSpline(Recalibrate):
calibration_factor : array-like
The factor to multiply the strain by.
"""
log10f_per_deltalog10f = (
np.log10(frequency_array) - self.log_spline_points[0]
) / self._delta_log_spline_points
previous_nodes = np.clip(np.floor(log10f_per_deltalog10f).astype(int), a_min=0, a_max=self.n_points - 2)
next_nodes = previous_nodes + 1
b = log10f_per_deltalog10f - previous_nodes
a = 1 - b
c = (a**3 - a) / 6
d = (b**3 - b) / 6
self.set_calibration_parameters(**params)
amplitude_parameters = [self.params['amplitude_{}'.format(ii)]
for ii in range(self.n_points)]
delta_amplitude = interp1d(
self.log_spline_points, amplitude_parameters, kind='cubic',
bounds_error=False, fill_value=0)(np.log10(frequency_array))
phase_parameters = [
self.params['phase_{}'.format(ii)] for ii in range(self.n_points)]
delta_phase = interp1d(
self.log_spline_points, phase_parameters, kind='cubic',
bounds_error=False, fill_value=0)(np.log10(frequency_array))
amplitude_parameters = np.array([self.params['amplitude_{}'.format(ii)] for ii in range(self.n_points)])
_spline_coefficients = self._nodes_to_spline_coefficients.dot(amplitude_parameters)
delta_amplitude = a * amplitude_parameters[previous_nodes] + b * amplitude_parameters[next_nodes] + \
c * _spline_coefficients[previous_nodes] + d * _spline_coefficients[next_nodes]
phase_parameters = np.array([self.params['phase_{}'.format(ii)] for ii in range(self.n_points)])
_spline_coefficients = self._nodes_to_spline_coefficients.dot(phase_parameters)
delta_phase = a * phase_parameters[previous_nodes] + b * phase_parameters[next_nodes] + \
c * _spline_coefficients[previous_nodes] + d * _spline_coefficients[next_nodes]
calibration_factor = (1 + delta_amplitude) * (2 + 1j * delta_phase) / (2 - 1j * delta_phase)
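# A small sanity check on the phase factor above: (2 + i dphi) / (2 - i dphi)
# has unit modulus by construction (numerator and denominator are complex
# conjugates) and agrees with exp(i * dphi) to second order, so small spline
# phase corrections act as a pure rotation of the strain.
import numpy as np

_dphi = 0.05
_factor = (2 + 1j * _dphi) / (2 - 1j * _dphi)
assert np.isclose(abs(_factor), 1.0)
assert abs(_factor - np.exp(1j * _dphi)) < _dphi ** 3  # leading error ~ dphi**3 / 12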
......
# LIGO India Aundha at Aplus sensitivity.
# LIGO-T2000158
# Coordinates taken from https://dcc.ligo.org/LIGO-T2000158/public
# https://dcc.ligo.org/LIGO-T2000012/public
name = 'A1'
power_spectral_density = PowerSpectralDensity(asd_file='Aplus_asd.txt')
......
......@@ -216,9 +216,9 @@ class InterferometerStrainData(object):
if self._frequency_domain_strain is not None:
return self._frequency_domain_strain * self.frequency_mask
elif self._time_domain_strain is not None:
logger.info("Generating frequency domain strain from given time "
"domain strain.")
logger.info("Applying a tukey window with alpha={}, roll off={}".format(
logger.debug("Generating frequency domain strain from given time "
"domain strain.")
logger.debug("Applying a tukey window with alpha={}, roll off={}".format(
self.alpha, self.roll_off))
# self.low_pass_filter()
window = self.time_domain_window()
......
......@@ -944,19 +944,30 @@ class ROQGravitationalWaveTransient(GravitationalWaveTransient):
self.weights[ifo.name + '_quadratic'].append(
np.dot(quadratic_matrix_single, multibanded_inverse_psd[ifo.name]))
def save_weights(self, filename, format='npz'):
def save_weights(self, filename, format='hdf5'):
"""
Save ROQ weights into a single file. format should be json, npz, or hdf5. For weights from multiple bases, hdf5
is only the possible option.
Save ROQ weights into a single file. format should be npz or hdf5.
For weights from multiple bases, hdf5 is the only possible option.
Support for the json format is deprecated as of :code:`v2.1` and will be
removed in :code:`v2.2`; another format should be used instead.
Parameters
==========
filename : str
The name of the file to save the weights to.
format : str
The format to save the data to; this should be one of
:code:`"hdf5"` or :code:`"npz"`. Default is :code:`"hdf5"`.
"""
if format not in ['json', 'npz', 'hdf5']:
raise IOError(f"Format {format} not recognized.")
if format == "json":
import warnings
warnings.warn(
"json format for ROQ weights is deprecated, use hdf5 instead.",
DeprecationWarning
)
if format not in filename:
filename += "." + format
logger.info(f"Saving ROQ weights to {filename}")
......@@ -1001,19 +1012,35 @@ class ROQGravitationalWaveTransient(GravitationalWaveTransient):
def load_weights(self, filename, format=None):
"""
Load ROQ weights. format should be json, npz, or hdf5. json or npz file is assumed to contain weights from a
single basis
Load ROQ weights. format should be json, npz, or hdf5.
A json or npz file is assumed to contain weights from a single basis.
Support for the json format is deprecated as of :code:`v2.1` and will be
removed in :code:`v2.2`; another format should be used instead.
Parameters
==========
filename : str
The name of the file to load the weights from.
format : str
The format to load the data from; this should be one of
:code:`"hdf5"`, :code:`"npz"`, or :code:`"json"`. If not given, the
format is inferred from the filename extension.
Returns
=======
weights: dict
Dictionary containing the ROQ weights.
"""
if format is None:
format = filename.split(".")[-1]
if format not in ["json", "npz", "hdf5"]:
raise IOError(f"Format {format} not recognized.")
if format == "json":
import warnings
warnings.warn(
"json format for ROQ weights is deprecated, use hdf5 instead.",
DeprecationWarning
)
logger.info(f"Loading ROQ weights from {filename}")
if format == "json" or format == "npz":
# Old file format assumed to contain only a single basis
......
......@@ -752,8 +752,21 @@ class CBCPriorDict(ConditionalPriorDict):
return None
def is_nonempty_intersection(self, pset):
""" Check if keys in self exist in the PARAMETER_SETS pset """
if len(PARAMETER_SETS[pset].intersection(self.non_fixed_keys)) > 0:
""" Check if keys in self exist in the parameter set
Parameters
----------
pset: str, set
Either a string referencing a parameter set in PARAMETER_SETS or
a set of keys
"""
if isinstance(pset, str) and pset in PARAMETER_SETS:
check_set = PARAMETER_SETS[pset]
elif isinstance(pset, set):
check_set = pset
else:
raise ValueError(f"pset {pset} not understood")
if len(check_set.intersection(self.non_fixed_keys)) > 0:
return True
else:
return False
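# A hypothetical usage sketch of the extended signature above (default BBH
# priors assumed; key names are for illustration only):
from bilby.gw.prior import BBHPriorDict

_priors = BBHPriorDict()
assert _priors.is_nonempty_intersection("mass")          # named set in PARAMETER_SETS
assert _priors.is_nonempty_intersection({"chirp_mass"})  # explicit set of keys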
......@@ -768,6 +781,11 @@ class CBCPriorDict(ConditionalPriorDict):
""" Return true if priors include any precession parameters """
return self.is_nonempty_intersection("precession_only")
@property
def measured_spin(self):
""" Return true if priors include any measured_spin parameters """
return self.is_nonempty_intersection("measured_spin")
@property
def intrinsic(self):
""" Return true if priors include any intrinsic parameters """
......@@ -778,6 +796,16 @@ class CBCPriorDict(ConditionalPriorDict):
""" Return true if priors include any extrinsic parameters """
return self.is_nonempty_intersection("extrinsic")
@property
def sky(self):
""" Return true if priors include any extrinsic parameters """
return self.is_nonempty_intersection("sky")
@property
def distance_inclination(self):
""" Return true if priors include any extrinsic parameters """
return self.is_nonempty_intersection("distance_inclination")
@property
def mass(self):
""" Return true if priors include any mass parameters """
......@@ -1003,8 +1031,8 @@ class BNSPriorDict(CBCPriorDict):
def test_redundancy(self, key, disable_logging=False):
logger.disabled = disable_logging
logger.info("Performing redundancy check using BBHPriorDict(self).test_redundancy")
logger.disabled = False
bbh_redundancy = BBHPriorDict(self).test_redundancy(key)
logger.disabled = False
if bbh_redundancy:
return True
......
......@@ -1081,10 +1081,21 @@ extrinsic = {
"cos_theta_jn", "geocent_time", "time_jitter", "ra", "dec",
"H1_time", "L1_time", "V1_time",
}
sky = {
"azimuth", "zenith", "ra", "dec",
}
distance_inclination = {
"luminosity_distance", "redshift", "theta_jn", "cos_theta_jn",
}
measured_spin = {
"chi_1", "chi_2", "a_1", "a_2", "chi_1_in_plane"
}
PARAMETER_SETS = dict(
spin=spin, mass=mass, phase=phase, extrinsic=extrinsic,
tidal=tidal, primary_spin_and_q=primary_spin_and_q,
intrinsic=spin.union(mass).union(phase).union(tidal),
precession_only=precession_only,
sky=sky, distance_inclination=distance_inclination,
measured_spin=measured_spin,
)
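# A short sketch of how the new sets surface through the CBCPriorDict
# properties defined earlier (default BBH priors assumed; illustrative only):
from bilby.gw.prior import BBHPriorDict

_priors = BBHPriorDict()
assert _priors.sky                   # default priors sample ra and dec
assert _priors.distance_inclination  # luminosity_distance and theta_jn are sampled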
......@@ -791,6 +791,160 @@ def lalsim_SimInspiralWaveformParamsInsertTidalLambda2(
waveform_dictionary, lambda_2)
def lalsim_SimNeutronStarEOS4ParamSDGammaCheck(g0, g1, g2, g3):
from lalsimulation import SimNeutronStarEOS4ParamSDGammaCheck
try:
g0 = float(g0)
g1 = float(g1)
g2 = float(g2)
g3 = float(g3)
except ValueError:
raise ValueError("Unable to convert EOS spectral parameters to floats")
except TypeError:
raise TypeError("Unable to convert EOS spectral parameters to floats")
return SimNeutronStarEOS4ParamSDGammaCheck(g0, g1, g2, g3)
def lalsim_SimNeutronStarEOS4ParameterSpectralDecomposition(g0, g1, g2, g3):
from lalsimulation import SimNeutronStarEOS4ParameterSpectralDecomposition
try:
g0 = float(g0)
g1 = float(g1)
g2 = float(g2)
g3 = float(g3)
except ValueError:
raise ValueError("Unable to convert EOS spectral parameters to floats")
except TypeError:
raise TypeError("Unable to convert EOS spectral parameters to floats")
return SimNeutronStarEOS4ParameterSpectralDecomposition(g0, g1, g2, g3)
def lalsim_SimNeutronStarEOS4ParamSDViableFamilyCheck(g0, g1, g2, g3):
from lalsimulation import SimNeutronStarEOS4ParamSDViableFamilyCheck
try:
g0 = float(g0)
g1 = float(g1)
g2 = float(g2)
g3 = float(g3)
except ValueError:
raise ValueError("Unable to convert EOS spectral parameters to floats")
except TypeError:
raise TypeError("Unable to convert EOS spectral parameters to floats")
return SimNeutronStarEOS4ParamSDViableFamilyCheck(g0, g1, g2, g3)
def lalsim_SimNeutronStarEOS3PieceDynamicPolytrope(g0, log10p1_si, g1, log10p2_si, g2):
from lalsimulation import SimNeutronStarEOS3PieceDynamicPolytrope
try:
g0 = float(g0)
g1 = float(g1)
g2 = float(g2)
log10p1_si = float(log10p1_si)
log10p2_si = float(log10p2_si)
except ValueError:
raise ValueError("Unable to convert EOS polytrope parameters to floats")
except TypeError:
raise TypeError("Unable to convert EOS polytrope parameters to floats")
return SimNeutronStarEOS3PieceDynamicPolytrope(g0, log10p1_si, g1, log10p2_si, g2)
def lalsim_SimNeutronStarEOS3PieceCausalAnalytic(v1, log10p1_si, v2, log10p2_si, v3):
from lalsimulation import SimNeutronStarEOS3PieceCausalAnalytic
try:
v1 = float(v1)
v2 = float(v2)
v3 = float(v3)
log10p1_si = float(log10p1_si)
log10p2_si = float(log10p2_si)
except ValueError:
raise ValueError("Unable to convert EOS causal parameters to floats")
except TypeError:
raise TypeError("Unable to convert EOS causal parameters to floats")
return SimNeutronStarEOS3PieceCausalAnalytic(v1, log10p1_si, v2, log10p2_si, v3)
def lalsim_SimNeutronStarEOS3PDViableFamilyCheck(p0, log10p1_si, p1, log10p2_si, p2, causal):
from lalsimulation import SimNeutronStarEOS3PDViableFamilyCheck
try:
p0 = float(p0)
p1 = float(p1)
p2 = float(p2)
log10p1_si = float(log10p1_si)
log10p2_si = float(log10p2_si)
causal = int(causal)
except ValueError:
raise ValueError("Unable to convert EOS parameters to floats or int")
except TypeError:
raise TypeError("Unable to convert EOS parameters to floats or int")
return SimNeutronStarEOS3PDViableFamilyCheck(p0, log10p1_si, p1, log10p2_si, p2, causal)
def lalsim_CreateSimNeutronStarFamily(eos):
from lalsimulation import CreateSimNeutronStarFamily
return CreateSimNeutronStarFamily(eos)
def lalsim_SimNeutronStarEOSMaxPseudoEnthalpy(eos):
from lalsimulation import SimNeutronStarEOSMaxPseudoEnthalpy
return SimNeutronStarEOSMaxPseudoEnthalpy(eos)
def lalsim_SimNeutronStarEOSSpeedOfSoundGeometerized(max_pseudo_enthalpy, eos):
from lalsimulation import SimNeutronStarEOSSpeedOfSoundGeometerized
try:
max_pseudo_enthalpy = float(max_pseudo_enthalpy)
except ValueError:
raise ValueError("Unable to convert max_pseudo_enthalpy to float.")
except TypeError:
raise TypeError("Unable to convert max_pseudo_enthalpy to float.")
return SimNeutronStarEOSSpeedOfSoundGeometerized(max_pseudo_enthalpy, eos)
def lalsim_SimNeutronStarFamMinimumMass(fam):
from lalsimulation import SimNeutronStarFamMinimumMass
return SimNeutronStarFamMinimumMass(fam)
def lalsim_SimNeutronStarMaximumMass(fam):
from lalsimulation import SimNeutronStarMaximumMass
return SimNeutronStarMaximumMass(fam)
def lalsim_SimNeutronStarRadius(mass_in_SI, fam):
from lalsimulation import SimNeutronStarRadius
try:
mass_in_SI = float(mass_in_SI)
except ValueError:
raise ValueError("Unable to convert mass_in_SI to float.")
except TypeError:
raise TypeError("Unable to convert mass_in_SI to float.")
return SimNeutronStarRadius(mass_in_SI, fam)
def lalsim_SimNeutronStarLoveNumberK2(mass_in_SI, fam):
from lalsimulation import SimNeutronStarLoveNumberK2
try:
mass_in_SI = float(mass_in_SI)
except ValueError:
raise ValueError("Unable to convert mass_in_SI to float.")
except TypeError:
raise TypeError("Unable to convert mass_in_SI to float.")
return SimNeutronStarLoveNumberK2(mass_in_SI, fam)
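# A hedged end-to-end sketch tying the wrappers above to
# neutron_star_family_physical_check from the conversion module (requires
# lalsimulation; the spectral parameters are hypothetical, and a return value
# of 0 from the checks is taken to indicate a viable EOS, matching how the
# 3-piece check is used earlier in this changeset):
_g0, _g1, _g2, _g3 = 0.9, 0.2, -0.02, 0.0  # illustrative spectral parameters
if (lalsim_SimNeutronStarEOS4ParamSDGammaCheck(_g0, _g1, _g2, _g3) == 0
        and lalsim_SimNeutronStarEOS4ParamSDViableFamilyCheck(_g0, _g1, _g2, _g3) == 0):
    _eos = lalsim_SimNeutronStarEOS4ParameterSpectralDecomposition(_g0, _g1, _g2, _g3)
    _lambda_1, _lambda_2, _ok = neutron_star_family_physical_check(_eos, 1.4, 1.3)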
def spline_angle_xform(delta_psi):
"""
Returns the angle in degrees corresponding to the spline
......