Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (47)
Showing with 182 additions and 99 deletions
# All notable changes will be documented in this file
## [2.0.2] 2023-03-21
Version 2.0.2 release of Bilby
This is a bugfix release after the last major update.
### Changes
- Fix to bilby-MCMC implementation of prior boundary (!1237)
- Fix to time calibration (!1234)
- Fix nessai sampling time (!1236)
## [2.0.1] 2023-03-13
Version 2.0.1 release of Bilby
This is a bugfix release after the last major update.
Users may notice changes in inferred binary neutron star masses after updating to match [lalsuite](https://git.ligo.org/lscsoft/lalsuite/-/merge_requests/1658).
### Changes
- Make sure quantities that need to be conserved between dynesty iterations are class-level attributes (!1225).
- Fix massive memory usage in post-processing calculation of SNRs (!1227).
- Update value for the solar mass (!1229).
- Make `scikit-learn` an explicit dependency of `bilby[GW]` (!1230).
## [2.0.0] 2023-02-29
Version 2.0.0 release of Bilby
......
@@ -131,7 +131,8 @@ class BaseProposal(object):
     def __call__(self, chain):
         sample, log_factor = self.propose(chain)
-        sample = self.apply_boundaries(sample)
+        if log_factor == 0:
+            sample = self.apply_boundaries(sample)
         return sample, log_factor

     @abstractmethod
@@ -945,7 +946,8 @@ class EnsembleProposal(BaseProposal):
     def __call__(self, chain, chain_complement):
         sample, log_factor = self.propose(chain, chain_complement)
-        sample = self.apply_boundaries(sample)
+        if log_factor == 0:
+            sample = self.apply_boundaries(sample)
         return sample, log_factor
......
@@ -6,7 +6,8 @@ from collections import namedtuple
 from copy import copy
 from importlib import import_module
 from itertools import product
+import multiprocessing
+from functools import partial
 import numpy as np
 import pandas as pd
 import scipy.stats
@@ -30,6 +31,11 @@ from .prior import Prior, PriorDict, DeltaFunction, ConditionalDeltaFunction
 EXTENSIONS = ["json", "hdf5", "h5", "pickle", "pkl"]


+def __eval_l(likelihood, params):
+    likelihood.parameters.update(params)
+    return likelihood.log_likelihood()
+
+
 def result_file_name(outdir, label, extension='json', gzip=False):
     """ Returns the standard filename used for a result file
@@ -104,7 +110,7 @@ def read_in_result(filename=None, outdir=None, label=None, extension='json', gzip=False):
 def get_weights_for_reweighting(
         result, new_likelihood=None, new_prior=None, old_likelihood=None,
-        old_prior=None, resume_file=None, n_checkpoint=5000):
+        old_prior=None, resume_file=None, n_checkpoint=5000, npool=1):
     """ Calculate the weights for reweight()

     See bilby.core.result.reweight() for help with the inputs
@@ -145,30 +151,45 @@ def get_weights_for_reweighting(
         starting_index = 0

-    for ii, sample in tqdm(result.posterior.iloc[starting_index:].iterrows()):
-        # Convert sample to dictionary
-        par_sample = {key: sample[key] for key in result.posterior}
+    dict_samples = [{key: sample[key] for key in result.posterior}
+                    for _, sample in result.posterior.iterrows()]
+    n = len(dict_samples) - starting_index
+
+    # Helper function to compute likelihoods in parallel
+    def eval_pool(this_logl):
+        with multiprocessing.Pool(processes=npool) as pool:
+            chunksize = max(100, n // (2 * npool))
+            return list(tqdm(
+                pool.imap(partial(__eval_l, this_logl),
+                          dict_samples[starting_index:], chunksize=chunksize),
+                desc='Computing likelihoods',
+                total=n)
+            )

-        if old_likelihood is not None:
-            old_likelihood.parameters.update(par_sample)
-            old_log_likelihood_array[ii] = old_likelihood.log_likelihood()
-        else:
-            old_log_likelihood_array[ii] = sample["log_likelihood"]
+    if old_likelihood is None:
+        old_log_likelihood_array[starting_index:] = \
+            result.posterior["log_likelihood"][starting_index:].to_numpy()
+    else:
+        old_log_likelihood_array[starting_index:] = eval_pool(old_likelihood)

-        if new_likelihood is not None:
-            new_likelihood.parameters.update(par_sample)
-            new_log_likelihood_array[ii] = new_likelihood.log_likelihood()
-        else:
-            # Don't perform likelihood reweighting (i.e. likelihood isn't updated)
-            new_log_likelihood_array[ii] = old_log_likelihood_array[ii]
+    if new_likelihood is None:
+        # Don't perform likelihood reweighting (i.e. likelihood isn't updated)
+        new_log_likelihood_array[starting_index:] = old_log_likelihood_array[starting_index:]
+    else:
+        new_log_likelihood_array[starting_index:] = eval_pool(new_likelihood)

+    # Compute priors
+    for ii, sample in enumerate(tqdm(dict_samples[starting_index:],
+                                     desc='Computing priors',
+                                     total=n),
+                                start=starting_index):
         if old_prior is not None:
-            old_log_prior_array[ii] = old_prior.ln_prob(par_sample)
+            old_log_prior_array[ii] = old_prior.ln_prob(sample)
         else:
             old_log_prior_array[ii] = sample["log_prior"]

         if new_prior is not None:
-            new_log_prior_array[ii] = new_prior.ln_prob(par_sample)
+            new_log_prior_array[ii] = new_prior.ln_prob(sample)
         else:
             # Don't perform prior reweighting (i.e. prior isn't updated)
             new_log_prior_array[ii] = old_log_prior_array[ii]
@@ -272,7 +293,7 @@ def reweight(result, label=None, new_likelihood=None, new_prior=None,
         get_weights_for_reweighting(
             result, new_likelihood=new_likelihood, new_prior=new_prior,
             old_likelihood=old_likelihood, old_prior=old_prior,
-            resume_file=resume_file, n_checkpoint=n_checkpoint)
+            resume_file=resume_file, n_checkpoint=n_checkpoint, npool=npool)

     if use_nested_samples:
         ln_weights += np.log(result.posterior["weights"])
......
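For reference, the new `npool` argument is threaded through from `reweight` to the parallelised weight calculation above. A minimal usage sketch; the result filename and the `new_likelihood`/`new_prior` objects are placeholders, not part of this diff:

```python
# Sketch: reweighting posterior samples with the parallelised weight calculation.
# "result.json", `new_likelihood`, and `new_prior` are user-supplied placeholders.
import bilby

result = bilby.core.result.read_in_result("result.json")
reweighted_result = bilby.core.result.reweight(
    result,
    new_likelihood=new_likelihood,  # e.g. the same model with updated settings
    new_prior=new_prior,
    npool=8,  # likelihood evaluations are farmed out over 8 worker processes
)
```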
@@ -450,32 +450,32 @@ class Dynesty(NestedSampler):
                 f"An average of {2 * self.nact} steps will be accepted up to chain length "
                 f"{self.maxmcmc}."
             )
-            from .dynesty_utils import sample_rwalk_bilby
+            from .dynesty_utils import AcceptanceTrackingRWalk

             if self.kwargs["walks"] > self.maxmcmc:
                 raise DynestySetupError("You have maxmcmc < walks (minimum mcmc)")
             if self.nact < 1:
                 raise DynestySetupError("Unable to run with nact < 1")
-            dynesty.nestedsamplers._SAMPLING["rwalk"] = sample_rwalk_bilby
+            AcceptanceTrackingRWalk.old_act = None
+            dynesty.nestedsamplers._SAMPLING["rwalk"] = AcceptanceTrackingRWalk()
         elif sample == "acceptance-walk":
             logger.info(
                 "Using the bilby-implemented rwalk sampling with an average of "
                 f"{self.naccept} accepted steps per MCMC and maximum length {self.maxmcmc}"
             )
-            from .dynesty_utils import fixed_length_rwalk_bilby
+            from .dynesty_utils import FixedRWalk

-            dynesty.nestedsamplers._SAMPLING[
-                "acceptance-walk"
-            ] = fixed_length_rwalk_bilby
+            dynesty.nestedsamplers._SAMPLING["acceptance-walk"] = FixedRWalk()
         elif sample == "act-walk":
             logger.info(
                 "Using the bilby-implemented rwalk sampling tracking the "
                 f"autocorrelation function and thinning by "
                 f"{self.nact} with maximum length {self.nact * self.maxmcmc}"
             )
-            from .dynesty_utils import bilby_act_rwalk
+            from .dynesty_utils import ACTTrackingRWalk

-            dynesty.nestedsamplers._SAMPLING["act-walk"] = bilby_act_rwalk
+            ACTTrackingRWalk._cache = list()
+            dynesty.nestedsamplers._SAMPLING["act-walk"] = ACTTrackingRWalk()
         elif sample == "rwalk_dynesty":
             sample = sample.strip("_dynesty")
             self.kwargs["sample"] = sample
@@ -901,9 +901,11 @@ class Dynesty(NestedSampler):
         """Run the sampler very briefly as a sanity test that it works."""
         import pandas as pd

         self._set_sampling_method()
+        self._setup_pool()
         self.sampler = self.sampler_init(
-            loglikelihood=self.log_likelihood,
-            prior_transform=self.prior_transform,
+            loglikelihood=_log_likelihood_wrapper,
+            prior_transform=_prior_transform_wrapper,
             ndim=self.ndim,
             **self.sampler_init_kwargs,
         )
@@ -915,6 +917,8 @@ class Dynesty(NestedSampler):
             self.pbar = tqdm(file=sys.stdout, initial=self.sampler.it)
         self.sampler.run_nested(**sampler_kwargs)
+        self._close_pool()
+
         if self.pbar is not None:
             self.pbar = self.pbar.close()
             print("")
......
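For reference, the three class-based proposals above are selected through the `sample` keyword of `run_sampler`. A hedged usage sketch; `likelihood` and `priors` are assumed to be defined elsewhere, and `naccept` only applies to `acceptance-walk`:

```python
# Sketch: opting in to the class-based bilby random-walk implementations.
# `likelihood` and `priors` are assumed to be defined by the user.
import bilby

result = bilby.run_sampler(
    likelihood=likelihood,
    priors=priors,
    sampler="dynesty",
    sample="acceptance-walk",  # dispatches to FixedRWalk; "rwalk" selects
    naccept=60,                # AcceptanceTrackingRWalk, "act-walk" selects
    nlive=500,                 # ACTTrackingRWalk
    outdir="outdir",
    label="acceptance_walk_demo",
)
```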
@@ -181,8 +181,11 @@ class ACTTrackingRWalk:
     parallel process.
     """

+    # the _cache is a class level variable to avoid being forgotten at every
+    # iteration when using multiprocessing
+    _cache = list()
+
     def __init__(self):
-        self._cache = list()
         self.act = 1
         self.thin = getattr(_SamplingContainer, "nact", 2)
         self.maxmcmc = getattr(_SamplingContainer, "maxmcmc", 5000) * 50
@@ -367,10 +370,13 @@ class AcceptanceTrackingRWalk:
     corresponds to specifying :code:`sample="rwalk"`
     """

+    # to retain state between calls to pool.Map, this needs to be a class
+    # level attribute
+    old_act = None
+
     def __init__(self, old_act=None):
         self.maxmcmc = getattr(_SamplingContainer, "maxmcmc", 5000)
         self.nact = getattr(_SamplingContainer, "nact", 40)
-        self.old_act = old_act

     def __call__(self, args):
         rstate = get_random_generator(args.rseed)
@@ -437,7 +443,7 @@ class AcceptanceTrackingRWalk:
             logl = args.loglikelihood(v)

         blob = {"accept": accept, "reject": reject + nfail, "scale": args.scale}
-        self.old_act = act
+        AcceptanceTrackingRWalk.old_act = act

         ncall = accept + reject
         return u, v, logl, ncall, blob
@@ -708,7 +714,3 @@ def apply_boundaries_(u_prop, periodic, reflective):
 proposal_funcs = dict(diff=propose_differetial_evolution, volumetric=propose_volumetric)
-
-fixed_length_rwalk_bilby = FixedRWalk()
-bilby_act_rwalk = ACTTrackingRWalk()
-sample_rwalk_bilby = AcceptanceTrackingRWalk()
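The replacement of these module-level instances with per-call instantiation plus class-level attributes (the `_cache` and `old_act` changes above) matters under multiprocessing: an instance that is re-pickled for each task arrives in the worker with a fresh `__dict__`, whereas attributes stored on the class object live in the worker's module namespace and persist between tasks. A minimal illustration of the pattern, not bilby code:

```python
# Minimal illustration (not bilby code): instance state is lost on re-pickling,
# while class-level state persists within each worker process across tasks.
import multiprocessing


class StatefulCallable:
    last_seen = None  # class-level attribute: survives between tasks in a worker

    def __call__(self, x):
        previous = StatefulCallable.last_seen
        StatefulCallable.last_seen = x
        return previous, x


if __name__ == "__main__":
    with multiprocessing.Pool(1) as pool:  # one worker, so tasks run in order
        print(pool.map(StatefulCallable(), [1, 2, 3]))
        # -> [(None, 1), (1, 2), (2, 3)]
```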
@@ -197,6 +197,7 @@ class Nessai(NestedSampler):
         # Manually set likelihood evaluations because parallelisation breaks the counter
         self.result.num_likelihood_evaluations = self.fs.ns.total_likelihood_evaluations

+        self.result.sampling_time = self.fs.ns.sampling_time
         self.result.samples = live_points_to_array(
             self.fs.posterior_samples, self.search_parameter_keys
         )
......
@@ -2,6 +2,6 @@
 speed_of_light = 299792458.0  # m/s
 parsec = 3.085677581491367e+16  # m
-solar_mass = 1.9884099021470415e+30  # Kg
+solar_mass = 1.988409870698050731911960804878414216e30  # Kg
 radius_of_earth = 6378136.6  # m
 gravitational_constant = 6.6743e-11  # m^3 kg^-1 s^-2
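A quick consistency check for the new value; this sketch assumes the `lal` Python bindings are installed and that `lal.MSUN_SI` carries the matching nominal solar-mass literal from lalsuite:

```python
# Sketch: cross-check the updated constant against lalsuite's nominal solar
# mass (assumes the `lal` Python bindings are available alongside bilby).
import math

import lal
from bilby.core.utils import constants

assert math.isclose(constants.solar_mass, lal.MSUN_SI, rel_tol=1e-15)
```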
@@ -268,6 +268,11 @@ def encode_for_hdf5(key, item):
         item = float(item)
     elif isinstance(item, np.complex_):
         item = complex(item)
+    if isinstance(item, np.ndarray):
+        # Numpy's wide unicode strings are not supported by hdf5
+        if item.dtype.kind == 'U':
+            logger.debug(f'converting dtype {item.dtype} for hdf5')
+            item = np.array(item, dtype='S')
     if isinstance(item, (np.ndarray, int, float, complex, str, bytes)):
         output = item
     elif item is None:
......
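The new branch automates what one would otherwise do by hand before writing to HDF5. A small sketch of the conversion with illustrative values:

```python
# Sketch: numpy wide-unicode ('U') arrays are not accepted by h5py, so they
# are re-encoded as fixed-width byte strings ('S') before writing.
import numpy as np

labels = np.array(["H1", "L1", "V1"])  # dtype('<U2')
if labels.dtype.kind == "U":
    labels = np.array(labels, dtype="S")  # dtype('S2'), safe for hdf5
```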
@@ -251,7 +251,8 @@ def convert_to_lal_binary_black_hole_parameters(parameters):
     for angle in ['tilt_1', 'tilt_2', 'theta_jn']:
         cos_angle = str('cos_' + angle)
         if cos_angle in converted_parameters.keys():
-            converted_parameters[angle] = np.arccos(converted_parameters[cos_angle])
+            with np.errstate(invalid="ignore"):
+                converted_parameters[angle] = np.arccos(converted_parameters[cos_angle])

     if "delta_phase" in original_keys:
         with np.errstate(invalid="ignore"):
@@ -1465,7 +1466,7 @@ def _compute_snrs(args):
     )
     snrs = list()
     for ifo in likelihood.interferometers:
-        snrs.append(likelihood.calculate_snrs(signal_polarizations, ifo))
+        snrs.append(likelihood.calculate_snrs(signal_polarizations, ifo, return_array=False))
     return snrs
......
@@ -236,16 +236,26 @@ class GravitationalWaveTransient(Likelihood):
                         "waveform_generator.".format(attribute))
             setattr(self.waveform_generator, attribute, ifo_attr)

-    def calculate_snrs(self, waveform_polarizations, interferometer):
+    def calculate_snrs(self, waveform_polarizations, interferometer, return_array=True):
         """
         Compute the snrs

         Parameters
-        ==========
+        ----------
         waveform_polarizations: dict
             A dictionary of waveform polarizations and the corresponding array
         interferometer: bilby.gw.detector.Interferometer
             The bilby interferometer object
+        return_array: bool
+            If true, calculate and return internal array objects
+            (d_inner_h_array and optimal_snr_squared_array), otherwise
+            these are returned as None.
+
+        Returns
+        -------
+        calculated_snrs: _CalculatedSNRs
+            An object containing the SNR quantities and (if return_array=True)
+            the internal array objects.
         """
         signal = self._compute_full_waveform(
@@ -266,7 +276,10 @@ class GravitationalWaveTransient(Likelihood):
         normalization = 4 / self.waveform_generator.duration

-        if self.time_marginalization and self.calibration_marginalization:
+        if return_array is False:
+            d_inner_h_array = None
+            optimal_snr_squared_array = None
+        elif self.time_marginalization and self.calibration_marginalization:
             d_inner_h_integrand = np.tile(
                 interferometer.frequency_domain_strain.conjugate() * signal /
@@ -405,12 +418,7 @@ class GravitationalWaveTransient(Likelihood):
     def compute_log_likelihood_from_snrs(self, total_snrs):

-        if self.calibration_marginalization and self.time_marginalization:
-            log_l = self.time_and_calibration_marginalized_likelihood(
-                d_inner_h_array=total_snrs.d_inner_h_array,
-                h_inner_h=total_snrs.optimal_snr_squared_array)
-
-        elif self.calibration_marginalization:
+        if self.calibration_marginalization:
             log_l = self.calibration_marginalized_likelihood(
                 d_inner_h_calibration_array=total_snrs.d_inner_h_array,
                 h_inner_h=total_snrs.optimal_snr_squared_array)
@@ -479,11 +487,6 @@ class GravitationalWaveTransient(Likelihood):
         else:
             return self.parameters

-        if self.calibration_marginalization and self.time_marginalization:
-            raise AttributeError(
-                "Cannot use time and calibration marginalization simultaneously for regeneration at the moment!"
-                "The matrix manipulation has not been tested.")
-
         if self.calibration_marginalization:
             new_calibration = self.generate_calibration_sample_from_marginalized_likelihood(
                 signal_polarizations=signal_polarizations)
@@ -739,52 +742,36 @@ class GravitationalWaveTransient(Likelihood):
         d_inner_h = ln_i0(abs(d_inner_h))

         if self.calibration_marginalization and self.time_marginalization:
-            return d_inner_h - np.outer(h_inner_h, np.ones(np.shape(d_inner_h)[1])) / 2
+            return d_inner_h - h_inner_h[:, np.newaxis] / 2
         else:
             return d_inner_h - h_inner_h / 2

     def time_marginalized_likelihood(self, d_inner_h_tc_array, h_inner_h):
-        if self.distance_marginalization:
-            log_l_tc_array = self.distance_marginalized_likelihood(
-                d_inner_h=d_inner_h_tc_array, h_inner_h=h_inner_h)
-        elif self.phase_marginalization:
-            log_l_tc_array = self.phase_marginalized_likelihood(
-                d_inner_h=d_inner_h_tc_array,
-                h_inner_h=h_inner_h)
-        else:
-            log_l_tc_array = np.real(d_inner_h_tc_array) - h_inner_h / 2
-        times = self._times
-        if self.jitter_time:
-            times = self._times + self.parameters['time_jitter']
-        time_prior_array = self.priors['geocent_time'].prob(times) * self._delta_tc
-        return logsumexp(log_l_tc_array, b=time_prior_array)
-
-    def time_and_calibration_marginalized_likelihood(self, d_inner_h_array, h_inner_h):
         times = self._times
         if self.jitter_time:
             times = self._times + self.parameters['time_jitter']
         _time_prior = self.priors['geocent_time']
-        time_mask = np.logical_and((times >= _time_prior.minimum), (times <= _time_prior.maximum))
+        time_mask = (times >= _time_prior.minimum) & (times <= _time_prior.maximum)
         times = times[time_mask]
-        time_probs = self.priors['geocent_time'].prob(times) * self._delta_tc
-        d_inner_h_array = d_inner_h_array[:, time_mask]
-        h_inner_h = h_inner_h
+        time_prior_array = self.priors['geocent_time'].prob(times) * self._delta_tc
+        if self.calibration_marginalization:
+            d_inner_h_tc_array = d_inner_h_tc_array[:, time_mask]
+        else:
+            d_inner_h_tc_array = d_inner_h_tc_array[time_mask]

         if self.distance_marginalization:
-            log_l_array = self.distance_marginalized_likelihood(
-                d_inner_h=d_inner_h_array, h_inner_h=h_inner_h)
+            log_l_tc_array = self.distance_marginalized_likelihood(
+                d_inner_h=d_inner_h_tc_array, h_inner_h=h_inner_h)
         elif self.phase_marginalization:
-            log_l_array = self.phase_marginalized_likelihood(
-                d_inner_h=d_inner_h_array,
+            log_l_tc_array = self.phase_marginalized_likelihood(
+                d_inner_h=d_inner_h_tc_array,
                 h_inner_h=h_inner_h)
+        elif self.calibration_marginalization:
+            log_l_tc_array = np.real(d_inner_h_tc_array) - h_inner_h[:, np.newaxis] / 2
         else:
-            log_l_array = np.real(d_inner_h_array) - np.outer(h_inner_h, np.ones(np.shape(d_inner_h_array)[1])) / 2
-        prior_array = np.outer(time_probs, 1. / self.number_of_response_curves * np.ones(len(h_inner_h))).T
-        return logsumexp(log_l_array, b=prior_array)
+            log_l_tc_array = np.real(d_inner_h_tc_array) - h_inner_h / 2
+        return logsumexp(log_l_tc_array, b=time_prior_array, axis=-1)

     def get_calibration_log_likelihoods(self, signal_polarizations=None):
         self.parameters.update(self.get_sky_frame_parameters())
@@ -801,7 +788,12 @@ class GravitationalWaveTransient(Likelihood):
             total_snrs += per_detector_snr

-        if self.distance_marginalization:
+        if self.time_marginalization:
+            log_l_cal_array = self.time_marginalized_likelihood(
+                d_inner_h_tc_array=total_snrs.d_inner_h_array,
+                h_inner_h=total_snrs.optimal_snr_squared_array,
+            )
+        elif self.distance_marginalization:
             log_l_cal_array = self.distance_marginalized_likelihood(
                 d_inner_h=total_snrs.d_inner_h_array,
                 h_inner_h=total_snrs.optimal_snr_squared_array)
@@ -816,7 +808,12 @@ class GravitationalWaveTransient(Likelihood):
         return log_l_cal_array

     def calibration_marginalized_likelihood(self, d_inner_h_calibration_array, h_inner_h):
-        if self.distance_marginalization:
+        if self.time_marginalization:
+            log_l_cal_array = self.time_marginalized_likelihood(
+                d_inner_h_tc_array=d_inner_h_calibration_array,
+                h_inner_h=h_inner_h,
+            )
+        elif self.distance_marginalization:
             log_l_cal_array = self.distance_marginalized_likelihood(
                 d_inner_h=d_inner_h_calibration_array, h_inner_h=h_inner_h)
         elif self.phase_marginalization:
......
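For orientation, the refactored `time_marginalized_likelihood` now reduces either a 1-D time array or a 2-D (calibration curve × time) array over its last axis. A numeric sketch with stand-in values, not bilby internals:

```python
# Numeric sketch (stand-in values): with calibration marginalization the
# <d|h>(t) array has shape (n_curves, n_times); the time prior is summed out
# along the last axis, leaving one log-likelihood per calibration curve.
import numpy as np
from scipy.special import logsumexp

n_curves, n_times = 4, 8
d_inner_h_tc_array = np.random.randn(n_curves, n_times)  # stand-in for Re<d|h>(t)
h_inner_h = np.abs(np.random.randn(n_curves))            # stand-in for <h|h>
time_prior_array = np.full(n_times, 1 / n_times)         # uniform prior * delta_tc

log_l_tc_array = d_inner_h_tc_array - h_inner_h[:, np.newaxis] / 2
log_l_per_curve = logsumexp(log_l_tc_array, b=time_prior_array, axis=-1)
assert log_l_per_curve.shape == (n_curves,)
```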
@@ -693,18 +693,26 @@ class MBGravitationalWaveTransient(GravitationalWaveTransient):
             else:
                 setattr(self, key, value)

-    def calculate_snrs(self, waveform_polarizations, interferometer):
+    def calculate_snrs(self, waveform_polarizations, interferometer, return_array=False):
         """
-        Compute the snrs for multi-banding
+        Compute the snrs

         Parameters
         ----------
-        waveform_polarizations: waveform
+        waveform_polarizations: dict
+            A dictionary of waveform polarizations and the corresponding array
         interferometer: bilby.gw.detector.Interferometer
+            The bilby interferometer object
+        return_array: bool
+            If true, calculate and return internal array objects
+            (d_inner_h_array and optimal_snr_squared_array), otherwise
+            these are returned as None. This parameter is ignored for the multiband
+            model as these arrays are never calculated.

         Returns
         -------
-        snrs: named tuple of snrs
+        calculated_snrs: _CalculatedSNRs
+            An object containing the SNR quantities.
         """
         strain = np.zeros(len(self.banded_frequency_points), dtype=complex)
......
@@ -382,7 +382,7 @@ class RelativeBinningGravitationalWaveTransient(GravitationalWaveTransient):
         full_waveform_ratio = duplicated_r0 + duplicated_r1 * (f - duplicated_fm)
         return fiducial_waveform * full_waveform_ratio

-    def calculate_snrs(self, waveform_polarizations, interferometer):
+    def calculate_snrs(self, waveform_polarizations, interferometer, return_array=True):
         r0, r1 = self.compute_waveform_ratio_per_interferometer(
             waveform_polarizations=waveform_polarizations,
             interferometer=interferometer,
@@ -393,7 +393,7 @@ class RelativeBinningGravitationalWaveTransient(GravitationalWaveTransient):
         optimal_snr_squared = h_inner_h
         complex_matched_filter_snr = d_inner_h / (optimal_snr_squared ** 0.5)

-        if self.time_marginalization:
+        if return_array and self.time_marginalization:
             full_waveform = self._compute_full_waveform(
                 signal_polarizations=waveform_polarizations,
                 interferometer=interferometer,
......
@@ -394,7 +394,7 @@ class ROQGravitationalWaveTransient(GravitationalWaveTransient):
     def waveform_generator(self, waveform_generator):
         self._waveform_generator = waveform_generator

-    def calculate_snrs(self, waveform_polarizations, interferometer):
+    def calculate_snrs(self, waveform_polarizations, interferometer, return_array=True):
         """
         Compute the snrs for ROQ
@@ -458,7 +458,7 @@ class ROQGravitationalWaveTransient(GravitationalWaveTransient):
         with np.errstate(invalid="ignore"):
             complex_matched_filter_snr = d_inner_h / (optimal_snr_squared**0.5)

-        if self.time_marginalization:
+        if return_array and self.time_marginalization:
             ifo_times = self._times - interferometer.strain_data.start_time
             ifo_times += dt
             if self.jitter_time:
......
@@ -3,3 +3,4 @@ lalsuite
 gwpy
 tables
 pyfftw
+scikit-learn
@@ -3,7 +3,7 @@
 import re
 import subprocess

-special_cases = ["plasky", "thomas", "mj-will", "richard"]
+special_cases = ["plasky", "thomas", "mj-will", "richard", "douglas"]
 AUTHORS_list = []
 with open("AUTHORS.md", "r") as f:
     AUTHORS_list = " ".join([line for line in f]).lower()
......
 import unittest
 from copy import deepcopy
 from unittest.mock import MagicMock
 from attr import define

 import bilby
@@ -22,9 +21,22 @@ class Dummy:
     loglstar: float = -1


+class DummyLikelihood(bilby.core.likelihood.Likelihood):
+    """
+    A trivial likelihood used for testing. Add some randomness so the likelihood
+    isn't flat everywhere as that can cause issues for nested samplers.
+    """
+
+    def __init__(self):
+        super().__init__(dict())
+
+    def log_likelihood(self):
+        return np.random.uniform(0, 0.01)
+
+
 class TestDynesty(unittest.TestCase):
     def setUp(self):
-        self.likelihood = MagicMock()
+        self.likelihood = DummyLikelihood()
         self.priors = bilby.core.prior.PriorDict(
             dict(a=bilby.core.prior.Uniform(0, 1), b=bilby.core.prior.Uniform(0, 1))
         )
@@ -83,6 +95,9 @@ class TestDynesty(unittest.TestCase):
         self.assertEqual([0, 4], self.sampler.kwargs["periodic"])
         self.assertEqual([1, 3], self.sampler.kwargs["reflective"])

+    def test_run_test_runs(self):
+        self.sampler._run_test()
+

 class ProposalsTest(unittest.TestCase):
@@ -242,6 +257,7 @@ class TestEstimateNMCMC(unittest.TestCase):
         safety * (2 / accept_ratio - 1)
         """
         sampler = dynesty_utils.AcceptanceTrackingRWalk()
+        dynesty_utils.AcceptanceTrackingRWalk.old_act = None
         for _ in range(10):
             accept_ratio = np.random.uniform()
             safety = np.random.randint(2, 8)
......
@@ -8,6 +8,7 @@ import lalsimulation as lalsim
 from gwpy.timeseries import TimeSeries
 from gwpy.detector import Channel
 from scipy.stats import ks_2samp
+import pytest

 import bilby
 from bilby.gw import utils as gwutils
@@ -72,6 +73,7 @@ class TestGWUtils(unittest.TestCase):
         )
         self.assertEqual(mfsnr, 25.510869054168282)

+    @pytest.mark.skip(reason="GWOSC unstable: avoiding this test")
     def test_get_event_time(self):
         from urllib3.exceptions import NewConnectionError
         events = [
......