diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 9460a2cfe4a304e03f097fe1a95a7d78597ba751..d3b5a4ce731ae10b1b541867272a3fecd6b99e52 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -44,12 +44,17 @@ containers:
     - apt-get -yqq install libhdf5-dev
   script:
     - python -m pip install .
+    - python -m pip list
     - python -c "import bilby"
+    - python -c "import bilby.bilby_mcmc"
     - python -c "import bilby.core"
     - python -c "import bilby.core.prior"
     - python -c "import bilby.core.sampler"
+    - python -c "import bilby.core.utils"
     - python -c "import bilby.gw"
     - python -c "import bilby.gw.detector"
+    - python -c "import bilby.gw.eos"
+    - python -c "import bilby.gw.likelihood"
     - python -c "import bilby.gw.sampler"
     - python -c "import bilby.hyper"
     - python -c "import cli_bilby"
@@ -122,6 +127,7 @@ python-3.7:
   image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
   script:
     - python -m pip install .
+    - python -m pip list
 
     # Run pyflakes
     - flake8 .
@@ -143,6 +149,7 @@ python-3.8:
   image: containers.ligo.org/lscsoft/bilby/v2-bilby-python38
   script:
     - python -m pip install .
+    - python -m pip list
     - pytest
 
 # test example on python 3.9
@@ -152,6 +159,7 @@ python-3.9:
   image: containers.ligo.org/lscsoft/bilby/v2-bilby-python39
   script:
     - python -m pip install .
+    - python -m pip list
     - pytest
 
 
@@ -162,6 +170,7 @@ python-3.7-samplers:
   image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
   script:
     - python -m pip install .
+    - python -m pip list
 
     - pytest test/integration/sampler_run_test.py --durations 10
 
@@ -172,6 +181,7 @@ python-3.8-samplers:
   image: containers.ligo.org/lscsoft/bilby/v2-bilby-python38
   script:
     - python -m pip install .
+    - python -m pip list
 
     - pytest test/integration/sampler_run_test.py --durations 10
 
@@ -182,6 +192,7 @@ python-3.9-samplers:
   image: containers.ligo.org/lscsoft/bilby/v2-bilby-python39
   script:
     - python -m pip install .
+    - python -m pip list
 
     - pytest test/integration/sampler_run_test.py --durations 10
 
@@ -193,6 +204,7 @@ integration-tests-python-3.7:
     - schedules
   script:
     - python -m pip install .
+    - python -m pip list
     # Run tests which are only done on schedule
     - pytest test/integration/example_test.py
 
@@ -204,7 +216,21 @@ plotting-python-3.7:
     - schedules
   script:
     - python -m pip install .
-    - python -m pip install ligo.skymap
+    - python -m pip install "ligo.skymap>=0.5.3"
+    - python -m pip list
+    - pytest test/gw/plot_test.py
+
+
+plotting-python-3.9:
+  stage: test
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python39
+  needs: ["basic-3.9", "precommits-py3.9"]
+  only:
+    - schedules
+  script:
+    - python -m pip install .
+    - python -m pip install "ligo.skymap>=0.5.3"
+    - python -m pip list
     - pytest test/gw/plot_test.py
 
 
@@ -301,4 +327,4 @@ pypi-release:
   script:
     - twine upload dist/*
   only:
-  - tags
+    - tags
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 8b4324cb2954c2a0782615b95b3b688eee4660c4..959bc977f86c1bd11f9831084017e60d50e6def3 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -15,14 +15,8 @@ repos:
     hooks:
     -   id: codespell
         args: [--ignore-words=.dictionary.txt]
--   repo: https://github.com/asottile/seed-isort-config
-    rev: v1.3.0
-    hooks:
-    -   id: seed-isort-config
-        args: [--application-directories, 'bilby/']
-        files: '(^bilby/bilby_mcmc/|^examples/core_examples/)'
 -   repo: https://github.com/pre-commit/mirrors-isort
-    rev: v4.3.21
+    rev: v5.10.1
     hooks:
     -   id: isort # sort imports alphabetically and separates import into sections
         args: [-w=88, -m=3, -tc, -sp=setup.cfg ]
diff --git a/AUTHORS.md b/AUTHORS.md
index 71baaa6e947fae44cd327ab1a2506cc5afa9004c..b95f3ff66f4cc8f002143e8199199e840d0cf30c 100644
--- a/AUTHORS.md
+++ b/AUTHORS.md
@@ -27,6 +27,7 @@ Gregory Ashton
 Hector Estelles
 Ignacio Magaña Hernandez
 Isobel Marguarethe Romero-Shaw
+Jack Heinzel
 Jade Powell
 James A Clark
 Jeremy G Baier
@@ -69,6 +70,7 @@ Sharan Banagiri
 Shichao Wu
 Simon Stevenson
 Soichiro Morisaki
+Stephen R Green
 Sumeet Kulkarni
 Sylvia Biscoveanu
 Tathagata Ghosh
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c86ed6beae31f03c8d1e8c1a71f0a4ac88f84e0e..16ccafc5e7bd46d33b7223b16df5a663db1ce852 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,25 @@
 # All notable changes will be documented in this file
 
+## [1.1.4] 2021-10-08
+Version 1.1.4 release of bilby
+
+### Added
+- A pin restricting dynesty to versions below v1.1 in anticipation of breaking changes (!1020)
+- Pool support in the computation of the SNR (!1013)
+- Ability to load results produced with custom priors (!1010)
+- The nestcheck test (!1005)
+- Bilby-mcmc guide to docs (!1001)
+- Codespell pre-commit (!996)
+- MBGravitationalWaveTransient (!972)
+- Zeus MCMC sampler support (!962)
+- Option to use print rather than tqdm (!937)
+
+### Changes
+- Updated the citation guide (!1030)
+- Minor bug fixes (!1029, !1025, !1022, !1016, !1018, !1014, !1007, !1004)
+- Fixed a typo in earth light crossing (!1003)
+- Fixed zero spin conversion (!1002)
+
 ## [1.1.3] 2021-07-02
 Version 1.1.3 release of bilby
 
diff --git a/README.rst b/README.rst
index 529ee6a41aea335f6c50d87e0f682b94092baed7..bddce5f89f78ff19960a31d6ca9c07b89943b6b9 100644
--- a/README.rst
+++ b/README.rst
@@ -35,6 +35,12 @@ If you use :code:`bilby` in a scientific publication, please cite
 * `Bilby: A user-friendly Bayesian inference library for gravitational-wave
   astronomy
   <https://ui.adsabs.harvard.edu/#abs/2018arXiv181102042A/abstract>`__
+* `Bayesian inference for compact binary coalescences with BILBY: validation and application to the first LIGO-Virgo gravitational-wave transient catalogue <https://ui.adsabs.harvard.edu/abs/2020MNRAS.499.3295R/abstract>`__
+
+The first of these papers introduces the software, while the second describes advances in the sampling approaches and the validation of the software.
+If you use the :code:`bilby_mcmc` sampler, please additionally cite
+
+* `BILBY-MCMC: an MCMC sampler for gravitational-wave inference <https://ui.adsabs.harvard.edu/abs/2021MNRAS.507.2037A/abstract>`__
 
 Additionally, :code:`bilby` builds on a number of open-source packages. If you
 make use of this functionality in your publications, we recommend you cite them
diff --git a/bilby/core/prior/dict.py b/bilby/core/prior/dict.py
index 27596590685f85efad184e5b0a48c57920a706ca..15e6199da24f1e7faba06f9d6c5cfa0bdbe3acc9 100644
--- a/bilby/core/prior/dict.py
+++ b/bilby/core/prior/dict.py
@@ -423,27 +423,38 @@ class PriorDict(dict):
             }
             return all_samples
 
-    def normalize_constraint_factor(self, keys):
+    def normalize_constraint_factor(self, keys, min_accept=10000, sampling_chunk=50000, nrepeats=10):
         if keys in self._cached_normalizations.keys():
             return self._cached_normalizations[keys]
         else:
-            min_accept = 1000
-            sampling_chunk = 5000
+            factor_estimates = [
+                self._estimate_normalization(keys, min_accept, sampling_chunk)
+                for _ in range(nrepeats)
+            ]
+            factor = np.mean(factor_estimates)
+            if np.std(factor_estimates) > 0:
+                decimals = int(-np.floor(np.log10(3 * np.std(factor_estimates))))
+                factor_rounded = np.round(factor, decimals)
+            else:
+                factor_rounded = factor
+            self._cached_normalizations[keys] = factor_rounded
+            return factor_rounded
+
+    def _estimate_normalization(self, keys, min_accept, sampling_chunk):
+        samples = self.sample_subset(keys=keys, size=sampling_chunk)
+        keep = np.atleast_1d(self.evaluate_constraints(samples))
+        if len(keep) == 1:
+            self._cached_normalizations[keys] = 1
+            return 1
+        all_samples = {key: np.array([]) for key in keys}
+        while np.count_nonzero(keep) < min_accept:
             samples = self.sample_subset(keys=keys, size=sampling_chunk)
-            keep = np.atleast_1d(self.evaluate_constraints(samples))
-            if len(keep) == 1:
-                self._cached_normalizations[keys] = 1
-                return 1
-            all_samples = {key: np.array([]) for key in keys}
-            while np.count_nonzero(keep) < min_accept:
-                samples = self.sample_subset(keys=keys, size=sampling_chunk)
-                for key in samples:
-                    all_samples[key] = np.hstack(
-                        [all_samples[key], samples[key].flatten()])
-                keep = np.array(self.evaluate_constraints(all_samples), dtype=bool)
-            factor = len(keep) / np.count_nonzero(keep)
-            self._cached_normalizations[keys] = factor
-            return factor
+            for key in samples:
+                all_samples[key] = np.hstack(
+                    [all_samples[key], samples[key].flatten()])
+            keep = np.array(self.evaluate_constraints(all_samples), dtype=bool)
+        factor = len(keep) / np.count_nonzero(keep)
+        return factor
 
     def prob(self, sample, **kwargs):
         """
@@ -468,11 +479,11 @@ class PriorDict(dict):
     def check_prob(self, sample, prob):
         ratio = self.normalize_constraint_factor(tuple(sample.keys()))
         if np.all(prob == 0.):
-            return prob
+            return prob * ratio
         else:
             if isinstance(prob, float):
                 if self.evaluate_constraints(sample):
-                    return prob
+                    return prob * ratio
                 else:
                     return 0.
             else:
@@ -508,7 +519,7 @@ class PriorDict(dict):
         else:
             if isinstance(ln_prob, float):
                 if self.evaluate_constraints(sample):
-                    return ln_prob
+                    return ln_prob + np.log(ratio)
                 else:
                     return -np.inf
             else:
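
The effect of the refactored normalization is easiest to see end-to-end. Below is a minimal sketch (not part of the patch; the constraint and parameter names are invented) in which roughly half of the prior volume is cut away, so `prob` for an in-bounds sample is scaled up by a factor of about two:

    import numpy as np
    from bilby.core.prior import Constraint, PriorDict, Uniform

    def convert(parameters):
        # Hypothetical constraint variable: require y > x
        parameters = parameters.copy()
        parameters["diff"] = parameters["y"] - parameters["x"]
        return parameters

    priors = PriorDict(conversion_function=convert)
    priors["x"] = Uniform(minimum=0, maximum=1)
    priors["y"] = Uniform(minimum=0, maximum=1)
    priors["diff"] = Constraint(minimum=0, maximum=1)

    sample = priors.sample()
    # The base probability (1.0 for two unit uniforms) is multiplied by the
    # rejection-sampling factor estimated in normalize_constraint_factor (~2)
    print(priors.prob(sample))
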
diff --git a/bilby/core/result.py b/bilby/core/result.py
index dbe7e29ac8920d6423f415eca12ebda6c92c14a0..48797a4f100e6583a880b59822df61997c6aeaa8 100644
--- a/bilby/core/result.py
+++ b/bilby/core/result.py
@@ -538,10 +538,17 @@ class Result(object):
     @classmethod
     @docstring(_load_doctstring.format(format="json"))
     def from_json(cls, filename=None, outdir=None, label=None, gzip=False):
+        from json.decoder import JSONDecodeError
+
         filename = _determine_file_name(filename, outdir, label, 'json', gzip)
 
         if os.path.isfile(filename):
-            dictionary = load_json(filename, gzip)
+            try:
+                dictionary = load_json(filename, gzip)
+            except JSONDecodeError as e:
+                raise IOError(
+                    "JSON failed to decode {} with message {}".format(filename, e)
+                )
             try:
                 return cls(**dictionary)
             except TypeError as e:
@@ -1429,20 +1436,19 @@ class Result(object):
             Function which adds in extra parameters to the data frame,
             should take the data_frame, likelihood and prior as arguments.
         """
-        try:
-            data_frame = self.posterior
-        except ValueError:
-            data_frame = pd.DataFrame(
-                self.samples, columns=self.search_parameter_keys)
-            data_frame = self._add_prior_fixed_values_to_posterior(
-                data_frame, priors)
-            data_frame['log_likelihood'] = getattr(
-                self, 'log_likelihood_evaluations', np.nan)
-            if self.log_prior_evaluations is None and priors is not None:
-                data_frame['log_prior'] = priors.ln_prob(
-                    dict(data_frame[self.search_parameter_keys]), axis=0)
-            else:
-                data_frame['log_prior'] = self.log_prior_evaluations
+
+        data_frame = pd.DataFrame(
+            self.samples, columns=self.search_parameter_keys)
+        data_frame = self._add_prior_fixed_values_to_posterior(
+            data_frame, priors)
+        data_frame['log_likelihood'] = getattr(
+            self, 'log_likelihood_evaluations', np.nan)
+        if self.log_prior_evaluations is None and priors is not None:
+            data_frame['log_prior'] = priors.ln_prob(
+                dict(data_frame[self.search_parameter_keys]), axis=0)
+        else:
+            data_frame['log_prior'] = self.log_prior_evaluations
+
         if conversion_function is not None:
             if "npool" in inspect.getargspec(conversion_function).args:
                 data_frame = conversion_function(data_frame, likelihood, priors, npool=npool)
@@ -1988,9 +1994,9 @@ def plot_multiple(results, filename=None, labels=None, colours=None,
 
     if evidences:
         if np.isnan(results[0].log_bayes_factor):
-            template = ' $\mathrm{{ln}}(Z)={lnz:1.3g}$'
+            template = r' $\mathrm{{ln}}(Z)={lnz:1.3g}$'
         else:
-            template = ' $\mathrm{{ln}}(B)={lnbf:1.3g}$'
+            template = r' $\mathrm{{ln}}(B)={lnbf:1.3g}$'
         labels = [template.format(lnz=result.log_evidence,
                                   lnbf=result.log_bayes_factor)
                   for ii, result in enumerate(results)]
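
A short usage sketch (with a hypothetical output path) of the new guard in `Result.from_json`: a truncated or corrupt file now surfaces as an `IOError` that names the offending file, rather than a bare `JSONDecodeError`:

    from bilby.core.result import Result

    try:
        result = Result.from_json(outdir="outdir", label="label")
    except IOError as e:
        # e.g. "JSON failed to decode outdir/label_result.json with message ..."
        print(e)
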
diff --git a/bilby/core/sampler/__init__.py b/bilby/core/sampler/__init__.py
index dde8d920cd52a03b7d3e647f29c5333378af20db..77e481e9574c2c3e098c8a08787d6fb00600afe2 100644
--- a/bilby/core/sampler/__init__.py
+++ b/bilby/core/sampler/__init__.py
@@ -22,15 +22,29 @@ from .pymultinest import Pymultinest
 from .ultranest import Ultranest
 from .fake_sampler import FakeSampler
 from .dnest4 import DNest4
+from .zeus import Zeus
 from bilby.bilby_mcmc import Bilby_MCMC
 from . import proposal
 
 IMPLEMENTED_SAMPLERS = {
-    'bilby_mcmc': Bilby_MCMC, 'cpnest': Cpnest, 'dnest4': DNest4, 'dynamic_dynesty': DynamicDynesty,
-    'dynesty': Dynesty, 'emcee': Emcee,'kombine': Kombine, 'nessai': Nessai,
-    'nestle': Nestle, 'ptemcee': Ptemcee, 'ptmcmcsampler': PTMCMCSampler,
-    'pymc3': Pymc3, 'pymultinest': Pymultinest, 'pypolychord': PyPolyChord,
-    'ultranest': Ultranest, 'fake_sampler': FakeSampler}
+    "bilby_mcmc": Bilby_MCMC,
+    "cpnest": Cpnest,
+    "dnest4": DNest4,
+    "dynamic_dynesty": DynamicDynesty,
+    "dynesty": Dynesty,
+    "emcee": Emcee,
+    "kombine": Kombine,
+    "nessai": Nessai,
+    "nestle": Nestle,
+    "ptemcee": Ptemcee,
+    "ptmcmcsampler": PTMCMCSampler,
+    "pymc3": Pymc3,
+    "pymultinest": Pymultinest,
+    "pypolychord": PyPolyChord,
+    "ultranest": Ultranest,
+    "zeus": Zeus,
+    "fake_sampler": FakeSampler,
+}
 
 if command_line_args.sampler_help:
     sampler = command_line_args.sampler_help
@@ -40,20 +54,36 @@ if command_line_args.sampler_help:
         print(sampler_class.__doc__)
     else:
         if sampler == "None":
-            print('For help with a specific sampler, call sampler-help with '
-                  'the name of the sampler')
+            print(
+                "For help with a specific sampler, call sampler-help with "
+                "the name of the sampler"
+            )
         else:
-            print('Requested sampler {} not implemented'.format(sampler))
-        print('Available samplers = {}'.format(IMPLEMENTED_SAMPLERS))
+            print("Requested sampler {} not implemented".format(sampler))
+        print("Available samplers = {}".format(IMPLEMENTED_SAMPLERS))
 
     sys.exit()
 
 
-def run_sampler(likelihood, priors=None, label='label', outdir='outdir',
-                sampler='dynesty', use_ratio=None, injection_parameters=None,
-                conversion_function=None, plot=False, default_priors_file=None,
-                clean=None, meta_data=None, save=True, gzip=False,
-                result_class=None, npool=1, **kwargs):
+def run_sampler(
+    likelihood,
+    priors=None,
+    label="label",
+    outdir="outdir",
+    sampler="dynesty",
+    use_ratio=None,
+    injection_parameters=None,
+    conversion_function=None,
+    plot=False,
+    default_priors_file=None,
+    clean=None,
+    meta_data=None,
+    save=True,
+    gzip=False,
+    result_class=None,
+    npool=1,
+    **kwargs
+):
     """
     The primary interface to easy parameter estimation
 
@@ -116,13 +146,13 @@ def run_sampler(likelihood, priors=None, label='label', outdir='outdir',
     """
 
     logger.info(
-        "Running for label '{}', output will be saved to '{}'".format(
-            label, outdir))
+        "Running for label '{}', output will be saved to '{}'".format(label, outdir)
+    )
 
     if clean:
         command_line_args.clean = clean
     if command_line_args.clean:
-        kwargs['resume'] = False
+        kwargs["resume"] = False
 
     from . import IMPLEMENTED_SAMPLERS
 
@@ -143,11 +173,14 @@ def run_sampler(likelihood, priors=None, label='label', outdir='outdir',
     # Generate the meta-data if not given and append the likelihood meta_data
     if meta_data is None:
         meta_data = dict()
+    likelihood.label = label
+    likelihood.outdir = outdir
     meta_data['likelihood'] = likelihood.meta_data
     meta_data["loaded_modules"] = loaded_modules_dict()
 
     if command_line_args.bilby_zero_likelihood_mode:
         from bilby.core.likelihood import ZeroLikelihood
+
         likelihood = ZeroLikelihood(likelihood)
 
     if isinstance(sampler, Sampler):
@@ -156,66 +189,88 @@ def run_sampler(likelihood, priors=None, label='label', outdir='outdir',
         if sampler.lower() in IMPLEMENTED_SAMPLERS:
             sampler_class = IMPLEMENTED_SAMPLERS[sampler.lower()]
             sampler = sampler_class(
-                likelihood, priors=priors, outdir=outdir, label=label,
-                injection_parameters=injection_parameters, meta_data=meta_data,
-                use_ratio=use_ratio, plot=plot, result_class=result_class,
-                npool=npool, **kwargs)
+                likelihood,
+                priors=priors,
+                outdir=outdir,
+                label=label,
+                injection_parameters=injection_parameters,
+                meta_data=meta_data,
+                use_ratio=use_ratio,
+                plot=plot,
+                result_class=result_class,
+                npool=npool,
+                **kwargs
+            )
         else:
             print(IMPLEMENTED_SAMPLERS)
-            raise ValueError(
-                "Sampler {} not yet implemented".format(sampler))
+            raise ValueError("Sampler {} not yet implemented".format(sampler))
     elif inspect.isclass(sampler):
         sampler = sampler.__init__(
-            likelihood, priors=priors,
-            outdir=outdir, label=label, use_ratio=use_ratio, plot=plot,
-            injection_parameters=injection_parameters, meta_data=meta_data,
-            npool=npool, **kwargs)
+            likelihood,
+            priors=priors,
+            outdir=outdir,
+            label=label,
+            use_ratio=use_ratio,
+            plot=plot,
+            injection_parameters=injection_parameters,
+            meta_data=meta_data,
+            npool=npool,
+            **kwargs
+        )
     else:
         raise ValueError(
             "Provided sampler should be a Sampler object or name of a known "
-            "sampler: {}.".format(', '.join(IMPLEMENTED_SAMPLERS.keys())))
+            "sampler: {}.".format(", ".join(IMPLEMENTED_SAMPLERS.keys()))
+        )
 
     if sampler.cached_result:
         logger.warning("Using cached result")
-        return sampler.cached_result
-
-    start_time = datetime.datetime.now()
-    if command_line_args.bilby_test_mode:
-        result = sampler._run_test()
-    else:
-        result = sampler.run_sampler()
-    end_time = datetime.datetime.now()
-
-    # Some samplers calculate the sampling time internally
-    if result.sampling_time is None:
-        result.sampling_time = end_time - start_time
-    elif isinstance(result.sampling_time, float):
-        result.sampling_time = datetime.timedelta(result.sampling_time)
-    logger.info('Sampling time: {}'.format(result.sampling_time))
-    # Convert sampling time into seconds
-    result.sampling_time = result.sampling_time.total_seconds()
-
-    if sampler.use_ratio:
-        result.log_noise_evidence = likelihood.noise_log_likelihood()
-        result.log_bayes_factor = result.log_evidence
-        result.log_evidence = \
-            result.log_bayes_factor + result.log_noise_evidence
+        result = sampler.cached_result
     else:
-        result.log_noise_evidence = likelihood.noise_log_likelihood()
-        result.log_bayes_factor = \
-            result.log_evidence - result.log_noise_evidence
+        # Run the sampler
+        start_time = datetime.datetime.now()
+        if command_line_args.bilby_test_mode:
+            result = sampler._run_test()
+        else:
+            result = sampler.run_sampler()
+        end_time = datetime.datetime.now()
 
-    # Initial save of the sampler in case of failure in post-processing
-    if save:
-        result.save_to_file(extension=save, gzip=gzip)
+        # Some samplers calculate the sampling time internally
+        if result.sampling_time is None:
+            result.sampling_time = end_time - start_time
+        elif isinstance(result.sampling_time, (float, int)):
+            result.sampling_time = datetime.timedelta(seconds=result.sampling_time)
+
+        logger.info('Sampling time: {}'.format(result.sampling_time))
+        # Convert sampling time into seconds
+        result.sampling_time = result.sampling_time.total_seconds()
+
+        if sampler.use_ratio:
+            result.log_noise_evidence = likelihood.noise_log_likelihood()
+            result.log_bayes_factor = result.log_evidence
+            result.log_evidence = \
+                result.log_bayes_factor + result.log_noise_evidence
+        else:
+            result.log_noise_evidence = likelihood.noise_log_likelihood()
+            result.log_bayes_factor = \
+                result.log_evidence - result.log_noise_evidence
+
+        if None not in [result.injection_parameters, conversion_function]:
+            result.injection_parameters = conversion_function(
+                result.injection_parameters)
+
+        # Initial save of the sampler in case of failure in samples_to_posterior
+        if save:
+            result.save_to_file(extension=save, gzip=gzip)
 
     if None not in [result.injection_parameters, conversion_function]:
-        result.injection_parameters = conversion_function(
-            result.injection_parameters)
+        result.injection_parameters = conversion_function(result.injection_parameters)
 
-    result.samples_to_posterior(likelihood=likelihood, priors=result.priors,
-                                conversion_function=conversion_function,
-                                npool=npool)
+    # Check if the posterior has already been created
+    if getattr(result, "_posterior", None) is None:
+        result.samples_to_posterior(likelihood=likelihood, priors=result.priors,
+                                    conversion_function=conversion_function,
+                                    npool=npool)
 
     if save:
         # The overwrite here ensures we overwrite the initially stored data
@@ -232,5 +287,7 @@ def _check_marginalized_parameters_not_sampled(likelihood, priors):
         if key in priors:
             if not isinstance(priors[key], (float, DeltaFunction)):
                 raise SamplingMarginalisedParameterError(
-                    "Likelihood is {} marginalized but you are trying to sample in {}. "
-                    .format(key, key))
+                    "Likelihood is {} marginalized but you are trying to sample in {}. ".format(
+                        key, key
+                    )
+                )
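
For reference, a minimal end-to-end call (toy data, invented parameter names) that exercises the reformatted `run_sampler` signature and the new `zeus` entry in `IMPLEMENTED_SAMPLERS`:

    import numpy as np
    import bilby

    x = np.linspace(0, 1, 100)
    y = np.random.normal(3, 1, 100)
    likelihood = bilby.core.likelihood.GaussianLikelihood(
        x=x, y=y, func=lambda x, mu: mu + 0 * x, sigma=1
    )
    priors = dict(mu=bilby.core.prior.Uniform(0, 5, "mu"))
    result = bilby.run_sampler(
        likelihood=likelihood,
        priors=priors,
        sampler="zeus",
        nwalkers=50,
        iterations=200,
        label="zeus_demo",
        outdir="outdir",
    )
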
diff --git a/bilby/core/sampler/base_sampler.py b/bilby/core/sampler/base_sampler.py
index 5e25a7e54247fa0dcc7f5e9841e308bbfd071173..215104a98087bb91abc3964684edf1a0a5d0d458 100644
--- a/bilby/core/sampler/base_sampler.py
+++ b/bilby/core/sampler/base_sampler.py
@@ -505,14 +505,21 @@ class Sampler(object):
 
         logger.debug("Checking cached data")
         if self.cached_result:
-            check_keys = ['search_parameter_keys', 'fixed_parameter_keys',
-                          'kwargs']
+            check_keys = ['search_parameter_keys', 'fixed_parameter_keys']
             use_cache = True
             for key in check_keys:
                 if self.cached_result._check_attribute_match_to_other_object(
                         key, self) is False:
                     logger.debug("Cached value {} is unmatched".format(key))
                     use_cache = False
+            try:
+                # Recursively check the dictionaries, allowing for numpy arrays
+                np.testing.assert_equal(
+                    self.meta_data["likelihood"],
+                    self.cached_result.meta_data["likelihood"]
+                )
+            except AssertionError:
+                use_cache = False
             if use_cache is False:
                 self.cached_result = None
 
diff --git a/bilby/core/sampler/cpnest.py b/bilby/core/sampler/cpnest.py
index 7a089a4ca559bf9a52b890c873e6a052e5fc72b7..e64365f2e423c9ac4968e8c9964f4d26cd55bc00 100644
--- a/bilby/core/sampler/cpnest.py
+++ b/bilby/core/sampler/cpnest.py
@@ -3,6 +3,7 @@ import array
 import copy
 
 import numpy as np
+from numpy.lib.recfunctions import structured_to_unstructured
 from pandas import DataFrame
 
 from .base_sampler import NestedSampler
@@ -121,11 +122,12 @@ class Cpnest(NestedSampler):
             out.plot()
 
         self.calc_likelihood_count()
-        self.result.posterior = DataFrame(out.posterior_samples)
+        self.result.samples = structured_to_unstructured(
+            out.posterior_samples[self.search_parameter_keys]
+        )
+        self.result.log_likelihood_evaluations = out.posterior_samples['logL']
         self.result.nested_samples = DataFrame(out.get_nested_samples(filename=''))
         self.result.nested_samples.rename(columns=dict(logL='log_likelihood'), inplace=True)
-        self.result.posterior.rename(columns=dict(logL='log_likelihood', logPrior='log_prior'),
-                                     inplace=True)
         _, log_weights = compute_weights(np.array(self.result.nested_samples.log_likelihood),
                                          np.array(out.NS.state.nlive))
         self.result.nested_samples['weights'] = np.exp(log_weights)
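
`structured_to_unstructured` turns the named columns of cpnest's structured posterior array into the plain `(nsamples, ndim)` float array that `result.samples` expects; a standalone illustration with invented field names:

    import numpy as np
    from numpy.lib.recfunctions import structured_to_unstructured

    posterior = np.array(
        [(1.0, 2.0, -10.5), (1.1, 2.2, -9.8)],
        dtype=[("mass_1", float), ("mass_2", float), ("logL", float)],
    )
    samples = structured_to_unstructured(posterior[["mass_1", "mass_2"]])
    print(samples.shape)  # (2, 2)
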
diff --git a/bilby/core/sampler/dynesty.py b/bilby/core/sampler/dynesty.py
index adc28f84922f5ef32b7a7b3a8fc78bb24a2bd04a..154866becde66a1438b9f35606b97d566cecffa8 100644
--- a/bilby/core/sampler/dynesty.py
+++ b/bilby/core/sampler/dynesty.py
@@ -148,7 +148,8 @@ class Dynesty(NestedSampler):
     def __init__(self, likelihood, priors, outdir='outdir', label='label',
                  use_ratio=False, plot=False, skip_import_verification=False,
                  check_point=True, check_point_plot=True, n_check_point=None,
-                 check_point_delta_t=600, resume=True, exit_code=130, **kwargs):
+                 check_point_delta_t=600, resume=True, nestcheck=False, exit_code=130, **kwargs):
+
         super(Dynesty, self).__init__(likelihood=likelihood, priors=priors,
                                       outdir=outdir, label=label, use_ratio=use_ratio,
                                       plot=plot, skip_import_verification=skip_import_verification,
@@ -162,6 +163,8 @@ class Dynesty(NestedSampler):
         self._reflective = list()
         self._apply_dynesty_boundaries()
 
+        self.nestcheck = nestcheck
+
         if self.n_check_point is None:
             self.n_check_point = 1000
         self.check_point_delta_t = check_point_delta_t
@@ -243,6 +246,12 @@ class Dynesty(NestedSampler):
             else:
                 self._last_print_time = _time
 
+                # Add time in current run to overall sampling time
+                total_time = self.sampling_time + _time - self.start_time
+
+                # Remove fractional seconds
+                total_time_str = str(total_time).split('.')[0]
+
         # Extract results at the current iteration.
         (worst, ustar, vstar, loglstar, logvol, logwt,
          logz, logzvar, h, nc, worst_it, boundidx, bounditer,
@@ -278,12 +287,11 @@ class Dynesty(NestedSampler):
             self.pbar.set_postfix_str(" ".join(string), refresh=False)
             self.pbar.update(niter - self.pbar.n)
         elif "interval" in self.kwargs["print_method"]:
-            formatted = " ".join([str(_time - self.start_time)] + string)
-            print("{}it [{}]".format(niter, formatted), file=sys.stdout)
+            formatted = " ".join([total_time_str] + string)
+            print("{}it [{}]".format(niter, formatted), file=sys.stdout, flush=True)
         else:
-            _time = datetime.datetime.now()
-            formatted = " ".join([str(_time - self.start_time)] + string)
-            print("{}it [{}]".format(niter, formatted), file=sys.stdout)
+            formatted = " ".join([total_time_str] + string)
+            print("{}it [{}]".format(niter, formatted), file=sys.stdout, flush=True)
 
     def _apply_dynesty_boundaries(self):
         self._periodic = list()
@@ -302,6 +310,14 @@ class Dynesty(NestedSampler):
         self.kwargs["periodic"] = self._periodic
         self.kwargs["reflective"] = self._reflective
 
+    def nestcheck_data(self, out_file):
+        import nestcheck.data_processing
+        import pickle
+        ns_run = nestcheck.data_processing.process_dynesty_run(out_file)
+        nestcheck_result = "{}/{}_nestcheck.pickle".format(self.outdir, self.label)
+        with open(nestcheck_result, 'wb') as file_nest:
+            pickle.dump(ns_run, file_nest)
+
     def _setup_pool(self):
         if self.kwargs["pool"] is not None:
             logger.info("Using user defined pool.")
@@ -396,6 +412,10 @@ class Dynesty(NestedSampler):
             print("")
 
         check_directory_exists_and_if_not_mkdir(self.outdir)
+
+        if self.nestcheck:
+            self.nestcheck_data(out)
+
         dynesty_result = "{}/{}_dynesty.pickle".format(self.outdir, self.label)
         with open(dynesty_result, 'wb') as file:
             dill.dump(out, file)
@@ -595,6 +615,11 @@ class Dynesty(NestedSampler):
         from ... import __version__ as bilby_version
         from dynesty import __version__ as dynesty_version
         import dill
+
+        if getattr(self, "sampler", None) is None:
+            # Sampler not initialized, not able to write current state
+            return
+
         check_directory_exists_and_if_not_mkdir(self.outdir)
         end_time = datetime.datetime.now()
         if hasattr(self, 'start_time'):
@@ -619,8 +644,6 @@ class Dynesty(NestedSampler):
         if self.sampler.pool is not None:
             self.sampler.M = self.sampler.pool.map
 
-        self.dump_samples_to_dat()
-
     def dump_samples_to_dat(self):
         sampler = self.sampler
         ln_weights = sampler.saved_logwt - sampler.saved_logz[-1]
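
A hedged usage sketch of the new `nestcheck` flag (toy likelihood, invented labels; requires the optional `nestcheck` package): when enabled, the processed run is pickled next to the usual dynesty checkpoint and can be handed to nestcheck's diagnostics.

    import pickle

    import numpy as np
    import bilby

    likelihood = bilby.core.likelihood.GaussianLikelihood(
        x=np.linspace(0, 1, 50),
        y=np.random.normal(1, 0.2, 50),
        func=lambda x, mu: mu + 0 * x,
        sigma=0.2,
    )
    priors = dict(mu=bilby.core.prior.Uniform(0, 2, "mu"))
    result = bilby.run_sampler(
        likelihood, priors, sampler="dynesty", nlive=100,
        nestcheck=True, label="demo", outdir="outdir",
    )

    # The pickle written by Dynesty.nestcheck_data
    with open("outdir/demo_nestcheck.pickle", "rb") as f:
        ns_run = pickle.load(f)
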
diff --git a/bilby/core/sampler/emcee.py b/bilby/core/sampler/emcee.py
index 48516aa94e4d0770cd14adcbe31f73ed0d63753c..1f9ba786ccd1230db6f073defa90c1ca78bf9630 100644
--- a/bilby/core/sampler/emcee.py
+++ b/bilby/core/sampler/emcee.py
@@ -23,7 +23,7 @@ class Emcee(MCMCSampler):
 
     Parameters
     ==========
-    nwalkers: int, (100)
+    nwalkers: int, (500)
         The number of walkers
     nsteps: int, (100)
         The number of steps
@@ -273,7 +273,7 @@ class Emcee(MCMCSampler):
 
     @property
     def sampler(self):
-        """ Returns the ptemcee sampler object
+        """ Returns the emcee sampler object
 
         If, already initialized, returns the stored _sampler value. Otherwise,
         first checks if there is a pickle file from which to load. If there is
diff --git a/bilby/core/sampler/nessai.py b/bilby/core/sampler/nessai.py
index 0ad68945f9f9b0e27913e32e4c735be3c040a073..d8bb578a4d681c8aca425cd0b4825c651dfa73e0 100644
--- a/bilby/core/sampler/nessai.py
+++ b/bilby/core/sampler/nessai.py
@@ -1,4 +1,5 @@
 import numpy as np
+import os
 from pandas import DataFrame
 
 from .base_sampler import NestedSampler
@@ -15,55 +16,42 @@ class Nessai(NestedSampler):
 
     Documentation: https://nessai.readthedocs.io/
     """
-    default_kwargs = dict(
-        output=None,
-        nlive=1000,
-        stopping=0.1,
-        resume=True,
-        max_iteration=None,
-        checkpointing=True,
-        seed=1234,
-        acceptance_threshold=0.01,
-        analytic_priors=False,
-        maximum_uninformed=1000,
-        uninformed_proposal=None,
-        uninformed_proposal_kwargs=None,
-        flow_class=None,
-        flow_config=None,
-        training_frequency=None,
-        reset_weights=False,
-        reset_permutations=False,
-        reset_acceptance=False,
-        train_on_empty=True,
-        cooldown=100,
-        memory=False,
-        poolsize=None,
-        drawsize=None,
-        max_poolsize_scale=10,
-        update_poolsize=False,
-        latent_prior='truncated_gaussian',
-        draw_latent_kwargs=None,
-        compute_radius_with_all=False,
-        min_radius=False,
-        max_radius=50,
-        check_acceptance=False,
-        fuzz=1.0,
-        expansion_fraction=1.0,
-        rescale_parameters=True,
-        rescale_bounds=[-1, 1],
-        update_bounds=False,
-        boundary_inversion=False,
-        inversion_type='split', detect_edges=False,
-        detect_edges_kwargs=None,
-        reparameterisations=None,
-        n_pool=None,
-        max_threads=1,
-        pytorch_threads=None,
-        plot=None,
-        proposal_plots=False
-    )
+    _default_kwargs = None
     seed_equiv_kwargs = ['sampling_seed']
 
+    @property
+    def default_kwargs(self):
+        """Default kwargs for nessai.
+
+        Retrieves default values from nessai directly and then includes any
+        bilby-specific defaults. This avoids the need to update bilby when the
+        defaults change or new kwargs are added to nessai.
+        """
+        if not self._default_kwargs:
+            from inspect import signature
+            from nessai.flowsampler import FlowSampler
+            from nessai.nestedsampler import NestedSampler
+            from nessai.proposal import AugmentedFlowProposal, FlowProposal
+
+            kwargs = {}
+            classes = [
+                AugmentedFlowProposal,
+                FlowProposal,
+                NestedSampler,
+                FlowSampler,
+            ]
+            for c in classes:
+                kwargs.update(
+                    {k: v.default for k, v in signature(c).parameters.items() if v.default is not v.empty}
+                )
+            # Defaults for bilby that will override nessai defaults
+            bilby_defaults = dict(
+                output=None,
+            )
+            kwargs.update(bilby_defaults)
+            self._default_kwargs = kwargs
+        return self._default_kwargs
+
     def log_prior(self, theta):
         """
 
@@ -82,7 +70,7 @@ class Nessai(NestedSampler):
     def run_sampler(self):
         from nessai.flowsampler import FlowSampler
         from nessai.model import Model as BaseModel
-        from nessai.livepoint import dict_to_live_points
+        from nessai.livepoint import dict_to_live_points, live_points_to_array
         from nessai.posterior import compute_weights
         from nessai.utils import setup_logger
 
@@ -148,12 +136,13 @@ class Nessai(NestedSampler):
         # Manually set likelihood evaluations because parallelisation breaks the counter
         self.result.num_likelihood_evaluations = out.ns.likelihood_evaluations[-1]
 
-        self.result.posterior = DataFrame(out.posterior_samples)
+        self.result.samples = live_points_to_array(
+            out.posterior_samples, self.search_parameter_keys
+        )
+        self.result.log_likelihood_evaluations = out.posterior_samples['logL']
         self.result.nested_samples = DataFrame(out.nested_samples)
         self.result.nested_samples.rename(
             columns=dict(logL='log_likelihood', logP='log_prior'), inplace=True)
-        self.result.posterior.rename(
-            columns=dict(logL='log_likelihood', logP='log_prior'), inplace=True)
         _, log_weights = compute_weights(np.array(self.result.nested_samples.log_likelihood),
                                          np.array(out.ns.state.nlive))
         self.result.nested_samples['weights'] = np.exp(log_weights)
@@ -194,9 +183,9 @@ class Nessai(NestedSampler):
             self.kwargs['n_pool'] = None
 
         if not self.kwargs['output']:
-            self.kwargs['output'] = self.outdir + '/{}_nessai/'.format(self.label)
-        if self.kwargs['output'].endswith('/') is False:
-            self.kwargs['output'] = '{}/'.format(self.kwargs['output'])
+            self.kwargs['output'] = os.path.join(
+                self.outdir, '{}_nessai'.format(self.label), ''
+            )
 
         check_directory_exists_and_if_not_mkdir(self.kwargs['output'])
         NestedSampler._verify_kwargs_against_default_kwargs(self)
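
The `default_kwargs` property harvests defaults straight from nessai's call signatures, so bilby no longer hard-codes them. The same pattern in isolation, with a stand-in class:

    from inspect import signature

    class Demo:
        def __init__(self, nlive=1000, stopping=0.1, seed=None):
            pass

    defaults = {
        k: v.default
        for k, v in signature(Demo).parameters.items()
        if v.default is not v.empty
    }
    print(defaults)  # {'nlive': 1000, 'stopping': 0.1, 'seed': None}
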
diff --git a/bilby/core/sampler/pymc3.py b/bilby/core/sampler/pymc3.py
index c5c0a0d18fdd9fc8acc914034af2ee01d3f31ecf..25522f82ff4385f7cd7f572bfdfaea129af3b0e5 100644
--- a/bilby/core/sampler/pymc3.py
+++ b/bilby/core/sampler/pymc3.py
@@ -551,23 +551,13 @@ class Pymc3(MCMCSampler):
 
         with self.pymc3_model:
             # perform the sampling
-            trace = pymc3.sample(**self.kwargs)
+            trace = pymc3.sample(**self.kwargs, return_inferencedata=True)
 
-        nparams = len([key for key in self.priors.keys() if not isinstance(self.priors[key], DeltaFunction)])
-        nsamples = len(trace) * self.chains
-
-        self.result.samples = np.zeros((nsamples, nparams))
-        count = 0
-        for key in self.priors.keys():
-            if not isinstance(self.priors[key], DeltaFunction):  # ignore DeltaFunction variables
-                if not isinstance(self.priors[key], MultivariateGaussian):
-                    self.result.samples[:, count] = trace[key]
-                else:
-                    # get multivariate Gaussian samples
-                    priorset = self.multivariate_normal_sets[key]['set']
-                    index = self.multivariate_normal_sets[key]['index']
-                    self.result.samples[:, count] = trace[priorset][:, index]
-                count += 1
+        posterior = trace.posterior.to_dataframe().reset_index()
+        self.result.samples = posterior[self.search_parameter_keys]
+        self.result.log_likelihood_evaluations = np.sum(
+            trace.log_likelihood.likelihood.values, axis=-1
+        ).flatten()
         self.result.sampler_output = np.nan
         self.calculate_autocorrelation(self.result.samples)
         self.result.log_evidence = np.nan
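
With `return_inferencedata=True`, `pymc3.sample` returns an arviz `InferenceData` whose `posterior` group is an xarray Dataset; flattening it to a DataFrame is what the new extraction does. A minimal sketch (toy model, assuming pymc3 >= 3.10):

    import pymc3

    with pymc3.Model():
        pymc3.Normal("mu", mu=0, sigma=1)
        trace = pymc3.sample(
            draws=100, tune=100, chains=2, return_inferencedata=True
        )

    # The (chain, draw) multi-index becomes ordinary columns, as in run_sampler
    posterior = trace.posterior.to_dataframe().reset_index()
    samples = posterior[["mu"]]
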
diff --git a/bilby/core/sampler/pymultinest.py b/bilby/core/sampler/pymultinest.py
index 91356d14667f4b7e8538a70a31d5a0de8af61de2..d9869362256ad60c8029acdd0e19020a31dea8f8 100644
--- a/bilby/core/sampler/pymultinest.py
+++ b/bilby/core/sampler/pymultinest.py
@@ -79,6 +79,12 @@ class Pymultinest(NestedSampler):
         temporary_directory=True,
         **kwargs
     ):
+        try:
+            from mpi4py import MPI
+
+            using_mpi = MPI.COMM_WORLD.Get_size() > 1
+        except ImportError:
+            using_mpi = False
         super(Pymultinest, self).__init__(
             likelihood=likelihood,
             priors=priors,
@@ -92,7 +98,6 @@ class Pymultinest(NestedSampler):
         )
         self._apply_multinest_boundaries()
         self.exit_code = exit_code
-        using_mpi = len([key for key in os.environ if "MPI" in key])
         if using_mpi and temporary_directory:
             logger.info(
                 "Temporary directory incompatible with MPI, "
@@ -111,15 +116,15 @@ class Pymultinest(NestedSampler):
                     kwargs["n_live_points"] = kwargs.pop(equiv)
 
     def _verify_kwargs_against_default_kwargs(self):
-        """ Check the kwargs """
+        """Check the kwargs"""
 
         self.outputfiles_basename = self.kwargs.pop("outputfiles_basename", None)
 
         # for PyMultiNest >=2.9 the n_params kwarg cannot be None
         if self.kwargs["n_params"] is None:
             self.kwargs["n_params"] = self.ndim
-        if self.kwargs['dump_callback'] is None:
-            self.kwargs['dump_callback'] = self._dump_callback
+        if self.kwargs["dump_callback"] is None:
+            self.kwargs["dump_callback"] = self._dump_callback
         NestedSampler._verify_kwargs_against_default_kwargs(self)
 
     def _dump_callback(self, *args, **kwargs):
@@ -166,7 +171,7 @@ class Pymultinest(NestedSampler):
             )
 
     def write_current_state_and_exit(self, signum=None, frame=None):
-        """ Write current state and exit on exit_code """
+        """Write current state and exit on exit_code"""
         logger.info(
             "Run interrupted by signal {}: checkpoint and exit on {}".format(
                 signum, self.exit_code
@@ -187,11 +192,13 @@ class Pymultinest(NestedSampler):
                 self.outputfiles_basename, self.temporary_outputfiles_basename
             )
         )
-        if self.outputfiles_basename.endswith('/'):
+        if self.outputfiles_basename.endswith("/"):
             outputfiles_basename_stripped = self.outputfiles_basename[:-1]
         else:
             outputfiles_basename_stripped = self.outputfiles_basename
-        distutils.dir_util.copy_tree(self.temporary_outputfiles_basename, outputfiles_basename_stripped)
+        distutils.dir_util.copy_tree(
+            self.temporary_outputfiles_basename, outputfiles_basename_stripped
+        )
 
     def _move_temporary_directory_to_proper_path(self):
         """
@@ -241,9 +248,9 @@ class Pymultinest(NestedSampler):
         return self.result
 
     def _check_and_load_sampling_time_file(self):
-        self.time_file_path = self.kwargs["outputfiles_basename"] + '/sampling_time.dat'
+        self.time_file_path = self.kwargs["outputfiles_basename"] + "/sampling_time.dat"
         if os.path.exists(self.time_file_path):
-            with open(self.time_file_path, 'r') as time_file:
+            with open(self.time_file_path, "r") as time_file:
                 self.total_sampling_time = float(time_file.readline())
         else:
             self.total_sampling_time = 0
@@ -253,7 +260,7 @@ class Pymultinest(NestedSampler):
         new_sampling_time = current_time - self.start_time
         self.total_sampling_time += new_sampling_time
         self.start_time = current_time
-        with open(self.time_file_path, 'w') as time_file:
+        with open(self.time_file_path, "w") as time_file:
             time_file.write(str(self.total_sampling_time))
 
     def _clean_up_run_directory(self):
@@ -271,16 +278,18 @@ class Pymultinest(NestedSampler):
         estimate of `remaining_prior_volume / N`.
         """
         import pandas as pd
+
         dir_ = self.kwargs["outputfiles_basename"]
         dead_points = np.genfromtxt(dir_ + "/ev.dat")
         live_points = np.genfromtxt(dir_ + "/phys_live.points")
 
         nlive = self.kwargs["n_live_points"]
-        final_log_prior_volume = - len(dead_points) / nlive - np.log(nlive)
+        final_log_prior_volume = -len(dead_points) / nlive - np.log(nlive)
         live_points = np.insert(live_points, -1, final_log_prior_volume, axis=-1)
 
         nested_samples = pd.DataFrame(
             np.vstack([dead_points, live_points]).copy(),
-            columns=self.search_parameter_keys + ["log_likelihood", "log_prior_volume", "mode"]
+            columns=self.search_parameter_keys
+            + ["log_likelihood", "log_prior_volume", "mode"],
         )
         return nested_samples
diff --git a/bilby/core/sampler/zeus.py b/bilby/core/sampler/zeus.py
new file mode 100644
index 0000000000000000000000000000000000000000..78c3529ea00c1c9ee518a8698e2f70bc29fe194d
--- /dev/null
+++ b/bilby/core/sampler/zeus.py
@@ -0,0 +1,392 @@
+import os
+import signal
+import shutil
+import sys
+from collections import namedtuple
+from shutil import copyfile
+
+import numpy as np
+from pandas import DataFrame
+
+from ..utils import logger, check_directory_exists_and_if_not_mkdir
+from .base_sampler import MCMCSampler, SamplerError
+
+
+class Zeus(MCMCSampler):
+    """bilby wrapper for Zeus (https://zeus-mcmc.readthedocs.io/)
+
+    All positional and keyword arguments (i.e., the args and kwargs) passed to
+    `run_sampler` will be propagated to `zeus.EnsembleSampler`; see the
+    documentation for that class for further help. Under Other Parameters, we
+    list commonly used kwargs and the bilby defaults.
+
+    Parameters
+    ==========
+    nwalkers: int, (500)
+        The number of walkers
+    nsteps: int, (100)
+        The number of steps
+    nburn: int (None)
+        If given, the fixed number of steps to discard as burn-in. These will
+        be discarded from the total number of steps set by `nsteps` and
+        therefore the value must be less than `nsteps`. Else, nburn is
+        estimated from the autocorrelation time
+    burn_in_fraction: float, (0.25)
+        The fraction of steps to discard as burn-in in the event that the
+        autocorrelation time cannot be calculated
+    burn_in_act: float
+        The number of autocorrelation times to discard as burn-in
+
+    """
+
+    default_kwargs = dict(
+        nwalkers=500,
+        args=[],
+        kwargs={},
+        pool=None,
+        log_prob0=None,
+        start=None,
+        blobs0=None,
+        iterations=100,
+        thin=1,
+    )
+
+    def __init__(
+        self,
+        likelihood,
+        priors,
+        outdir="outdir",
+        label="label",
+        use_ratio=False,
+        plot=False,
+        skip_import_verification=False,
+        pos0=None,
+        nburn=None,
+        burn_in_fraction=0.25,
+        resume=True,
+        burn_in_act=3,
+        **kwargs
+    ):
+        import zeus
+
+        self.zeus = zeus
+
+        super(Zeus, self).__init__(
+            likelihood=likelihood,
+            priors=priors,
+            outdir=outdir,
+            label=label,
+            use_ratio=use_ratio,
+            plot=plot,
+            skip_import_verification=skip_import_verification,
+            **kwargs
+        )
+        self.resume = resume
+        self.pos0 = pos0
+        self.nburn = nburn
+        self.burn_in_fraction = burn_in_fraction
+        self.burn_in_act = burn_in_act
+
+        signal.signal(signal.SIGTERM, self.checkpoint_and_exit)
+        signal.signal(signal.SIGINT, self.checkpoint_and_exit)
+
+    def _translate_kwargs(self, kwargs):
+        if "nwalkers" not in kwargs:
+            for equiv in self.nwalkers_equiv_kwargs:
+                if equiv in kwargs:
+                    kwargs["nwalkers"] = kwargs.pop(equiv)
+        if "iterations" not in kwargs:
+            if "nsteps" in kwargs:
+                kwargs["iterations"] = kwargs.pop("nsteps")
+
+        # check if using emcee-style arguments
+        if "start" not in kwargs:
+            if "rstate0" in kwargs:
+                kwargs["start"] = kwargs.pop("rstate0")
+        if "log_prob0" not in kwargs:
+            if "lnprob0" in kwargs:
+                kwargs["log_prob0"] = kwargs.pop("lnprob0")
+
+        if "threads" in kwargs:
+            if kwargs["threads"] != 1:
+                logger.warning(
+                    "The 'threads' argument cannot be used for "
+                    "parallelisation. This run will proceed "
+                    "without parallelisation, but consider the use "
+                    "of an appropriate Pool object passed to the "
+                    "'pool' keyword."
+                )
+                kwargs["threads"] = 1
+
+    @property
+    def sampler_function_kwargs(self):
+        keys = ["log_prob0", "start", "blobs0", "iterations", "thin", "progress"]
+
+        function_kwargs = {key: self.kwargs[key] for key in keys if key in self.kwargs}
+
+        return function_kwargs
+
+    @property
+    def sampler_init_kwargs(self):
+        init_kwargs = {
+            key: value
+            for key, value in self.kwargs.items()
+            if key not in self.sampler_function_kwargs
+        }
+
+        init_kwargs["logprob_fn"] = self.lnpostfn
+        init_kwargs["ndim"] = self.ndim
+
+        return init_kwargs
+
+    def lnpostfn(self, theta):
+        log_prior = self.log_prior(theta)
+        if np.isinf(log_prior):
+            return -np.inf, [np.nan, np.nan]
+        else:
+            log_likelihood = self.log_likelihood(theta)
+            return log_likelihood + log_prior, [log_likelihood, log_prior]
+
+    @property
+    def nburn(self):
+        if type(self.__nburn) in [float, int]:
+            return int(self.__nburn)
+        elif self.result.max_autocorrelation_time is None:
+            return int(self.burn_in_fraction * self.nsteps)
+        else:
+            return int(self.burn_in_act * self.result.max_autocorrelation_time)
+
+    @nburn.setter
+    def nburn(self, nburn):
+        if isinstance(nburn, (float, int)):
+            if nburn > self.kwargs["iterations"] - 1:
+                raise ValueError(
+                    "Number of burn-in samples must be smaller "
+                    "than the total number of iterations"
+                )
+
+        self.__nburn = nburn
+
+    @property
+    def nwalkers(self):
+        return self.kwargs["nwalkers"]
+
+    @property
+    def nsteps(self):
+        return self.kwargs["iterations"]
+
+    @nsteps.setter
+    def nsteps(self, nsteps):
+        self.kwargs["iterations"] = nsteps
+
+    @property
+    def stored_chain(self):
+        """Read the stored zero-temperature chain data in from disk"""
+        return np.genfromtxt(self.checkpoint_info.chain_file, names=True)
+
+    @property
+    def stored_samples(self):
+        """Returns the samples stored on disk"""
+        return self.stored_chain[self.search_parameter_keys]
+
+    @property
+    def stored_loglike(self):
+        """Returns the log-likelihood stored on disk"""
+        return self.stored_chain["log_l"]
+
+    @property
+    def stored_logprior(self):
+        """Returns the log-prior stored on disk"""
+        return self.stored_chain["log_p"]
+
+    def _init_chain_file(self):
+        with open(self.checkpoint_info.chain_file, "w+") as ff:
+            ff.write(
+                "walker\t{}\tlog_l\tlog_p\n".format(
+                    "\t".join(self.search_parameter_keys)
+                )
+            )
+
+    @property
+    def checkpoint_info(self):
+        """Defines various things related to checkpointing and storing data
+
+        Returns
+        =======
+        checkpoint_info: named_tuple
+            An object with attributes `sampler_file`, `chain_file`, and
+            `chain_template`. The first two give paths to where the sampler and
+            chain data is stored, the last a formatted-str-template with which
+            to write the chain data to disk
+
+        """
+        out_dir = os.path.join(
+            self.outdir, "{}_{}".format(self.__class__.__name__.lower(), self.label)
+        )
+        check_directory_exists_and_if_not_mkdir(out_dir)
+
+        chain_file = os.path.join(out_dir, "chain.dat")
+        sampler_file = os.path.join(out_dir, "sampler.pickle")
+        chain_template = (
+            "{:d}" + "\t{:.9e}" * (len(self.search_parameter_keys) + 2) + "\n"
+        )
+
+        CheckpointInfo = namedtuple(
+            "CheckpointInfo", ["sampler_file", "chain_file", "chain_template"]
+        )
+
+        checkpoint_info = CheckpointInfo(
+            sampler_file=sampler_file,
+            chain_file=chain_file,
+            chain_template=chain_template,
+        )
+
+        return checkpoint_info
+
+    @property
+    def sampler_chain(self):
+        nsteps = self._previous_iterations
+        return self.sampler.chain[:, :nsteps, :]
+
+    def checkpoint(self):
+        """Writes a pickle file of the sampler to disk using dill"""
+        import dill
+
+        logger.info(
+            "Checkpointing sampler to file {}".format(self.checkpoint_info.sampler_file)
+        )
+        with open(self.checkpoint_info.sampler_file, "wb") as f:
+            # Overwrites the stored sampler chain with one that is truncated
+            # to only the completed steps
+            self.sampler._chain = self.sampler_chain
+            dill.dump(self._sampler, f)
+
+    def checkpoint_and_exit(self, signum, frame):
+        logger.info("Received signal {}".format(signum))
+        self.checkpoint()
+        sys.exit()
+
+    def _initialise_sampler(self):
+        self._sampler = self.zeus.EnsembleSampler(**self.sampler_init_kwargs)
+        self._init_chain_file()
+
+    @property
+    def sampler(self):
+        """Returns the Zeus sampler object
+
+        If already initialized, returns the stored _sampler value. Otherwise,
+        first checks if there is a pickle file from which to load. If there is
+        not, then initializes the sampler and sets the initial random draw
+
+        """
+        if hasattr(self, "_sampler"):
+            pass
+        elif self.resume and os.path.isfile(self.checkpoint_info.sampler_file):
+            import dill
+
+            logger.info(
+                "Resuming run from checkpoint file {}".format(
+                    self.checkpoint_info.sampler_file
+                )
+            )
+            with open(self.checkpoint_info.sampler_file, "rb") as f:
+                self._sampler = dill.load(f)
+            self._set_pos0_for_resume()
+        else:
+            self._initialise_sampler()
+            self._set_pos0()
+        return self._sampler
+
+    def write_chains_to_file(self, sample):
+        chain_file = self.checkpoint_info.chain_file
+        temp_chain_file = chain_file + ".temp"
+        if os.path.isfile(chain_file):
+            copyfile(chain_file, temp_chain_file)
+
+        points = np.hstack([sample[0], np.array(sample[2])])
+
+        with open(temp_chain_file, "a") as ff:
+            for ii, point in enumerate(points):
+                ff.write(self.checkpoint_info.chain_template.format(ii, *point))
+        shutil.move(temp_chain_file, chain_file)
+
+    @property
+    def _previous_iterations(self):
+        """Returns the number of iterations that the sampler has saved
+
+        This is used when loading in a sampler from a pickle file to figure out
+        how much of the run has already been completed
+        """
+        try:
+            return len(self.sampler.get_blobs())
+        except AttributeError:
+            return 0
+
+    def _draw_pos0_from_prior(self):
+        return np.array(
+            [self.get_random_draw_from_prior() for _ in range(self.nwalkers)]
+        )
+
+    @property
+    def _pos0_shape(self):
+        return (self.nwalkers, self.ndim)
+
+    def _set_pos0(self):
+        if self.pos0 is not None:
+            logger.debug("Using given initial positions for walkers")
+            if isinstance(self.pos0, DataFrame):
+                self.pos0 = self.pos0[self.search_parameter_keys].values
+            elif type(self.pos0) in (list, np.ndarray):
+                self.pos0 = np.squeeze(self.pos0)
+
+            if self.pos0.shape != self._pos0_shape:
+                raise ValueError("Input pos0 should be of shape ndim, nwalkers")
+            logger.debug("Checking input pos0")
+            for draw in self.pos0:
+                self.check_draw(draw)
+        else:
+            logger.debug("Generating initial walker positions from prior")
+            self.pos0 = self._draw_pos0_from_prior()
+
+    def _set_pos0_for_resume(self):
+        self.pos0 = self.sampler.get_last_sample()
+
+    def run_sampler(self):
+        sampler_function_kwargs = self.sampler_function_kwargs
+        iterations = sampler_function_kwargs.pop("iterations")
+        iterations -= self._previous_iterations
+
+        sampler_function_kwargs["start"] = self.pos0
+
+        # main iteration loop
+        for sample in self.sampler.sample(
+            iterations=iterations, **sampler_function_kwargs
+        ):
+            self.write_chains_to_file(sample)
+        self.checkpoint()
+
+        self.result.sampler_output = np.nan
+        self.calculate_autocorrelation(self.sampler.chain.reshape((-1, self.ndim)))
+        self.print_nburn_logging_info()
+
+        self._generate_result()
+
+        self.result.samples = self.sampler.get_chain(flat=True, discard=self.nburn)
+        self.result.walkers = self.sampler.chain
+        return self.result
+
+    def _generate_result(self):
+        self.result.nburn = self.nburn
+        self.calc_likelihood_count()
+        if self.result.nburn > self.nsteps:
+            raise SamplerError(
+                "The run has finished, but the chain is not burned in: "
+                "`nburn < nsteps` ({} < {}). Try increasing the "
+                "number of steps.".format(self.result.nburn, self.nsteps)
+            )
+        blobs = np.array(self.sampler.get_blobs(flat=True, discard=self.nburn)).reshape((-1, 2))
+        log_likelihoods, log_priors = blobs.T
+        self.result.log_likelihood_evaluations = log_likelihoods
+        self.result.log_prior_evaluations = log_priors
+        self.result.log_evidence = np.nan
+        self.result.log_evidence_err = np.nan
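
The sampler property above resolves lazily: resume from a dill checkpoint
when one exists, otherwise initialise afresh and draw starting positions. A
minimal, self-contained sketch of that resume-or-initialise pattern;
ToySampler and load_or_create are illustrative stand-ins, not bilby API:

    import os

    import dill

    class ToySampler:
        """Hypothetical stand-in for the zeus sampler object."""

        def __init__(self):
            self.iteration = 0

    def load_or_create(checkpoint_file, resume=True):
        # Resume from the dill checkpoint if one exists, mirroring the
        # branch on self.resume in the property above
        if resume and os.path.isfile(checkpoint_file):
            with open(checkpoint_file, "rb") as f:
                return dill.load(f)
        # Otherwise start fresh (initial positions would be drawn here)
        return ToySampler()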
diff --git a/bilby/core/utils/io.py b/bilby/core/utils/io.py
index 85095dc6a3d533cb988d8451e2df6354dd5a0960..880abbfe68fb29fe07fc5216a6ab6a8b728ba6ed 100644
--- a/bilby/core/utils/io.py
+++ b/bilby/core/utils/io.py
@@ -4,6 +4,7 @@ import os
 import shutil
 from importlib import import_module
 from pathlib import Path
+from datetime import timedelta
 
 import numpy as np
 import pandas as pd
@@ -76,6 +77,11 @@ class BilbyJsonEncoder(json.JSONEncoder):
                 "__module__": obj.__module__,
                 "__name__": obj.__name__,
             }
+        if isinstance(obj, timedelta):
+            return {
+                "__timedelta__": True,
+                "__total_seconds__": obj.total_seconds()
+            }
         return json.JSONEncoder.default(self, obj)
 
 
@@ -171,6 +178,8 @@ def decode_bilby_json(dct):
     if dct.get("__function__", False) or dct.get("__class__", False):
         default = ".".join([dct["__module__"], dct["__name__"]])
         return getattr(import_module(dct["__module__"]), dct["__name__"], default)
+    if dct.get("__timedelta__", False):
+        return timedelta(seconds=dct["__total_seconds__"])
     return dct
 
 
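
With both halves of this change in place, a timedelta should round-trip
through bilby's JSON machinery. A usage sketch, assuming this patch is
installed (BilbyJsonEncoder and decode_bilby_json are names bilby already
exposes from bilby.core.utils):

    import json
    from datetime import timedelta

    from bilby.core.utils import BilbyJsonEncoder, decode_bilby_json

    # Encode to the {"__timedelta__": ..., "__total_seconds__": ...} form
    encoded = json.dumps(
        {"sampling_time": timedelta(hours=2)}, cls=BilbyJsonEncoder
    )
    # decode_bilby_json rebuilds the timedelta from the total seconds
    decoded = json.loads(encoded, object_hook=decode_bilby_json)
    assert decoded["sampling_time"] == timedelta(hours=2)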
diff --git a/bilby/core/utils/log.py b/bilby/core/utils/log.py
index 999aa445a5900fe99f42d74378000073fcba1ddc..d9db7581c9bf4a08d50bf4329c30d2b0f628e293 100644
--- a/bilby/core/utils/log.py
+++ b/bilby/core/utils/log.py
@@ -73,6 +73,6 @@ def loaded_modules_dict():
     module_names = list(sys.modules.keys())
     vdict = {}
     for key in module_names:
-        if "." not in key:
+        if "." not in str(key):
             vdict[key] = str(getattr(sys.modules[key], "__version__", "N/A"))
     return vdict
diff --git a/bilby/gw/conversion.py b/bilby/gw/conversion.py
index 758b0fdbed673366211f398ac0f2b05681f913fb..9c516c737d2e23db26b0d30c5507d87c95d17655 100644
--- a/bilby/gw/conversion.py
+++ b/bilby/gw/conversion.py
@@ -1,15 +1,17 @@
+import os
 import sys
 import multiprocessing
+import pickle
 
 import numpy as np
 from pandas import DataFrame
 
 from ..core.likelihood import MarginalizedLikelihoodReconstructionError
-from ..core.utils import logger, solar_mass
+from ..core.utils import logger, solar_mass, command_line_args
 from ..core.prior import DeltaFunction
 from .utils import lalsim_SimInspiralTransformPrecessingNewInitialConditions
 from .eos.eos import SpectralDecompositionEOS, EOSFamily, IntegrateTOV
-from .cosmology import get_cosmology
+from .cosmology import get_cosmology, z_at_value
 
 
 def redshift_to_luminosity_distance(redshift, cosmology=None):
@@ -25,7 +27,6 @@ def redshift_to_comoving_distance(redshift, cosmology=None):
 @np.vectorize
 def luminosity_distance_to_redshift(distance, cosmology=None):
     from astropy import units
-    from astropy.cosmology import z_at_value
     cosmology = get_cosmology(cosmology)
     return z_at_value(cosmology.luminosity_distance, distance * units.Mpc)
 
@@ -33,7 +34,6 @@ def luminosity_distance_to_redshift(distance, cosmology=None):
 @np.vectorize
 def comoving_distance_to_redshift(distance, cosmology=None):
     from astropy import units
-    from astropy.cosmology import z_at_value
     cosmology = get_cosmology(cosmology)
     return z_at_value(cosmology.comoving_distance, distance * units.Mpc)
 
@@ -1201,7 +1201,7 @@ def _compute_snrs(args):
 
 
 def generate_posterior_samples_from_marginalized_likelihood(
-        samples, likelihood, npool=1):
+        samples, likelihood, npool=1, block=10, use_cache=True):
     """
     Reconstruct the distance posterior from a run which used a likelihood which
     explicitly marginalised over time/distance/phase.
@@ -1216,6 +1216,11 @@ def generate_posterior_samples_from_marginalized_likelihood(
         Likelihood used during sampling.
     npool: int, (default=1)
         If given, perform generation (where possible) using a multiprocessing pool
+    block: int, (default=10)
+        Size of the blocks to use in multiprocessing
+    use_cache: bool, (default=True)
+        If true, cache the generation so that reconstruction can begin from
+        the cache on restart.
 
     Returns
     =======
@@ -1237,23 +1242,82 @@ def generate_posterior_samples_from_marginalized_likelihood(
 
     logger.info('Reconstructing marginalised parameters.')
 
-    fill_args = [(ii, row, likelihood) for ii, row in samples.iterrows()]
+    try:
+        cache_filename = f"{likelihood.outdir}/.{likelihood.label}_generate_posterior_cache.pickle"
+    except AttributeError:
+        logger.warning("Likelihood has no outdir and label attribute: caching disabled")
+        use_cache = False
+
+    if use_cache and os.path.exists(cache_filename) and not command_line_args.clean:
+        with open(cache_filename, "rb") as f:
+            cached_samples_dict = pickle.load(f)
+
+        # Check the samples are identical between the cache and current
+        if cached_samples_dict["_samples"].equals(samples):
+            # Calculate reconstruction percentage and print a log message
+            nsamples_converted = np.sum(
+                [len(val) for key, val in cached_samples_dict.items() if key != "_samples"]
+            )
+            perc = 100 * nsamples_converted / len(cached_samples_dict["_samples"])
+            logger.info(f'Using cached reconstruction with {perc:0.1f}% converted.')
+        else:
+            logger.info("Cached samples dict out of date, ignoring")
+            cached_samples_dict = dict(_samples=samples)
+
+    else:
+        # Initialize cache dict
+        cached_samples_dict = dict()
+
+        # Store samples to convert for checking
+        cached_samples_dict["_samples"] = samples
+
+    # Set up the multiprocessing
     if npool > 1:
         pool = multiprocessing.Pool(processes=npool)
         logger.info(
             "Using a pool with size {} for nsamples={}"
             .format(npool, len(samples))
         )
-        new_samples = np.array(pool.map(fill_sample, tqdm(fill_args, file=sys.stdout)))
-        pool.close()
     else:
-        new_samples = np.array([fill_sample(xx) for xx in tqdm(fill_args, file=sys.stdout)])
+        pool = None
+
+    fill_args = [(ii, row, likelihood) for ii, row in samples.iterrows()]
+    ii = 0
+    pbar = tqdm(total=len(samples), file=sys.stdout)
+    while ii < len(samples):
+        if ii in cached_samples_dict:
+            ii += block
+            pbar.update(block)
+            continue
+
+        if pool is not None:
+            subset_samples = pool.map(fill_sample, fill_args[ii: ii + block])
+        else:
+            subset_samples = [list(fill_sample(xx)) for xx in fill_args[ii: ii + block]]
+
+        cached_samples_dict[ii] = subset_samples
+
+        if use_cache:
+            with open(cache_filename, "wb") as f:
+                pickle.dump(cached_samples_dict, f)
+
+        ii += block
+        pbar.update(len(subset_samples))
+    pbar.close()
+
+    if pool is not None:
+        pool.close()
+
+    new_samples = np.concatenate(
+        [np.array(val) for key, val in cached_samples_dict.items() if key != "_samples"]
+    )
 
     samples['geocent_time'] = new_samples[:, 0]
     samples['luminosity_distance'] = new_samples[:, 1]
     samples['phase'] = new_samples[:, 2]
     if likelihood.calibration_marginalization:
         samples['recalib_index'] = new_samples[:, 3]
+
     return samples
 
 
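
The reconstruction now proceeds in chunks of block samples, pickling the
partial results dictionary after each chunk so that a restarted job skips
completed chunks. A self-contained toy of the same checkpoint-every-block
pattern (all names here are illustrative):

    import os
    import pickle

    def convert_in_blocks(items, func, cache_file, block=10):
        # Load partial results from any previous, interrupted run
        cache = {}
        if os.path.exists(cache_file):
            with open(cache_file, "rb") as f:
                cache = pickle.load(f)
        ii = 0
        while ii < len(items):
            if ii not in cache:
                cache[ii] = [func(x) for x in items[ii:ii + block]]
                # Checkpoint after every block so a restart resumes here
                with open(cache_file, "wb") as f:
                    pickle.dump(cache, f)
            ii += block
        # Flatten the blocks back into one ordered list
        return [y for key in sorted(cache) for y in cache[key]]

    print(convert_in_blocks(list(range(25)), lambda x: x ** 2, "demo_cache.pkl"))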
diff --git a/bilby/gw/cosmology.py b/bilby/gw/cosmology.py
index ded3f5eb6b01c01c1066afdacb0c13f8f20a7a97..c05c7ada336aefd1b7b2b9a81f6adab2b377177e 100644
--- a/bilby/gw/cosmology.py
+++ b/bilby/gw/cosmology.py
@@ -63,3 +63,15 @@ def set_cosmology(cosmology=None):
         COSMOLOGY[1] = cosmology.name
     else:
         COSMOLOGY[1] = repr(cosmology)
+
+
+def z_at_value(func, fval, **kwargs):
+    """
+    Wrapped version of :code:`astropy.cosmology.z_at_value` to return float
+    rather than an :code:`astropy Quantity` as returned for :code:`astropy>=5`.
+
+    See https://docs.astropy.org/en/stable/api/astropy.cosmology.z_at_value.html#astropy.cosmology.z_at_value
+    for detailed documentation.
+    """
+    from astropy.cosmology import z_at_value
+    return float(z_at_value(func=func, fval=fval, **kwargs))
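
Usage is unchanged relative to calling astropy directly, except for the
return type. For example (assuming astropy is installed):

    from astropy import units
    from astropy.cosmology import Planck15

    from bilby.gw.cosmology import z_at_value

    # A plain float under both astropy<5 and astropy>=5
    redshift = z_at_value(Planck15.luminosity_distance, 1000 * units.Mpc)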
diff --git a/bilby/gw/detector/detectors/GEO600.interferometer b/bilby/gw/detector/detectors/GEO600.interferometer
index bf84d748c8dd88e121fbcb3a5c1aa1c0ddad83bd..addde6d1cd21593eb7a0d91c53bcba93a393171b 100644
--- a/bilby/gw/detector/detectors/GEO600.interferometer
+++ b/bilby/gw/detector/detectors/GEO600.interferometer
@@ -9,5 +9,5 @@ length = 0.6
 latitude = 52 + 14. / 60 + 42.528 / 3600
 longitude = 9 + 48. / 60 + 25.894 / 3600
 elevation = 114.425
-xarm_azimuth = 115.9431
-yarm_azimuth = 21.6117
+xarm_azimuth = 21.6117
+yarm_azimuth = 115.9431
diff --git a/bilby/gw/detector/detectors/K1.interferometer b/bilby/gw/detector/detectors/K1.interferometer
index 8585857ac1eac558056fb78582beebe9f610afe7..9120d24b71ab54ebd4a426b7ab95cc9050ca51e1 100644
--- a/bilby/gw/detector/detectors/K1.interferometer
+++ b/bilby/gw/detector/detectors/K1.interferometer
@@ -15,3 +15,5 @@ longitude = 137 + 18 / 60 + 21.44171 / 3600
 elevation = 414.181
 xarm_azimuth = 90 - 60.39623489157727
 yarm_azimuth = 90 + 29.60357629670688
+xarm_tilt = 0.0031414
+yarm_tilt = -0.0036270
diff --git a/bilby/gw/detector/detectors/L1.interferometer b/bilby/gw/detector/detectors/L1.interferometer
index 26f34e03b1f73d79f2ab0a6c9e6f0f68b88f2f87..840473e94dad673be4418ff28b0cf238b97438ad 100644
--- a/bilby/gw/detector/detectors/L1.interferometer
+++ b/bilby/gw/detector/detectors/L1.interferometer
@@ -11,3 +11,5 @@ longitude = -(90 + 46. / 60 + 27.2654 / 3600)
 elevation = -6.574
 xarm_azimuth = 197.7165
 yarm_azimuth = 287.7165
+xarm_tilt = -3.121e-4
+yarm_tilt = -6.107e-4
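
A quick check that the new arm tilts are picked up when the configuration
files above are parsed; a sketch assuming the Interferometer attributes
mirror the keys in the *.interferometer files:

    from bilby.gw.detector import get_empty_interferometer

    kagra = get_empty_interferometer("K1")
    print(kagra.xarm_tilt, kagra.yarm_tilt)  # expect 0.0031414 -0.0036270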
diff --git a/bilby/gw/detector/networks.py b/bilby/gw/detector/networks.py
index d354f2bf48a9f98c2d21b981fe7179affca7d3ac..bc1067ff94b12ff0a15bccc6c7773f3fa6c6cd37 100644
--- a/bilby/gw/detector/networks.py
+++ b/bilby/gw/detector/networks.py
@@ -11,10 +11,10 @@ from .psd import PowerSpectralDensity
 
 
 class InterferometerList(list):
-    """ A list of Interferometer objects """
+    """A list of Interferometer objects"""
 
     def __init__(self, interferometers):
-        """ Instantiate a InterferometerList
+        """Instantiate a InterferometerList
 
         The InterferometerList is a list of Interferometer objects, each
         object has the data used in evaluating the likelihood
@@ -32,7 +32,9 @@ class InterferometerList(list):
             if type(ifo) == str:
                 ifo = get_empty_interferometer(ifo)
             if type(ifo) not in [Interferometer, TriangularInterferometer]:
-                raise TypeError("Input list of interferometers are not all Interferometer objects")
+                raise TypeError(
+                    "Input list of interferometers are not all Interferometer objects"
+                )
             else:
                 self.append(ifo)
         self._check_interferometers()
@@ -45,28 +47,37 @@ class InterferometerList(list):
 
         If both checks fail, then a ValueError is raised.
         """
-        consistent_attributes = ['duration', 'start_time', 'sampling_frequency']
+        consistent_attributes = ["duration", "start_time", "sampling_frequency"]
         for attribute in consistent_attributes:
-            x = [getattr(interferometer.strain_data, attribute)
-                 for interferometer in self]
+            x = [
+                getattr(interferometer.strain_data, attribute)
+                for interferometer in self
+            ]
             try:
                 if not all(y == x[0] for y in x):
-                    ifo_strs = ["{ifo}[{attribute}]={value}".format(
-                        ifo=ifo.name,
-                        attribute=attribute,
-                        value=getattr(ifo.strain_data, attribute))
-                        for ifo in self]
+                    ifo_strs = [
+                        "{ifo}[{attribute}]={value}".format(
+                            ifo=ifo.name,
+                            attribute=attribute,
+                            value=getattr(ifo.strain_data, attribute),
+                        )
+                        for ifo in self
+                    ]
                     raise ValueError(
                         "The {} of all interferometers are not the same: {}".format(
-                            attribute, ', '.join(ifo_strs)))
+                            attribute, ", ".join(ifo_strs)
+                        )
+                    )
             except ValueError as e:
                 if not all(math.isclose(y, x[0], abs_tol=1e-5) for y in x):
                     raise ValueError(e)
                 else:
                     logger.warning(e)
 
-    def set_strain_data_from_power_spectral_densities(self, sampling_frequency, duration, start_time=0):
-        """ Set the `Interferometer.strain_data` from the power spectral densities of the detectors
+    def set_strain_data_from_power_spectral_densities(
+        self, sampling_frequency, duration, start_time=0
+    ):
+        """Set the `Interferometer.strain_data` from the power spectral densities of the detectors
 
         This uses the `interferometer.power_spectral_density` object to set
         the `strain_data` to a noise realization. See
@@ -83,12 +94,16 @@ class InterferometerList(list):
 
         """
         for interferometer in self:
-            interferometer.set_strain_data_from_power_spectral_density(sampling_frequency=sampling_frequency,
-                                                                       duration=duration,
-                                                                       start_time=start_time)
+            interferometer.set_strain_data_from_power_spectral_density(
+                sampling_frequency=sampling_frequency,
+                duration=duration,
+                start_time=start_time,
+            )
 
-    def set_strain_data_from_zero_noise(self, sampling_frequency, duration, start_time=0):
-        """ Set the `Interferometer.strain_data` from the power spectral densities of the detectors
+    def set_strain_data_from_zero_noise(
+        self, sampling_frequency, duration, start_time=0
+    ):
+        """Set the `Interferometer.strain_data` from the power spectral densities of the detectors
 
         This uses the `interferometer.power_spectral_density` object to set
         the `strain_data` to zero noise. See
@@ -105,11 +120,19 @@ class InterferometerList(list):
 
         """
         for interferometer in self:
-            interferometer.set_strain_data_from_zero_noise(sampling_frequency=sampling_frequency,
-                                                           duration=duration,
-                                                           start_time=start_time)
-
-    def inject_signal(self, parameters=None, injection_polarizations=None, waveform_generator=None):
+            interferometer.set_strain_data_from_zero_noise(
+                sampling_frequency=sampling_frequency,
+                duration=duration,
+                start_time=start_time,
+            )
+
+    def inject_signal(
+        self,
+        parameters=None,
+        injection_polarizations=None,
+        waveform_generator=None,
+        raise_error=True,
+    ):
         """ Inject a signal into noise in each of the three detectors.
 
         Parameters
@@ -124,6 +147,9 @@ class InterferometerList(list):
         waveform_generator: bilby.gw.waveform_generator.WaveformGenerator
             A WaveformGenerator instance using the source model to inject. If
             `injection_polarizations` is given, this will be ignored.
+        raise_error: bool
+            Whether to raise an error if the injected signal does not fit in
+            the segment.
 
         Notes
         ==========
@@ -138,22 +164,29 @@ class InterferometerList(list):
         """
         if injection_polarizations is None:
             if waveform_generator is not None:
-                injection_polarizations = \
-                    waveform_generator.frequency_domain_strain(parameters)
+                injection_polarizations = waveform_generator.frequency_domain_strain(
+                    parameters
+                )
             else:
                 raise ValueError(
                     "inject_signal needs one of waveform_generator or "
-                    "injection_polarizations.")
+                    "injection_polarizations."
+                )
 
         all_injection_polarizations = list()
         for interferometer in self:
             all_injection_polarizations.append(
-                interferometer.inject_signal(parameters=parameters, injection_polarizations=injection_polarizations))
+                interferometer.inject_signal(
+                    parameters=parameters,
+                    injection_polarizations=injection_polarizations,
+                    raise_error=raise_error,
+                )
+            )
 
         return all_injection_polarizations
 
     def save_data(self, outdir, label=None):
-        """ Creates a save file for the data in plain text format
+        """Creates a save file for the data in plain text format
 
         Parameters
         ==========
@@ -165,7 +198,7 @@ class InterferometerList(list):
         for interferometer in self:
             interferometer.save_data(outdir=outdir, label=label)
 
-    def plot_data(self, signal=None, outdir='.', label=None):
+    def plot_data(self, signal=None, outdir=".", label=None):
         if utils.command_line_args.bilby_test_mode:
             return
 
@@ -209,13 +242,14 @@ class InterferometerList(list):
 
     @property
     def meta_data(self):
-        """ Dictionary of the per-interferometer meta_data """
-        return {interferometer.name: interferometer.meta_data
-                for interferometer in self}
+        """Dictionary of the per-interferometer meta_data"""
+        return {
+            interferometer.name: interferometer.meta_data for interferometer in self
+        }
 
     @staticmethod
     def _filename_from_outdir_label_extension(outdir, label, extension="h5"):
-        return os.path.join(outdir, label + f'.{extension}')
+        return os.path.join(outdir, label + f".{extension}")
 
     _save_docstring = """ Saves the object to a {format} file
 
@@ -239,53 +273,66 @@ class InterferometerList(list):
 
     """
 
-    def to_hdf5(self, outdir='outdir', label='ifo_list'):
+    def to_hdf5(self, outdir="outdir", label="ifo_list"):
         import deepdish
+
         if sys.version_info[0] < 3:
-            raise NotImplementedError('Pickling of InterferometerList is not supported in Python 2.'
-                                      'Use Python 3 instead.')
-        label = label + '_' + ''.join(ifo.name for ifo in self)
+            raise NotImplementedError(
+                "Pickling of InterferometerList is not supported in Python 2."
+                "Use Python 3 instead."
+            )
+        label = label + "_" + "".join(ifo.name for ifo in self)
         utils.check_directory_exists_and_if_not_mkdir(outdir)
         try:
             filename = self._filename_from_outdir_label_extension(outdir, label, "h5")
             deepdish.io.save(filename, self)
         except AttributeError:
-            logger.warning("Saving to hdf5 using deepdish failed. Pickle dumping instead.")
+            logger.warning(
+                "Saving to hdf5 using deepdish failed. Pickle dumping instead."
+            )
             self.to_pickle(outdir=outdir, label=label)
 
     @classmethod
     def from_hdf5(cls, filename=None):
         import deepdish
+
         if sys.version_info[0] < 3:
-            raise NotImplementedError('Pickling of InterferometerList is not supported in Python 2.'
-                                      'Use Python 3 instead.')
+            raise NotImplementedError(
+                "Pickling of InterferometerList is not supported in Python 2."
+                "Use Python 3 instead."
+            )
         res = deepdish.io.load(filename)
         if res.__class__ == list:
             res = cls(res)
         if res.__class__ != cls:
-            raise TypeError('The loaded object is not a InterferometerList')
+            raise TypeError("The loaded object is not a InterferometerList")
         return res
 
     def to_pickle(self, outdir="outdir", label="ifo_list"):
         import dill
-        utils.check_directory_exists_and_if_not_mkdir('outdir')
-        label = label + '_' + ''.join(ifo.name for ifo in self)
-        filename = self._filename_from_outdir_label_extension(outdir, label, extension="pkl")
+
+        utils.check_directory_exists_and_if_not_mkdir(outdir)
+        label = label + "_" + "".join(ifo.name for ifo in self)
+        filename = self._filename_from_outdir_label_extension(
+            outdir, label, extension="pkl"
+        )
         with open(filename, "wb") as ff:
             dill.dump(self, ff)
 
     @classmethod
     def from_pickle(cls, filename=None):
         import dill
+
         with open(filename, "rb") as ff:
             res = dill.load(ff)
         if res.__class__ != cls:
-            raise TypeError('The loaded object is not an InterferometerList')
+            raise TypeError("The loaded object is not an InterferometerList")
         return res
 
     to_hdf5.__doc__ = _save_docstring.format(
-        format="hdf5", extra=""".. deprecated:: 1.1.0
-    Use :func:`to_pickle` instead."""
+        format="hdf5",
+        extra=""".. deprecated:: 1.1.0
+    Use :func:`to_pickle` instead.""",
     )
     to_pickle.__doc__ = _save_docstring.format(
         format="pickle", extra=".. versionadded:: 1.1.0"
@@ -295,10 +342,21 @@ class InterferometerList(list):
 
 
 class TriangularInterferometer(InterferometerList):
-
-    def __init__(self, name, power_spectral_density, minimum_frequency, maximum_frequency,
-                 length, latitude, longitude, elevation, xarm_azimuth, yarm_azimuth,
-                 xarm_tilt=0., yarm_tilt=0.):
+    def __init__(
+        self,
+        name,
+        power_spectral_density,
+        minimum_frequency,
+        maximum_frequency,
+        length,
+        latitude,
+        longitude,
+        elevation,
+        xarm_azimuth,
+        yarm_azimuth,
+        xarm_tilt=0.0,
+        yarm_tilt=0.0,
+    ):
         super(TriangularInterferometer, self).__init__([])
         self.name = name
         # for attr in ['power_spectral_density', 'minimum_frequency', 'maximum_frequency']:
@@ -310,15 +368,46 @@ class TriangularInterferometer(InterferometerList):
             maximum_frequency = [maximum_frequency] * 3
 
         for ii in range(3):
-            self.append(Interferometer(
-                '{}{}'.format(name, ii + 1), power_spectral_density[ii], minimum_frequency[ii], maximum_frequency[ii],
-                length, latitude, longitude, elevation, xarm_azimuth, yarm_azimuth, xarm_tilt, yarm_tilt))
+            self.append(
+                Interferometer(
+                    "{}{}".format(name, ii + 1),
+                    power_spectral_density[ii],
+                    minimum_frequency[ii],
+                    maximum_frequency[ii],
+                    length,
+                    latitude,
+                    longitude,
+                    elevation,
+                    xarm_azimuth,
+                    yarm_azimuth,
+                    xarm_tilt,
+                    yarm_tilt,
+                )
+            )
 
             xarm_azimuth += 240
             yarm_azimuth += 240
 
-            latitude += np.arctan(length * np.sin(xarm_azimuth * np.pi / 180) * 1e3 / utils.radius_of_earth)
-            longitude += np.arctan(length * np.cos(xarm_azimuth * np.pi / 180) * 1e3 / utils.radius_of_earth)
+            latitude += (
+                np.arctan(
+                    length
+                    * np.sin(xarm_azimuth * np.pi / 180)
+                    * 1e3
+                    / utils.radius_of_earth
+                )
+                * 180
+                / np.pi
+            )
+            longitude += (
+                np.arctan(
+                    length
+                    * np.cos(xarm_azimuth * np.pi / 180)
+                    * 1e3
+                    / utils.radius_of_earth
+                )
+                * 180
+                / np.pi
+            )
 
 
 def get_empty_interferometer(name):
@@ -351,34 +440,38 @@ def get_empty_interferometer(name):
     interferometer: Interferometer
         Interferometer instance
     """
-    filename = os.path.join(os.path.dirname(__file__), 'detectors', '{}.interferometer'.format(name))
+    filename = os.path.join(
+        os.path.dirname(__file__), "detectors", "{}.interferometer".format(name)
+    )
     try:
         return load_interferometer(filename)
     except OSError:
-        raise ValueError('Interferometer {} not implemented'.format(name))
+        raise ValueError("Interferometer {} not implemented".format(name))
 
 
 def load_interferometer(filename):
     """Load an interferometer from a file."""
     parameters = dict()
-    with open(filename, 'r') as parameter_file:
+    with open(filename, "r") as parameter_file:
         lines = parameter_file.readlines()
         for line in lines:
-            if line[0] == '#' or line[0] == '\n':
+            if line[0] == "#" or line[0] == "\n":
                 continue
-            split_line = line.split('=')
+            split_line = line.split("=")
             key = split_line[0].strip()
-            value = eval('='.join(split_line[1:]))
+            value = eval("=".join(split_line[1:]))
             parameters[key] = value
-    if 'shape' not in parameters.keys():
+    if "shape" not in parameters.keys():
         ifo = Interferometer(**parameters)
-        logger.debug('Assuming L shape for {}'.format('name'))
-    elif parameters['shape'].lower() in ['l', 'ligo']:
-        parameters.pop('shape')
+        logger.debug("Assuming L shape for {}".format("name"))
+    elif parameters["shape"].lower() in ["l", "ligo"]:
+        parameters.pop("shape")
         ifo = Interferometer(**parameters)
-    elif parameters['shape'].lower() in ['triangular', 'triangle']:
-        parameters.pop('shape')
+    elif parameters["shape"].lower() in ["triangular", "triangle"]:
+        parameters.pop("shape")
         ifo = TriangularInterferometer(**parameters)
     else:
-        raise IOError("{} could not be loaded. Invalid parameter 'shape'.".format(filename))
+        raise IOError(
+            "{} could not be loaded. Invalid parameter 'shape'.".format(filename)
+        )
     return ifo
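
Two behavioural changes in this file sit among the black reformatting: the
raise_error pass-through on inject_signal, and the radian-to-degree
conversion of the TriangularInterferometer vertex offsets. A usage sketch
for the former; the waveform generator setup is elided, so the injection
call itself is left commented:

    from bilby.gw.detector import InterferometerList

    ifos = InterferometerList(["H1", "L1"])
    ifos.set_strain_data_from_power_spectral_densities(
        sampling_frequency=2048, duration=4, start_time=0
    )
    # With raise_error=False, a signal that does not fit the segment is
    # tolerated rather than raising (per the new docstring above):
    # ifos.inject_signal(
    #     waveform_generator=waveform_generator,
    #     parameters=injection_parameters,
    #     raise_error=False,
    # )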
diff --git a/bilby/gw/eos/eos.py b/bilby/gw/eos/eos.py
index 0f5ff179eccce2f502e4062edca56a5dfe8c8934..5693fb33e927153ba3a577a592bc6fe557b5944e 100644
--- a/bilby/gw/eos/eos.py
+++ b/bilby/gw/eos/eos.py
@@ -17,7 +17,7 @@ conversion_dict = {'pressure': {'cgs': C_SI ** 4. / G_SI * 10., 'si': C_SI ** 4.
                    'pseudo_enthalpy': {'dimensionless': 1.},
                    'mass': {'g': C_SI ** 2. / G_SI * 1000, 'kg': C_SI ** 2. / G_SI, 'geom': 1.,
                             'm_sol': C_SI ** 2. / G_SI / MSUN_SI},
-                   'radius': {'cm': 100., 'mass': 1., 'km': .001},
+                   'radius': {'cm': 100., 'm': 1., 'km': .001},
                    'tidal_deformability': {'geom': 1.}}
 
 
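
The change above fixes the unit key for radii in metres ('mass' was a typo
for 'm'); the factors convert a radius expressed in metres. A toy
illustration of applying such a table, with the values copied from
conversion_dict and the helper name illustrative:

    RADIUS_FACTORS = {"cm": 100.0, "m": 1.0, "km": 0.001}

    def convert_radius(radius_in_m, unit):
        # e.g. convert_radius(12_000.0, "km") -> 12.0
        return radius_in_m * RADIUS_FACTORS[unit]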
diff --git a/bilby/gw/likelihood.py b/bilby/gw/likelihood.py
deleted file mode 100644
index a6ab1a2b77030172676df04d02dbc70a00b51975..0000000000000000000000000000000000000000
--- a/bilby/gw/likelihood.py
+++ /dev/null
@@ -1,2288 +0,0 @@
-
-import os
-import json
-import copy
-import math
-
-import attr
-import numpy as np
-import pandas as pd
-from scipy.special import logsumexp
-
-from ..core.likelihood import Likelihood
-from ..core.utils import BilbyJsonEncoder, decode_bilby_json
-from ..core.utils import (
-    logger, UnsortedInterp2d, create_frequency_series, create_time_series,
-    speed_of_light, solar_mass, radius_of_earth, gravitational_constant,
-    round_up_to_power_of_two)
-from ..core.prior import Interped, Prior, Uniform, PriorDict, DeltaFunction
-from .detector import InterferometerList, get_empty_interferometer, calibration
-from .prior import BBHPriorDict, CBCPriorDict, Cosmological
-from .source import lal_binary_black_hole
-from .utils import (
-    noise_weighted_inner_product, build_roq_weights, zenith_azimuth_to_ra_dec,
-    ln_i0
-)
-from .waveform_generator import WaveformGenerator
-
-
-class GravitationalWaveTransient(Likelihood):
-    """ A gravitational-wave transient likelihood object
-
-    This is the usual likelihood object to use for transient gravitational
-    wave parameter estimation. It computes the log-likelihood in the frequency
-    domain assuming a colored Gaussian noise model described by a power
-    spectral density. See Thrane & Talbot (2019), arxiv.org/abs/1809.02293.
-
-    Parameters
-    ==========
-    interferometers: list, bilby.gw.detector.InterferometerList
-        A list of `bilby.detector.Interferometer` instances - contains the
-        detector data and power spectral densities
-    waveform_generator: `bilby.waveform_generator.WaveformGenerator`
-        An object which computes the frequency-domain strain of the signal,
-        given some set of parameters
-    distance_marginalization: bool, optional
-        If true, marginalize over distance in the likelihood.
-        This uses a look up table calculated at run time.
-        The distance prior is set to be a delta function at the minimum
-        distance allowed in the prior being marginalised over.
-    time_marginalization: bool, optional
-        If true, marginalize over time in the likelihood.
-        This uses a FFT to calculate the likelihood over a regularly spaced
-        grid.
-        In order to cover the whole space the prior is set to be uniform over
-        the spacing of the array of times.
-        If using time marginalisation and jitter_time is True a "jitter"
-        parameter is added to the prior which modifies the position of the
-        grid of times.
-    phase_marginalization: bool, optional
-        If true, marginalize over phase in the likelihood.
-        This is done analytically using a Bessel function.
-        The phase prior is set to be a delta function at phase=0.
-    calibration_marginalization: bool, optional
-        If true, marginalize over calibration response curves in the likelihood.
-        This is done numerically over a number of calibration response curve realizations.
-    priors: dict, optional
-        If given, used in the distance and phase marginalization.
-        Warning: when using marginalisation the dict is overwritten which will change the
-        the dict you are passing in. If this behaviour is undesired, pass `priors.copy()`.
-    distance_marginalization_lookup_table: (dict, str), optional
-        If a dict, dictionary containing the lookup_table, distance_array,
-        (distance) prior_array, and reference_distance used to construct
-        the table.
-        If a string the name of a file containing these quantities.
-        The lookup table is stored after construction in either the
-        provided string or a default location:
-        '.distance_marginalization_lookup_dmin{}_dmax{}_n{}.npz'
-    calibration_lookup_table: dict, optional
-        If a dict, contains the arrays over which to marginalize for each interferometer or the filepaths of the
-        calibration files.
-        If not provided, but calibration_marginalization is used, then the appropriate file is created to
-        contain the curves.
-    number_of_response_curves: int, optional
-        Number of curves from the calibration lookup table to use.
-        Default is 1000.
-    starting_index: int, optional
-        Sets the index for the first realization of the calibration curve to be considered.
-        This, coupled with number_of_response_curves, allows for restricting the set of curves used. This can be used
-        when dealing with large frequency arrays to split the calculation into sections.
-        Defaults to 0.
-    jitter_time: bool, optional
-        Whether to introduce a `time_jitter` parameter. This avoids either
-        missing the likelihood peak, or introducing biases in the
-        reconstructed time posterior due to an insufficient sampling frequency.
-        Default is False, however using this parameter is strongly encouraged.
-    reference_frame: (str, bilby.gw.detector.InterferometerList, list), optional
-        Definition of the reference frame for the sky location.
-
-        - :code:`sky`: sample in RA/dec, this is the default
-        - e.g., :code:`"H1L1", ["H1", "L1"], InterferometerList(["H1", "L1"])`:
-          sample in azimuth and zenith, `azimuth` and `zenith` defined in the
-          frame where the z-axis is aligned the the vector connecting H1
-          and L1.
-
-    time_reference: str, optional
-        Name of the reference for the sampled time parameter.
-
-        - :code:`geocent`/:code:`geocenter`: sample in the time at the
-          Earth's center, this is the default
-        - e.g., :code:`H1`: sample in the time of arrival at H1
-
-    Returns
-    =======
-    Likelihood: `bilby.core.likelihood.Likelihood`
-        A likelihood object, able to compute the likelihood of the data given
-        some model parameters
-
-    """
-
-    @attr.s
-    class _CalculatedSNRs:
-        d_inner_h = attr.ib()
-        optimal_snr_squared = attr.ib()
-        complex_matched_filter_snr = attr.ib()
-        d_inner_h_array = attr.ib()
-        optimal_snr_squared_array = attr.ib()
-        d_inner_h_squared_tc_array = attr.ib()
-
-    def __init__(
-        self, interferometers, waveform_generator, time_marginalization=False,
-        distance_marginalization=False, phase_marginalization=False, calibration_marginalization=False, priors=None,
-        distance_marginalization_lookup_table=None, calibration_lookup_table=None,
-        number_of_response_curves=1000, starting_index=0, jitter_time=True, reference_frame="sky",
-        time_reference="geocenter"
-    ):
-
-        self.waveform_generator = waveform_generator
-        super(GravitationalWaveTransient, self).__init__(dict())
-        self.interferometers = InterferometerList(interferometers)
-        self.time_marginalization = time_marginalization
-        self.distance_marginalization = distance_marginalization
-        self.phase_marginalization = phase_marginalization
-        self.calibration_marginalization = calibration_marginalization
-        self.priors = priors
-        self._check_set_duration_and_sampling_frequency_of_waveform_generator()
-        self.jitter_time = jitter_time
-        self.reference_frame = reference_frame
-        if "geocent" not in time_reference:
-            self.time_reference = time_reference
-            self.reference_ifo = get_empty_interferometer(self.time_reference)
-            if self.time_marginalization:
-                logger.info("Cannot marginalise over non-geocenter time.")
-                self.time_marginalization = False
-                self.jitter_time = False
-        else:
-            self.time_reference = "geocent"
-            self.reference_ifo = None
-
-        if self.time_marginalization:
-            self._check_marginalized_prior_is_set(key='geocent_time')
-            self._setup_time_marginalization()
-            priors['geocent_time'] = float(self.interferometers.start_time)
-            if self.jitter_time:
-                priors['time_jitter'] = Uniform(
-                    minimum=- self._delta_tc / 2,
-                    maximum=self._delta_tc / 2,
-                    boundary='periodic',
-                    name="time_jitter",
-                    latex_label="$t_j$"
-                )
-            self._marginalized_parameters.append('geocent_time')
-        elif self.jitter_time:
-            logger.debug(
-                "Time jittering requested with non-time-marginalised "
-                "likelihood, ignoring.")
-            self.jitter_time = False
-
-        if self.phase_marginalization:
-            self._check_marginalized_prior_is_set(key='phase')
-            priors['phase'] = float(0)
-            self._marginalized_parameters.append('phase')
-
-        if self.distance_marginalization:
-            self._lookup_table_filename = None
-            self._check_marginalized_prior_is_set(key='luminosity_distance')
-            self._distance_array = np.linspace(
-                self.priors['luminosity_distance'].minimum,
-                self.priors['luminosity_distance'].maximum, int(1e4))
-            self.distance_prior_array = np.array(
-                [self.priors['luminosity_distance'].prob(distance)
-                 for distance in self._distance_array])
-            self._ref_dist = self.priors['luminosity_distance'].rescale(0.5)
-            self._setup_distance_marginalization(
-                distance_marginalization_lookup_table)
-            for key in ['redshift', 'comoving_distance']:
-                if key in priors:
-                    del priors[key]
-            priors['luminosity_distance'] = float(self._ref_dist)
-            self._marginalized_parameters.append('luminosity_distance')
-
-        if self.calibration_marginalization:
-            self.number_of_response_curves = number_of_response_curves
-            self.starting_index = starting_index
-            self._setup_calibration_marginalization(calibration_lookup_table)
-            self._marginalized_parameters.append('recalib_index')
-
-    def __repr__(self):
-        return self.__class__.__name__ + '(interferometers={},\n\twaveform_generator={},\n\ttime_marginalization={}, ' \
-                                         'distance_marginalization={}, phase_marginalization={}, '\
-                                         'calibration_marginalization={}, priors={})'\
-            .format(self.interferometers, self.waveform_generator, self.time_marginalization,
-                    self.distance_marginalization, self.phase_marginalization, self.calibration_marginalization,
-                    self.priors)
-
-    def _check_set_duration_and_sampling_frequency_of_waveform_generator(self):
-        """ Check the waveform_generator has the same duration and
-        sampling_frequency as the interferometers. If they are unset, then
-        set them, if they differ, raise an error
-        """
-
-        attributes = ['duration', 'sampling_frequency', 'start_time']
-        for attribute in attributes:
-            wfg_attr = getattr(self.waveform_generator, attribute)
-            ifo_attr = getattr(self.interferometers, attribute)
-            if wfg_attr is None:
-                logger.debug(
-                    "The waveform_generator {} is None. Setting from the "
-                    "provided interferometers.".format(attribute))
-            elif wfg_attr != ifo_attr:
-                logger.debug(
-                    "The waveform_generator {} is not equal to that of the "
-                    "provided interferometers. Overwriting the "
-                    "waveform_generator.".format(attribute))
-            setattr(self.waveform_generator, attribute, ifo_attr)
-
-    def calculate_snrs(self, waveform_polarizations, interferometer):
-        """
-        Compute the snrs
-
-        Parameters
-        ==========
-        waveform_polarizations: dict
-            A dictionary of waveform polarizations and the corresponding array
-        interferometer: bilby.gw.detector.Interferometer
-            The bilby interferometer object
-
-        """
-        signal = interferometer.get_detector_response(
-            waveform_polarizations, self.parameters)
-        _mask = interferometer.frequency_mask
-
-        if 'recalib_index' in self.parameters:
-            signal[_mask] *= self.calibration_draws[interferometer.name][int(self.parameters['recalib_index'])]
-
-        d_inner_h = interferometer.inner_product(signal=signal)
-        optimal_snr_squared = interferometer.optimal_snr_squared(signal=signal)
-        complex_matched_filter_snr = d_inner_h / (optimal_snr_squared**0.5)
-
-        d_inner_h_array = None
-        optimal_snr_squared_array = None
-
-        if self.time_marginalization and self.calibration_marginalization:
-
-            d_inner_h_integrand = np.tile(
-                interferometer.frequency_domain_strain.conjugate() * signal /
-                interferometer.power_spectral_density_array, (self.number_of_response_curves, 1)).T
-
-            d_inner_h_integrand[_mask] *= self.calibration_draws[interferometer.name].T
-
-            d_inner_h_array =\
-                4 / self.waveform_generator.duration * np.fft.fft(
-                    d_inner_h_integrand[0:-1], axis=0).T
-
-            optimal_snr_squared_integrand = 4. / self.waveform_generator.duration *\
-                np.abs(signal)**2 / interferometer.power_spectral_density_array
-            optimal_snr_squared_array = np.dot(optimal_snr_squared_integrand[_mask],
-                                               self.calibration_abs_draws[interferometer.name].T)
-
-        elif self.time_marginalization and not self.calibration_marginalization:
-            d_inner_h_array =\
-                4 / self.waveform_generator.duration * np.fft.fft(
-                    signal[0:-1] *
-                    interferometer.frequency_domain_strain.conjugate()[0:-1] /
-                    interferometer.power_spectral_density_array[0:-1])
-
-        elif self.calibration_marginalization and ('recalib_index' not in self.parameters):
-            d_inner_h_integrand = 4. / self.waveform_generator.duration * \
-                interferometer.frequency_domain_strain.conjugate() * signal / \
-                interferometer.power_spectral_density_array
-            d_inner_h_array = np.dot(d_inner_h_integrand[_mask], self.calibration_draws[interferometer.name].T)
-
-            optimal_snr_squared_integrand = 4. / self.waveform_generator.duration *\
-                np.abs(signal)**2 / interferometer.power_spectral_density_array
-            optimal_snr_squared_array = np.dot(optimal_snr_squared_integrand[_mask],
-                                               self.calibration_abs_draws[interferometer.name].T)
-
-        return self._CalculatedSNRs(
-            d_inner_h=d_inner_h, optimal_snr_squared=optimal_snr_squared,
-            complex_matched_filter_snr=complex_matched_filter_snr,
-            d_inner_h_array=d_inner_h_array,
-            optimal_snr_squared_array=optimal_snr_squared_array,
-            d_inner_h_squared_tc_array=None)
-
-    def _check_marginalized_prior_is_set(self, key):
-        if key in self.priors and self.priors[key].is_fixed:
-            raise ValueError(
-                "Cannot use marginalized likelihood for {}: prior is fixed"
-                .format(key))
-        if key not in self.priors or not isinstance(
-                self.priors[key], Prior):
-            logger.warning(
-                'Prior not provided for {}, using the BBH default.'.format(key))
-            if key == 'geocent_time':
-                self.priors[key] = Uniform(
-                    self.interferometers.start_time,
-                    self.interferometers.start_time + self.interferometers.duration)
-            elif key == 'luminosity_distance':
-                for key in ['redshift', 'comoving_distance']:
-                    if key in self.priors:
-                        if not isinstance(self.priors[key], Cosmological):
-                            raise TypeError(
-                                "To marginalize over {}, the prior must be specified as a "
-                                "subclass of bilby.gw.prior.Cosmological.".format(key)
-                            )
-                        self.priors['luminosity_distance'] = self.priors[key].get_corresponding_prior(
-                            'luminosity_distance'
-                        )
-                        del self.priors[key]
-            else:
-                self.priors[key] = BBHPriorDict()[key]
-
-    @property
-    def priors(self):
-        return self._prior
-
-    @priors.setter
-    def priors(self, priors):
-        if priors is not None:
-            self._prior = priors.copy()
-        elif any([self.time_marginalization, self.phase_marginalization,
-                  self.distance_marginalization]):
-            raise ValueError("You can't use a marginalized likelihood without specifying a priors")
-        else:
-            self._prior = None
-
-    def noise_log_likelihood(self):
-        log_l = 0
-        for interferometer in self.interferometers:
-            mask = interferometer.frequency_mask
-            log_l -= noise_weighted_inner_product(
-                interferometer.frequency_domain_strain[mask],
-                interferometer.frequency_domain_strain[mask],
-                interferometer.power_spectral_density_array[mask],
-                self.waveform_generator.duration) / 2
-        return float(np.real(log_l))
-
-    def log_likelihood_ratio(self):
-        waveform_polarizations =\
-            self.waveform_generator.frequency_domain_strain(self.parameters)
-
-        self.parameters.update(self.get_sky_frame_parameters())
-
-        if waveform_polarizations is None:
-            return np.nan_to_num(-np.inf)
-
-        d_inner_h = 0.
-        optimal_snr_squared = 0.
-        complex_matched_filter_snr = 0.
-
-        if self.time_marginalization and self.calibration_marginalization:
-            if self.jitter_time:
-                self.parameters['geocent_time'] += self.parameters['time_jitter']
-
-            d_inner_h_array = np.zeros(
-                (self.number_of_response_curves, len(self.interferometers.frequency_array[0:-1])),
-                dtype=np.complex128)
-            optimal_snr_squared_array = np.zeros(self.number_of_response_curves, dtype=np.complex128)
-
-        elif self.time_marginalization:
-            if self.jitter_time:
-                self.parameters['geocent_time'] += self.parameters['time_jitter']
-            d_inner_h_array = np.zeros(
-                len(self.interferometers.frequency_array[0:-1]),
-                dtype=np.complex128)
-
-        elif self.calibration_marginalization:
-            d_inner_h_array = np.zeros(self.number_of_response_curves, dtype=np.complex128)
-            optimal_snr_squared_array = np.zeros(self.number_of_response_curves, dtype=np.complex128)
-
-        for interferometer in self.interferometers:
-            per_detector_snr = self.calculate_snrs(
-                waveform_polarizations=waveform_polarizations,
-                interferometer=interferometer)
-
-            d_inner_h += per_detector_snr.d_inner_h
-            optimal_snr_squared += np.real(per_detector_snr.optimal_snr_squared)
-            complex_matched_filter_snr += per_detector_snr.complex_matched_filter_snr
-
-            if self.time_marginalization or self.calibration_marginalization:
-                d_inner_h_array += per_detector_snr.d_inner_h_array
-
-            if self.calibration_marginalization:
-                optimal_snr_squared_array += per_detector_snr.optimal_snr_squared_array
-
-        if self.calibration_marginalization and self.time_marginalization:
-            log_l = self.time_and_calibration_marginalized_likelihood(
-                d_inner_h_array=d_inner_h_array,
-                h_inner_h=optimal_snr_squared_array)
-            if self.jitter_time:
-                self.parameters['geocent_time'] -= self.parameters['time_jitter']
-
-        elif self.calibration_marginalization:
-            log_l = self.calibration_marginalized_likelihood(
-                d_inner_h_calibration_array=d_inner_h_array,
-                h_inner_h=optimal_snr_squared_array)
-
-        elif self.time_marginalization:
-            log_l = self.time_marginalized_likelihood(
-                d_inner_h_tc_array=d_inner_h_array,
-                h_inner_h=optimal_snr_squared)
-            if self.jitter_time:
-                self.parameters['geocent_time'] -= self.parameters['time_jitter']
-
-        elif self.distance_marginalization:
-            log_l = self.distance_marginalized_likelihood(
-                d_inner_h=d_inner_h, h_inner_h=optimal_snr_squared)
-
-        elif self.phase_marginalization:
-            log_l = self.phase_marginalized_likelihood(
-                d_inner_h=d_inner_h, h_inner_h=optimal_snr_squared)
-
-        else:
-            log_l = np.real(d_inner_h) - optimal_snr_squared / 2
-
-        return float(log_l.real)
-
-    def generate_posterior_sample_from_marginalized_likelihood(self):
-        """
-        Reconstruct the distance posterior from a run which used a likelihood
-        which explicitly marginalised over time/distance/phase.
-
-        See Eq. (C29-C32) of https://arxiv.org/abs/1809.02293
-
-        Returns
-        =======
-        sample: dict
-            Returns the parameters with new samples.
-
-        Notes
-        =====
-        This involves a deepcopy of the signal to avoid issues with waveform
-        caching, as the signal is overwritten in place.
-        """
-        if any([self.phase_marginalization, self.distance_marginalization,
-                self.time_marginalization, self.calibration_marginalization]):
-            signal_polarizations = copy.deepcopy(
-                self.waveform_generator.frequency_domain_strain(
-                    self.parameters))
-        else:
-            return self.parameters
-
-        if self.calibration_marginalization and self.time_marginalization:
-            raise AttributeError(
-                "Cannot use time and calibration marginalization simultaneously for regeneration at the moment!"
-                "The matrix manipulation has not been tested.")
-
-        if self.calibration_marginalization:
-            new_calibration = self.generate_calibration_sample_from_marginalized_likelihood(
-                signal_polarizations=signal_polarizations)
-            self.parameters['recalib_index'] = new_calibration
-        if self.time_marginalization:
-            new_time = self.generate_time_sample_from_marginalized_likelihood(
-                signal_polarizations=signal_polarizations)
-            self.parameters['geocent_time'] = new_time
-        if self.distance_marginalization:
-            new_distance = self.generate_distance_sample_from_marginalized_likelihood(
-                signal_polarizations=signal_polarizations)
-            self.parameters['luminosity_distance'] = new_distance
-        if self.phase_marginalization:
-            new_phase = self.generate_phase_sample_from_marginalized_likelihood(
-                signal_polarizations=signal_polarizations)
-            self.parameters['phase'] = new_phase
-        return self.parameters.copy()
-
-    def generate_calibration_sample_from_marginalized_likelihood(
-            self, signal_polarizations=None):
-        """
-        Generate a single sample from the posterior distribution for the set of calibration response curves when
-        explicitly marginalizing over the calibration uncertainty.
-
-        Parameters
-        ----------
-        signal_polarizations: dict, optional
-            Polarizations modes of the template.
-
-        Returns
-        -------
-        new_calibration: dict
-            Sample set from the calibration posterior
-        """
-        if 'recalib_index' in self.parameters:
-            self.parameters.pop('recalib_index')
-        self.parameters.update(self.get_sky_frame_parameters())
-        if signal_polarizations is None:
-            signal_polarizations = \
-                self.waveform_generator.frequency_domain_strain(self.parameters)
-
-        log_like = self.get_calibration_log_likelihoods(signal_polarizations=signal_polarizations)
-
-        calibration_post = np.exp(log_like - max(log_like))
-        calibration_post /= np.sum(calibration_post)
-
-        new_calibration = np.random.choice(self.number_of_response_curves, p=calibration_post)
-
-        return new_calibration
-
-    def generate_time_sample_from_marginalized_likelihood(
-            self, signal_polarizations=None):
-        """
-        Generate a single sample from the posterior distribution for coalescence
-        time when using a likelihood which explicitly marginalises over time.
-
-        In order to resolve the posterior we artificially upsample to 16kHz.
-
-        See Eq. (C29-C32) of https://arxiv.org/abs/1809.02293
-
-        Parameters
-        ==========
-        signal_polarizations: dict, optional
-            Polarizations modes of the template.
-
-        Returns
-        =======
-        new_time: float
-            Sample from the time posterior.
-        """
-        self.parameters.update(self.get_sky_frame_parameters())
-        if self.jitter_time:
-            self.parameters['geocent_time'] += self.parameters['time_jitter']
-        if signal_polarizations is None:
-            signal_polarizations = \
-                self.waveform_generator.frequency_domain_strain(self.parameters)
-
-        times = create_time_series(
-            sampling_frequency=16384,
-            starting_time=self.parameters['geocent_time'] - self.waveform_generator.start_time,
-            duration=self.waveform_generator.duration)
-        times = times % self.waveform_generator.duration
-        times += self.waveform_generator.start_time
-
-        prior = self.priors["geocent_time"]
-        in_prior = (times >= prior.minimum) & (times < prior.maximum)
-        times = times[in_prior]
-
-        n_time_steps = int(self.waveform_generator.duration * 16384)
-        d_inner_h = np.zeros(len(times), dtype=complex)
-        psd = np.ones(n_time_steps)
-        signal_long = np.zeros(n_time_steps, dtype=complex)
-        data = np.zeros(n_time_steps, dtype=complex)
-        h_inner_h = np.zeros(1)
-        for ifo in self.interferometers:
-            ifo_length = len(ifo.frequency_domain_strain)
-            mask = ifo.frequency_mask
-            signal = ifo.get_detector_response(
-                signal_polarizations, self.parameters)
-            signal_long[:ifo_length] = signal
-            data[:ifo_length] = np.conj(ifo.frequency_domain_strain)
-            psd[:ifo_length][mask] = ifo.power_spectral_density_array[mask]
-            d_inner_h += np.fft.fft(signal_long * data / psd)[in_prior]
-            h_inner_h += ifo.optimal_snr_squared(signal=signal).real
-
-        if self.distance_marginalization:
-            time_log_like = self.distance_marginalized_likelihood(
-                d_inner_h, h_inner_h)
-        elif self.phase_marginalization:
-            time_log_like = ln_i0(abs(d_inner_h)) - h_inner_h.real / 2
-        else:
-            time_log_like = (d_inner_h.real - h_inner_h.real / 2)
-
-        time_prior_array = self.priors['geocent_time'].prob(times)
-        time_post = (
-            np.exp(time_log_like - max(time_log_like)) * time_prior_array)
-
-        keep = (time_post > max(time_post) / 1000)
-        if sum(keep) < 3:
-            keep[1:-1] = keep[1:-1] | keep[2:] | keep[:-2]
-        time_post = time_post[keep]
-        times = times[keep]
-
-        new_time = Interped(times, time_post).sample()
-        return new_time
-
-    def generate_distance_sample_from_marginalized_likelihood(
-            self, signal_polarizations=None):
-        """
-        Generate a single sample from the posterior distribution for luminosity
-        distance when using a likelihood which explicitly marginalises over
-        distance.
-
-        See Eq. (C29-C32) of https://arxiv.org/abs/1809.02293
-
-        Parameters
-        ==========
-        signal_polarizations: dict, optional
-            Polarization modes of the template.
-            Note: These are rescaled in place after the distance sample is
-            generated to allow further parameter reconstruction to occur.
-
-        Returns
-        =======
-        new_distance: float
-            Sample from the distance posterior.
-        """
-        self.parameters.update(self.get_sky_frame_parameters())
-        if signal_polarizations is None:
-            signal_polarizations = \
-                self.waveform_generator.frequency_domain_strain(self.parameters)
-
-        d_inner_h, h_inner_h = self._calculate_inner_products(signal_polarizations)
-
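-        # Rescale the inner products from the sampled luminosity distance to
-        # the distance grid: <d|h> scales as 1/D, <h|h> as 1/D^2.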
-        d_inner_h_dist = (
-            d_inner_h * self.parameters['luminosity_distance'] /
-            self._distance_array)
-
-        h_inner_h_dist = (
-            h_inner_h * self.parameters['luminosity_distance']**2 /
-            self._distance_array**2)
-
-        if self.phase_marginalization:
-            distance_log_like = (
-                ln_i0(abs(d_inner_h_dist)) -
-                h_inner_h_dist.real / 2
-            )
-        else:
-            distance_log_like = (d_inner_h_dist.real - h_inner_h_dist.real / 2)
-
-        distance_post = (np.exp(distance_log_like - max(distance_log_like)) *
-                         self.distance_prior_array)
-
-        new_distance = Interped(
-            self._distance_array, distance_post).sample()
-
-        self._rescale_signal(signal_polarizations, new_distance)
-        return new_distance
-
-    def _calculate_inner_products(self, signal_polarizations):
-        d_inner_h = 0
-        h_inner_h = 0
-        for interferometer in self.interferometers:
-            per_detector_snr = self.calculate_snrs(
-                signal_polarizations, interferometer)
-
-            d_inner_h += per_detector_snr.d_inner_h
-            h_inner_h += per_detector_snr.optimal_snr_squared
-        return d_inner_h, h_inner_h
-
-    def generate_phase_sample_from_marginalized_likelihood(
-            self, signal_polarizations=None):
-        """
-        Generate a single sample from the posterior distribution for phase when
-        using a likelihood which explicitly marginalises over phase.
-
-        See Eq. (C29-C32) of https://arxiv.org/abs/1809.02293
-
-        Parameters
-        ==========
-        signal_polarizations: dict, optional
-            Polarization modes of the template.
-
-        Returns
-        =======
-        new_phase: float
-            Sample from the phase posterior.
-
-        Notes
-        =====
-        This is only valid when it is assumed that mu(phi) \propto exp(-2i phi).
-        """
-        self.parameters.update(self.get_sky_frame_parameters())
-        if signal_polarizations is None:
-            signal_polarizations = \
-                self.waveform_generator.frequency_domain_strain(self.parameters)
-        d_inner_h, h_inner_h = self._calculate_inner_products(signal_polarizations)
-
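-        # Under the assumption mu(phi) \propto exp(-2i phi) (see Notes), a
-        # phase rotation multiplies <d|h> by exp(-2i phi); evaluate the
-        # posterior on a coarse phase grid and draw from the interpolant.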
-        phases = np.linspace(0, 2 * np.pi, 101)
-        phasor = np.exp(-2j * phases)
-        phase_log_post = d_inner_h * phasor - h_inner_h / 2
-        phase_post = np.exp(phase_log_post.real - max(phase_log_post.real))
-        new_phase = Interped(phases, phase_post).sample()
-        return new_phase
-
-    def distance_marginalized_likelihood(self, d_inner_h, h_inner_h):
-        d_inner_h_ref, h_inner_h_ref = self._setup_rho(
-            d_inner_h, h_inner_h)
-        if self.phase_marginalization:
-            d_inner_h_ref = np.abs(d_inner_h_ref)
-        else:
-            d_inner_h_ref = np.real(d_inner_h_ref)
-
-        return self._interp_dist_margd_loglikelihood(
-            d_inner_h_ref, h_inner_h_ref)
-
-    def phase_marginalized_likelihood(self, d_inner_h, h_inner_h):
-        d_inner_h = ln_i0(abs(d_inner_h))
-
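-        # With both calibration and time marginalisation active, d_inner_h is
-        # a (curves, times) array, so h_inner_h is broadcast across the time
-        # axis via the outer product below.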
-        if self.calibration_marginalization and self.time_marginalization:
-            return d_inner_h - np.outer(h_inner_h, np.ones(np.shape(d_inner_h)[1])) / 2
-        else:
-            return d_inner_h - h_inner_h / 2
-
-    def time_marginalized_likelihood(self, d_inner_h_tc_array, h_inner_h):
-        if self.distance_marginalization:
-            log_l_tc_array = self.distance_marginalized_likelihood(
-                d_inner_h=d_inner_h_tc_array, h_inner_h=h_inner_h)
-        elif self.phase_marginalization:
-            log_l_tc_array = self.phase_marginalized_likelihood(
-                d_inner_h=d_inner_h_tc_array,
-                h_inner_h=h_inner_h)
-        else:
-            log_l_tc_array = np.real(d_inner_h_tc_array) - h_inner_h / 2
-        times = self._times
-        if self.jitter_time:
-            times = self._times + self.parameters['time_jitter']
-        time_prior_array = self.priors['geocent_time'].prob(times) * self._delta_tc
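-        # logsumexp with weights b performs the discrete integral over
-        # coalescence time, weighting each bin by its prior probability.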
-        return logsumexp(log_l_tc_array, b=time_prior_array)
-
-    def time_and_calibration_marginalized_likelihood(self, d_inner_h_array, h_inner_h):
-        times = self._times
-        if self.jitter_time:
-            times = self._times + self.parameters['time_jitter']
-
-        _time_prior = self.priors['geocent_time']
-        time_mask = np.logical_and((times >= _time_prior.minimum), (times <= _time_prior.maximum))
-        times = times[time_mask]
-        time_probs = self.priors['geocent_time'].prob(times) * self._delta_tc
-
-        d_inner_h_array = d_inner_h_array[:, time_mask]
-
-        if self.distance_marginalization:
-            log_l_array = self.distance_marginalized_likelihood(
-                d_inner_h=d_inner_h_array, h_inner_h=h_inner_h)
-        elif self.phase_marginalization:
-            log_l_array = self.phase_marginalized_likelihood(
-                d_inner_h=d_inner_h_array,
-                h_inner_h=h_inner_h)
-        else:
-            log_l_array = np.real(d_inner_h_array) - np.outer(h_inner_h, np.ones(np.shape(d_inner_h_array)[1])) / 2
-
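-        # Joint prior weight: the time prior combined with a uniform 1/N
-        # weight over the N calibration response curves.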
-        prior_array = np.outer(time_probs, 1. / self.number_of_response_curves * np.ones(len(h_inner_h))).T
-
-        return logsumexp(log_l_array, b=prior_array)
-
-    def get_calibration_log_likelihoods(self, signal_polarizations=None):
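-        """
-        Compute the log likelihood at each of the stored calibration response
-        curve draws, e.g., for reconstructing the calibration posterior.
-        """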
-        self.parameters.update(self.get_sky_frame_parameters())
-        if signal_polarizations is None:
-            signal_polarizations =\
-                self.waveform_generator.frequency_domain_strain(self.parameters)
-
-        d_inner_h = 0.
-        optimal_snr_squared = 0.
-        complex_matched_filter_snr = 0.
-        d_inner_h_array = np.zeros(self.number_of_response_curves, dtype=np.complex128)
-        optimal_snr_squared_array = np.zeros(self.number_of_response_curves, dtype=np.complex128)
-
-        for interferometer in self.interferometers:
-            per_detector_snr = self.calculate_snrs(
-                waveform_polarizations=signal_polarizations,
-                interferometer=interferometer)
-
-            d_inner_h += per_detector_snr.d_inner_h
-            optimal_snr_squared += np.real(per_detector_snr.optimal_snr_squared)
-            complex_matched_filter_snr += per_detector_snr.complex_matched_filter_snr
-            d_inner_h_array += per_detector_snr.d_inner_h_array
-            optimal_snr_squared_array += per_detector_snr.optimal_snr_squared_array
-
-        if self.distance_marginalization:
-            log_l_cal_array = self.distance_marginalized_likelihood(
-                d_inner_h=d_inner_h_array, h_inner_h=optimal_snr_squared_array)
-        elif self.phase_marginalization:
-            log_l_cal_array = self.phase_marginalized_likelihood(
-                d_inner_h=d_inner_h_array,
-                h_inner_h=optimal_snr_squared_array)
-        else:
-            log_l_cal_array = np.real(d_inner_h_array - optimal_snr_squared_array / 2)
-
-        return log_l_cal_array
-
-    def calibration_marginalized_likelihood(self, d_inner_h_calibration_array, h_inner_h):
-        if self.distance_marginalization:
-            log_l_cal_array = self.distance_marginalized_likelihood(
-                d_inner_h=d_inner_h_calibration_array, h_inner_h=h_inner_h)
-        elif self.phase_marginalization:
-            log_l_cal_array = self.phase_marginalized_likelihood(
-                d_inner_h=d_inner_h_calibration_array,
-                h_inner_h=h_inner_h)
-        else:
-            log_l_cal_array = np.real(d_inner_h_calibration_array - h_inner_h / 2)
-
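-        # Monte Carlo marginalisation: the log of the mean likelihood over
-        # the calibration response curve draws.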
-        return logsumexp(log_l_cal_array) - np.log(self.number_of_response_curves)
-
-    def _setup_rho(self, d_inner_h, optimal_snr_squared):
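-        # Scale the inner products from the sampled luminosity distance to the
-        # fixed reference distance used by the lookup table.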
-        optimal_snr_squared_ref = (optimal_snr_squared.real *
-                                   self.parameters['luminosity_distance'] ** 2 /
-                                   self._ref_dist ** 2.)
-        d_inner_h_ref = (d_inner_h * self.parameters['luminosity_distance'] /
-                         self._ref_dist)
-        return d_inner_h_ref, optimal_snr_squared_ref
-
-    def log_likelihood(self):
-        return self.log_likelihood_ratio() + self.noise_log_likelihood()
-
-    @property
-    def _delta_distance(self):
-        return self._distance_array[1] - self._distance_array[0]
-
-    @property
-    def _dist_multiplier(self):
-        """ Maximum value of ref_dist/dist_array """
-        return self._ref_dist / self._distance_array[0]
-
-    @property
-    def _optimal_snr_squared_ref_array(self):
-        """ Optimal filter snr at fiducial distance of ref_dist Mpc """
-        return np.logspace(-5, 10, self._dist_margd_loglikelihood_array.shape[0])
-
-    @property
-    def _d_inner_h_ref_array(self):
-        """ Matched filter snr at fiducial distance of ref_dist Mpc """
-        if self.phase_marginalization:
-            return np.logspace(-5, 10, self._dist_margd_loglikelihood_array.shape[1])
-        else:
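-            # Without phase marginalization Re<d|h> can be negative, so the
-            # grid must span both signs.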
-            n_negative = self._dist_margd_loglikelihood_array.shape[1] // 2
-            n_positive = self._dist_margd_loglikelihood_array.shape[1] - n_negative
-            return np.hstack((
-                -np.logspace(3, -3, n_negative), np.logspace(-3, 10, n_positive)
-            ))
-
-    def _setup_distance_marginalization(self, lookup_table=None):
-        if isinstance(lookup_table, str) or lookup_table is None:
-            self.cached_lookup_table_filename = lookup_table
-            lookup_table = self.load_lookup_table(
-                self.cached_lookup_table_filename)
-        if isinstance(lookup_table, dict):
-            if self._test_cached_lookup_table(lookup_table):
-                self._dist_margd_loglikelihood_array = lookup_table[
-                    'lookup_table']
-            else:
-                self._create_lookup_table()
-        else:
-            self._create_lookup_table()
-        self._interp_dist_margd_loglikelihood = UnsortedInterp2d(
-            self._d_inner_h_ref_array, self._optimal_snr_squared_ref_array,
-            self._dist_margd_loglikelihood_array, kind='cubic', fill_value=-np.inf)
-
-    @property
-    def cached_lookup_table_filename(self):
-        if self._lookup_table_filename is None:
-            self._lookup_table_filename = (
-                '.distance_marginalization_lookup.npz')
-        return self._lookup_table_filename
-
-    @cached_lookup_table_filename.setter
-    def cached_lookup_table_filename(self, filename):
-        if isinstance(filename, str):
-            if filename[-4:] != '.npz':
-                filename += '.npz'
-        self._lookup_table_filename = filename
-
-    def load_lookup_table(self, filename):
-        if os.path.exists(filename):
-            try:
-                loaded_file = dict(np.load(filename))
-            except AttributeError as e:
-                logger.warning(e)
-                self._create_lookup_table()
-                return None
-            match, failure = self._test_cached_lookup_table(loaded_file)
-            if match:
-                logger.info('Loaded distance marginalisation lookup table from '
-                            '{}.'.format(filename))
-                return loaded_file
-            else:
-                logger.info('Loaded distance marginalisation lookup table does '
-                            'not match for {}.'.format(failure))
-        elif isinstance(filename, str):
-            logger.info('Distance marginalisation file {} does not '
-                        'exist'.format(filename))
-        return None
-
-    def cache_lookup_table(self):
-        np.savez(self.cached_lookup_table_filename,
-                 distance_array=self._distance_array,
-                 prior_array=self.distance_prior_array,
-                 lookup_table=self._dist_margd_loglikelihood_array,
-                 reference_distance=self._ref_dist,
-                 phase_marginalization=self.phase_marginalization)
-
-    def _test_cached_lookup_table(self, loaded_file):
-        pairs = dict(
-            distance_array=self._distance_array,
-            prior_array=self.distance_prior_array,
-            reference_distance=self._ref_dist,
-            phase_marginalization=self.phase_marginalization)
-        for key in pairs:
-            if key not in loaded_file:
-                return False, key
-            elif not np.array_equal(np.atleast_1d(loaded_file[key]),
-                                    np.atleast_1d(pairs[key])):
-                return False, key
-        return True, None
-
-    def _create_lookup_table(self):
-        """ Make the lookup table """
-        from tqdm.auto import tqdm
-        logger.info('Building lookup table for distance marginalisation.')
-
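-        # Tabulate the distance-marginalised log likelihood on a grid of
-        # (<h|h>, <d|h>) values at the reference distance; each entry is a
-        # logsumexp over the discretised distance prior.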
-        self._dist_margd_loglikelihood_array = np.zeros((400, 800))
-        scaling = self._ref_dist / self._distance_array
-        d_inner_h_array_full = np.outer(self._d_inner_h_ref_array, scaling)
-        h_inner_h_array_full = np.outer(self._optimal_snr_squared_ref_array, scaling ** 2)
-        if self.phase_marginalization:
-            d_inner_h_array_full = ln_i0(abs(d_inner_h_array_full))
-        prior_term = self.distance_prior_array * self._delta_distance
-        for ii, optimal_snr_squared_array in tqdm(
-            enumerate(h_inner_h_array_full), total=len(self._optimal_snr_squared_ref_array)
-        ):
-            for jj, d_inner_h_array in enumerate(d_inner_h_array_full):
-                self._dist_margd_loglikelihood_array[ii][jj] = logsumexp(
-                    d_inner_h_array - optimal_snr_squared_array / 2,
-                    b=prior_term
-                )
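-        # Normalise by the prior integral; 0 / self._distance_array is simply
-        # a zeros array with the shape of the distance grid.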
-        log_norm = logsumexp(
-            0 / self._distance_array, b=self.distance_prior_array * self._delta_distance
-        )
-        self._dist_margd_loglikelihood_array -= log_norm
-        self.cache_lookup_table()
-
-    def _setup_phase_marginalization(self, min_bound=-5, max_bound=10):
-        logger.warning(
-            "The _setup_phase_marginalization method is deprecated and will be removed, "
-            "please update the implementation of phase marginalization "
-            "to use bilby.gw.utils.ln_i0"
-        )
-
-    @staticmethod
-    def _bessel_function_interped(xx):
-        logger.warning(
-            "The _bessel_function_interped method is deprecated and will be removed, "
-            "please update the implementation of phase marginalization "
-            "to use bilby.gw.utils.ln_i0"
-        )
-        return ln_i0(xx) + xx
-
-    def _setup_time_marginalization(self):
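-        # The time grid has duration * f_s / 2 points across the segment, so
-        # its spacing is twice the sample spacing: delta_tc = 2 / f_s.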
-        self._delta_tc = 2 / self.waveform_generator.sampling_frequency
-        self._times =\
-            self.interferometers.start_time + np.linspace(
-                0, self.interferometers.duration,
-                int(self.interferometers.duration / 2 *
-                    self.waveform_generator.sampling_frequency + 1))[1:]
-        self.time_prior_array = \
-            self.priors['geocent_time'].prob(self._times) * self._delta_tc
-
-    def _setup_calibration_marginalization(self, calibration_lookup_table):
-        if calibration_lookup_table is None:
-            calibration_lookup_table = {}
-        self.calibration_draws = {}
-        self.calibration_abs_draws = {}
-        self.calibration_parameter_draws = {}
-        for interferometer in self.interferometers:
-
-            # Pin the recalibration priors to delta functions at zero; the
-            # calibration uncertainty is instead handled by marginalising over
-            # the drawn response curves.
-            calibration_priors = PriorDict()
-            for key in self.priors.keys():
-                if 'recalib' in key and interferometer.name in key:
-                    calibration_priors[key] = copy.copy(self.priors[key])
-                    self.priors[key] = DeltaFunction(0.0)
-
-            # If there is no entry in the lookup table, make an empty one
-            if interferometer.name not in calibration_lookup_table.keys():
-                calibration_lookup_table[interferometer.name] =\
-                    f'{interferometer.name}_calibration_file.h5'
-
-            # If the interferometer lookup table file exists, generate the curves from it
-            if os.path.exists(calibration_lookup_table[interferometer.name]):
-                self.calibration_draws[interferometer.name] =\
-                    calibration.read_calibration_file(
-                        calibration_lookup_table[interferometer.name], self.interferometers.frequency_array,
-                        self.number_of_response_curves, self.starting_index)
-
-            else:  # generate the fake curves
-                from tqdm.auto import tqdm
-                self.calibration_parameter_draws[interferometer.name] =\
-                    pd.DataFrame(calibration_priors.sample(self.number_of_response_curves))
-
-                self.calibration_draws[interferometer.name] = \
-                    np.zeros((self.number_of_response_curves, len(interferometer.frequency_array)), dtype=complex)
-
-                for i in tqdm(range(self.number_of_response_curves)):
-                    self.calibration_draws[interferometer.name][i, :] =\
-                        interferometer.calibration_model.get_calibration_factor(
-                            interferometer.frequency_array,
-                            prefix='recalib_{}_'.format(interferometer.name),
-                            **self.calibration_parameter_draws[interferometer.name].iloc[i])
-
-                calibration.write_calibration_file(
-                    calibration_lookup_table[interferometer.name],
-                    self.interferometers.frequency_array,
-                    self.calibration_draws[interferometer.name],
-                    self.calibration_parameter_draws[interferometer.name])
-
-            interferometer.calibration_model = calibration.Recalibrate()
-
-            _mask = interferometer.frequency_mask
-            self.calibration_draws[interferometer.name] = self.calibration_draws[interferometer.name][:, _mask]
-            self.calibration_abs_draws[interferometer.name] =\
-                np.abs(self.calibration_draws[interferometer.name])**2
-
-    @property
-    def interferometers(self):
-        return self._interferometers
-
-    @interferometers.setter
-    def interferometers(self, interferometers):
-        self._interferometers = InterferometerList(interferometers)
-
-    def _rescale_signal(self, signal, new_distance):
-        for mode in signal:
-            signal[mode] *= self._ref_dist / new_distance
-
-    @property
-    def reference_frame(self):
-        return self._reference_frame
-
-    @property
-    def _reference_frame_str(self):
-        if isinstance(self.reference_frame, str):
-            return self.reference_frame
-        else:
-            return "".join([ifo.name for ifo in self.reference_frame])
-
-    @reference_frame.setter
-    def reference_frame(self, frame):
-        if frame == "sky":
-            self._reference_frame = frame
-        elif isinstance(frame, InterferometerList):
-            self._reference_frame = frame[:2]
-        elif isinstance(frame, list):
-            self._reference_frame = InterferometerList(frame[:2])
-        elif isinstance(frame, str):
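-            # Parse a concatenation of two-character detector names,
-            # e.g., "H1L1" -> InterferometerList(["H1", "L1"]).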
-            self._reference_frame = InterferometerList([frame[:2], frame[2:4]])
-        else:
-            raise ValueError("Unable to parse reference frame {}".format(frame))
-
-    def get_sky_frame_parameters(self):
-        time = self.parameters['{}_time'.format(self.time_reference)]
-        if not self.reference_frame == "sky":
-            ra, dec = zenith_azimuth_to_ra_dec(
-                self.parameters['zenith'], self.parameters['azimuth'],
-                time, self.reference_frame)
-        else:
-            ra = self.parameters["ra"]
-            dec = self.parameters["dec"]
-        if "geocent" not in self.time_reference:
-            geocent_time = (
-                time - self.reference_ifo.time_delay_from_geocenter(
-                    ra=ra, dec=dec, time=time
-                )
-            )
-        else:
-            geocent_time = self.parameters["geocent_time"]
-        return dict(ra=ra, dec=dec, geocent_time=geocent_time)
-
-    @property
-    def lal_version(self):
-        try:
-            from lal import git_version, __version__
-            lal_version = str(__version__)
-            logger.info("Using lal version {}".format(lal_version))
-            lal_git_version = str(git_version.verbose_msg).replace("\n", ";")
-            logger.info("Using lal git version {}".format(lal_git_version))
-            return "lal_version={}, lal_git_version={}".format(lal_version, lal_git_version)
-        except (ImportError, AttributeError):
-            return "N/A"
-
-    @property
-    def lalsimulation_version(self):
-        try:
-            from lalsimulation import git_version, __version__
-            lalsim_version = str(__version__)
-            logger.info("Using lalsimulation version {}".format(lalsim_version))
-            lalsim_git_version = str(git_version.verbose_msg).replace("\n", ";")
-            logger.info("Using lalsimulation git version {}".format(lalsim_git_version))
-            return "lalsimulation_version={}, lalsimulation_git_version={}".format(lalsim_version, lalsim_git_version)
-        except (ImportError, AttributeError):
-            return "N/A"
-
-    @property
-    def meta_data(self):
-        return dict(
-            interferometers=self.interferometers.meta_data,
-            time_marginalization=self.time_marginalization,
-            phase_marginalization=self.phase_marginalization,
-            distance_marginalization=self.distance_marginalization,
-            calibration_marginalization=self.calibration_marginalization,
-            waveform_generator_class=self.waveform_generator.__class__,
-            waveform_arguments=self.waveform_generator.waveform_arguments,
-            frequency_domain_source_model=self.waveform_generator.frequency_domain_source_model,
-            parameter_conversion=self.waveform_generator.parameter_conversion,
-            sampling_frequency=self.waveform_generator.sampling_frequency,
-            duration=self.waveform_generator.duration,
-            start_time=self.waveform_generator.start_time,
-            time_reference=self.time_reference,
-            reference_frame=self._reference_frame_str,
-            lal_version=self.lal_version,
-            lalsimulation_version=self.lalsimulation_version)
-
-
-class BasicGravitationalWaveTransient(Likelihood):
-
-    def __init__(self, interferometers, waveform_generator):
-        """
-
-        A likelihood object, able to compute the likelihood of the data given
-        some model parameters
-
-        The simplest frequency-domain gravitational wave transient likelihood. Does
-        not include distance/phase marginalization.
-
-
-        Parameters
-        ==========
-        interferometers: list
-            A list of `bilby.gw.detector.Interferometer` instances - contains the
-            detector data and power spectral densities
-        waveform_generator: bilby.gw.waveform_generator.WaveformGenerator
-            An object which computes the frequency-domain strain of the signal,
-            given some set of parameters
-
-        """
-        super(BasicGravitationalWaveTransient, self).__init__(dict())
-        self.interferometers = interferometers
-        self.waveform_generator = waveform_generator
-
-    def __repr__(self):
-        return self.__class__.__name__ + '(interferometers={},\n\twaveform_generator={})'\
-            .format(self.interferometers, self.waveform_generator)
-
-    def noise_log_likelihood(self):
-        """ Calculates the real part of noise log-likelihood
-
-        Returns
-        =======
-        float: The real part of the noise log likelihood
-
-        """
-        log_l = 0
-        for interferometer in self.interferometers:
-            log_l -= 2. / self.waveform_generator.duration * np.sum(
-                abs(interferometer.frequency_domain_strain) ** 2 /
-                interferometer.power_spectral_density_array)
-        return log_l.real
-
-    def log_likelihood(self):
-        """ Calculates the real part of log-likelihood value
-
-        Returns
-        =======
-        float: The real part of the log likelihood
-
-        """
-        log_l = 0
-        waveform_polarizations =\
-            self.waveform_generator.frequency_domain_strain(
-                self.parameters.copy())
-        if waveform_polarizations is None:
-            return np.nan_to_num(-np.inf)
-        for interferometer in self.interferometers:
-            log_l += self.log_likelihood_interferometer(
-                waveform_polarizations, interferometer)
-        return log_l.real
-
-    def log_likelihood_interferometer(self, waveform_polarizations,
-                                      interferometer):
-        """
-
-        Parameters
-        ==========
-        waveform_polarizations: dict
-            Dictionary containing the desired waveform polarization modes and the related strain
-        interferometer: bilby.gw.detector.Interferometer
-            The Interferometer object we want to have the log-likelihood for
-
-        Returns
-        =======
-        float: The real part of the log-likelihood for this interferometer
-
-        """
-        signal_ifo = interferometer.get_detector_response(
-            waveform_polarizations, self.parameters)
-
-        log_l = - 2. / self.waveform_generator.duration * np.vdot(
-            interferometer.frequency_domain_strain - signal_ifo,
-            (interferometer.frequency_domain_strain - signal_ifo) /
-            interferometer.power_spectral_density_array)
-        return log_l.real
-
-
-class ROQGravitationalWaveTransient(GravitationalWaveTransient):
-    """A reduced order quadrature likelihood object
-
-    This uses the method described in Smith et al., (2016) Phys. Rev. D 94,
-    044031. A public repository of the ROQ data is available from
-    https://git.ligo.org/lscsoft/ROQ_data.
-
-    Parameters
-    ==========
-    interferometers: list, bilby.gw.detector.InterferometerList
-        A list of `bilby.detector.Interferometer` instances - contains the
-        detector data and power spectral densities
-    waveform_generator: `bilby.waveform_generator.WaveformGenerator`
-        An object which computes the frequency-domain strain of the signal,
-        given some set of parameters
-    linear_matrix: str, array_like
-        Either a string pointing to the file from which to load the linear_matrix
-        array, or the array itself.
-    quadratic_matrix: str, array_like
-        Either a string pointing to the file from which to load the
-        quadratic_matrix array, or the array itself.
-    roq_params: str, array_like
-        Parameters describing the domain of validity of the ROQ basis.
-    roq_params_check: bool
-        If true, run tests using the roq_params to check the prior and data are
-        valid for the ROQ
-    roq_scale_factor: float
-        The ROQ scale factor used.
-    priors: dict, bilby.prior.PriorDict
-        A dictionary of priors containing at least the geocent_time prior
-        Warning: when using marginalisation the dict is overwritten, which will change
-        the dict you are passing in. If this behaviour is undesired, pass `priors.copy()`.
-    distance_marginalization_lookup_table: (dict, str), optional
-        If a dict, dictionary containing the lookup_table, distance_array,
-        (distance) prior_array, and reference_distance used to construct
-        the table.
-        If a string the name of a file containing these quantities.
-        The lookup table is stored after construction in either the
-        provided string or a default location:
-        '.distance_marginalization_lookup_dmin{}_dmax{}_n{}.npz'
-    reference_frame: (str, bilby.gw.detector.InterferometerList, list), optional
-        Definition of the reference frame for the sky location.
-        - "sky": sample in RA/dec, this is the default
-        - e.g., "H1L1", ["H1", "L1"], InterferometerList(["H1", "L1"]):
-          sample in azimuth and zenith, `azimuth` and `zenith` defined in the
-          frame where the z-axis is aligned with the vector connecting H1
-          and L1.
-    time_reference: str, optional
-        Name of the reference for the sampled time parameter.
-        - "geocent"/"geocenter": sample in the time at the Earth's center,
-          this is the default
-        - e.g., "H1": sample in the time of arrival at H1
-
-    """
-    def __init__(
-        self, interferometers, waveform_generator, priors,
-        weights=None, linear_matrix=None, quadratic_matrix=None,
-        roq_params=None, roq_params_check=True, roq_scale_factor=1,
-        distance_marginalization=False, phase_marginalization=False,
-        distance_marginalization_lookup_table=None,
-        reference_frame="sky", time_reference="geocenter"
-    ):
-        super(ROQGravitationalWaveTransient, self).__init__(
-            interferometers=interferometers,
-            waveform_generator=waveform_generator, priors=priors,
-            distance_marginalization=distance_marginalization,
-            phase_marginalization=phase_marginalization,
-            time_marginalization=False,
-            distance_marginalization_lookup_table=distance_marginalization_lookup_table,
-            jitter_time=False,
-            reference_frame=reference_frame,
-            time_reference=time_reference
-        )
-
-        self.roq_params_check = roq_params_check
-        self.roq_scale_factor = roq_scale_factor
-        if isinstance(roq_params, np.ndarray) or roq_params is None:
-            self.roq_params = roq_params
-        elif isinstance(roq_params, str):
-            self.roq_params_file = roq_params
-            self.roq_params = np.genfromtxt(roq_params, names=True)
-        else:
-            raise TypeError("roq_params should be array or str")
-        if isinstance(weights, dict):
-            self.weights = weights
-        elif isinstance(weights, str):
-            self.weights = self.load_weights(weights)
-        else:
-            self.weights = dict()
-            if isinstance(linear_matrix, str):
-                logger.info(
-                    "Loading linear matrix from {}".format(linear_matrix))
-                linear_matrix = np.load(linear_matrix).T
-            if isinstance(quadratic_matrix, str):
-                logger.info(
-                    "Loading quadratic_matrix from {}".format(quadratic_matrix))
-                quadratic_matrix = np.load(quadratic_matrix).T
-            self._set_weights(linear_matrix=linear_matrix,
-                              quadratic_matrix=quadratic_matrix)
-        self.frequency_nodes_linear =\
-            waveform_generator.waveform_arguments['frequency_nodes_linear']
-        self.frequency_nodes_quadratic = \
-            waveform_generator.waveform_arguments['frequency_nodes_quadratic']
-
-    def calculate_snrs(self, waveform_polarizations, interferometer):
-        """
-        Compute the SNRs for ROQ
-
-        Parameters
-        ==========
-        waveform_polarizations: dict
-            Dictionary containing the linear and quadratic polarization modes
-        interferometer: bilby.gw.detector.Interferometer
-
-        """
-
-        f_plus = interferometer.antenna_response(
-            self.parameters['ra'], self.parameters['dec'],
-            self.parameters['geocent_time'], self.parameters['psi'], 'plus')
-        f_cross = interferometer.antenna_response(
-            self.parameters['ra'], self.parameters['dec'],
-            self.parameters['geocent_time'], self.parameters['psi'], 'cross')
-
-        dt = interferometer.time_delay_from_geocenter(
-            self.parameters['ra'], self.parameters['dec'],
-            self.parameters['geocent_time'])
-        dt_geocent = self.parameters['geocent_time'] - interferometer.strain_data.start_time
-        ifo_time = dt_geocent + dt
-
-        calib_linear = interferometer.calibration_model.get_calibration_factor(
-            self.frequency_nodes_linear,
-            prefix='recalib_{}_'.format(interferometer.name), **self.parameters)
-        calib_quadratic = interferometer.calibration_model.get_calibration_factor(
-            self.frequency_nodes_quadratic,
-            prefix='recalib_{}_'.format(interferometer.name), **self.parameters)
-
-        h_plus_linear = f_plus * waveform_polarizations['linear']['plus'] * calib_linear
-        h_cross_linear = f_cross * waveform_polarizations['linear']['cross'] * calib_linear
-        h_plus_quadratic = (
-            f_plus * waveform_polarizations['quadratic']['plus'] * calib_quadratic)
-        h_cross_quadratic = (
-            f_cross * waveform_polarizations['quadratic']['cross'] * calib_quadratic)
-
-        indices, in_bounds = self._closest_time_indices(
-            ifo_time, self.weights['time_samples'])
-        if not in_bounds:
-            logger.debug("SNR calculation error: requested time at edge of ROQ time samples")
-            return self._CalculatedSNRs(
-                d_inner_h=np.nan_to_num(-np.inf), optimal_snr_squared=0,
-                complex_matched_filter_snr=np.nan_to_num(-np.inf),
-                d_inner_h_squared_tc_array=None,
-                d_inner_h_array=None,
-                optimal_snr_squared_array=None)
-
-        d_inner_h_tc_array = np.einsum(
-            'i,ji->j', np.conjugate(h_plus_linear + h_cross_linear),
-            self.weights[interferometer.name + '_linear'][indices])
-
-        d_inner_h = self._interp_five_samples(
-            self.weights['time_samples'][indices], d_inner_h_tc_array, ifo_time)
-
-        optimal_snr_squared = \
-            np.vdot(np.abs(h_plus_quadratic + h_cross_quadratic)**2,
-                    self.weights[interferometer.name + '_quadratic'])
-
-        with np.errstate(invalid="ignore"):
-            complex_matched_filter_snr = d_inner_h / (optimal_snr_squared**0.5)
-        d_inner_h_squared_tc_array = None
-
-        return self._CalculatedSNRs(
-            d_inner_h=d_inner_h, optimal_snr_squared=optimal_snr_squared,
-            complex_matched_filter_snr=complex_matched_filter_snr,
-            d_inner_h_squared_tc_array=d_inner_h_squared_tc_array,
-            d_inner_h_array=None,
-            optimal_snr_squared_array=None)
-
-    @staticmethod
-    def _closest_time_indices(time, samples):
-        """
-        Get the closest five times
-
-        Parameters
-        ==========
-        time: float
-            Time to check
-        samples: array-like
-            Available times
-
-        Returns
-        =======
-        indices: list
-            Indices nearest to time
-        in_bounds: bool
-            Whether the indices are for valid times
-        """
-        closest = int((time - samples[0]) / (samples[1] - samples[0]))
-        indices = [closest + ii for ii in [-2, -1, 0, 1, 2]]
-        in_bounds = (indices[0] >= 0) & (indices[-1] < samples.size)
-        return indices, in_bounds
-
-    @staticmethod
-    def _interp_five_samples(time_samples, values, time):
-        """
-        Interpolate a function of time with its values at the closest five times.
-        The algorithm is explained in https://dcc.ligo.org/T2100224.
-
-        Parameters
-        ==========
-        time_samples: array-like
-            Closest 5 times
-        values: array-like
-            The values of the function at closest 5 times
-        time: float
-            Time at which the function is calculated
-
-        Returns
-        =======
-        value: float
-            The value of the function at the input time
-        """
-        r1 = (-values[0] + 8. * values[1] - 14. * values[2] + 8. * values[3] - values[4]) / 4.
-        r2 = values[2] - 2. * values[3] + values[4]
-        a = (time_samples[3] - time) / (time_samples[1] - time_samples[0])
-        b = 1. - a
-        c = (a**3. - a) / 6.
-        d = (b**3. - b) / 6.
-        return a * values[2] + b * values[3] + c * r1 + d * r2
-
-    def perform_roq_params_check(self, ifo=None):
-        """ Perform checking that the prior and data are valid for the ROQ
-
-        Parameters
-        ==========
-        ifo: bilby.gw.detector.Interferometer
-            The interferometer
-        """
-        if self.roq_params_check is False:
-            logger.warning("No ROQ params checking performed")
-            return
-        else:
-            if getattr(self, "roq_params_file", None) is not None:
-                msg = ("Check ROQ params {} with roq_scale_factor={}"
-                       .format(self.roq_params_file, self.roq_scale_factor))
-            else:
-                msg = ("Check ROQ params with roq_scale_factor={}"
-                       .format(self.roq_scale_factor))
-            logger.info(msg)
-
-        roq_params = self.roq_params
-        roq_minimum_frequency = roq_params['flow'] * self.roq_scale_factor
-        roq_maximum_frequency = roq_params['fhigh'] * self.roq_scale_factor
-        roq_segment_length = roq_params['seglen'] / self.roq_scale_factor
-        roq_minimum_chirp_mass = roq_params['chirpmassmin'] / self.roq_scale_factor
-        roq_maximum_chirp_mass = roq_params['chirpmassmax'] / self.roq_scale_factor
-        roq_minimum_component_mass = roq_params['compmin'] / self.roq_scale_factor
-
-        if ifo.maximum_frequency > roq_maximum_frequency:
-            raise BilbyROQParamsRangeError(
-                "Requested maximum frequency {} larger than ROQ basis fhigh {}"
-                .format(ifo.maximum_frequency, roq_maximum_frequency))
-        if ifo.minimum_frequency < roq_minimum_frequency:
-            raise BilbyROQParamsRangeError(
-                "Requested minimum frequency {} lower than ROQ basis flow {}"
-                .format(ifo.minimum_frequency, roq_minimum_frequency))
-        if ifo.strain_data.duration != roq_segment_length:
-            raise BilbyROQParamsRangeError(
-                "Requested duration differs from ROQ basis seglen")
-
-        priors = self.priors
-        if isinstance(priors, CBCPriorDict) is False:
-            logger.warning("Unable to check ROQ parameter bounds: priors not understood")
-            return
-
-        if priors.minimum_chirp_mass is None:
-            logger.warning("Unable to check minimum chirp mass ROQ bounds")
-        elif priors.minimum_chirp_mass < roq_minimum_chirp_mass:
-            raise BilbyROQParamsRangeError(
-                "Prior minimum chirp mass {} less than ROQ basis bound {}"
-                .format(priors.minimum_chirp_mass,
-                        roq_minimum_chirp_mass))
-
-        if priors.maximum_chirp_mass is None:
-            logger.warning("Unable to check maximum_chirp mass ROQ bounds")
-        elif priors.maximum_chirp_mass > roq_maximum_chirp_mass:
-            raise BilbyROQParamsRangeError(
-                "Prior maximum chirp mass {} greater than ROQ basis bound {}"
-                .format(priors.maximum_chirp_mass,
-                        roq_maximum_chirp_mass))
-
-        if priors.minimum_component_mass is None:
-            logger.warning("Unable to check minimum component mass ROQ bounds")
-        elif priors.minimum_component_mass < roq_minimum_component_mass:
-            raise BilbyROQParamsRangeError(
-                "Prior minimum component mass {} less than ROQ basis bound {}"
-                .format(priors.minimum_component_mass,
-                        roq_minimum_component_mass))
-
-    def _set_weights(self, linear_matrix, quadratic_matrix):
-        """
-        Setup the time-dependent ROQ weights.
-        See https://dcc.ligo.org/LIGO-T2100125 for the detail of how to compute them.
-
-        Parameters
-        ==========
-        linear_matrix, quadratic_matrix: array_like
-            Arrays of the linear and quadratic basis
-
-        """
-
-        time_space = self._get_time_resolution()
-        number_of_time_samples = int(self.interferometers.duration / time_space)
-        try:
-            import pyfftw
-            ifft_input = pyfftw.empty_aligned(number_of_time_samples, dtype=complex)
-            ifft_output = pyfftw.empty_aligned(number_of_time_samples, dtype=complex)
-            ifft = pyfftw.FFTW(ifft_input, ifft_output, direction='FFTW_BACKWARD')
-        except ImportError:
-            pyfftw = None
-            logger.warning("You do not have pyfftw installed, falling back to numpy.fft.")
-            ifft_input = np.zeros(number_of_time_samples, dtype=complex)
-            ifft = np.fft.ifft
-        earth_light_crossing_time = 2 * radius_of_earth / speed_of_light + 5 * time_space
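-        # Restrict the time samples to the time prior range, padded by the
-        # light-crossing time of the Earth (plus a few grid points) to allow
-        # for arrival-time offsets between detectors.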
-        start_idx = max(0, int(np.floor((self.priors['{}_time'.format(self.time_reference)].minimum -
-                        earth_light_crossing_time - self.interferometers.start_time) / time_space)))
-        end_idx = min(number_of_time_samples - 1, int(np.ceil((
-                      self.priors['{}_time'.format(self.time_reference)].maximum + earth_light_crossing_time -
-                      self.interferometers.start_time) / time_space)))
-        self.weights['time_samples'] = np.arange(start_idx, end_idx + 1) * time_space
-        logger.info("Using {} ROQ time samples".format(len(self.weights['time_samples'])))
-
-        for ifo in self.interferometers:
-            if self.roq_params is not None:
-                self.perform_roq_params_check(ifo)
-                # Get scaled ROQ quantities
-                roq_scaled_minimum_frequency = self.roq_params['flow'] * self.roq_scale_factor
-                roq_scaled_maximum_frequency = self.roq_params['fhigh'] * self.roq_scale_factor
-                roq_scaled_segment_length = self.roq_params['seglen'] / self.roq_scale_factor
-                # Generate frequencies for the ROQ
-                roq_frequencies = create_frequency_series(
-                    sampling_frequency=roq_scaled_maximum_frequency * 2,
-                    duration=roq_scaled_segment_length)
-                roq_mask = roq_frequencies >= roq_scaled_minimum_frequency
-                roq_frequencies = roq_frequencies[roq_mask]
-                overlap_frequencies, ifo_idxs, roq_idxs = np.intersect1d(
-                    ifo.frequency_array[ifo.frequency_mask], roq_frequencies,
-                    return_indices=True)
-            else:
-                overlap_frequencies = ifo.frequency_array[ifo.frequency_mask]
-                roq_idxs = np.arange(linear_matrix.shape[0], dtype=int)
-                ifo_idxs = np.arange(sum(ifo.frequency_mask))
-                if len(ifo_idxs) != len(roq_idxs):
-                    raise ValueError(
-                        "Mismatch between ROQ basis and frequency array for "
-                        "{}".format(ifo.name))
-            logger.info(
-                "Building ROQ weights for {} with {} frequencies between {} "
-                "and {}.".format(
-                    ifo.name, len(overlap_frequencies),
-                    min(overlap_frequencies), max(overlap_frequencies)))
-
-            ifft_input[:] *= 0.
-            self.weights[ifo.name + '_linear'] = \
-                np.zeros((len(self.weights['time_samples']), linear_matrix.shape[1]), dtype=complex)
-            data_over_psd = ifo.frequency_domain_strain[ifo.frequency_mask][ifo_idxs] / \
-                ifo.power_spectral_density_array[ifo.frequency_mask][ifo_idxs]
-            nonzero_idxs = ifo_idxs + int(ifo.frequency_array[ifo.frequency_mask][0] * self.interferometers.duration)
-            for i, basis_element in enumerate(linear_matrix[roq_idxs].T):
-                ifft_input[nonzero_idxs] = data_over_psd * np.conj(basis_element)
-                self.weights[ifo.name + '_linear'][:, i] = ifft(ifft_input)[start_idx:end_idx + 1]
-            self.weights[ifo.name + '_linear'] *= 4. * number_of_time_samples / self.interferometers.duration
-
-            self.weights[ifo.name + '_quadratic'] = build_roq_weights(
-                1 /
-                ifo.power_spectral_density_array[ifo.frequency_mask][ifo_idxs],
-                quadratic_matrix[roq_idxs].real,
-                1 / ifo.strain_data.duration)
-
-            logger.info("Finished building weights for {}".format(ifo.name))
-
-        if pyfftw is not None:
-            pyfftw.forget_wisdom()
-
-    def save_weights(self, filename, format='npz'):
-        if format not in filename:
-            filename += "." + format
-        logger.info("Saving ROQ weights to {}".format(filename))
-        if format == 'json':
-            with open(filename, 'w') as file:
-                json.dump(self.weights, file, indent=2, cls=BilbyJsonEncoder)
-        elif format == 'npz':
-            np.savez(filename, **self.weights)
-
-    @staticmethod
-    def load_weights(filename, format=None):
-        if format is None:
-            format = filename.split(".")[-1]
-        if format not in ["json", "npz"]:
-            raise IOError("Format {} not recognized.".format(format))
-        logger.info("Loading ROQ weights from {}".format(filename))
-        if format == "json":
-            with open(filename, 'r') as file:
-                weights = json.load(file, object_hook=decode_bilby_json)
-        elif format == "npz":
-            # Wrap in dict to load data into memory
-            weights = dict(np.load(filename))
-        return weights
-
-    def _get_time_resolution(self):
-        """
-        This method estimates the time resolution given the optimal SNR of the
-        signal in the detector. This is then used when constructing the weights
-        for the ROQ.
-
-        A minimum resolution is set by assuming the SNR in each detector is at
-        least 10. When the SNR is not available the SNR is assumed to be 30 in
-        each detector.
-
-        Returns
-        =======
-        delta_t: float
-            Time resolution
-        """
-
-        def calc_fhigh(freq, psd, scaling=20.):
-            """
-
-            Parameters
-            ==========
-            freq: array-like
-                Frequency array
-            psd: array-like
-                Power spectral density
-            scaling: float
-                SNR dependent scaling factor
-
-            Returns
-            =======
-            f_high: float
-                The maximum frequency which must be considered
-            """
-            from scipy.integrate import simps
-            integrand1 = np.power(freq, -7. / 3) / psd
-            integral1 = simps(integrand1, freq)
-            integrand3 = np.power(freq, 2. / 3.) / (psd * integral1)
-            f_3_bar = simps(integrand3, freq)
-
-            f_high = scaling * f_3_bar**(1 / 3)
-
-            return f_high
-
-        def c_f_scaling(snr):
-            return (np.pi**2 * snr**2 / 6)**(1 / 3)
-
-        inj_snr_sq = 0
-        for ifo in self.interferometers:
-            inj_snr_sq += max(10, ifo.meta_data.get('optimal_SNR', 30))**2
-
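-        # Note: the PSD and frequency array of the last interferometer in the
-        # list are used to estimate the maximum frequency.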
-        psd = ifo.power_spectral_density_array[ifo.frequency_mask]
-        freq = ifo.frequency_array[ifo.frequency_mask]
-        fhigh = calc_fhigh(freq, psd, scaling=c_f_scaling(inj_snr_sq**0.5))
-
-        delta_t = fhigh**-1
-
-        # Apply a safety factor to ensure the time step is short enough
-        delta_t = delta_t / 5
-
-        # duration / delta_t needs to be a power of 2 for IFFT
-        number_of_time_samples = max(
-            self.interferometers.duration / delta_t,
-            self.interferometers.frequency_array[-1] * self.interferometers.duration + 1)
-        number_of_time_samples = int(2**np.ceil(np.log2(number_of_time_samples)))
-        delta_t = self.interferometers.duration / number_of_time_samples
-        logger.info("ROQ time-step = {}".format(delta_t))
-        return delta_t
-
-    def _rescale_signal(self, signal, new_distance):
-        for kind in ['linear', 'quadratic']:
-            for mode in signal[kind]:
-                signal[kind][mode] *= self._ref_dist / new_distance
-
-
-def get_binary_black_hole_likelihood(interferometers):
-    """ A wrapper to quickly set up a likelihood for BBH parameter estimation
-
-    Parameters
-    ==========
-    interferometers: {bilby.gw.detector.InterferometerList, list}
-        A list of `bilby.detector.Interferometer` instances, typically the
-        output of either `bilby.detector.get_interferometer_with_open_data`
-        or `bilby.detector.get_interferometer_with_fake_noise_and_injection`
-
-    Returns
-    =======
-    bilby.GravitationalWaveTransient: The likelihood to pass to `run_sampler`
-
-    """
-    waveform_generator = WaveformGenerator(
-        duration=interferometers.duration,
-        sampling_frequency=interferometers.sampling_frequency,
-        frequency_domain_source_model=lal_binary_black_hole,
-        waveform_arguments={'waveform_approximant': 'IMRPhenomPv2',
-                            'reference_frequency': 50})
-    return GravitationalWaveTransient(interferometers, waveform_generator)
-
-
-class BilbyROQParamsRangeError(Exception):
-    pass
-
-
-class MBGravitationalWaveTransient(GravitationalWaveTransient):
-    """A multi-banded likelihood object
-
-    This uses the method described in S. Morisaki, 2021, arXiv: 2104.07813.
-
-    Parameters
-    ----------
-    interferometers: list, bilby.gw.detector.InterferometerList
-        A list of `bilby.detector.Interferometer` instances - contains the detector data and power spectral densities
-    waveform_generator: `bilby.waveform_generator.WaveformGenerator`
-        An object which computes the frequency-domain strain of the signal, given some set of parameters
-    reference_chirp_mass: float
-        A reference chirp mass for determining the frequency banding
-    highest_mode: int, optional
-        The maximum magnetic number of gravitational-wave moments. Default is 2.
-    linear_interpolation: bool, optional
-        If True, the linear-interpolation method is used for the computation of (h, h). If False, the IFFT-FFT method
-        is used. Default is True.
-    accuracy_factor: float, optional
-        A parameter to determine the accuracy of multi-banding. The larger this factor is, the more accurate the
-        approximation is. This corresponds to L in the paper. Default is 5.
-    time_offset: float, optional
-        (end time of data) - (maximum arrival time). If None, it is inferred from the prior of geocent time.
-    delta_f_end: float, optional
-        The frequency scale with which waveforms at the high-frequency end are smoothed. If None, it is determined from
-        the prior of geocent time.
-    maximum_banding_frequency: float, optional
-        A maximum frequency for multi-banding. If specified, the low-frequency limit of a band does not exceed it.
-    minimum_banding_duration: float, optional
-        A minimum duration for multi-banding. If specified, the duration of a band is not smaller than it.
-    distance_marginalization: bool, optional
-        If true, marginalize over distance in the likelihood. This uses a look up table calculated at run time. The
-        distance prior is set to be a delta function at the minimum distance allowed in the prior being marginalised
-        over.
-    phase_marginalization: bool, optional
-        If true, marginalize over phase in the likelihood. This is done analytically using a Bessel function. The phase
-        prior is set to be a delta function at phase=0.
-    priors: dict, bilby.prior.PriorDict
-        A dictionary of priors containing at least the geocent_time prior
-    distance_marginalization_lookup_table: (dict, str), optional
-        If a dict, dictionary containing the lookup_table, distance_array, (distance) prior_array, and
-        reference_distance used to construct the table. If a string the name of a file containing these quantities. The
-        lookup table is stored after construction in either the provided string or a default location:
-        '.distance_marginalization_lookup_dmin{}_dmax{}_n{}.npz'
-    reference_frame: (str, bilby.gw.detector.InterferometerList, list), optional
-        Definition of the reference frame for the sky location.
-        - "sky": sample in RA/dec, this is the default
-        - e.g., "H1L1", ["H1", "L1"], InterferometerList(["H1", "L1"]):
-          sample in azimuth and zenith, `azimuth` and `zenith` defined in the frame where the z-axis is aligned with the
-          vector connecting H1 and L1.
-    time_reference: str, optional
-        Name of the reference for the sampled time parameter.
-        - "geocent"/"geocenter": sample in the time at the Earth's center, this is the default
-        - e.g., "H1": sample in the time of arrival at H1
-
-    Returns
-    -------
-    Likelihood: `bilby.core.likelihood.Likelihood`
-        A likelihood object, able to compute the likelihood of the data given some model parameters
-
-    """
-    def __init__(
-        self, interferometers, waveform_generator, reference_chirp_mass, highest_mode=2, linear_interpolation=True,
-        accuracy_factor=5, time_offset=None, delta_f_end=None, maximum_banding_frequency=None,
-        minimum_banding_duration=0., distance_marginalization=False, phase_marginalization=False, priors=None,
-        distance_marginalization_lookup_table=None, reference_frame="sky", time_reference="geocenter"
-    ):
-        super(MBGravitationalWaveTransient, self).__init__(
-            interferometers=interferometers, waveform_generator=waveform_generator, priors=priors,
-            distance_marginalization=distance_marginalization, phase_marginalization=phase_marginalization,
-            time_marginalization=False, distance_marginalization_lookup_table=distance_marginalization_lookup_table,
-            jitter_time=False, reference_frame=reference_frame, time_reference=time_reference
-        )
-        self.reference_chirp_mass = reference_chirp_mass
-        self.highest_mode = highest_mode
-        self.linear_interpolation = linear_interpolation
-        self.accuracy_factor = accuracy_factor
-        self.time_offset = time_offset
-        self.delta_f_end = delta_f_end
-        self.minimum_frequency = np.min([i.minimum_frequency for i in self.interferometers])
-        self.maximum_frequency = np.max([i.maximum_frequency for i in self.interferometers])
-        self.maximum_banding_frequency = maximum_banding_frequency
-        self.minimum_banding_duration = minimum_banding_duration
-        self.setup_multibanding()
-
-    @property
-    def reference_chirp_mass(self):
-        return self._reference_chirp_mass
-
-    @property
-    def reference_chirp_mass_in_second(self):
-        return gravitational_constant * self._reference_chirp_mass * solar_mass / speed_of_light**3.
-
-    @reference_chirp_mass.setter
-    def reference_chirp_mass(self, reference_chirp_mass):
-        if isinstance(reference_chirp_mass, int) or isinstance(reference_chirp_mass, float):
-            self._reference_chirp_mass = reference_chirp_mass
-        else:
-            raise TypeError("reference_chirp_mass must be a number")
-
-    @property
-    def highest_mode(self):
-        return self._highest_mode
-
-    @highest_mode.setter
-    def highest_mode(self, highest_mode):
-        if isinstance(highest_mode, int) or isinstance(highest_mode, float):
-            self._highest_mode = highest_mode
-        else:
-            raise TypeError("highest_mode must be a number")
-
-    @property
-    def linear_interpolation(self):
-        return self._linear_interpolation
-
-    @linear_interpolation.setter
-    def linear_interpolation(self, linear_interpolation):
-        if isinstance(linear_interpolation, bool):
-            self._linear_interpolation = linear_interpolation
-        else:
-            raise TypeError("linear_interpolation must be a bool")
-
-    @property
-    def accuracy_factor(self):
-        return self._accuracy_factor
-
-    @accuracy_factor.setter
-    def accuracy_factor(self, accuracy_factor):
-        if isinstance(accuracy_factor, int) or isinstance(accuracy_factor, float):
-            self._accuracy_factor = accuracy_factor
-        else:
-            raise TypeError("accuracy_factor must be a number")
-
-    @property
-    def time_offset(self):
-        return self._time_offset
-
-    @time_offset.setter
-    def time_offset(self, time_offset):
-        """
-        This sets the time offset assumed when frequency bands are constructed. The default value is (the
-        maximum offset of geocent time in the prior range) + (the light-travel time across the Earth). If the
-        prior does not contain 'geocent_time', 2.12 seconds is used, calculated assuming that the
-        maximum offset of geocent time is 2.1 seconds, which is the value for the standard prior used by
-        LIGO-Virgo-KAGRA.
-        """
-        if time_offset is not None:
-            if isinstance(time_offset, int) or isinstance(time_offset, float):
-                self._time_offset = time_offset
-            else:
-                raise TypeError("time_offset must be a number")
-        elif self.priors is not None and 'geocent_time' in self.priors:
-            self._time_offset = self.interferometers.start_time + self.interferometers.duration \
-                - self.priors['geocent_time'].minimum + radius_of_earth / speed_of_light
-        else:
-            self._time_offset = 2.12
-            logger.warning("time offset can not be inferred. Use the standard time offset of {} seconds.".format(
-                self._time_offset))
-
-    @property
-    def delta_f_end(self):
-        return self._delta_f_end
-
-    @delta_f_end.setter
-    def delta_f_end(self, delta_f_end):
-        """
-        This sets the frequency scale over which the high-frequency end of the waveform is tapered, to avoid
-        the issues caused by the abrupt termination of the waveform described in Sec. 2F of arXiv:2104.07813.
-        This needs to be much larger than the inverse of the minimum time offset, and the default value is
-        100 times that. If the prior does not contain 'geocent_time' and the minimum time offset cannot be
-        computed, 53 Hz is used, calculated assuming that the minimum offset of geocent time is 1.9 seconds,
-        which is the value for the standard prior used by LIGO-Virgo-KAGRA.
-        """
-        if delta_f_end is not None:
-            if isinstance(delta_f_end, int) or isinstance(delta_f_end, float):
-                self._delta_f_end = delta_f_end
-            else:
-                raise TypeError("delta_f_end must be a number")
-        elif self.priors is not None and 'geocent_time' in self.priors:
-            self._delta_f_end = 100. / (self.interferometers.start_time + self.interferometers.duration
-                                        - self.priors['geocent_time'].maximum - radius_of_earth / speed_of_light)
-        else:
-            self._delta_f_end = 53.
-            logger.warning("delta_f_end can not be inferred. Use the standard delta_f_end of {} Hz.".format(
-                self._delta_f_end))
-
-    @property
-    def maximum_banding_frequency(self):
-        return self._maximum_banding_frequency
-
-    @maximum_banding_frequency.setter
-    def maximum_banding_frequency(self, maximum_banding_frequency):
-        """
-        This sets the upper limit on the starting frequency of a band. The default value is the frequency at
-        which f - 1 / \sqrt(- d\tau / df) starts to decrease, because the bisection search for the starting
-        frequency does not work beyond that point. The stationary phase approximation is also not valid at
-        such high frequencies. It is calculated from the 0PN formula for the time-to-merger \tau(f). The
-        user-specified frequency is used if it is lower than that frequency.
-        """
-        fmax_tmp = (15. / 968.)**(3. / 5.) * (self.highest_mode / (2. * np.pi))**(8. / 5.) \
-            / self.reference_chirp_mass_in_second
-        if maximum_banding_frequency is not None:
-            if isinstance(maximum_banding_frequency, int) or isinstance(maximum_banding_frequency, float):
-                if maximum_banding_frequency < fmax_tmp:
-                    fmax_tmp = maximum_banding_frequency
-                else:
-                    logger.warning("The input maximum_banding_frequency is too large."
-                                   "It is set to be {} Hz.".format(fmax_tmp))
-            else:
-                raise TypeError("maximum_banding_frequency must be a number")
-        self._maximum_banding_frequency = fmax_tmp
-
-    @property
-    def minimum_banding_duration(self):
-        return self._minimum_banding_duration
-
-    @minimum_banding_duration.setter
-    def minimum_banding_duration(self, minimum_banding_duration):
-        if isinstance(minimum_banding_duration, int) or isinstance(minimum_banding_duration, float):
-            self._minimum_banding_duration = minimum_banding_duration
-        else:
-            raise TypeError("minimum_banding_duration must be a number")
-
-    def setup_multibanding(self):
-        """Set up frequency bands and coefficients needed for likelihood evaluations"""
-        self._setup_frequency_bands()
-        self._setup_integers()
-        self._setup_waveform_frequency_points()
-        self._setup_linear_coefficients()
-        if self.linear_interpolation:
-            self._setup_quadratic_coefficients_linear_interp()
-        else:
-            self._setup_quadratic_coefficients_ifft_fft()
-
-    def _tau(self, f):
-        """Compute time-to-merger from the input frequency. This uses the 0PN formula.
-
-        Parameters
-        ----------
-        f: float
-            input frequency
-
-        Returns
-        -------
-        tau: float
-            time-to-merger
-
-        """
-        f_22 = 2. * f / self.highest_mode
-        return 5. / 256. * self.reference_chirp_mass_in_second * \
-            (np.pi * self.reference_chirp_mass_in_second * f_22)**(-8. / 3.)
-
-    def _dtaudf(self, f):
-        """Compute the derivative of time-to-merger with respect to a starting frequency. This uses the 0PN formula.
-
-        Parameters
-        ----------
-        f: float
-            input frequency
-
-        Returns
-        -------
-        dtaudf: float
-            derivative of time-to-merger
-
-        """
-        f_22 = 2. * f / self.highest_mode
-        return -5. / 96. * self.reference_chirp_mass_in_second * \
-            (np.pi * self.reference_chirp_mass_in_second * f_22)**(-8. / 3.) / f
-
-    def _find_starting_frequency(self, duration, fnow):
-        """Find the starting frequency of the next band satisfying (10) and
-        (51) of arXiv: 2104.07813.
-
-        Parameters
-        ----------
-        duration: float
-            duration of the next band
-        fnow: float
-            starting frequency of the current band
-
-        Returns
-        -------
-        fnext: float or None
-            starting frequency of the next band. None if a frequency satisfying the conditions does not exist.
-        dfnext: float or None
-            frequency scale with which waveforms are smoothed. None if a frequency satisfying the conditions does not
-            exist.
-
-        """
-        def _is_above_fnext(f):
-            "This function returns True if f > fnext"
-            cond1 = duration - self.time_offset - self._tau(f) - \
-                self.accuracy_factor * np.sqrt(-self._dtaudf(f)) > 0.
-            cond2 = f - 1. / np.sqrt(-self._dtaudf(f)) - fnow > 0.
-            return cond1 and cond2
-        # Bisection search for fnext
-        fmin, fmax = fnow, self.maximum_banding_frequency
-        if not _is_above_fnext(fmax):
-            return None, None
-        while fmax - fmin > 1e-2 / duration:
-            f = (fmin + fmax) / 2.
-            if _is_above_fnext(f):
-                fmax = f
-            else:
-                fmin = f
-        return f, 1. / np.sqrt(-self._dtaudf(f))
-
-    def _setup_frequency_bands(self):
-        """Set up frequency bands. The durations of bands geometrically decrease T, T/2. T/4, ..., where T is the
-        original duration. This sets the following instance variables.
-
-        durations: durations of bands (T^(b) in the paper)
-        fb_dfb: the list of tuples, which contain starting frequencies (f^(b) in the paper) and frequency scales for
-        smoothing waveforms (\Delta f^(b) in the paper) of bands
-
-        """
-        self.durations = [self.interferometers.duration]
-        self.fb_dfb = [(self.minimum_frequency, 0.)]
-        dnext = self.interferometers.duration / 2
-        while dnext > max(self.time_offset, self.minimum_banding_duration):
-            fnow, _ = self.fb_dfb[-1]
-            fnext, dfnext = self._find_starting_frequency(dnext, fnow)
-            if fnext is not None and fnext < min(self.maximum_frequency, self.maximum_banding_frequency):
-                self.durations.append(dnext)
-                self.fb_dfb.append((fnext, dfnext))
-                dnext /= 2
-            else:
-                break
-        self.fb_dfb.append((self.maximum_frequency + self.delta_f_end, self.delta_f_end))
-        logger.info("The total frequency range is divided into {} bands with frequency intervals of {}.".format(
-            len(self.durations), ", ".join(["1/{} Hz".format(d) for d in self.durations])))
-
-    def _setup_integers(self):
-        """Set up integers needed for likelihood evaluations. This sets the following instance variables.
-
-        Nbs: the numbers of samples of downsampled data (N^(b) in the paper)
-        Mbs: the numbers of samples of shortened data (M^(b) in the paper)
-        Ks_Ke: start and end frequency indices of bands (K^(b)_s and K^(b)_e in the paper)
-
-        """
-        self.Nbs = []
-        self.Mbs = []
-        self.Ks_Ke = []
-        for b in range(len(self.durations)):
-            dnow = self.durations[b]
-            fnow, dfnow = self.fb_dfb[b]
-            fnext, _ = self.fb_dfb[b + 1]
-            Nb = max(round_up_to_power_of_two(2. * (fnext * self.interferometers.duration + 1.)), 2**b)
-            self.Nbs.append(Nb)
-            self.Mbs.append(Nb // 2**b)
-            self.Ks_Ke.append((math.ceil((fnow - dfnow) * dnow), math.floor(fnext * dnow)))
-
-    def _setup_waveform_frequency_points(self):
-        """Set up frequency points where waveforms are evaluated. Frequency points are reordered because some waveform
-        models raise an error if the input frequencies are not increasing. This adds frequency_points into the
-        waveform_arguments of waveform_generator. This sets the following instance variables.
-
-        banded_frequency_points: ndarray of total banded frequency points
-        start_end_idxs: list of tuples containing start and end indices of each band
-        unique_to_original_frequencies: indices converting unique frequency
-        points into the original duplicated banded frequencies
-
-        """
-        self.banded_frequency_points = np.array([])
-        self.start_end_idxs = []
-        start_idx = 0
-        for i in range(len(self.fb_dfb) - 1):
-            d = self.durations[i]
-            Ks, Ke = self.Ks_Ke[i]
-            self.banded_frequency_points = np.append(self.banded_frequency_points, np.arange(Ks, Ke + 1) / d)
-            end_idx = start_idx + Ke - Ks
-            self.start_end_idxs.append((start_idx, end_idx))
-            start_idx = end_idx + 1
-        unique_frequencies, idxs = np.unique(self.banded_frequency_points, return_inverse=True)
-        self.waveform_generator.waveform_arguments['frequencies'] = unique_frequencies
-        self.unique_to_original_frequencies = idxs
-        logger.info("The number of frequency points where waveforms are evaluated is {}.".format(
-            len(unique_frequencies)))
-        logger.info("The speed-up gain of multi-banding is {}.".format(
-            (self.maximum_frequency - self.minimum_frequency) * self.interferometers.duration /
-            len(unique_frequencies)))
-
-    def _window(self, f, b):
-        """Compute window function in the b-th band
-
-        Parameters
-        ----------
-        f: float or ndarray
-            frequency at which the window function is computed
-        b: int
-            index of the band
-
-        Returns
-        -------
-        window: float or ndarray
-            window function at f
-        """
-        fnow, dfnow = self.fb_dfb[b]
-        fnext, dfnext = self.fb_dfb[b + 1]
-
-        @np.vectorize
-        def _vectorized_window(f):
-            if fnow - dfnow < f < fnow:
-                return (1. + np.cos(np.pi * (f - fnow) / dfnow)) / 2.
-            elif fnow <= f <= fnext - dfnext:
-                return 1.
-            elif fnext - dfnext < f < fnext:
-                return (1. - np.cos(np.pi * (f - fnext) / dfnext)) / 2.
-            else:
-                return 0.
-
-        return _vectorized_window(f)
-
-    def _setup_linear_coefficients(self):
-        """Set up coefficients by which waveforms are multiplied to compute (d, h)"""
-        self.linear_coeffs = dict((ifo.name, np.array([])) for ifo in self.interferometers)
-        N = self.Nbs[-1]
-        for ifo in self.interferometers:
-            logger.info("Pre-computing linear coefficients for {}".format(ifo.name))
-            fddata = np.zeros(N // 2 + 1, dtype=complex)
-            fddata[:len(ifo.frequency_domain_strain)][ifo.frequency_mask] += \
-                ifo.frequency_domain_strain[ifo.frequency_mask] / ifo.power_spectral_density_array[ifo.frequency_mask]
-            for b in range(len(self.fb_dfb) - 1):
-                start_idx, end_idx = self.start_end_idxs[b]
-                windows = self._window(self.banded_frequency_points[start_idx:end_idx + 1], b)
-                fddata_in_ith_band = np.copy(fddata[:int(self.Nbs[b] / 2 + 1)])
-                fddata_in_ith_band[-1] = 0.  # zeroing data at the Nyquist frequency
-                tddata = np.fft.irfft(fddata_in_ith_band)[-self.Mbs[b]:]
-                Ks, Ke = self.Ks_Ke[b]
-                fddata_in_ith_band = np.fft.rfft(tddata)[Ks:Ke + 1]
-                self.linear_coeffs[ifo.name] = np.append(
-                    self.linear_coeffs[ifo.name], (4. / self.durations[b]) * windows * np.conj(fddata_in_ith_band))
-
-    def _setup_quadratic_coefficients_linear_interp(self):
-        """Set up coefficients by which the squares of waveforms are multiplied to compute (h, h) for the
-        linear-interpolation algorithm"""
-        logger.info("Linear-interpolation algorithm is used for (h, h).")
-        self.quadratic_coeffs = dict((ifo.name, np.array([])) for ifo in self.interferometers)
-        N = self.Nbs[-1]
-        for ifo in self.interferometers:
-            logger.info("Pre-computing quadratic coefficients for {}".format(ifo.name))
-            full_frequencies = np.arange(N // 2 + 1) / ifo.duration
-            full_inv_psds = np.zeros(N // 2 + 1)
-            full_inv_psds[:len(ifo.power_spectral_density_array)][ifo.frequency_mask] = \
-                1. / ifo.power_spectral_density_array[ifo.frequency_mask]
-            for i in range(len(self.fb_dfb) - 1):
-                start_idx, end_idx = self.start_end_idxs[i]
-                banded_frequencies = self.banded_frequency_points[start_idx:end_idx + 1]
-                coeffs = np.zeros(len(banded_frequencies))
-                for k in range(len(coeffs) - 1):
-                    if k == 0:
-                        start_idx_in_sum = 0
-                    else:
-                        start_idx_in_sum = math.ceil(ifo.duration * banded_frequencies[k])
-                    if k == len(coeffs) - 2:
-                        end_idx_in_sum = len(full_frequencies) - 1
-                    else:
-                        end_idx_in_sum = math.ceil(ifo.duration * banded_frequencies[k + 1]) - 1
-                    window_over_psd = full_inv_psds[start_idx_in_sum:end_idx_in_sum + 1] \
-                        * self._window(full_frequencies[start_idx_in_sum:end_idx_in_sum + 1], i)
-                    frequencies_in_sum = full_frequencies[start_idx_in_sum:end_idx_in_sum + 1]
-                    coeffs[k] += 4. * self.durations[i] / ifo.duration * np.sum(
-                        (banded_frequencies[k + 1] - frequencies_in_sum) * window_over_psd)
-                    coeffs[k + 1] += 4. * self.durations[i] / ifo.duration \
-                        * np.sum((frequencies_in_sum - banded_frequencies[k]) * window_over_psd)
-                self.quadratic_coeffs[ifo.name] = np.append(self.quadratic_coeffs[ifo.name], coeffs)
-
-    def _setup_quadratic_coefficients_ifft_fft(self):
-        """Set up coefficients needed for the IFFT-FFT algorithm to compute (h, h)"""
-        logger.info("IFFT-FFT algorithm is used for (h, h).")
-        N = self.Nbs[-1]
-        # variables defined below correspond to \hat{N}^(b), \hat{T}^(b), \tilde{I}^(b)_{c, k}, h^(b)_{c, m} and
-        # \sqrt{w^(b)(f^(b)_k)} \tilde{h}(f^(b)_k) in the paper
-        Nhatbs = [min(2 * Mb, Nb) for Mb, Nb in zip(self.Mbs, self.Nbs)]
-        self.Tbhats = [self.interferometers.duration * Nbhat / Nb for Nb, Nbhat in zip(self.Nbs, Nhatbs)]
-        self.Ibcs = dict((ifo.name, []) for ifo in self.interferometers)
-        self.hbcs = dict((ifo.name, []) for ifo in self.interferometers)
-        self.wths = dict((ifo.name, []) for ifo in self.interferometers)
-        for ifo in self.interferometers:
-            logger.info("Pre-computing quadratic coefficients for {}".format(ifo.name))
-            full_inv_psds = np.zeros(N // 2 + 1)
-            full_inv_psds[:len(ifo.power_spectral_density_array)][ifo.frequency_mask] = 1. / \
-                ifo.power_spectral_density_array[ifo.frequency_mask]
-            for b in range(len(self.fb_dfb) - 1):
-                Imb = np.fft.irfft(full_inv_psds[:self.Nbs[b] // 2 + 1])
-                half_length = Nhatbs[b] // 2
-                Imbc = np.append(Imb[:half_length + 1], Imb[-(Nhatbs[b] - half_length - 1):])
-                self.Ibcs[ifo.name].append(np.fft.rfft(Imbc))
-                # Allocate arrays for IFFT-FFT operations
-                self.hbcs[ifo.name].append(np.zeros(Nhatbs[b]))
-                self.wths[ifo.name].append(np.zeros(self.Mbs[b] // 2 + 1, dtype=complex))
-        # precompute windows and their squares
-        self.windows = np.array([])
-        self.square_root_windows = np.array([])
-        for b in range(len(self.fb_dfb) - 1):
-            start, end = self.start_end_idxs[b]
-            ws = self._window(self.banded_frequency_points[start:end + 1], b)
-            self.windows = np.append(self.windows, ws)
-            self.square_root_windows = np.append(self.square_root_windows, np.sqrt(ws))
-
-    def calculate_snrs(self, waveform_polarizations, interferometer):
-        """
-        Compute the snrs for multi-banding
-
-        Parameters
-        ----------
-        waveform_polarizations: waveform
-        interferometer: bilby.gw.detector.Interferometer
-
-        Returns
-        -------
-        snrs: named tuple of snrs
-
-        """
-        f_plus = interferometer.antenna_response(
-            self.parameters['ra'], self.parameters['dec'],
-            self.parameters['geocent_time'], self.parameters['psi'], 'plus')
-        f_cross = interferometer.antenna_response(
-            self.parameters['ra'], self.parameters['dec'],
-            self.parameters['geocent_time'], self.parameters['psi'], 'cross')
-
-        dt = interferometer.time_delay_from_geocenter(
-            self.parameters['ra'], self.parameters['dec'],
-            self.parameters['geocent_time'])
-        dt_geocent = self.parameters['geocent_time'] - interferometer.strain_data.start_time
-        ifo_time = dt_geocent + dt
-
-        calib_factor = interferometer.calibration_model.get_calibration_factor(
-            self.banded_frequency_points, prefix='recalib_{}_'.format(interferometer.name), **self.parameters)
-
-        h = f_plus * waveform_polarizations['plus'][self.unique_to_original_frequencies] \
-            + f_cross * waveform_polarizations['cross'][self.unique_to_original_frequencies]
-        h *= np.exp(-1j * 2. * np.pi * self.banded_frequency_points * ifo_time)
-        h *= np.conjugate(calib_factor)
-
-        d_inner_h = np.dot(h, self.linear_coeffs[interferometer.name])
-
-        if self.linear_interpolation:
-            optimal_snr_squared = np.vdot(np.real(h * np.conjugate(h)), self.quadratic_coeffs[interferometer.name])
-        else:
-            optimal_snr_squared = 0.
-            for b in range(len(self.fb_dfb) - 1):
-                Ks, Ke = self.Ks_Ke[b]
-                start_idx, end_idx = self.start_end_idxs[b]
-                Mb = self.Mbs[b]
-                if b == 0:
-                    optimal_snr_squared += (4. / self.interferometers.duration) * np.vdot(
-                        np.real(h[start_idx:end_idx + 1] * np.conjugate(h[start_idx:end_idx + 1])),
-                        interferometer.frequency_mask[Ks:Ke + 1] * self.windows[start_idx:end_idx + 1]
-                        / interferometer.power_spectral_density_array[Ks:Ke + 1])
-                else:
-                    self.wths[interferometer.name][b][Ks:Ke + 1] = self.square_root_windows[start_idx:end_idx + 1] \
-                        * h[start_idx:end_idx + 1]
-                    self.hbcs[interferometer.name][b][-Mb:] = np.fft.irfft(self.wths[interferometer.name][b])
-                    thbc = np.fft.rfft(self.hbcs[interferometer.name][b])
-                    optimal_snr_squared += (4. / self.Tbhats[b]) * np.vdot(
-                        np.real(thbc * np.conjugate(thbc)), self.Ibcs[interferometer.name][b])
-
-        complex_matched_filter_snr = d_inner_h / (optimal_snr_squared**0.5)
-
-        return self._CalculatedSNRs(
-            d_inner_h=d_inner_h, optimal_snr_squared=optimal_snr_squared,
-            complex_matched_filter_snr=complex_matched_filter_snr,
-            d_inner_h_squared_tc_array=None,
-            d_inner_h_array=None,
-            optimal_snr_squared_array=None)
-
-    def _rescale_signal(self, signal, new_distance):
-        for mode in signal:
-            signal[mode] *= self._ref_dist / new_distance
diff --git a/bilby/gw/likelihood/__init__.py b/bilby/gw/likelihood/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..88fb527ad579513e39c4cda69e0d20a9c1d38e84
--- /dev/null
+++ b/bilby/gw/likelihood/__init__.py
@@ -0,0 +1,33 @@
+from .base import GravitationalWaveTransient
+from .basic import BasicGravitationalWaveTransient
+from .roq import BilbyROQParamsRangeError, ROQGravitationalWaveTransient
+from .multiband import MBGravitationalWaveTransient
+
+from ..source import lal_binary_black_hole
+from ..waveform_generator import WaveformGenerator
+
+
+def get_binary_black_hole_likelihood(interferometers):
+    """ A wrapper to quickly set up a likelihood for BBH parameter estimation
+
+    Parameters
+    ==========
+    interferometers: {bilby.gw.detector.InterferometerList, list}
+        A list of `bilby.detector.Interferometer` instances, typically the
+        output of either `bilby.detector.get_interferometer_with_open_data`
+        or `bilby.detector.get_interferometer_with_fake_noise_and_injection`
+
+    Returns
+    =======
+    bilby.GravitationalWaveTransient: The likelihood to pass to `run_sampler`
+
+    """
+    waveform_generator = WaveformGenerator(
+        duration=interferometers.duration,
+        sampling_frequency=interferometers.sampling_frequency,
+        frequency_domain_source_model=lal_binary_black_hole,
+        waveform_arguments={'waveform_approximant': 'IMRPhenomPv2',
+                            'reference_frequency': 50})
+    return GravitationalWaveTransient(interferometers, waveform_generator)
+
+
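+# A minimal usage sketch of the wrapper above, assuming simulated Gaussian
+# noise coloured by each detector's default power spectral density (the
+# numbers below are illustrative, not defaults of this module):
+#
+# >>> from bilby.gw.detector import InterferometerList
+# >>> ifos = InterferometerList(["H1", "L1"])
+# >>> ifos.set_strain_data_from_power_spectral_densities(
+# ...     sampling_frequency=2048, duration=4)
+# >>> likelihood = get_binary_black_hole_likelihood(ifos)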
diff --git a/bilby/gw/likelihood/base.py b/bilby/gw/likelihood/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..b468ff67ed793a9644288b08d5b0b0758d37ca49
--- /dev/null
+++ b/bilby/gw/likelihood/base.py
@@ -0,0 +1,1110 @@
+
+import os
+import copy
+
+import attr
+import numpy as np
+import pandas as pd
+from scipy.special import logsumexp
+
+from ...core.likelihood import Likelihood
+from ...core.utils import logger, UnsortedInterp2d, create_time_series
+from ...core.prior import Interped, Prior, Uniform, PriorDict, DeltaFunction
+from ..detector import InterferometerList, get_empty_interferometer, calibration
+from ..prior import BBHPriorDict, Cosmological
+from ..utils import noise_weighted_inner_product, zenith_azimuth_to_ra_dec, ln_i0
+
+
+class GravitationalWaveTransient(Likelihood):
+    """ A gravitational-wave transient likelihood object
+
+    This is the usual likelihood object to use for transient gravitational
+    wave parameter estimation. It computes the log-likelihood in the frequency
+    domain assuming a colored Gaussian noise model described by a power
+    spectral density. See Thrane & Talbot (2019), arxiv.org/abs/1809.02293.
+
+    Parameters
+    ==========
+    interferometers: list, bilby.gw.detector.InterferometerList
+        A list of `bilby.detector.Interferometer` instances - contains the
+        detector data and power spectral densities
+    waveform_generator: `bilby.waveform_generator.WaveformGenerator`
+        An object which computes the frequency-domain strain of the signal,
+        given some set of parameters
+    distance_marginalization: bool, optional
+        If true, marginalize over distance in the likelihood.
+        This uses a look up table calculated at run time.
+        The distance prior is set to be a delta function at the reference
+        distance (the median of the prior being marginalised over).
+    time_marginalization: bool, optional
+        If true, marginalize over time in the likelihood.
+        This uses a FFT to calculate the likelihood over a regularly spaced
+        grid.
+        In order to cover the whole space the prior is set to be uniform over
+        the spacing of the array of times.
+        If using time marginalisation and jitter_time is True a "jitter"
+        parameter is added to the prior which modifies the position of the
+        grid of times.
+    phase_marginalization: bool, optional
+        If true, marginalize over phase in the likelihood.
+        This is done analytically using a Bessel function.
+        The phase prior is set to be a delta function at phase=0.
+    calibration_marginalization: bool, optional
+        If true, marginalize over calibration response curves in the likelihood.
+        This is done numerically over a number of calibration response curve realizations.
+    priors: dict, optional
+        If given, used in the distance and phase marginalization.
+        Warning: when using marginalisation the dict is overwritten, which will
+        change the dict you are passing in. If this behaviour is undesired, pass `priors.copy()`.
+    distance_marginalization_lookup_table: (dict, str), optional
+        If a dict, dictionary containing the lookup_table, distance_array,
+        (distance) prior_array, and reference_distance used to construct
+        the table.
+        If a string the name of a file containing these quantities.
+        The lookup table is stored after construction in either the
+        provided string or a default location:
+        '.distance_marginalization_lookup_dmin{}_dmax{}_n{}.npz'
+    calibration_lookup_table: dict, optional
+        If a dict, contains the arrays over which to marginalize for each interferometer or the filepaths of the
+        calibration files.
+        If not provided, but calibration_marginalization is used, then the appropriate file is created to
+        contain the curves.
+    number_of_response_curves: int, optional
+        Number of curves from the calibration lookup table to use.
+        Default is 1000.
+    starting_index: int, optional
+        Sets the index for the first realization of the calibration curve to be considered.
+        This, coupled with number_of_response_curves, allows for restricting the set of curves used. This can be used
+        when dealing with large frequency arrays to split the calculation into sections.
+        Defaults to 0.
+    jitter_time: bool, optional
+        Whether to introduce a `time_jitter` parameter. This avoids either
+        missing the likelihood peak, or introducing biases in the
+        reconstructed time posterior due to an insufficient sampling frequency.
+        Default is True; using this parameter is strongly encouraged.
+    reference_frame: (str, bilby.gw.detector.InterferometerList, list), optional
+        Definition of the reference frame for the sky location.
+
+        - :code:`sky`: sample in RA/dec, this is the default
+        - e.g., :code:`"H1L1", ["H1", "L1"], InterferometerList(["H1", "L1"])`:
+          sample in azimuth and zenith, `azimuth` and `zenith` defined in the
+          frame where the z-axis is aligned with the vector connecting H1
+          and L1.
+
+    time_reference: str, optional
+        Name of the reference for the sampled time parameter.
+
+        - :code:`geocent`/:code:`geocenter`: sample in the time at the
+          Earth's center, this is the default
+        - e.g., :code:`H1`: sample in the time of arrival at H1
+
+    Returns
+    =======
+    Likelihood: `bilby.core.likelihood.Likelihood`
+        A likelihood object, able to compute the likelihood of the data given
+        some model parameters
+
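+    Examples
+    ========
+    A minimal sketch, assuming `ifos` is a populated
+    `bilby.gw.detector.InterferometerList` and `wfg` is a compatible
+    `bilby.gw.waveform_generator.WaveformGenerator`:
+
+    >>> from bilby.gw.prior import BBHPriorDict
+    >>> likelihood = GravitationalWaveTransient(
+    ...     ifos, wfg, phase_marginalization=True, priors=BBHPriorDict())
+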
+    """
+
+    @attr.s
+    class _CalculatedSNRs:
+        d_inner_h = attr.ib()
+        optimal_snr_squared = attr.ib()
+        complex_matched_filter_snr = attr.ib()
+        d_inner_h_array = attr.ib()
+        optimal_snr_squared_array = attr.ib()
+        d_inner_h_squared_tc_array = attr.ib()
+
+    def __init__(
+            self, interferometers, waveform_generator, time_marginalization=False,
+            distance_marginalization=False, phase_marginalization=False, calibration_marginalization=False, priors=None,
+            distance_marginalization_lookup_table=None, calibration_lookup_table=None,
+            number_of_response_curves=1000, starting_index=0, jitter_time=True, reference_frame="sky",
+            time_reference="geocenter"
+    ):
+
+        self.waveform_generator = waveform_generator
+        super(GravitationalWaveTransient, self).__init__(dict())
+        self.interferometers = InterferometerList(interferometers)
+        self.time_marginalization = time_marginalization
+        self.distance_marginalization = distance_marginalization
+        self.phase_marginalization = phase_marginalization
+        self.calibration_marginalization = calibration_marginalization
+        self.priors = priors
+        self._check_set_duration_and_sampling_frequency_of_waveform_generator()
+        self.jitter_time = jitter_time
+        self.reference_frame = reference_frame
+        if "geocent" not in time_reference:
+            self.time_reference = time_reference
+            self.reference_ifo = get_empty_interferometer(self.time_reference)
+            if self.time_marginalization:
+                logger.info("Cannot marginalise over non-geocenter time.")
+                self.time_marginalization = False
+                self.jitter_time = False
+        else:
+            self.time_reference = "geocent"
+            self.reference_ifo = None
+
+        if self.time_marginalization:
+            self._check_marginalized_prior_is_set(key='geocent_time')
+            self._setup_time_marginalization()
+            priors['geocent_time'] = float(self.interferometers.start_time)
+            if self.jitter_time:
+                priors['time_jitter'] = Uniform(
+                    minimum=- self._delta_tc / 2,
+                    maximum=self._delta_tc / 2,
+                    boundary='periodic',
+                    name="time_jitter",
+                    latex_label="$t_j$"
+                )
+            self._marginalized_parameters.append('geocent_time')
+        elif self.jitter_time:
+            logger.debug(
+                "Time jittering requested with non-time-marginalised "
+                "likelihood, ignoring.")
+            self.jitter_time = False
+
+        if self.phase_marginalization:
+            self._check_marginalized_prior_is_set(key='phase')
+            priors['phase'] = float(0)
+            self._marginalized_parameters.append('phase')
+
+        if self.distance_marginalization:
+            self._lookup_table_filename = None
+            self._check_marginalized_prior_is_set(key='luminosity_distance')
+            self._distance_array = np.linspace(
+                self.priors['luminosity_distance'].minimum,
+                self.priors['luminosity_distance'].maximum, int(1e4))
+            self.distance_prior_array = np.array(
+                [self.priors['luminosity_distance'].prob(distance)
+                 for distance in self._distance_array])
+            self._ref_dist = self.priors['luminosity_distance'].rescale(0.5)
+            self._setup_distance_marginalization(
+                distance_marginalization_lookup_table)
+            for key in ['redshift', 'comoving_distance']:
+                if key in priors:
+                    del priors[key]
+            priors['luminosity_distance'] = float(self._ref_dist)
+            self._marginalized_parameters.append('luminosity_distance')
+
+        if self.calibration_marginalization:
+            self.number_of_response_curves = number_of_response_curves
+            self.starting_index = starting_index
+            self._setup_calibration_marginalization(calibration_lookup_table)
+            self._marginalized_parameters.append('recalib_index')
+
+    def __repr__(self):
+        return self.__class__.__name__ + '(interferometers={},\n\twaveform_generator={},\n\ttime_marginalization={}, ' \
+                                         'distance_marginalization={}, phase_marginalization={}, ' \
+                                         'calibration_marginalization={}, priors={})' \
+            .format(self.interferometers, self.waveform_generator, self.time_marginalization,
+                    self.distance_marginalization, self.phase_marginalization, self.calibration_marginalization,
+                    self.priors)
+
+    def _check_set_duration_and_sampling_frequency_of_waveform_generator(self):
+        """ Check the waveform_generator has the same duration and
+        sampling_frequency as the interferometers. If they are unset, then
+        set them, if they differ, raise an error
+        """
+
+        attributes = ['duration', 'sampling_frequency', 'start_time']
+        for attribute in attributes:
+            wfg_attr = getattr(self.waveform_generator, attribute)
+            ifo_attr = getattr(self.interferometers, attribute)
+            if wfg_attr is None:
+                logger.debug(
+                    "The waveform_generator {} is None. Setting from the "
+                    "provided interferometers.".format(attribute))
+            elif wfg_attr != ifo_attr:
+                logger.debug(
+                    "The waveform_generator {} is not equal to that of the "
+                    "provided interferometers. Overwriting the "
+                    "waveform_generator.".format(attribute))
+            setattr(self.waveform_generator, attribute, ifo_attr)
+
+    def calculate_snrs(self, waveform_polarizations, interferometer):
+        """
+        Compute the SNRs
+
+        Parameters
+        ==========
+        waveform_polarizations: dict
+            A dictionary of waveform polarizations and the corresponding array
+        interferometer: bilby.gw.detector.Interferometer
+            The bilby interferometer object
+
+        Returns
+        =======
+        _CalculatedSNRs
+            A container holding the (d|h) and (h|h) inner products and the
+            derived matched-filter SNRs for this interferometer.
+
+        """
+        signal = interferometer.get_detector_response(
+            waveform_polarizations, self.parameters)
+        _mask = interferometer.frequency_mask
+
+        if 'recalib_index' in self.parameters:
+            signal[_mask] *= self.calibration_draws[interferometer.name][int(self.parameters['recalib_index'])]
+
+        d_inner_h = interferometer.inner_product(signal=signal)
+        optimal_snr_squared = interferometer.optimal_snr_squared(signal=signal)
+        complex_matched_filter_snr = d_inner_h / (optimal_snr_squared**0.5)
+
+        d_inner_h_array = None
+        optimal_snr_squared_array = None
+
+        normalization = 4 / self.waveform_generator.duration
+
+        if self.time_marginalization and self.calibration_marginalization:
+
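+            # Build the (frequency, draws) integrand, weight it by each
+            # calibration draw, then FFT over the frequency axis so that each
+            # row of the transposed result gives <d, h> on the discrete grid
+            # of coalescence times for one calibration draw (the FFT trick
+            # used for time marginalisation, cf. arXiv:1809.02293).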
+            d_inner_h_integrand = np.tile(
+                interferometer.frequency_domain_strain.conjugate() * signal /
+                interferometer.power_spectral_density_array, (self.number_of_response_curves, 1)).T
+
+            d_inner_h_integrand[_mask] *= self.calibration_draws[interferometer.name].T
+
+            d_inner_h_array = normalization * np.fft.fft(
+                d_inner_h_integrand[0:-1], axis=0
+            ).T
+
+            optimal_snr_squared_integrand = (
+                normalization * np.abs(signal)**2 / interferometer.power_spectral_density_array
+            )
+            optimal_snr_squared_array = np.dot(
+                optimal_snr_squared_integrand[_mask],
+                self.calibration_abs_draws[interferometer.name].T
+            )
+
+        elif self.time_marginalization and not self.calibration_marginalization:
+            d_inner_h_array = normalization * np.fft.fft(
+                signal[0:-1]
+                * interferometer.frequency_domain_strain.conjugate()[0:-1]
+                / interferometer.power_spectral_density_array[0:-1]
+            )
+
+        elif self.calibration_marginalization and ('recalib_index' not in self.parameters):
+            d_inner_h_integrand = (
+                normalization *
+                interferometer.frequency_domain_strain.conjugate() * signal
+                / interferometer.power_spectral_density_array
+            )
+            d_inner_h_array = np.dot(d_inner_h_integrand[_mask], self.calibration_draws[interferometer.name].T)
+
+            optimal_snr_squared_integrand = (
+                normalization * np.abs(signal)**2 / interferometer.power_spectral_density_array
+            )
+            optimal_snr_squared_array = np.dot(
+                optimal_snr_squared_integrand[_mask],
+                self.calibration_abs_draws[interferometer.name].T
+            )
+
+        return self._CalculatedSNRs(
+            d_inner_h=d_inner_h, optimal_snr_squared=optimal_snr_squared,
+            complex_matched_filter_snr=complex_matched_filter_snr,
+            d_inner_h_array=d_inner_h_array,
+            optimal_snr_squared_array=optimal_snr_squared_array,
+            d_inner_h_squared_tc_array=None)
+
+    def _check_marginalized_prior_is_set(self, key):
+        if key in self.priors and self.priors[key].is_fixed:
+            raise ValueError(
+                "Cannot use marginalized likelihood for {}: prior is fixed".format(key)
+            )
+        if key not in self.priors or not isinstance(
+                self.priors[key], Prior):
+            logger.warning(
+                'Prior not provided for {}, using the BBH default.'.format(key))
+            if key == 'geocent_time':
+                self.priors[key] = Uniform(
+                    self.interferometers.start_time,
+                    self.interferometers.start_time + self.interferometers.duration)
+            elif key == 'luminosity_distance':
+                for key in ['redshift', 'comoving_distance']:
+                    if key in self.priors:
+                        if not isinstance(self.priors[key], Cosmological):
+                            raise TypeError(
+                                "To marginalize over {}, the prior must be specified as a "
+                                "subclass of bilby.gw.prior.Cosmological.".format(key)
+                            )
+                        self.priors['luminosity_distance'] = self.priors[key].get_corresponding_prior(
+                            'luminosity_distance'
+                        )
+                        del self.priors[key]
+            else:
+                self.priors[key] = BBHPriorDict()[key]
+
+    @property
+    def priors(self):
+        return self._prior
+
+    @priors.setter
+    def priors(self, priors):
+        if priors is not None:
+            self._prior = priors.copy()
+        elif any([self.time_marginalization, self.phase_marginalization,
+                  self.distance_marginalization]):
+            raise ValueError("You can't use a marginalized likelihood without specifying a priors")
+        else:
+            self._prior = None
+
+    def noise_log_likelihood(self):
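+        # Gaussian-noise log likelihood: -(d|d)/2 summed over detectors,
+        # where (.|.) is the PSD-weighted noise-weighted inner product.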
+        log_l = 0
+        for interferometer in self.interferometers:
+            mask = interferometer.frequency_mask
+            log_l -= noise_weighted_inner_product(
+                interferometer.frequency_domain_strain[mask],
+                interferometer.frequency_domain_strain[mask],
+                interferometer.power_spectral_density_array[mask],
+                self.waveform_generator.duration) / 2
+        return float(np.real(log_l))
+
+    def log_likelihood_ratio(self):
+        waveform_polarizations = \
+            self.waveform_generator.frequency_domain_strain(self.parameters)
+
+        self.parameters.update(self.get_sky_frame_parameters())
+
+        if waveform_polarizations is None:
+            return np.nan_to_num(-np.inf)
+
+        d_inner_h = 0.
+        optimal_snr_squared = 0.
+        complex_matched_filter_snr = 0.
+
+        if self.time_marginalization and self.calibration_marginalization:
+            if self.jitter_time:
+                self.parameters['geocent_time'] += self.parameters['time_jitter']
+
+            d_inner_h_array = np.zeros(
+                (self.number_of_response_curves, len(self.interferometers.frequency_array[0:-1])),
+                dtype=np.complex128)
+            optimal_snr_squared_array = np.zeros(self.number_of_response_curves, dtype=np.complex128)
+
+        elif self.time_marginalization:
+            if self.jitter_time:
+                self.parameters['geocent_time'] += self.parameters['time_jitter']
+            d_inner_h_array = np.zeros(
+                len(self.interferometers.frequency_array[0:-1]),
+                dtype=np.complex128)
+
+        elif self.calibration_marginalization:
+            d_inner_h_array = np.zeros(self.number_of_response_curves, dtype=np.complex128)
+            optimal_snr_squared_array = np.zeros(self.number_of_response_curves, dtype=np.complex128)
+
+        for interferometer in self.interferometers:
+            per_detector_snr = self.calculate_snrs(
+                waveform_polarizations=waveform_polarizations,
+                interferometer=interferometer)
+
+            d_inner_h += per_detector_snr.d_inner_h
+            optimal_snr_squared += np.real(per_detector_snr.optimal_snr_squared)
+            complex_matched_filter_snr += per_detector_snr.complex_matched_filter_snr
+
+            if self.time_marginalization or self.calibration_marginalization:
+                d_inner_h_array += per_detector_snr.d_inner_h_array
+
+            if self.calibration_marginalization:
+                optimal_snr_squared_array += per_detector_snr.optimal_snr_squared_array
+
+        if self.calibration_marginalization and self.time_marginalization:
+            log_l = self.time_and_calibration_marginalized_likelihood(
+                d_inner_h_array=d_inner_h_array,
+                h_inner_h=optimal_snr_squared_array)
+            if self.jitter_time:
+                self.parameters['geocent_time'] -= self.parameters['time_jitter']
+
+        elif self.calibration_marginalization:
+            log_l = self.calibration_marginalized_likelihood(
+                d_inner_h_calibration_array=d_inner_h_array,
+                h_inner_h=optimal_snr_squared_array)
+
+        elif self.time_marginalization:
+            log_l = self.time_marginalized_likelihood(
+                d_inner_h_tc_array=d_inner_h_array,
+                h_inner_h=optimal_snr_squared)
+            if self.jitter_time:
+                self.parameters['geocent_time'] -= self.parameters['time_jitter']
+
+        elif self.distance_marginalization:
+            log_l = self.distance_marginalized_likelihood(
+                d_inner_h=d_inner_h, h_inner_h=optimal_snr_squared)
+
+        elif self.phase_marginalization:
+            log_l = self.phase_marginalized_likelihood(
+                d_inner_h=d_inner_h, h_inner_h=optimal_snr_squared)
+
+        else:
+            log_l = np.real(d_inner_h) - optimal_snr_squared / 2
+
+        return float(log_l.real)
+
+    def generate_posterior_sample_from_marginalized_likelihood(self):
+        """
+        Reconstruct posterior samples for the parameters which were
+        explicitly marginalised over in the likelihood
+        (time/distance/phase/calibration).
+
+        See Eq. (C29-C32) of https://arxiv.org/abs/1809.02293
+
+        Returns
+        =======
+        sample: dict
+            Returns the parameters with new samples.
+
+        Notes
+        =====
+        This involves a deepcopy of the signal to avoid issues with waveform
+        caching, as the signal is overwritten in place.
+        """
+        if any([self.phase_marginalization, self.distance_marginalization,
+                self.time_marginalization, self.calibration_marginalization]):
+            signal_polarizations = copy.deepcopy(
+                self.waveform_generator.frequency_domain_strain(
+                    self.parameters))
+        else:
+            return self.parameters
+
+        if self.calibration_marginalization and self.time_marginalization:
+            raise AttributeError(
+                "Cannot use time and calibration marginalization simultaneously for regeneration at the moment!"
+                "The matrix manipulation has not been tested.")
+
+        if self.calibration_marginalization:
+            new_calibration = self.generate_calibration_sample_from_marginalized_likelihood(
+                signal_polarizations=signal_polarizations)
+            self.parameters['recalib_index'] = new_calibration
+        if self.time_marginalization:
+            new_time = self.generate_time_sample_from_marginalized_likelihood(
+                signal_polarizations=signal_polarizations)
+            self.parameters['geocent_time'] = new_time
+        if self.distance_marginalization:
+            new_distance = self.generate_distance_sample_from_marginalized_likelihood(
+                signal_polarizations=signal_polarizations)
+            self.parameters['luminosity_distance'] = new_distance
+        if self.phase_marginalization:
+            new_phase = self.generate_phase_sample_from_marginalized_likelihood(
+                signal_polarizations=signal_polarizations)
+            self.parameters['phase'] = new_phase
+        return self.parameters.copy()
+
+    def generate_calibration_sample_from_marginalized_likelihood(
+            self, signal_polarizations=None):
+        """
+        Generate a single sample from the posterior distribution for the set of calibration response curves when
+        explicitly marginalizing over the calibration uncertainty.
+
+        Parameters
+        ----------
+        signal_polarizations: dict, optional
+            Polarizations modes of the template.
+
+        Returns
+        -------
+        new_calibration: dict
+            Sample set from the calibration posterior
+        """
+        if 'recalib_index' in self.parameters:
+            self.parameters.pop('recalib_index')
+        self.parameters.update(self.get_sky_frame_parameters())
+        if signal_polarizations is None:
+            signal_polarizations = \
+                self.waveform_generator.frequency_domain_strain(self.parameters)
+
+        log_like = self.get_calibration_log_likelihoods(signal_polarizations=signal_polarizations)
+
+        calibration_post = np.exp(log_like - max(log_like))
+        calibration_post /= np.sum(calibration_post)
+
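+        # Draw one curve index from the discrete posterior over the
+        # pre-computed calibration response curves.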
+        new_calibration = np.random.choice(self.number_of_response_curves, p=calibration_post)
+
+        return new_calibration
+
+    def generate_time_sample_from_marginalized_likelihood(
+            self, signal_polarizations=None):
+        """
+        Generate a single sample from the posterior distribution for coalescence
+        time when using a likelihood which explicitly marginalises over time.
+
+        In order to resolve the posterior we artificially upsample to 16 kHz.
+
+        See Eq. (C29-C32) of https://arxiv.org/abs/1809.02293
+
+        Parameters
+        ==========
+        signal_polarizations: dict, optional
+            Polarizations modes of the template.
+
+        Returns
+        =======
+        new_time: float
+            Sample from the time posterior.
+        """
+        self.parameters.update(self.get_sky_frame_parameters())
+        if self.jitter_time:
+            self.parameters['geocent_time'] += self.parameters['time_jitter']
+        if signal_polarizations is None:
+            signal_polarizations = \
+                self.waveform_generator.frequency_domain_strain(self.parameters)
+
+        times = create_time_series(
+            sampling_frequency=16384,
+            starting_time=self.parameters['geocent_time'] - self.waveform_generator.start_time,
+            duration=self.waveform_generator.duration)
+        times = times % self.waveform_generator.duration
+        times += self.waveform_generator.start_time
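+        # The modulo wraps the upsampled grid into the observation window so
+        # that it matches the circular time grid implied by the FFT below.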
+
+        prior = self.priors["geocent_time"]
+        in_prior = (times >= prior.minimum) & (times < prior.maximum)
+        times = times[in_prior]
+
+        n_time_steps = int(self.waveform_generator.duration * 16384)
+        d_inner_h = np.zeros(len(times), dtype=complex)
+        psd = np.ones(n_time_steps)
+        signal_long = np.zeros(n_time_steps, dtype=complex)
+        data = np.zeros(n_time_steps, dtype=complex)
+        h_inner_h = np.zeros(1)
+        for ifo in self.interferometers:
+            ifo_length = len(ifo.frequency_domain_strain)
+            mask = ifo.frequency_mask
+            signal = ifo.get_detector_response(
+                signal_polarizations, self.parameters)
+            signal_long[:ifo_length] = signal
+            data[:ifo_length] = np.conj(ifo.frequency_domain_strain)
+            psd[:ifo_length][mask] = ifo.power_spectral_density_array[mask]
+            d_inner_h += np.fft.fft(signal_long * data / psd)[in_prior]
+            h_inner_h += ifo.optimal_snr_squared(signal=signal).real
+
+        if self.distance_marginalization:
+            time_log_like = self.distance_marginalized_likelihood(
+                d_inner_h, h_inner_h)
+        elif self.phase_marginalization:
+            time_log_like = ln_i0(abs(d_inner_h)) - h_inner_h.real / 2
+        else:
+            time_log_like = (d_inner_h.real - h_inner_h.real / 2)
+
+        time_prior_array = self.priors['geocent_time'].prob(times)
+        time_post = np.exp(time_log_like - max(time_log_like)) * time_prior_array
+
+        keep = (time_post > max(time_post) / 1000)
+        if sum(keep) < 3:
+            keep[1:-1] = keep[1:-1] | keep[2:] | keep[:-2]
+        time_post = time_post[keep]
+        times = times[keep]
+
+        new_time = Interped(times, time_post).sample()
+        return new_time
+
+    def generate_distance_sample_from_marginalized_likelihood(
+            self, signal_polarizations=None):
+        """
+        Generate a single sample from the posterior distribution for luminosity
+        distance when using a likelihood which explicitly marginalises over
+        distance.
+
+        See Eq. (C29-C32) of https://arxiv.org/abs/1809.02293
+
+        Parameters
+        ==========
+        signal_polarizations: dict, optional
+            Polarizations modes of the template.
+            Note: These are rescaled in place after the distance sample is
+            generated to allow further parameter reconstruction to occur.
+
+        Returns
+        =======
+        new_distance: float
+            Sample from the distance posterior.
+        """
+        self.parameters.update(self.get_sky_frame_parameters())
+        if signal_polarizations is None:
+            signal_polarizations = \
+                self.waveform_generator.frequency_domain_strain(self.parameters)
+
+        d_inner_h, h_inner_h = self._calculate_inner_products(signal_polarizations)
+
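+        # The signal amplitude scales as 1/d_L, so (d|h) \propto 1/d_L and
+        # (h|h) \propto 1/d_L**2; rescale both from the sampled distance onto
+        # the full distance grid before building the posterior.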
+        d_inner_h_dist = (
+            d_inner_h * self.parameters['luminosity_distance'] / self._distance_array
+        )
+
+        h_inner_h_dist = (
+            h_inner_h * self.parameters['luminosity_distance']**2 / self._distance_array**2
+        )
+
+        if self.phase_marginalization:
+            distance_log_like = ln_i0(abs(d_inner_h_dist)) - h_inner_h_dist.real / 2
+        else:
+            distance_log_like = (d_inner_h_dist.real - h_inner_h_dist.real / 2)
+
+        distance_post = (np.exp(distance_log_like - max(distance_log_like)) *
+                         self.distance_prior_array)
+
+        new_distance = Interped(
+            self._distance_array, distance_post).sample()
+
+        self._rescale_signal(signal_polarizations, new_distance)
+        return new_distance
+
+    def _calculate_inner_products(self, signal_polarizations):
+        d_inner_h = 0
+        h_inner_h = 0
+        for interferometer in self.interferometers:
+            per_detector_snr = self.calculate_snrs(
+                signal_polarizations, interferometer)
+
+            d_inner_h += per_detector_snr.d_inner_h
+            h_inner_h += per_detector_snr.optimal_snr_squared
+        return d_inner_h, h_inner_h
+
+    def generate_phase_sample_from_marginalized_likelihood(
+            self, signal_polarizations=None):
+        r"""
+        Generate a single sample from the posterior distribution for phase when
+        using a likelihood which explicitly marginalises over phase.
+
+        See Eq. (C29-C32) of https://arxiv.org/abs/1809.02293
+
+        Parameters
+        ==========
+        signal_polarizations: dict, optional
+            Polarizations modes of the template.
+
+        Returns
+        =======
+        new_phase: float
+            Sample from the phase posterior.
+
+        Notes
+        =====
+        This is only valid under the assumption that mu(phi) \propto exp(-2i phi).
+        """
+        self.parameters.update(self.get_sky_frame_parameters())
+        if signal_polarizations is None:
+            signal_polarizations = \
+                self.waveform_generator.frequency_domain_strain(self.parameters)
+        d_inner_h, h_inner_h = self._calculate_inner_products(signal_polarizations)
+
+        phases = np.linspace(0, 2 * np.pi, 101)
+        phasor = np.exp(-2j * phases)
+        phase_log_post = d_inner_h * phasor - h_inner_h / 2
+        phase_post = np.exp(phase_log_post.real - max(phase_log_post.real))
+        new_phase = Interped(phases, phase_post).sample()
+        return new_phase
+
+    def distance_marginalized_likelihood(self, d_inner_h, h_inner_h):
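+        # Scale the inner products onto the reference-distance
+        # parametrisation used when the lookup table was built (via
+        # `_setup_rho`), then interpolate the pre-computed
+        # distance-marginalised log likelihood.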
+        d_inner_h_ref, h_inner_h_ref = self._setup_rho(
+            d_inner_h, h_inner_h)
+        if self.phase_marginalization:
+            d_inner_h_ref = np.abs(d_inner_h_ref)
+        else:
+            d_inner_h_ref = np.real(d_inner_h_ref)
+
+        return self._interp_dist_margd_loglikelihood(
+            d_inner_h_ref, h_inner_h_ref)
+
+    def phase_marginalized_likelihood(self, d_inner_h, h_inner_h):
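+        # Marginalising a uniform phase prior analytically gives a modified
+        # Bessel function: log \int exp(Re[(d|h) e^{-2i phi}]) dphi / (2 pi)
+        # = ln I_0(|(d|h)|), implemented by ln_i0.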
+        d_inner_h = ln_i0(abs(d_inner_h))
+
+        if self.calibration_marginalization and self.time_marginalization:
+            return d_inner_h - np.outer(h_inner_h, np.ones(np.shape(d_inner_h)[1])) / 2
+        else:
+            return d_inner_h - h_inner_h / 2
+
+    def time_marginalized_likelihood(self, d_inner_h_tc_array, h_inner_h):
+        if self.distance_marginalization:
+            log_l_tc_array = self.distance_marginalized_likelihood(
+                d_inner_h=d_inner_h_tc_array, h_inner_h=h_inner_h)
+        elif self.phase_marginalization:
+            log_l_tc_array = self.phase_marginalized_likelihood(
+                d_inner_h=d_inner_h_tc_array,
+                h_inner_h=h_inner_h)
+        else:
+            log_l_tc_array = np.real(d_inner_h_tc_array) - h_inner_h / 2
+        times = self._times
+        if self.jitter_time:
+            times = self._times + self.parameters['time_jitter']
+        time_prior_array = self.priors['geocent_time'].prob(times) * self._delta_tc
+        return logsumexp(log_l_tc_array, b=time_prior_array)
+
+    def time_and_calibration_marginalized_likelihood(self, d_inner_h_array, h_inner_h):
+        times = self._times
+        if self.jitter_time:
+            times = self._times + self.parameters['time_jitter']
+
+        _time_prior = self.priors['geocent_time']
+        time_mask = np.logical_and((times >= _time_prior.minimum), (times <= _time_prior.maximum))
+        times = times[time_mask]
+        time_probs = self.priors['geocent_time'].prob(times) * self._delta_tc
+
+        d_inner_h_array = d_inner_h_array[:, time_mask]
+
+        if self.distance_marginalization:
+            log_l_array = self.distance_marginalized_likelihood(
+                d_inner_h=d_inner_h_array, h_inner_h=h_inner_h)
+        elif self.phase_marginalization:
+            log_l_array = self.phase_marginalized_likelihood(
+                d_inner_h=d_inner_h_array,
+                h_inner_h=h_inner_h)
+        else:
+            log_l_array = np.real(d_inner_h_array) - np.outer(h_inner_h, np.ones(np.shape(d_inner_h_array)[1])) / 2
+
+        prior_array = np.outer(time_probs, 1. / self.number_of_response_curves * np.ones(len(h_inner_h))).T
+
+        return logsumexp(log_l_array, b=prior_array)
+
+    def get_calibration_log_likelihoods(self, signal_polarizations=None):
+        self.parameters.update(self.get_sky_frame_parameters())
+        if signal_polarizations is None:
+            signal_polarizations = \
+                self.waveform_generator.frequency_domain_strain(self.parameters)
+
+        d_inner_h = 0.
+        optimal_snr_squared = 0.
+        complex_matched_filter_snr = 0.
+        d_inner_h_array = np.zeros(self.number_of_response_curves, dtype=np.complex128)
+        optimal_snr_squared_array = np.zeros(self.number_of_response_curves, dtype=np.complex128)
+
+        for interferometer in self.interferometers:
+            per_detector_snr = self.calculate_snrs(
+                waveform_polarizations=signal_polarizations,
+                interferometer=interferometer)
+
+            d_inner_h += per_detector_snr.d_inner_h
+            optimal_snr_squared += np.real(per_detector_snr.optimal_snr_squared)
+            complex_matched_filter_snr += per_detector_snr.complex_matched_filter_snr
+            d_inner_h_array += per_detector_snr.d_inner_h_array
+            optimal_snr_squared_array += per_detector_snr.optimal_snr_squared_array
+
+        if self.distance_marginalization:
+            log_l_cal_array = self.distance_marginalized_likelihood(
+                d_inner_h=d_inner_h_array, h_inner_h=optimal_snr_squared_array)
+        elif self.phase_marginalization:
+            log_l_cal_array = self.phase_marginalized_likelihood(
+                d_inner_h=d_inner_h_array,
+                h_inner_h=optimal_snr_squared_array)
+        else:
+            log_l_cal_array = np.real(d_inner_h_array - optimal_snr_squared_array / 2)
+
+        return log_l_cal_array
+
+    def calibration_marginalized_likelihood(self, d_inner_h_calibration_array, h_inner_h):
+        if self.distance_marginalization:
+            log_l_cal_array = self.distance_marginalized_likelihood(
+                d_inner_h=d_inner_h_calibration_array, h_inner_h=h_inner_h)
+        elif self.phase_marginalization:
+            log_l_cal_array = self.phase_marginalized_likelihood(
+                d_inner_h=d_inner_h_calibration_array,
+                h_inner_h=h_inner_h)
+        else:
+            log_l_cal_array = np.real(d_inner_h_calibration_array - h_inner_h / 2)
+
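+        # Monte-Carlo average of the likelihood over the calibration
+        # response-curve draws.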
+        return logsumexp(log_l_cal_array) - np.log(self.number_of_response_curves)
+
+    def _setup_rho(self, d_inner_h, optimal_snr_squared):
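+        # (d|h) scales as 1/d_L and (h|h) as 1/d_L^2, so both can be referred
+        # to the fiducial distance self._ref_dist by rescaling.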
+        optimal_snr_squared_ref = (optimal_snr_squared.real *
+                                   self.parameters['luminosity_distance'] ** 2 /
+                                   self._ref_dist ** 2.)
+        d_inner_h_ref = (d_inner_h * self.parameters['luminosity_distance'] /
+                         self._ref_dist)
+        return d_inner_h_ref, optimal_snr_squared_ref
+
+    def log_likelihood(self):
+        return self.log_likelihood_ratio() + self.noise_log_likelihood()
+
+    @property
+    def _delta_distance(self):
+        return self._distance_array[1] - self._distance_array[0]
+
+    @property
+    def _dist_multiplier(self):
+        """ Maximum value of ref_dist/dist_array """
+        return self._ref_dist / self._distance_array[0]
+
+    @property
+    def _optimal_snr_squared_ref_array(self):
+        """ Optimal filter snr at fiducial distance of ref_dist Mpc """
+        return np.logspace(-5, 10, self._dist_margd_loglikelihood_array.shape[0])
+
+    @property
+    def _d_inner_h_ref_array(self):
+        """ Matched filter snr at fiducial distance of ref_dist Mpc """
+        if self.phase_marginalization:
+            return np.logspace(-5, 10, self._dist_margd_loglikelihood_array.shape[1])
+        else:
+            n_negative = self._dist_margd_loglikelihood_array.shape[1] // 2
+            n_positive = self._dist_margd_loglikelihood_array.shape[1] - n_negative
+            return np.hstack((
+                -np.logspace(3, -3, n_negative), np.logspace(-3, 10, n_positive)
+            ))
+
+    def _setup_distance_marginalization(self, lookup_table=None):
+        if isinstance(lookup_table, str) or lookup_table is None:
+            self.cached_lookup_table_filename = lookup_table
+            lookup_table = self.load_lookup_table(
+                self.cached_lookup_table_filename)
+        if isinstance(lookup_table, dict):
+            if self._test_cached_lookup_table(lookup_table):
+                self._dist_margd_loglikelihood_array = lookup_table[
+                    'lookup_table']
+            else:
+                self._create_lookup_table()
+        else:
+            self._create_lookup_table()
+        self._interp_dist_margd_loglikelihood = UnsortedInterp2d(
+            self._d_inner_h_ref_array, self._optimal_snr_squared_ref_array,
+            self._dist_margd_loglikelihood_array, kind='cubic', fill_value=-np.inf)
+
+    @property
+    def cached_lookup_table_filename(self):
+        if self._lookup_table_filename is None:
+            self._lookup_table_filename = (
+                '.distance_marginalization_lookup.npz')
+        return self._lookup_table_filename
+
+    @cached_lookup_table_filename.setter
+    def cached_lookup_table_filename(self, filename):
+        if isinstance(filename, str):
+            if filename[-4:] != '.npz':
+                filename += '.npz'
+        self._lookup_table_filename = filename
+
+    def load_lookup_table(self, filename):
+        if os.path.exists(filename):
+            try:
+                loaded_file = dict(np.load(filename))
+            except AttributeError as e:
+                logger.warning(e)
+                self._create_lookup_table()
+                return None
+            match, failure = self._test_cached_lookup_table(loaded_file)
+            if match:
+                logger.info('Loaded distance marginalisation lookup table from '
+                            '{}.'.format(filename))
+                return loaded_file
+            else:
+                logger.info('Loaded distance marginalisation lookup table does '
+                            'not match the current setup; mismatched key: {}.'.format(failure))
+        elif isinstance(filename, str):
+            logger.info('Distance marginalisation file {} does not '
+                        'exist'.format(filename))
+        return None
+
+    def cache_lookup_table(self):
+        np.savez(self.cached_lookup_table_filename,
+                 distance_array=self._distance_array,
+                 prior_array=self.distance_prior_array,
+                 lookup_table=self._dist_margd_loglikelihood_array,
+                 reference_distance=self._ref_dist,
+                 phase_marginalization=self.phase_marginalization)
+
+    def _test_cached_lookup_table(self, loaded_file):
+        pairs = dict(
+            distance_array=self._distance_array,
+            prior_array=self.distance_prior_array,
+            reference_distance=self._ref_dist,
+            phase_marginalization=self.phase_marginalization)
+        for key in pairs:
+            if key not in loaded_file:
+                return False, key
+            elif not np.array_equal(np.atleast_1d(loaded_file[key]),
+                                    np.atleast_1d(pairs[key])):
+                return False, key
+        return True, None
+
+    def _create_lookup_table(self):
+        """ Make the lookup table """
+        from tqdm.auto import tqdm
+        logger.info('Building lookup table for distance marginalisation.')
+
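+        # (400, 800) grid: rows index the optimal SNR squared values and
+        # columns index the (d|h) values defined by the properties above.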
+        self._dist_margd_loglikelihood_array = np.zeros((400, 800))
+        scaling = self._ref_dist / self._distance_array
+        d_inner_h_array_full = np.outer(self._d_inner_h_ref_array, scaling)
+        h_inner_h_array_full = np.outer(self._optimal_snr_squared_ref_array, scaling ** 2)
+        if self.phase_marginalization:
+            d_inner_h_array_full = ln_i0(abs(d_inner_h_array_full))
+        prior_term = self.distance_prior_array * self._delta_distance
+        for ii, optimal_snr_squared_array in tqdm(
+                enumerate(h_inner_h_array_full), total=len(self._optimal_snr_squared_ref_array)
+        ):
+            for jj, d_inner_h_array in enumerate(d_inner_h_array_full):
+                self._dist_margd_loglikelihood_array[ii][jj] = logsumexp(
+                    d_inner_h_array - optimal_snr_squared_array / 2,
+                    b=prior_term
+                )
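+        # Normalise by the distance-marginalised likelihood of a null signal;
+        # 0 / self._distance_array is just a zero array with the grid's shape.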
+        log_norm = logsumexp(
+            0 / self._distance_array, b=self.distance_prior_array * self._delta_distance
+        )
+        self._dist_margd_loglikelihood_array -= log_norm
+        self.cache_lookup_table()
+
+    def _setup_phase_marginalization(self, min_bound=-5, max_bound=10):
+        logger.warning(
+            "The _setup_phase_marginalization method is deprecated and will be removed, "
+            "please update the implementation of phase marginalization "
+            "to use bilby.gw.utils.ln_i0"
+        )
+
+    @staticmethod
+    def _bessel_function_interped(xx):
+        logger.warning(
+            "The _bessel_function_interped method is deprecated and will be removed, "
+            "please update the implementation of phase marginalization "
+            "to use bilby.gw.utils.ln_i0"
+        )
+        return ln_i0(xx) + xx
+
+    def _setup_time_marginalization(self):
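+        # The discrete time grid has a spacing of two samples, i.e. 2 / f_s.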
+        self._delta_tc = 2 / self.waveform_generator.sampling_frequency
+        self._times = \
+            self.interferometers.start_time + np.linspace(
+                0, self.interferometers.duration,
+                int(self.interferometers.duration / 2 *
+                    self.waveform_generator.sampling_frequency + 1))[1:]
+        self.time_prior_array = \
+            self.priors['geocent_time'].prob(self._times) * self._delta_tc
+
+    def _setup_calibration_marginalization(self, calibration_lookup_table):
+        if calibration_lookup_table is None:
+            calibration_lookup_table = {}
+        self.calibration_draws = {}
+        self.calibration_abs_draws = {}
+        self.calibration_parameter_draws = {}
+        for interferometer in self.interferometers:
+
+            # Force the priors
+            calibration_priors = PriorDict()
+            for key in self.priors.keys():
+                if 'recalib' in key and interferometer.name in key:
+                    calibration_priors[key] = copy.copy(self.priors[key])
+                    self.priors[key] = DeltaFunction(0.0)
+
+            # If there is no entry in the lookup table, make an empty one
+            if interferometer.name not in calibration_lookup_table.keys():
+                calibration_lookup_table[interferometer.name] = \
+                    f'{interferometer.name}_calibration_file.h5'
+
+            # If the interferometer lookup table file exists, generate the curves from it
+            if os.path.exists(calibration_lookup_table[interferometer.name]):
+                self.calibration_draws[interferometer.name] = \
+                    calibration.read_calibration_file(
+                        calibration_lookup_table[interferometer.name], self.interferometers.frequency_array,
+                        self.number_of_response_curves, self.starting_index)
+
+            else:  # generate the fake curves
+                from tqdm.auto import tqdm
+                self.calibration_parameter_draws[interferometer.name] = \
+                    pd.DataFrame(calibration_priors.sample(self.number_of_response_curves))
+
+                self.calibration_draws[interferometer.name] = \
+                    np.zeros((self.number_of_response_curves, len(interferometer.frequency_array)), dtype=complex)
+
+                for i in tqdm(range(self.number_of_response_curves)):
+                    self.calibration_draws[interferometer.name][i, :] = \
+                        interferometer.calibration_model.get_calibration_factor(
+                            interferometer.frequency_array,
+                            prefix='recalib_{}_'.format(interferometer.name),
+                            **self.calibration_parameter_draws[interferometer.name].iloc[i])
+
+                calibration.write_calibration_file(
+                    calibration_lookup_table[interferometer.name],
+                    self.interferometers.frequency_array,
+                    self.calibration_draws[interferometer.name],
+                    self.calibration_parameter_draws[interferometer.name])
+
+            interferometer.calibration_model = calibration.Recalibrate()
+
+            _mask = interferometer.frequency_mask
+            self.calibration_draws[interferometer.name] = self.calibration_draws[interferometer.name][:, _mask]
+            self.calibration_abs_draws[interferometer.name] = \
+                np.abs(self.calibration_draws[interferometer.name])**2
+
+    @property
+    def interferometers(self):
+        return self._interferometers
+
+    @interferometers.setter
+    def interferometers(self, interferometers):
+        self._interferometers = InterferometerList(interferometers)
+
+    def _rescale_signal(self, signal, new_distance):
+        for mode in signal:
+            signal[mode] *= self._ref_dist / new_distance
+
+    @property
+    def reference_frame(self):
+        return self._reference_frame
+
+    @property
+    def _reference_frame_str(self):
+        if isinstance(self.reference_frame, str):
+            return self.reference_frame
+        else:
+            return "".join([ifo.name for ifo in self.reference_frame])
+
+    @reference_frame.setter
+    def reference_frame(self, frame):
+        if frame == "sky":
+            self._reference_frame = frame
+        elif isinstance(frame, InterferometerList):
+            self._reference_frame = frame[:2]
+        elif isinstance(frame, list):
+            self._reference_frame = InterferometerList(frame[:2])
+        elif isinstance(frame, str):
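+            # e.g. "H1L1" -> InterferometerList(["H1", "L1"])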
+            self._reference_frame = InterferometerList([frame[:2], frame[2:4]])
+        else:
+            raise ValueError("Unable to parse reference frame {}".format(frame))
+
+    def get_sky_frame_parameters(self):
+        time = self.parameters['{}_time'.format(self.time_reference)]
+        if not self.reference_frame == "sky":
+            ra, dec = zenith_azimuth_to_ra_dec(
+                self.parameters['zenith'], self.parameters['azimuth'],
+                time, self.reference_frame)
+        else:
+            ra = self.parameters["ra"]
+            dec = self.parameters["dec"]
+        if "geocent" not in self.time_reference:
+            geocent_time = time - self.reference_ifo.time_delay_from_geocenter(
+                ra=ra, dec=dec, time=time
+            )
+        else:
+            geocent_time = self.parameters["geocent_time"]
+        return dict(ra=ra, dec=dec, geocent_time=geocent_time)
+
+    @property
+    def lal_version(self):
+        try:
+            from lal import git_version, __version__
+            lal_version = str(__version__)
+            logger.info("Using lal version {}".format(lal_version))
+            lal_git_version = str(git_version.verbose_msg).replace("\n", ";")
+            logger.info("Using lal git version {}".format(lal_git_version))
+            return "lal_version={}, lal_git_version={}".format(lal_version, lal_git_version)
+        except (ImportError, AttributeError):
+            return "N/A"
+
+    @property
+    def lalsimulation_version(self):
+        try:
+            from lalsimulation import git_version, __version__
+            lalsim_version = str(__version__)
+            logger.info("Using lalsimulation version {}".format(lalsim_version))
+            lalsim_git_version = str(git_version.verbose_msg).replace("\n", ";")
+            logger.info("Using lalsimulation git version {}".format(lalsim_git_version))
+            return "lalsimulation_version={}, lalsimulation_git_version={}".format(lalsim_version, lalsim_git_version)
+        except (ImportError, AttributeError):
+            return "N/A"
+
+    @property
+    def meta_data(self):
+        return dict(
+            interferometers=self.interferometers.meta_data,
+            time_marginalization=self.time_marginalization,
+            phase_marginalization=self.phase_marginalization,
+            distance_marginalization=self.distance_marginalization,
+            calibration_marginalization=self.calibration_marginalization,
+            waveform_generator_class=self.waveform_generator.__class__,
+            waveform_arguments=self.waveform_generator.waveform_arguments,
+            frequency_domain_source_model=self.waveform_generator.frequency_domain_source_model,
+            parameter_conversion=self.waveform_generator.parameter_conversion,
+            sampling_frequency=self.waveform_generator.sampling_frequency,
+            duration=self.waveform_generator.duration,
+            start_time=self.waveform_generator.start_time,
+            time_reference=self.time_reference,
+            reference_frame=self._reference_frame_str,
+            lal_version=self.lal_version,
+            lalsimulation_version=self.lalsimulation_version)
diff --git a/bilby/gw/likelihood/basic.py b/bilby/gw/likelihood/basic.py
new file mode 100644
index 0000000000000000000000000000000000000000..88c83dcfa13927eaf84edccfcd68b9a8e3726699
--- /dev/null
+++ b/bilby/gw/likelihood/basic.py
@@ -0,0 +1,93 @@
+import numpy as np
+
+from ...core.likelihood import Likelihood
+
+
+class BasicGravitationalWaveTransient(Likelihood):
+
+    def __init__(self, interferometers, waveform_generator):
+        """
+
+        A likelihood object, able to compute the likelihood of the data given
+        some model parameters.
+
+        The simplest frequency-domain gravitational-wave transient likelihood.
+        Does not include distance/phase marginalization.
+
+        Parameters
+        ==========
+        interferometers: list
+            A list of `bilby.gw.detector.Interferometer` instances - contains the
+            detector data and power spectral densities
+        waveform_generator: bilby.gw.waveform_generator.WaveformGenerator
+            An object which computes the frequency-domain strain of the signal,
+            given some set of parameters
+
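+        Examples
+        ========
+        A minimal usage sketch, assuming ``ifos`` is an ``InterferometerList``
+        with data set and ``wfg`` is a configured ``WaveformGenerator``:
+
+        >>> likelihood = BasicGravitationalWaveTransient(ifos, wfg)
+        >>> likelihood.parameters.update(injection_parameters)
+        >>> log_l = likelihood.log_likelihood()
+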
+        """
+        super(BasicGravitationalWaveTransient, self).__init__(dict())
+        self.interferometers = interferometers
+        self.waveform_generator = waveform_generator
+
+    def __repr__(self):
+        return self.__class__.__name__ + '(interferometers={},\n\twaveform_generator={})' \
+            .format(self.interferometers, self.waveform_generator)
+
+    def noise_log_likelihood(self):
+        """ Calculates the real part of noise log-likelihood
+
+        Returns
+        =======
+        float: The real part of the noise log likelihood
+
+        """
+        log_l = 0
+        for interferometer in self.interferometers:
+            log_l -= 2. / self.waveform_generator.duration * np.sum(
+                abs(interferometer.frequency_domain_strain) ** 2 /
+                interferometer.power_spectral_density_array)
+        return log_l.real
+
+    def log_likelihood(self):
+        """ Calculates the real part of log-likelihood value
+
+        Returns
+        =======
+        float: The real part of the log likelihood
+
+        """
+        log_l = 0
+        waveform_polarizations = \
+            self.waveform_generator.frequency_domain_strain(
+                self.parameters.copy())
+        if waveform_polarizations is None:
+            return np.nan_to_num(-np.inf)
+        for interferometer in self.interferometers:
+            log_l += self.log_likelihood_interferometer(
+                waveform_polarizations, interferometer)
+        return log_l.real
+
+    def log_likelihood_interferometer(self, waveform_polarizations,
+                                      interferometer):
+        """
+
+        Parameters
+        ==========
+        waveform_polarizations: dict
+            Dictionary containing the desired waveform polarization modes and the related strain
+        interferometer: bilby.gw.detector.Interferometer
+            The Interferometer object we want to have the log-likelihood for
+
+        Returns
+        =======
+        float: The real part of the log-likelihood for this interferometer
+
+        """
+        signal_ifo = interferometer.get_detector_response(
+            waveform_polarizations, self.parameters)
+
+        log_l = - 2. / self.waveform_generator.duration * np.vdot(
+            interferometer.frequency_domain_strain - signal_ifo,
+            (interferometer.frequency_domain_strain - signal_ifo) /
+            interferometer.power_spectral_density_array)
+        return log_l.real
diff --git a/bilby/gw/likelihood/multiband.py b/bilby/gw/likelihood/multiband.py
new file mode 100644
index 0000000000000000000000000000000000000000..f361742c123af9cd5f27d236b76d1f3e1a4efc7b
--- /dev/null
+++ b/bilby/gw/likelihood/multiband.py
@@ -0,0 +1,613 @@
+
+import math
+
+import numpy as np
+
+from .base import GravitationalWaveTransient
+from ...core.utils import (
+    logger, speed_of_light, solar_mass, radius_of_earth,
+    gravitational_constant, round_up_to_power_of_two
+)
+
+
+class MBGravitationalWaveTransient(GravitationalWaveTransient):
+    """A multi-banded likelihood object
+
+    This uses the method described in S. Morisaki, 2021, arXiv: 2104.07813.
+
+    Parameters
+    ----------
+    interferometers: list, bilby.gw.detector.InterferometerList
+        A list of `bilby.detector.Interferometer` instances - contains the detector data and power spectral densities
+    waveform_generator: `bilby.waveform_generator.WaveformGenerator`
+        An object which computes the frequency-domain strain of the signal, given some set of parameters
+    reference_chirp_mass: float
+        A reference chirp mass for determining the frequency banding
+    highest_mode: int, optional
+        The maximum magnetic mode number m of the gravitational-wave multipole moments. Default is 2
+    linear_interpolation: bool, optional
+        If True, the linear-interpolation method is used for the computation of (h, h). If False, the IFFT-FFT method
+        is used. Default is True.
+    accuracy_factor: float, optional
+        A parameter to determine the accuracy of multi-banding. The larger this factor is, the more accurate the
+        approximation is. This corresponds to L in the paper. Default is 5.
+    time_offset: float, optional
+        (end time of data) - (maximum arrival time). If None, it is inferred from the prior of geocent time.
+    delta_f_end: float, optional
+        The frequency scale with which waveforms at the high-frequency end are smoothed. If None, it is determined from
+        the prior of geocent time.
+    maximum_banding_frequency: float, optional
+        A maximum frequency for multi-banding. If specified, the low-frequency limit of a band does not exceed it.
+    minimum_banding_duration: float, optional
+        A minimum duration for multi-banding. If specified, the duration of a band is not smaller than it.
+    distance_marginalization: bool, optional
+        If true, marginalize over distance in the likelihood. This uses a look up table calculated at run time. The
+        distance prior is set to be a delta function at the minimum distance allowed in the prior being marginalised
+        over.
+    phase_marginalization: bool, optional
+        If true, marginalize over phase in the likelihood. This is done analytically using a Bessel function. The phase
+        prior is set to be a delta function at phase=0.
+    priors: dict, bilby.prior.PriorDict
+        A dictionary of priors containing at least the geocent_time prior
+    distance_marginalization_lookup_table: (dict, str), optional
+        If a dict, dictionary containing the lookup_table, distance_array, (distance) prior_array, and
+        reference_distance used to construct the table. If a string the name of a file containing these quantities. The
+        lookup table is stored after construction in either the provided string or a default location:
+        '.distance_marginalization_lookup_dmin{}_dmax{}_n{}.npz'
+    reference_frame: (str, bilby.gw.detector.InterferometerList, list), optional
+        Definition of the reference frame for the sky location.
+        - "sky": sample in RA/dec, this is the default
+        - e.g., "H1L1", ["H1", "L1"], InterferometerList(["H1", "L1"]):
+          sample in azimuth and zenith, `azimuth` and `zenith` defined in the frame where the z-axis is aligned with
+          the vector connecting H1 and L1.
+    time_reference: str, optional
+        Name of the reference for the sampled time parameter.
+        - "geocent"/"geocenter": sample in the time at the Earth's center, this is the default
+        - e.g., "H1": sample in the time of arrival at H1
+
+    Returns
+    -------
+    Likelihood: `bilby.core.likelihood.Likelihood`
+        A likelihood object, able to compute the likelihood of the data given some model parameters
+
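+    Examples
+    --------
+    A minimal sketch, assuming ``ifos``, ``wfg`` and ``priors`` are already
+    set up, and that the source model used by ``wfg`` accepts the
+    ``frequencies`` waveform argument this class adds:
+
+    >>> likelihood = MBGravitationalWaveTransient(
+    ...     interferometers=ifos, waveform_generator=wfg,
+    ...     reference_chirp_mass=30., priors=priors)
+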
+    """
+    def __init__(
+            self, interferometers, waveform_generator, reference_chirp_mass, highest_mode=2, linear_interpolation=True,
+            accuracy_factor=5, time_offset=None, delta_f_end=None, maximum_banding_frequency=None,
+            minimum_banding_duration=0., distance_marginalization=False, phase_marginalization=False, priors=None,
+            distance_marginalization_lookup_table=None, reference_frame="sky", time_reference="geocenter"
+    ):
+        super(MBGravitationalWaveTransient, self).__init__(
+            interferometers=interferometers, waveform_generator=waveform_generator, priors=priors,
+            distance_marginalization=distance_marginalization, phase_marginalization=phase_marginalization,
+            time_marginalization=False, distance_marginalization_lookup_table=distance_marginalization_lookup_table,
+            jitter_time=False, reference_frame=reference_frame, time_reference=time_reference
+        )
+        self.reference_chirp_mass = reference_chirp_mass
+        self.highest_mode = highest_mode
+        self.linear_interpolation = linear_interpolation
+        self.accuracy_factor = accuracy_factor
+        self.time_offset = time_offset
+        self.delta_f_end = delta_f_end
+        self.minimum_frequency = np.min([i.minimum_frequency for i in self.interferometers])
+        self.maximum_frequency = np.max([i.maximum_frequency for i in self.interferometers])
+        self.maximum_banding_frequency = maximum_banding_frequency
+        self.minimum_banding_duration = minimum_banding_duration
+        self.setup_multibanding()
+
+    @property
+    def reference_chirp_mass(self):
+        return self._reference_chirp_mass
+
+    @property
+    def reference_chirp_mass_in_second(self):
+        return gravitational_constant * self._reference_chirp_mass * solar_mass / speed_of_light**3.
+
+    @reference_chirp_mass.setter
+    def reference_chirp_mass(self, reference_chirp_mass):
+        if isinstance(reference_chirp_mass, (int, float)):
+            self._reference_chirp_mass = reference_chirp_mass
+        else:
+            raise TypeError("reference_chirp_mass must be a number")
+
+    @property
+    def highest_mode(self):
+        return self._highest_mode
+
+    @highest_mode.setter
+    def highest_mode(self, highest_mode):
+        if isinstance(highest_mode, (int, float)):
+            self._highest_mode = highest_mode
+        else:
+            raise TypeError("highest_mode must be a number")
+
+    @property
+    def linear_interpolation(self):
+        return self._linear_interpolation
+
+    @linear_interpolation.setter
+    def linear_interpolation(self, linear_interpolation):
+        if isinstance(linear_interpolation, bool):
+            self._linear_interpolation = linear_interpolation
+        else:
+            raise TypeError("linear_interpolation must be a bool")
+
+    @property
+    def accuracy_factor(self):
+        return self._accuracy_factor
+
+    @accuracy_factor.setter
+    def accuracy_factor(self, accuracy_factor):
+        if isinstance(accuracy_factor, (int, float)):
+            self._accuracy_factor = accuracy_factor
+        else:
+            raise TypeError("accuracy_factor must be a number")
+
+    @property
+    def time_offset(self):
+        return self._time_offset
+
+    @time_offset.setter
+    def time_offset(self, time_offset):
+        """
+        This sets the time offset assumed when frequency bands are constructed. The default value is (the
+        maximum offset of geocent time in the prior range) + (the light-travel time across the Earth). If the
+        prior does not contain 'geocent_time', 2.12 seconds is used, calculated assuming that the maximum
+        offset of geocent time is 2.1 seconds, which is the value for the standard prior used by
+        LIGO-Virgo-KAGRA.
+        """
+        time_parameter = self.time_reference + "_time"
+        if time_parameter == "geocent_time":
+            safety = radius_of_earth / speed_of_light
+        else:
+            safety = 2 * radius_of_earth / speed_of_light
+        if time_offset is not None:
+            if isinstance(time_offset, (int, float)):
+                self._time_offset = time_offset
+            else:
+                raise TypeError("time_offset must be a number")
+        elif self.priors is not None and time_parameter in self.priors:
+            self._time_offset = (
+                self.interferometers.start_time + self.interferometers.duration
+                - self.priors[time_parameter].minimum + safety
+            )
+        else:
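+            # Fallback: 2.1 s (the maximum geocent-time offset in the standard
+            # LVK prior) plus ~0.02 s of Earth light-crossing time.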
+            self._time_offset = 2.12
+            logger.warning("time offset can not be inferred. Use the standard time offset of {} seconds.".format(
+                self._time_offset))
+
+    @property
+    def delta_f_end(self):
+        return self._delta_f_end
+
+    @delta_f_end.setter
+    def delta_f_end(self, delta_f_end):
+        """
+        This sets the frequency scale over which the high-frequency end of the waveform is tapered, to avoid
+        the issues caused by the abrupt termination of the waveform described in Sec. 2F of arXiv: 2104.07813.
+        This needs to be much larger than the inverse of the minimum time offset, and the default value is
+        100 times that. If the prior does not contain 'geocent_time' and the minimum time offset cannot be
+        computed, 53 Hz is used, computed assuming that the minimum offset of geocent time is 1.9 seconds,
+        which is the value for the standard prior used by LIGO-Virgo-KAGRA.
+        """
+        time_parameter = self.time_reference + "_time"
+        if time_parameter == "geocent_time":
+            safety = radius_of_earth / speed_of_light
+        else:
+            safety = 2 * radius_of_earth / speed_of_light
+        if delta_f_end is not None:
+            if isinstance(delta_f_end, (int, float)):
+                self._delta_f_end = delta_f_end
+            else:
+                raise TypeError("delta_f_end must be a number")
+        elif self.priors is not None and time_parameter in self.priors:
+            self._delta_f_end = 100 / (
+                self.interferometers.start_time + self.interferometers.duration
+                - self.priors[time_parameter].maximum - safety
+            )
+        else:
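+            # Fallback: 100 / (1.9 s minimum time offset) ~= 53 Hz.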
+            self._delta_f_end = 53.
+            logger.warning("delta_f_end can not be inferred. Use the standard delta_f_end of {} Hz.".format(
+                self._delta_f_end))
+
+    @property
+    def maximum_banding_frequency(self):
+        return self._maximum_banding_frequency
+
+    @maximum_banding_frequency.setter
+    def maximum_banding_frequency(self, maximum_banding_frequency):
+        r"""
+        This sets the upper limit on the starting frequency of a band. The default value is the frequency at
+        which f - 1 / \sqrt(- d\tau / df) starts to decrease, because the bisection search for the starting
+        frequency does not work above that frequency. The stationary-phase approximation is also not valid
+        at such high frequencies, which can break down the multi-banding approximation. It is calculated
+        from the 0PN formula for the time-to-merger \tau(f). The user-specified frequency is used if it is
+        lower than that frequency.
+        """
+        fmax_tmp = (
+            (15 / 968)**(3 / 5) * (self.highest_mode / (2 * np.pi))**(8 / 5)
+            / self.reference_chirp_mass_in_second
+        )
+        if maximum_banding_frequency is not None:
+            if isinstance(maximum_banding_frequency, (int, float)):
+                if maximum_banding_frequency < fmax_tmp:
+                    fmax_tmp = maximum_banding_frequency
+                else:
+                    logger.warning("The input maximum_banding_frequency is too large."
+                                   "It is set to be {} Hz.".format(fmax_tmp))
+            else:
+                raise TypeError("maximum_banding_frequency must be a number")
+        self._maximum_banding_frequency = fmax_tmp
+
+    @property
+    def minimum_banding_duration(self):
+        return self._minimum_banding_duration
+
+    @minimum_banding_duration.setter
+    def minimum_banding_duration(self, minimum_banding_duration):
+        if isinstance(minimum_banding_duration, (int, float)):
+            self._minimum_banding_duration = minimum_banding_duration
+        else:
+            raise TypeError("minimum_banding_duration must be a number")
+
+    def setup_multibanding(self):
+        """Set up frequency bands and coefficients needed for likelihood evaluations"""
+        self._setup_frequency_bands()
+        self._setup_integers()
+        self._setup_waveform_frequency_points()
+        self._setup_linear_coefficients()
+        if self.linear_interpolation:
+            self._setup_quadratic_coefficients_linear_interp()
+        else:
+            self._setup_quadratic_coefficients_ifft_fft()
+
+    def _tau(self, f):
+        """Compute time-to-merger from the input frequency. This uses the 0PN formula.
+
+        Parameters
+        ----------
+        f: float
+            input frequency
+
+        Returns
+        -------
+        tau: float
+            time-to-merger
+
+        """
+        f_22 = 2 * f / self.highest_mode
+        return (
+            5 / 256 * self.reference_chirp_mass_in_second
+            * (np.pi * self.reference_chirp_mass_in_second * f_22) ** (-8 / 3)
+        )
+
+    def _dtaudf(self, f):
+        """Compute the derivative of time-to-merger with respect to a starting frequency. This uses the 0PN formula.
+
+        Parameters
+        ----------
+        f: float
+            input frequency
+
+        Returns
+        -------
+        dtaudf: float
+            derivative of time-to-merger
+
+        """
+        f_22 = 2 * f / self.highest_mode
+        return (
+            -5 / 96 * self.reference_chirp_mass_in_second
+            * (np.pi * self.reference_chirp_mass_in_second * f_22) ** (-8. / 3.) / f
+        )
+
+    def _find_starting_frequency(self, duration, fnow):
+        """Find the starting frequency of the next band satisfying (10) and
+        (51) of arXiv: 2104.07813.
+
+        Parameters
+        ----------
+        duration: float
+            duration of the next band
+        fnow: float
+            starting frequency of the current band
+
+        Returns
+        -------
+        fnext: float or None
+            starting frequency of the next band. None if a frequency satisfying the conditions does not exist.
+        dfnext: float or None
+            frequency scale with which waveforms are smoothed. None if a frequency satisfying the conditions does not
+            exist.
+
+        """
+        def _is_above_fnext(f):
+            """This function returns True if f > fnext"""
+            cond1 = (
+                duration - self.time_offset - self._tau(f)
+                - self.accuracy_factor * np.sqrt(-self._dtaudf(f))
+            ) > 0
+            cond2 = f - 1. / np.sqrt(-self._dtaudf(f)) - fnow > 0
+            return cond1 and cond2
+        # Bisection search for fnext
+        fmin, fmax = fnow, self.maximum_banding_frequency
+        if not _is_above_fnext(fmax):
+            return None, None
+        f = fmax  # fallback if the loop below does not iterate; fmax satisfies the conditions
+        while fmax - fmin > 1e-2 / duration:
+            f = (fmin + fmax) / 2.
+            if _is_above_fnext(f):
+                fmax = f
+            else:
+                fmin = f
+        return f, 1. / np.sqrt(-self._dtaudf(f))
+
+    def _setup_frequency_bands(self):
+        r"""Set up frequency bands. The durations of bands geometrically decrease T, T/2. T/4, ..., where T is the
+        original duration. This sets the following instance variables.
+
+        durations: durations of bands (T^(b) in the paper)
+        fb_dfb: the list of tuples, which contain starting frequencies (f^(b) in the paper) and frequency scales for
+        smoothing waveforms (\Delta f^(b) in the paper) of bands
+
+        """
+        self.durations = [self.interferometers.duration]
+        self.fb_dfb = [(self.minimum_frequency, 0.)]
+        dnext = self.interferometers.duration / 2
+        while dnext > max(self.time_offset, self.minimum_banding_duration):
+            fnow, _ = self.fb_dfb[-1]
+            fnext, dfnext = self._find_starting_frequency(dnext, fnow)
+            if fnext is not None and fnext < min(self.maximum_frequency, self.maximum_banding_frequency):
+                self.durations.append(dnext)
+                self.fb_dfb.append((fnext, dfnext))
+                dnext /= 2
+            else:
+                break
+        self.fb_dfb.append((self.maximum_frequency + self.delta_f_end, self.delta_f_end))
+        logger.info("The total frequency range is divided into {} bands with frequency intervals of {}.".format(
+            len(self.durations), ", ".join(["1/{} Hz".format(d) for d in self.durations])))
+
+    def _setup_integers(self):
+        """Set up integers needed for likelihood evaluations. This sets the following instance variables.
+
+        Nbs: the numbers of samples of downsampled data (N^(b) in the paper)
+        Mbs: the numbers of samples of shortened data (M^(b) in the paper)
+        Ks_Ke: start and end frequency indices of bands (K^(b)_s and K^(b)_e in the paper)
+
+        """
+        self.Nbs = []
+        self.Mbs = []
+        self.Ks_Ke = []
+        for b in range(len(self.durations)):
+            dnow = self.durations[b]
+            fnow, dfnow = self.fb_dfb[b]
+            fnext, _ = self.fb_dfb[b + 1]
+            Nb = max(round_up_to_power_of_two(2. * (fnext * self.interferometers.duration + 1.)), 2**b)
+            self.Nbs.append(Nb)
+            self.Mbs.append(Nb // 2**b)
+            self.Ks_Ke.append((math.ceil((fnow - dfnow) * dnow), math.floor(fnext * dnow)))
+
+    def _setup_waveform_frequency_points(self):
+        """Set up frequency points where waveforms are evaluated. Frequency points are reordered because some waveform
+        models raise an error if the input frequencies are not increasing. This adds frequency_points into the
+        waveform_arguments of waveform_generator. This sets the following instance variables.
+
+        banded_frequency_points: ndarray of total banded frequency points
+        start_end_idxs: list of tuples containing start and end indices of each band
+        unique_to_original_frequencies: indices converting unique frequency
+        points into the original duplicated banded frequencies
+
+        """
+        self.banded_frequency_points = np.array([])
+        self.start_end_idxs = []
+        start_idx = 0
+        for i in range(len(self.fb_dfb) - 1):
+            d = self.durations[i]
+            Ks, Ke = self.Ks_Ke[i]
+            self.banded_frequency_points = np.append(self.banded_frequency_points, np.arange(Ks, Ke + 1) / d)
+            end_idx = start_idx + Ke - Ks
+            self.start_end_idxs.append((start_idx, end_idx))
+            start_idx = end_idx + 1
+        unique_frequencies, idxs = np.unique(self.banded_frequency_points, return_inverse=True)
+        self.waveform_generator.waveform_arguments['frequencies'] = unique_frequencies
+        self.unique_to_original_frequencies = idxs
+        logger.info("The number of frequency points where waveforms are evaluated is {}.".format(
+            len(unique_frequencies)))
+        logger.info("The speed-up gain of multi-banding is {}.".format(
+            (self.maximum_frequency - self.minimum_frequency) * self.interferometers.duration /
+            len(unique_frequencies)))
+
+    def _window(self, f, b):
+        """Compute window function in the b-th band
+
+        Parameters
+        ----------
+        f: float or ndarray
+            frequency at which the window function is computed
+        b: int
+            band index
+
+        Returns
+        -------
+        window: float
+            window function at f
+        """
+        fnow, dfnow = self.fb_dfb[b]
+        fnext, dfnext = self.fb_dfb[b + 1]
+
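+        # Cosine tapers of widths dfnow and dfnext smooth the lower and upper
+        # edges of the band; the window is unity in between.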
+        @np.vectorize
+        def _vectorized_window(f):
+            if fnow - dfnow < f < fnow:
+                return (1. + np.cos(np.pi * (f - fnow) / dfnow)) / 2.
+            elif fnow <= f <= fnext - dfnext:
+                return 1.
+            elif fnext - dfnext < f < fnext:
+                return (1. - np.cos(np.pi * (f - fnext) / dfnext)) / 2.
+            else:
+                return 0.
+
+        return _vectorized_window(f)
+
+    def _setup_linear_coefficients(self):
+        """Set up coefficients by which waveforms are multiplied to compute (d, h)"""
+        self.linear_coeffs = dict((ifo.name, np.array([])) for ifo in self.interferometers)
+        N = self.Nbs[-1]
+        for ifo in self.interferometers:
+            logger.info("Pre-computing linear coefficients for {}".format(ifo.name))
+            fddata = np.zeros(N // 2 + 1, dtype=complex)
+            fddata[:len(ifo.frequency_domain_strain)][ifo.frequency_mask] += \
+                ifo.frequency_domain_strain[ifo.frequency_mask] / ifo.power_spectral_density_array[ifo.frequency_mask]
+            for b in range(len(self.fb_dfb) - 1):
+                start_idx, end_idx = self.start_end_idxs[b]
+                windows = self._window(self.banded_frequency_points[start_idx:end_idx + 1], b)
+                fddata_in_ith_band = np.copy(fddata[:int(self.Nbs[b] / 2 + 1)])
+                fddata_in_ith_band[-1] = 0.  # zeroing data at the Nyquist frequency
+                tddata = np.fft.irfft(fddata_in_ith_band)[-self.Mbs[b]:]
+                Ks, Ke = self.Ks_Ke[b]
+                fddata_in_ith_band = np.fft.rfft(tddata)[Ks:Ke + 1]
+                self.linear_coeffs[ifo.name] = np.append(
+                    self.linear_coeffs[ifo.name], (4. / self.durations[b]) * windows * np.conj(fddata_in_ith_band))
+
+    def _setup_quadratic_coefficients_linear_interp(self):
+        """Set up coefficients by which the squares of waveforms are multiplied to compute (h, h) for the
+        linear-interpolation algorithm"""
+        logger.info("Linear-interpolation algorithm is used for (h, h).")
+        self.quadratic_coeffs = dict((ifo.name, np.array([])) for ifo in self.interferometers)
+        N = self.Nbs[-1]
+        for ifo in self.interferometers:
+            logger.info("Pre-computing quadratic coefficients for {}".format(ifo.name))
+            full_frequencies = np.arange(N // 2 + 1) / ifo.duration
+            full_inv_psds = np.zeros(N // 2 + 1)
+            full_inv_psds[:len(ifo.power_spectral_density_array)][ifo.frequency_mask] = \
+                1. / ifo.power_spectral_density_array[ifo.frequency_mask]
+            for i in range(len(self.fb_dfb) - 1):
+                start_idx, end_idx = self.start_end_idxs[i]
+                banded_frequencies = self.banded_frequency_points[start_idx:end_idx + 1]
+                coeffs = np.zeros(len(banded_frequencies))
+                for k in range(len(coeffs) - 1):
+                    if k == 0:
+                        start_idx_in_sum = 0
+                    else:
+                        start_idx_in_sum = math.ceil(ifo.duration * banded_frequencies[k])
+                    if k == len(coeffs) - 2:
+                        end_idx_in_sum = len(full_frequencies) - 1
+                    else:
+                        end_idx_in_sum = math.ceil(ifo.duration * banded_frequencies[k + 1]) - 1
+                    window_over_psd = (
+                        full_inv_psds[start_idx_in_sum:end_idx_in_sum + 1]
+                        * self._window(full_frequencies[start_idx_in_sum:end_idx_in_sum + 1], i)
+                    )
+                    frequencies_in_sum = full_frequencies[start_idx_in_sum:end_idx_in_sum + 1]
+                    coeffs[k] += 4 * self.durations[i] / ifo.duration * np.sum(
+                        (banded_frequencies[k + 1] - frequencies_in_sum) * window_over_psd
+                    )
+                    coeffs[k + 1] += 4 * self.durations[i] / ifo.duration * np.sum(
+                        (frequencies_in_sum - banded_frequencies[k]) * window_over_psd
+                    )
+                self.quadratic_coeffs[ifo.name] = np.append(self.quadratic_coeffs[ifo.name], coeffs)
+
+    def _setup_quadratic_coefficients_ifft_fft(self):
+        """Set up coefficients needed for the IFFT-FFT algorithm to compute (h, h)"""
+        logger.info("IFFT-FFT algorithm is used for (h, h).")
+        N = self.Nbs[-1]
+        # variables defined below correspond to \hat{N}^(b), \hat{T}^(b), \tilde{I}^(b)_{c, k}, h^(b)_{c, m} and
+        # \sqrt{w^(b)(f^(b)_k)} \tilde{h}(f^(b)_k) in the paper
+        Nhatbs = [min(2 * Mb, Nb) for Mb, Nb in zip(self.Mbs, self.Nbs)]
+        self.Tbhats = [self.interferometers.duration * Nbhat / Nb for Nb, Nbhat in zip(self.Nbs, Nhatbs)]
+        self.Ibcs = dict((ifo.name, []) for ifo in self.interferometers)
+        self.hbcs = dict((ifo.name, []) for ifo in self.interferometers)
+        self.wths = dict((ifo.name, []) for ifo in self.interferometers)
+        for ifo in self.interferometers:
+            logger.info("Pre-computing quadratic coefficients for {}".format(ifo.name))
+            full_inv_psds = np.zeros(N // 2 + 1)
+            full_inv_psds[:len(ifo.power_spectral_density_array)][ifo.frequency_mask] = (
+                1 / ifo.power_spectral_density_array[ifo.frequency_mask]
+            )
+            for b in range(len(self.fb_dfb) - 1):
+                Imb = np.fft.irfft(full_inv_psds[:self.Nbs[b] // 2 + 1])
+                half_length = Nhatbs[b] // 2
+                Imbc = np.append(Imb[:half_length + 1], Imb[-(Nhatbs[b] - half_length - 1):])
+                self.Ibcs[ifo.name].append(np.fft.rfft(Imbc))
+                # Allocate arrays for IFFT-FFT operations
+                self.hbcs[ifo.name].append(np.zeros(Nhatbs[b]))
+                self.wths[ifo.name].append(np.zeros(self.Mbs[b] // 2 + 1, dtype=complex))
+        # precompute windows and their squares
+        self.windows = np.array([])
+        self.square_root_windows = np.array([])
+        for b in range(len(self.fb_dfb) - 1):
+            start, end = self.start_end_idxs[b]
+            ws = self._window(self.banded_frequency_points[start:end + 1], b)
+            self.windows = np.append(self.windows, ws)
+            self.square_root_windows = np.append(self.square_root_windows, np.sqrt(ws))
+
+    def calculate_snrs(self, waveform_polarizations, interferometer):
+        """
+        Compute the snrs for multi-banding
+
+        Parameters
+        ----------
+        waveform_polarizations: dict
+            polarizations of the waveform
+        interferometer: bilby.gw.detector.Interferometer
+
+        Returns
+        -------
+        snrs: named tuple of snrs
+
+        """
+        strain = np.zeros(len(self.banded_frequency_points), dtype=complex)
+        for mode in waveform_polarizations:
+            response = interferometer.antenna_response(
+                self.parameters['ra'], self.parameters['dec'],
+                self.parameters['geocent_time'], self.parameters['psi'],
+                mode
+            )
+            strain += waveform_polarizations[mode][self.unique_to_original_frequencies] * response
+
+        dt = interferometer.time_delay_from_geocenter(
+            self.parameters['ra'], self.parameters['dec'],
+            self.parameters['geocent_time'])
+        dt_geocent = self.parameters['geocent_time'] - interferometer.strain_data.start_time
+        ifo_time = dt_geocent + dt
+
+        calib_factor = interferometer.calibration_model.get_calibration_factor(
+            self.banded_frequency_points, prefix='recalib_{}_'.format(interferometer.name), **self.parameters)
+
+        strain *= np.exp(-1j * 2. * np.pi * self.banded_frequency_points * ifo_time)
+        strain *= np.conjugate(calib_factor)
+
+        d_inner_h = np.dot(strain, self.linear_coeffs[interferometer.name])
+
+        if self.linear_interpolation:
+            optimal_snr_squared = np.vdot(
+                np.real(strain * np.conjugate(strain)),
+                self.quadratic_coeffs[interferometer.name]
+            )
+        else:
+            optimal_snr_squared = 0.
+            for b in range(len(self.fb_dfb) - 1):
+                Ks, Ke = self.Ks_Ke[b]
+                start_idx, end_idx = self.start_end_idxs[b]
+                Mb = self.Mbs[b]
+                if b == 0:
+                    optimal_snr_squared += (4. / self.interferometers.duration) * np.vdot(
+                        np.real(strain[start_idx:end_idx + 1] * np.conjugate(strain[start_idx:end_idx + 1])),
+                        interferometer.frequency_mask[Ks:Ke + 1] * self.windows[start_idx:end_idx + 1]
+                        / interferometer.power_spectral_density_array[Ks:Ke + 1])
+                else:
+                    self.wths[interferometer.name][b][Ks:Ke + 1] = (
+                        self.square_root_windows[start_idx:end_idx + 1] * strain[start_idx:end_idx + 1]
+                    )
+                    self.hbcs[interferometer.name][b][-Mb:] = np.fft.irfft(self.wths[interferometer.name][b])
+                    thbc = np.fft.rfft(self.hbcs[interferometer.name][b])
+                    optimal_snr_squared += (4. / self.Tbhats[b]) * np.vdot(
+                        np.real(thbc * np.conjugate(thbc)), self.Ibcs[interferometer.name][b])
+
+        complex_matched_filter_snr = d_inner_h / (optimal_snr_squared**0.5)
+
+        return self._CalculatedSNRs(
+            d_inner_h=d_inner_h, optimal_snr_squared=optimal_snr_squared,
+            complex_matched_filter_snr=complex_matched_filter_snr,
+            d_inner_h_squared_tc_array=None,
+            d_inner_h_array=None,
+            optimal_snr_squared_array=None)
+
+    def _rescale_signal(self, signal, new_distance):
+        for mode in signal:
+            signal[mode] *= self._ref_dist / new_distance
diff --git a/bilby/gw/likelihood/roq.py b/bilby/gw/likelihood/roq.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d459082a1bee60066cdd56a289c9d6e51eb2d47
--- /dev/null
+++ b/bilby/gw/likelihood/roq.py
@@ -0,0 +1,514 @@
+
+import json
+
+import numpy as np
+
+from .base import GravitationalWaveTransient
+from ...core.utils import BilbyJsonEncoder, decode_bilby_json
+from ...core.utils import (
+    logger, create_frequency_series, speed_of_light, radius_of_earth
+)
+from ..prior import CBCPriorDict
+from ..utils import build_roq_weights
+
+
+class ROQGravitationalWaveTransient(GravitationalWaveTransient):
+    """A reduced order quadrature likelihood object
+
+    This uses the method described in Smith et al., (2016) Phys. Rev. D 94,
+    044031. A public repository of the ROQ data is available from
+    https://git.ligo.org/lscsoft/ROQ_data.
+
+    Parameters
+    ==========
+    interferometers: list, bilby.gw.detector.InterferometerList
+        A list of `bilby.detector.Interferometer` instances - contains the
+        detector data and power spectral densities
+    waveform_generator: `bilby.waveform_generator.WaveformGenerator`
+        An object which computes the frequency-domain strain of the signal,
+        given some set of parameters
+    linear_matrix: str, array_like
+        Either a string pointing to the file from which to load the
+        linear_matrix array, or the array itself.
+    quadratic_matrix: str, array_like
+        Either a string pointing to the file from which to load the
+        quadratic_matrix array, or the array itself.
+    roq_params: str, array_like
+        Parameters describing the domain of validity of the ROQ basis.
+    roq_params_check: bool
+        If true, run tests using the roq_params to check the prior and data are
+        valid for the ROQ
+    roq_scale_factor: float
+        The ROQ scale factor used.
+    priors: dict, bilby.prior.PriorDict
+        A dictionary of priors containing at least the geocent_time prior
+        Warning: when using marginalisation the dict is overwritten, which will change
+        the dict you are passing in. If this behaviour is undesired, pass `priors.copy()`.
+    distance_marginalization_lookup_table: (dict, str), optional
+        If a dict, dictionary containing the lookup_table, distance_array,
+        (distance) prior_array, and reference_distance used to construct
+        the table.
+        If a string the name of a file containing these quantities.
+        The lookup table is stored after construction in either the
+        provided string or a default location:
+        '.distance_marginalization_lookup_dmin{}_dmax{}_n{}.npz'
+    reference_frame: (str, bilby.gw.detector.InterferometerList, list), optional
+        Definition of the reference frame for the sky location.
+        - "sky": sample in RA/dec, this is the default
+        - e.g., "H1L1", ["H1", "L1"], InterferometerList(["H1", "L1"]):
+          sample in azimuth and zenith, `azimuth` and `zenith` defined in the
+          frame where the z-axis is aligned with the vector connecting H1
+          and L1.
+    time_reference: str, optional
+        Name of the reference for the sampled time parameter.
+        - "geocent"/"geocenter": sample in the time at the Earth's center,
+          this is the default
+        - e.g., "H1": sample in the time of arrival at H1
+
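+    Examples
+    ========
+    A minimal sketch, assuming ``ifos``, ``wfg`` and ``priors`` are already set
+    up, the basis file paths are hypothetical, and ``wfg`` carries
+    ``frequency_nodes_linear`` and ``frequency_nodes_quadratic`` in its
+    ``waveform_arguments``:
+
+    >>> likelihood = ROQGravitationalWaveTransient(
+    ...     interferometers=ifos, waveform_generator=wfg, priors=priors,
+    ...     linear_matrix="B_linear.npy", quadratic_matrix="B_quadratic.npy")
+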
+    """
+    def __init__(
+            self, interferometers, waveform_generator, priors,
+            weights=None, linear_matrix=None, quadratic_matrix=None,
+            roq_params=None, roq_params_check=True, roq_scale_factor=1,
+            distance_marginalization=False, phase_marginalization=False,
+            distance_marginalization_lookup_table=None,
+            reference_frame="sky", time_reference="geocenter"
+
+    ):
+        super(ROQGravitationalWaveTransient, self).__init__(
+            interferometers=interferometers,
+            waveform_generator=waveform_generator, priors=priors,
+            distance_marginalization=distance_marginalization,
+            phase_marginalization=phase_marginalization,
+            time_marginalization=False,
+            distance_marginalization_lookup_table=distance_marginalization_lookup_table,
+            jitter_time=False,
+            reference_frame=reference_frame,
+            time_reference=time_reference
+        )
+
+        self.roq_params_check = roq_params_check
+        self.roq_scale_factor = roq_scale_factor
+        if isinstance(roq_params, np.ndarray) or roq_params is None:
+            self.roq_params = roq_params
+        elif isinstance(roq_params, str):
+            self.roq_params_file = roq_params
+            self.roq_params = np.genfromtxt(roq_params, names=True)
+        else:
+            raise TypeError("roq_params should be array or str")
+        if isinstance(weights, dict):
+            self.weights = weights
+        elif isinstance(weights, str):
+            self.weights = self.load_weights(weights)
+        else:
+            self.weights = dict()
+            if isinstance(linear_matrix, str):
+                logger.info(
+                    "Loading linear matrix from {}".format(linear_matrix))
+                linear_matrix = np.load(linear_matrix).T
+            if isinstance(quadratic_matrix, str):
+                logger.info(
+                    "Loading quadratic_matrix from {}".format(quadratic_matrix))
+                quadratic_matrix = np.load(quadratic_matrix).T
+            self._set_weights(linear_matrix=linear_matrix,
+                              quadratic_matrix=quadratic_matrix)
+        self.frequency_nodes_linear = \
+            waveform_generator.waveform_arguments['frequency_nodes_linear']
+        self.frequency_nodes_quadratic = \
+            waveform_generator.waveform_arguments['frequency_nodes_quadratic']
+
+    def calculate_snrs(self, waveform_polarizations, interferometer):
+        """
+        Compute the SNRs for ROQ
+
+        Parameters
+        ==========
+        waveform_polarizations: dict
+            Dictionary containing the 'linear' and 'quadratic' waveform
+            polarizations
+        interferometer: bilby.gw.detector.Interferometer
+            The interferometer for which to compute the SNRs
+
+        """
+
+        f_plus = interferometer.antenna_response(
+            self.parameters['ra'], self.parameters['dec'],
+            self.parameters['geocent_time'], self.parameters['psi'], 'plus')
+        f_cross = interferometer.antenna_response(
+            self.parameters['ra'], self.parameters['dec'],
+            self.parameters['geocent_time'], self.parameters['psi'], 'cross')
+
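+        # Arrival time at the detector measured from the start of the data
+        # segment: geocentric time offset plus geocenter-to-detector delay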
+        dt = interferometer.time_delay_from_geocenter(
+            self.parameters['ra'], self.parameters['dec'],
+            self.parameters['geocent_time'])
+        dt_geocent = self.parameters['geocent_time'] - interferometer.strain_data.start_time
+        ifo_time = dt_geocent + dt
+
+        calib_linear = interferometer.calibration_model.get_calibration_factor(
+            self.frequency_nodes_linear,
+            prefix='recalib_{}_'.format(interferometer.name), **self.parameters)
+        calib_quadratic = interferometer.calibration_model.get_calibration_factor(
+            self.frequency_nodes_quadratic,
+            prefix='recalib_{}_'.format(interferometer.name), **self.parameters)
+
+        h_plus_linear = f_plus * waveform_polarizations['linear']['plus'] * calib_linear
+        h_cross_linear = f_cross * waveform_polarizations['linear']['cross'] * calib_linear
+        h_plus_quadratic = (
+            f_plus * waveform_polarizations['quadratic']['plus'] * calib_quadratic
+        )
+        h_cross_quadratic = (
+            f_cross * waveform_polarizations['quadratic']['cross'] * calib_quadratic
+        )
+
+        indices, in_bounds = self._closest_time_indices(
+            ifo_time, self.weights['time_samples'])
+        if not in_bounds:
+            logger.debug("SNR calculation error: requested time at edge of ROQ time samples")
+            return self._CalculatedSNRs(
+                d_inner_h=np.nan_to_num(-np.inf), optimal_snr_squared=0,
+                complex_matched_filter_snr=np.nan_to_num(-np.inf),
+                d_inner_h_squared_tc_array=None,
+                d_inner_h_array=None,
+                optimal_snr_squared_array=None)
+
+        d_inner_h_tc_array = np.einsum(
+            'i,ji->j', np.conjugate(h_plus_linear + h_cross_linear),
+            self.weights[interferometer.name + '_linear'][indices])
+
+        d_inner_h = self._interp_five_samples(
+            self.weights['time_samples'][indices], d_inner_h_tc_array, ifo_time)
+
+        optimal_snr_squared = \
+            np.vdot(np.abs(h_plus_quadratic + h_cross_quadratic)**2,
+                    self.weights[interferometer.name + '_quadratic'])
+
+        with np.errstate(invalid="ignore"):
+            complex_matched_filter_snr = d_inner_h / (optimal_snr_squared**0.5)
+        d_inner_h_squared_tc_array = None
+
+        return self._CalculatedSNRs(
+            d_inner_h=d_inner_h, optimal_snr_squared=optimal_snr_squared,
+            complex_matched_filter_snr=complex_matched_filter_snr,
+            d_inner_h_squared_tc_array=d_inner_h_squared_tc_array,
+            d_inner_h_array=None,
+            optimal_snr_squared_array=None)
+
+    @staticmethod
+    def _closest_time_indices(time, samples):
+        """
+        Get the closest five times
+
+        Parameters
+        ==========
+        time: float
+            Time to check
+        samples: array-like
+            Available times
+
+        Returns
+        =======
+        indices: list
+            Indices nearest to time
+        in_bounds: bool
+            Whether the indices are for valid times
+        """
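+        # e.g. for samples = [0., 0.1, 0.2, ...] and time = 0.34, closest = 3
+        # and indices = [1, 2, 3, 4, 5]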
+        closest = int((time - samples[0]) / (samples[1] - samples[0]))
+        indices = [closest + ii for ii in [-2, -1, 0, 1, 2]]
+        in_bounds = (indices[0] >= 0) & (indices[-1] < samples.size)
+        return indices, in_bounds
+
+    @staticmethod
+    def _interp_five_samples(time_samples, values, time):
+        """
+        Interpolate a function of time with its values at the closest five times.
+        The algorithm is explained in https://dcc.ligo.org/T2100224.
+
+        Parameters
+        ==========
+        time_samples: array-like
+            Closest 5 times
+        values: array-like
+            The values of the function at closest 5 times
+        time: float
+            Time at which the function is calculated
+
+        Returns
+        =======
+        value: float
+            The value of the function at the input time
+        """
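+        # r1 and r2 are second-difference estimates of f''(t) * dt^2 at the
+        # two central samples; a, b, c and d are the standard cubic-spline
+        # interpolation coefficients between those samples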
+        r1 = (-values[0] + 8. * values[1] - 14. * values[2] + 8. * values[3] - values[4]) / 4.
+        r2 = values[2] - 2. * values[3] + values[4]
+        a = (time_samples[3] - time) / (time_samples[1] - time_samples[0])
+        b = 1. - a
+        c = (a**3. - a) / 6.
+        d = (b**3. - b) / 6.
+        return a * values[2] + b * values[3] + c * r1 + d * r2
+
+    def perform_roq_params_check(self, ifo=None):
+        """ Check that the prior and data are valid for the ROQ
+
+        Parameters
+        ==========
+        ifo: bilby.gw.detector.Interferometer
+            The interferometer
+        """
+        if self.roq_params_check is False:
+            logger.warning("No ROQ params checking performed")
+            return
+        if getattr(self, "roq_params_file", None) is not None:
+            msg = ("Check ROQ params {} with roq_scale_factor={}"
+                   .format(self.roq_params_file, self.roq_scale_factor))
+        else:
+            msg = ("Check ROQ params with roq_scale_factor={}"
+                   .format(self.roq_scale_factor))
+        logger.info(msg)
+
+        roq_params = self.roq_params
+        roq_minimum_frequency = roq_params['flow'] * self.roq_scale_factor
+        roq_maximum_frequency = roq_params['fhigh'] * self.roq_scale_factor
+        roq_segment_length = roq_params['seglen'] / self.roq_scale_factor
+        roq_minimum_chirp_mass = roq_params['chirpmassmin'] / self.roq_scale_factor
+        roq_maximum_chirp_mass = roq_params['chirpmassmax'] / self.roq_scale_factor
+        roq_minimum_component_mass = roq_params['compmin'] / self.roq_scale_factor
+
+        if ifo.maximum_frequency > roq_maximum_frequency:
+            raise BilbyROQParamsRangeError(
+                "Requested maximum frequency {} larger than ROQ basis fhigh {}"
+                .format(ifo.maximum_frequency, roq_maximum_frequency)
+            )
+        if ifo.minimum_frequency < roq_minimum_frequency:
+            raise BilbyROQParamsRangeError(
+                "Requested minimum frequency {} lower than ROQ basis flow {}"
+                .format(ifo.minimum_frequency, roq_minimum_frequency)
+            )
+        if ifo.strain_data.duration != roq_segment_length:
+            raise BilbyROQParamsRangeError(
+                "Requested duration differs from ROQ basis seglen")
+
+        priors = self.priors
+        if not isinstance(priors, CBCPriorDict):
+            logger.warning("Unable to check ROQ parameter bounds: priors not understood")
+            return
+
+        if priors.minimum_chirp_mass is None:
+            logger.warning("Unable to check minimum chirp mass ROQ bounds")
+        elif priors.minimum_chirp_mass < roq_minimum_chirp_mass:
+            raise BilbyROQParamsRangeError(
+                "Prior minimum chirp mass {} less than ROQ basis bound {}"
+                .format(priors.minimum_chirp_mass, roq_minimum_chirp_mass)
+            )
+
+        if priors.maximum_chirp_mass is None:
+            logger.warning("Unable to check maximum chirp mass ROQ bounds")
+        elif priors.maximum_chirp_mass > roq_maximum_chirp_mass:
+            raise BilbyROQParamsRangeError(
+                "Prior maximum chirp mass {} greater than ROQ basis bound {}"
+                .format(priors.maximum_chirp_mass, roq_maximum_chirp_mass)
+            )
+
+        if priors.minimum_component_mass is None:
+            logger.warning("Unable to check minimum component mass ROQ bounds")
+        elif priors.minimum_component_mass < roq_minimum_component_mass:
+            raise BilbyROQParamsRangeError(
+                "Prior minimum component mass {} less than ROQ basis bound {}"
+                .format(priors.minimum_component_mass, roq_minimum_component_mass)
+            )
+
+    def _set_weights(self, linear_matrix, quadratic_matrix):
+        """
+        Set up the time-dependent ROQ weights.
+        See https://dcc.ligo.org/LIGO-T2100125 for details of how to compute them.
+
+        Parameters
+        ==========
+        linear_matrix, quadratic_matrix: array_like
+            Arrays of the linear and quadratic basis
+
+        """
+
+        time_space = self._get_time_resolution()
+        number_of_time_samples = int(self.interferometers.duration / time_space)
+        try:
+            import pyfftw
+            ifft_input = pyfftw.empty_aligned(number_of_time_samples, dtype=complex)
+            ifft_output = pyfftw.empty_aligned(number_of_time_samples, dtype=complex)
+            ifft = pyfftw.FFTW(ifft_input, ifft_output, direction='FFTW_BACKWARD')
+        except ImportError:
+            pyfftw = None
+            logger.warning("You do not have pyfftw installed, falling back to numpy.fft.")
+            ifft_input = np.zeros(number_of_time_samples, dtype=complex)
+            ifft = np.fft.ifft
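+        # Pad the prior time range by the Earth light-crossing time (plus a
+        # few time samples) so that detector-frame arrival times shifted away
+        # from the reference time remain inside the weight grid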
+        earth_light_crossing_time = 2 * radius_of_earth / speed_of_light + 5 * time_space
+        start_idx = max(
+            0,
+            int(np.floor((
+                self.priors['{}_time'.format(self.time_reference)].minimum
+                - earth_light_crossing_time
+                - self.interferometers.start_time
+            ) / time_space))
+        )
+        end_idx = min(
+            number_of_time_samples - 1,
+            int(np.ceil((
+                self.priors['{}_time'.format(self.time_reference)].maximum
+                + earth_light_crossing_time
+                - self.interferometers.start_time
+            ) / time_space))
+        )
+        self.weights['time_samples'] = np.arange(start_idx, end_idx + 1) * time_space
+        logger.info("Using {} ROQ time samples".format(len(self.weights['time_samples'])))
+
+        for ifo in self.interferometers:
+            if self.roq_params is not None:
+                self.perform_roq_params_check(ifo)
+                # Get scaled ROQ quantities
+                roq_scaled_minimum_frequency = self.roq_params['flow'] * self.roq_scale_factor
+                roq_scaled_maximum_frequency = self.roq_params['fhigh'] * self.roq_scale_factor
+                roq_scaled_segment_length = self.roq_params['seglen'] / self.roq_scale_factor
+                # Generate frequencies for the ROQ
+                roq_frequencies = create_frequency_series(
+                    sampling_frequency=roq_scaled_maximum_frequency * 2,
+                    duration=roq_scaled_segment_length)
+                roq_mask = roq_frequencies >= roq_scaled_minimum_frequency
+                roq_frequencies = roq_frequencies[roq_mask]
+                overlap_frequencies, ifo_idxs, roq_idxs = np.intersect1d(
+                    ifo.frequency_array[ifo.frequency_mask], roq_frequencies,
+                    return_indices=True)
+            else:
+                overlap_frequencies = ifo.frequency_array[ifo.frequency_mask]
+                roq_idxs = np.arange(linear_matrix.shape[0], dtype=int)
+                ifo_idxs = np.arange(sum(ifo.frequency_mask))
+                if len(ifo_idxs) != len(roq_idxs):
+                    raise ValueError(
+                        "Mismatch between ROQ basis and frequency array for "
+                        "{}".format(ifo.name))
+            logger.info(
+                "Building ROQ weights for {} with {} frequencies between {} "
+                "and {}.".format(
+                    ifo.name, len(overlap_frequencies),
+                    min(overlap_frequencies), max(overlap_frequencies)))
+
+            ifft_input[:] *= 0.
+            self.weights[ifo.name + '_linear'] = \
+                np.zeros((len(self.weights['time_samples']), linear_matrix.shape[1]), dtype=complex)
+            data_over_psd = (
+                ifo.frequency_domain_strain[ifo.frequency_mask][ifo_idxs]
+                / ifo.power_spectral_density_array[ifo.frequency_mask][ifo_idxs]
+            )
+            nonzero_idxs = ifo_idxs + int(ifo.frequency_array[ifo.frequency_mask][0] * self.interferometers.duration)
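+            # Each linear basis element is weighted by data / PSD and
+            # inverse-FFTed, evaluating the inner product <d|B_i> at every
+            # time sample in a single pass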
+            for i, basis_element in enumerate(linear_matrix[roq_idxs].T):
+                ifft_input[nonzero_idxs] = data_over_psd * np.conj(basis_element)
+                self.weights[ifo.name + '_linear'][:, i] = ifft(ifft_input)[start_idx:end_idx + 1]
+            self.weights[ifo.name + '_linear'] *= 4. * number_of_time_samples / self.interferometers.duration
+
+            self.weights[ifo.name + '_quadratic'] = build_roq_weights(
+                1 /
+                ifo.power_spectral_density_array[ifo.frequency_mask][ifo_idxs],
+                quadratic_matrix[roq_idxs].real,
+                1 / ifo.strain_data.duration)
+
+            logger.info("Finished building weights for {}".format(ifo.name))
+
+        if pyfftw is not None:
+            pyfftw.forget_wisdom()
+
+    def save_weights(self, filename, format='npz'):
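+        """
+        Save the ROQ weights to a file
+
+        Parameters
+        ==========
+        filename: str
+            The name of the output file; the format suffix is appended if
+            it is not already present
+        format: str
+            The file format, either 'npz' (default) or 'json'
+        """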
+        if format not in filename:
+            filename += "." + format
+        logger.info("Saving ROQ weights to {}".format(filename))
+        if format == 'json':
+            with open(filename, 'w') as file:
+                json.dump(self.weights, file, indent=2, cls=BilbyJsonEncoder)
+        elif format == 'npz':
+            np.savez(filename, **self.weights)
+
+    @staticmethod
+    def load_weights(filename, format=None):
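+        """
+        Load ROQ weights from a 'json' or 'npz' file
+
+        Parameters
+        ==========
+        filename: str
+            The name of the file to load the weights from
+        format: str, optional
+            The file format; inferred from the filename extension if not
+            given
+
+        Returns
+        =======
+        weights: dict
+            Dictionary of the ROQ weights
+        """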
+        if format is None:
+            format = filename.split(".")[-1]
+        if format not in ["json", "npz"]:
+            raise IOError("Format {} not recognized.".format(format))
+        logger.info("Loading ROQ weights from {}".format(filename))
+        if format == "json":
+            with open(filename, 'r') as file:
+                weights = json.load(file, object_hook=decode_bilby_json)
+        elif format == "npz":
+            # Wrap in dict to load data into memory
+            weights = dict(np.load(filename))
+        return weights
+
+    def _get_time_resolution(self):
+        """
+        This method estimates the time resolution given the optimal SNR of the
+        signal in the detector. This is then used when constructing the weights
+        for the ROQ.
+
+        A minimum resolution is set by assuming the SNR in each detector is
+        at least 10. When the SNR is not available, it is assumed to be 30
+        in each detector.
+
+        Returns
+        =======
+        delta_t: float
+            Time resolution
+        """
+
+        def calc_fhigh(freq, psd, scaling=20.):
+            """
+            Estimate the maximum frequency which must be considered
+
+            Parameters
+            ==========
+            freq: array-like
+                Frequency array
+            psd: array-like
+                Power spectral density
+            scaling: float
+                SNR-dependent scaling factor
+
+            Returns
+            =======
+            f_high: float
+                The maximum frequency which must be considered
+            """
+            from scipy.integrate import simps
+            integrand1 = np.power(freq, -7. / 3) / psd
+            integral1 = simps(integrand1, freq)
+            integrand3 = np.power(freq, 2. / 3.) / (psd * integral1)
+            f_3_bar = simps(integrand3, freq)
+
+            f_high = scaling * f_3_bar**(1 / 3)
+
+            return f_high
+
+        def c_f_scaling(snr):
+            return (np.pi**2 * snr**2 / 6)**(1 / 3)
+
+        inj_snr_sq = 0
+        for ifo in self.interferometers:
+            inj_snr_sq += max(10, ifo.meta_data.get('optimal_SNR', 30))**2
+
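+        # NB: the PSD and frequency array used below are taken from the last
+        # interferometer in the loop above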
+        psd = ifo.power_spectral_density_array[ifo.frequency_mask]
+        freq = ifo.frequency_array[ifo.frequency_mask]
+        fhigh = calc_fhigh(freq, psd, scaling=c_f_scaling(inj_snr_sq**0.5))
+
+        delta_t = fhigh**-1
+
+        # Apply a safety factor to ensure the time step is short enough
+        delta_t = delta_t / 5
+
+        # duration / delta_t needs to be a power of 2 for IFFT
+        number_of_time_samples = max(
+            self.interferometers.duration / delta_t,
+            self.interferometers.frequency_array[-1] * self.interferometers.duration + 1)
+        number_of_time_samples = int(2**np.ceil(np.log2(number_of_time_samples)))
+        delta_t = self.interferometers.duration / number_of_time_samples
+        logger.info("ROQ time-step = {}".format(delta_t))
+        return delta_t
+
+    def _rescale_signal(self, signal, new_distance):
+        for kind in ['linear', 'quadratic']:
+            for mode in signal[kind]:
+                signal[kind][mode] *= self._ref_dist / new_distance
+
+
+class BilbyROQParamsRangeError(Exception):
+    pass
diff --git a/bilby/gw/prior.py b/bilby/gw/prior.py
index 7f63d91ee2ebf79fff874b46479822f161e499c2..1bf3c69981d91b2edb10fa808b0ce5ffbeecae06 100644
--- a/bilby/gw/prior.py
+++ b/bilby/gw/prior.py
@@ -20,7 +20,7 @@ from .conversion import (
     generate_all_bbh_parameters,
     chirp_mass_and_mass_ratio_to_total_mass,
     total_mass_and_mass_ratio_to_component_masses)
-from .cosmology import get_cosmology
+from .cosmology import get_cosmology, z_at_value
 from .source import PARAMETER_SETS
 from .utils import calculate_time_to_merger
 
@@ -68,7 +68,7 @@ def convert_to_flat_in_component_mass_prior(result, fraction=0.25):
         priors[key] = Constraint(priors[key].minimum, priors[key].maximum, key, latex_label=priors[key].latex_label)
     for key in ['mass_1', 'mass_2']:
         priors[key] = Uniform(priors[key].minimum, priors[key].maximum, key, latex_label=priors[key].latex_label,
-                              unit="$M_{\odot}$")
+                              unit=r"$M_{\odot}$")
 
     weights = np.array(result.get_weights_by_new_prior(old_priors, priors,
                                                        prior_names=['chirp_mass', 'mass_ratio', 'mass_1', 'mass_2']))
@@ -170,7 +170,6 @@ class Cosmological(Interped):
         recalculate_array: boolean
             Determines if the distance arrays are recalculated
         """
-        from astropy.cosmology import z_at_value
         cosmology = get_cosmology(self.cosmology)
         limit_dict[self.name] = value
         if self.name == 'redshift':
@@ -277,7 +276,7 @@ class UniformComovingVolume(Cosmological):
 
 
 class UniformSourceFrame(Cosmological):
-    """
+    r"""
     Prior for redshift which is uniform in comoving volume and source frame
     time.
 
@@ -295,7 +294,7 @@ class UniformSourceFrame(Cosmological):
 class UniformInComponentsChirpMass(PowerLaw):
 
     def __init__(self, minimum, maximum, name='chirp_mass',
-                 latex_label='$\mathcal{M}$', unit=None, boundary=None):
+                 latex_label=r'$\mathcal{M}$', unit=None, boundary=None):
         """
         Prior distribution for chirp mass which is uniform in component masses.
 
@@ -925,36 +924,36 @@ Prior._default_latex_labels = {
     'mass_1': '$m_1$',
     'mass_2': '$m_2$',
     'total_mass': '$M$',
-    'chirp_mass': '$\mathcal{M}$',
+    'chirp_mass': r'$\mathcal{M}$',
     'mass_ratio': '$q$',
-    'symmetric_mass_ratio': '$\eta$',
+    'symmetric_mass_ratio': r'$\eta$',
     'a_1': '$a_1$',
     'a_2': '$a_2$',
-    'tilt_1': '$\\theta_1$',
-    'tilt_2': '$\\theta_2$',
-    'cos_tilt_1': '$\cos\\theta_1$',
-    'cos_tilt_2': '$\cos\\theta_2$',
-    'phi_12': '$\Delta\phi$',
-    'phi_jl': '$\phi_{JL}$',
+    'tilt_1': r'$\theta_1$',
+    'tilt_2': r'$\theta_2$',
+    'cos_tilt_1': r'$\cos\theta_1$',
+    'cos_tilt_2': r'$\cos\theta_2$',
+    'phi_12': r'$\Delta\phi$',
+    'phi_jl': r'$\phi_{JL}$',
     'luminosity_distance': '$d_L$',
-    'dec': '$\mathrm{DEC}$',
-    'ra': '$\mathrm{RA}$',
-    'iota': '$\iota$',
-    'cos_iota': '$\cos\iota$',
-    'theta_jn': '$\\theta_{JN}$',
-    'cos_theta_jn': '$\cos\\theta_{JN}$',
-    'psi': '$\psi$',
-    'phase': '$\phi$',
+    'dec': r'$\mathrm{DEC}$',
+    'ra': r'$\mathrm{RA}$',
+    'iota': r'$\iota$',
+    'cos_iota': r'$\cos\iota$',
+    'theta_jn': r'$\theta_{JN}$',
+    'cos_theta_jn': r'$\cos\theta_{JN}$',
+    'psi': r'$\psi$',
+    'phase': r'$\phi$',
     'geocent_time': '$t_c$',
     'time_jitter': '$t_j$',
-    'lambda_1': '$\\Lambda_1$',
-    'lambda_2': '$\\Lambda_2$',
-    'lambda_tilde': '$\\tilde{\\Lambda}$',
-    'delta_lambda_tilde': '$\\delta\\tilde{\\Lambda}$',
-    'chi_1': '$\\chi_1$',
-    'chi_2': '$\\chi_2$',
-    'chi_1_in_plane': '$\\chi_{1, \perp}$',
-    'chi_2_in_plane': '$\\chi_{2, \perp}$',
+    'lambda_1': r'$\Lambda_1$',
+    'lambda_2': r'$\Lambda_2$',
+    'lambda_tilde': r'$\tilde{\Lambda}$',
+    'delta_lambda_tilde': r'$\delta\tilde{\Lambda}$',
+    'chi_1': r'$\chi_1$',
+    'chi_2': r'$\chi_2$',
+    'chi_1_in_plane': r'$\chi_{1, \perp}$',
+    'chi_2_in_plane': r'$\chi_{2, \perp}$',
 }
 
 
@@ -1057,7 +1056,7 @@ class CalibrationPriorDict(PriorDict):
                                    boundary=boundary)
         for ii in range(n_nodes):
             name = "recalib_{}_phase_{}".format(label, ii)
-            latex_label = "$\\phi^{}_{}$".format(label, ii)
+            latex_label = r"$\phi^{}_{}$".format(label, ii)
             prior[name] = Gaussian(mu=phase_mean_nodes[ii],
                                    sigma=phase_sigma_nodes[ii],
                                    name=name, latex_label=latex_label,
@@ -1118,7 +1117,7 @@ class CalibrationPriorDict(PriorDict):
                                    boundary='reflective')
         for ii in range(n_nodes):
             name = "recalib_{}_phase_{}".format(label, ii)
-            latex_label = "$\\phi^{}_{}$".format(label, ii)
+            latex_label = r"$\phi^{}_{}$".format(label, ii)
             prior[name] = Gaussian(mu=phase_mean_nodes[ii],
                                    sigma=phase_sigma_nodes[ii],
                                    name=name, latex_label=latex_label,
diff --git a/bilby/gw/result.py b/bilby/gw/result.py
index fc599232b3a8cf71633825a4c941b61347b12ba3..5c95fe0f149625ca9360e6fbe5c8aa21d83c46cd 100644
--- a/bilby/gw/result.py
+++ b/bilby/gw/result.py
@@ -185,7 +185,7 @@ class CompactBinaryCoalescenceResult(CoreResult):
             if len(amp_params) > 0:
                 amplitude = 100 * np.column_stack([posterior[param] for param in amp_params])
                 plot_spline_pos(logfreqs, amplitude, color=color, level=level,
-                                label="{0} (mean, {1}$\%$)".format(ifo.upper(), int(level * 100)))
+                                label=r"{0} (mean, {1}$\%$)".format(ifo.upper(), int(level * 100)))
 
             # Phase calibration model
             plt.sca(ax2)
@@ -194,7 +194,7 @@ class CompactBinaryCoalescenceResult(CoreResult):
             if len(phase_params) > 0:
                 phase = np.column_stack([posterior[param] for param in phase_params])
                 plot_spline_pos(logfreqs, phase, color=color, level=level,
-                                label="{0} (mean, {1}$\%$)".format(ifo.upper(), int(level * 100)),
+                                label=r"{0} (mean, {1}$\%$)".format(ifo.upper(), int(level * 100)),
                                 xform=spline_angle_xform)
 
         ax1.tick_params(labelsize=.75 * font_size)
@@ -204,7 +204,7 @@ class CompactBinaryCoalescenceResult(CoreResult):
         ax2.set_xscale('log')
 
         ax2.set_xlabel('Frequency [Hz]', fontsize=font_size)
-        ax1.set_ylabel('Amplitude [$\%$]', fontsize=font_size)
+        ax1.set_ylabel(r'Amplitude [$\%$]', fontsize=font_size)
         ax2.set_ylabel('Phase [deg]', fontsize=font_size)
 
         filename = os.path.join(outdir, self.label + '_calibration.' + format)
@@ -585,7 +585,7 @@ class CompactBinaryCoalescenceResult(CoreResult):
                 plot_frequencies,
                 np.percentile(fd_waveforms, lower_percentile, axis=0),
                 np.percentile(fd_waveforms, upper_percentile, axis=0),
-                color=WAVEFORM_COLOR, label='{}\% credible interval'.format(
+                color=WAVEFORM_COLOR, label=r'{}\% credible interval'.format(
                     int(upper_percentile - lower_percentile)),
                 alpha=0.3)
             axs[1].plot(
diff --git a/bilby/gw/utils.py b/bilby/gw/utils.py
index a52bfd663e7ed58e07b6ceda3f947414925c4918..c975f915a1098f05c2dde92ed08d0d7784861418 100644
--- a/bilby/gw/utils.py
+++ b/bilby/gw/utils.py
@@ -387,54 +387,31 @@ def zenith_azimuth_to_ra_dec(zenith, azimuth, geocent_time, ifos):
 
 def get_event_time(event):
     """
-    Get the merger time for known GW events.
-
-    See https://www.gw-openscience.org/catalog/GWTC-1-confident/html/
-    Last update https://arxiv.org/abs/1811.12907:
-
-    - GW150914
-    - GW151012
-    - GW151226
-    - GW170104
-    - GW170608
-    - GW170729
-    - GW170809
-    - GW170814
-    - GW170817
-    - GW170818
-    - GW170823
+    Get the merger time for known GW events using the gwosc package
 
     Parameters
-    ==========
+    ----------
     event: str
-        Event descriptor, this can deal with some prefixes, e.g.,
-        '151012', 'GW151012', 'LVT151012'
+        Event name, e.g. GW150914
 
     Returns
-    =======
+    -------
     event_time: float
         Merger time
-    """
-    event_times = {'150914': 1126259462.4,
-                   '151012': 1128678900.4,
-                   '151226': 1135136350.6,
-                   '170104': 1167559936.6,
-                   '170608': 1180922494.5,
-                   '170729': 1185389807.3,
-                   '170809': 1186302519.8,
-                   '170814': 1186741861.5,
-                   '170817': 1187008882.4,
-                   '170818': 1187058327.1,
-                   '170823': 1187529256.5}
-    if 'GW' or 'LVT' in event:
-        event = event[-6:]
 
+    Raises
+    ------
+    ImportError
+        If the gwosc package is not installed
+    ValueError
+        If the event is not in the gwosc dataset
+    """
     try:
-        event_time = event_times[event[-6:]]
-        return event_time
-    except KeyError:
-        print('Unknown event {}.'.format(event))
-        return None
+        from gwosc import datasets
+    except ImportError:
+        raise ImportError("You do not have the gwosc package installed")
+
+    return datasets.event_gps(event)
 
 
 def get_open_strain_data(
diff --git a/containers/dockerfile-template b/containers/dockerfile-template
index 82354c22f95190242a5e34cd4da67ab6767583f4..fc74e7a08d623cbf6d82957f5ce3466c5b4f1e6a 100644
--- a/containers/dockerfile-template
+++ b/containers/dockerfile-template
@@ -13,31 +13,36 @@ RUN /bin/bash -c "source activate ${{conda_env}}"
 RUN conda info
 RUN python --version
 
-# Install pymultinest requirements
-RUN apt-get update --allow-releaseinfo-change
-RUN apt-get install -y libblas3 libblas-dev liblapack3 liblapack-dev \
-libatlas3-base libatlas-base-dev cmake build-essential gfortran
-
 # Install conda-installable programs
 RUN conda install -n ${{conda_env}} -y matplotlib numpy scipy pandas astropy flake8 mock
 RUN conda install -n ${{conda_env}} -c anaconda coverage configargparse future
-
-# Install conda-forge-installable programs
-RUN conda install -n ${{conda_env}} -c conda-forge black ligo-gracedb gwpy lalsuite ligo.skymap pytest-cov deepdish arviz
+RUN conda install -n ${{conda_env}} -c conda-forge black pytest-cov deepdish arviz
 
 # Install pip-requirements
 RUN pip install --upgrade pip
-RUN pip install --upgrade setuptools coverage-badge
+RUN pip install --upgrade setuptools coverage-badge parameterized
 
 # Install documentation requirements
 RUN pip install sphinx numpydoc nbsphinx sphinx_rtd_theme sphinx-tabs autodoc
 
 # Install dependencies and samplers
-RUN pip install corner lalsuite theano healpy cython tables
-RUN pip install cpnest dynesty emcee nestle ptemcee pymc3 kombine dnest4 nessai zeus-mcmc
+RUN pip install corner healpy cython tables
+RUN conda install -n ${{conda_env}} -c conda-forge dynesty emcee nestle ptemcee
 RUN conda install -n ${{conda_env}} -c conda-forge pymultinest ultranest
+RUN conda install -n ${{conda_env}} -c conda-forge cpnest kombine dnest4 zeus-mcmc
+RUN conda install -n ${{conda_env}} -c conda-forge pytorch
+RUN conda install -n ${{conda_env}} -c conda-forge theano-pymc
+RUN conda install -n ${{conda_env}} -c conda-forge pymc3
+RUN pip install nessai
 
 # Install Polychord
+RUN apt-get update --allow-releaseinfo-change
+RUN apt-get install -y build-essential
+RUN apt-get install -y libblas3 libblas-dev
+RUN apt-get install -y liblapack3 liblapack-dev
+RUN apt-get install -y libatlas3-base libatlas-base-dev
+RUN apt-get install -y gfortran
+
 RUN git clone https://github.com/PolyChord/PolyChordLite.git \
 && (cd PolyChordLite && python setup.py --no-mpi install)
 
@@ -45,6 +50,10 @@ RUN git clone https://github.com/PolyChord/PolyChordLite.git \
 RUN git clone https://github.com/jellis18/PTMCMCSampler.git \
 && (cd PTMCMCSampler && python setup.py install)
 
+# Install GW packages
+RUN conda install -n ${{conda_env}} -c conda-forge python-lalsimulation
+RUN pip install ligo-gracedb gwpy ligo.skymap
+
 # Add the ROQ data to the image
 RUN mkdir roq_basis \
     && cd roq_basis \
diff --git a/containers/v3-dockerfile-test-suite-python37 b/containers/v3-dockerfile-test-suite-python37
index b633f76f8f237421793518add92348a7c53804a5..bb4d1996639deb30d1975f6aa9d958b4068fe959 100644
--- a/containers/v3-dockerfile-test-suite-python37
+++ b/containers/v3-dockerfile-test-suite-python37
@@ -15,31 +15,36 @@ RUN /bin/bash -c "source activate ${conda_env}"
 RUN conda info
 RUN python --version
 
-# Install pymultinest requirements
-RUN apt-get update --allow-releaseinfo-change
-RUN apt-get install -y libblas3 libblas-dev liblapack3 liblapack-dev \
-libatlas3-base libatlas-base-dev cmake build-essential gfortran
-
 # Install conda-installable programs
 RUN conda install -n ${conda_env} -y matplotlib numpy scipy pandas astropy flake8 mock
 RUN conda install -n ${conda_env} -c anaconda coverage configargparse future
-
-# Install conda-forge-installable programs
-RUN conda install -n ${conda_env} -c conda-forge black ligo-gracedb gwpy lalsuite ligo.skymap pytest-cov deepdish arviz
+RUN conda install -n ${conda_env} -c conda-forge black pytest-cov deepdish arviz
 
 # Install pip-requirements
 RUN pip install --upgrade pip
-RUN pip install --upgrade setuptools coverage-badge
+RUN pip install --upgrade setuptools coverage-badge parameterized
 
 # Install documentation requirements
 RUN pip install sphinx numpydoc nbsphinx sphinx_rtd_theme sphinx-tabs autodoc
 
 # Install dependencies and samplers
-RUN pip install corner lalsuite theano healpy cython tables
-RUN pip install cpnest dynesty emcee nestle ptemcee pymc3 kombine dnest4 nessai zeus-mcmc
+RUN pip install corner healpy cython tables
+RUN conda install -n ${conda_env} -c conda-forge dynesty emcee nestle ptemcee
 RUN conda install -n ${conda_env} -c conda-forge pymultinest ultranest
+RUN conda install -n ${conda_env} -c conda-forge cpnest kombine dnest4 zeus-mcmc
+RUN conda install -n ${conda_env} -c conda-forge pytorch
+RUN conda install -n ${conda_env} -c conda-forge theano-pymc
+RUN conda install -n ${conda_env} -c conda-forge pymc3
+RUN pip install nessai
 
 # Install Polychord
+RUN apt-get update --allow-releaseinfo-change
+RUN apt-get install -y build-essential
+RUN apt-get install -y libblas3 libblas-dev
+RUN apt-get install -y liblapack3 liblapack-dev
+RUN apt-get install -y libatlas3-base libatlas-base-dev
+RUN apt-get install -y gfortran
+
 RUN git clone https://github.com/PolyChord/PolyChordLite.git \
 && (cd PolyChordLite && python setup.py --no-mpi install)
 
@@ -47,6 +52,10 @@ RUN git clone https://github.com/PolyChord/PolyChordLite.git \
 RUN git clone https://github.com/jellis18/PTMCMCSampler.git \
 && (cd PTMCMCSampler && python setup.py install)
 
+# Install GW packages
+RUN conda install -n ${conda_env} -c conda-forge python-lalsimulation
+RUN pip install ligo-gracedb gwpy ligo.skymap
+
 # Add the ROQ data to the image
 RUN mkdir roq_basis \
     && cd roq_basis \
diff --git a/containers/v3-dockerfile-test-suite-python38 b/containers/v3-dockerfile-test-suite-python38
index 98d03802ba11a15480d1ccd2d9065a054f18fe9c..e5724e0c884428442c68aff473dab1438d80ee01 100644
--- a/containers/v3-dockerfile-test-suite-python38
+++ b/containers/v3-dockerfile-test-suite-python38
@@ -15,31 +15,36 @@ RUN /bin/bash -c "source activate ${conda_env}"
 RUN conda info
 RUN python --version
 
-# Install pymultinest requirements
-RUN apt-get update --allow-releaseinfo-change
-RUN apt-get install -y libblas3 libblas-dev liblapack3 liblapack-dev \
-libatlas3-base libatlas-base-dev cmake build-essential gfortran
-
 # Install conda-installable programs
 RUN conda install -n ${conda_env} -y matplotlib numpy scipy pandas astropy flake8 mock
 RUN conda install -n ${conda_env} -c anaconda coverage configargparse future
-
-# Install conda-forge-installable programs
-RUN conda install -n ${conda_env} -c conda-forge black ligo-gracedb gwpy lalsuite ligo.skymap pytest-cov deepdish arviz
+RUN conda install -n ${conda_env} -c conda-forge black pytest-cov deepdish arviz
 
 # Install pip-requirements
 RUN pip install --upgrade pip
-RUN pip install --upgrade setuptools coverage-badge
+RUN pip install --upgrade setuptools coverage-badge parameterized
 
 # Install documentation requirements
 RUN pip install sphinx numpydoc nbsphinx sphinx_rtd_theme sphinx-tabs autodoc
 
 # Install dependencies and samplers
-RUN pip install corner lalsuite theano healpy cython tables
-RUN pip install cpnest dynesty emcee nestle ptemcee pymc3 kombine dnest4 nessai zeus-mcmc
+RUN pip install corner healpy cython tables
+RUN conda install -n ${conda_env} -c conda-forge dynesty emcee nestle ptemcee
 RUN conda install -n ${conda_env} -c conda-forge pymultinest ultranest
+RUN conda install -n ${conda_env} -c conda-forge cpnest kombine dnest4 zeus-mcmc
+RUN conda install -n ${conda_env} -c conda-forge pytorch
+RUN conda install -n ${conda_env} -c conda-forge theano-pymc
+RUN conda install -n ${conda_env} -c conda-forge pymc3
+RUN pip install nessai
 
 # Install Polychord
+RUN apt-get update --allow-releaseinfo-change
+RUN apt-get install -y build-essential
+RUN apt-get install -y libblas3 libblas-dev
+RUN apt-get install -y liblapack3 liblapack-dev
+RUN apt-get install -y libatlas3-base libatlas-base-dev
+RUN apt-get install -y gfortran
+
 RUN git clone https://github.com/PolyChord/PolyChordLite.git \
 && (cd PolyChordLite && python setup.py --no-mpi install)
 
@@ -47,6 +52,10 @@ RUN git clone https://github.com/PolyChord/PolyChordLite.git \
 RUN git clone https://github.com/jellis18/PTMCMCSampler.git \
 && (cd PTMCMCSampler && python setup.py install)
 
+# Install GW packages
+RUN conda install -n ${conda_env} -c conda-forge python-lalsimulation
+RUN pip install ligo-gracedb gwpy ligo.skymap
+
 # Add the ROQ data to the image
 RUN mkdir roq_basis \
     && cd roq_basis \
diff --git a/containers/v3-dockerfile-test-suite-python39 b/containers/v3-dockerfile-test-suite-python39
index 6b64f65e0d139a924d3f951d0c9cb9249f0f9811..106cd4573d5d57c832bf54b686a92d0a0fe94064 100644
--- a/containers/v3-dockerfile-test-suite-python39
+++ b/containers/v3-dockerfile-test-suite-python39
@@ -15,31 +15,36 @@ RUN /bin/bash -c "source activate ${conda_env}"
 RUN conda info
 RUN python --version
 
-# Install pymultinest requirements
-RUN apt-get update --allow-releaseinfo-change
-RUN apt-get install -y libblas3 libblas-dev liblapack3 liblapack-dev \
-libatlas3-base libatlas-base-dev cmake build-essential gfortran
-
 # Install conda-installable programs
 RUN conda install -n ${conda_env} -y matplotlib numpy scipy pandas astropy flake8 mock
 RUN conda install -n ${conda_env} -c anaconda coverage configargparse future
-
-# Install conda-forge-installable programs
-RUN conda install -n ${conda_env} -c conda-forge black ligo-gracedb gwpy lalsuite ligo.skymap pytest-cov deepdish arviz
+RUN conda install -n ${conda_env} -c conda-forge black pytest-cov deepdish arviz
 
 # Install pip-requirements
 RUN pip install --upgrade pip
-RUN pip install --upgrade setuptools coverage-badge
+RUN pip install --upgrade setuptools coverage-badge parameterized
 
 # Install documentation requirements
 RUN pip install sphinx numpydoc nbsphinx sphinx_rtd_theme sphinx-tabs autodoc
 
 # Install dependencies and samplers
-RUN pip install corner lalsuite theano healpy cython tables
-RUN pip install cpnest dynesty emcee nestle ptemcee pymc3 kombine dnest4 nessai zeus-mcmc
+RUN pip install corner healpy cython tables
+RUN conda install -n ${conda_env} -c conda-forge dynesty emcee nestle ptemcee
 RUN conda install -n ${conda_env} -c conda-forge pymultinest ultranest
+RUN conda install -n ${conda_env} -c conda-forge cpnest kombine dnest4 zeus-mcmc
+RUN conda install -n ${conda_env} -c conda-forge pytorch
+RUN conda install -n ${conda_env} -c conda-forge theano-pymc
+RUN conda install -n ${conda_env} -c conda-forge pymc3
+RUN pip install nessai
 
 # Install Polychord
+RUN apt-get update --allow-releaseinfo-change
+RUN apt-get install -y build-essential
+RUN apt-get install -y libblas3 libblas-dev
+RUN apt-get install -y liblapack3 liblapack-dev
+RUN apt-get install -y libatlas3-base libatlas-base-dev
+RUN apt-get install -y gfortran
+
 RUN git clone https://github.com/PolyChord/PolyChordLite.git \
 && (cd PolyChordLite && python setup.py --no-mpi install)
 
@@ -47,6 +52,10 @@ RUN git clone https://github.com/PolyChord/PolyChordLite.git \
 RUN git clone https://github.com/jellis18/PTMCMCSampler.git \
 && (cd PTMCMCSampler && python setup.py install)
 
+# Install GW packages
+RUN conda install -n ${conda_env} -c conda-forge python-lalsimulation
+RUN pip install ligo-gracedb gwpy ligo.skymap
+
 # Add the ROQ data to the image
 RUN mkdir roq_basis \
     && cd roq_basis \
diff --git a/docs/samplers.txt b/docs/samplers.txt
index 8e26550cb1c2d564d8c1c4ed3810890ed214cf98..fad1d62ba2953e46bce4e635c98269c0e8aeecb1 100644
--- a/docs/samplers.txt
+++ b/docs/samplers.txt
@@ -70,6 +70,7 @@ MCMC samplers
 - emcee :code:`bilby.core.sampler.emcee.Emcee`
 - ptemcee :code:`bilby.core.sampler.ptemcee.Ptemcee`
 - pymc3 :code:`bilby.core.sampler.pymc3.Pymc3`
+- zeus :code:`bilby.core.sampler.zeus.Zeus`
 
 
 -------------------
diff --git a/docs/transient-gw-data.txt b/docs/transient-gw-data.txt
index 78501e6d56b10110b46e3be839adf9b1a1d1ec49..1fd041acab60a0f5bb116965167be02fbab410df 100644
--- a/docs/transient-gw-data.txt
+++ b/docs/transient-gw-data.txt
@@ -91,6 +91,21 @@ You can also set the strain data without any noise at all
 Injecting a signal
 ------------------
 
-If you wish to inject a signal into the data, you can use this function
+If you wish to inject a signal into the data, you can use this function:
 
    >>> bilby.gw.detector.Interferometer.inject_signal
+
+To inject a signal, a `bilby.gw.waveform_generator.WaveformGenerator` is required.
+This is where you must specify the injection's duration, start time, and waveform model.
+Additionally, a `parameter_conversion` function can be passed. The default is :code:`parameter_conversion = bilby.gw.conversion.convert_to_lal_binary_black_hole_parameters`.
+This expects injection parameters to be provided in the detector frame, e.g.
+
+.. code-block:: python
+
+    >>> parameters = dict(
+        mass_1=36., mass_2=29.,
+        a_1=0.4, a_2=0.3, tilt_1=0.5, tilt_2=1.0, phi_12=1.7, phi_jl=0.3,
+        luminosity_distance=2000., theta_jn=0.4, psi=2.659,
+        phase=1.3, geocent_time=1126259642.413, ra=1.375, dec=-1.2108
+        )
+
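+As a schematic sketch (the waveform settings shown here are illustrative,
+not required values), an injection then proceeds via:
+
+.. code-block:: python
+
+    >>> waveform_generator = bilby.gw.WaveformGenerator(
+    ...     duration=4, sampling_frequency=2048,
+    ...     frequency_domain_source_model=bilby.gw.source.lal_binary_black_hole)
+    >>> ifos = bilby.gw.detector.InterferometerList(["H1", "L1"])
+    >>> ifos.set_strain_data_from_power_spectral_densities(
+    ...     sampling_frequency=2048, duration=4,
+    ...     start_time=parameters["geocent_time"] - 2)
+    >>> ifos.inject_signal(
+    ...     waveform_generator=waveform_generator, parameters=parameters)
+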
diff --git a/examples/core_examples/hyper_parameter_example.py b/examples/core_examples/hyper_parameter_example.py
index 5c920989c054a88978ff400f922b3ec4049ee126..4ce066e938ef2e8d9ff571d10ee154e0af9e871f 100644
--- a/examples/core_examples/hyper_parameter_example.py
+++ b/examples/core_examples/hyper_parameter_example.py
@@ -4,6 +4,7 @@ An example of how to use bilby to perform parameter estimation for hyper params
 """
 import matplotlib.pyplot as plt
 import numpy as np
+
 from bilby.core.likelihood import GaussianLikelihood
 from bilby.core.prior import Uniform
 from bilby.core.result import make_pp_plot
@@ -97,8 +98,8 @@ hp_likelihood = HyperparameterLikelihood(
 )
 
 hp_priors = dict(
-    mu=Uniform(-10, 10, "mu", "$\mu_{c0}$"),
-    sigma=Uniform(0, 10, "sigma", "$\sigma_{c0}$"),
+    mu=Uniform(-10, 10, "mu", r"$\mu_{c0}$"),
+    sigma=Uniform(0, 10, "sigma", r"$\sigma_{c0}$"),
 )
 
 # And run sampler
diff --git a/examples/core_examples/slabspike_example.py b/examples/core_examples/slabspike_example.py
index 9455834218033e6dde3cc021626a7de1364e0395..30521fa28c4220694f09b378f90bf59b28edb476 100644
--- a/examples/core_examples/slabspike_example.py
+++ b/examples/core_examples/slabspike_example.py
@@ -7,10 +7,11 @@ up to three Gaussians.
 
 """
 
-import bilby
 import matplotlib.pyplot as plt
 import numpy as np
 
+import bilby
+
 outdir = "outdir"
 label = "slabspike"
 bilby.utils.check_directory_exists_and_if_not_mkdir(outdir)
@@ -102,22 +103,22 @@ priors["amplitude_2"] = bilby.core.prior.SlabSpikePrior(
 # See e.g. https://github.com/GregoryAshton/kookaburra/blob/master/src/priors.py#L72 for an implementation.
 # We resolve this by not letting the priors overlap in this case.
 priors["mu_0"] = bilby.core.prior.Uniform(
-    minimum=-5, maximum=-2, name="mu_0", latex_label="$\mu_0$"
+    minimum=-5, maximum=-2, name="mu_0", latex_label=r"$\mu_0$"
 )
 priors["mu_1"] = bilby.core.prior.Uniform(
-    minimum=-2, maximum=2, name="mu_1", latex_label="$\mu_1$"
+    minimum=-2, maximum=2, name="mu_1", latex_label=r"$\mu_1$"
 )
 priors["mu_2"] = bilby.core.prior.Uniform(
-    minimum=2, maximum=5, name="mu_2", latex_label="$\mu_2$"
+    minimum=2, maximum=5, name="mu_2", latex_label=r"$\mu_2$"
 )
 priors["sigma_0"] = bilby.core.prior.LogUniform(
-    minimum=0.01, maximum=10, name="sigma_0", latex_label="$\sigma_0$"
+    minimum=0.01, maximum=10, name="sigma_0", latex_label=r"$\sigma_0$"
 )
 priors["sigma_1"] = bilby.core.prior.LogUniform(
-    minimum=0.01, maximum=10, name="sigma_1", latex_label="$\sigma_1$"
+    minimum=0.01, maximum=10, name="sigma_1", latex_label=r"$\sigma_1$"
 )
 priors["sigma_2"] = bilby.core.prior.LogUniform(
-    minimum=0.01, maximum=10, name="sigma_2", latex_label="$\sigma_2$"
+    minimum=0.01, maximum=10, name="sigma_2", latex_label=r"$\sigma_2$"
 )
 
 # Setting up the likelihood and running the samplers works the same as elsewhere.
diff --git a/requirements.txt b/requirements.txt
index c3bf8ebb8cd090bad2f14fb49ae7620aec7cb436..99b3eedbbf9e86b09300ce89e8b466e824c62536 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -11,4 +11,4 @@ tqdm
 h5py
 tables
 astropy
-attrs
+attrs
\ No newline at end of file
diff --git a/sampler_requirements.txt b/sampler_requirements.txt
index 443a96ab2a1ea2f4f7474436437a43d2b4afa0e5..d60093c8b1b3e2abd5af5397f02f1986655f5e04 100644
--- a/sampler_requirements.txt
+++ b/sampler_requirements.txt
@@ -10,3 +10,4 @@ ultranest>=3.0.0
 dnest4
 nessai>=0.2.3
 schwimmbad
+zeus-mcmc>=2.3.0
diff --git a/setup.cfg b/setup.cfg
index 4a86df8ebe1f77c62f4e6de88f45610732c0b3fe..7f999dcf7d5ee889b4a528f229d1d17801451a55 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,7 +1,7 @@
 [flake8]
 exclude = .git,docs,build,dist,test,*__init__.py
 max-line-length = 120
-ignore = E129 W503 W504 W605 E203 E402
+ignore = E129 W503 W504 E203 E402
 
 [tool:pytest]
 addopts =
diff --git a/setup.py b/setup.py
index d0594a673aeba14655d18345c26a212d797f6268..0c3cc35f40e164780339509526bf959a98e9e0ad 100644
--- a/setup.py
+++ b/setup.py
@@ -12,7 +12,7 @@ if python_version < (3, 7):
 
 
 def write_version_file(version):
-    """ Writes a file with version information to be used at run time
+    """Writes a file with version information to be used at run time
 
     Parameters
     ----------
@@ -27,31 +27,32 @@ def write_version_file(version):
     """
     try:
         git_log = subprocess.check_output(
-            ['git', 'log', '-1', '--pretty=%h %ai']).decode('utf-8')
-        git_diff = (subprocess.check_output(['git', 'diff', '.']) +
-                    subprocess.check_output(
-                        ['git', 'diff', '--cached', '.'])).decode('utf-8')
-        if git_diff == '':
-            git_status = '(CLEAN) ' + git_log
+            ["git", "log", "-1", "--pretty=%h %ai"]
+        ).decode("utf-8")
+        git_diff = (
+            subprocess.check_output(["git", "diff", "."])
+            + subprocess.check_output(["git", "diff", "--cached", "."])
+        ).decode("utf-8")
+        if git_diff == "":
+            git_status = "(CLEAN) " + git_log
         else:
-            git_status = '(UNCLEAN) ' + git_log
+            git_status = "(UNCLEAN) " + git_log
     except Exception as e:
-        print("Unable to obtain git version information, exception: {}"
-              .format(e))
-        git_status = 'release'
+        print("Unable to obtain git version information, exception: {}".format(e))
+        git_status = "release"
 
-    version_file = '.version'
+    version_file = ".version"
     if os.path.isfile(version_file) is False:
-        with open('bilby/' + version_file, 'w+') as f:
-            f.write('{}: {}'.format(version, git_status))
+        with open("bilby/" + version_file, "w+") as f:
+            f.write("{}: {}".format(version, git_status))
 
     return version_file
 
 
 def get_long_description():
-    """ Finds the README and reads in the description """
+    """Finds the README and reads in the description"""
     here = os.path.abspath(os.path.dirname(__file__))
-    with open(os.path.join(here, 'README.rst')) as f:
+    with open(os.path.join(here, "README.rst")) as f:
         long_description = f.read()
     return long_description
 
@@ -69,36 +70,55 @@ def readfile(filename):
     return filecontents
 
 
-VERSION = '1.1.3'
+VERSION = '1.1.4'
 version_file = write_version_file(VERSION)
 long_description = get_long_description()
 
-setup(name='bilby',
-      description='A user-friendly Bayesian inference library',
-      long_description=long_description,
-      long_description_content_type="text/x-rst",
-      url='https://git.ligo.org/lscsoft/bilby',
-      author='Greg Ashton, Moritz Huebner, Paul Lasky, Colm Talbot',
-      author_email='paul.lasky@monash.edu',
-      license="MIT",
-      version=VERSION,
-      packages=['bilby', 'bilby.core', 'bilby.core.prior', 'bilby.core.sampler',
-                'bilby.core.utils', 'bilby.gw', 'bilby.gw.detector',
-                'bilby.gw.sampler', 'bilby.hyper', 'bilby.gw.eos', 'bilby.bilby_mcmc',
-                'cli_bilby'],
-      package_dir={'bilby': 'bilby', 'cli_bilby': 'cli_bilby'},
-      package_data={'bilby.gw': ['prior_files/*'],
-                    'bilby.gw.detector': ['noise_curves/*.txt', 'detectors/*'],
-                    'bilby.gw.eos': ['eos_tables/*.dat'],
-                    'bilby': [version_file]},
-      python_requires='>=3.6',
-      install_requires=get_requirements(),
-      entry_points={'console_scripts':
-                    ['bilby_plot=cli_bilby.plot_multiple_posteriors:main',
-                     'bilby_result=cli_bilby.bilby_result:main']
-                    },
-      classifiers=[
-          "Programming Language :: Python :: 3.6",
-          "Programming Language :: Python :: 3.7",
-          "License :: OSI Approved :: MIT License",
-          "Operating System :: OS Independent"])
+setup(
+    name="bilby",
+    description="A user-friendly Bayesian inference library",
+    long_description=long_description,
+    long_description_content_type="text/x-rst",
+    url="https://git.ligo.org/lscsoft/bilby",
+    author="Greg Ashton, Moritz Huebner, Paul Lasky, Colm Talbot",
+    author_email="paul.lasky@monash.edu",
+    license="MIT",
+    version=VERSION,
+    packages=[
+        "bilby",
+        "bilby.bilby_mcmc",
+        "bilby.core",
+        "bilby.core.prior",
+        "bilby.core.sampler",
+        "bilby.core.utils",
+        "bilby.gw",
+        "bilby.gw.detector",
+        "bilby.gw.eos",
+        "bilby.gw.likelihood",
+        "bilby.gw.sampler",
+        "bilby.hyper",
+        "cli_bilby",
+    ],
+    package_dir={"bilby": "bilby", "cli_bilby": "cli_bilby"},
+    package_data={
+        "bilby.gw": ["prior_files/*"],
+        "bilby.gw.detector": ["noise_curves/*.txt", "detectors/*"],
+        "bilby.gw.eos": ["eos_tables/*.dat"],
+        "bilby": [version_file],
+    },
+    python_requires=">=3.7",
+    install_requires=get_requirements(),
+    entry_points={
+        "console_scripts": [
+            "bilby_plot=cli_bilby.plot_multiple_posteriors:main",
+            "bilby_result=cli_bilby.bilby_result:main",
+        ]
+    },
+    classifiers=[
+        "Programming Language :: Python :: 3.7",
+        "Programming Language :: Python :: 3.8",
+        "Programming Language :: Python :: 3.9",
+        "License :: OSI Approved :: MIT License",
+        "Operating System :: OS Independent",
+    ],
+)
diff --git a/test/check_author_list.py b/test/check_author_list.py
index c43f0f0030d657148ec5991faf807bac6b4192ca..98352318a503eb588b566d6b6b368e24caa5f1d9 100644
--- a/test/check_author_list.py
+++ b/test/check_author_list.py
@@ -14,10 +14,26 @@ lines = subprocess.check_output(["git", "shortlog", "HEAD", "-sn"]).decode("utf-
 if len(lines) == 0:
     raise Exception("No authors to check against")
 
+
+def remove_accents(raw_text):
+
+    raw_text = re.sub(u"[àáâãäå]", 'a', raw_text)
+    raw_text = re.sub(u"[èéêë]", 'e', raw_text)
+    raw_text = re.sub(u"[ìíîï]", 'i', raw_text)
+    raw_text = re.sub(u"[òóôõö]", 'o', raw_text)
+    raw_text = re.sub(u"[ùúûü]", 'u', raw_text)
+    raw_text = re.sub(u"[ýÿ]", 'y', raw_text)
+    raw_text = re.sub(u"[ß]", 'ss', raw_text)
+    raw_text = re.sub(u"[ñ]", 'n', raw_text)
+
+    return raw_text
+
+
 fail_test = False
 for line in lines:
     line = line.replace(".", " ")
     line = re.sub('([A-Z][a-z]+)', r' \1', re.sub('([A-Z]+)', r' \1', line))
+    line = remove_accents(line)
     for element in line.split()[1:]:
         element = element.lower()
         if element not in AUTHORS_list and element not in special_cases:
diff --git a/test/core/prior/base_test.py b/test/core/prior/base_test.py
index bafa10fa90e7495ccf0999f900580aee65aa719a..49887de5032580096b1acdf321f5a83534ab5d3c 100644
--- a/test/core/prior/base_test.py
+++ b/test/core/prior/base_test.py
@@ -100,7 +100,7 @@ class TestPrior(unittest.TestCase):
     def test_default_label_assignment(self):
         self.prior.name = "chirp_mass"
         self.prior.latex_label = None
-        self.assertEqual(self.prior.latex_label, "$\mathcal{M}$")
+        self.assertEqual(self.prior.latex_label, r"$\mathcal{M}$")
 
     def test_default_label_assignment_default(self):
         self.assertTrue(self.prior.latex_label, self.prior.name)
diff --git a/test/core/prior/dict_test.py b/test/core/prior/dict_test.py
index 3b8487fa1f49ea9c25459c738cc16e58252a5f76..03881612a61404db58ebc43d2944fc833480643b 100644
--- a/test/core/prior/dict_test.py
+++ b/test/core/prior/dict_test.py
@@ -66,7 +66,7 @@ class TestPriorDict(unittest.TestCase):
                 name="chirp_mass",
                 minimum=25,
                 maximum=100,
-                latex_label="$\mathcal{M}$",
+                latex_label=r"$\mathcal{M}$",
             ),
             mass_ratio=bilby.core.prior.Uniform(
                 name="mass_ratio",
@@ -176,7 +176,7 @@ class TestPriorDict(unittest.TestCase):
                     name="chirp_mass",
                     minimum=25,
                     maximum=100,
-                    latex_label="$\mathcal{M}$",
+                    latex_label=r"$\mathcal{M}$",
                 ),
                 mass_ratio=bilby.core.prior.Uniform(
                     name="mass_ratio",
diff --git a/test/core/result_test.py b/test/core/result_test.py
index b2a7c24c1776f295fe0964094b42242c00704bd0..f49e4a3a25727f45d268df0952cc4659ebd9d1be 100644
--- a/test/core/result_test.py
+++ b/test/core/result_test.py
@@ -134,6 +134,25 @@ class TestResult(unittest.TestCase):
         with self.assertRaises(IOError):
             bilby.core.result.read_in_result(filename="not/a/file.json")
 
+        with self.assertRaises(IOError):
+            incomplete_json = """
+{
+  "label": "label",
+  "outdir": "outdir",
+  "sampler": "dynesty",
+  "log_evidence": 0,
+  "log_evidence_err": 0,
+  "log_noise_evidence": 0,
+  "log_bayes_factor": 0,
+  "priors": {
+    "chirp_mass": {
+"""
+            with open("{}/incomplete.json".format(self.result.outdir), "w") as ff:
+                ff.write(incomplete_json)
+            bilby.core.result.read_in_result(
+                filename="{}/incomplete.json".format(self.result.outdir)
+            )
+
     def test_unset_priors(self):
         result = bilby.core.result.Result(
             label="label",
diff --git a/test/core/sampler/nessai_test.py b/test/core/sampler/nessai_test.py
index 15b0f26afe179c7ce0b70f6cd234761142f605de..6f902f71d8f8e43a53c824345356bdd389eaa922 100644
--- a/test/core/sampler/nessai_test.py
+++ b/test/core/sampler/nessai_test.py
@@ -22,53 +22,8 @@ class TestNessai(unittest.TestCase):
             plot=False,
             skip_import_verification=True,
         )
-        self.expected = dict(
-            output="outdir/label_nessai/",
-            nlive=1000,
-            stopping=0.1,
-            resume=True,
-            max_iteration=None,
-            checkpointing=True,
-            seed=1234,
-            acceptance_threshold=0.01,
-            analytic_priors=False,
-            maximum_uninformed=1000,
-            uninformed_proposal=None,
-            uninformed_proposal_kwargs=None,
-            flow_class=None,
-            flow_config=None,
-            training_frequency=None,
-            reset_weights=False,
-            reset_permutations=False,
-            reset_acceptance=False,
-            train_on_empty=True,
-            cooldown=100,
-            memory=False,
-            poolsize=None,
-            drawsize=None,
-            max_poolsize_scale=10,
-            update_poolsize=False,
-            latent_prior='truncated_gaussian',
-            draw_latent_kwargs=None,
-            compute_radius_with_all=False,
-            min_radius=False,
-            max_radius=50,
-            check_acceptance=False,
-            fuzz=1.0,
-            expansion_fraction=1.0,
-            rescale_parameters=True,
-            rescale_bounds=[-1, 1],
-            update_bounds=False,
-            boundary_inversion=False,
-            inversion_type='split', detect_edges=False,
-            detect_edges_kwargs=None,
-            reparameterisations=None,
-            n_pool=None,
-            max_threads=1,
-            pytorch_threads=None,
-            plot=False,
-            proposal_plots=False
-        )
+        self.expected = self.sampler.default_kwargs
+        self.expected["output"] = "outdir/label_nessai/"
 
     def tearDown(self):
         del self.likelihood
@@ -76,16 +31,16 @@ class TestNessai(unittest.TestCase):
         del self.sampler
         del self.expected
 
-    def test_default_kwargs(self):
-        expected = self.expected.copy()
-        self.assertDictEqual(expected, self.sampler.kwargs)
-
     def test_translate_kwargs_nlive(self):
         expected = self.expected.copy()
+        # nlive in the default kwargs is not fixed but depends on the
+        # nessai version, so fetch the value here and use it when setting
+        # the equivalent kwargs.
+        nlive = expected["nlive"]
         for equiv in bilby.core.sampler.base_sampler.NestedSampler.npoints_equiv_kwargs:
             new_kwargs = self.sampler.kwargs.copy()
             del new_kwargs["nlive"]
-            new_kwargs[equiv] = 1000
+            new_kwargs[equiv] = nlive
             self.sampler.kwargs = new_kwargs
             self.assertDictEqual(expected, self.sampler.kwargs)
 
@@ -117,10 +72,10 @@ class TestNessai(unittest.TestCase):
         self.sampler.kwargs = new_kwargs
         self.assertDictEqual(expected, self.sampler.kwargs)
 
-    @patch("builtins.open", mock_open(read_data='{"nlive": 2000}'))
+    @patch("builtins.open", mock_open(read_data='{"nlive": 4000}'))
     def test_update_from_config_file(self):
         expected = self.expected.copy()
-        expected["nlive"] = 2000
+        expected["nlive"] = 4000
         new_kwargs = self.expected.copy()
         new_kwargs["config_file"] = "config_file.json"
         self.sampler.kwargs = new_kwargs
diff --git a/test/core/sampler/pymc3_test.py b/test/core/sampler/pymc3_test.py
index ad1df20f4a077038c2046e260c26880ec1737aec..9fb710c344159f96a1b3707011ff1aa817f706bf 100644
--- a/test/core/sampler/pymc3_test.py
+++ b/test/core/sampler/pymc3_test.py
@@ -8,6 +8,10 @@ import bilby
 
 
 @pytest.mark.skipif(sys.version_info[1] <= 6, reason="pymc3 is broken in py36")
+@pytest.mark.xfail(
+    raises=AttributeError,
+    reason="Dependency issue with pymc3 causes attribute error on import",
+)
 class TestPyMC3(unittest.TestCase):
     def setUp(self):
         self.likelihood = MagicMock()
diff --git a/test/core/sampler/zeus_test.py b/test/core/sampler/zeus_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..e7bd9289cf5e3115a017c8eeae50e21b99c12ed8
--- /dev/null
+++ b/test/core/sampler/zeus_test.py
@@ -0,0 +1,64 @@
+import unittest
+
+from mock import MagicMock
+
+import bilby
+
+
+class TestZeus(unittest.TestCase):
+    def setUp(self):
+        self.likelihood = MagicMock()
+        self.priors = bilby.core.prior.PriorDict(
+            dict(a=bilby.core.prior.Uniform(0, 1), b=bilby.core.prior.Uniform(0, 1))
+        )
+        self.sampler = bilby.core.sampler.Zeus(
+            self.likelihood,
+            self.priors,
+            outdir="outdir",
+            label="label",
+            use_ratio=False,
+            plot=False,
+            skip_import_verification=True,
+        )
+
+    def tearDown(self):
+        del self.likelihood
+        del self.priors
+        del self.sampler
+
+    def test_default_kwargs(self):
+        expected = dict(
+            nwalkers=500,
+            args=[],
+            kwargs={},
+            pool=None,
+            log_prob0=None,
+            start=None,
+            blobs0=None,
+            iterations=100,
+            thin=1,
+        )
+        self.assertDictEqual(expected, self.sampler.kwargs)
+
+    def test_translate_kwargs(self):
+        expected = dict(
+            nwalkers=100,
+            args=[],
+            kwargs={},
+            pool=None,
+            log_prob0=None,
+            start=None,
+            blobs0=None,
+            iterations=100,
+            thin=1,
+        )
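+        # every accepted alias for nwalkers should be translated back to the canonical key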
+        for equiv in bilby.core.sampler.base_sampler.MCMCSampler.nwalkers_equiv_kwargs:
+            new_kwargs = self.sampler.kwargs.copy()
+            del new_kwargs["nwalkers"]
+            new_kwargs[equiv] = 100
+            self.sampler.kwargs = new_kwargs
+            self.assertDictEqual(expected, self.sampler.kwargs)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/test/gw/detector/interferometer_test.py b/test/gw/detector/interferometer_test.py
index b01b57e17fe2a1562546c24e8f0fa28e84aebe47..3c879d22a766955bb5f37a764f075fd8f88a7281 100644
--- a/test/gw/detector/interferometer_test.py
+++ b/test/gw/detector/interferometer_test.py
@@ -1,5 +1,8 @@
 import sys
 import unittest
+
+import lal
+import lalsimulation
 import pytest
 from packaging import version
 from shutil import rmtree
@@ -552,5 +555,46 @@ class TestInterferometerEquals(unittest.TestCase):
         self.assertNotEqual(self.ifo_1, self.ifo_2)
 
 
+class TestInterferometerAntennaPatternAgainstLAL(unittest.TestCase):
+    def setUp(self):
+        self.name = "name"
+        self.ifo_names = ['H1', 'L1', 'V1', 'K1', 'GEO600', 'ET']
+        self.lal_prefixes = {'H1': 'H1', 'L1': 'L1', 'V1': 'V1', 'K1': 'K1', 'GEO600': 'G1', 'ET': 'E1'}
+        self.polarizations = ['plus', 'cross', 'breathing', 'longitudinal', 'x', 'y']
+        self.ifos = bilby.gw.detector.InterferometerList(self.ifo_names)
+        self.gpstime = 1305303144
+        self.trial = 100
+
+    def tearDown(self):
+        del self.name
+        del self.ifo_names
+        del self.lal_prefixes
+        del self.polarizations
+        del self.ifos
+        del self.gpstime
+        del self.trial
+
+    def test_antenna_pattern_vs_lal(self):
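+        # compare bilby's antenna response with LAL's ComputeDetAMResponseExtraModes
+        # over random sky positions and polarisation angles at a fixed GPS time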
+        gmst = lal.GreenwichMeanSiderealTime(self.gpstime)
+        f_bilby = np.zeros((self.trial, 6))
+        f_lal = np.zeros((self.trial, 6))
+
+        for n, ifo_name in enumerate(self.ifo_names):
+            response = lalsimulation.DetectorPrefixToLALDetector(self.lal_prefixes[ifo_name]).response
+            ifo = self.ifos[n]
+            for i in range(self.trial):
+                ra = 2. * np.pi * np.random.uniform()
+                dec = np.pi * np.random.uniform() - np.pi / 2.
+                psi = np.pi * np.random.uniform()
+                f_lal[i] = lal.ComputeDetAMResponseExtraModes(response, ra, dec, psi, gmst)
+                for m, pol in enumerate(self.polarizations):
+                    f_bilby[i, m] = ifo.antenna_response(ra, dec, self.gpstime, psi, pol)
+
+            std = np.std(f_bilby - f_lal, axis=0)
+            for m, pol in enumerate(self.polarizations):
+                with self.subTest(':'.join((ifo_name, pol))):
+                    self.assertAlmostEqual(std[m], 0.0, places=7)
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/test/gw/detector/networks_test.py b/test/gw/detector/networks_test.py
index 4484cfb12d526ecb11940e390e78b2e10c9c21c7..b0d02e4eae2e58f7ad43aee61be3365c86137603 100644
--- a/test/gw/detector/networks_test.py
+++ b/test/gw/detector/networks_test.py
@@ -3,6 +3,7 @@ import unittest
 import pytest
 from shutil import rmtree
 from packaging import version
+from itertools import combinations
 
 import deepdish as dd
 import mock
@@ -119,7 +120,7 @@ class TestInterferometerList(unittest.TestCase):
             bilby.gw.detector.InterferometerList("string")
 
     def test_init_with_string_list(self):
-        """ Merely checks if this ends up in the right bracket """
+        """Merely checks if this ends up in the right bracket"""
         with mock.patch("bilby.gw.detector.networks.get_empty_interferometer") as m:
             m.side_effect = TypeError
             with self.assertRaises(TypeError):
@@ -240,8 +241,12 @@ class TestInterferometerList(unittest.TestCase):
 
     @patch.object(bilby.gw.detector.Interferometer, "inject_signal")
     def test_inject_signal_with_inj_pol(self, m):
-        self.ifo_list.inject_signal(injection_polarizations=dict(plus=1))
-        m.assert_called_with(parameters=None, injection_polarizations=dict(plus=1))
+        self.ifo_list.inject_signal(
+            injection_polarizations=dict(plus=1), raise_error=False
+        )
+        m.assert_called_with(
+            parameters=None, injection_polarizations=dict(plus=1), raise_error=False
+        )
         self.assertEqual(len(self.ifo_list), m.call_count)
 
     @patch.object(bilby.gw.detector.Interferometer, "inject_signal")
@@ -337,7 +342,9 @@ class TestInterferometerList(unittest.TestCase):
             recovered_ifo = bilby.gw.detector.InterferometerList.from_hdf5(filename)
             self.assertListEqual(self.ifo_list, recovered_ifo)
 
-    @pytest.mark.skipif(pandas_version_test or sys.version_info[0] < 3, reason=skip_reason)
+    @pytest.mark.skipif(
+        pandas_version_test or sys.version_info[0] < 3, reason=skip_reason
+    )
     def test_to_and_from_hdf5_wrong_class(self):
         dd.io.save("./outdir/psd.h5", self.ifo_list[0].power_spectral_density)
         filename = self.ifo_list._filename_from_outdir_label_extension(
@@ -354,6 +361,7 @@ class TestInterferometerList(unittest.TestCase):
 
     def test_to_and_from_pkl_wrong_class(self):
         import dill
+
         with open("./outdir/psd.pkl", "wb") as ff:
             dill.dump(self.ifo_list[0].power_spectral_density, ff)
         filename = self.ifo_list._filename_from_outdir_label_extension(
@@ -372,5 +380,40 @@ class TestInterferometerList(unittest.TestCase):
         ifos.plot_data(outdir=self.outdir)
 
 
+class TriangularInterferometerTest(unittest.TestCase):
+    def setUp(self):
+        self.triangular_ifo = bilby.gw.detector.get_empty_interferometer("ET")
+
+    def tearDown(self):
+        del self.triangular_ifo
+
+    def test_individual_positions(self):
+        """
+        Check that the distances between the vertices of the three
+        individual interferometers are approximately equal to the
+        length of the detector arms.
+        Calculation follows the haversine formula:
+        https://www.movable-type.co.uk/scripts/latlong.html
+        All angles must be in radians.
+        """
+
+        def a(delta_lat, delta_long, lat_1, lat_2):
+            return (
+                np.sin(delta_lat / 2) ** 2
+                + np.cos(lat_1) * np.cos(lat_2) * np.sin(delta_long / 2) ** 2
+            )
+
+        def c(a):
+            return 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))
+
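+        # the great-circle distance between each pair of vertices should
+        # match the arm length (in km) to within 1 km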
+        for pair in list(combinations(self.triangular_ifo, 2)):
+            delta_lat = np.radians(pair[1].latitude - pair[0].latitude)
+            delta_long = np.radians(pair[1].longitude - pair[0].longitude)
+            pair_a = a(
+                delta_lat,
+                delta_long,
+                np.radians(pair[0].latitude),
+                np.radians(pair[1].latitude),
+            )
+            pair_c = c(pair_a)
+            distance = bilby.core.utils.radius_of_earth * pair_c
+            self.assertAlmostEqual(distance / 1000, pair[0].length, delta=1)
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/test/gw/utils_test.py b/test/gw/utils_test.py
index a3a7582f1f2b94c796156fb9cc031c37a3bb5aee..d8c3286ac4ac3b58250a6f1394b19870d275eb9f 100644
--- a/test/gw/utils_test.py
+++ b/test/gw/utils_test.py
@@ -103,21 +103,13 @@ class TestGWUtils(unittest.TestCase):
     def test_get_event_time(self):
         events = [
             "GW150914",
-            "GW151012",
-            "GW151226",
             "GW170104",
-            "GW170608",
-            "GW170729",
-            "GW170809",
-            "GW170814",
-            "GW170817",
-            "GW170818",
-            "GW170823",
         ]
         for event in events:
             self.assertTrue(isinstance(gwutils.get_event_time(event), float))
 
-        self.assertTrue(gwutils.get_event_time("GW010290") is None)
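+        # an unrecognised event name now raises an error instead of returning None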
+        with self.assertRaises(ValueError):
+            gwutils.get_event_time("GW010290")
 
     def test_read_frame_file(self):
         start_time = 0
diff --git a/test/integration/sampler_run_test.py b/test/integration/sampler_run_test.py
index c72ca91b71332c8a0c80852aee815dc245399323..0a0b1b19e23def083e6dd58f69ebfda780e4c3b2 100644
--- a/test/integration/sampler_run_test.py
+++ b/test/integration/sampler_run_test.py
@@ -25,8 +25,19 @@ class TestRunningSamplers(unittest.TestCase):
         self.priors = bilby.core.prior.PriorDict()
         self.priors["m"] = bilby.core.prior.Uniform(0, 5, boundary="periodic")
         self.priors["c"] = bilby.core.prior.Uniform(-2, 2, boundary="reflective")
+        self.kwargs = dict(
+            save=False,
+            conversion_function=self.conversion_function,
+        )
         bilby.core.utils.check_directory_exists_and_if_not_mkdir("outdir")
 
+    @staticmethod
+    def conversion_function(parameters, likelihood, prior):
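+        # add a simple derived parameter so each test can check that the
+        # conversion function was applied to the posterior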
+        converted = parameters.copy()
+        if "derived" not in converted:
+            converted["derived"] = converted["m"] * converted["c"]
+        return converted
+
     def tearDown(self):
         del self.likelihood
         del self.priors
@@ -34,17 +45,21 @@ class TestRunningSamplers(unittest.TestCase):
         shutil.rmtree("outdir")
 
     def test_run_cpnest(self):
-        _ = bilby.run_sampler(
+        pytest.importorskip("cpnest")
+        res = bilby.run_sampler(
             likelihood=self.likelihood,
             priors=self.priors,
             sampler="cpnest",
             nlive=100,
-            save=False,
             resume=False,
+            **self.kwargs,
         )
+        assert "derived" in res.posterior
+        assert res.log_likelihood_evaluations is not None
 
     def test_run_dnest4(self):
-        _ = bilby.run_sampler(
+        pytest.importorskip("dnest4")
+        res = bilby.run_sampler(
             likelihood=self.likelihood,
             priors=self.priors,
             sampler="dnest4",
@@ -54,20 +69,26 @@ class TestRunningSamplers(unittest.TestCase):
             num_per_step=10,
             thread_steps=1,
             num_particles=50,
-            save=False,
+            **self.kwargs,
         )
+        assert "derived" in res.posterior
+        assert res.log_likelihood_evaluations is not None
 
     def test_run_dynesty(self):
-        _ = bilby.run_sampler(
+        pytest.importorskip("dynesty")
+        res = bilby.run_sampler(
             likelihood=self.likelihood,
             priors=self.priors,
             sampler="dynesty",
             nlive=100,
-            save=False,
+            **self.kwargs,
         )
+        assert "derived" in res.posterior
+        assert res.log_likelihood_evaluations is not None
 
     def test_run_dynamic_dynesty(self):
-        _ = bilby.run_sampler(
+        pytest.importorskip("dynesty")
+        res = bilby.run_sampler(
             likelihood=self.likelihood,
             priors=self.priors,
             sampler="dynamic_dynesty",
@@ -77,62 +98,79 @@ class TestRunningSamplers(unittest.TestCase):
             maxbatch=0,
             maxcall=100,
             bound="single",
-            save=False,
+            **self.kwargs,
         )
+        assert "derived" in res.posterior
+        assert res.log_likelihood_evaluations is not None
 
     def test_run_emcee(self):
-        _ = bilby.run_sampler(
+        pytest.importorskip("emcee")
+        res = bilby.run_sampler(
             likelihood=self.likelihood,
             priors=self.priors,
             sampler="emcee",
             iterations=1000,
             nwalkers=10,
-            save=False,
+            **self.kwargs,
         )
+        assert "derived" in res.posterior
+        assert res.log_likelihood_evaluations is not None
 
     def test_run_kombine(self):
-        _ = bilby.run_sampler(
+        pytest.importorskip("kombine")
+        res = bilby.run_sampler(
             likelihood=self.likelihood,
             priors=self.priors,
             sampler="kombine",
             iterations=2000,
             nwalkers=20,
-            save=False,
             autoburnin=False,
+            **self.kwargs,
         )
+        assert "derived" in res.posterior
+        assert res.log_likelihood_evaluations is not None
 
     def test_run_nestle(self):
-        _ = bilby.run_sampler(
+        pytest.importorskip("nestle")
+        res = bilby.run_sampler(
             likelihood=self.likelihood,
             priors=self.priors,
             sampler="nestle",
             nlive=100,
-            save=False,
+            **self.kwargs,
         )
+        assert "derived" in res.posterior
+        assert res.log_likelihood_evaluations is not None
 
     def test_run_nessai(self):
-        _ = bilby.run_sampler(
+        pytest.importorskip("nessai")
+        res = bilby.run_sampler(
             likelihood=self.likelihood,
             priors=self.priors,
             sampler="nessai",
             nlive=100,
             poolsize=1000,
             max_iteration=1000,
-            save=False,
+            **self.kwargs,
         )
+        assert "derived" in res.posterior
+        assert res.log_likelihood_evaluations is not None
 
     def test_run_pypolychord(self):
         pytest.importorskip("pypolychord")
-        _ = bilby.run_sampler(
+        res = bilby.run_sampler(
             likelihood=self.likelihood,
             priors=self.priors,
             sampler="pypolychord",
             nlive=100,
-            save=False,
+            **self.kwargs,
         )
+        assert "derived" in res.posterior
+        assert res.log_likelihood_evaluations is not None
 
     def test_run_ptemcee(self):
-        _ = bilby.run_sampler(
+        pytest.importorskip("ptemcee")
+        res = bilby.run_sampler(
             likelihood=self.likelihood,
             priors=self.priors,
             sampler="ptemcee",
@@ -141,60 +179,90 @@ class TestRunningSamplers(unittest.TestCase):
             burn_in_act=1,
             ntemps=1,
             frac_threshold=0.5,
-            save=False,
+            **self.kwargs,
         )
+        assert "derived" in res.posterior
+        assert res.log_likelihood_evaluations is not None
 
     @pytest.mark.skipif(sys.version_info[1] <= 6, reason="pymc3 is broken in py36")
+    @pytest.mark.xfail(
+        raises=AttributeError,
+        reason="Dependency issue with pymc3 causes attribute error on import",
+    )
     def test_run_pymc3(self):
-        _ = bilby.run_sampler(
+        pytest.importorskip("pymc3")
+        res = bilby.run_sampler(
             likelihood=self.likelihood,
             priors=self.priors,
             sampler="pymc3",
             draws=50,
             tune=50,
             n_init=250,
-            save=False,
+            **self.kwargs,
         )
+        assert "derived" in res.posterior
+        assert res.log_likelihood_evaluations is not None
 
     def test_run_pymultinest(self):
-        _ = bilby.run_sampler(
+        pytest.importorskip("pymultinest")
+        res = bilby.run_sampler(
             likelihood=self.likelihood,
             priors=self.priors,
             sampler="pymultinest",
             nlive=100,
-            save=False,
+            **self.kwargs,
         )
+        assert "derived" in res.posterior
+        assert res.log_likelihood_evaluations is not None
 
     def test_run_PTMCMCSampler(self):
-        _ = bilby.run_sampler(
+        pytest.importorskip("PTMCMCSampler")
+        res = bilby.run_sampler(
             likelihood=self.likelihood,
             priors=self.priors,
             sampler="PTMCMCsampler",
             Niter=101,
             burn=2,
             isave=100,
-            save=False,
+            **self.kwargs,
         )
+        assert "derived" in res.posterior
+        assert res.log_likelihood_evaluations is not None
 
     def test_run_ultranest(self):
+        pytest.importorskip("ultranest")
         # run using NestedSampler (with nlive specified)
-        _ = bilby.run_sampler(
-            likelihood=self.likelihood, priors=self.priors,
-            sampler="ultranest", nlive=100, save=False,
+        res = bilby.run_sampler(
+            likelihood=self.likelihood,
+            priors=self.priors,
+            sampler="ultranest",
+            nlive=100,
+            **self.kwargs,
         )
+        assert "derived" in res.posterior
+        assert res.log_likelihood_evaluations is not None
 
         # run using ReactiveNestedSampler (with no nlive given)
-        _ = bilby.run_sampler(
-            likelihood=self.likelihood, priors=self.priors,
-            sampler='ultranest', save=False,
+        res = bilby.run_sampler(
+            likelihood=self.likelihood,
+            priors=self.priors,
+            sampler="ultranest",
+            **self.kwargs,
         )
+        assert "derived" in res.posterior
+        assert res.log_likelihood_evaluations is not None
 
     def test_run_bilby_mcmc(self):
-        _ = bilby.run_sampler(
-            likelihood=self.likelihood, priors=self.priors,
-            sampler="bilby_mcmc", nsamples=200, save=False,
+        res = bilby.run_sampler(
+            likelihood=self.likelihood,
+            priors=self.priors,
+            sampler="bilby_mcmc",
+            nsamples=200,
+            **self.kwargs,
             printdt=1,
         )
+        assert "derived" in res.posterior
+        assert res.log_likelihood_evaluations is not None
 
 
 if __name__ == "__main__":