diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index a19412a78c0974cbf6e095ef4935b39e0e74d3a2..3b9191528de5aa7393365f818950019ada2c8e4a 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -33,6 +33,7 @@ stages:
     - python -c "import bilby.gw.sampler"
     - python -c "import bilby.hyper"
     - python -c "import cli_bilby"
+    - python test/import_test.py
     - for script in $(pip show -f bilby | grep "bin\/" | xargs -I {} basename {}); do
           ${script} --help;
       done
@@ -114,7 +115,6 @@ python-3.7-samplers:
     - python -m pip install .
 
     - pytest test/integration/sampler_run_test.py --durations 10
-    - pytest test/integration/sample_from_the_prior_test.py
 
 # test samplers on python 3.6
 python-3.6-samplers:
@@ -148,7 +148,6 @@ scheduled-python-3.7:
 
     # Run tests which are only done on schedule
     - pytest test/integration/example_test.py
-    - pytest test/integration/sample_from_the_prior_test.py
 
 plotting:
   stage: test
diff --git a/bilby/__init__.py b/bilby/__init__.py
index 30e2531b5a51b892836461372a54cbf8c8907f58..04a29e1bc27008ffed916a8f8f171bc6a2315c7c 100644
--- a/bilby/__init__.py
+++ b/bilby/__init__.py
@@ -29,7 +29,7 @@ __version__ = utils.get_version_information()
 
 if sys.version_info < (3,):
     raise ImportError(
-"""You are running bilby 0.6.4 on Python 2
+"""You are running bilby >= 0.6.4 on Python 2
 
 Bilby 0.6.4 and above are no longer compatible with Python 2, and you still
 ended up with this version installed. That's unfortunate; sorry about that.
diff --git a/bilby/core/grid.py b/bilby/core/grid.py
index ccc1e9d7dc24acace50f8b994973f760857a173e..8562e6d67b3afbaa863e85fe8125e62248a7bc41 100644
--- a/bilby/core/grid.py
+++ b/bilby/core/grid.py
@@ -1,12 +1,14 @@
-import numpy as np
-import os
 import json
+import os
 from collections import OrderedDict
 
+import numpy as np
+
 from .prior import Prior, PriorDict
-from .utils import (logtrapzexp, check_directory_exists_and_if_not_mkdir,
-                    logger)
-from .utils import BilbyJsonEncoder, load_json, move_old_file
+from .utils import (
+    logtrapzexp, check_directory_exists_and_if_not_mkdir, logger,
+    BilbyJsonEncoder, load_json, move_old_file
+)
 from .result import FileMovedError
 
 
diff --git a/bilby/core/prior/analytical.py b/bilby/core/prior/analytical.py
index 08e56a98d35d7a465e7f7c676c615904bffff19a..722d4553f38a95b058505088cd5a34653896ed61 100644
--- a/bilby/core/prior/analytical.py
+++ b/bilby/core/prior/analytical.py
@@ -4,7 +4,7 @@ from scipy.special._ufuncs import xlogy, erf, log1p, stdtrit, gammaln, stdtr, \
     btdtri, betaln, btdtr, gammaincinv, gammainc
 
 from .base import Prior
-from bilby.core.utils import logger
+from ..utils import logger
 
 
 class DeltaFunction(Prior):
@@ -148,8 +148,11 @@ class PowerLaw(Prior):
             normalising = (1 + self.alpha) / (self.maximum ** (1 + self.alpha) -
                                               self.minimum ** (1 + self.alpha))
 
-        return (self.alpha * np.nan_to_num(np.log(val)) + np.log(normalising)) + np.log(
-            1. * self.is_in_prior_range(val))
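+        # log(0) = -inf flags samples outside the prior range; suppress the
+        # expected divide/invalid warnings from numpy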
+        with np.errstate(divide='ignore', invalid='ignore'):
+            ln_in_range = np.log(1. * self.is_in_prior_range(val))
+            ln_p = self.alpha * np.nan_to_num(np.log(val)) + np.log(normalising)
+
+        return ln_p + ln_in_range
 
     def cdf(self, val):
         if self.alpha == -1:
@@ -713,7 +716,7 @@ class LogNormal(Prior):
                 _prob = np.exp(-(np.log(val) - self.mu) ** 2 / self.sigma ** 2 / 2)\
                     / np.sqrt(2 * np.pi) / val / self.sigma
         else:
-            _prob = np.zeros(len(val))
+            _prob = np.zeros(val.size)
             idx = (val > self.minimum)
             _prob[idx] = np.exp(-(np.log(val[idx]) - self.mu) ** 2 / self.sigma ** 2 / 2)\
                 / np.sqrt(2 * np.pi) / val[idx] / self.sigma
@@ -737,7 +740,7 @@ class LogNormal(Prior):
                 _ln_prob = -(np.log(val) - self.mu) ** 2 / self.sigma ** 2 / 2\
                     - np.log(np.sqrt(2 * np.pi) * val * self.sigma)
         else:
-            _ln_prob = -np.inf * np.ones(len(val))
+            _ln_prob = -np.inf * np.ones(val.size)
             idx = (val > self.minimum)
             _ln_prob[idx] = -(np.log(val[idx]) - self.mu) ** 2\
                 / self.sigma ** 2 / 2 - np.log(np.sqrt(2 * np.pi) * val[idx] * self.sigma)
@@ -750,7 +753,7 @@ class LogNormal(Prior):
             else:
                 _cdf = 0.5 + erf((np.log(val) - self.mu) / self.sigma / np.sqrt(2)) / 2
         else:
-            _cdf = np.zeros(len(val))
+            _cdf = np.zeros(val.size)
             _cdf[val > self.minimum] = 0.5 + erf((
                 np.log(val[val > self.minimum]) - self.mu) / self.sigma / np.sqrt(2)) / 2
         return _cdf
@@ -807,7 +810,7 @@ class Exponential(Prior):
             else:
                 _prob = np.exp(-val / self.mu) / self.mu
         else:
-            _prob = np.zeros(len(val))
+            _prob = np.zeros(val.size)
             _prob[val >= self.minimum] = np.exp(-val[val >= self.minimum] / self.mu) / self.mu
         return _prob
 
@@ -828,7 +831,7 @@ class Exponential(Prior):
             else:
                 _ln_prob = -val / self.mu - np.log(self.mu)
         else:
-            _ln_prob = -np.inf * np.ones(len(val))
+            _ln_prob = -np.inf * np.ones(val.size)
             _ln_prob[val >= self.minimum] = -val[val >= self.minimum] / self.mu - np.log(self.mu)
         return _ln_prob
 
@@ -839,7 +842,7 @@ class Exponential(Prior):
             else:
                 _cdf = 1. - np.exp(-val / self.mu)
         else:
-            _cdf = np.zeros(len(val))
+            _cdf = np.zeros(val.size)
             _cdf[val >= self.minimum] = 1. - np.exp(-val[val >= self.minimum] / self.mu)
         return _cdf
 
@@ -1010,7 +1013,7 @@ class Beta(Prior):
                 return _ln_prob
             return -np.inf
         else:
-            _ln_prob_sub = -np.inf * np.ones(len(val))
+            _ln_prob_sub = -np.inf * np.ones(val.size)
             idx = np.isfinite(_ln_prob) & (val >= self.minimum) & (val <= self.maximum)
             _ln_prob_sub[idx] = _ln_prob[idx]
             return _ln_prob_sub
@@ -1076,7 +1079,7 @@ class Logistic(Prior):
             else:
                 rescaled = self.mu + self.scale * np.log(val / (1. - val))
         else:
-            rescaled = np.inf * np.ones(len(val))
+            rescaled = np.inf * np.ones(val.size)
             rescaled[val == 0] = -np.inf
             rescaled[(val > 0) & (val < 1)] = self.mu + self.scale\
                 * np.log(val[(val > 0) & (val < 1)] / (1. - val[(val > 0) & (val < 1)]))
@@ -1263,7 +1266,7 @@ class Gamma(Prior):
             else:
                 _ln_prob = xlogy(self.k - 1, val) - val / self.theta - xlogy(self.k, self.theta) - gammaln(self.k)
         else:
-            _ln_prob = -np.inf * np.ones(len(val))
+            _ln_prob = -np.inf * np.ones(val.size)
             idx = (val >= self.minimum)
             _ln_prob[idx] = xlogy(self.k - 1, val[idx]) - val[idx] / self.theta\
                 - xlogy(self.k, self.theta) - gammaln(self.k)
@@ -1276,7 +1279,7 @@ class Gamma(Prior):
             else:
                 _cdf = gammainc(self.k, val / self.theta)
         else:
-            _cdf = np.zeros(len(val))
+            _cdf = np.zeros(val.size)
             _cdf[val >= self.minimum] = gammainc(self.k, val[val >= self.minimum] / self.theta)
         return _cdf
 
diff --git a/bilby/core/prior/base.py b/bilby/core/prior/base.py
index 05df00a7bad5a46021f6597617d833c6ee525816..023f1609f36f5adc77920ea5fda85e7c36fea6cb 100644
--- a/bilby/core/prior/base.py
+++ b/bilby/core/prior/base.py
@@ -5,7 +5,6 @@ import re
 
 import numpy as np
 import scipy.stats
-from scipy.integrate import cumtrapz
 from scipy.interpolate import interp1d
 
 from bilby.core.utils import infer_args_from_method, BilbyJsonEncoder, decode_bilby_json, logger, \
@@ -162,6 +161,7 @@ class Prior(object):
 
     def cdf(self, val):
         """ Generic method to calculate CDF, can be overwritten in subclass """
+        from scipy.integrate import cumtrapz
         if np.any(np.isinf([self.minimum, self.maximum])):
             raise ValueError(
                 "Unable to use the generic CDF calculation for priors with"
@@ -185,7 +185,8 @@ class Prior(object):
         np.nan
 
         """
-        return np.log(self.prob(val))
+        with np.errstate(divide='ignore'):
+            return np.log(self.prob(val))
 
     def is_in_prior_range(self, val):
         """Returns True if val is in the prior boundaries, zero otherwise
@@ -313,6 +314,10 @@ class Prior(object):
     def maximum(self, maximum):
         self._maximum = maximum
 
+    @property
+    def width(self):
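+        """Difference between the maximum and the minimum of the prior."""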
+        return self.maximum - self.minimum
+
     def get_instantiation_dict(self):
         subclass_args = infer_args_from_method(self.__init__)
         dict_with_properties = get_dict_with_properties(self)
diff --git a/bilby/core/prior/conditional.py b/bilby/core/prior/conditional.py
index fbe48469dfdc71b5a38bfa5d50fa242f3478867e..107822828c52ed32111a9462c1d8f4325b143719 100644
--- a/bilby/core/prior/conditional.py
+++ b/bilby/core/prior/conditional.py
@@ -1,11 +1,11 @@
 import numpy as np
 
 from .base import Prior, PriorException
-from bilby.core.prior.interpolated import Interped
-from bilby.core.prior.analytical import DeltaFunction, PowerLaw, Uniform, LogUniform, \
+from .interpolated import Interped
+from .analytical import DeltaFunction, PowerLaw, Uniform, LogUniform, \
     SymmetricLogUniform, Cosine, Sine, Gaussian, TruncatedGaussian, HalfGaussian, \
     LogNormal, Exponential, StudentT, Beta, Logistic, Cauchy, Gamma, ChiSquared, FermiDirac
-from bilby.core.utils import infer_args_from_method, infer_parameters_from_function
+from ..utils import infer_args_from_method, infer_parameters_from_function
 
 
 def conditional_prior_factory(prior_class):
diff --git a/bilby/core/prior/dict.py b/bilby/core/prior/dict.py
index fc9918b04e37054a309b47390c588128c55648c8..aec3b7a83fcd03ea683437ce9a4912616f3f97d2 100644
--- a/bilby/core/prior/dict.py
+++ b/bilby/core/prior/dict.py
@@ -1,15 +1,14 @@
-from importlib import import_module
-from io import open as ioopen
 import json
 import os
+from importlib import import_module
+from io import open as ioopen
 
-from matplotlib.cbook import flatten
 import numpy as np
 
-from bilby.core.prior.analytical import DeltaFunction
-from bilby.core.prior.base import Prior, Constraint
-from bilby.core.prior.joint import JointPrior
-from bilby.core.utils import logger, check_directory_exists_and_if_not_mkdir, BilbyJsonEncoder, decode_bilby_json
+from .analytical import DeltaFunction
+from .base import Prior, Constraint
+from .joint import JointPrior
+from ..utils import logger, check_directory_exists_and_if_not_mkdir, BilbyJsonEncoder, decode_bilby_json
 
 
 class PriorDict(dict):
@@ -155,19 +154,21 @@ class PriorDict(dict):
     @classmethod
     def _get_from_json_dict(cls, prior_dict):
         try:
-            cls = getattr(
+            class_ = getattr(
                 import_module(prior_dict["__module__"]),
                 prior_dict["__name__"])
         except ImportError:
             logger.debug("Cannot import prior module {}.{}".format(
                 prior_dict["__module__"], prior_dict["__name__"]
             ))
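+            # fall back to the class this classmethod was called on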
+            class_ = cls
         except KeyError:
             logger.debug("Cannot find module name to load")
+            class_ = cls
         for key in ["__module__", "__name__", "__prior_dict__"]:
             if key in prior_dict:
                 del prior_dict[key]
-        obj = cls(dict())
+        obj = class_(dict())
         obj.from_dictionary(prior_dict)
         return obj
 
@@ -207,7 +208,15 @@ class PriorDict(dict):
                     module = __name__.replace(
                         '.' + os.path.basename(__file__).replace('.py', ''), ''
                     )
-                cls = getattr(import_module(module), cls, cls)
+                try:
+                    cls = getattr(import_module(module), cls, cls)
+                except ModuleNotFoundError:
+                    logger.error(
+                        "Cannot import prior class {} for entry: {}={}".format(
+                            cls, key, val
+                        )
+                    )
+                    raise
                 if key.lower() in ["conversion_function", "condition_func"]:
                     setattr(self, key, cls)
                 elif isinstance(cls, str):
@@ -368,6 +377,28 @@ class PriorDict(dict):
                 logger.debug('{} not a known prior.'.format(key))
         return samples
 
+    @property
+    def non_fixed_keys(self):
+        keys = self.keys()
+        keys = [k for k in keys if isinstance(self[k], Prior)]
+        keys = [k for k in keys if self[k].is_fixed is False]
+        keys = [k for k in keys if k not in self.constraint_keys]
+        return keys
+
+    @property
+    def fixed_keys(self):
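+        """Keys of fixed priors, excluding constraints."""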
+        return [
+            k for k, p in self.items()
+            if (p.is_fixed and k not in self.constraint_keys)
+        ]
+
+    @property
+    def constraint_keys(self):
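+        """Keys whose priors are Constraint objects."""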
+        return [
+            k for k, p in self.items()
+            if isinstance(p, Constraint)
+        ]
+
     def sample_subset_constrained(self, keys=iter([]), size=None):
         if size is None or size == 1:
             while True:
@@ -433,6 +464,9 @@ class PriorDict(dict):
         prob = np.product([self[key].prob(sample[key])
                            for key in sample], **kwargs)
 
+        return self.check_prob(sample, prob)
+
+    def check_prob(self, sample, prob):
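+        """Apply constraints and the constraint normalisation factor to a precomputed probability."""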
         ratio = self.normalize_constraint_factor(tuple(sample.keys()))
         if np.all(prob == 0.):
             return prob
@@ -466,7 +500,9 @@ class PriorDict(dict):
         """
         ln_prob = np.sum([self[key].ln_prob(sample[key])
                           for key in sample], axis=axis)
+        return self.check_ln_prob(sample, ln_prob)
 
+    def check_ln_prob(self, sample, ln_prob):
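+        """As `check_prob`, but acting on the natural log probability."""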
         ratio = self.normalize_constraint_factor(tuple(sample.keys()))
         if np.all(np.isinf(ln_prob)):
             return ln_prob
@@ -496,6 +532,7 @@ class PriorDict(dict):
         =======
         list: List of floats containing the rescaled sample
         """
+        from matplotlib.cbook import flatten
         return list(flatten([self[key].rescale(sample) for key, sample in zip(keys, theta)]))
 
     def test_redundancy(self, key, disable_logging=False):
@@ -530,14 +567,6 @@ class PriorDict(dict):
         return self.__class__(dictionary=dict(self))
 
 
-class PriorSet(PriorDict):
-
-    def __init__(self, dictionary=None, filename=None):
-        """ DEPRECATED: USE PriorDict INSTEAD"""
-        logger.warning("The name 'PriorSet' is deprecated use 'PriorDict' instead")
-        super(PriorSet, self).__init__(dictionary, filename)
-
-
 class PriorDictException(Exception):
     """ General base class for all prior dict exceptions """
 
@@ -656,7 +685,8 @@ class ConditionalPriorDict(PriorDict):
         for key, value in sample.items():
             self[key].least_recently_sampled = value
         res = [self[key].prob(sample[key], **self.get_required_variables(key)) for key in sample]
-        return np.product(res, **kwargs)
+        prob = np.product(res, **kwargs)
+        return self.check_prob(sample, prob)
 
     def ln_prob(self, sample, axis=None):
         """
@@ -677,7 +707,8 @@ class ConditionalPriorDict(PriorDict):
         for key, value in sample.items():
             self[key].least_recently_sampled = value
         res = [self[key].ln_prob(sample[key], **self.get_required_variables(key)) for key in sample]
-        return np.sum(res, axis=axis)
+        ln_prob = np.sum(res, axis=axis)
+        return self.check_ln_prob(sample, ln_prob)
 
     def rescale(self, keys, theta):
         """Rescale samples from unit cube to prior
diff --git a/bilby/core/prior/interpolated.py b/bilby/core/prior/interpolated.py
index 992a75b50ab8c2a0647f9b42c896786ed4aec08c..ece311a5a5bb475cc543b29ffd37b949592d802f 100644
--- a/bilby/core/prior/interpolated.py
+++ b/bilby/core/prior/interpolated.py
@@ -1,9 +1,8 @@
 import numpy as np
-from scipy.integrate import cumtrapz
 from scipy.interpolate import interp1d
 
 from .base import Prior
-from bilby.core.utils import logger
+from ..utils import logger
 
 
 class Interped(Prior):
@@ -164,6 +163,7 @@ class Interped(Prior):
         self._initialize_attributes()
 
     def _initialize_attributes(self):
+        from scipy.integrate import cumtrapz
         if np.trapz(self._yy, self.xx) != 1:
             logger.debug('Supplied PDF for {} is not normalised, normalising.'.format(self.name))
         self._yy /= np.trapz(self._yy, self.xx)
diff --git a/bilby/core/prior/joint.py b/bilby/core/prior/joint.py
index b3ee2f0b88a38d1da374f87173a68ad402ce9c82..a10a0add2038e120acd82017a20656e440edd636 100644
--- a/bilby/core/prior/joint.py
+++ b/bilby/core/prior/joint.py
@@ -3,7 +3,7 @@ import scipy.stats
 from scipy.special import erfinv
 
 from .base import Prior, PriorException
-from bilby.core.utils import logger, infer_args_from_method, get_dict_with_properties
+from ..utils import logger, infer_args_from_method, get_dict_with_properties
 
 
 class BaseJointPriorDist(object):
diff --git a/bilby/core/prior/slabspike.py b/bilby/core/prior/slabspike.py
index 54872b8b1c1bcdea91ad567bb85a707207c36b52..8482b81460c799210e923166fd11a14fc53ea520 100644
--- a/bilby/core/prior/slabspike.py
+++ b/bilby/core/prior/slabspike.py
@@ -1,7 +1,7 @@
 import numpy as np
 
-from bilby.core.prior.base import Prior
-from bilby.core.utils import logger
+from .base import Prior
+from ..utils import logger
 
 
 class SlabSpikePrior(Prior):
diff --git a/bilby/core/result.py b/bilby/core/result.py
index b668c77191daf857a23ba8e905da7643e39e4369..fc709c512f1c0f18a61244363153929616bbbe14 100644
--- a/bilby/core/result.py
+++ b/bilby/core/result.py
@@ -1,22 +1,14 @@
 import inspect
+import json
 import os
 from collections import OrderedDict, namedtuple
 from copy import copy
-from distutils.version import LooseVersion
 from importlib import import_module
 from itertools import product
-from tqdm import tqdm
 
-import corner
-import h5py
-import json
-import matplotlib
-import matplotlib.pyplot as plt
-from matplotlib import lines as mpllines
 import numpy as np
 import pandas as pd
 import scipy.stats
-from scipy.special import logsumexp
 
 from . import utils
 from .utils import (
@@ -28,6 +20,7 @@ from .utils import (
     decode_bilby_json, docstring,
     recursively_save_dict_contents_to_group,
     recursively_load_dict_contents_from_group,
+    recursively_decode_bilby_json,
 )
 from .prior import Prior, PriorDict, DeltaFunction
 
@@ -126,6 +119,7 @@ def get_weights_for_reweighting(
     n_checkpoint: int
         Number of samples to reweight before writing a resume file
     """
+    from tqdm.auto import tqdm
 
     nposterior = len(result.posterior)
 
@@ -257,6 +251,7 @@ def reweight(result, label=None, new_likelihood=None, new_prior=None,
         An array of the natural-log priors from the old likelihood
 
     """
+    from scipy.special import logsumexp
 
     result = copy(result)
 
@@ -509,6 +504,7 @@ class Result(object):
     @classmethod
     @docstring(_load_doctstring.format(format="hdf5"))
     def from_hdf5(cls, filename=None, outdir=None, label=None):
+        import h5py
         filename = _determine_file_name(filename, outdir, label, 'hdf5', False)
         with h5py.File(filename, "r") as ff:
             data = recursively_load_dict_contents_from_group(ff, '/')
@@ -563,6 +559,17 @@ class Result(object):
         else:
             return ''
 
+    @property
+    def meta_data(self):
+        return self._meta_data
+
+    @meta_data.setter
+    def meta_data(self, meta_data):
+        if meta_data is None:
+            meta_data = dict()
+        meta_data = recursively_decode_bilby_json(meta_data)
+        self._meta_data = meta_data
+
     @property
     def priors(self):
         if self._priors is not None:
@@ -768,6 +775,7 @@ class Result(object):
                     with open(filename, 'w') as file:
                         json.dump(dictionary, file, indent=2, cls=BilbyJsonEncoder)
             elif extension == 'hdf5':
+                import h5py
                 dictionary["__module__"] = self.__module__
                 dictionary["__name__"] = self.__class__.__name__
                 with h5py.File(filename, 'w') as h5file:
@@ -982,6 +990,7 @@ class Result(object):
         figure: matplotlib.pyplot.figure
             A matplotlib figure object
         """
+        import matplotlib.pyplot as plt
         logger.info('Plotting {} marginal distribution'.format(key))
         label = self.get_latex_labels_from_parameter_keys([key])[0]
         fig, ax = plt.subplots()
@@ -1151,6 +1160,8 @@ class Result(object):
             A matplotlib figure instance
 
         """
+        import corner
+        import matplotlib.pyplot as plt
 
         # If in testing mode, no corner plots are generated
         if utils.command_line_args.bilby_test_mode:
@@ -1163,12 +1174,7 @@ class Result(object):
             truth_color='tab:orange', quantiles=[0.16, 0.84],
             levels=(1 - np.exp(-0.5), 1 - np.exp(-2), 1 - np.exp(-9 / 2.)),
             plot_density=False, plot_datapoints=True, fill_contours=True,
-            max_n_ticks=3)
-
-        if LooseVersion(matplotlib.__version__) < "2.1":
-            defaults_kwargs['hist_kwargs'] = dict(normed=True)
-        else:
-            defaults_kwargs['hist_kwargs'] = dict(density=True)
+            max_n_ticks=3, hist_kwargs=dict(density=True))
 
         if 'lionize' in kwargs and kwargs['lionize'] is True:
             defaults_kwargs['truth_color'] = 'tab:blue'
@@ -1280,6 +1286,7 @@ class Result(object):
     @latex_plot_format
     def plot_walkers(self, **kwargs):
         """ Method to plot the trace of the walkers in an ensemble MCMC plot """
+        import matplotlib.pyplot as plt
         if hasattr(self, 'walkers') is False:
             logger.warning("Cannot plot_walkers as no walkers are saved")
             return
@@ -1343,6 +1350,7 @@ class Result(object):
             Path to the outdir. Default is the one store in the result object.
 
         """
+        import matplotlib.pyplot as plt
 
         # Determine model_posterior, the subset of the full posterior which
         # should be passed into the model
@@ -1742,7 +1750,7 @@ class ResultList(list):
         else:
             raise TypeError("Could not append a non-Result type")
 
-    def combine(self):
+    def combine(self, shuffle=False):
         """
         Return the combined results in a :class:`bilby.core.result.Result`
         object.
@@ -1765,11 +1773,20 @@ class ResultList(list):
         # check which kind of sampler was used: MCMC or Nested Sampling
         if result._nested_samples is not None:
             posteriors, result = self._combine_nested_sampled_runs(result)
+        elif result.sampler in ["bilbymcmc"]:
+            posteriors, result = self._combine_mcmc_sampled_runs(result)
         else:
             posteriors = [res.posterior for res in self]
 
         combined_posteriors = pd.concat(posteriors, ignore_index=True)
-        result.posterior = combined_posteriors.sample(len(combined_posteriors))  # shuffle
+
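+        # sampling the full length without replacement is a random permutation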
+        if shuffle:
+            result.posterior = combined_posteriors.sample(len(combined_posteriors))
+        else:
+            result.posterior = combined_posteriors
+
+        logger.info(f"Combined results have {len(result.posterior)} samples")
+
         return result
 
     def _combine_nested_sampled_runs(self, result):
@@ -1792,6 +1809,7 @@ class ResultList(list):
         result: bilby.core.result.Result
             The result object with the combined evidences.
         """
+        from scipy.special import logsumexp
         self.check_nested_samples()
 
         # Combine evidences
@@ -1818,6 +1836,47 @@ class ResultList(list):
         result.sampler_kwargs = None
         return posteriors, result
 
+    def _combine_mcmc_sampled_runs(self, result):
+        """
+        Combine multiple MCMC sampling runs.
+
+        Currently this keeps all posterior samples from each run.
+
+        Parameters
+        ==========
+        result: bilby.core.result.Result
+            The result object to put the new samples in.
+
+        Returns
+        =======
+        posteriors: list
+            A list of pandas DataFrames containing the posterior samples from
+            each run.
+        result: bilby.core.result.Result
+            The result object with the combined evidences.
+        """
+
+        from scipy.special import logsumexp
+
+        # Combine evidences
+        log_evidences = np.array([res.log_evidence for res in self])
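+        # with b=1/N this gives the log of the mean evidence, log(sum_i Z_i / N)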
+        result.log_evidence = logsumexp(log_evidences, b=1. / len(self))
+        result.log_bayes_factor = result.log_evidence - result.log_noise_evidence
+
+        # Propagate uncertainty in the combined evidence
+        log_errs = [res.log_evidence_err for res in self if np.isfinite(res.log_evidence_err)]
+        if len(log_errs) > 0:
+            result.log_evidence_err = 0.5 * logsumexp(2 * np.array(log_errs), b=1. / len(self))
+        else:
+            result.log_evidence_err = np.nan
+
+        # Combine posteriors, keeping all samples from each run
+        posteriors = list()
+        for res in self:
+            posteriors.append(res.posterior)
+
+        return posteriors, result
+
     def check_nested_samples(self):
         for res in self:
             try:
@@ -1884,6 +1943,8 @@ def plot_multiple(results, filename=None, labels=None, colours=None,
         A matplotlib figure instance
 
     """
+    import matplotlib.pyplot as plt
+    import matplotlib.lines as mpllines
 
     kwargs['show_titles'] = False
     kwargs['truths'] = None
@@ -1975,6 +2036,7 @@ def make_pp_plot(results, filename=None, save=True, confidence_interval=[0.68, 0
         matplotlib figure and a NamedTuple with attributes `combined_pvalue`,
         `pvalues`, and `names`.
     """
+    import matplotlib.pyplot as plt
 
     if keys is None:
         keys = results[0].search_parameter_keys
diff --git a/bilby/core/sampler/__init__.py b/bilby/core/sampler/__init__.py
index 1334ac0ace9f2cd925263903e78526b679b17e23..93202387f35767e4e58dad4298ca309c4b441506 100644
--- a/bilby/core/sampler/__init__.py
+++ b/bilby/core/sampler/__init__.py
@@ -6,7 +6,6 @@ from collections import OrderedDict
 import bilby
 from ..utils import command_line_args, logger, loaded_modules_dict
 from ..prior import PriorDict, DeltaFunction
-
 from .base_sampler import Sampler, SamplingMarginalisedParameterError
 from .cpnest import Cpnest
 from .dynamic_dynesty import DynamicDynesty
diff --git a/bilby/core/sampler/dynamic_dynesty.py b/bilby/core/sampler/dynamic_dynesty.py
index 88b16763b77cc3d5a6c81494980f2d059bda3554..8bb6d647aad013c5be957cfde6311f10f9feda82 100644
--- a/bilby/core/sampler/dynamic_dynesty.py
+++ b/bilby/core/sampler/dynamic_dynesty.py
@@ -1,6 +1,5 @@
 
 import os
-import dill as pickle
 import signal
 
 import numpy as np
@@ -168,18 +167,20 @@ class DynamicDynesty(Dynesty):
     def write_current_state(self):
         """
         """
+        import dill
         check_directory_exists_and_if_not_mkdir(self.outdir)
         with open(self.resume_file, 'wb') as file:
-            pickle.dump(self, file)
+            dill.dump(self, file)
 
     def read_saved_state(self, continuing=False):
         """
         """
+        import dill
 
         logger.debug("Reading resume file {}".format(self.resume_file))
         if os.path.isfile(self.resume_file):
             with open(self.resume_file, 'rb') as file:
-                self = pickle.load(file)
+                self = dill.load(file)
         else:
             logger.debug(
                 "Failed to read resume file {}".format(self.resume_file))
diff --git a/bilby/core/sampler/dynesty.py b/bilby/core/sampler/dynesty.py
index d453ffd7d9e1c233cc7625dbe75026552c45f1a8..b4595f96916bb0b82ca064be66e40a0fea9aa8ba 100644
--- a/bilby/core/sampler/dynesty.py
+++ b/bilby/core/sampler/dynesty.py
@@ -1,13 +1,10 @@
 import datetime
-import dill
 import os
 import sys
-import pickle
 import signal
 import time
+import warnings
 
-from tqdm.auto import tqdm
-import matplotlib.pyplot as plt
 import numpy as np
 from pandas import DataFrame
 
@@ -20,11 +17,6 @@ from ..utils import (
 from .base_sampler import Sampler, NestedSampler
 from ..result import rejection_sample
 
-from numpy import linalg
-from dynesty.utils import unitcheck
-import warnings
-
-
 _likelihood = None
 _priors = None
 _search_parameter_keys = None
@@ -130,7 +122,7 @@ class Dynesty(NestedSampler):
     """
     default_kwargs = dict(bound='multi', sample='rwalk',
                           verbose=True, periodic=None, reflective=None,
-                          check_point_delta_t=600, nlive=1000,
+                          check_point_delta_t=1800, nlive=1000,
                           first_update=None, walks=100,
                           npdim=None, rstate=None, queue_size=1, pool=None,
                           use_pool=None, live_points=None,
@@ -218,6 +210,7 @@ class Dynesty(NestedSampler):
                     kwargs['queue_size'] = kwargs.pop(equiv)
 
     def _verify_kwargs_against_default_kwargs(self):
+        from tqdm.auto import tqdm
         if not self.kwargs['walks']:
             self.kwargs['walks'] = self.ndim * 10
         if not self.kwargs['update_interval']:
@@ -323,6 +316,7 @@ class Dynesty(NestedSampler):
 
     def run_sampler(self):
         import dynesty
+        import dill
         logger.info("Using dynesty version {}".format(dynesty.__version__))
 
         if self.kwargs.get("sample", "rwalk") == "rwalk":
@@ -376,7 +370,7 @@ class Dynesty(NestedSampler):
         check_directory_exists_and_if_not_mkdir(self.outdir)
         dynesty_result = "{}/{}_dynesty.pickle".format(self.outdir, self.label)
         with open(dynesty_result, 'wb') as file:
-            pickle.dump(out, file)
+            dill.dump(out, file)
 
         self._generate_result(out)
         self.calc_likelihood_count()
@@ -482,6 +476,7 @@ class Dynesty(NestedSampler):
         """
         from ... import __version__ as bilby_version
         from dynesty import __version__ as dynesty_version
+        import dill
         versions = dict(bilby=bilby_version, dynesty=dynesty_version)
         if os.path.isfile(self.resume_file):
             logger.info("Reading resume file {}".format(self.resume_file))
@@ -560,6 +555,7 @@ class Dynesty(NestedSampler):
 
         from ... import __version__ as bilby_version
         from dynesty import __version__ as dynesty_version
+        import dill
         check_directory_exists_and_if_not_mkdir(self.outdir)
         end_time = datetime.datetime.now()
         if hasattr(self, 'start_time'):
@@ -605,6 +601,7 @@ class Dynesty(NestedSampler):
         df.to_csv(filename, index=False, header=True, sep=' ')
 
     def plot_current_state(self):
+        import matplotlib.pyplot as plt
         if self.check_point_plot:
             import dynesty.plotting as dyplot
             labels = [label.replace('_', ' ') for label in self.search_parameter_keys]
@@ -704,6 +701,7 @@ class Dynesty(NestedSampler):
 
 def sample_rwalk_bilby(args):
     """ Modified bilby-implemented version of dynesty.sampling.sample_rwalk """
+    from dynesty.utils import unitcheck
 
     # Unzipping.
     (u, loglstar, axes, scale,
@@ -737,7 +735,7 @@ def sample_rwalk_bilby(args):
 
         # Propose a direction on the unit n-sphere.
         drhat = rstate.randn(n)
-        drhat /= linalg.norm(drhat)
+        drhat /= np.linalg.norm(drhat)
 
         # Scale based on dimensionality.
         dr = drhat * rstate.rand() ** (1.0 / n)
diff --git a/bilby/core/sampler/emcee.py b/bilby/core/sampler/emcee.py
index 81e4eedd0ac072bdc2fccb4e7ad5359727ebd9f2..4718519841143a3d004eaf5be76ccb9c9652d456 100644
--- a/bilby/core/sampler/emcee.py
+++ b/bilby/core/sampler/emcee.py
@@ -1,14 +1,13 @@
-from collections import namedtuple
 import os
 import signal
 import shutil
-from shutil import copyfile
 import sys
+from collections import namedtuple
+from distutils.version import LooseVersion
+from shutil import copyfile
 
 import numpy as np
 from pandas import DataFrame
-from distutils.version import LooseVersion
-import dill as pickle
 
 from ..utils import logger, check_directory_exists_and_if_not_mkdir
 from .base_sampler import MCMCSampler, SamplerError
@@ -254,13 +253,14 @@ class Emcee(MCMCSampler):
 
     def checkpoint(self):
         """ Writes a pickle file of the sampler to disk using dill """
+        import dill
         logger.info("Checkpointing sampler to file {}"
                     .format(self.checkpoint_info.sampler_file))
         with open(self.checkpoint_info.sampler_file, 'wb') as f:
             # Overwrites the stored sampler chain with one that is truncated
             # to only the completed steps
             self.sampler._chain = self.sampler_chain
-            pickle.dump(self._sampler, f)
+            dill.dump(self._sampler, f)
 
     def checkpoint_and_exit(self, signum, frame):
         logger.info("Recieved signal {}".format(signum))
@@ -283,10 +283,11 @@ class Emcee(MCMCSampler):
         if hasattr(self, '_sampler'):
             pass
         elif self.resume and os.path.isfile(self.checkpoint_info.sampler_file):
+            import dill
             logger.info("Resuming run from checkpoint file {}"
                         .format(self.checkpoint_info.sampler_file))
             with open(self.checkpoint_info.sampler_file, 'rb') as f:
-                self._sampler = pickle.load(f)
+                self._sampler = dill.load(f)
             self._set_pos0_for_resume()
         else:
             self._initialise_sampler()
diff --git a/bilby/core/sampler/fake_sampler.py b/bilby/core/sampler/fake_sampler.py
index 1992d74b1628f1d937100bd1645d1d0105fb128e..8d218472d13f7769c4b88cdd0155cb8f7c04dd0d 100644
--- a/bilby/core/sampler/fake_sampler.py
+++ b/bilby/core/sampler/fake_sampler.py
@@ -1,5 +1,6 @@
 
 import numpy as np
+
 from .base_sampler import Sampler
 from ..result import read_in_result
 
diff --git a/bilby/core/sampler/kombine.py b/bilby/core/sampler/kombine.py
index d077499cdb27a75b8dbbd1545ef5b54327be2b6d..83947fc88378c5401508eac458192141cd9f221e 100644
--- a/bilby/core/sampler/kombine.py
+++ b/bilby/core/sampler/kombine.py
@@ -1,7 +1,9 @@
-from ..utils import logger
-import numpy as np
 import os
+
+import numpy as np
+
 from .emcee import Emcee
+from ..utils import logger
 
 
 class Kombine(Emcee):
diff --git a/bilby/core/sampler/ptemcee.py b/bilby/core/sampler/ptemcee.py
index 2be681f37664ba0e52d4b60a5182658cc9b3ca2e..471439a1dd7a38400e05c3557b334cdba0f29da2 100644
--- a/bilby/core/sampler/ptemcee.py
+++ b/bilby/core/sampler/ptemcee.py
@@ -1,18 +1,14 @@
-
-import os
-import datetime
 import copy
+import datetime
+import logging
+import os
 import signal
 import sys
 import time
-import dill
 from collections import namedtuple
-import logging
 
 import numpy as np
 import pandas as pd
-import matplotlib.pyplot as plt
-import scipy.signal
 
 from ..utils import logger, check_directory_exists_and_if_not_mkdir
 from .base_sampler import SamplerError, MCMCSampler
@@ -372,6 +368,7 @@ class Ptemcee(MCMCSampler):
         import ptemcee
 
         if os.path.isfile(self.resume_file) and self.resume is True:
+            import dill
             logger.info("Resume data {} found".format(self.resume_file))
             with open(self.resume_file, "rb") as file:
                 data = dill.load(file)
@@ -833,10 +830,12 @@ def check_iteration(
 
 
 def get_max_gradient(x, axis=0, window_length=11, polyorder=2, smooth=False):
+    from scipy.signal import savgol_filter
     if smooth:
-        x = scipy.signal.savgol_filter(
-            x, axis=axis, window_length=window_length, polyorder=3)
-    return np.max(scipy.signal.savgol_filter(
+        x = savgol_filter(
+            x, axis=axis, window_length=window_length, polyorder=3
+        )
+    return np.max(savgol_filter(
         x, axis=axis, window_length=window_length, polyorder=polyorder,
         deriv=1))
 
@@ -963,6 +962,7 @@ def checkpoint(
     Q_list,
     time_per_check,
 ):
+    import dill
     logger.info("Writing checkpoint and diagnostics")
     ndim = sampler.dim
 
@@ -1002,6 +1002,7 @@ def checkpoint(
 def plot_walkers(walkers, nburn, thin, parameter_labels, outdir, label,
                  discard=0):
     """ Method to plot the trace of the walkers in an ensemble MCMC plot """
+    import matplotlib.pyplot as plt
     nwalkers, nsteps, ndim = walkers.shape
     if np.isnan(nburn):
         nburn = nsteps
@@ -1053,6 +1054,7 @@ def plot_walkers(walkers, nburn, thin, parameter_labels, outdir, label,
 def plot_tau(
     tau_list_n, tau_list, search_parameter_keys, outdir, label, tau, autocorr_tau,
 ):
+    import matplotlib.pyplot as plt
     fig, ax = plt.subplots()
     for i, key in enumerate(search_parameter_keys):
         ax.plot(tau_list_n, np.array(tau_list)[:, i], label=key)
@@ -1065,6 +1067,7 @@ def plot_tau(
 
 
 def plot_mean_log_posterior(mean_log_posterior, outdir, label):
+    import matplotlib.pyplot as plt
 
     ntemps, nsteps = mean_log_posterior.shape
     ymax = np.max(mean_log_posterior)
@@ -1085,6 +1088,7 @@ def plot_mean_log_posterior(mean_log_posterior, outdir, label):
 def compute_evidence(sampler, log_likelihood_array, outdir, label, discard, nburn, thin,
                      iteration, make_plots=True):
     """ Computes the evidence using thermodynamic integration """
+    import matplotlib.pyplot as plt
     betas = sampler.betas
     # We compute the evidence without the burnin samples, but we do not thin
     lnlike = log_likelihood_array[:, :, discard + nburn : iteration]
diff --git a/bilby/core/utils.py b/bilby/core/utils.py
index 7bfb1faa98f6112e02498eb17ed3c0e7eacd843b..acc412d8fc463f5a1163508632cba825dd42990e 100644
--- a/bilby/core/utils.py
+++ b/bilby/core/utils.py
@@ -1,26 +1,22 @@
-
-from distutils.spawn import find_executable
-import logging
-import os
-import shutil
-import sys
-from math import fmod
 import argparse
-import inspect
 import functools
+import inspect
+import json
+import logging
+import multiprocessing
+import os
 import types
+import shutil
 import subprocess
-import multiprocessing
+import sys
+from distutils.spawn import find_executable
 from importlib import import_module
 from numbers import Number
-import json
-import warnings
 
 import numpy as np
+import pandas as pd
 from scipy.interpolate import interp2d
 from scipy.special import logsumexp
-import pandas as pd
-import matplotlib.pyplot as plt
 
 logger = logging.getLogger('bilby')
 
@@ -323,37 +319,6 @@ def theta_phi_to_ra_dec(theta, phi, gmst):
     return ra, dec
 
 
-def gps_time_to_gmst(gps_time):
-    """
-    Convert gps time to Greenwich mean sidereal time in radians
-
-    This method assumes a constant rotation rate of earth since 00:00:00, 1 Jan. 2000
-    A correction has been applied to give the exact correct value for 00:00:00, 1 Jan. 2018
-    Error accumulates at a rate of ~0.0001 radians/decade.
-
-    Parameters
-    ==========
-    gps_time: float
-        gps time
-
-    Returns
-    =======
-    float: Greenwich mean sidereal time in radians
-
-    """
-    warnings.warn(
-        "Function gps_time_to_gmst deprecated, use "
-        "lal.GreenwichMeanSiderealTime(time) instead",
-        DeprecationWarning)
-    omega_earth = 2 * np.pi * (1 / 365.2425 + 1) / 86400.
-    gps_2000 = 630720013.
-    gmst_2000 = (6 + 39. / 60 + 51.251406103947375 / 3600) * np.pi / 12
-    correction_2018 = -0.00017782487379358614
-    sidereal_time = omega_earth * (gps_time - gps_2000) + gmst_2000 + correction_2018
-    gmst = fmod(sidereal_time, 2 * np.pi)
-    return gmst
-
-
 def create_white_noise(sampling_frequency, duration):
     """ Create white_noise which is then coloured by a given PSD
 
@@ -543,11 +508,7 @@ def check_directory_exists_and_if_not_mkdir(directory):
     """
     if directory == "":
         return
-    elif not os.path.exists(directory):
-        os.makedirs(directory)
-        logger.debug('Making directory {}'.format(directory))
-    else:
-        logger.debug('Directory {} exists'.format(directory))
+    os.makedirs(directory, exist_ok=True)
 
 
 def set_up_command_line_arguments():
@@ -1143,6 +1104,28 @@ def decode_astropy_quantity(dct):
         return dct
 
 
+def recursively_decode_bilby_json(dct):
+    """
+    Recursively call `decode_bilby_json`
+
+    Parameters
+    ==========
+    dct: dict
+        The dictionary to decode
+
+    Returns
+    =======
+    dct: dict
+        The original dictionary with all elements decoded where possible
+    """
+    dct = decode_bilby_json(dct)
+    if isinstance(dct, dict):
+        for key in dct:
+            if isinstance(dct[key], dict):
+                dct[key] = recursively_decode_bilby_json(dct[key])
+    return dct
+
+
 def reflect(u):
     """
     Iteratively reflect a number until it is contained in [0, 1].
@@ -1208,6 +1191,7 @@ def latex_plot_format(func):
     """
     @functools.wraps(func)
     def wrapper_decorator(*args, **kwargs):
+        import matplotlib.pyplot as plt
         from matplotlib import rcParams
 
         if "BILBY_STYLE" in kwargs:
@@ -1341,10 +1325,14 @@ def decode_from_hdf5(item):
     elif isinstance(item, (bytes, bytearray)):
         output = item.decode()
     elif isinstance(item, np.ndarray):
-        if "|S" in str(item.dtype) or isinstance(item[0], bytes):
+        if item.size == 0:
+            output = item
+        elif "|S" in str(item.dtype) or isinstance(item[0], bytes):
             output = [it.decode() for it in item]
         else:
             output = item
+    elif isinstance(item, np.bool_):
+        output = bool(item)
     else:
         output = item
     return output
@@ -1398,9 +1386,11 @@ def encode_for_hdf5(item):
     elif isinstance(item, pd.Series):
         output = item.to_dict()
     elif inspect.isfunction(item) or inspect.isclass(item):
-        output = dict(__module__=item.__module__, __name__=item.__name__)
+        output = dict(__module__=item.__module__, __name__=item.__name__, __class__=True)
     elif isinstance(item, dict):
         output = item.copy()
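+    # store tuples as dicts keyed by stringified index, since HDF5 has no tuple type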
+    elif isinstance(item, tuple):
+        output = {str(ii): elem for ii, elem in enumerate(item)}
     else:
         raise ValueError(f'Cannot save {type(item)} type')
     return output
diff --git a/bilby/gw/conversion.py b/bilby/gw/conversion.py
index f034dfe8458178c91c8b340a8f5ab424ff243cf2..5c4577d6bfb85aaa478310cc3930d9da7078bd57 100644
--- a/bilby/gw/conversion.py
+++ b/bilby/gw/conversion.py
@@ -1,7 +1,6 @@
 import sys
 import multiprocessing
 
-from tqdm.auto import tqdm
 import numpy as np
 from pandas import DataFrame
 
@@ -12,13 +11,6 @@ from .utils import lalsim_SimInspiralTransformPrecessingNewInitialConditions
 from .eos.eos import SpectralDecompositionEOS, EOSFamily, IntegrateTOV
 from .cosmology import get_cosmology
 
-try:
-    from astropy import units
-    from astropy.cosmology import z_at_value
-except ImportError:
-    logger.debug("You do not have astropy installed currently. You will"
-                 " not be able to use some of the prebuilt functions.")
-
 
 def redshift_to_luminosity_distance(redshift, cosmology=None):
     cosmology = get_cosmology(cosmology)
@@ -32,12 +24,16 @@ def redshift_to_comoving_distance(redshift, cosmology=None):
 
 @np.vectorize
 def luminosity_distance_to_redshift(distance, cosmology=None):
+    from astropy import units
+    from astropy.cosmology import z_at_value
     cosmology = get_cosmology(cosmology)
     return z_at_value(cosmology.luminosity_distance, distance * units.Mpc)
 
 
 @np.vectorize
 def comoving_distance_to_redshift(distance, cosmology=None):
+    from astropy import units
+    from astropy.cosmology import z_at_value
     cosmology = get_cosmology(cosmology)
     return z_at_value(cosmology.comoving_distance, distance * units.Mpc)
 
@@ -258,16 +254,18 @@ def convert_to_lal_binary_black_hole_parameters(parameters):
     for angle in ['tilt_1', 'tilt_2', 'theta_jn']:
         cos_angle = str('cos_' + angle)
         if cos_angle in converted_parameters.keys():
-            converted_parameters[angle] =\
-                np.arccos(converted_parameters[cos_angle])
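+            # arccos returns nan with an invalid-value warning outside [-1, 1]; silence it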
+            with np.errstate(invalid="ignore"):
+                converted_parameters[angle] =\
+                    np.arccos(converted_parameters[cos_angle])
 
     if "delta_phase" in original_keys:
-        converted_parameters["phase"] = np.mod(
-            converted_parameters["delta_phase"]
-            - np.sign(np.cos(converted_parameters["theta_jn"]))
-            * converted_parameters["psi"],
-            2 * np.pi
-        )
+        with np.errstate(invalid="ignore"):
+            converted_parameters["phase"] = np.mod(
+                converted_parameters["delta_phase"]
+                - np.sign(np.cos(converted_parameters["theta_jn"]))
+                * converted_parameters["psi"],
+                2 * np.pi
+            )
 
     added_keys = [key for key in converted_parameters.keys()
                   if key not in original_keys]
@@ -516,7 +514,8 @@ def chirp_mass_and_mass_ratio_to_total_mass(chirp_mass, mass_ratio):
         Mass of the lighter object
     """
 
-    return chirp_mass * (1 + mass_ratio) ** 1.2 / mass_ratio ** 0.6
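+    # a fractional power of a negative mass ratio gives nan; ignore the invalid-value warning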
+    with np.errstate(invalid="ignore"):
+        return chirp_mass * (1 + mass_ratio) ** 1.2 / mass_ratio ** 0.6
 
 
 def component_masses_to_chirp_mass(mass_1, mass_2):
@@ -1148,6 +1147,7 @@ def compute_snrs(sample, likelihood):
                 sample['{}_optimal_snr'.format(ifo.name)] = \
                     per_detector_snr.optimal_snr_squared.real ** 0.5
         else:
+            from tqdm.auto import tqdm
             logger.info(
                 'Computing SNRs for every sample.')
 
@@ -1212,6 +1212,7 @@ def generate_posterior_samples_from_marginalized_likelihood(
         return samples
     elif not isinstance(samples, DataFrame):
         raise ValueError("Unable to handle input samples of type {}".format(type(samples)))
+    from tqdm.auto import tqdm
 
     logger.info('Reconstructing marginalised parameters.')
 
@@ -1242,6 +1243,7 @@ def generate_sky_frame_parameters(samples, likelihood):
         return
     elif not isinstance(samples, DataFrame):
         raise ValueError
+    from tqdm.auto import tqdm
 
     logger.info('Generating sky frame parameters.')
     new_samples = list()
diff --git a/bilby/gw/cosmology.py b/bilby/gw/cosmology.py
index 861fdf23f99ef2e8a3190ad47ec0eeaddb44f7d4..ded3f5eb6b01c01c1066afdacb0c13f8f20a7a97 100644
--- a/bilby/gw/cosmology.py
+++ b/bilby/gw/cosmology.py
@@ -1,19 +1,20 @@
-from ..core.utils import logger
+DEFAULT_COSMOLOGY = None
+COSMOLOGY = [None, str(None)]
 
-try:
+
+def _set_default_cosmology():
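+    """Lazily import astropy and set Planck15 as the default cosmology on first use."""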
     from astropy import cosmology as cosmo
-    DEFAULT_COSMOLOGY = cosmo.Planck15
-    COSMOLOGY = [DEFAULT_COSMOLOGY, DEFAULT_COSMOLOGY.name]
-except ImportError:
-    logger.debug("You do not have astropy installed currently. You will"
-                 " not be able to use some of the prebuilt functions.")
-    DEFAULT_COSMOLOGY = None
-    COSMOLOGY = [None, str(None)]
+    global DEFAULT_COSMOLOGY, COSMOLOGY
+    if DEFAULT_COSMOLOGY is None:
+        DEFAULT_COSMOLOGY = cosmo.Planck15
+        COSMOLOGY = [DEFAULT_COSMOLOGY, DEFAULT_COSMOLOGY.name]
 
 
 def get_cosmology(cosmology=None):
+    from astropy import cosmology as cosmo
+    _set_default_cosmology()
     if cosmology is None:
-        cosmology = COSMOLOGY[0]
+        cosmology = DEFAULT_COSMOLOGY
     elif isinstance(cosmology, str):
         cosmology = cosmo.__dict__[cosmology]
     return cosmology
@@ -41,6 +42,8 @@ def set_cosmology(cosmology=None):
     cosmo: astropy.cosmology.FLRW
         Cosmology instance
     """
+    from astropy import cosmology as cosmo
+    _set_default_cosmology()
     if cosmology is None:
         cosmology = DEFAULT_COSMOLOGY
     elif isinstance(cosmology, cosmo.FLRW):
diff --git a/bilby/gw/detector/__init__.py b/bilby/gw/detector/__init__.py
index 37d6027634365d2fd7f349a694d0836e19434278..b68313c0fb2f84c3c33dac380a0a1fbadb4c53a2 100644
--- a/bilby/gw/detector/__init__.py
+++ b/bilby/gw/detector/__init__.py
@@ -5,13 +5,6 @@ from .networks import *
 from .psd import *
 from .strain_data import *
 
-try:
-    import lal
-    import lalsimulation as lalsim
-except ImportError:
-    logger.debug("You do not have lalsuite installed currently. You will"
-                 " not be able to use some of the prebuilt functions.")
-
 
 def get_safe_signal_duration(mass_1, mass_2, a_1, a_2, tilt_1, tilt_2, flow=10):
     """ Calculate the safe signal duration, given the parameters
@@ -30,8 +23,10 @@ def get_safe_signal_duration(mass_1, mass_2, a_1, a_2, tilt_1, tilt_2, flow=10):
         to the nearest power of 2)
 
     """
-    chirp_time = lalsim.SimInspiralChirpTimeBound(
-        flow, mass_1 * lal.MSUN_SI, mass_2 * lal.MSUN_SI,
+    from lal import MSUN_SI
+    from lalsimulation import SimInspiralChirpTimeBound
+    chirp_time = SimInspiralChirpTimeBound(
+        flow, mass_1 * MSUN_SI, mass_2 * MSUN_SI,
         a_1 * np.cos(tilt_1), a_2 * np.cos(tilt_2))
     return max(2**(int(np.log2(chirp_time)) + 1), 4)
 
@@ -329,7 +324,7 @@ def load_data_by_channel_name(
     ifo = get_empty_interferometer(det)
 
     ifo.set_strain_data_from_channel_name(
-        channel = channel_name,
+        channel=channel_name,
         sampling_frequency=sampling_frequency,
         duration=segment_duration,
         start_time=start_time)
diff --git a/bilby/gw/detector/calibration.py b/bilby/gw/detector/calibration.py
index 88a5a664fbc8f148b6628b50532ec651254d77cf..b52eb9284307bb8f435f0418923bf53ee83c5eee 100644
--- a/bilby/gw/detector/calibration.py
+++ b/bilby/gw/detector/calibration.py
@@ -2,7 +2,6 @@
 """
 
 import numpy as np
-import tables
 from scipy.interpolate import interp1d
 
 
@@ -29,6 +28,7 @@ def read_calibration_file(filename, frequency_array, number_of_response_curves,
         Shape is (number_of_response_curves x len(frequency_array))
 
     """
+    import tables
 
     calibration_file = tables.open_file(filename, 'r')
     calibration_amplitude = \
@@ -71,6 +71,7 @@ def write_calibration_file(filename, frequency_array, calibration_draws, calibra
         Parameters used to generate the random draws of the calibration response curves
 
     """
+    import tables
 
     calibration_file = tables.open_file(filename, 'w')
     deltaR_group = calibration_file.create_group(calibration_file.root, 'deltaR')
diff --git a/bilby/gw/detector/interferometer.py b/bilby/gw/detector/interferometer.py
index 72a00168e5a4e1e08f3bc055f9c787ed199ea9d8..7155d0f4445f5c11eae8327845a87dcd3e8622d7 100644
--- a/bilby/gw/detector/interferometer.py
+++ b/bilby/gw/detector/interferometer.py
@@ -2,7 +2,6 @@ import os
 import sys
 
 import numpy as np
-from matplotlib import pyplot as plt
 
 from ...core import utils
 from ...core.utils import docstring, logger
@@ -12,13 +11,6 @@ from .calibration import Recalibrate
 from .geometry import InterferometerGeometry
 from .strain_data import InterferometerStrainData
 
-try:
-    import gwpy
-    import gwpy.signal
-except ImportError:
-    logger.debug("You do not have gwpy installed currently. You will "
-                 " not be able to use some of the prebuilt functions.")
-
 
 class Interferometer(object):
     """Class for the Interferometer """
@@ -599,6 +591,7 @@ class Interferometer(object):
                    header='f h(f)')
 
     def plot_data(self, signal=None, outdir='.', label=None):
+        import matplotlib.pyplot as plt
         if utils.command_line_args.bilby_test_mode:
             return
 
@@ -656,23 +649,26 @@ class Interferometer(object):
             plotting.
 
         """
+        import matplotlib.pyplot as plt
+        from gwpy.timeseries import TimeSeries
+        from gwpy.signal.filter_design import bandpass, concatenate_zpks, notch
 
         # We use the gwpy timeseries to perform bandpass and notching
         if notches is None:
             notches = list()
-        timeseries = gwpy.timeseries.TimeSeries(
+        timeseries = TimeSeries(
             data=self.strain_data.time_domain_strain, times=self.strain_data.time_array)
         zpks = []
         if bandpass_frequencies is not None:
-            zpks.append(gwpy.signal.filter_design.bandpass(
+            zpks.append(bandpass(
                 bandpass_frequencies[0], bandpass_frequencies[1],
                 self.strain_data.sampling_frequency))
         if notches is not None:
             for line in notches:
-                zpks.append(gwpy.signal.filter_design.notch(
+                zpks.append(notch(
                     line, self.strain_data.sampling_frequency))
         if len(zpks) > 0:
-            zpk = gwpy.signal.filter_design.concatenate_zpks(*zpks)
+            zpk = concatenate_zpks(*zpks)
             strain = timeseries.filter(zpk, filtfilt=False)
         else:
             strain = timeseries
diff --git a/bilby/gw/detector/networks.py b/bilby/gw/detector/networks.py
index 8abd64c3f7ed1eac15f61a90925b8b323fb9efb4..d354f2bf48a9f98c2d21b981fe7179affca7d3ac 100644
--- a/bilby/gw/detector/networks.py
+++ b/bilby/gw/detector/networks.py
@@ -1,16 +1,13 @@
 import os
 import sys
-import warnings
 
 import numpy as np
 import math
 
 from ...core import utils
 from ...core.utils import logger
-from .. import utils as gwutils
 from .interferometer import Interferometer
 from .psd import PowerSpectralDensity
-from .strain_data import InterferometerStrainData
 
 
 class InterferometerList(list):
@@ -324,73 +321,6 @@ class TriangularInterferometer(InterferometerList):
             longitude += np.arctan(length * np.cos(xarm_azimuth * np.pi / 180) * 1e3 / utils.radius_of_earth)
 
 
-def get_event_data(
-        event, interferometer_names=None, duration=4, roll_off=0.2,
-        psd_offset=-1024, psd_duration=100, cache=True, outdir='outdir',
-        label=None, plot=True, filter_freq=None, **kwargs):
-    """
-    Get open data for a specified event.
-
-    Parameters
-    ==========
-    event: str
-        Event descriptor, this can deal with some prefixes, e.g., '150914',
-        'GW150914', 'LVT151012'
-    interferometer_names: list, optional
-        List of interferometer identifiers, e.g., 'H1'.
-        If None will look for data in 'H1', 'V1', 'L1'
-    duration: float
-        Time duration to search for.
-    roll_off: float
-        The roll-off (in seconds) used in the Tukey window.
-    psd_offset, psd_duration: float
-        The power spectral density (psd) is estimated using data from
-        `center_time+psd_offset` to `center_time+psd_offset + psd_duration`.
-    cache: bool
-        Whether or not to store the acquired data.
-    outdir: str
-        Directory where the psd files are saved
-    label: str
-        If given, an identifying label used in generating file names.
-    plot: bool
-        If true, create an ASD + strain plot
-    filter_freq: float
-        Low pass filter frequency
-    **kwargs:
-        All keyword arguments are passed to
-        `gwpy.timeseries.TimeSeries.fetch_open_data()`.
-
-    Returns
-    ======
-    list: A list of bilby.gw.detector.Interferometer objects
-    """
-
-    warnings.warn(
-        "get_event_data is deprecated, use set_strain_data_from_gwpy instead",
-        DeprecationWarning
-    )
-
-    event_time = gwutils.get_event_time(event)
-
-    interferometers = []
-
-    if interferometer_names is None:
-        interferometer_names = ['H1', 'L1', 'V1']
-
-    for name in interferometer_names:
-        try:
-            interferometers.append(get_interferometer_with_open_data(
-                name, trigger_time=event_time, duration=duration, roll_off=roll_off,
-                psd_offset=psd_offset, psd_duration=psd_duration, cache=cache,
-                outdir=outdir, label=label, plot=plot, filter_freq=filter_freq,
-                **kwargs))
-        except ValueError as e:
-            logger.debug("Error raised {}".format(e))
-            logger.warning('No data found for {}.'.format(name))
-
-    return InterferometerList(interferometers)
-
-
 def get_empty_interferometer(name):
     """
     Get an interferometer with standard parameters for known detectors.
@@ -452,90 +382,3 @@ def load_interferometer(filename):
     else:
         raise IOError("{} could not be loaded. Invalid parameter 'shape'.".format(filename))
     return ifo
-
-
-def get_interferometer_with_open_data(
-        name, trigger_time, duration=4, start_time=None, roll_off=0.2,
-        psd_offset=-1024, psd_duration=100, cache=True, outdir='outdir',
-        label=None, plot=True, filter_freq=None, **kwargs):
-    """
-    Helper function to obtain an Interferometer instance with appropriate
-    PSD and data, given an center_time.
-
-    Parameters
-    ==========
-
-    name: str
-        Detector name, e.g., 'H1'.
-    trigger_time: float
-        Trigger GPS time.
-    duration: float, optional
-        The total time (in seconds) to analyse. Defaults to 4s.
-    start_time: float, optional
-        Beginning of the segment, if None, the trigger is placed 2s before the end
-        of the segment.
-    roll_off: float
-        The roll-off (in seconds) used in the Tukey window.
-    psd_offset, psd_duration: float
-        The power spectral density (psd) is estimated using data from
-        `center_time+psd_offset` to `center_time+psd_offset + psd_duration`.
-    cache: bool, optional
-        Whether or not to store the acquired data
-    outdir: str
-        Directory where the psd files are saved
-    label: str
-        If given, an identifying label used in generating file names.
-    plot: bool
-        If true, create an ASD + strain plot
-    filter_freq: float
-        Low pass filter frequency
-    **kwargs:
-        All keyword arguments are passed to
-        `gwpy.timeseries.TimeSeries.fetch_open_data()`.
-
-    Returns
-    =======
-    bilby.gw.detector.Interferometer: An Interferometer instance with a PSD and frequency-domain strain data.
-
-    """
-
-    warnings.warn(
-        "get_interferometer_with_open_data is deprecated, use set_strain_data_from_gwpy instead",
-        DeprecationWarning
-    )
-
-    logger.warning(
-        "Parameter estimation for real interferometer data in bilby is in "
-        "alpha testing at the moment: the routines for windowing and filtering"
-        " have not been reviewed.")
-
-    utils.check_directory_exists_and_if_not_mkdir(outdir)
-
-    if start_time is None:
-        start_time = trigger_time + 2 - duration
-
-    strain = InterferometerStrainData(roll_off=roll_off)
-    strain.set_from_open_data(
-        name=name, start_time=start_time, duration=duration,
-        outdir=outdir, cache=cache, **kwargs)
-    strain.low_pass_filter(filter_freq)
-
-    strain_psd = InterferometerStrainData(roll_off=roll_off)
-    strain_psd.set_from_open_data(
-        name=name, start_time=start_time + duration + psd_offset,
-        duration=psd_duration, outdir=outdir, cache=cache, **kwargs)
-    # Low pass filter
-    strain_psd.low_pass_filter(filter_freq)
-    # Create and save PSDs
-    psd_frequencies, psd_array = strain_psd.create_power_spectral_density(
-        name=name, outdir=outdir, fft_length=strain.duration)
-
-    interferometer = get_empty_interferometer(name)
-    interferometer.power_spectral_density = PowerSpectralDensity(
-        psd_array=psd_array, frequency_array=psd_frequencies)
-    interferometer.strain_data = strain
-
-    if plot:
-        interferometer.plot_data(outdir=outdir, label=label)
-
-    return interferometer
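The removed helpers can be reproduced in a few lines of gwpy; a hedged sketch of the replacement workflow their deprecation warnings pointed to (event time and detector are illustrative):

from gwpy.timeseries import TimeSeries
import bilby

ifo = bilby.gw.detector.get_empty_interferometer("H1")
open_data = TimeSeries.fetch_open_data("H1", 1126259458, 1126259462)
ifo.strain_data.set_from_gwpy_timeseries(open_data)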
diff --git a/bilby/gw/detector/psd.py b/bilby/gw/detector/psd.py
index e0ddd12d58a394c382aa644503ef45ee37abf084..a3948f96621056d41f5ca5554a46d276e0097bb6 100644
--- a/bilby/gw/detector/psd.py
+++ b/bilby/gw/detector/psd.py
@@ -359,7 +359,8 @@ class PowerSpectralDensity(object):
 
         """
         white_noise, frequencies = utils.create_white_noise(sampling_frequency, duration)
-        frequency_domain_strain = self.__power_spectral_density_interpolated(frequencies) ** 0.5 * white_noise
+        with np.errstate(invalid="ignore"):
+            frequency_domain_strain = self.__power_spectral_density_interpolated(frequencies) ** 0.5 * white_noise
         out_of_bounds = (frequencies < min(self.frequency_array)) | (frequencies > max(self.frequency_array))
         frequency_domain_strain[out_of_bounds] = 0 * (1 + 1j)
         return frequency_domain_strain, frequencies
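The guarded line above colours white noise by the interpolated PSD; the same operation as a self-contained numpy sketch, with a toy flat PSD standing in for the interpolant:

import numpy as np

frequencies = np.linspace(0, 1024, 4097)
psd = np.full_like(frequencies, 1e-46)  # toy PSD values
white_noise = np.random.randn(frequencies.size) + 1j * np.random.randn(frequencies.size)
with np.errstate(invalid="ignore"):
    frequency_domain_strain = psd ** 0.5 * white_noise
out_of_bounds = (frequencies < 20) | (frequencies > 1000)  # illustrative band
frequency_domain_strain[out_of_bounds] = 0 * (1 + 1j)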
diff --git a/bilby/gw/detector/strain_data.py b/bilby/gw/detector/strain_data.py
index 71d8acf6d52ee7f1eae4b6e176607ebd44523e17..b6e2b46c9a5579892d31c16bc43f60bc13e469a0 100644
--- a/bilby/gw/detector/strain_data.py
+++ b/bilby/gw/detector/strain_data.py
@@ -1,5 +1,4 @@
 import numpy as np
-from scipy.signal.windows import tukey
 
 from ...core import utils
 from ...core.series import CoupledTimeAndFrequencySeries
@@ -7,19 +6,6 @@ from ...core.utils import logger
 from .. import utils as gwutils
 from ..utils import PropertyAccessor
 
-try:
-    import gwpy
-    import gwpy.signal
-except ImportError:
-    logger.debug("You do not have gwpy installed currently. You will "
-                 " not be able to use some of the prebuilt functions.")
-
-try:
-    import lal
-except ImportError:
-    logger.debug("You do not have lalsuite installed currently. You will"
-                 " not be able to use some of the prebuilt functions.")
-
 
 class InterferometerStrainData(object):
     """ Strain data for an interferometer """
@@ -198,6 +184,7 @@ class InterferometerStrainData(object):
         window: array
             Window function over time array
         """
+        from scipy.signal.windows import tukey
         if roll_off is not None:
             self.roll_off = roll_off
         elif alpha is not None:
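The deferred import above supplies the Tukey window; elsewhere in this file the window alpha is tied to the roll-off by alpha = 2 * roll_off / duration, e.g.:

from scipy.signal.windows import tukey

duration, roll_off, sampling_frequency = 4.0, 0.2, 2048
window = tukey(int(duration * sampling_frequency), alpha=2 * roll_off / duration)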
@@ -253,11 +240,15 @@ class InterferometerStrainData(object):
         """
         Output the time series strain data as a :class:`gwpy.timeseries.TimeSeries`.
         """
+        try:
+            from gwpy.timeseries import TimeSeries
+        except ModuleNotFoundError:
+            raise ModuleNotFoundError("Cannot output strain data as gwpy TimeSeries")
 
-        return gwpy.timeseries.TimeSeries(self.time_domain_strain,
-                                          sample_rate=self.sampling_frequency,
-                                          t0=self.start_time,
-                                          channel=self.channel)
+        return TimeSeries(
+            self.time_domain_strain, sample_rate=self.sampling_frequency,
+            t0=self.start_time, channel=self.channel
+        )
 
     def to_pycbc_timeseries(self):
         """
@@ -265,37 +256,48 @@ class InterferometerStrainData(object):
         """
 
         try:
-            import pycbc
-        except ImportError:
-            raise ImportError("Cannot output strain data as PyCBC TimeSeries")
+            from pycbc.types.timeseries import TimeSeries
+            from lal import LIGOTimeGPS
+        except ModuleNotFoundError:
+            raise ModuleNotFoundError("Cannot output strain data as PyCBC TimeSeries")
 
-        return pycbc.types.timeseries.TimeSeries(self.time_domain_strain,
-                                                 delta_t=(1. / self.sampling_frequency),
-                                                 epoch=lal.LIGOTimeGPS(self.start_time))
+        return TimeSeries(
+            self.time_domain_strain, delta_t=(1. / self.sampling_frequency),
+            epoch=LIGOTimeGPS(self.start_time)
+        )
 
     def to_lal_timeseries(self):
         """
         Output the time series strain data as a LAL TimeSeries object.
         """
+        try:
+            from lal import CreateREAL8TimeSeries, LIGOTimeGPS, SecondUnit
+        except ModuleNotFoundError:
+            raise ModuleNotFoundError("Cannot output strain data as PyCBC TimeSeries")
 
-        laldata = lal.CreateREAL8TimeSeries("",
-                                            lal.LIGOTimeGPS(self.start_time),
-                                            0., (1. / self.sampling_frequency),
-                                            lal.SecondUnit,
-                                            len(self.time_domain_strain))
-        laldata.data.data[:] = self.time_domain_strain
+        lal_data = CreateREAL8TimeSeries(
+            "", LIGOTimeGPS(self.start_time), 0, 1 / self.sampling_frequency,
+            SecondUnit, len(self.time_domain_strain)
+        )
+        lal_data.data.data[:] = self.time_domain_strain
 
-        return laldata
+        return lal_data
 
     def to_gwpy_frequencyseries(self):
         """
         Output the frequency series strain data as a :class:`gwpy.frequencyseries.FrequencySeries`.
         """
+        try:
+            from gwpy.frequencyseries import FrequencySeries
+        except ModuleNotFoundError:
+            raise ModuleNotFoundError("Cannot output strain data as gwpy FrequencySeries")
 
-        return gwpy.frequencyseries.FrequencySeries(self.frequency_domain_strain,
-                                                    frequencies=self.frequency_array,
-                                                    epoch=self.start_time,
-                                                    channel=self.channel)
+        return FrequencySeries(
+            self.frequency_domain_strain,
+            frequencies=self.frequency_array,
+            epoch=self.start_time,
+            channel=self.channel
+        )
 
     def to_pycbc_frequencyseries(self):
         """
@@ -303,31 +305,42 @@ class InterferometerStrainData(object):
         """
 
         try:
-            import pycbc
+            from pycbc.types.frequencyseries import FrequencySeries
+            from lal import LIGOTimeGPS
         except ImportError:
             raise ImportError("Cannot output strain data as PyCBC FrequencySeries")
 
-        return pycbc.types.frequencyseries.FrequencySeries(self.frequency_domain_strain,
-                                                           delta_f=(self.frequency_array[1] - self.frequency_array[0]),
-                                                           epoch=lal.LIGOTimeGPS(self.start_time))
+        return FrequencySeries(
+            self.frequency_domain_strain,
+            delta_f=1 / self.duration,
+            epoch=LIGOTimeGPS(self.start_time)
+        )
 
     def to_lal_frequencyseries(self):
         """
         Output the frequency series strain data as a LAL FrequencySeries object.
         """
-
-        laldata = lal.CreateCOMPLEX16FrequencySeries("",
-                                                     lal.LIGOTimeGPS(self.start_time),
-                                                     self.frequency_array[0],
-                                                     (self.frequency_array[1] - self.frequency_array[0]),
-                                                     lal.SecondUnit,
-                                                     len(self.frequency_domain_strain))
-        laldata.data.data[:] = self.frequency_domain_strain
-
-        return laldata
+        try:
+            from lal import CreateCOMPLEX16FrequencySeries, LIGOTimeGPS, SecondUnit
+        except ModuleNotFoundError:
+            raise ModuleNotFoundError("Cannot output strain data as PyCBC TimeSeries")
+
+        lal_data = CreateCOMPLEX16FrequencySeries(
+            "",
+            LIGOTimeGPS(self.start_time),
+            self.frequency_array[0],
+            1 / self.duration,
+            SecondUnit,
+            len(self.frequency_domain_strain)
+        )
+        lal_data.data.data[:] = self.frequency_domain_strain
+
+        return lal_data
 
     def low_pass_filter(self, filter_freq=None):
         """ Low pass filter the data """
+        from gwpy.signal.filter_design import lowpass
+        from gwpy.timeseries import TimeSeries
 
         if filter_freq is None:
             logger.debug(
@@ -342,10 +355,8 @@ class InterferometerStrainData(object):
             return
 
         logger.debug("Applying low pass filter with filter frequency {}".format(filter_freq))
-        bp = gwpy.signal.filter_design.lowpass(
-            filter_freq, self.sampling_frequency)
-        strain = gwpy.timeseries.TimeSeries(
-            self.time_domain_strain, sample_rate=self.sampling_frequency)
+        bp = lowpass(filter_freq, self.sampling_frequency)
+        strain = TimeSeries(self.time_domain_strain, sample_rate=self.sampling_frequency)
         strain = strain.filter(bp, filtfilt=True)
         self._time_domain_strain = strain.value
 
@@ -379,6 +390,7 @@ class InterferometerStrainData(object):
             The frequencies and power spectral density array
 
         """
+        from gwpy.timeseries import TimeSeries
 
         data = self.time_domain_strain
 
@@ -394,7 +406,7 @@ class InterferometerStrainData(object):
                 data = data[idxs]
 
         # WARNING this line can cause issues if the data is non-contiguous
-        strain = gwpy.timeseries.TimeSeries(data=data, sample_rate=self.sampling_frequency)
+        strain = TimeSeries(data=data, sample_rate=self.sampling_frequency)
         psd_alpha = 2 * self.roll_off / fft_length
         logger.info(
             "Tukey window PSD data with alpha={}, roll off={}".format(
@@ -500,8 +512,9 @@ class InterferometerStrainData(object):
             The data to use
 
         """
+        from gwpy.timeseries import TimeSeries
         logger.debug('Setting data using provided gwpy TimeSeries object')
-        if type(time_series) != gwpy.timeseries.TimeSeries:
+        if not isinstance(time_series, TimeSeries):
             raise ValueError("Input time_series is not a gwpy TimeSeries")
         self._times_and_frequencies = \
             CoupledTimeAndFrequencySeries(duration=time_series.duration.value,
@@ -557,7 +570,8 @@ class InterferometerStrainData(object):
             The path to the file to read in
 
         """
-        timeseries = gwpy.timeseries.TimeSeries.read(filename, format='csv')
+        from gwpy.timeseries import TimeSeries
+        timeseries = TimeSeries.read(filename, format='csv')
         self.set_from_gwpy_timeseries(timeseries)
 
     def set_from_frequency_domain_strain(
@@ -700,6 +714,7 @@ class InterferometerStrainData(object):
             The sampling frequency (in Hz)
 
         """
+        from gwpy.timeseries import TimeSeries
         channel_comp = channel.split(':')
         if len(channel_comp) != 2:
             raise IndexError('Channel name must have format `IFO:Channel`')
@@ -709,8 +724,7 @@ class InterferometerStrainData(object):
             start_time=start_time)
 
         logger.info('Fetching data using channel {}'.format(channel))
-        strain = gwpy.timeseries.TimeSeries.get(
-            channel, start_time, start_time + duration)
+        strain = TimeSeries.get(channel, start_time, start_time + duration)
         strain = strain.resample(sampling_frequency)
 
         self.set_from_gwpy_timeseries(strain)
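With every gwpy and lal import now deferred into the method that needs it, `import bilby` succeeds without either package; a hedged round-trip sketch, assuming gwpy is available:

import numpy as np
import bilby
from gwpy.timeseries import TimeSeries

ts = TimeSeries(np.random.randn(4096), sample_rate=1024, t0=0)
strain = bilby.gw.detector.InterferometerStrainData()
strain.set_from_gwpy_timeseries(ts)
ts_again = strain.to_gwpy_timeseries()  # ModuleNotFoundError only if gwpy is missing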
diff --git a/bilby/gw/eos/eos.py b/bilby/gw/eos/eos.py
index b2a45dc222c08756ec033b96b864f49813e8842e..f63c2b83fd2cb0d8d371addd7dcc472f51efb043 100644
--- a/bilby/gw/eos/eos.py
+++ b/bilby/gw/eos/eos.py
@@ -1,9 +1,6 @@
 import os
 import numpy as np
-import matplotlib.pyplot as plt
-from scipy.integrate import cumtrapz, quad
 from scipy.interpolate import interp1d, CubicSpline
-from scipy.optimize import minimize_scalar
 
 from .tov_solver import IntegrateTOV
 from ...core import utils
@@ -58,6 +55,7 @@ class TabularEOS(object):
     """
 
     def __init__(self, eos, sampling_flag=False, warning_flag=False):
+        from scipy.integrate import cumtrapz
 
         self.sampling_flag = sampling_flag
         self.warning_flag = warning_flag
@@ -398,6 +396,7 @@ class TabularEOS(object):
         fig: matplotlib.figure.Figure
             EOS plot.
         """
+        import matplotlib.pyplot as plt
 
         # Set data based on specified representation
         varnames = rep.split('-')
@@ -538,7 +537,7 @@ class SpectralDecompositionEOS(TabularEOS):
         return 1. / spectral_adiabatic_index(self.gammas, x)
 
     def mu(self, x):
-
+        from scipy.integrate import quad
         return np.exp(-quad(self.__mu_integrand, 0, x)[0])
 
     def __eps_integrand(self, x):
@@ -546,7 +545,7 @@ class SpectralDecompositionEOS(TabularEOS):
         return np.exp(x) * self.mu(x) / spectral_adiabatic_index(self.gammas, x)
 
     def energy_density(self, x, eps0):
-
+        from scipy.integrate import quad
         quad_result, quad_err = quad(self.__eps_integrand, 0, x)
         eps_of_x = (eps0 * C_CGS ** 2.) / self.mu(x) + self.p0 / self.mu(x) * quad_result
         return eps_of_x
@@ -638,6 +637,7 @@ class EOSFamily(object):
     populated here via the TOV solver upon object construction.
     """
     def __init__(self, eos, npts=500):
+        from scipy.optimize import minimize_scalar
         self.eos = eos
 
         # FIXME: starting_energy_density is set somewhat arbitrarily
@@ -781,6 +781,7 @@ class EOSFamily(object):
         fig: matplotlib.figure.Figure
             EOS Family plot.
         """
+        import matplotlib.pyplot as plt
 
         # Set data based on specified representation
         varnames = rep.split('-')
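The deferred scipy.integrate.quad import evaluates integrals of the form mu(x) = exp(-int_0^x dx' / Gamma(x')); a minimal check with a constant adiabatic index, for which the integral is x / Gamma:

import numpy as np
from scipy.integrate import quad

gamma, x = 2.0, 1.0
mu = np.exp(-quad(lambda xp: 1.0 / gamma, 0, x)[0])
assert np.isclose(mu, np.exp(-x / gamma))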
diff --git a/bilby/gw/eos/tov_solver.py b/bilby/gw/eos/tov_solver.py
index 5b5d7e13260ebff83318ab1478299db1a43f3602..0135086efaaf3bc9ac7741b03be8c4367e44a926 100644
--- a/bilby/gw/eos/tov_solver.py
+++ b/bilby/gw/eos/tov_solver.py
@@ -1,7 +1,6 @@
 # Monica Rizzo, 2019
 
 import numpy as np
-from scipy.integrate import solve_ivp
 
 
 class IntegrateTOV:
@@ -111,6 +110,7 @@ class IntegrateTOV:
         """
         Evolves TOV+k2 equations and returns final quantities
         """
+        from scipy.integrate import solve_ivp
 
         # integration settings the same as in lalsimulation
         rel_err = 1e-4
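The solver now imports solve_ivp only when evolve() runs; a minimal sketch of the call pattern with the 1e-4 relative tolerance noted above (the ODE itself is a stand-in, not the TOV system):

from scipy.integrate import solve_ivp

sol = solve_ivp(lambda t, y: -y, (0.0, 1.0), [1.0], rtol=1e-4)
assert sol.success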
diff --git a/bilby/gw/likelihood.py b/bilby/gw/likelihood.py
index 0f113683972874cf4d9267b6e07682205c6bd53e..e0558c04d986e3f76c15f262395640dff3382e6e 100644
--- a/bilby/gw/likelihood.py
+++ b/bilby/gw/likelihood.py
@@ -1,20 +1,12 @@
 
-import gc
 import os
 import json
 import copy
 
 import numpy as np
-import scipy.integrate as integrate
-from scipy.interpolate import interp1d
 import pandas as pd
-from tqdm import tqdm
-
-try:
-    from scipy.special import logsumexp
-except ImportError:
-    from scipy.misc import logsumexp
-from scipy.special import i0e
+from scipy.interpolate import interp1d
+from scipy.special import logsumexp, i0e
 
 from ..core.likelihood import Likelihood
 from ..core.utils import BilbyJsonEncoder, decode_bilby_json
@@ -26,8 +18,7 @@ from .detector import InterferometerList, get_empty_interferometer, calibration
 from .prior import BBHPriorDict, CBCPriorDict, Cosmological
 from .source import lal_binary_black_hole
 from .utils import (
-    noise_weighted_inner_product, build_roq_weights, blockwise_dot_product,
-    zenith_azimuth_to_ra_dec)
+    noise_weighted_inner_product, build_roq_weights, zenith_azimuth_to_ra_dec)
 from .waveform_generator import WaveformGenerator
 from collections import namedtuple
 
@@ -165,8 +156,12 @@ class GravitationalWaveTransient(Likelihood):
             priors['geocent_time'] = float(self.interferometers.start_time)
             if self.jitter_time:
                 priors['time_jitter'] = Uniform(
-                    minimum=- self._delta_tc / 2, maximum=self._delta_tc / 2,
-                    boundary='periodic')
+                    minimum=- self._delta_tc / 2,
+                    maximum=self._delta_tc / 2,
+                    boundary='periodic',
+                    name="time_jitter",
+                    latex_label="$t_j$"
+                )
             self._marginalized_parameters.append('geocent_time')
         elif self.jitter_time:
             logger.debug(
@@ -939,10 +934,11 @@ class GravitationalWaveTransient(Likelihood):
         self.cache_lookup_table()
 
     def _setup_phase_marginalization(self, min_bound=-5, max_bound=10):
+        x_values = np.logspace(min_bound, max_bound, int(1e6))
         self._bessel_function_interped = interp1d(
-            np.logspace(-5, max_bound, int(1e6)), np.logspace(-5, max_bound, int(1e6)) +
-            np.log([i0e(snr) for snr in np.logspace(-5, max_bound, int(1e6))]),
-            bounds_error=False, fill_value=(0, np.nan))
+            x_values, x_values + np.log(i0e(x_values)),  # i0e is a ufunc; no Python loop needed
+            bounds_error=False, fill_value=(0, np.nan)
+        )
 
     def _setup_time_marginalization(self):
         self._delta_tc = 2 / self.waveform_generator.sampling_frequency
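The rewritten lookup table stores x + log(i0e(x)) rather than log(i0(x)): scipy defines i0e(x) = exp(-x) * i0(x), so the two are mathematically identical, but the former stays finite where i0 overflows. A quick standalone check:

import numpy as np
from scipy.special import i0, i0e

x = 10.0
assert np.isclose(np.log(i0(x)), x + np.log(i0e(x)))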
@@ -980,6 +976,7 @@ class GravitationalWaveTransient(Likelihood):
                         self.number_of_response_curves, self.starting_index)
 
             else:  # generate the fake curves
+                from tqdm.auto import tqdm
                 self.calibration_parameter_draws[interferometer.name] =\
                     pd.DataFrame(calibration_priors.sample(self.number_of_response_curves))
 
@@ -1360,7 +1357,8 @@ class ROQGravitationalWaveTransient(GravitationalWaveTransient):
             np.vdot(np.abs(h_plus_quadratic + h_cross_quadratic)**2,
                     self.weights[interferometer.name + '_quadratic'])
 
-        complex_matched_filter_snr = d_inner_h / (optimal_snr_squared**0.5)
+        with np.errstate(invalid="ignore"):
+            complex_matched_filter_snr = d_inner_h / (optimal_snr_squared**0.5)
         d_inner_h_squared_tc_array = None
 
         return self._CalculatedSNRs(
@@ -1464,7 +1462,9 @@ class ROQGravitationalWaveTransient(GravitationalWaveTransient):
                         roq_minimum_component_mass))
 
     def _set_weights(self, linear_matrix, quadratic_matrix):
-        """ Setup the time-dependent ROQ weights.
+        """
+        Set up the time-dependent ROQ weights.
+        See https://dcc.ligo.org/LIGO-T2100125 for details of how they are computed.
 
         Parameters
         ==========
@@ -1474,15 +1474,26 @@ class ROQGravitationalWaveTransient(GravitationalWaveTransient):
         """
 
         time_space = self._get_time_resolution()
+        number_of_time_samples = int(self.interferometers.duration / time_space)
+        try:
+            import pyfftw
+            ifft_input = pyfftw.empty_aligned(number_of_time_samples, dtype=complex)
+            ifft_output = pyfftw.empty_aligned(number_of_time_samples, dtype=complex)
+            ifft = pyfftw.FFTW(ifft_input, ifft_output, direction='FFTW_BACKWARD')
+        except ImportError:
+            pyfftw = None
+            logger.warning("You do not have pyfftw installed, falling back to numpy.fft.")
+            ifft_input = np.zeros(number_of_time_samples, dtype=complex)
+            ifft = np.fft.ifft
         # Maximum delay time to geocentre + 5 steps
         earth_light_crossing_time = radius_of_earth / speed_of_light + 5 * time_space
-        delta_times = np.arange(
-            self.priors['{}_time'.format(self.time_reference)].minimum - earth_light_crossing_time,
-            self.priors['{}_time'.format(self.time_reference)].maximum + earth_light_crossing_time,
-            time_space)
-        time_samples = delta_times - self.interferometers.start_time
-        self.weights['time_samples'] = time_samples
-        logger.info("Using {} ROQ time samples".format(len(time_samples)))
+        start_idx = max(0, int(np.floor((self.priors['{}_time'.format(self.time_reference)].minimum -
+                        earth_light_crossing_time - self.interferometers.start_time) / time_space)))
+        end_idx = min(number_of_time_samples - 1, int(np.ceil((
+                      self.priors['{}_time'.format(self.time_reference)].maximum + earth_light_crossing_time -
+                      self.interferometers.start_time) / time_space)))
+        self.weights['time_samples'] = np.arange(start_idx, end_idx + 1) * time_space
+        logger.info("Using {} ROQ time samples".format(len(self.weights['time_samples'])))
 
         for ifo in self.interferometers:
             if self.roq_params is not None:
@@ -1503,8 +1514,8 @@ class ROQGravitationalWaveTransient(GravitationalWaveTransient):
             else:
                 overlap_frequencies = ifo.frequency_array[ifo.frequency_mask]
                 roq_idxs = np.arange(linear_matrix.shape[0], dtype=int)
-                ifo_idxs = [True] * sum(ifo.frequency_mask)
-                if sum(ifo_idxs) != len(roq_idxs):
+                ifo_idxs = np.arange(sum(ifo.frequency_mask))
+                if len(ifo_idxs) != len(roq_idxs):
                     raise ValueError(
                         "Mismatch between ROQ basis and frequency array for "
                         "{}".format(ifo.name))
@@ -1514,34 +1525,16 @@ class ROQGravitationalWaveTransient(GravitationalWaveTransient):
                     ifo.name, len(overlap_frequencies),
                     min(overlap_frequencies), max(overlap_frequencies)))
 
-            logger.debug("Preallocate array")
-            tc_shifted_data = np.zeros((
-                len(self.weights['time_samples']), len(overlap_frequencies)),
-                dtype=complex)
-
-            logger.debug("Calculate shifted data")
-            data = ifo.frequency_domain_strain[ifo.frequency_mask][ifo_idxs]
-            prefactor = (
-                data /
+            ifft_input[:] = 0.  # zero the buffer; pyfftw.empty_aligned does not initialise memory
+            self.weights[ifo.name + '_linear'] = \
+                np.zeros((len(self.weights['time_samples']), linear_matrix.shape[1]), dtype=complex)
+            data_over_psd = ifo.frequency_domain_strain[ifo.frequency_mask][ifo_idxs] / \
                 ifo.power_spectral_density_array[ifo.frequency_mask][ifo_idxs]
-            )
-            for j in range(len(self.weights['time_samples'])):
-                tc_shifted_data[j] = prefactor * np.exp(
-                    2j * np.pi * overlap_frequencies * time_samples[j])
-
-            # to not kill all computers this minimises the memory usage of the
-            # required inner products
-            max_block_gigabytes = 4
-            max_elements = int((max_block_gigabytes * 2 ** 30) / 8)
-
-            logger.debug("Apply dot product")
-            self.weights[ifo.name + '_linear'] = blockwise_dot_product(
-                tc_shifted_data,
-                linear_matrix[roq_idxs],
-                max_elements) * 4 / ifo.strain_data.duration
-
-            del tc_shifted_data, overlap_frequencies
-            gc.collect()
+            nonzero_idxs = ifo_idxs + int(ifo.frequency_array[ifo.frequency_mask][0] * self.interferometers.duration)
+            for i, basis_element in enumerate(linear_matrix[roq_idxs].T):
+                ifft_input[nonzero_idxs] = data_over_psd * np.conj(basis_element)
+                self.weights[ifo.name + '_linear'][:, i] = ifft(ifft_input)[start_idx:end_idx + 1]
+            self.weights[ifo.name + '_linear'] *= 4. * number_of_time_samples / self.interferometers.duration
 
             self.weights[ifo.name + '_quadratic'] = build_roq_weights(
                 1 /
@@ -1551,6 +1544,9 @@ class ROQGravitationalWaveTransient(GravitationalWaveTransient):
 
             logger.info("Finished building weights for {}".format(ifo.name))
 
+        if pyfftw is not None:
+            pyfftw.forget_wisdom()
+
     def save_weights(self, filename, format='npz'):
         if format not in filename:
             filename += "." + format
@@ -1609,10 +1605,11 @@ class ROQGravitationalWaveTransient(GravitationalWaveTransient):
             f_high: float
                 The maximum frequency which must be considered
             """
+            from scipy.integrate import simps
             integrand1 = np.power(freq, -7. / 3) / psd
-            integral1 = integrate.simps(integrand1, freq)
+            integral1 = simps(integrand1, freq)
             integrand3 = np.power(freq, 2. / 3.) / (psd * integral1)
-            f_3_bar = integrate.simps(integrand3, freq)
+            f_3_bar = simps(integrand3, freq)
 
             f_high = scaling * f_3_bar**(1 / 3)
 
@@ -1634,6 +1631,12 @@ class ROQGravitationalWaveTransient(GravitationalWaveTransient):
         # Apply a safety factor to ensure the time step is short enough
         delta_t = delta_t / 5
 
+        # duration / delta_t needs to be a power of 2 for IFFT
+        number_of_time_samples = max(
+            self.interferometers.duration / delta_t,
+            self.interferometers.frequency_array[-1] * self.interferometers.duration + 1)
+        number_of_time_samples = int(2**np.ceil(np.log2(number_of_time_samples)))
+        delta_t = self.interferometers.duration / number_of_time_samples
         logger.info("ROQ time-step = {}".format(delta_t))
         return delta_t
 
diff --git a/bilby/gw/prior.py b/bilby/gw/prior.py
index 899e98060cf7023eb616ec4eeda6ee14089c6e30..739cbf6a02b638c5c41634ee57fe34af2f8b5386 100644
--- a/bilby/gw/prior.py
+++ b/bilby/gw/prior.py
@@ -3,7 +3,6 @@ import copy
 
 import numpy as np
 from scipy.interpolate import InterpolatedUnivariateSpline, interp1d
-from scipy.integrate import cumtrapz
 from scipy.special import hyp2f1
 from scipy.stats import norm
 
@@ -21,12 +20,7 @@ from .conversion import (
     chirp_mass_and_mass_ratio_to_total_mass,
     total_mass_and_mass_ratio_to_component_masses)
 from .cosmology import get_cosmology
-
-try:
-    from astropy import cosmology as cosmo, units
-except ImportError:
-    logger.debug("You do not have astropy installed currently. You will"
-                 " not be able to use some of the prebuilt functions.")
+from .source import PARAMETER_SETS
 
 
 DEFAULT_PRIOR_DIR = os.path.join(os.path.dirname(__file__), 'prior_files')
@@ -99,6 +93,7 @@ class Cosmological(Interped):
 
     @property
     def _default_args_dict(self):
+        from astropy import units
         return dict(
             redshift=dict(name='redshift', latex_label='$z$', unit=None),
             luminosity_distance=dict(
@@ -108,6 +103,7 @@ class Cosmological(Interped):
 
     def __init__(self, minimum, maximum, cosmology=None, name=None,
                  latex_label=None, unit=None, boundary=None):
+        from astropy import units
         self.cosmology = get_cosmology(cosmology)
         if name not in self._default_args_dict:
             raise ValueError(
@@ -172,6 +168,7 @@ class Cosmological(Interped):
         recalculate_array: boolean
             Determines if the distance arrays are recalculated
         """
+        from astropy.cosmology import z_at_value
         cosmology = get_cosmology(self.cosmology)
         limit_dict[self.name] = value
         if self.name == 'redshift':
@@ -183,8 +180,9 @@ class Cosmological(Interped):
             if value == 0:
                 limit_dict['redshift'] = 0
             else:
-                limit_dict['redshift'] = cosmo.z_at_value(
-                    cosmology.luminosity_distance, value * self.unit)
+                limit_dict['redshift'] = z_at_value(
+                    cosmology.luminosity_distance, value * self.unit
+                )
             limit_dict['comoving_distance'] = (
                 cosmology.comoving_distance(limit_dict['redshift']).value
             )
@@ -192,8 +190,9 @@ class Cosmological(Interped):
             if value == 0:
                 limit_dict['redshift'] = 0
             else:
-                limit_dict['redshift'] = cosmo.z_at_value(
-                    cosmology.comoving_distance, value * self.unit)
+                limit_dict['redshift'] = z_at_value(
+                    cosmology.comoving_distance, value * self.unit
+                )
             limit_dict['luminosity_distance'] = (
                 cosmology.luminosity_distance(limit_dict['redshift']).value
             )
@@ -255,8 +254,10 @@ class Cosmological(Interped):
         """
         Get a dictionary containing the arguments needed to reproduce this object.
         """
+        from astropy.cosmology.core import Cosmology
+        from astropy import units
         dict_with_properties = super(Cosmological, self)._repr_dict
-        if isinstance(dict_with_properties['cosmology'], cosmo.core.Cosmology):
+        if isinstance(dict_with_properties['cosmology'], Cosmology):
             if dict_with_properties['cosmology'].name is not None:
                 dict_with_properties['cosmology'] = dict_with_properties['cosmology'].name
         if isinstance(dict_with_properties['unit'], units.Unit):
@@ -369,11 +370,21 @@ class UniformInComponentsMassRatio(Prior):
 
     def rescale(self, val):
         self.test_valid_for_rescaling(val)
-        return self.icdf(val)
+        resc = self.icdf(val)
+        if resc.ndim == 0:
+            return resc.item()
+        else:
+            return resc
 
     def prob(self, val):
         in_prior = (val >= self.minimum) & (val <= self.maximum)
-        return (1. + val)**(2. / 5.) / (val**(6. / 5.)) / self.norm * in_prior
+        with np.errstate(invalid="ignore"):
+            prob = (1. + val)**(2. / 5.) / (val**(6. / 5.)) / self.norm * in_prior
+        return prob
+
+    def ln_prob(self, val):
+        with np.errstate(divide="ignore"):
+            return np.log(self.prob(val))
 
 
 class AlignedSpin(Interped):
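The np.errstate guards added throughout this file suppress the benign divide/invalid warnings raised for out-of-range samples without changing any returned values; a minimal demonstration:

import numpy as np

val = np.array([-1.0, 0.5])
in_range = (val >= 0) & (val <= 1)
with np.errstate(divide="ignore"):
    ln_p = np.log(1.0 * in_range)  # -inf outside the range, 0.0 inside
assert ln_p[0] == -np.inf and ln_p[1] == 0.0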
@@ -488,7 +499,8 @@ class ConditionalChiInPlane(ConditionalBasePrior):
         )
 
     def ln_prob(self, val, **required_variables):
-        return np.log(self.prob(val, **required_variables))
+        with np.errstate(divide="ignore"):
+            return np.log(self.prob(val, **required_variables))
 
     def cdf(self, val, **required_variables):
         r"""
@@ -539,9 +551,11 @@ class ConditionalChiInPlane(ConditionalBasePrior):
         return chi_aligned * ((self._reference_maximum / chi_aligned) ** (2 * val) - 1) ** 0.5
 
     def _condition_function(self, reference_params, **kwargs):
-        return dict(minimum=0, maximum=(
-            self._reference_maximum ** 2 - kwargs[self._required_variables[0]] ** 2
-        ) ** 0.5)
+        with np.errstate(invalid="ignore"):
+            maximum = np.sqrt(
+                self._reference_maximum ** 2 - kwargs[self._required_variables[0]] ** 2
+            )
+        return dict(minimum=0, maximum=maximum)
 
     def __repr__(self):
         return Prior.__repr__(self)
@@ -633,6 +647,43 @@ class CBCPriorDict(ConditionalPriorDict):
             logger.warning("Unable to determine minimum component mass")
             return None
 
+    def is_nonempty_intersection(self, pset):
+        """ Check if any non-fixed parameters in self are in the PARAMETER_SETS entry pset """
+        return len(PARAMETER_SETS[pset].intersection(self.non_fixed_keys)) > 0
+
+    @property
+    def spin(self):
+        """ Return true if priors include any spin parameters """
+        return self.is_nonempty_intersection("spin")
+
+    @property
+    def precession(self):
+        """ Return true if priors include any precession parameters """
+        return self.is_nonempty_intersection("precession_only")
+
+    @property
+    def intrinsic(self):
+        """ Return true if priors include any intrinsic parameters """
+        return self.is_nonempty_intersection("intrinsic")
+
+    @property
+    def extrinsic(self):
+        """ Return true if priors include any extrinsic parameters """
+        return self.is_nonempty_intersection("extrinsic")
+
+    @property
+    def mass(self):
+        """ Return true if priors include any mass parameters """
+        return self.is_nonempty_intersection("mass")
+
+    @property
+    def phase(self):
+        """ Return true if priors include phase parameters """
+        return self.is_nonempty_intersection("phase")
+
 
 class BBHPriorDict(CBCPriorDict):
     def __init__(self, dictionary=None, filename=None, aligned_spin=False,
@@ -827,6 +878,11 @@ class BNSPriorDict(CBCPriorDict):
                 redundant = True
         return redundant
 
+    @property
+    def tidal(self):
+        """ Return true if priors include phase parameters """
+        return self.is_nonempty_intersection("tidal")
+
 
 Prior._default_latex_labels = {
     'mass_1': '$m_1$',
@@ -853,12 +909,16 @@ Prior._default_latex_labels = {
     'psi': '$\psi$',
     'phase': '$\phi$',
     'geocent_time': '$t_c$',
+    'time_jitter': '$t_j$',
     'lambda_1': '$\\Lambda_1$',
     'lambda_2': '$\\Lambda_2$',
     'lambda_tilde': '$\\tilde{\\Lambda}$',
     'delta_lambda_tilde': '$\\delta\\tilde{\\Lambda}$',
     'chi_1': '$\\chi_1$',
-    'chi_2': '$\\chi_2$'}
+    'chi_2': '$\\chi_2$',
+    'chi_1_in_plane': '$\\chi_{1, \\perp}$',
+    'chi_2_in_plane': '$\\chi_{2, \\perp}$',
+}
 
 
 class CalibrationPriorDict(PriorDict):
@@ -1113,6 +1173,7 @@ class HealPixMapPriorDist(BaseJointPriorDist):
         """
         Method that builds the inverse cdf of the P(pixel) distribution for rescaling
         """
+        from scipy.integrate import cumtrapz
         yy = self._all_interped(self.pix_xx)
         yy /= np.trapz(yy, self.pix_xx)
         YY = cumtrapz(yy, self.pix_xx, initial=0)
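The deferred cumtrapz import builds the inverse CDF used for rescaling; the same recipe on a toy pdf:

import numpy as np
from scipy.integrate import cumtrapz
from scipy.interpolate import interp1d

xx = np.linspace(0, 1, 1000)
yy = xx ** 2                      # any positive pdf shape
yy /= np.trapz(yy, xx)            # normalise
YY = cumtrapz(yy, xx, initial=0)  # CDF on the grid
inverse_cdf = interp1d(YY / YY[-1], xx)
median = inverse_cdf(0.5)         # 0.5 ** (1 / 3) for this pdf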
diff --git a/bilby/gw/result.py b/bilby/gw/result.py
index 7116a5bf82bdb5723f9eba80fcdbf0186d34a599..fc599232b3a8cf71633825a4c941b61347b12ba3 100644
--- a/bilby/gw/result.py
+++ b/bilby/gw/result.py
@@ -1,10 +1,7 @@
-
 import json
-import pickle
 import os
+import pickle
 
-import matplotlib.pyplot as plt
-from matplotlib import rcParams
 import numpy as np
 
 from ..core.result import Result as CoreResult
@@ -149,6 +146,7 @@ class CompactBinaryCoalescenceResult(CoreResult):
         format: str
             Format to save the plot, default=png, options are png/pdf
         """
+        import matplotlib.pyplot as plt
         if format not in ["png", "pdf"]:
             raise ValueError("Format should be one of png or pdf")
 
@@ -394,6 +392,8 @@ class CompactBinaryCoalescenceResult(CoreResult):
                 )
             )
         else:
+            import matplotlib.pyplot as plt
+            from matplotlib import rcParams
             old_font_size = rcParams["font.size"]
             rcParams["font.size"] = 20
             fig, axs = plt.subplots(
@@ -734,6 +734,8 @@ class CompactBinaryCoalescenceResult(CoreResult):
             If true, load the cached pickle file (default name), or the
             pickle-file give as a path.
         """
+        import matplotlib.pyplot as plt
+        from matplotlib import rcParams
 
         try:
             from astropy.time import Time
diff --git a/bilby/gw/source.py b/bilby/gw/source.py
index 96e61afc7c06cab2c57d925d9cda0e2ca20ab08f..4f3e3452d815f981705cbc460ec92bf3d498556f 100644
--- a/bilby/gw/source.py
+++ b/bilby/gw/source.py
@@ -10,13 +10,6 @@ from .utils import (lalsim_GetApproximantFromString,
                     lalsim_SimInspiralWaveformParamsInsertTidalLambda2,
                     lalsim_SimInspiralChooseFDWaveformSequence)
 
-try:
-    import lal
-    import lalsimulation as lalsim
-except ImportError:
-    logger.debug("You do not have lalsuite installed currently. You will"
-                 " not be able to use some of the prebuilt functions.")
-
 
 def lal_binary_black_hole(
         frequency_array, mass_1, mass_2, luminosity_distance, a_1, tilt_1,
@@ -293,6 +286,9 @@ def _base_lal_cbc_fd_waveform(
     =======
     dict: A dictionary with the plus and cross polarisation strain modes
     """
+    import lal
+    import lalsimulation as lalsim
+
     waveform_approximant = waveform_kwargs['waveform_approximant']
     reference_frequency = waveform_kwargs['reference_frequency']
     minimum_frequency = waveform_kwargs['minimum_frequency']
@@ -417,19 +413,6 @@ def _base_lal_cbc_fd_waveform(
     return dict(plus=h_plus, cross=h_cross)
 
 
-def roq(
-        frequency_array, mass_1, mass_2, luminosity_distance, a_1, tilt_1,
-        phi_12, a_2, tilt_2, phi_jl, theta_jn, phase, **waveform_arguments):
-    logger.warning(
-        'bilby.gw.source.roq has been deprecated and will be removed. Use '
-        'bilby.gw.source.binary_black_hole_roq instead.')
-    return binary_black_hole_roq(
-        frequency_array=frequency_array, mass_1=mass_1, mass_2=mass_2,
-        luminosity_distance=luminosity_distance, a_1=a_1, tilt_1=tilt_1,
-        phi_12=phi_12, a_2=a_2, tilt_2=tilt_2, phi_jl=phi_jl,
-        theta_jn=theta_jn, phase=phase, **waveform_arguments)
-
-
 def binary_black_hole_roq(
         frequency_array, mass_1, mass_2, luminosity_distance, a_1, tilt_1,
         phi_12, a_2, tilt_2, phi_jl, theta_jn, phase, **waveform_arguments):
@@ -509,6 +492,7 @@ def _base_roq_waveform(
         Dict containing plus and cross modes evaluated at the linear and
         quadratic frequency nodes.
     """
+    from lal import CreateDict
     frequency_nodes_linear = waveform_arguments['frequency_nodes_linear']
     frequency_nodes_quadratic = waveform_arguments['frequency_nodes_quadratic']
     reference_frequency = waveform_arguments['reference_frequency']
@@ -519,7 +503,7 @@ def _base_roq_waveform(
     mass_1 = mass_1 * utils.solar_mass
     mass_2 = mass_2 * utils.solar_mass
 
-    waveform_dictionary = lal.CreateDict()
+    waveform_dictionary = CreateDict()
     lalsim_SimInspiralWaveformParamsInsertTidalLambda1(
         waveform_dictionary, lambda_1)
     lalsim_SimInspiralWaveformParamsInsertTidalLambda2(
@@ -606,3 +590,38 @@ def supernova_pca_model(
                          pc_coeff4 * pc4 + pc_coeff5 * pc5)
 
     return {'plus': h_plus, 'cross': h_cross}
+
+
+precession_only = {
+    "tilt_1", "tilt_2", "phi_12", "phi_jl", "chi_1_in_plane", "chi_2_in_plane",
+}
+
+spin = {
+    "a_1", "a_2", "tilt_1", "tilt_2", "phi_12", "phi_jl", "chi_1", "chi_2",
+    "chi_1_in_plane", "chi_2_in_plane",
+}
+mass = {
+    "chirp_mass", "mass_ratio", "total_mass", "mass_1", "mass_2",
+    "symmetric_mass_ratio",
+}
+primary_spin_and_q = {
+    "a_1", "chi_1", "mass_ratio"
+}
+tidal = {
+    "lambda_1", "lambda_2", "lambda_tilde", "delta_lambda_tilde"
+}
+phase = {
+    "phase", "delta_phase",
+}
+extrinsic = {
+    "azimuth", "zenith", "luminosity_distance", "psi", "theta_jn",
+    "cos_theta_jn", "geocent_time", "time_jitter", "ra", "dec",
+    "H1_time", "L1_time", "V1_time",
+}
+
+PARAMETER_SETS = dict(
+    spin=spin, mass=mass, phase=phase, extrinsic=extrinsic,
+    tidal=tidal, primary_spin_and_q=primary_spin_and_q,
+    intrinsic=spin.union(mass).union(phase).union(tidal),
+    precession_only=precession_only,
+)
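These sets back the new CBCPriorDict properties added in bilby/gw/prior.py; a hedged usage sketch:

import bilby

priors = bilby.gw.prior.BBHPriorDict()
# each property is True if any parameter in the matching set is non-fixed
print(priors.spin, priors.precession, priors.mass, priors.extrinsic)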
diff --git a/bilby/gw/utils.py b/bilby/gw/utils.py
index cf7c0f3e65d8cc0edcc584d6caa7830b96dff45a..685752456e0b5b582003da6853d3d4b23e4c1bb0 100644
--- a/bilby/gw/utils.py
+++ b/bilby/gw/utils.py
@@ -1,29 +1,15 @@
-import os
 import json
+import os
 from math import fmod
 
 import numpy as np
 from scipy.interpolate import interp1d
-import matplotlib.pyplot as plt
 
 from ..core.utils import (ra_dec_to_theta_phi,
                           speed_of_light, logger, run_commandline,
                           check_directory_exists_and_if_not_mkdir,
                           SamplesSummary, theta_phi_to_ra_dec)
 
-try:
-    from gwpy.timeseries import TimeSeries
-except ImportError:
-    logger.debug("You do not have gwpy installed currently. You will "
-                 " not be able to use some of the prebuilt functions.")
-
-try:
-    import lal
-    import lalsimulation as lalsim
-except ImportError:
-    logger.debug("You do not have lalsuite installed currently. You will"
-                 " not be able to use some of the prebuilt functions.")
-
 
 def asd_from_freq_series(freq_data, df):
     """
@@ -88,7 +74,7 @@ def time_delay_geocentric(detector1, detector2, ra, dec, time):
     float: Time delay between the two detectors in the geocentric frame
 
     """
-    gmst = fmod(lal.GreenwichMeanSiderealTime(time), 2 * np.pi)
+    gmst = fmod(greenwich_mean_sidereal_time(time), 2 * np.pi)
     theta, phi = ra_dec_to_theta_phi(ra, dec, gmst)
     omega = np.array([np.sin(theta) * np.cos(phi), np.sin(theta) * np.sin(phi), np.cos(theta)])
     delta_d = detector2 - detector1
@@ -122,7 +108,7 @@ def get_polarization_tensor(ra, dec, time, psi, mode):
     array_like: A 3x3 representation of the polarization_tensor for the specified mode.
 
     """
-    gmst = fmod(lal.GreenwichMeanSiderealTime(time), 2 * np.pi)
+    gmst = fmod(greenwich_mean_sidereal_time(time), 2 * np.pi)
     theta, phi = ra_dec_to_theta_phi(ra, dec, gmst)
     u = np.array([np.cos(phi) * np.cos(theta), np.cos(theta) * np.sin(phi), -np.sin(theta)])
     v = np.array([-np.sin(phi), np.cos(phi), 0])
@@ -391,7 +377,7 @@ def zenith_azimuth_to_ra_dec(zenith, azimuth, geocent_time, ifos):
         The zenith and azimuthal angles in the sky frame.
     """
     theta, phi = zenith_azimuth_to_theta_phi(zenith, azimuth, ifos)
-    gmst = lal.GreenwichMeanSiderealTime(geocent_time)
+    gmst = greenwich_mean_sidereal_time(geocent_time)
     ra, dec = theta_phi_to_ra_dec(theta, phi, gmst)
     ra = ra % (2 * np.pi)
     return ra, dec
@@ -478,6 +464,7 @@ def get_open_strain_data(
         fails, this function returns `None`.
 
     """
+    from gwpy.timeseries import TimeSeries
     filename = '{}/{}_{}_{}.txt'.format(outdir, name, start_time, end_time)
 
     if buffer_time < 0:
@@ -529,6 +516,7 @@ def read_frame_file(file_name, start_time, end_time, channel=None, buffer_time=0
     strain: gwpy.timeseries.TimeSeries
 
     """
+    from gwpy.timeseries import TimeSeries
     loaded = False
     strain = None
 
@@ -713,74 +701,6 @@ def build_roq_weights(data, basis, deltaF):
     return weights
 
 
-def blockwise_dot_product(matrix_a, matrix_b, max_elements=int(2 ** 27),
-                          out=None):
-    """
-    Memory efficient
-    Computes the dot product of two matrices in a block-wise fashion.
-    Only blocks of `matrix_a` with a maximum size of `max_elements` will be
-    processed simultaneously.
-
-    Parameters
-    ==========
-    matrix_a, matrix_b: array-like
-        Matrices to be dot producted, matrix_b is complex conjugated.
-    max_elements: int
-        Maximum number of elements to consider simultaneously, should be memory
-        limited.
-    out: array-like
-        Output array
-
-    Returns
-    =======
-    out: array-like
-        Dot producted array
-    """
-    def block_slices(dim_size, block_size):
-        """Generator that yields slice objects for indexing into
-        sequential blocks of an array along a particular axis
-        Useful for blockwise dot
-        """
-        count = 0
-        while True:
-            yield slice(count, count + block_size, 1)
-            count += block_size
-            if count > dim_size:
-                return
-
-    matrix_b = np.conjugate(matrix_b)
-    m, n = matrix_a.shape
-    n1, o = matrix_b.shape
-    if n1 != n:
-        raise ValueError(
-            'Matrices are not aligned, matrix a has shape ' +
-            '{}, matrix b has shape {}.'.format(matrix_a.shape, matrix_b.shape))
-
-    if matrix_a.flags.f_contiguous:
-        # prioritize processing as many columns of matrix_a as possible
-        max_cols = max(1, max_elements // m)
-        max_rows = max_elements // max_cols
-
-    else:
-        # prioritize processing as many rows of matrix_a as possible
-        max_rows = max(1, max_elements // n)
-        max_cols = max_elements // max_rows
-
-    if out is None:
-        out = np.empty((m, o), dtype=np.result_type(matrix_a, matrix_b))
-    elif out.shape != (m, o):
-        raise ValueError('Output array has incorrect dimensions.')
-
-    for mm in block_slices(m, max_rows):
-        out[mm, :] = 0
-        for nn in block_slices(n, max_cols):
-            a_block = matrix_a[mm, nn].copy()  # copy to force a read
-            out[mm, :] += np.dot(a_block, matrix_b[nn, :])
-            del a_block
-
-    return out
-
-
 def convert_args_list_to_float(*args_list):
     """ Converts inputs to floats, returns a list in the same order as the input"""
     try:
@@ -793,17 +713,19 @@ def convert_args_list_to_float(*args_list):
 def lalsim_SimInspiralTransformPrecessingNewInitialConditions(
         theta_jn, phi_jl, tilt_1, tilt_2, phi_12, a_1, a_2, mass_1, mass_2,
         reference_frequency, phase):
+    from lalsimulation import SimInspiralTransformPrecessingNewInitialConditions
 
     args_list = convert_args_list_to_float(
         theta_jn, phi_jl, tilt_1, tilt_2, phi_12, a_1, a_2, mass_1, mass_2,
         reference_frequency, phase)
 
-    return lalsim.SimInspiralTransformPrecessingNewInitialConditions(*args_list)
+    return SimInspiralTransformPrecessingNewInitialConditions(*args_list)
 
 
 def lalsim_GetApproximantFromString(waveform_approximant):
+    from lalsimulation import GetApproximantFromString
     if isinstance(waveform_approximant, str):
-        return lalsim.GetApproximantFromString(waveform_approximant)
+        return GetApproximantFromString(waveform_approximant)
     else:
         raise ValueError("waveform_approximant must be of type str")
 
@@ -840,6 +762,7 @@ def lalsim_SimInspiralFD(
     waveform_dictionary: None, lal.Dict
     approximant: int, str
     """
+    from lalsimulation import SimInspiralFD
 
     args = convert_args_list_to_float(
         mass_1, mass_2, spin_1x, spin_1y, spin_1z, spin_2x, spin_2y, spin_2z,
@@ -849,7 +772,7 @@ def lalsim_SimInspiralFD(
 
     approximant = _get_lalsim_approximant(approximant)
 
-    return lalsim.SimInspiralFD(*args, waveform_dictionary, approximant)
+    return SimInspiralFD(*args, waveform_dictionary, approximant)
 
 
 def lalsim_SimInspiralChooseFDWaveform(
@@ -884,6 +807,7 @@ def lalsim_SimInspiralChooseFDWaveform(
     waveform_dictionary: None, lal.Dict
     approximant: int, str
     """
+    from lalsimulation import SimInspiralChooseFDWaveform
 
     args = convert_args_list_to_float(
         mass_1, mass_2, spin_1x, spin_1y, spin_1z, spin_2x, spin_2y, spin_2z,
@@ -893,7 +817,7 @@ def lalsim_SimInspiralChooseFDWaveform(
 
     approximant = _get_lalsim_approximant(approximant)
 
-    return lalsim.SimInspiralChooseFDWaveform(*args, waveform_dictionary, approximant)
+    return SimInspiralChooseFDWaveform(*args, waveform_dictionary, approximant)
 
 
 def _get_lalsim_approximant(approximant):
@@ -931,6 +855,8 @@ def lalsim_SimInspiralChooseFDWaveformSequence(
     approximant: int, str
     frequency_array: np.ndarray, lal.REAL8Vector
     """
+    from lal import REAL8Vector, CreateREAL8Vector
+    from lalsimulation import SimInspiralChooseFDWaveformSequence
 
     [mass_1, mass_2, spin_1x, spin_1y, spin_1z, spin_2x, spin_2y, spin_2z,
      luminosity_distance, iota, phase, reference_frequency] = convert_args_list_to_float(
@@ -944,12 +870,12 @@ def lalsim_SimInspiralChooseFDWaveformSequence(
     else:
         raise ValueError("approximant not an int")
 
-    if not isinstance(frequency_array, lal.REAL8Vector):
+    if not isinstance(frequency_array, REAL8Vector):
         old_frequency_array = frequency_array.copy()
-        frequency_array = lal.CreateREAL8Vector(len(old_frequency_array))
+        frequency_array = CreateREAL8Vector(len(old_frequency_array))
         frequency_array.data = old_frequency_array
 
-    return lalsim.SimInspiralChooseFDWaveformSequence(
+    return SimInspiralChooseFDWaveformSequence(
         phase, mass_1, mass_2, spin_1x, spin_1y, spin_1z, spin_2x, spin_2y,
         spin_2z, reference_frequency, luminosity_distance, iota,
         waveform_dictionary, approximant, frequency_array)
@@ -957,23 +883,25 @@ def lalsim_SimInspiralChooseFDWaveformSequence(
 
 def lalsim_SimInspiralWaveformParamsInsertTidalLambda1(
         waveform_dictionary, lambda_1):
+    from lalsimulation import SimInspiralWaveformParamsInsertTidalLambda1
     try:
         lambda_1 = float(lambda_1)
     except ValueError:
         raise ValueError("Unable to convert lambda_1 to float")
 
-    return lalsim.SimInspiralWaveformParamsInsertTidalLambda1(
+    return SimInspiralWaveformParamsInsertTidalLambda1(
         waveform_dictionary, lambda_1)
 
 
 def lalsim_SimInspiralWaveformParamsInsertTidalLambda2(
         waveform_dictionary, lambda_2):
+    from lalsimulation import SimInspiralWaveformParamsInsertTidalLambda2
     try:
         lambda_2 = float(lambda_2)
     except ValueError:
         raise ValueError("Unable to convert lambda_2 to float")
 
-    return lalsim.SimInspiralWaveformParamsInsertTidalLambda2(
+    return SimInspiralWaveformParamsInsertTidalLambda2(
         waveform_dictionary, lambda_2)
 
 
@@ -1020,6 +948,7 @@ def plot_spline_pos(log_freqs, samples, nfreqs=100, level=0.9, color='k', label=
         Function to transform the spline into plotted values.
 
     """
+    import matplotlib.pyplot as plt
     freq_points = np.exp(log_freqs)
     freqs = np.logspace(min(log_freqs), max(log_freqs), nfreqs, base=np.exp(1))
 
@@ -1084,3 +1013,9 @@ class PropertyAccessor(object):
 
     def __set__(self, instance, value):
         setattr(getattr(instance, self.container_instance_name), self.property_name, value)
+
+
+def greenwich_mean_sidereal_time(time):
+    from lal import GreenwichMeanSiderealTime
+    time = float(time)
+    return GreenwichMeanSiderealTime(time)
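The new wrapper keeps lal out of the module namespace and coerces its argument to float, so numpy scalars are also accepted; usage, assuming lalsuite is installed:

import numpy as np
from bilby.gw.utils import greenwich_mean_sidereal_time

gmst = greenwich_mean_sidereal_time(np.float64(1126259462.4)) % (2 * np.pi)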
diff --git a/optional_requirements.txt b/optional_requirements.txt
index cd16a99f5eef1d8839dffb60db9623b4771b04e3..aad48e7d087f2717a6f42483fc95805d3e763fd3 100644
--- a/optional_requirements.txt
+++ b/optional_requirements.txt
@@ -3,3 +3,5 @@ lalsuite
 gwpy
 theano
 plotly
+tables
+pyfftw
diff --git a/requirements.txt b/requirements.txt
index 05d86e2f5459f636abf631452a48535d12d4393f..b56251a2531ea04700fb7a6fd0cfe1964af45453 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,12 +1,11 @@
 dynesty
 emcee
 corner
-numpy<1.20
-matplotlib>=2.0
+numpy
+matplotlib>=2.1
 scipy>=0.16
 pandas
 mock
 dill
 tqdm
 h5py
-tables
diff --git a/sampler_requirements.txt b/sampler_requirements.txt
index 2e7155585bedfbc9630e2a2406dc3c171d95f7b0..69a03701aa337928a0112046a74f6a44677487fe 100644
--- a/sampler_requirements.txt
+++ b/sampler_requirements.txt
@@ -3,8 +3,7 @@ dynesty
 emcee
 nestle
 ptemcee
-pymc3==3.6; python_version <= '2.7'
-pymc3>=3.6; python_version > '3.4'
+pymc3>=3.6
 pymultinest
 kombine
 ultranest>=3.0.0
diff --git a/setup.cfg b/setup.cfg
index a9ae5b2d2c05c11e57bbd05cbc5b1b3ea3eb7a8a..8dd2e017db2db04a5d92044bc4fc5ed0040bd572 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -5,6 +5,7 @@ ignore = E129 W503 W504 W605 E203 E402
 
 [tool:pytest]
 addopts =
+    --ignore test/import_test.py
     --ignore test/integration/other_test.py
     --ignore test/integration/example_test.py
     --ignore test/integration/sample_from_the_prior_test.py
diff --git a/test/core/prior/dict_test.py b/test/core/prior/dict_test.py
index a004feab16f25fcdc809fab263b0986f45c2772b..546bcd4652cb592755630a4d356cda1517f1203f 100644
--- a/test/core/prior/dict_test.py
+++ b/test/core/prior/dict_test.py
@@ -41,11 +41,6 @@ class TestPriorDict(unittest.TestCase):
         priors = bilby.core.prior.PriorDict(self.priors)
         self.assertEqual(priors, priors.copy())
 
-    def test_prior_set(self):
-        priors_dict = bilby.core.prior.PriorDict(self.priors)
-        priors_set = bilby.core.prior.PriorSet(self.priors)
-        self.assertEqual(priors_dict, priors_set)
-
     def test_prior_set_is_dict(self):
         self.assertIsInstance(self.prior_set_from_dict, dict)
 
diff --git a/test/core/sampler/dynesty_test.py b/test/core/sampler/dynesty_test.py
index 0509ff51067cff2ab8f608924a6f0783460b6f96..ec8e2274f0582fce8b512edd50ae94b7760e4ba3 100644
--- a/test/core/sampler/dynesty_test.py
+++ b/test/core/sampler/dynesty_test.py
@@ -34,7 +34,7 @@ class TestDynesty(unittest.TestCase):
             periodic=None,
             reflective=None,
             verbose=True,
-            check_point_delta_t=600,
+            check_point_delta_t=1800,
             nlive=1000,
             first_update=None,
             npdim=None,
@@ -90,7 +90,7 @@ class TestDynesty(unittest.TestCase):
             periodic=[],
             reflective=[],
             verbose=True,
-            check_point_delta_t=600,
+            check_point_delta_t=1800,
             nlive=1000,
             first_update=None,
             npdim=None,
diff --git a/test/gw/cosmology_test.py b/test/gw/cosmology_test.py
index 158375d5a5dcb172c03b4c9267c8a792b4d5f509..0864c4cc03afd995f22cc1190978e012baebac3a 100644
--- a/test/gw/cosmology_test.py
+++ b/test/gw/cosmology_test.py
@@ -46,7 +46,7 @@ class TestGetCosmology(unittest.TestCase):
         self.assertEqual(cosmology.get_cosmology("WMAP9").name, "WMAP9")
 
     def test_getting_cosmology_with_default(self):
-        self.assertEqual(cosmology.get_cosmology(), cosmology.COSMOLOGY[0])
+        self.assertEqual(cosmology.get_cosmology().name, "Planck15")
 
 
 if __name__ == "__main__":
diff --git a/test/gw/likelihood_test.py b/test/gw/likelihood_test.py
index 8f8bbcfc150af3d39db1f47304a95fea217e0e17..adea14483de062a6c48c31194f366594cd9293da 100644
--- a/test/gw/likelihood_test.py
+++ b/test/gw/likelihood_test.py
@@ -830,7 +830,7 @@ class TestROQLikelihood(unittest.TestCase):
         roq_wfg = bilby.gw.waveform_generator.WaveformGenerator(
             duration=self.duration,
             sampling_frequency=self.sampling_frequency,
-            frequency_domain_source_model=bilby.gw.source.roq,
+            frequency_domain_source_model=bilby.gw.source.binary_black_hole_roq,
             waveform_arguments=dict(
                 frequency_nodes_linear=fnodes_linear,
                 frequency_nodes_quadratic=fnodes_quadratic,
@@ -1080,7 +1080,7 @@ class TestRescaledROQLikelihood(unittest.TestCase):
         self.roq_wfg = bilby.gw.waveform_generator.WaveformGenerator(
             duration=self.duration,
             sampling_frequency=self.sampling_frequency,
-            frequency_domain_source_model=bilby.gw.source.roq,
+            frequency_domain_source_model=bilby.gw.source.binary_black_hole_roq,
             waveform_arguments=dict(
                 frequency_nodes_linear=fnodes_linear,
                 frequency_nodes_quadratic=fnodes_quadratic,
diff --git a/test/gw/source_test.py b/test/gw/source_test.py
index ec4a2929ad96638b541ae7d39ac567d2c9fe7c4d..24c7346d663ca1de19dd616ea10dc73f52747b8e 100644
--- a/test/gw/source_test.py
+++ b/test/gw/source_test.py
@@ -236,7 +236,7 @@ class TestROQBBH(unittest.TestCase):
     def test_roq_runs_valid_parameters(self):
         self.parameters.update(self.waveform_kwargs)
         self.assertIsInstance(
-            bilby.gw.source.roq(self.frequency_array, **self.parameters), dict
+            bilby.gw.source.binary_black_hole_roq(self.frequency_array, **self.parameters), dict
         )
 
     def test_roq_fails_without_frequency_nodes(self):
@@ -244,7 +244,7 @@ class TestROQBBH(unittest.TestCase):
         del self.parameters["frequency_nodes_linear"]
         del self.parameters["frequency_nodes_quadratic"]
         with self.assertRaises(KeyError):
-            bilby.gw.source.roq(self.frequency_array, **self.parameters)
+            bilby.gw.source.binary_black_hole_roq(self.frequency_array, **self.parameters)
 
 
 if __name__ == "__main__":
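
Together with the likelihood tests above, this confirms the ROQ source model is now addressed by its explicit name; the call signature is unchanged in these tests, so downstream code needs only a one-token update:

    h_dict = bilby.gw.source.binary_black_hole_roq(frequency_array, **parameters)  # was bilby.gw.source.roq
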
diff --git a/test/gw/utils_test.py b/test/gw/utils_test.py
index fee929faaae9572b6d351f610b5085a84bd13a54..a3a7582f1f2b94c796156fb9cc031c37a3bb5aee 100644
--- a/test/gw/utils_test.py
+++ b/test/gw/utils_test.py
@@ -3,9 +3,10 @@ import os
 from shutil import rmtree
 
 import numpy as np
-import gwpy
 import lal
 import lalsimulation as lalsim
+from gwpy.timeseries import TimeSeries
+from gwpy.detector import Channel
 from scipy.stats import ks_2samp
 
 import bilby
@@ -125,8 +126,8 @@ class TestGWUtils(unittest.TestCase):
         N = 100
         times = np.linspace(start_time, end_time, N)
         data = np.random.normal(0, 1, N)
-        ts = gwpy.timeseries.TimeSeries(data=data, times=times, t0=0)
-        ts.channel = gwpy.detector.Channel(channel)
+        ts = TimeSeries(data=data, times=times, t0=0)
+        ts.channel = Channel(channel)
         ts.name = channel
         filename = os.path.join(self.outdir, "test.gwf")
         ts.write(filename, format="gwf")
@@ -158,7 +159,7 @@ class TestGWUtils(unittest.TestCase):
         )
         self.assertTrue(np.all(strain.value == data[:-1]))
 
-        ts = gwpy.timeseries.TimeSeries(data=data, times=times, t0=0)
+        ts = TimeSeries(data=data, times=times, t0=0)
         ts.name = "NOT-A-KNOWN-CHANNEL"
         ts.write(filename, format="gwf")
         strain = gwutils.read_frame_file(filename, start_time=None, end_time=None)
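
The gwpy usage switches from attribute access on the bare package to explicit submodule imports. import gwpy alone is not guaranteed to load submodules such as gwpy.timeseries, so gwpy.timeseries.TimeSeries can raise AttributeError depending on the gwpy version; importing the classes directly is the robust spelling:

    from gwpy.detector import Channel
    from gwpy.timeseries import TimeSeries

    ts = TimeSeries(data=data, times=times, t0=0)  # data/times as built in the test
    ts.channel = Channel(channel)
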
diff --git a/test/import_test.py b/test/import_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..29a4c602d094e1ed04ea0f086db7048c7bf4536e
--- /dev/null
+++ b/test/import_test.py
@@ -0,0 +1,28 @@
+import sys
+
+import bilby  # noqa
+
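+# bilby has just been imported in a fresh interpreter; snapshot every module
+# that import pulled in, so side-effect imports can be detected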
+unique_packages = set(sys.modules)
+
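+# heavy or optional packages that importing bilby must never drag in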
+unwanted = {
+    "lal", "lalsimulation", "matplotlib",
+    "h5py", "dill", "tqdm", "tables", "deepdish", "corner",
+}
+
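+# every sampler and optional requirement is likewise banned at import time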
+for filename in ["sampler_requirements.txt", "optional_requirements.txt"]:
+    with open(filename, "r") as ff:
+        packages = ff.readlines()
+        for package in packages:
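+            # strip version specifiers: "ultranest>=3.0.0" -> "ultranest"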
+            package = package.split(">")[0].split("<")[0].split("=")[0].strip()
+            unwanted.add(package)
+
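+# fail loudly if any banned package was imported as a side effect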
+if not unique_packages.isdisjoint(unwanted):
+    raise ImportError(
+        f"{' '.join(unique_packages.intersection(unwanted))} imported with Bilby"
+    )
diff --git a/test/integration/sample_from_the_prior_test.py b/test/integration/sample_from_the_prior_test.py
index e48c3574dc79c44b6b73089f2b3a71d24d4ea6c1..64bb0bed105f4b983632eef689903830ccec6411 100644
--- a/test/integration/sample_from_the_prior_test.py
+++ b/test/integration/sample_from_the_prior_test.py
@@ -90,10 +90,11 @@ class Test(unittest.TestCase):
             likelihood=likelihood,
             priors=priors,
             sampler="dynesty",
-            npoints=1000,
-            walks=100,
+            nlive=1000,
+            nact=10,
             outdir=self.outdir,
             label=label,
+            save=False
         )
         pvalues = [
             ks_2samp_wrapper(
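
The run configuration drops the deprecated npoints/walks spelling in favour of dynesty's native nlive plus bilby's nact (which roughly sets the walk length in units of the autocorrelation time), and save=False stops the integration test from writing result files it never reads back.
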
diff --git a/test/integration/sampler_run_test.py b/test/integration/sampler_run_test.py
index 1e263986b7e513773628f1ea733f28df70d1f51b..46f09bbb6988e839d8c4390f278b470b1d882fab 100644
--- a/test/integration/sampler_run_test.py
+++ b/test/integration/sampler_run_test.py
@@ -48,7 +48,12 @@ class TestRunningSamplers(unittest.TestCase):
             likelihood=self.likelihood,
             priors=self.priors,
             sampler="dnest4",
-            nlive=100,
+            max_num_levels=2,
+            num_steps=10,
+            new_level_interval=10,
+            num_per_step=10,
+            thread_steps=1,
+            num_particles=50,
             save=False,
         )
 
@@ -147,7 +152,7 @@ class TestRunningSamplers(unittest.TestCase):
             sampler="pymc3",
             draws=50,
             tune=50,
-            n_init=1000,
+            n_init=250,
             save=False,
         )