diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index bbaa78a4c9d18f6ea78e1c6e278b7f9f0e0c5856..c22cc98c804f21558a74509f5b32ef375a31e993 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -79,18 +79,9 @@ scheduled-python-3.7:
   image: bilbydev/bilby-test-suite-python37
   only:
     - schedules
-  before_script:
-    # Install the dependencies specified in the Pipfile
-    - pipenv install --three --python=/opt/conda/bin/python --system --deploy
   script:
     - python setup.py install
 
-    # Run pyflakes
-    - flake8 .
-
-    # Run tests
-    - pytest
-
     # Run tests which are only done on schedule
     - pytest test/example_test.py
     - pytest test/gw_example_test.py
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 93611c64ce88e72da6dc99f6b85f83ad78b51908..7ef9d51562de405aeeac90ff76703740aef5068e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,6 +11,23 @@
 ### Removed
 -
 
+## [0.4.2] 2019-03-21
+
+### Added
+- Fermi-Dirac and SymmetricLogUniform prior distributions
+- Multivariate Gaussian example and BNS example
+- Standard GWOSC channel names
+- Initial work on a fake sampler for testing
+- Option for aligned spins
+- Results file command line interface
+- Full reconstruction of marginalized parameters
+
+### Changed
+- Fixed the scheduled tests and simplified the testing environment
+- JSON result files can now be gzipped
+- Reduced ROQ memory usage
+- Default checkpointing in cpnest
+
 ## [0.4.1] 2019-03-04
 
 ### Added
diff --git a/bilby/core/__init__.py b/bilby/core/__init__.py
index 9d1dd51e24d6debec2c663f0128742a77337e370..3946f24a4339cc4fa456df4c53dd50bd2fde75c9 100644
--- a/bilby/core/__init__.py
+++ b/bilby/core/__init__.py
@@ -1,2 +1,2 @@
 from __future__ import absolute_import
-from . import likelihood, prior, result, sampler, series, utils
+from . import grid, likelihood, prior, result, sampler, series, utils
diff --git a/bilby/core/grid.py b/bilby/core/grid.py
new file mode 100644
index 0000000000000000000000000000000000000000..1f782ba48b3cc9103935b9c6778dbf64eecba857
--- /dev/null
+++ b/bilby/core/grid.py
@@ -0,0 +1,282 @@
+from __future__ import division
+
+import numpy as np
+
+from .prior import Prior, PriorDict
+from .utils import logtrapzexp
+
+
+class Grid(object):
+
+    def __init__(self, likelihood, priors, grid_size=101):
+        """
+
+        Parameters
+        ----------
+        likelihood: bilby.likelihood.Likelihood
+        priors: bilby.prior.PriorDict
+        grid_size: int, list, dict
+            Size of the grid. This can be any of:
+            - int: all dimensions will have this number of points
+            - list: one entry per prior, in order; each entry is either an
+              int (number of points) or an array of grid points to use
+            - dict: as for list, but keyed by parameter name
+        """
+        self.likelihood = likelihood
+        self.priors = PriorDict(priors)
+        self.n_dims = len(priors)
+        self.parameter_names = list(self.priors.keys())
+
+        self.sample_points = dict()
+        self._get_sample_points(grid_size)
+        # evaluate the prior on the grid points
+        self._ln_prior = self.priors.ln_prob(
+            {key: self.mesh_grid[i].flatten() for i, key in
+             enumerate(self.parameter_names)}, axis=0).reshape(
+            self.mesh_grid[0].shape)
+        self._ln_likelihood = None
+
+        # evaluate the likelihood on the grid points
+        self._evaluate()
+
+    @property
+    def ln_prior(self):
+        return self._ln_prior
+
+    @property
+    def prior(self):
+        return np.exp(self.ln_prior)
+
+    @property
+    def ln_likelihood(self):
+        if self._ln_likelihood is None:
+            self._evaluate()
+        return self._ln_likelihood
+
+    @property
+    def ln_posterior(self):
+        return self.ln_likelihood + self.ln_prior
+
+    def marginalize(self, log_array, parameters=None, not_parameters=None):
+        """
+        Marginalize over a list of parameters.
+
+        Parameters
+        ----------
+        log_array: array_like
+            A :class:`numpy.ndarray` of log likelihood/posterior values.
+        parameters: list, str
+            A list, or single string, of parameters to marginalize over. If None
+            then all parameters will be marginalized over.
+        not_parameters: list, str
+            Instead of a list of parameters to marginalize over, you can give
+            the set of parameters to *not* marginalize over.
+
+        Returns
+        -------
+        out_array: array_like
+            An array containing the marginalized log likelihood/posterior.
+        """
+
+        if parameters is None:
+            params = list(self.parameter_names)
+
+            if not_parameters is not None:
+                if isinstance(not_parameters, str):
+                    not_params = [not_parameters]
+                elif isinstance(not_parameters, list):
+                    not_params = not_parameters
+                else:
+                    raise TypeError("Parameter names must be a list or string")
+
+                for name in list(params):
+                    if name in not_params:
+                        params.remove(name)
+        elif isinstance(parameters, str):
+            params = [parameters]
+        elif isinstance(parameters, list):
+            params = parameters
+        else:
+            raise TypeError("Parameter names must be a list or string")
+
+        out_array = log_array.copy()
+        names = list(self.parameter_names)
+
+        for name in params:
+            out_array = self._marginalize_single(out_array, name, names)
+
+        return out_array
+
+    def _marginalize_single(self, log_array, name, non_marg_names=None):
+        """
+        Marginalize the log likelihood/posterior over a single given parameter.
+
+        Parameters
+        ----------
+        log_array: array_like
+            A :class:`numpy.ndarray` of log likelihood/posterior values.
+        name: str
+            The name of the parameter to marginalize over.
+        non_marg_names: list
+            A list of parameter names that have not been marginalized over.
+
+        Returns
+        -------
+        out: array_like
+            An array containing the marginalized log likelihood/posterior.
+        """
+
+        if name not in self.parameter_names:
+            raise ValueError("'{}' is not a recognised "
+                             "parameter".format(name))
+
+        if non_marg_names is None:
+            non_marg_names = list(self.parameter_names)
+
+        axis = non_marg_names.index(name)
+        non_marg_names.remove(name)
+
+        places = self.sample_points[name]
+
+        if len(places) > 1:
+            out = np.apply_along_axis(
+                logtrapzexp, axis, log_array, places[1] - places[0])
+        else:
+            # no marginalisation required, just remove the singleton dimension
+            out = np.squeeze(log_array, axis=axis)
+
+        return out
+
+    @property
+    def ln_evidence(self):
+        return self.marginalize(self.ln_posterior)
+
+    @property
+    def log_evidence(self):
+        return self.ln_evidence
+
+    def marginalize_ln_likelihood(self, parameter=None, not_parameter=None):
+        """
+        Marginalize the ln likelihood over either the specified parameter or
+        all but the specified "not_parameter". If neither is specified the
+        ln likelihood will be fully marginalized over.
+
+        Parameters
+        ----------
+        parameter: str, optional
+            Name of the parameter to marginalize over.
+        not_parameter: str, optional
+            Name of the parameter to not marginalize over.
+        Returns
+        -------
+        array-like:
+            The marginalized ln likelihood.
+        """
+        return self.marginalize(self.ln_likelihood, parameters=parameter,
+                                not_parameters=not_parameter)
+
+    def marginalize_ln_posterior(self, parameter=None, not_parameter=None):
+        """
+        Marginalize the ln posterior over either the specified parameter or all
+        but the specified "not_parameter". If neither is specified the
+        ln posterior will be fully marginalized over.
+
+        Parameters
+        ----------
+        parameter: str, optional
+            Name of the parameter to marginalize over.
+        not_parameter: str, optional
+            Name of the parameter to not marginalize over.
+        Returns
+        -------
+        array-like:
+            The marginalized ln posterior.
+        """
+        return self.marginalize(self.ln_posterior, parameters=parameter,
+                                not_parameters=not_parameter)
+
+    def marginalize_likelihood(self, parameter=None, not_parameter=None):
+        """
+        Marginalize the likelihood over either the specified parameter or all
+        but the specified "not_parameter". If neither is specified the
+        likelihood will be fully marginalized over.
+
+        Parameters
+        ----------
+        parameter: str, optional
+            Name of the parameter to marginalize over.
+        not_parameter: str, optional
+            Name of the parameter to not marginalize over.
+        Returns
+        -------
+        array-like:
+            The marginalized likelihood.
+        """
+        ln_like = self.marginalize(self.ln_likelihood, parameters=parameter,
+                                   not_parameters=not_parameter)
+        # NOTE: this output will not be properly normalised
+        return np.exp(ln_like - np.max(ln_like))
+
+    def marginalize_posterior(self, parameter=None, not_parameter=None):
+        """
+        Marginalize the posterior over either the specified parameter or all
+        but the specified "not_parameter". If neither is specified the
+        posterior will be fully marginalized over.
+
+        Parameters
+        ----------
+        parameter: str, optional
+            Name of the parameter to marginalize over.
+        not_parameter: str, optional
+            Name of the parameter to not marginalize over.
+        Returns
+        -------
+        array-like:
+            The marginalized posterior.
+        """
+        ln_post = self.marginalize(self.ln_posterior, parameters=parameter,
+                                   not_parameters=not_parameter)
+        # NOTE: this output will not be properly normalised
+        return np.exp(ln_post - np.max(ln_post))
+
+    def _evaluate(self):
+        self._ln_likelihood = np.empty(self.mesh_grid[0].shape)
+        self._evaluate_recursion(0)
+
+    def _evaluate_recursion(self, dimension):
+        if dimension == self.n_dims:
+            current_point = tuple([[int(np.where(
+                self.likelihood.parameters[name] ==
+                self.sample_points[name])[0])] for name in self.parameter_names])
+            self._ln_likelihood[current_point] = self.likelihood.log_likelihood()
+        else:
+            name = self.parameter_names[dimension]
+            for ii in range(self._ln_likelihood.shape[dimension]):
+                self.likelihood.parameters[name] = self.sample_points[name][ii]
+                self._evaluate_recursion(dimension + 1)
+
+    def _get_sample_points(self, grid_size):
+        for ii, key in enumerate(self.parameter_names):
+            if isinstance(self.priors[key], Prior):
+                if isinstance(grid_size, int):
+                    self.sample_points[key] = self.priors[key].rescale(
+                        np.linspace(0, 1, grid_size))
+                elif isinstance(grid_size, list):
+                    if isinstance(grid_size[ii], int):
+                        self.sample_points[key] = self.priors[key].rescale(
+                            np.linspace(0, 1, grid_size[ii]))
+                    else:
+                        self.sample_points[key] = grid_size[ii]
+                elif isinstance(grid_size, dict):
+                    if isinstance(grid_size[key], int):
+                        self.sample_points[key] = self.priors[key].rescale(
+                            np.linspace(0, 1, grid_size[key]))
+                    else:
+                        self.sample_points[key] = grid_size[key]
+
+        # set the mesh of points
+        self.mesh_grid = np.meshgrid(
+            *(self.sample_points[key] for key in self.parameter_names),
+            indexing='ij')
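
For orientation, a minimal sketch of how the new `Grid` class might be driven; the model, priors, and grid sizes below are illustrative, and a full worked example (`linear_regression_grid.py`) appears later in this diff.

```python
# Minimal sketch of bilby.core.grid.Grid; model and priors are illustrative.
import numpy as np
import bilby


def model(time, m, c):
    return m * time + c


time = np.linspace(0, 10, 101)
data = model(time, 2.5, 0.2) + np.random.normal(0, 1, len(time))
likelihood = bilby.core.likelihood.GaussianLikelihood(time, data, model, sigma=1)

priors = bilby.core.prior.PriorDict()
priors['m'] = bilby.core.prior.Uniform(0, 5, 'm')
priors['c'] = bilby.core.prior.Uniform(-2, 2, 'c')

# grid_size may be an int, a per-parameter list, or a dict keyed by name
grid = bilby.core.grid.Grid(likelihood, priors, grid_size={'m': 100, 'c': 50})

print(grid.ln_evidence)  # posterior marginalized over all parameters
post_m = grid.marginalize_posterior(not_parameter='m')  # 1D marginal in m
print(post_m.shape)  # (100,)
```
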
diff --git a/bilby/core/prior.py b/bilby/core/prior.py
index 4e731629ad51b3a6d70fa12a079dfd02363ca025..3bf5788d712226c9cc550e489326895ebb99874b 100644
--- a/bilby/core/prior.py
+++ b/bilby/core/prior.py
@@ -771,6 +771,86 @@ class LogUniform(PowerLaw):
             logger.warning('You specified a uniform-in-log prior with minimum={}'.format(self.minimum))
 
 
+class SymmetricLogUniform(Prior):
+
+    def __init__(self, minimum, maximum, name=None, latex_label=None,
+                 unit=None):
+        """Symmetric Log-Uniform distribtions with bounds
+
+        This is identical to a Log-Uniform distribition, but mirrored about
+        the zero-axis and subsequently normalized. As such, the distribution
+        has support on the two regions [-maximum, -minimum] and [minimum,
+        maximum].
+
+        Parameters
+        ----------
+        minimum: float
+            See superclass
+        maximum: float
+            See superclass
+        name: str
+            See superclass
+        latex_label: str
+            See superclass
+        unit: str
+            See superclass
+        """
+        Prior.__init__(self, name=name, latex_label=latex_label,
+                       minimum=minimum, maximum=maximum, unit=unit)
+
+    def rescale(self, val):
+        """
+        'Rescale' a sample from the unit line element to the symmetric
+        log-uniform prior.
+
+        This maps to the inverse CDF. This has been analytically solved for this case.
+
+        Parameters
+        ----------
+        val: float
+            Uniform probability
+
+        Returns
+        -------
+        float: Rescaled probability
+        """
+        Prior.test_valid_for_rescaling(val)
+        if val < 0.5:
+            return -self.maximum * np.exp(-2 * val * np.log(self.maximum / self.minimum))
+        elif val > 0.5:
+            return self.minimum * np.exp(np.log(self.maximum / self.minimum) * (2 * val - 1))
+        else:
+            raise ValueError("Rescale not valid for val=0.5")
+
+    def prob(self, val):
+        """Return the prior probability of val
+
+        Parameters
+        ----------
+        val: float
+
+        Returns
+        -------
+        float: Prior probability of val
+        """
+        return (
+            np.nan_to_num(0.5 / np.abs(val) / np.log(self.maximum / self.minimum)) *
+            self.is_in_prior_range(val))
+
+    def ln_prob(self, val):
+        """Return the logarithmic prior probability of val
+
+        Parameters
+        ----------
+        val: float
+
+        Returns
+        -------
+        float: Natural log prior probability of val
+        """
+        return np.nan_to_num(- np.log(2 * np.abs(val)) - np.log(np.log(self.maximum / self.minimum)))
+
+
 class Cosine(Prior):
 
     def __init__(self, name=None, latex_label=None, unit=None,
@@ -1775,6 +1855,107 @@ class FromFile(Interped):
             logger.warning(r"x\tp(x)")
 
 
+class FermiDirac(Prior):
+    def __init__(self, sigma, mu=None, r=None, name=None, latex_label=None,
+                 unit=None):
+        """A Fermi-Dirac type prior, with a fixed lower boundary at zero
+        (see, e.g. Section 2.3.5 of [1]_). The probability distribution
+        is defined by Equation 22 of [1]_.
+
+        Parameters
+        ----------
+        sigma: float (required)
+            The range over which the attenuation of the distribution happens
+        mu: float
+            The point at which the distribution falls to 50% of its maximum
+            value
+        r: float
+            A value giving mu/sigma. This can be used instead of specifying
+            mu.
+
+        References
+        ----------
+
+        .. [1] M. Pitkin, M. Isi, J. Veitch & G. Woan, `arXiv:1705.08978v1
+           <https://arxiv.org/abs/1705.08978v1>`_, 2017.
+        """
+        Prior.__init__(self, name=name, latex_label=latex_label, unit=unit, minimum=0.)
+
+        self.sigma = sigma
+
+        if mu is None and r is None:
+            raise ValueError("For the Fermi-Dirac prior either a 'mu' value or 'r' "
+                             "value must be given.")
+
+        if r is None and mu is not None:
+            self.mu = mu
+            self.r = self.mu / self.sigma
+        else:
+            self.r = r
+            self.mu = self.sigma * self.r
+
+        if self.r <= 0. or self.sigma <= 0.:
+            raise ValueError("For the Fermi-Dirac prior the values of sigma and r "
+                             "must be positive.")
+
+    def rescale(self, val):
+        """
+        'Rescale' a sample from the unit line element to the appropriate Fermi-Dirac prior.
+
+        This maps to the inverse CDF. This has been analytically solved for this case,
+        see Equation 24 of [1]_.
+
+        References
+        ----------
+
+        .. [1] M. Pitkin, M. Isi, J. Veitch & G. Woan, `arXiv:1705.08978v1
+           <https://arxiv.org/abs/1705.08978v1>`_, 2017.
+        """
+        Prior.test_valid_for_rescaling(val)
+
+        inv = (-np.exp(-1. * self.r) + (1. + np.exp(self.r))**-val +
+               np.exp(-1. * self.r) * (1. + np.exp(self.r))**-val)
+
+        # if val is 1 this will cause inv to be negative (due to numerical
+        # issues), so return np.inf
+        if isinstance(val, (float, int)):
+            if inv < 0:
+                return np.inf
+            else:
+                return -self.sigma * np.log(inv)
+        else:
+            idx = inv >= 0.
+            tmpinv = np.inf * np.ones(len(val))
+            tmpinv[idx] = -self.sigma * np.log(inv[idx])
+            return tmpinv
+
+    def prob(self, val):
+        """Return the prior probability of val.
+
+        Parameters
+        ----------
+        val: float
+
+        Returns
+        -------
+        float: Prior probability of val
+        """
+        return np.exp(self.ln_prob(val))
+
+    def ln_prob(self, val):
+        norm = -np.log(self.sigma * np.log(1. + np.exp(self.r)))
+        if isinstance(val, (float, int)):
+            if val < self.minimum:
+                return -np.inf
+            else:
+                return norm - np.logaddexp((val / self.sigma) - self.r, 0.)
+        else:
+            lnp = -np.inf * np.ones(len(val))
+            idx = val >= self.minimum
+            lnp[idx] = norm - np.logaddexp((val[idx] / self.sigma) - self.r, 0.)
+            return lnp
+
+
 class MultivariateGaussianDist(object):
 
     def __init__(self, names, nmodes=1, mus=None, sigmas=None, corrcoefs=None,
@@ -2481,3 +2662,4 @@ class MultivariateNormal(MultivariateGaussian):
         """
         MultivariateGaussian.__init__(self, mvg, name=name,
                                       latex_label=latex_label, unit=unit)
+
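
A short sketch exercising the two new prior classes; the numerical arguments are arbitrary. Note that `SymmetricLogUniform.rescale` deliberately raises for `val` exactly equal to 0.5, the measure-zero point between the two mirrored branches.

```python
# Sketch: drawing from and evaluating the new priors added above.
import numpy as np
from bilby.core.prior import FermiDirac, SymmetricLogUniform

slu = SymmetricLogUniform(minimum=1e-2, maximum=1e2, name='amplitude')
draws = np.array([slu.rescale(u) for u in np.random.uniform(0, 1, 1000)])
# support is the two regions [-maximum, -minimum] and [minimum, maximum]
print(np.abs(draws).min(), np.abs(draws).max())

fd = FermiDirac(sigma=1.0, r=10.0, name='h0')  # mu = r * sigma = 10
print(fd.prob(0.0), fd.prob(fd.mu))  # density falls to half its peak at mu
```
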
diff --git a/bilby/core/result.py b/bilby/core/result.py
index 5dbb6fa89d76fccb933c2ff26652e3dbef76d41e..43c36491e4f09645be08b32858c0047e3b3fdd5d 100644
--- a/bilby/core/result.py
+++ b/bilby/core/result.py
@@ -20,7 +20,7 @@ from .utils import (logger, infer_parameters_from_function,
 from .prior import Prior, PriorDict, DeltaFunction
 
 
-def result_file_name(outdir, label, extension='json'):
+def result_file_name(outdir, label, extension='json', gzip=False):
     """ Returns the standard filename used for a result file
 
     Parameters
@@ -31,18 +31,23 @@ def result_file_name(outdir, label, extension='json'):
         Naming scheme of the output file
     extension: str, optional
         Whether to save as `hdf5` or `json`
+    gzip: bool, optional
+        Set to True to append `.gz` to the extension for saving in gzipped format
 
     Returns
     -------
     str: File name of the output file
     """
     if extension in ['json', 'hdf5']:
-        return '{}/{}_result.{}'.format(outdir, label, extension)
+        if extension == 'json' and gzip:
+            return '{}/{}_result.{}.gz'.format(outdir, label, extension)
+        else:
+            return '{}/{}_result.{}'.format(outdir, label, extension)
     else:
         raise ValueError("Extension type {} not understood".format(extension))
 
 
-def _determine_file_name(filename, outdir, label, extension):
+def _determine_file_name(filename, outdir, label, extension, gzip):
     """ Helper method to determine the filename """
     if filename is not None:
         return filename
@@ -50,10 +55,10 @@ def _determine_file_name(filename, outdir, label, extension):
         if (outdir is None) and (label is None):
             raise ValueError("No information given to load file")
         else:
-            return result_file_name(outdir, label, extension)
+            return result_file_name(outdir, label, extension, gzip)
 
 
-def read_in_result(filename=None, outdir=None, label=None, extension='json'):
+def read_in_result(filename=None, outdir=None, label=None, extension='json', gzip=False):
     """ Reads in a stored bilby result object
 
     Parameters
@@ -65,10 +70,13 @@ def read_in_result(filename=None, outdir=None, label=None, extension='json'):
         naming scheme.
 
     """
-    filename = _determine_file_name(filename, outdir, label, extension)
+    filename = _determine_file_name(filename, outdir, label, extension, gzip)
 
     # Get the actual extension (may differ from the default extension if the filename is given)
     extension = os.path.splitext(filename)[1].lstrip('.')
+    if extension == 'gz':  # gzipped file
+        extension = os.path.splitext(os.path.splitext(filename)[0])[1].lstrip('.')
+
     if 'json' in extension:
         result = Result.from_json(filename=filename)
     elif ('hdf5' in extension) or ('h5' in extension):
@@ -91,7 +99,7 @@ class Result(object):
                  log_prior_evaluations=None, sampling_time=None, nburn=None,
                  walkers=None, max_autocorrelation_time=None,
                  parameter_labels=None, parameter_labels_with_unit=None,
-                 version=None):
+                 gzip=False, version=None):
         """ A class to store the results of the sampling run
 
         Parameters
@@ -129,6 +137,8 @@ class Result(object):
             The estimated maximum autocorrelation time for MCMC samplers
         parameter_labels, parameter_labels_with_unit: list
             Lists of the latex-formatted parameter labels
+        gzip: bool
+            Set to True to gzip the results file (if using json format)
         version: str,
             Version information for software used to generate the result. Note,
             this information is generated when the result object is initialized
@@ -191,7 +201,7 @@ class Result(object):
 
         """
         import deepdish
-        filename = _determine_file_name(filename, outdir, label, 'hdf5')
+        filename = _determine_file_name(filename, outdir, label, 'hdf5', False)
 
         if os.path.isfile(filename):
             dictionary = deepdish.io.load(filename)
@@ -209,7 +219,7 @@ class Result(object):
             raise IOError("No result '{}' found".format(filename))
 
     @classmethod
-    def from_json(cls, filename=None, outdir=None, label=None):
+    def from_json(cls, filename=None, outdir=None, label=None, gzip=False):
         """ Read in a saved .json data file
 
         Parameters
@@ -229,11 +239,17 @@ class Result(object):
                     If no bilby.core.result.Result is found in the path
 
         """
-        filename = _determine_file_name(filename, outdir, label, 'json')
+        filename = _determine_file_name(filename, outdir, label, 'json', gzip)
 
         if os.path.isfile(filename):
-            with open(filename, 'r') as file:
-                dictionary = json.load(file, object_hook=decode_bilby_json)
+            if gzip or os.path.splitext(filename)[1].lstrip('.') == 'gz':
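+                # NB: the local import below shadows the boolean `gzip`
+                # argument for the remainder of this branch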
+                import gzip
+                with gzip.GzipFile(filename, 'r') as file:
+                    json_str = file.read().decode('utf-8')
+                dictionary = json.loads(json_str, object_hook=decode_bilby_json)
+            else:
+                with open(filename, 'r') as file:
+                    dictionary = json.load(file, object_hook=decode_bilby_json)
             for key in dictionary.keys():
                 # Convert the loaded priors to bilby prior type
                 if key == 'priors':
@@ -381,7 +397,7 @@ class Result(object):
                 pass
         return dictionary
 
-    def save_to_file(self, overwrite=False, outdir=None, extension='json'):
+    def save_to_file(self, overwrite=False, outdir=None, extension='json', gzip=False):
         """
         Writes the Result to a json or deepdish h5 file
 
@@ -394,9 +410,12 @@ class Result(object):
             Path to the outdir. Default is the one stored in the result object.
         extension: str, optional {json, hdf5}
             Determines the method to use to store the data
+        gzip: bool, optional
+            If true, and outputting to a json file, this will gzip the resulting
+            file and add '.gz' to the file extension.
         """
         outdir = self._safe_outdir_creation(outdir, self.save_to_file)
-        file_name = result_file_name(outdir, self.label, extension)
+        file_name = result_file_name(outdir, self.label, extension, gzip)
 
         if os.path.isfile(file_name):
             if overwrite:
@@ -423,8 +442,15 @@ class Result(object):
 
         try:
             if extension == 'json':
-                with open(file_name, 'w') as file:
-                    json.dump(dictionary, file, indent=2, cls=BilbyJsonEncoder)
+                if gzip:
+                    import gzip
+                    # encode to a string
+                    json_str = json.dumps(dictionary, cls=BilbyJsonEncoder).encode('utf-8')
+                    with gzip.GzipFile(file_name, 'w') as file:
+                        file.write(json_str)
+                else:
+                    with open(file_name, 'w') as file:
+                        json.dump(dictionary, file, indent=2, cls=BilbyJsonEncoder)
             elif extension == 'hdf5':
                 import deepdish
                 for key in dictionary:
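
A sketch of the new gzipped-JSON round trip; the `Result` built here is a minimal stand-in for one produced by a real sampling run.

```python
# Save a result as gzipped JSON and read it back in two equivalent ways.
import numpy as np
import pandas as pd
import bilby

result = bilby.core.result.Result(
    label='gzip_demo', outdir='outdir',
    search_parameter_keys=['x'],
    priors=bilby.core.prior.PriorDict(
        {'x': bilby.core.prior.Uniform(-5, 5, 'x')}),
    posterior=pd.DataFrame({'x': np.random.randn(100)}))

result.save_to_file(gzip=True)  # writes outdir/gzip_demo_result.json.gz
loaded = bilby.core.result.read_in_result(
    outdir='outdir', label='gzip_demo', gzip=True)
# a filename ending in .gz is also detected without the flag
loaded = bilby.core.result.read_in_result(
    filename='outdir/gzip_demo_result.json.gz')
```
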
diff --git a/bilby/core/sampler/__init__.py b/bilby/core/sampler/__init__.py
index 0b529ca2acc995ac53f3b78de065079b6a690959..c8a5d2307e808ee6dab2d1ff4529edc7b7a04576 100644
--- a/bilby/core/sampler/__init__.py
+++ b/bilby/core/sampler/__init__.py
@@ -44,8 +44,8 @@ if command_line_args.sampler_help:
 def run_sampler(likelihood, priors=None, label='label', outdir='outdir',
                 sampler='dynesty', use_ratio=None, injection_parameters=None,
                 conversion_function=None, plot=False, default_priors_file=None,
-                clean=None, meta_data=None, save=True, result_class=None,
-                **kwargs):
+                clean=None, meta_data=None, save=True, gzip=False,
+                result_class=None, **kwargs):
     """
     The primary interface to easy parameter estimation
 
@@ -88,6 +88,8 @@ def run_sampler(likelihood, priors=None, label='label', outdir='outdir',
     save: bool
         If true, save the priors and results to disk.
         If hdf5, save as an hdf5 file instead of json.
+    gzip: bool
+        If true, and save is true, gzip the saved results file.
     result_class: bilby.core.result.Result, or child of
         The result class to use. By default, `bilby.core.result.Result` is used,
         but objects which inherit from this class can be given providing
@@ -190,7 +192,7 @@ def run_sampler(likelihood, priors=None, label='label', outdir='outdir',
         result.save_to_file(extension='hdf5')
         logger.info("Results saved to {}/".format(outdir))
     elif save:
-        result.save_to_file()
+        result.save_to_file(gzip=gzip)
         logger.info("Results saved to {}/".format(outdir))
     if plot:
         result.plot_corner()
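
The same compression can be requested straight from `run_sampler`; a small self-contained sketch with an illustrative one-parameter model:

```python
# Sketch: requesting a gzipped result file directly from run_sampler.
import numpy as np
import bilby


def model(time, m):
    return m * time


time = np.linspace(0, 10, 101)
data = model(time, 2.0) + np.random.normal(0, 1, len(time))
likelihood = bilby.core.likelihood.GaussianLikelihood(time, data, model, sigma=1)
priors = dict(m=bilby.core.prior.Uniform(0, 5, 'm'))

result = bilby.run_sampler(
    likelihood=likelihood, priors=priors, sampler='dynesty', nlive=100,
    outdir='outdir', label='compressed_run', gzip=True)
# -> outdir/compressed_run_result.json.gz
```
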
diff --git a/bilby/core/sampler/dynesty.py b/bilby/core/sampler/dynesty.py
index cfaecff1cc66adac5701d0cfd5a3dcb3e5696f08..a51ce406d75e2061af47270484a0f68a2d872199 100644
--- a/bilby/core/sampler/dynesty.py
+++ b/bilby/core/sampler/dynesty.py
@@ -378,6 +378,7 @@ class Dynesty(NestedSampler):
 
     def _run_test(self):
         import dynesty
+        import pandas as pd
         self.sampler = dynesty.NestedSampler(
             loglikelihood=self.log_likelihood,
             prior_transform=self.prior_transform,
@@ -387,7 +388,8 @@ class Dynesty(NestedSampler):
 
         self.sampler.run_nested(**sampler_kwargs)
 
-        self.result.samples = np.random.uniform(0, 1, (100, self.ndim))
+        self.result.samples = pd.DataFrame(
+            self.priors.sample(100))[self.search_parameter_keys].values
         self.result.log_evidence = np.nan
         self.result.log_evidence_err = np.nan
         return self.result
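
The test-mode samples are now drawn from the actual priors rather than from U(0, 1). The pattern in isolation, with illustrative priors:

```python
# pd.DataFrame fixes the column order of the sampled dict to match the
# search parameter keys before taking the raw array.
import pandas as pd
import bilby

priors = bilby.core.prior.PriorDict(
    dict(m=bilby.core.prior.Uniform(0, 5, 'm'),
         c=bilby.core.prior.Uniform(-2, 2, 'c')))
search_parameter_keys = ['m', 'c']
samples = pd.DataFrame(priors.sample(100))[search_parameter_keys].values
print(samples.shape)  # (100, 2)
```
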
diff --git a/bilby/core/utils.py b/bilby/core/utils.py
index dfb2117f84689cc06618ae6e393432a644aac701..5e97a641bf314671dac813a9129194a8261ebaac 100644
--- a/bilby/core/utils.py
+++ b/bilby/core/utils.py
@@ -11,6 +11,7 @@ import json
 
 import numpy as np
 from scipy.interpolate import interp2d
+from scipy.special import logsumexp
 import pandas as pd
 
 logger = logging.getLogger('bilby')
@@ -683,6 +684,25 @@ def derivatives(vals, func, releps=1e-3, abseps=None, mineps=1e-9, reltol=1e-3,
     return grads
 
 
+def logtrapzexp(lnf, dx):
+    """
+    Perform trapezium rule integration for the logarithm of a function on a regular grid.
+
+    Parameters
+    ----------
+    lnf: array_like
+        A :class:`numpy.ndarray` of values that are the natural logarithm of a function
+    dx: (array_like, float)
+        A :class:`numpy.ndarray` of step sizes between values in the function, or a
+        single step size value.
+
+    Returns
+    -------
+    The natural logarithm of the area under the function.
+    """
+    return np.log(dx / 2.) + logsumexp([logsumexp(lnf[:-1]), logsumexp(lnf[1:])])
+
+
 def run_commandline(cl, log_level=20, raise_error=True, return_output=True):
     """Run a string cmd as a subprocess, check for errors and return output.
 
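
A quick sanity check of the new `logtrapzexp` helper against `numpy.trapz`; the unit-Gaussian integrand here is just an illustration.

```python
import numpy as np
from bilby.core.utils import logtrapzexp

x = np.linspace(-10, 10, 1001)
lnf = -0.5 * x ** 2 - 0.5 * np.log(2 * np.pi)  # log of a unit Gaussian pdf
dx = x[1] - x[0]
print(logtrapzexp(lnf, dx))               # ~ 0 = log(1)
print(np.log(np.trapz(np.exp(lnf), x)))   # same value, without log-space safety
```
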
diff --git a/bilby/gw/detector.py b/bilby/gw/detector.py
index b3b18e07ba314984e2c7a841ab8d07077a600862..d2151bf23c0e29662a30c5d1ffdcd6c74a961eeb 100644
--- a/bilby/gw/detector.py
+++ b/bilby/gw/detector.py
@@ -357,8 +357,14 @@ class InterferometerStrainData(object):
         -------
         array_like: An array of boolean values
         """
-        return ((self.frequency_array >= self.minimum_frequency) &
-                (self.frequency_array <= self.maximum_frequency))
+        try:
+            return self._frequency_mask
+        except AttributeError:
+            frequency_array = self._times_and_frequencies.frequency_array
+            mask = ((frequency_array >= self.minimum_frequency) &
+                    (frequency_array <= self.maximum_frequency))
+            self._frequency_mask = mask
+            return self._frequency_mask
 
     @property
     def alpha(self):
@@ -1604,24 +1610,29 @@ class Interferometer(object):
             return
 
         fig, ax = plt.subplots()
-        ax.loglog(self.frequency_array,
-                  gwutils.asd_from_freq_series(freq_data=self.frequency_domain_strain,
-                                               df=(self.frequency_array[1] - self.frequency_array[0])),
+        df = self.frequency_array[1] - self.frequency_array[0]
+        asd = gwutils.asd_from_freq_series(
+            freq_data=self.frequency_domain_strain, df=df)
+
+        ax.loglog(self.frequency_array[self.frequency_mask],
+                  asd[self.frequency_mask],
                   color='C0', label=self.name)
-        ax.loglog(self.frequency_array,
-                  self.amplitude_spectral_density_array,
-                  color='C1', lw=0.5, label=self.name + ' ASD')
+        ax.loglog(self.frequency_array[self.frequency_mask],
+                  self.amplitude_spectral_density_array[self.frequency_mask],
+                  color='C1', lw=1.0, label=self.name + ' ASD')
         if signal is not None:
-            ax.loglog(self.frequency_array,
-                      gwutils.asd_from_freq_series(freq_data=signal,
-                                                   df=(self.frequency_array[1] - self.frequency_array[0])),
+            signal_asd = gwutils.asd_from_freq_series(
+                freq_data=signal, df=df)
+
+            ax.loglog(self.frequency_array[self.frequency_mask],
+                      signal_asd[self.frequency_mask],
                       color='C2',
                       label='Signal')
         ax.grid(True)
-        ax.set_ylabel(r'strain [strain/$\sqrt{\rm Hz}$]')
-        ax.set_xlabel(r'frequency [Hz]')
-        ax.set_xlim(20, 2000)
+        ax.set_ylabel(r'Strain [strain/$\sqrt{\rm Hz}$]')
+        ax.set_xlabel(r'Frequency [Hz]')
         ax.legend(loc='best')
+        fig.tight_layout()
         if label is None:
             fig.savefig(
                 '{}/{}_frequency_domain_data.png'.format(outdir, self.name))
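
The mask is now computed once and cached via the `AttributeError` fallback. One caveat of this pattern, sketched generically below (not bilby code), is that the cached value is not invalidated if `minimum_frequency` or `maximum_frequency` is changed after the first access.

```python
# Generic lazy-cache sketch mirroring the frequency_mask change above.
class LazyMask(object):
    def __init__(self, low, high):
        self.low, self.high = low, high

    @property
    def mask(self):
        try:
            return self._mask  # hit: computed on an earlier access
        except AttributeError:
            self._mask = [self.low <= x <= self.high for x in range(10)]
            return self._mask


m = LazyMask(2, 5)
print(m.mask)  # computed on first access
m.low = 0      # the cache is now stale; mask is NOT recomputed
print(m.mask)
```
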
diff --git a/bilby/gw/likelihood.py b/bilby/gw/likelihood.py
index 6da8c8363c1ea56b92ff3561a1afd3eab40f6dfe..c5d2b46bae88404318398944788497c1697fe5a2 100644
--- a/bilby/gw/likelihood.py
+++ b/bilby/gw/likelihood.py
@@ -53,6 +53,14 @@ class GravitationalWaveTransient(likelihood.Likelihood):
         This is done analytically using a Bessel function.
     priors: dict, optional
         If given, used in the distance and phase marginalization.
+    distance_marginalization_lookup_table: (dict, str), optional
+        If a dict, dictionary containing the lookup_table, distance_array,
+        (distance) prior_array, and reference_distance used to construct
+        the table.
+        If a string, the name of a file containing these quantities.
+        The lookup table is stored after construction either in the file
+        named by the provided string or in a default location:
+        '.distance_marginalization_lookup_dmin{}_dmax{}_n{}.npz'
 
     Returns
     -------
@@ -62,8 +70,10 @@ class GravitationalWaveTransient(likelihood.Likelihood):
 
     """
 
-    def __init__(self, interferometers, waveform_generator, time_marginalization=False, distance_marginalization=False,
-                 phase_marginalization=False, priors=None):
+    def __init__(self, interferometers, waveform_generator,
+                 time_marginalization=False, distance_marginalization=False,
+                 phase_marginalization=False, priors=None,
+                 distance_marginalization_lookup_table=None):
 
         self.waveform_generator = waveform_generator
         likelihood.Likelihood.__init__(self, dict())
@@ -79,7 +89,8 @@ class GravitationalWaveTransient(likelihood.Likelihood):
             phase_marginalization=self.phase_marginalization,
             distance_marginalization=self.distance_marginalization,
             waveform_arguments=waveform_generator.waveform_arguments,
-            frequency_domain_source_model=str(waveform_generator.frequency_domain_source_model))
+            frequency_domain_source_model=str(
+                waveform_generator.frequency_domain_source_model))
 
         if self.time_marginalization:
             self._check_prior_is_set(key='geocent_time')
@@ -93,10 +104,16 @@ class GravitationalWaveTransient(likelihood.Likelihood):
             priors['phase'] = float(0)
 
         if self.distance_marginalization:
+            self._lookup_table_filename = None
             self._check_prior_is_set(key='luminosity_distance')
-            self._distance_array = np.linspace(self.priors['luminosity_distance'].minimum,
-                                               self.priors['luminosity_distance'].maximum, int(1e4))
-            self._setup_distance_marginalization()
+            self._distance_array = np.linspace(
+                self.priors['luminosity_distance'].minimum,
+                self.priors['luminosity_distance'].maximum, int(1e4))
+            self.distance_prior_array = np.array(
+                [self.priors['luminosity_distance'].prob(distance)
+                 for distance in self._distance_array])
+            self._setup_distance_marginalization(
+                distance_marginalization_lookup_table)
             priors['luminosity_distance'] = float(self._ref_dist)
 
     def __repr__(self):
@@ -292,68 +309,74 @@ class GravitationalWaveTransient(likelihood.Likelihood):
             return np.hstack((-np.logspace(3, -3, self._dist_margd_loglikelihood_array.shape[1] / 2),
                               np.logspace(-3, 10, self._dist_margd_loglikelihood_array.shape[1] / 2)))
 
-    def _setup_distance_marginalization(self):
-        self._create_lookup_table()
+    def _setup_distance_marginalization(self, lookup_table=None):
+        if isinstance(lookup_table, str) or lookup_table is None:
+            self.cached_lookup_table_filename = lookup_table
+            lookup_table = self.load_lookup_table(
+                self.cached_lookup_table_filename)
+        if isinstance(lookup_table, dict):
+            if self._test_cached_lookup_table(lookup_table):
+                self._dist_margd_loglikelihood_array = lookup_table[
+                    'lookup_table']
+            else:
+                self._create_lookup_table()
+        else:
+            self._create_lookup_table()
         self._interp_dist_margd_loglikelihood = UnsortedInterp2d(
             self._rho_mf_ref_array, self._rho_opt_ref_array,
             self._dist_margd_loglikelihood_array)
 
     @property
     def cached_lookup_table_filename(self):
-        dmin = self._distance_array[0]
-        dmax = self._distance_array[-1]
-        n = len(self._distance_array)
-        cached_lookup_table_filename = (
-            '.distance_marginalization_lookup_dmin{}_dmax{}_n{}_v1.npy'
-            .format(dmin, dmax, n))
-        return cached_lookup_table_filename
-
-    @property
-    def cached_lookup_table(self):
-        """ Reads in the cached lookup table
-
-        Returns
-        -------
-        cached_lookup_table: np.ndarray
-            Columns are _distance_array, distance_prior_array,
-            dist_marged_log_l_tc_array. This is only returned if the file
-            exists and the first two columns match the equivalent values
-            stored on disk.
-
-        """
-
-        if os.path.exists(self.cached_lookup_table_filename):
-            loaded_file = np.load(self.cached_lookup_table_filename)
+        if self._lookup_table_filename is None:
+            dmin = self._distance_array[0]
+            dmax = self._distance_array[-1]
+            n = len(self._distance_array)
+            self._lookup_table_filename = (
+                '.distance_marginalization_lookup_dmin{}_dmax{}_n{}.npz'
+                .format(dmin, dmax, n))
+        return self._lookup_table_filename
+
+    @cached_lookup_table_filename.setter
+    def cached_lookup_table_filename(self, filename):
+        if isinstance(filename, str):
+            if filename[-4:] != '.npz':
+                filename += '.npz'
+        self._lookup_table_filename = filename
+
+    def load_lookup_table(self, filename):
+        if os.path.exists(filename):
+            loaded_file = dict(np.load(filename))
             if self._test_cached_lookup_table(loaded_file):
+                logger.info('Loaded distance marginalisation lookup table from '
+                            '{}.'.format(filename))
                 return loaded_file
+            else:
+                logger.info('Loaded distance marginalisation lookup table does '
+                            'not match prior')
+                return None
+        elif isinstance(filename, str):
+            logger.info('Distance marginalisation file {} does not '
+                        'exist'.format(filename))
+            return None
         else:
             return None
 
-    @cached_lookup_table.setter
-    def cached_lookup_table(self, lookup_table):
-        np.save(self.cached_lookup_table_filename, lookup_table)
+    def cache_lookup_table(self):
+        np.savez(self.cached_lookup_table_filename,
+                 distance_array=self._distance_array,
+                 prior_array=self.distance_prior_array,
+                 lookup_table=self._dist_margd_loglikelihood_array,
+                 reference_distance=self._ref_dist)
 
-    def _test_cached_lookup_table(self, lookup_table):
-        cond_a = np.all(self._distance_array == lookup_table[0])
-        cond_b = np.all(self.distance_prior_array == lookup_table[1])
-        if cond_a and cond_b:
-            return True
+    def _test_cached_lookup_table(self, loaded_file):
+        cond_a = np.all(self._distance_array == loaded_file['distance_array'])
+        cond_b = np.all(self.distance_prior_array == loaded_file['prior_array'])
+        cond_c = self._ref_dist == loaded_file['reference_distance']
+        return all([cond_a, cond_b, cond_c])
 
     def _create_lookup_table(self):
         """ Make the lookup table """
-
-        self.distance_prior_array = np.array(
-            [self.priors['luminosity_distance'].prob(distance)
-             for distance in self._distance_array])
-
-        # Check if a cached lookup table exists in file
-        cached_lookup_table = self.cached_lookup_table
-        if cached_lookup_table is not None:
-            self._dist_margd_loglikelihood_array = cached_lookup_table[-1]
-            logger.info("Using the cached lookup table {}"
-                        .format(os.path.abspath(self.cached_lookup_table_filename)))
-            return
-
         logger.info('Building lookup table for distance marginalisation.')
 
         self._dist_margd_loglikelihood_array = np.zeros((400, 800))
@@ -370,10 +393,7 @@ class GravitationalWaveTransient(likelihood.Likelihood):
         log_norm = logsumexp(0. / self._distance_array - 0. / self._distance_array ** 2.,
                              b=self.distance_prior_array * self._delta_distance)
         self._dist_margd_loglikelihood_array -= log_norm
-        self.cached_lookup_table = np.array([
-            self._distance_array,
-            self.distance_prior_array,
-            self._dist_margd_loglikelihood_array])
+        self.cache_lookup_table()
 
     def _setup_phase_marginalization(self):
         self._bessel_function_interped = interp1d(
@@ -513,16 +533,26 @@ class ROQGravitationalWaveTransient(GravitationalWaveTransient):
         array, or the array itself.
     priors: dict, bilby.prior.PriorDict
         A dictionary of priors containing at least the geocent_time prior
+    distance_marginalization_lookup_table: (dict, str), optional
+        If a dict, dictionary containing the lookup_table, distance_array,
+        (distance) prior_array, and reference_distance used to construct
+        the table.
+        If a string, the name of a file containing these quantities.
+        The lookup table is stored after construction either in the file
+        named by the provided string or in a default location:
+        '.distance_marginalization_lookup_dmin{}_dmax{}_n{}.npz'
 
     """
     def __init__(self, interferometers, waveform_generator, priors,
                  weights=None, linear_matrix=None, quadratic_matrix=None,
-                 distance_marginalization=False, phase_marginalization=False):
+                 distance_marginalization=False, phase_marginalization=False,
+                 distance_marginalization_lookup_table=None):
         GravitationalWaveTransient.__init__(
             self, interferometers=interferometers,
             waveform_generator=waveform_generator, priors=priors,
             distance_marginalization=distance_marginalization,
-            phase_marginalization=phase_marginalization)
+            phase_marginalization=phase_marginalization,
+            distance_marginalization_lookup_table=distance_marginalization_lookup_table)
 
         self.time_samples = np.arange(
             self.priors['geocent_time'].minimum - 0.045,
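
A sketch of sharing the distance-marginalisation lookup table between runs; `interferometers`, `waveform_generator`, and `priors` stand in for fully configured bilby objects, and the filename is illustrative.

```python
# On the first run the table is built and saved to the named file; on
# later runs it is loaded and checked against the distance prior.
import numpy as np
import bilby

likelihood = bilby.gw.likelihood.GravitationalWaveTransient(
    interferometers=interferometers,
    waveform_generator=waveform_generator, priors=priors,
    distance_marginalization=True,
    distance_marginalization_lookup_table='my_lookup_table.npz')

# the cached file is a plain npz archive with these keys
table = dict(np.load('my_lookup_table.npz'))
print(sorted(table))
# ['distance_array', 'lookup_table', 'prior_array', 'reference_distance']
```
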
diff --git a/examples/injection_examples/marginalized_likelihood.py b/examples/injection_examples/marginalized_likelihood.py
index f5538e074ac94d1b1cd108fdf601d6d8b38897e0..c6ab3362ca98a57a5a423c62c44f78ab54148493 100644
--- a/examples/injection_examples/marginalized_likelihood.py
+++ b/examples/injection_examples/marginalized_likelihood.py
@@ -48,7 +48,7 @@ priors['geocent_time'] = bilby.core.prior.Uniform(
     name='geocent_time', latex_label='$t_c$', unit='$s$')
 # These parameters will not be sampled
 for key in ['a_1', 'a_2', 'tilt_1', 'tilt_2', 'phi_12', 'phi_jl', 'theta_jn', 'ra',
-            'dec']:
+            'dec', 'mass_1', 'mass_2']:
     priors[key] = injection_parameters[key]
 
 # Initialise GravitationalWaveTransient
diff --git a/examples/other_examples/grid_example.py b/examples/other_examples/grid_example.py
new file mode 100644
index 0000000000000000000000000000000000000000..af38eb259aab5feda4e726bbec49397cde018e8f
--- /dev/null
+++ b/examples/other_examples/grid_example.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+"""
+An example of how to use bilby to perform parameter estimation for
+non-gravitational-wave data. In this case, fitting a linear function to
+data with background Gaussian noise.
+
+"""
+from __future__ import division
+import bilby
+import numpy as np
+import matplotlib.pyplot as plt
+
+# A few simple setup steps
+label = 'grid_example'
+outdir = 'outdir'
+bilby.utils.check_directory_exists_and_if_not_mkdir(outdir)
+
+
+# First, we define our "signal model", in this case a simple linear function
+def model(time, m, c):
+    return time * m + c
+
+
+# Now we define the injection parameters which we make simulated data with
+injection_parameters = dict(m=2.5, c=0.0)
+
+# For this example, we'll use standard Gaussian noise
+
+# These lines of code generate the fake data. Note the ** just unpacks the
+# contents of the injection_parameters when calling the model function.
+sampling_frequency = 10
+time_duration = 10
+time = np.arange(0, time_duration, 1 / sampling_frequency)
+N = len(time)
+sigma = np.random.normal(1, 0.01, N)
+data = model(time, **injection_parameters) + np.random.normal(0, sigma, N)
+
+# We quickly plot the data to check it looks sensible
+fig, ax = plt.subplots()
+ax.plot(time, data, 'o', label='data')
+ax.plot(time, model(time, **injection_parameters), '--r', label='signal')
+ax.set_xlabel('time')
+ax.set_ylabel('y')
+ax.legend()
+fig.savefig('{}/{}_data.png'.format(outdir, label))
+plt.close()
+
+# Now let's instantiate a version of our GaussianLikelihood, giving it
+# the time, data and signal model
+likelihood = bilby.likelihood.GaussianLikelihood(time, data, model, sigma)
+
+# From hereon, the syntax is exactly equivalent to other bilby examples
+# We make a prior
+priors = dict()
+priors['m'] = bilby.core.prior.Uniform(0, 5, 'm')
+priors['c'] = bilby.core.prior.Uniform(-2, 2, 'c')
+
+grid = bilby.core.grid.Grid(likelihood=likelihood, priors=priors)
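
The new example ends as soon as the `Grid` is constructed; a possible continuation (not part of this patch), reusing the `plt`, `outdir`, and `label` already defined in the script:

```python
# Hypothetical continuation of grid_example.py: plot the 1D marginal
# posteriors and print the evidence estimated from the grid.
for key in ['m', 'c']:
    fig, ax = plt.subplots()
    ax.plot(grid.sample_points[key],
            grid.marginalize_posterior(not_parameter=key))
    ax.set_xlabel(key)
    fig.savefig('{}/{}_{}_posterior.png'.format(outdir, label, key))
    plt.close()

print('Grid ln evidence: {}'.format(grid.ln_evidence))
```
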
diff --git a/examples/other_examples/linear_regression_grid.py b/examples/other_examples/linear_regression_grid.py
new file mode 100644
index 0000000000000000000000000000000000000000..6549c860a3cc9ddb6862a0426c153894347a5682
--- /dev/null
+++ b/examples/other_examples/linear_regression_grid.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+"""
+An example of how to use bilby to perform parameter estimation for
+fitting a linear function to data with background Gaussian noise.
+This will compare the output of using a stochastic sampling method
+to evaluating the posterior on a grid.
+"""
+from __future__ import division
+import numpy as np
+import matplotlib.pyplot as plt
+
+import bilby
+
+# A few simple setup steps
+label = 'linear_regression_grid'
+outdir = 'outdir'
+bilby.utils.check_directory_exists_and_if_not_mkdir(outdir)
+
+
+# First, we define our "signal model", in this case a simple linear function
+def model(time, m, c):
+    return time * m + c
+
+
+# Now we define the injection parameters which we make simulated data with
+injection_parameters = dict()
+injection_parameters['c'] = 0.2
+injection_parameters['m'] = 0.5
+
+# For this example, we'll use standard Gaussian noise
+
+# These lines of code generate the fake data. Note the ** just unpacks the
+# contents of the injection_parameters when calling the model function.
+sampling_frequency = 10
+time_duration = 10
+time = np.arange(0, time_duration, 1 / sampling_frequency)
+N = len(time)
+sigma = 3.
+data = model(time, **injection_parameters) + np.random.normal(0, sigma, N)
+
+# We quickly plot the data to check it looks sensible
+fig, ax = plt.subplots()
+ax.plot(time, data, 'o', label='data')
+ax.plot(time, model(time, **injection_parameters), '--r', label='signal')
+ax.set_xlabel('time')
+ax.set_ylabel('y')
+ax.legend()
+fig.savefig('{}/{}_data.png'.format(outdir, label))
+
+# Now let's instantiate a version of our GaussianLikelihood, giving it
+# the time, data and signal model
+likelihood = bilby.likelihood.GaussianLikelihood(time, data, model, sigma)
+
+# From hereon, the syntax is exactly equivalent to other bilby examples
+# We make a prior
+priors = bilby.core.prior.PriorDict()
+priors['m'] = bilby.core.prior.Uniform(0, 5, 'm')
+priors['c'] = bilby.core.prior.Uniform(-2, 2, 'c')
+
+# And run sampler
+result = bilby.run_sampler(
+    likelihood=likelihood, priors=priors, sampler='dynesty', nlive=500,
+    sample='unif', injection_parameters=injection_parameters, outdir=outdir,
+    label=label)
+fig = result.plot_corner(parameters=injection_parameters, save=False)
+
+grid = bilby.core.grid.Grid(likelihood, priors, grid_size={'m': 200, 'c': 100})
+
+# overplot the grid estimates
+grid_evidence = grid.log_evidence
+axes = fig.get_axes()
+axes[0].plot(grid.sample_points['c'], np.exp(grid.marginalize_ln_posterior(
+    not_parameter='c') - grid_evidence), 'k--')
+axes[3].plot(grid.sample_points['m'], np.exp(grid.marginalize_ln_posterior(
+    not_parameter='m') - grid_evidence), 'k--')
+axes[2].contour(grid.mesh_grid[1], grid.mesh_grid[0],
+                np.exp(grid.ln_posterior - np.max(grid.ln_posterior)))
+
+fig.savefig('{}/{}_corner.png'.format(outdir, label), dpi=300)
+
+# compare evidences
+print('Dynesty log(evidence): {}'.format(result.log_evidence))
+print('Grid log(evidence): {}'.format(grid_evidence))
diff --git a/examples/other_examples/multidimensional_gaussian.py b/examples/other_examples/multidimensional_gaussian.py
new file mode 100644
index 0000000000000000000000000000000000000000..451afe3e06b8c6904de63b4991a34225d850b14c
--- /dev/null
+++ b/examples/other_examples/multidimensional_gaussian.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python
+"""
+Testing the recovery of a multi-dimensional
+Gaussian with zero mean and unit variance
+"""
+from __future__ import division
+import bilby
+import numpy as np
+
+# A few simple setup steps
+label = "multidim_gaussian"
+outdir = "outdir"
+
+# Generating simulated data: generating n-dim Gaussian
+
+dim = 5
+mean = np.zeros(dim)
+cov = np.eye(dim)  # unit variance; np.ones((dim, dim)) would be singular
+data = np.random.multivariate_normal(mean, cov, 100)
+
+
+class MultidimGaussianLikelihood(bilby.Likelihood):
+    """
+        A multivariate Gaussian likelihood
+        with known analytic solution.
+
+        Parameters
+        ----------
+        data: array_like
+            The data to analyse
+        dim: int
+            The number of dimensions
+        """
+
+    def __init__(self, data, dim):
+        self.dim = dim
+        self.data = np.array(data)
+        self.N = len(data)
+        self.parameters = {}
+
+    def log_likelihood(self):
+        mu = np.array(
+            [self.parameters["mu_{0}".format(i)] for i in range(self.dim)]
+        )
+        sigma = np.array(
+            [self.parameters["sigma_{0}".format(i)] for i in range(self.dim)]
+        )
+        p = np.array([(self.data[n, :] - mu) / sigma for n in range(self.N)])
+        return np.sum(
+            -0.5 * (np.sum(p ** 2) + self.N * np.log(2 * np.pi * sigma ** 2))
+        )
+
+
+likelihood = MultidimGaussianLikelihood(data, dim)
+priors = bilby.core.prior.PriorDict()
+priors.update(
+    {
+        "mu_{0}".format(i): bilby.core.prior.Uniform(-5, 5, "mu")
+        for i in range(dim)
+    }
+)
+priors.update(
+    {
+        "sigma_{0}".format(i): bilby.core.prior.LogUniform(0.2, 5, "sigma")
+        for i in range(dim)
+    }
+)
+# And run sampler
+# The plot arg produces trace_plots useful for diagnostics
+result = bilby.run_sampler(
+    likelihood=likelihood,
+    priors=priors,
+    sampler="dynesty",
+    npoints=500,
+    walks=10,
+    outdir=outdir,
+    label=label,
+    plot=True,
+)
+result.plot_corner()
diff --git a/setup.py b/setup.py
index 02df8d39a3fba50000228726d142d4773a36f344..c5cc1b7198e0cf7903d1892ffa00bd6500c29c4b 100644
--- a/setup.py
+++ b/setup.py
@@ -57,7 +57,7 @@ def readfile(filename):
     return filecontents
 
 
-VERSION = '0.4.1'
+VERSION = '0.4.2'
 version_file = write_version_file(VERSION)
 long_description = get_long_description()
 
diff --git a/test/prior_test.py b/test/prior_test.py
index 71c0c4fde6763e58612516f494bc3d16cebc1c5a..e6d2bb4fe0af4a36de7f27c8cb577b172c3de7d2 100644
--- a/test/prior_test.py
+++ b/test/prior_test.py
@@ -163,6 +163,7 @@ class TestPriorClasses(unittest.TestCase):
             bilby.core.prior.Lorentzian(name='test', unit='unit', alpha=0, beta=1),
             bilby.core.prior.Gamma(name='test', unit='unit', k=1, theta=1),
             bilby.core.prior.ChiSquared(name='test', unit='unit', nu=2),
+            bilby.core.prior.FermiDirac(name='test', unit='unit', sigma=1., r=10.),
             bilby.gw.prior.AlignedSpin(name='test', unit='unit'),
             bilby.core.prior.MultivariateGaussian(mvg=mvg, name='testa', unit='unit'),
             bilby.core.prior.MultivariateGaussian(mvg=mvg, name='testb', unit='unit'),
@@ -353,6 +354,13 @@ class TestPriorClasses(unittest.TestCase):
         self.assertTrue(np.allclose(np.diag(mvg.covs[0]), np.square(sigma)))
         self.assertTrue(np.allclose(np.diag(np.fliplr(mvg.covs[0])), 2.*np.ones(2)))
 
+    def test_fermidirac_fail(self):
+        with self.assertRaises(ValueError):
+            bilby.core.prior.FermiDirac(name='test', unit='unit', sigma=1.)
+
+        with self.assertRaises(ValueError):
+            bilby.core.prior.FermiDirac(name='test', unit='unit', sigma=1., mu=-1)
+
     def test_probability_in_domain(self):
         """Test that the prior probability is non-negative in domain of validity and zero outside."""
         for prior in self.priors:
@@ -397,6 +405,8 @@ class TestPriorClasses(unittest.TestCase):
                 domain = np.linspace(0., 1e2, 5000)
             elif isinstance(prior, bilby.core.prior.Logistic):
                 domain = np.linspace(-1e2, 1e2, 1000)
+            elif isinstance(prior, bilby.core.prior.FermiDirac):
+                domain = np.linspace(0., 1e2, 1000)
             else:
                 domain = np.linspace(prior.minimum, prior.maximum, 1000)
             self.assertAlmostEqual(np.trapz(prior.prob(domain), domain), 1, 3)
@@ -456,7 +466,8 @@ class TestPriorClasses(unittest.TestCase):
                     bilby.core.prior.HalfGaussian, bilby.core.prior.LogNormal,
                     bilby.core.prior.Exponential, bilby.core.prior.StudentT,
                     bilby.core.prior.Logistic, bilby.core.prior.Cauchy,
-                    bilby.core.prior.Gamma, bilby.core.prior.MultivariateGaussian)):
+                    bilby.core.prior.Gamma, bilby.core.prior.MultivariateGaussian,
+                    bilby.core.prior.FermiDirac)):
                 continue
             prior.maximum = (prior.maximum + prior.minimum) / 2
             self.assertTrue(max(prior.sample(10000)) < prior.maximum)
@@ -468,7 +479,8 @@ class TestPriorClasses(unittest.TestCase):
                     bilby.core.prior.HalfGaussian, bilby.core.prior.LogNormal,
                     bilby.core.prior.Exponential, bilby.core.prior.StudentT,
                     bilby.core.prior.Logistic, bilby.core.prior.Cauchy,
-                    bilby.core.prior.Gamma, bilby.core.prior.MultivariateGaussian)):
+                    bilby.core.prior.Gamma, bilby.core.prior.MultivariateGaussian,
+                    bilby.core.prior.FermiDirac)):
                 continue
             prior.minimum = (prior.maximum + prior.minimum) / 2
             self.assertTrue(min(prior.sample(10000)) > prior.minimum)
diff --git a/test/result_test.py b/test/result_test.py
index 1d0dbefee70df890edbec43feb5be8eed3d496b5..7648b9708948d9c293e4ae0a3df268d46b74d37b 100644
--- a/test/result_test.py
+++ b/test/result_test.py
@@ -194,6 +194,26 @@ class TestResult(unittest.TestCase):
         self.assertEqual(self.result.priors['c'], loaded_result.priors['c'])
         self.assertEqual(self.result.priors['d'], loaded_result.priors['d'])
 
+    def test_save_and_load_gzip(self):
+        self.result.save_to_file(gzip=True)
+        loaded_result = bilby.core.result.read_in_result(
+            outdir=self.result.outdir, label=self.result.label, gzip=True)
+        self.assertTrue(np.array_equal(
+            self.result.posterior.sort_values(by=['x']),
+            loaded_result.posterior.sort_values(by=['x'])))
+        self.assertTrue(self.result.fixed_parameter_keys == loaded_result.fixed_parameter_keys)
+        self.assertTrue(self.result.search_parameter_keys == loaded_result.search_parameter_keys)
+        self.assertEqual(self.result.meta_data, loaded_result.meta_data)
+        self.assertEqual(self.result.injection_parameters, loaded_result.injection_parameters)
+        self.assertEqual(self.result.log_evidence, loaded_result.log_evidence)
+        self.assertEqual(self.result.log_noise_evidence, loaded_result.log_noise_evidence)
+        self.assertEqual(self.result.log_evidence_err, loaded_result.log_evidence_err)
+        self.assertEqual(self.result.log_bayes_factor, loaded_result.log_bayes_factor)
+        self.assertEqual(self.result.priors['x'], loaded_result.priors['x'])
+        self.assertEqual(self.result.priors['y'], loaded_result.priors['y'])
+        self.assertEqual(self.result.priors['c'], loaded_result.priors['c'])
+        self.assertEqual(self.result.priors['d'], loaded_result.priors['d'])
+
     def test_save_and_dont_overwrite_default(self):
         shutil.rmtree(
             '{}/{}_result.json.old'.format(self.result.outdir, self.result.label),