diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..d3f9cc29f023ec2aafd30eb5af8eeaec352caa09
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,16 @@
+.coverage
+build/
+dist/
+docs/_*
+tupak.egg-info/
+MANIFEST
+*.pyc
+*.png
+*.h5
+*.h5.old
+*.txt
+*.log
+*.dat
+*.version
+*.ipynb_checkpoints
+outdir/*
\ No newline at end of file
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 9a420d27cceb21f2758529f15f45a2f94f8bd3ee..786fca950b3bdb34f636becd514bc08adde636df 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -38,8 +38,13 @@ python-3:
     - pip install -r optional_requirements.txt
     - pip install 'coverage>=4.5'
     - pip install coverage-badge
+    - pip install flake8
   script:
     - python setup.py install
+
+    # Run flake8
+    - flake8 .
+
     # Run tests and collect coverage data
     - coverage --version
     - coverage erase
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 67da4e8b2b65026adcb2fbfe46b0e535b65d0a4b..5bf71af4c39ce868f2627bcc57b343e833d43294 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -31,6 +31,17 @@ Changes currently on master, but not under a tag.
 - Clean up of `detectors.py`: adds an `InterferometerStrainData` to handle strain data and `InterferometerSet` to handle multiple interferometers. All data setting should primarily be done through the `Interferometer.set_strain_data..` methods.
 - Fix the comments and units of `nfft` and `infft` and general improvement to documentation of data.
 - Fixed a bug in create_time_series
+- Enable marginalisation over calibration uncertainty in Interferometer data.
+- Fixed the normalisation of the marginalised `GravitationalWaveTransient` likelihood.
+- Fixed a bug in the detector response.
+- Specifying detectors by name from the default command line options has been removed.
+- The prior on polarisation phase has been reduced to [0, pi].
+- More prior distributions added.
+- More samplers supported, pymc3
+- More core likelihoods, Poisson, Student's-t
+- Add support for eccentric BBH
+- Result print function fixed
+- Add snr functions as methods of `Interferometer`
 
 ## [0.2.0] 2018-06-17
 
diff --git a/cli_tupak/plot_multiple_posteriors.py b/cli_tupak/plot_multiple_posteriors.py
index e63b24834e0d4402f10daed059e78d20dfb0bf93..9db4d1320122391ad6f0eeb7679bd5536a5ea94b 100644
--- a/cli_tupak/plot_multiple_posteriors.py
+++ b/cli_tupak/plot_multiple_posteriors.py
@@ -4,7 +4,7 @@ import argparse
 def setup_command_line_args():
     parser = argparse.ArgumentParser(
         description="Plot corner plots from results files")
-    parser.add_argument("-r", "--results",  nargs='+',
+    parser.add_argument("-r", "--results", nargs='+',
                         help="List of results files to use.")
     parser.add_argument("-f", "--filename", default=None,
                         help="Output file name.")
diff --git a/examples/injection_examples/binary_neutron_star_example.py b/examples/injection_examples/binary_neutron_star_example.py
new file mode 100644
index 0000000000000000000000000000000000000000..631b2b532b6bdde40ce975687f1be22712a413f0
--- /dev/null
+++ b/examples/injection_examples/binary_neutron_star_example.py
@@ -0,0 +1,81 @@
+#!/bin/python
+"""
+Tutorial to demonstrate running parameter estimation on a binary neutron star
+system taking into account tidal deformabilities.
+
+This example estimates the masses using a uniform prior in both component masses
+and also estimates the tidal deformabilities using a uniform prior in both
+tidal deformabilities
+"""
+
+from __future__ import division, print_function
+
+import numpy as np
+
+import tupak
+
+# Specify the output directory and the name of the simulation.
+outdir = 'outdir'
+label = 'bns_example'
+tupak.core.utils.setup_logger(outdir=outdir, label=label)
+
+# Set up a random seed for result reproducibility.  This is optional!
+np.random.seed(88170235)
+
+# We are going to inject a binary neutron star waveform.  We first establish a
+# dictionary of parameters that includes all of the different waveform
+# parameters, including masses of the two neutron stars (mass_1, mass_2),
+# spins of both neutron stars (a_1, a_2), etc.
+injection_parameters = dict(
+    mass_1=1.5, mass_2=1.3, a_1=0.0, a_2=0.0, luminosity_distance=50.,
+    iota=0.4, psi=2.659, phase=1.3, geocent_time=1126259642.413,
+    ra=1.375, dec=-1.2108, lambda_1=400, lambda_2=450)
+
+# Set the duration and sampling frequency of the data segment that we're going
+# to inject the signal into. For the
+# TaylorF2 waveform, we cut the signal close to the isco frequency
+duration = 8
+sampling_frequency = 2*1570.
+start_time = injection_parameters['geocent_time'] + 2 - duration
+
+# Fixed arguments passed into the source model. The analysis starts at 40 Hz.
+waveform_arguments = dict(waveform_approximant='TaylorF2',
+                          reference_frequency=50., minimum_frequency=40.0)
+
+# Create the waveform_generator using a LAL Binary Neutron Star source function
+waveform_generator = tupak.gw.WaveformGenerator(
+    duration=duration, sampling_frequency=sampling_frequency,
+    frequency_domain_source_model=tupak.gw.source.lal_binary_neutron_star,
+    waveform_arguments=waveform_arguments)
+
+# Set up interferometers.  In this case we'll use three interferometers
+# (LIGO-Hanford (H1), LIGO-Livingston (L1), and Virgo (V1)).
+# These default to their design sensitivity and start at 40 Hz.
+interferometers = tupak.gw.detector.InterferometerList(['H1', 'L1', 'V1'])
+for interferometer in interferometers:
+    interferometer.minimum_frequency = 40
+interferometers.set_strain_data_from_power_spectral_densities(
+    sampling_frequency=sampling_frequency, duration=duration,
+    start_time=start_time)
+interferometers.inject_signal(parameters=injection_parameters,
+                              waveform_generator=waveform_generator)
+
+priors = tupak.gw.prior.BNSPriorSet()
+for key in ['a_1', 'a_2', 'psi', 'geocent_time', 'ra', 'dec',
+            'iota', 'luminosity_distance', 'phase']:
+    priors[key] = injection_parameters[key]
+    
+# Initialise the likelihood by passing in the interferometer data (IFOs)
+# and the waveform generator
+likelihood = tupak.gw.GravitationalWaveTransient(
+    interferometers=interferometers, waveform_generator=waveform_generator,
+    time_marginalization=False, phase_marginalization=False,
+    distance_marginalization=False, prior=priors)
+
+# Run sampler.  In this case we're going to use the `nestle` sampler
+result = tupak.run_sampler(
+    likelihood=likelihood, priors=priors, sampler='nestle', npoints=1000,
+    injection_parameters=injection_parameters, outdir=outdir, label=label)
+
+result.plot_corner()
+
diff --git a/examples/injection_examples/using_gwin.py b/examples/injection_examples/using_gwin.py
new file mode 100644
index 0000000000000000000000000000000000000000..0dc3a315953a62328d3962e4158758b34e985d65
--- /dev/null
+++ b/examples/injection_examples/using_gwin.py
@@ -0,0 +1,92 @@
+#!/bin/python
+"""
+An example of how to use tupak with `gwin` (https://github.com/gwastro/gwin) to
+perform CBC parameter estimation.
+
+To run this example, it is sufficient to use the pip-installable pycbc package,
+but gwin must be installed from source. You can install this by cloning the
+repository (https://github.com/gwastro/gwin) and running
+
+$ python setup.py install
+
+A practical difference between gwin and tupak is that while fixed parameters
+are specified via the prior in tupak, in gwin, these are fixed at instantiation
+of the model. So, in the following, we only create priors for the parameters
+to be searched over.
+
+"""
+from __future__ import division, print_function
+import numpy as np
+import tupak
+
+import gwin
+from pycbc import psd as pypsd
+from pycbc.waveform.generator import (FDomainDetFrameGenerator,
+                                      FDomainCBCGenerator)
+
+label = 'using_gwin'
+
+# Search priors
+priors = dict()
+priors['distance'] = tupak.core.prior.Uniform(500, 2000, 'distance')
+priors['polarization'] = tupak.core.prior.Uniform(0, np.pi, 'iota')
+
+# Data variables
+seglen = 4
+sample_rate = 2048
+N = seglen * sample_rate / 2 + 1
+fmin = 30.
+
+# Injected signal variables
+injection_parameters = dict(mass1=38.6, mass2=29.3, spin1z=0, spin2z=0,
+                            tc=0, ra=3.1, dec=1.37, polarization=2.76,
+                            distance=1500)
+
+# These lines figure out which parameters are to be searched over
+variable_parameters = priors.keys()
+fixed_parameters = injection_parameters.copy()
+for key in priors:
+    fixed_parameters.pop(key)
+
+# These lines generate the `model` object - see https://gwin.readthedocs.io/en/latest/api/gwin.models.gaussian_noise.html
+generator = FDomainDetFrameGenerator(
+    FDomainCBCGenerator, 0.,
+    variable_args=variable_parameters, detectors=['H1', 'L1'],
+    delta_f=1. / seglen, f_lower=fmin,
+    approximant='IMRPhenomPv2', **fixed_parameters)
+signal = generator.generate(**injection_parameters)
+psd = pypsd.aLIGOZeroDetHighPower(int(N), 1. / seglen, 20.)
+psds = {'H1': psd, 'L1': psd}
+model = gwin.models.GaussianNoise(
+    variable_parameters, signal, generator, fmin, psds=psds)
+model.update(**injection_parameters)
+
+
+# This creates a dummy class to convert the model into a tupak.likelihood object
+class GWINLikelihood(tupak.core.likelihood.Likelihood):
+    def __init__(self, model):
+        """ A likelihood to wrap around GWIN model objects
+
+        Parameters
+        ----------
+        model: gwin.model.GaussianNoise
+            A gwin model
+
+        """
+        self.model = model
+        self.parameters = {x: None for x in self.model.variable_params}
+
+    def log_likelihood(self):
+        self.model.update(**self.parameters)
+        return self.model.loglikelihood
+
+
+likelihood = GWINLikelihood(model)
+
+
+# Now run the inference
+result = tupak.run_sampler(
+    likelihood=likelihood, priors=priors, sampler='dynesty', npoints=500,
+    label=label)
+result.plot_corner()
+
diff --git a/examples/other_examples/linear_regression_pymc3_custom_likelihood.py b/examples/other_examples/linear_regression_pymc3_custom_likelihood.py
index 3b3ab8af138ead0dc1428e1737eb46ed1ea255ca..4d87b8110bf6c4490378c29b61df2e81656df6b9 100644
--- a/examples/other_examples/linear_regression_pymc3_custom_likelihood.py
+++ b/examples/other_examples/linear_regression_pymc3_custom_likelihood.py
@@ -99,7 +99,7 @@ class GaussianLikelihoodPyMC3(tupak.Likelihood):
         with sampler.pymc3_model:
             mdist = sampler.pymc3_priors['m']
             cdist = sampler.pymc3_priors['c']
-            
+
             mu = model(time, mdist, cdist)
 
             # set the likelihood distribution
diff --git a/optional_requirements.txt b/optional_requirements.txt
index c002738f5c87d5ff8f6533c0f94ae2f857d3df96..3e244261c52caa23509b0a05653db87c968bcbaa 100644
--- a/optional_requirements.txt
+++ b/optional_requirements.txt
@@ -1,4 +1,4 @@
 astropy
 lalsuite<=6.48.1.dev20180823
 gwpy
-theano
\ No newline at end of file
+theano
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..b3193025616f3b927a9577042f31dac094ad95df
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,4 @@
+[flake8]
+exclude = .git,docs,build,dist,test,examples,*__init__.py
+max-line-length = 160
+ignore = E129
diff --git a/setup.py b/setup.py
index 4fcb2bb002c5a39c875f5b2909e1f01ba6ccfd58..8eaf6904d10566f91386bc8148ce999bada4c1ab 100644
--- a/setup.py
+++ b/setup.py
@@ -22,8 +22,8 @@ def write_version_file(version):
     try:
         git_log = subprocess.check_output(
             ['git', 'log', '-1', '--pretty=%h %ai']).decode('utf-8')
-        git_diff = (subprocess.check_output(['git', 'diff', '.'])
-                    + subprocess.check_output(
+        git_diff = (subprocess.check_output(['git', 'diff', '.']) +
+                    subprocess.check_output(
                         ['git', 'diff', '--cached', '.'])).decode('utf-8')
         if git_diff == '':
             git_status = '(CLEAN) ' + git_log
@@ -50,7 +50,7 @@ def get_long_description():
     return long_description
 
 
-version = '0.2.1'
+version = '0.2.2'
 version_file = write_version_file(version)
 long_description = get_long_description()
 
diff --git a/test/calibration_tests.py b/test/calibration_tests.py
index b481e2936ed88ec6d4cf68136c495f0ef4d25539..dce70e53f192fb1a34fbee6dfc6319d82ff4218e 100644
--- a/test/calibration_tests.py
+++ b/test/calibration_tests.py
@@ -11,6 +11,11 @@ class TestBaseClass(unittest.TestCase):
     def tearDown(self):
         del self.model
 
+    def test_repr(self):
+        expected = 'Recalibrate(prefix={})'.format('\'recalib_\'')
+        actual = repr(self.model)
+        self.assertEqual(expected, actual)
+
     def test_calibration_factor(self):
         frequency_array = np.linspace(20, 1024, 1000)
         cal_factor = self.model.get_calibration_factor(frequency_array)
@@ -20,14 +25,22 @@ class TestBaseClass(unittest.TestCase):
 class TestCubicSpline(unittest.TestCase):
 
     def setUp(self):
+        self.prefix = 'recalib_'
+        self.minimum_frequency = 20
+        self.maximum_frequency = 1024
+        self.n_points = 5
         self.model = calibration.CubicSpline(
-            prefix='recalib_', minimum_frequency=20, maximum_frequency=1024,
-            n_points=5)
+            prefix=self.prefix, minimum_frequency=self.minimum_frequency,
+            maximum_frequency=self.maximum_frequency, n_points=self.n_points)
         self.parameters = {'recalib_{}_{}'.format(param, ii): 0.0
                            for ii in range(5)
                            for param in ['amplitude', 'phase']}
 
     def tearDown(self):
+        del self.prefix
+        del self.minimum_frequency
+        del self.maximum_frequency
+        del self.n_points
         del self.model
         del self.parameters
 
@@ -37,6 +50,12 @@ class TestCubicSpline(unittest.TestCase):
                                                        **self.parameters)
         assert np.alltrue(cal_factor.real == np.ones_like(frequency_array))
 
+    def test_repr(self):
+        expected = 'CubicSpline(prefix=\'{}\', minimum_frequency={}, maximum_frequency={}, n_points={})'\
+            .format(self.prefix, self.minimum_frequency, self.maximum_frequency, self.n_points)
+        actual = repr(self.model)
+        self.assertEqual(expected, actual)
+
 
 class TestCubicSplineRequiresFourNodes(unittest.TestCase):
 
diff --git a/test/conversion_tests.py b/test/conversion_tests.py
index 3130af9534630b925550bc4ca3082ed9f63cfd11..f0e4409f2d360c93ba93a96f6ac33c7b4089dc88 100644
--- a/test/conversion_tests.py
+++ b/test/conversion_tests.py
@@ -8,14 +8,35 @@ import numpy as np
 class TestBasicConversions(unittest.TestCase):
 
     def setUp(self):
-        self.mass_1 = 20
-        self.mass_2 = 10
-        self.mass_ratio = 0.5
-        self.total_mass = 30
-        self.chirp_mass = 200**0.6 / 30**0.2
-        self.symmetric_mass_ratio = 2/9
+        self.mass_1 = 1.4
+        self.mass_2 = 1.3
+        self.mass_ratio = 13/14
+        self.total_mass = 2.7
+        self.chirp_mass = (1.4 * 1.3)**0.6 / 2.7**0.2
+        self.symmetric_mass_ratio = (1.4 * 1.3) / 2.7**2
         self.cos_angle = -1
         self.angle = np.pi
+        self.lambda_1 = 300
+        self.lambda_2 = 300 * (14 / 13)**5
+        self.lambda_tilde = 8 / 13 * (
+            (1 + 7 * self.symmetric_mass_ratio
+             - 31 * self.symmetric_mass_ratio**2)
+            * (self.lambda_1 + self.lambda_2)
+            + (1 - 4 * self.symmetric_mass_ratio)**0.5
+            * (1 + 9 * self.symmetric_mass_ratio
+               - 11 * self.symmetric_mass_ratio**2)
+            * (self.lambda_1 - self.lambda_2)
+        )
+        self.delta_lambda = 1 / 2 * (
+                (1 - 4 * self.symmetric_mass_ratio)**0.5
+                * (1 - 13272 / 1319 * self.symmetric_mass_ratio
+                   + 8944 / 1319 * self.symmetric_mass_ratio**2)
+                * (self.lambda_1 + self.lambda_2)
+                + (1 - 15910 / 1319 * self.symmetric_mass_ratio
+                   + 32850 / 1319 * self.symmetric_mass_ratio**2
+                   + 3380 / 1319 * self.symmetric_mass_ratio**3)
+                * (self.lambda_1 - self.lambda_2)
+        )
 
     def tearDown(self):
         del self.mass_1
@@ -27,7 +48,8 @@ class TestBasicConversions(unittest.TestCase):
 
     def test_total_mass_and_mass_ratio_to_component_masses(self):
         mass_1, mass_2 = tupak.gw.conversion.total_mass_and_mass_ratio_to_component_masses(self.mass_ratio, self.total_mass)
-        self.assertTupleEqual((mass_1, mass_2), (self.mass_1, self.mass_2))
+        self.assertTrue(all([abs(mass_1 - self.mass_1) < 1e-5,
+                             abs(mass_2 - self.mass_2) < 1e-5]))
 
     def test_symmetric_mass_ratio_to_mass_ratio(self):
         mass_ratio = tupak.gw.conversion.symmetric_mass_ratio_to_mass_ratio(self.symmetric_mass_ratio)
@@ -61,6 +83,20 @@ class TestBasicConversions(unittest.TestCase):
         mass_ratio = tupak.gw.conversion.mass_1_and_chirp_mass_to_mass_ratio(self.mass_1, self.chirp_mass)
         self.assertAlmostEqual(self.mass_ratio, mass_ratio)
 
+    def test_lambda_tilde_to_lambda_1_lambda_2(self):
+        lambda_1, lambda_2 =\
+            tupak.gw.conversion.lambda_tilde_to_lambda_1_lambda_2(
+                self.lambda_tilde, self.mass_1, self.mass_2)
+        self.assertTrue(all([abs(self.lambda_1 - lambda_1) < 1e-5,
+                             abs(self.lambda_2 - lambda_2) < 1e-5]))
+
+    def test_lambda_tilde_delta_lambda_to_lambda_1_lambda_2(self):
+        lambda_1, lambda_2 =\
+            tupak.gw.conversion.lambda_tilde_delta_lambda_to_lambda_1_lambda_2(
+                self.lambda_tilde, self.delta_lambda, self.mass_1, self.mass_2)
+        self.assertTrue(all([abs(self.lambda_1 - lambda_1) < 1e-5,
+                             abs(self.lambda_2 - lambda_2) < 1e-5]))
+
 
 class TestConvertToLALBBHParams(unittest.TestCase):
 
diff --git a/test/detector_tests.py b/test/detector_tests.py
index 4f43d9c12932eb4667acfddd630f41f8dca029b9..a69ca162378220c3149148d31784c46972f2f69c 100644
--- a/test/detector_tests.py
+++ b/test/detector_tests.py
@@ -4,7 +4,12 @@ import tupak
 import unittest
 import mock
 from mock import MagicMock
+from mock import patch
 import numpy as np
+import scipy.signal.windows
+import gwpy
+import os
+import logging
 
 
 class TestDetector(unittest.TestCase):
@@ -188,14 +193,6 @@ class TestDetector(unittest.TestCase):
             self.ifo.yarm_azimuth = 12
             self.assertEqual(self.ifo.detector_tensor, 0)
 
-    def test_amplitude_spectral_density_array(self):
-        self.ifo.power_spectral_density.power_spectral_density_interpolated = MagicMock(return_value=np.array([1, 4]))
-        self.assertTrue(np.array_equal(self.ifo.amplitude_spectral_density_array, np.array([1, 2])))
-
-    def test_power_spectral_density_array(self):
-        self.ifo.power_spectral_density.power_spectral_density_interpolated = MagicMock(return_value=np.array([1, 4]))
-        self.assertTrue(np.array_equal(self.ifo.power_spectral_density_array, np.array([1, 4])))
-
     def test_antenna_response_default(self):
         with mock.patch('tupak.gw.utils.get_polarization_tensor') as m:
             with mock.patch('numpy.einsum') as n:
@@ -307,6 +304,17 @@ class TestDetector(unittest.TestCase):
             self.assertTrue(np.array_equal(expected[0], actual[0]))  # array-like element has to be evaluated separately
             self.assertListEqual(expected[1], actual[1])
 
+    def test_repr(self):
+        expected = 'Interferometer(name=\'{}\', power_spectral_density={}, minimum_frequency={}, ' \
+                   'maximum_frequency={}, length={}, latitude={}, longitude={}, elevation={}, xarm_azimuth={}, ' \
+                   'yarm_azimuth={}, xarm_tilt={}, yarm_tilt={})' \
+            .format(self.name, self.power_spectral_density, float(self.minimum_frequency),
+                    float(self.maximum_frequency), float(self.length), float(self.latitude), float(self.longitude),
+                    float(self.elevation), float(self.xarm_azimuth), float(self.yarm_azimuth), float(self.xarm_tilt),
+                    float(self.yarm_tilt))
+        print(repr(self.ifo))
+        self.assertEqual(expected, repr(self.ifo))
+
 
 class TestInterferometerStrainData(unittest.TestCase):
 
@@ -419,6 +427,569 @@ class TestInterferometerStrainData(unittest.TestCase):
         self.assertTrue(np.all(
             self.ifosd.frequency_domain_strain == frequency_domain_strain * self.ifosd.frequency_mask))
 
+    def test_time_array_when_set(self):
+        test_array = np.array([1])
+        self.ifosd.time_array = test_array
+        self.assertTrue(test_array, self.ifosd.time_array)
+
+    @patch.object(tupak.core.utils, 'create_time_series')
+    def test_time_array_when_not_set(self, m):
+        self.ifosd.start_time = 3
+        self.ifosd.sampling_frequency = 1000
+        self.ifosd.duration = 5
+        m.return_value = 4
+        self.assertEqual(m.return_value, self.ifosd.time_array)
+        m.assert_called_with(sampling_frequency=self.ifosd.sampling_frequency,
+                             duration=self.ifosd.duration,
+                             starting_time=self.ifosd.start_time)
+
+    def test_time_array_without_sampling_frequency(self):
+        self.ifosd.sampling_frequency = None
+        self.ifosd.duration = 4
+        with self.assertRaises(ValueError):
+            test = self.ifosd.time_array
+
+    def test_time_array_without_duration(self):
+        self.ifosd.sampling_frequency = 4096
+        self.ifosd.duration = None
+        with self.assertRaises(ValueError):
+            test = self.ifosd.time_array
+
+    def test_frequency_array_when_set(self):
+        test_array = np.array([1])
+        self.ifosd.frequency_array = test_array
+        self.assertTrue(test_array, self.ifosd.frequency_array)
+
+    @patch.object(tupak.core.utils, 'create_frequency_series')
+    def test_time_array_when_not_set(self, m):
+        self.ifosd.sampling_frequency = 1000
+        self.ifosd.duration = 5
+        m.return_value = 4
+        self.assertEqual(m.return_value, self.ifosd.frequency_array)
+        m.assert_called_with(sampling_frequency=self.ifosd.sampling_frequency,
+                             duration=self.ifosd.duration)
+
+    def test_frequency_array_without_sampling_frequency(self):
+        self.ifosd.sampling_frequency = None
+        self.ifosd.duration = 4
+        with self.assertRaises(ValueError):
+            test = self.ifosd.frequency_array
+
+    def test_frequency_array_without_duration(self):
+        self.ifosd.sampling_frequency = 4096
+        self.ifosd.duration = None
+        with self.assertRaises(ValueError):
+            test = self.ifosd.frequency_array
+
+    def test_time_within_data_before(self):
+        self.ifosd.start_time = 3
+        self.ifosd.duration = 2
+        self.assertFalse(self.ifosd.time_within_data(2))
+
+    def test_time_within_data_during(self):
+        self.ifosd.start_time = 3
+        self.ifosd.duration = 2
+        self.assertTrue(self.ifosd.time_within_data(3))
+        self.assertTrue(self.ifosd.time_within_data(4))
+        self.assertTrue(self.ifosd.time_within_data(5))
+
+    def test_time_within_data_after(self):
+        self.ifosd.start_time = 3
+        self.ifosd.duration = 2
+        self.assertFalse(self.ifosd.time_within_data(6))
+
+    def test_time_domain_window_no_roll_off_no_alpha(self):
+        self.ifosd._time_domain_strain = np.array([3])
+        self.ifosd.duration = 5
+        self.ifosd.roll_off = 2
+        expected_window = scipy.signal.windows.tukey(len(self.ifosd._time_domain_strain), alpha=self.ifosd.alpha)
+        self.assertEqual(expected_window,
+                         self.ifosd.time_domain_window())
+        self.assertEqual(np.mean(expected_window ** 2), self.ifosd.window_factor)
+
+    def test_time_domain_window_sets_roll_off_directly(self):
+        self.ifosd._time_domain_strain = np.array([3])
+        self.ifosd.duration = 5
+        self.ifosd.roll_off = 2
+        expected_roll_off = 6
+        self.ifosd.time_domain_window(roll_off=expected_roll_off)
+        self.assertEqual(expected_roll_off, self.ifosd.roll_off)
+
+    def test_time_domain_window_sets_roll_off_indirectly(self):
+        self.ifosd._time_domain_strain = np.array([3])
+        self.ifosd.duration = 5
+        self.ifosd.roll_off = 2
+        alpha = 4
+        expected_roll_off = alpha * self.ifosd.duration / 2
+        self.ifosd.time_domain_window(alpha=alpha)
+        self.assertEqual(expected_roll_off, self.ifosd.roll_off)
+
+    def test_time_domain_strain_when_set(self):
+        expected_strain = 5
+        self.ifosd._time_domain_strain = expected_strain
+        self.assertEqual(expected_strain, self.ifosd.time_domain_strain)
+
+    @patch('tupak.core.utils.infft')
+    def test_time_domain_strain_from_frequency_domain_strain(self, m):
+        m.return_value = 5
+        self.ifosd.sampling_frequency = 200
+        self.ifosd.duration = 4
+        self.ifosd._frequency_domain_strain = self.ifosd.frequency_array
+        self.ifosd.sampling_frequency = 123
+        self.assertEqual(m.return_value, self.ifosd.time_domain_strain)
+
+    def test_time_domain_strain_not_set(self):
+        self.ifosd._time_domain_strain = None
+        self.ifosd._frequency_domain_strain = None
+        with self.assertRaises(ValueError):
+            test = self.ifosd.time_domain_strain
+
+    def test_frequency_domain_strain_when_set(self):
+        self.ifosd.sampling_frequency = 200
+        self.ifosd.duration = 4
+        expected_strain = self.ifosd.frequency_array * self.ifosd.frequency_mask
+        self.ifosd._frequency_domain_strain = expected_strain
+        self.assertTrue(np.array_equal(expected_strain,
+                                       self.ifosd.frequency_domain_strain))
+
+    @patch('tupak.core.utils.nfft')
+    def test_frequency_domain_strain_from_frequency_domain_strain(self, m):
+        self.ifosd.start_time = 0
+        self.ifosd.duration = 4
+        self.ifosd.sampling_frequency = 200
+        m.return_value = self.ifosd.frequency_array, self.ifosd.frequency_array
+        self.ifosd._time_domain_strain = self.ifosd.time_array
+        self.assertTrue(np.array_equal(self.ifosd.frequency_array * self.ifosd.frequency_mask,
+                                       self.ifosd.frequency_domain_strain))
+
+    def test_frequency_domain_strain_not_set(self):
+        self.ifosd._time_domain_strain = None
+        self.ifosd._frequency_domain_strain = None
+        with self.assertRaises(ValueError):
+            test = self.ifosd.frequency_domain_strain
+
+    def test_set_frequency_domain_strain(self):
+        self.ifosd.duration = 4
+        self.ifosd.sampling_frequency = 200
+        self.ifosd.frequency_domain_strain = np.ones(len(self.ifosd.frequency_array))
+        self.assertTrue(np.array_equal(np.ones(len(self.ifosd.frequency_array)),
+                                       self.ifosd._frequency_domain_strain))
+
+    def test_set_frequency_domain_strain_wrong_length(self):
+        self.ifosd.duration = 4
+        self.ifosd.sampling_frequency = 200
+        with self.assertRaises(ValueError):
+            self.ifosd.frequency_domain_strain = np.array([1])
+
+
+class TestInterferometerList(unittest.TestCase):
+
+    def setUp(self):
+        self.frequency_arrays = np.linspace(0, 4096, 4097)
+        self.name1 = 'name1'
+        self.name2 = 'name2'
+        self.power_spectral_density1 = MagicMock()
+        self.power_spectral_density1.get_noise_realisation = MagicMock(return_value=(self.frequency_arrays,
+                                                                                     self.frequency_arrays))
+        self.power_spectral_density2 = MagicMock()
+        self.power_spectral_density2.get_noise_realisation = MagicMock(return_value=(self.frequency_arrays,
+                                                                                     self.frequency_arrays))
+        self.minimum_frequency1 = 10
+        self.minimum_frequency2 = 10
+        self.maximum_frequency1 = 20
+        self.maximum_frequency2 = 20
+        self.length1 = 30
+        self.length2 = 30
+        self.latitude1 = 1
+        self.latitude2 = 1
+        self.longitude1 = 2
+        self.longitude2 = 2
+        self.elevation1 = 3
+        self.elevation2 = 3
+        self.xarm_azimuth1 = 4
+        self.xarm_azimuth2 = 4
+        self.yarm_azimuth1 = 5
+        self.yarm_azimuth2 = 5
+        self.xarm_tilt1 = 0.
+        self.xarm_tilt2 = 0.
+        self.yarm_tilt1 = 0.
+        self.yarm_tilt2 = 0.
+        # noinspection PyTypeChecker
+        self.ifo1 = tupak.gw.detector.Interferometer(name=self.name1,
+                                                     power_spectral_density=self.power_spectral_density1,
+                                                     minimum_frequency=self.minimum_frequency1,
+                                                     maximum_frequency=self.maximum_frequency1, length=self.length1,
+                                                     latitude=self.latitude1, longitude=self.longitude1,
+                                                     elevation=self.elevation1,
+                                                     xarm_azimuth=self.xarm_azimuth1, yarm_azimuth=self.yarm_azimuth1,
+                                                     xarm_tilt=self.xarm_tilt1, yarm_tilt=self.yarm_tilt1)
+        self.ifo2 = tupak.gw.detector.Interferometer(name=self.name2,
+                                                     power_spectral_density=self.power_spectral_density2,
+                                                     minimum_frequency=self.minimum_frequency2,
+                                                     maximum_frequency=self.maximum_frequency2, length=self.length2,
+                                                     latitude=self.latitude2, longitude=self.longitude2,
+                                                     elevation=self.elevation2,
+                                                     xarm_azimuth=self.xarm_azimuth2, yarm_azimuth=self.yarm_azimuth2,
+                                                     xarm_tilt=self.xarm_tilt2, yarm_tilt=self.yarm_tilt2)
+        self.ifo1.strain_data.set_from_frequency_domain_strain(
+            self.frequency_arrays, sampling_frequency=4096, duration=2)
+        self.ifo2.strain_data.set_from_frequency_domain_strain(
+            self.frequency_arrays, sampling_frequency=4096, duration=2)
+        self.ifo_list = tupak.gw.detector.InterferometerList([self.ifo1, self.ifo2])
+
+    def tearDown(self):
+        del self.frequency_arrays
+        del self.name1
+        del self.name2
+        del self.power_spectral_density1
+        del self.power_spectral_density2
+        del self.minimum_frequency1
+        del self.minimum_frequency2
+        del self.maximum_frequency1
+        del self.maximum_frequency2
+        del self.length1
+        del self.length2
+        del self.latitude1
+        del self.latitude2
+        del self.longitude1
+        del self.longitude2
+        del self.elevation1
+        del self.elevation2
+        del self.xarm_azimuth1
+        del self.xarm_azimuth2
+        del self.yarm_azimuth1
+        del self.yarm_azimuth2
+        del self.xarm_tilt1
+        del self.xarm_tilt2
+        del self.yarm_tilt1
+        del self.yarm_tilt2
+        del self.ifo1
+        del self.ifo2
+        del self.ifo_list
+
+    def test_init_with_string(self):
+        with self.assertRaises(ValueError):
+            tupak.gw.detector.InterferometerList("string")
+
+    def test_init_with_string_list(self):
+        """ Merely checks if this ends up in the right bracket """
+        with mock.patch('tupak.gw.detector.get_empty_interferometer') as m:
+            m.side_effect = ValueError
+            with self.assertRaises(ValueError):
+                tupak.gw.detector.InterferometerList(['string'])
+
+    def test_init_with_other_object(self):
+        with self.assertRaises(ValueError):
+            tupak.gw.detector.InterferometerList([object()])
+
+    def test_init_with_actual_ifos(self):
+        ifo_list = tupak.gw.detector.InterferometerList([self.ifo1, self.ifo2])
+        self.assertEqual(self.ifo1, ifo_list[0])
+        self.assertEqual(self.ifo2, ifo_list[1])
+
+    def test_init_inconsistent_duration(self):
+        self.ifo2.strain_data.set_from_frequency_domain_strain(
+            np.linspace(0, 4096, 4097), sampling_frequency=4096, duration=3)
+        with self.assertRaises(ValueError):
+            tupak.gw.detector.InterferometerList([self.ifo1, self.ifo2])
+
+    def test_init_inconsistent_sampling_frequency(self):
+        self.ifo2.strain_data.set_from_frequency_domain_strain(
+            np.linspace(0, 4096, 4097), sampling_frequency=234, duration=2)
+        with self.assertRaises(ValueError):
+            tupak.gw.detector.InterferometerList([self.ifo1, self.ifo2])
+
+    def test_init_inconsistent_start_time(self):
+        self.ifo2.strain_data.start_time = 1
+        with self.assertRaises(ValueError):
+            tupak.gw.detector.InterferometerList([self.ifo1, self.ifo2])
+
+    @patch.object(tupak.gw.detector.Interferometer, 'set_strain_data_from_power_spectral_density')
+    def test_set_strain_data_from_power_spectral_density(self, m):
+        self.ifo_list.set_strain_data_from_power_spectral_densities(sampling_frequency=123, duration=6.2, start_time=3)
+        m.assert_called_with(sampling_frequency=123, duration=6.2, start_time=3)
+        self.assertEqual(len(self.ifo_list), m.call_count)
+
+    def test_inject_signal_pol_and_wg_none(self):
+        with self.assertRaises(ValueError):
+            self.ifo_list.inject_signal(injection_polarizations=None, waveform_generator=None)
+
+    @patch.object(tupak.gw.waveform_generator.WaveformGenerator, 'frequency_domain_strain')
+    def test_inject_signal_pol_none_calls_frequency_domain_strain(self, m):
+        waveform_generator = tupak.gw.waveform_generator.WaveformGenerator(
+            frequency_domain_source_model=lambda x, y, z: x)
+        self.ifo1.inject_signal = MagicMock(return_value=None)
+        self.ifo2.inject_signal = MagicMock(return_value=None)
+        self.ifo_list.inject_signal(parameters=None, waveform_generator=waveform_generator)
+        self.assertTrue(m.called)
+
+    @patch.object(tupak.gw.waveform_generator.WaveformGenerator, 'frequency_domain_strain')
+    def test_inject_signal_pol_none_sets_wg_parameters(self, m):
+        waveform_generator = tupak.gw.waveform_generator.WaveformGenerator(
+            frequency_domain_source_model=lambda x, y, z: x)
+        parameters = dict(y=1, z=2)
+        self.ifo1.inject_signal = MagicMock(return_value=None)
+        self.ifo2.inject_signal = MagicMock(return_value=None)
+        self.ifo_list.inject_signal(parameters=parameters, waveform_generator=waveform_generator)
+        self.assertDictEqual(parameters, waveform_generator.parameters)
+
+    @patch.object(tupak.gw.detector.Interferometer, 'inject_signal')
+    def test_inject_signal_with_inj_pol(self, m):
+        self.ifo_list.inject_signal(injection_polarizations=dict(plus=1))
+        m.assert_called_with(parameters=None, injection_polarizations=dict(plus=1))
+        self.assertEqual(len(self.ifo_list), m.call_count)
+
+    @patch.object(tupak.gw.detector.Interferometer, 'inject_signal')
+    def test_inject_signal_returns_expected_polarisations(self, m):
+        m.return_value = dict(plus=1, cross=2)
+        injection_polarizations = dict(plus=1, cross=2)
+        ifos_pol = self.ifo_list.inject_signal(injection_polarizations=injection_polarizations)
+        self.assertDictEqual(self.ifo1.inject_signal(injection_polarizations=injection_polarizations), ifos_pol[0])
+        self.assertDictEqual(self.ifo2.inject_signal(injection_polarizations=injection_polarizations), ifos_pol[1])
+
+    @patch.object(tupak.gw.detector.Interferometer, 'save_data')
+    def test_save_data(self, m):
+        self.ifo_list.save_data(outdir='test_outdir', label='test_outdir')
+        m.assert_called_with(outdir='test_outdir', label='test_outdir')
+        self.assertEqual(len(self.ifo_list), m.call_count)
+
+    def test_number_of_interferometers(self):
+        self.assertEqual(len(self.ifo_list), self.ifo_list.number_of_interferometers)
+
+    def test_duration(self):
+        self.assertEqual(self.ifo1.strain_data.duration, self.ifo_list.duration)
+        self.assertEqual(self.ifo2.strain_data.duration, self.ifo_list.duration)
+
+    def test_sampling_frequency(self):
+        self.assertEqual(self.ifo1.strain_data.sampling_frequency, self.ifo_list.sampling_frequency)
+        self.assertEqual(self.ifo2.strain_data.sampling_frequency, self.ifo_list.sampling_frequency)
+
+    def test_start_time(self):
+        self.assertEqual(self.ifo1.strain_data.start_time, self.ifo_list.start_time)
+        self.assertEqual(self.ifo2.strain_data.start_time, self.ifo_list.start_time)
+
+    def test_frequency_array(self):
+        self.assertTrue(np.array_equal(self.ifo1.strain_data.frequency_array, self.ifo_list.frequency_array))
+        self.assertTrue(np.array_equal(self.ifo2.strain_data.frequency_array, self.ifo_list.frequency_array))
+
+    def test_append_with_ifo(self):
+        self.ifo_list.append(self.ifo2)
+        names = [ifo.name for ifo in self.ifo_list]
+        self.assertListEqual([self.ifo1.name, self.ifo2.name, self.ifo2.name], names)
+
+    def test_append_with_ifo_list(self):
+        self.ifo_list.append(self.ifo_list)
+        names = [ifo.name for ifo in self.ifo_list]
+        self.assertListEqual([self.ifo1.name, self.ifo2.name, self.ifo1.name, self.ifo2.name], names)
+
+    def test_extend(self):
+        self.ifo_list.extend(self.ifo_list)
+        names = [ifo.name for ifo in self.ifo_list]
+        self.assertListEqual([self.ifo1.name, self.ifo2.name, self.ifo1.name, self.ifo2.name], names)
+
+    def test_insert(self):
+        new_ifo = self.ifo1
+        new_ifo.name = 'name3'
+        self.ifo_list.insert(1, new_ifo)
+        names = [ifo.name for ifo in self.ifo_list]
+        self.assertListEqual([self.ifo1.name, new_ifo.name, self.ifo2.name], names)
+
+
+class TestPowerSpectralDensityWithoutFiles(unittest.TestCase):
+
+    def setUp(self):
+        self.frequency_array = np.array([1., 2., 3.])
+        self.psd_array = np.array([16., 25., 36.])
+        self.asd_array = np.array([4., 5., 6.])
+
+    def tearDown(self):
+        del self.frequency_array
+        del self.psd_array
+        del self.asd_array
+
+    def test_init_with_asd_array(self):
+        psd = tupak.gw.detector.PowerSpectralDensity(frequency_array=self.frequency_array, asd_array=self.asd_array)
+        self.assertTrue(np.array_equal(self.frequency_array, psd.frequency_array))
+        self.assertTrue(np.array_equal(self.asd_array, psd.asd_array))
+        self.assertTrue(np.array_equal(self.psd_array, psd.psd_array))
+
+    def test_init_with_psd_array(self):
+        psd = tupak.gw.detector.PowerSpectralDensity(frequency_array=self.frequency_array, psd_array=self.psd_array)
+        self.assertTrue(np.array_equal(self.frequency_array, psd.frequency_array))
+        self.assertTrue(np.array_equal(self.asd_array, psd.asd_array))
+        self.assertTrue(np.array_equal(self.psd_array, psd.psd_array))
+
+    def test_setting_asd_array_after_init(self):
+        psd = tupak.gw.detector.PowerSpectralDensity(frequency_array=self.frequency_array)
+        psd.asd_array = self.asd_array
+        self.assertTrue(np.array_equal(self.frequency_array, psd.frequency_array))
+        self.assertTrue(np.array_equal(self.asd_array, psd.asd_array))
+        self.assertTrue(np.array_equal(self.psd_array, psd.psd_array))
+
+    def test_setting_psd_array_after_init(self):
+        psd = tupak.gw.detector.PowerSpectralDensity(frequency_array=self.frequency_array)
+        psd.psd_array = self.psd_array
+        self.assertTrue(np.array_equal(self.frequency_array, psd.frequency_array))
+        self.assertTrue(np.array_equal(self.asd_array, psd.asd_array))
+        self.assertTrue(np.array_equal(self.psd_array, psd.psd_array))
+
+    def test_power_spectral_density_interpolated_from_asd_array(self):
+        expected = np.array([25.])
+        psd = tupak.gw.detector.PowerSpectralDensity(frequency_array=self.frequency_array, asd_array=self.asd_array)
+        self.assertEqual(expected, psd.power_spectral_density_interpolated(2))
+
+    def test_power_spectral_density_interpolated_from_psd_array(self):
+        expected = np.array([25.])
+        psd = tupak.gw.detector.PowerSpectralDensity(frequency_array=self.frequency_array, psd_array=self.psd_array)
+        self.assertEqual(expected, psd.power_spectral_density_interpolated(2))
+
+    def test_from_amplitude_spectral_density_array(self):
+        actual = tupak.gw.detector.PowerSpectralDensity.from_amplitude_spectral_density_array(
+            frequency_array=self.frequency_array, asd_array=self.asd_array)
+        self.assertTrue(np.array_equal(self.psd_array, actual.psd_array))
+        self.assertTrue(np.array_equal(self.asd_array, actual.asd_array))
+
+    def test_from_power_spectral_density_array(self):
+        actual = tupak.gw.detector.PowerSpectralDensity.from_power_spectral_density_array(
+            frequency_array=self.frequency_array, psd_array=self.psd_array)
+        self.assertTrue(np.array_equal(self.psd_array, actual.psd_array))
+        self.assertTrue(np.array_equal(self.asd_array, actual.asd_array))
+
+    def test_repr(self):
+        psd = tupak.gw.detector.PowerSpectralDensity(frequency_array=self.frequency_array, psd_array=self.psd_array)
+        expected = 'PowerSpectralDensity(frequency_array={}, psd_array={}, asd_array={})'.format(self.frequency_array,
+                                                                                                 self.psd_array,
+                                                                                                 self.asd_array)
+        self.assertEqual(expected, repr(psd))
+
+
+class TestPowerSpectralDensityWithFiles(unittest.TestCase):
+
+    def setUp(self):
+        self.dir = os.path.join(os.path.dirname(__file__), 'noise_curves')
+        os.mkdir(self.dir)
+        self.asd_file = os.path.join(os.path.dirname(__file__), 'noise_curves', 'asd_test_file.txt')
+        self.psd_file = os.path.join(os.path.dirname(__file__), 'noise_curves', 'psd_test_file.txt')
+        with open(self.asd_file, 'w') as f:
+            f.write('1.\t1.0e-21\n2.\t2.0e-21\n3.\t3.0e-21')
+        with open(self.psd_file, 'w') as f:
+            f.write('1.\t1.0e-42\n2.\t4.0e-42\n3.\t9.0e-42')
+        self.frequency_array = np.array([1.0, 2.0, 3.0])
+        self.asd_array = np.array([1.0e-21, 2.0e-21, 3.0e-21])
+        self.psd_array = np.array([1.0e-42, 4.0e-42, 9.0e-42])
+
+    def tearDown(self):
+        os.remove(self.asd_file)
+        os.remove(self.psd_file)
+        os.rmdir(self.dir)
+        del self.dir
+        del self.asd_array
+        del self.psd_array
+        del self.asd_file
+        del self.psd_file
+
+    def test_init_with_psd_file(self):
+        psd = tupak.gw.detector.PowerSpectralDensity(frequency_array=self.frequency_array, psd_file=self.psd_file)
+        self.assertEqual(self.psd_file, psd.psd_file)
+        self.assertTrue(np.array_equal(self.psd_array, psd.psd_array))
+        self.assertTrue(np.allclose(self.asd_array, psd.asd_array, atol=1e-30))
+
+    def test_init_with_asd_file(self):
+        psd = tupak.gw.detector.PowerSpectralDensity(frequency_array=self.frequency_array, asd_file=self.asd_file)
+        self.assertEqual(self.asd_file, psd.asd_file)
+        self.assertTrue(np.allclose(self.psd_array, psd.psd_array, atol=1e-60))
+        self.assertTrue(np.array_equal(self.asd_array, psd.asd_array))
+
+    def test_setting_psd_array_after_init(self):
+        psd = tupak.gw.detector.PowerSpectralDensity(frequency_array=self.frequency_array)
+        psd.psd_file = self.psd_file
+        self.assertEqual(self.psd_file, psd.psd_file)
+        self.assertTrue(np.array_equal(self.psd_array, psd.psd_array))
+        self.assertTrue(np.allclose(self.asd_array, psd.asd_array, atol=1e-30))
+
+    def test_init_with_asd_array_after_init(self):
+        psd = tupak.gw.detector.PowerSpectralDensity(frequency_array=self.frequency_array)
+        psd.asd_file = self.asd_file
+        self.assertEqual(self.asd_file, psd.asd_file)
+        self.assertTrue(np.allclose(self.psd_array, psd.psd_array, atol=1e-60))
+        self.assertTrue(np.array_equal(self.asd_array, psd.asd_array))
+
+    def test_power_spectral_density_interpolated_from_asd_file(self):
+        expected = np.array([4.0e-42])
+        psd = tupak.gw.detector.PowerSpectralDensity(frequency_array=self.frequency_array, asd_file=self.asd_file)
+        self.assertTrue(np.allclose(expected, psd.power_spectral_density_interpolated(2), atol=1e-60))
+
+    def test_power_spectral_density_interpolated_from_psd_file(self):
+        expected = np.array([4.0e-42])
+        psd = tupak.gw.detector.PowerSpectralDensity(frequency_array=self.frequency_array, psd_file=self.psd_file)
+        self.assertAlmostEqual(expected, psd.power_spectral_density_interpolated(2))
+
+    def test_from_amplitude_spectral_density_file(self):
+        psd = tupak.gw.detector.PowerSpectralDensity.from_amplitude_spectral_density_file(asd_file=self.asd_file)
+        self.assertEqual(self.asd_file, psd.asd_file)
+        self.assertTrue(np.allclose(self.psd_array, psd.psd_array, atol=1e-60))
+        self.assertTrue(np.array_equal(self.asd_array, psd.asd_array))
+
+    def test_from_power_spectral_density_file(self):
+        psd = tupak.gw.detector.PowerSpectralDensity.from_power_spectral_density_file(psd_file=self.psd_file)
+        self.assertEqual(self.psd_file, psd.psd_file)
+        self.assertTrue(np.array_equal(self.psd_array, psd.psd_array))
+        self.assertTrue(np.allclose(self.asd_array, psd.asd_array, atol=1e-30))
+
+    def test_from_aligo(self):
+        psd = tupak.gw.detector.PowerSpectralDensity.from_aligo()
+        expected_filename = 'aLIGO_ZERO_DET_high_P_psd.txt'
+        expected = tupak.gw.detector.PowerSpectralDensity(psd_file=expected_filename)
+        actual_filename = psd.psd_file.split('/')[-1]
+        self.assertEqual(expected_filename, actual_filename)
+        self.assertTrue(np.allclose(expected.psd_array, psd.psd_array, atol=1e-60))
+        self.assertTrue(np.array_equal(expected.asd_array, psd.asd_array))
+
+    def test_check_file_psd_file_set_to_asd_file(self):
+        logger = logging.getLogger('tupak')
+        m = MagicMock()
+        logger.warning = m
+        tupak.gw.detector.PowerSpectralDensity(psd_file=self.asd_file)
+        self.assertEqual(4, m.call_count)
+
+    def test_check_file_not_called_psd_file_set_to_psd_file(self):
+        logger = logging.getLogger('tupak')
+        m = MagicMock()
+        logger.warning = m
+        tupak.gw.detector.PowerSpectralDensity(psd_file=self.psd_file)
+        self.assertEqual(0, m.call_count)
+
+    def test_check_file_asd_file_set_to_psd_file(self):
+        logger = logging.getLogger('tupak')
+        m = MagicMock()
+        logger.warning = m
+        tupak.gw.detector.PowerSpectralDensity(asd_file=self.psd_file)
+        self.assertEqual(4, m.call_count)
+
+    def test_check_file_not_called_asd_file_set_to_asd_file(self):
+        logger = logging.getLogger('tupak')
+        m = MagicMock()
+        logger.warning = m
+        tupak.gw.detector.PowerSpectralDensity(asd_file=self.asd_file)
+        self.assertEqual(0, m.call_count)
+
+    def test_from_frame_file(self):
+        expected_frequency_array = np.array([1., 2., 3.])
+        expected_psd_array = np.array([16., 25., 36.])
+        with mock.patch('tupak.gw.detector.InterferometerStrainData.set_from_frame_file') as m:
+            with mock.patch('tupak.gw.detector.InterferometerStrainData.create_power_spectral_density') as n:
+                n.return_value = expected_frequency_array, expected_psd_array
+                psd = tupak.gw.detector.PowerSpectralDensity.from_frame_file(frame_file=self.asd_file,
+                                                                             psd_start_time=0,
+                                                                             psd_duration=4)
+                self.assertTrue(np.array_equal(expected_frequency_array, psd.frequency_array))
+                self.assertTrue(np.array_equal(expected_psd_array, psd.psd_array))
+
+    def test_repr(self):
+        psd = tupak.gw.detector.PowerSpectralDensity(psd_file=self.psd_file)
+        expected = 'PowerSpectralDensity(psd_file=\'{}\', asd_file=\'{}\')'.format(self.psd_file, None)
+        self.assertEqual(expected, repr(psd))
+
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/test/gw_likelihood_tests.py b/test/gw_likelihood_tests.py
index f28dec3327c0d8e2b9446c536fd88dd5ef976c70..fa8e53b6ec255b5defd1ad98ccd7da0f9f39b46b 100644
--- a/test/gw_likelihood_tests.py
+++ b/test/gw_likelihood_tests.py
@@ -44,7 +44,7 @@ class TestBasicGWTransient(unittest.TestCase):
         """Test log likelihood matches precomputed value"""
         self.likelihood.log_likelihood()
         self.assertAlmostEqual(self.likelihood.log_likelihood(),
-                               -4051.754299050376, 3)
+                               -4048.5113284032036, 3)
 
     def test_log_likelihood_ratio(self):
         """Test log likelihood ratio returns the correct value"""
@@ -61,6 +61,11 @@ class TestBasicGWTransient(unittest.TestCase):
                          np.nan_to_num(-np.inf))
         self.likelihood.waveform_generator.parameters['mass_2'] = 29
 
+    def test_repr(self):
+        expected = 'BasicGravitationalWaveTransient(interferometers={},\n\twaveform_generator={})'.format(
+            self.interferometers, self.waveform_generator)
+        self.assertEqual(expected, repr(self.likelihood))
+
 
 class TestGWTransient(unittest.TestCase):
 
@@ -116,7 +121,7 @@ class TestGWTransient(unittest.TestCase):
         """Test log likelihood matches precomputed value"""
         self.likelihood.log_likelihood()
         self.assertAlmostEqual(self.likelihood.log_likelihood(),
-                               -4051.754299050375, 3)
+                               -4048.5113284032036, 3)
 
     def test_log_likelihood_ratio(self):
         """Test log likelihood ratio returns the correct value"""
@@ -133,6 +138,12 @@ class TestGWTransient(unittest.TestCase):
                          np.nan_to_num(-np.inf))
         self.likelihood.waveform_generator.parameters['mass_2'] = 29
 
+    def test_repr(self):
+        expected = 'GravitationalWaveTransient(interferometers={},\n\twaveform_generator={},\n\t' \
+                   'time_marginalization={}, distance_marginalization={}, phase_marginalization={}, ' \
+                   'prior={})'.format(self.interferometers, self.waveform_generator, False, False, False, self.prior)
+        self.assertEqual(expected, repr(self.likelihood))
+
 
 class TestTimeMarginalization(unittest.TestCase):
 
diff --git a/test/likelihood_tests.py b/test/likelihood_tests.py
index a3d6da004a6b6690332c957acee07256c6b3d67b..9146cb3ac037d72522bdd6866c81f72a77626d0d 100644
--- a/test/likelihood_tests.py
+++ b/test/likelihood_tests.py
@@ -19,6 +19,11 @@ class TestLikelihoodBase(unittest.TestCase):
     def tearDown(self):
         del self.likelihood
 
+    def test_repr(self):
+        self.likelihood = tupak.core.likelihood.Likelihood(parameters=['a', 'b'])
+        expected = 'Likelihood(parameters=[\'a\', \'b\'])'
+        self.assertEqual(expected, repr(self.likelihood))
+
     def test_base_log_likelihood(self):
         self.assertTrue(np.isnan(self.likelihood.log_likelihood()))
 
@@ -99,6 +104,7 @@ class TestAnalytical1DLikelihood(unittest.TestCase):
     def test_set_func(self):
         def new_func(x):
             return x
+
         with self.assertRaises(AttributeError):
             # noinspection PyPropertyAccess
             self.analytical_1d_likelihood.func = new_func
@@ -124,6 +130,10 @@ class TestAnalytical1DLikelihood(unittest.TestCase):
                                          parameter2=self.parameter2_value)
         self.assertDictEqual(expected_model_parameters, self.analytical_1d_likelihood.model_parameters)
 
+    def test_repr(self):
+        expected = 'Analytical1DLikelihood(x={}, y={}, func={})'.format(self.x, self.y, self.func.__name__)
+        self.assertEqual(expected, repr(self.analytical_1d_likelihood))
+
 
 class TestGaussianLikelihood(unittest.TestCase):
 
@@ -181,6 +191,13 @@ class TestGaussianLikelihood(unittest.TestCase):
         likelihood.log_likelihood()
         self.assertTrue(likelihood.sigma is None)
 
+    def test_repr(self):
+        likelihood = tupak.core.likelihood.GaussianLikelihood(
+            self.x, self.y, self.function, sigma=self.sigma)
+        expected = 'GaussianLikelihood(x={}, y={}, func={}, sigma={})' \
+            .format(self.x, self.y, self.function.__name__, self.sigma)
+        self.assertEqual(expected, repr(likelihood))
+
 
 class TestStudentTLikelihood(unittest.TestCase):
 
@@ -257,6 +274,15 @@ class TestStudentTLikelihood(unittest.TestCase):
 
         self.assertAlmostEqual(4.0, likelihood.lam)
 
+    def test_repr(self):
+        nu = 0
+        sigma = 0.5
+        likelihood = tupak.core.likelihood.StudentTLikelihood(
+            self.x, self.y, self.function, nu=nu, sigma=sigma)
+        expected = 'StudentTLikelihood(x={}, y={}, func={}, nu={}, sigma={})' \
+            .format(self.x, self.y, self.function.__name__, nu, sigma)
+        self.assertEqual(expected, repr(likelihood))
+
 
 class TestPoissonLikelihood(unittest.TestCase):
 
@@ -356,6 +382,12 @@ class TestPoissonLikelihood(unittest.TestCase):
             m.return_value = 1
             self.assertEqual(0, poisson_likelihood.log_likelihood())
 
+    def test_repr(self):
+        likelihood = tupak.core.likelihood.PoissonLikelihood(
+            self.x, self.y, self.function)
+        expected = 'PoissonLikelihood(x={}, y={}, func={})'.format(self.x, self.y, self.function.__name__)
+        self.assertEqual(expected, repr(likelihood))
+
 
 class TestExponentialLikelihood(unittest.TestCase):
 
@@ -443,5 +475,102 @@ class TestExponentialLikelihood(unittest.TestCase):
             self.assertEqual(-3, exponential_likelihood.log_likelihood())
 
 
+class TestJointLikelihood(unittest.TestCase):
+
+    def setUp(self):
+        self.x = np.array([1, 2, 3])
+        self.y = np.array([1, 2, 3])
+        self.first_likelihood = tupak.core.likelihood.GaussianLikelihood(
+            x=self.x,
+            y=self.y,
+            func=lambda x, param1, param2: (param1 + param2) * x,
+            sigma=1)
+        self.second_likelihood = tupak.core.likelihood.PoissonLikelihood(
+            x=self.x,
+            y=self.y,
+            func=lambda x, param2, param3: (param2 + param3) * x)
+        self.third_likelihood = tupak.core.likelihood.ExponentialLikelihood(
+            x=self.x,
+            y=self.y,
+            func=lambda x, param4, param5: (param4 + param5) * x
+        )
+        self.joint_likelihood = tupak.core.likelihood.JointLikelihood(self.first_likelihood,
+                                                                      self.second_likelihood,
+                                                                      self.third_likelihood)
+
+        self.first_likelihood.parameters['param1'] = 1
+        self.first_likelihood.parameters['param2'] = 2
+        self.second_likelihood.parameters['param2'] = 2
+        self.second_likelihood.parameters['param3'] = 3
+        self.third_likelihood.parameters['param4'] = 4
+        self.third_likelihood.parameters['param5'] = 5
+
+        self.joint_likelihood.parameters['param1'] = 1
+        self.joint_likelihood.parameters['param2'] = 2
+        self.joint_likelihood.parameters['param3'] = 3
+        self.joint_likelihood.parameters['param4'] = 4
+        self.joint_likelihood.parameters['param5'] = 5
+
+    def tearDown(self):
+        del self.x
+        del self.y
+        del self.first_likelihood
+        del self.second_likelihood
+        del self.third_likelihood
+        del self.joint_likelihood
+
+    def test_parameters_consistent_from_init(self):
+        expected = dict(param1=1, param2=2, param3=3, param4=4, param5=5, )
+        self.assertDictEqual(expected, self.joint_likelihood.parameters)
+
+    def test_log_likelihood_correctly_sums(self):
+        expected = self.first_likelihood.log_likelihood() + \
+            self.second_likelihood.log_likelihood() + \
+            self.third_likelihood.log_likelihood()
+        self.assertEqual(expected, self.joint_likelihood.log_likelihood())
+
+    def test_log_likelihood_checks_parameter_updates(self):
+        self.first_likelihood.parameters['param2'] = 7
+        self.second_likelihood.parameters['param2'] = 7
+        self.joint_likelihood.parameters['param2'] = 7
+        expected = self.first_likelihood.log_likelihood() + \
+            self.second_likelihood.log_likelihood() + \
+            self.third_likelihood.log_likelihood()
+        self.assertEqual(expected, self.joint_likelihood.log_likelihood())
+
+    def test_list_element_parameters_are_updated(self):
+        self.joint_likelihood.parameters['param2'] = 7
+        self.assertEqual(self.joint_likelihood.parameters['param2'],
+                         self.joint_likelihood.likelihoods[0].parameters['param2'])
+        self.assertEqual(self.joint_likelihood.parameters['param2'],
+                         self.joint_likelihood.likelihoods[1].parameters['param2'])
+
+    def test_log_noise_likelihood(self):
+        self.first_likelihood.noise_log_likelihood = MagicMock(return_value=1)
+        self.second_likelihood.noise_log_likelihood = MagicMock(return_value=2)
+        self.third_likelihood.noise_log_likelihood = MagicMock(return_value=3)
+        self.joint_likelihood = tupak.core.likelihood.JointLikelihood(self.first_likelihood,
+                                                                      self.second_likelihood,
+                                                                      self.third_likelihood)
+        expected = self.first_likelihood.noise_log_likelihood() + \
+            self.second_likelihood.noise_log_likelihood() + \
+            self.third_likelihood.noise_log_likelihood()
+        self.assertEqual(expected, self.joint_likelihood.noise_log_likelihood())
+
+    def test_init_with_list_of_likelihoods(self):
+        with self.assertRaises(ValueError):
+            tupak.core.likelihood.JointLikelihood([self.first_likelihood, self.second_likelihood, self.third_likelihood])
+
+    def test_setting_single_likelihood(self):
+        self.joint_likelihood.likelihoods = self.first_likelihood
+        self.assertEqual(self.first_likelihood.log_likelihood(), self.joint_likelihood.log_likelihood())
+
+    # Appending is not supported
+    # def test_appending(self):
+    #     joint_likelihood = tupak.core.likelihood.JointLikelihood(self.first_likelihood, self.second_likelihood)
+    #     joint_likelihood.likelihoods.append(self.third_likelihood)
+    #     self.assertDictEqual(self.joint_likelihood.parameters, joint_likelihood.parameters)
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/test/prior_tests.py b/test/prior_tests.py
index 29124f747feb3eed472a9b902628e5111c1135f9..8f25a0292d89c7ecaf024c8ea075ca93744e285c 100644
--- a/test/prior_tests.py
+++ b/test/prior_tests.py
@@ -35,7 +35,7 @@ class TestPriorInstantiationWithoutOptionalPriors(unittest.TestCase):
 
     def test_base_repr(self):
         self.prior = tupak.core.prior.Prior(name='test_name', latex_label='test_label', minimum=0, maximum=1)
-        expected_string = "Prior(name='test_name', latex_label='test_label', minimum=0, maximum=1)"
+        expected_string = "Prior(name='test_name', latex_label='test_label', unit=None, minimum=0, maximum=1)"
         self.assertEqual(expected_string, self.prior.__repr__())
 
 
@@ -103,32 +103,32 @@ class TestPriorClasses(unittest.TestCase):
     def setUp(self):
 
         self.priors = [
-            tupak.core.prior.DeltaFunction(name='test', peak=1),
-            tupak.core.prior.Gaussian(name='test', mu=0, sigma=1),
-            tupak.core.prior.Normal(name='test', mu=0, sigma=1),
-            tupak.core.prior.PowerLaw(name='test', alpha=0, minimum=0, maximum=1),
-            tupak.core.prior.PowerLaw(name='test', alpha=2, minimum=1, maximum=1e2),
-            tupak.core.prior.Uniform(name='test', minimum=0, maximum=1),
-            tupak.core.prior.LogUniform(name='test', minimum=5e0, maximum=1e2),
+            tupak.core.prior.DeltaFunction(name='test', unit='unit', peak=1),
+            tupak.core.prior.Gaussian(name='test', unit='unit', mu=0, sigma=1),
+            tupak.core.prior.Normal(name='test', unit='unit', mu=0, sigma=1),
+            tupak.core.prior.PowerLaw(name='test', unit='unit', alpha=0, minimum=0, maximum=1),
+            tupak.core.prior.PowerLaw(name='test', unit='unit', alpha=2, minimum=1, maximum=1e2),
+            tupak.core.prior.Uniform(name='test', unit='unit', minimum=0, maximum=1),
+            tupak.core.prior.LogUniform(name='test', unit='unit', minimum=5e0, maximum=1e2),
             tupak.gw.prior.UniformComovingVolume(name='test', minimum=2e2, maximum=5e3),
-            tupak.core.prior.Sine(name='test'),
-            tupak.core.prior.Cosine(name='test'),
-            tupak.core.prior.Interped(name='test', xx=np.linspace(0, 10, 1000), yy=np.linspace(0, 10, 1000) ** 4,
+            tupak.core.prior.Sine(name='test', unit='unit'),
+            tupak.core.prior.Cosine(name='test', unit='unit'),
+            tupak.core.prior.Interped(name='test', unit='unit', xx=np.linspace(0, 10, 1000), yy=np.linspace(0, 10, 1000) ** 4,
                                       minimum=3, maximum=5),
-            tupak.core.prior.TruncatedGaussian(name='test', mu=1, sigma=0.4, minimum=-1, maximum=1),
-            tupak.core.prior.TruncatedNormal(name='test', mu=1, sigma=0.4, minimum=-1, maximum=1),
-            tupak.core.prior.HalfGaussian(name='test', sigma=1),
-            tupak.core.prior.HalfNormal(name='test', sigma=1),
-            tupak.core.prior.LogGaussian(name='test', mu=0, sigma=1),
-            tupak.core.prior.LogNormal(name='test', mu=0, sigma=1),
-            tupak.core.prior.Exponential(name='test', mu=1),
-            tupak.core.prior.StudentT(name='test', df=3, mu=0, scale=1),
-            tupak.core.prior.Beta(name='test', alpha=2.0, beta=2.0),
-            tupak.core.prior.Logistic(name='test', mu=0, scale=1),
-            tupak.core.prior.Cauchy(name='test', alpha=0, beta=1),
-            tupak.core.prior.Lorentzian(name='test', alpha=0, beta=1),
-            tupak.core.prior.Gamma(name='test', k=1, theta=1),
-            tupak.core.prior.ChiSquared(name='test', nu=2)
+            tupak.core.prior.TruncatedGaussian(name='test', unit='unit', mu=1, sigma=0.4, minimum=-1, maximum=1),
+            tupak.core.prior.TruncatedNormal(name='test', unit='unit', mu=1, sigma=0.4, minimum=-1, maximum=1),
+            tupak.core.prior.HalfGaussian(name='test', unit='unit', sigma=1),
+            tupak.core.prior.HalfNormal(name='test', unit='unit', sigma=1),
+            tupak.core.prior.LogGaussian(name='test', unit='unit', mu=0, sigma=1),
+            tupak.core.prior.LogNormal(name='test', unit='unit', mu=0, sigma=1),
+            tupak.core.prior.Exponential(name='test', unit='unit', mu=1),
+            tupak.core.prior.StudentT(name='test', unit='unit', df=3, mu=0, scale=1),
+            tupak.core.prior.Beta(name='test', unit='unit', alpha=2.0, beta=2.0),
+            tupak.core.prior.Logistic(name='test', unit='unit', mu=0, scale=1),
+            tupak.core.prior.Cauchy(name='test', unit='unit', alpha=0, beta=1),
+            tupak.core.prior.Lorentzian(name='test', unit='unit', alpha=0, beta=1),
+            tupak.core.prior.Gamma(name='test', unit='unit', k=1, theta=1),
+            tupak.core.prior.ChiSquared(name='test', unit='unit', nu=2)
         ]
 
     def test_minimum_rescaling(self):
@@ -235,6 +235,13 @@ class TestPriorClasses(unittest.TestCase):
                 domain = np.linspace(prior.minimum, prior.maximum, 1000)
             self.assertAlmostEqual(np.trapz(prior.prob(domain), domain), 1, 3)
 
+    def test_unit_setting(self):
+        for prior in self.priors:
+            if isinstance(prior, tupak.gw.prior.UniformComovingVolume):
+                self.assertEqual('Mpc', prior.unit)
+            else:
+                self.assertEqual('unit', prior.unit)
+
 
 class TestFillPrior(unittest.TestCase):
 
diff --git a/test/waveform_generator_tests.py b/test/waveform_generator_tests.py
index 4d67cc47d16967dc167f5f6ede0f02bde7a9f9f7..99dd5d3f005f3dd39503c2fbdc747d63f45a8e2e 100644
--- a/test/waveform_generator_tests.py
+++ b/test/waveform_generator_tests.py
@@ -32,6 +32,21 @@ class TestWaveformGeneratorInstantiationWithoutOptionalParameters(unittest.TestC
         del self.waveform_generator
         del self.simulation_parameters
 
+    def test_repr(self):
+        expected = 'WaveformGenerator(duration={}, sampling_frequency={}, start_time={}, ' \
+                   'frequency_domain_source_model={}, time_domain_source_model={}, parameters={}, ' \
+                   'parameter_conversion={}, non_standard_sampling_parameter_keys={}, waveform_arguments={})'\
+            .format(self.waveform_generator.duration,
+                    self.waveform_generator.sampling_frequency,
+                    self.waveform_generator.start_time,
+                    self.waveform_generator.frequency_domain_source_model.__name__,
+                    self.waveform_generator.time_domain_source_model,
+                    self.waveform_generator.parameters,
+                    None,
+                    self.waveform_generator.non_standard_sampling_parameter_keys,
+                    self.waveform_generator.waveform_arguments)
+        self.assertEqual(expected, repr(self.waveform_generator))
+
     def test_duration(self):
         self.assertEqual(self.waveform_generator.duration, 1)
 
diff --git a/tupak/core/__init__.py b/tupak/core/__init__.py
index 1329ed8e86ff0e7b23fed6c45aab0f9d4c92c78d..94d3b3b2ffc7020e9f38eba6f7f4d031078bb56a 100644
--- a/tupak/core/__init__.py
+++ b/tupak/core/__init__.py
@@ -3,4 +3,4 @@ import tupak.core.likelihood
 import tupak.core.prior
 import tupak.core.result
 import tupak.core.sampler
-import tupak.core.utils
+import tupak.core.utils
\ No newline at end of file
diff --git a/tupak/core/likelihood.py b/tupak/core/likelihood.py
index eefa05754f0321068bbb939f2e6db546ffed5016..d49513604523abeb67f4608057de7069b20095ee 100644
--- a/tupak/core/likelihood.py
+++ b/tupak/core/likelihood.py
@@ -3,6 +3,7 @@ from __future__ import division, print_function
 import numpy as np
 from scipy.special import gammaln
 from tupak.core.utils import infer_parameters_from_function
+import copy
 
 
 class Likelihood(object):
@@ -16,6 +17,9 @@ class Likelihood(object):
         """
         self.parameters = parameters
 
+    def __repr__(self):
+        return self.__class__.__name__ + '(parameters={})'.format(self.parameters)
+
     def log_likelihood(self):
         """
 
@@ -68,6 +72,9 @@ class Analytical1DLikelihood(Likelihood):
         self.__func = func
         self.__function_keys = list(self.parameters.keys())
 
+    def __repr__(self):
+        return self.__class__.__name__ + '(x={}, y={}, func={})'.format(self.x, self.y, self.func.__name__)
+
     @property
     def func(self):
         """ Make func read-only """
@@ -146,6 +153,10 @@ class GaussianLikelihood(Analytical1DLikelihood):
         if self.sigma is None:
             self.parameters['sigma'] = None
 
+    def __repr__(self):
+        return self.__class__.__name__ + '(x={}, y={}, func={}, sigma={})'\
+            .format(self.x, self.y, self.func.__name__, self.sigma)
+
     def log_likelihood(self):
         return self.__summed_log_likelihood(sigma=self.__get_sigma())
 
@@ -159,8 +170,8 @@ class GaussianLikelihood(Analytical1DLikelihood):
         return self.parameters.get('sigma', self.sigma)
 
     def __summed_log_likelihood(self, sigma):
-        return -0.5 * (np.sum((self.residual / sigma) ** 2)
-                       + self.n * np.log(2 * np.pi * sigma ** 2))
+        return -0.5 * (np.sum((self.residual / sigma) ** 2) +
+                       self.n * np.log(2 * np.pi * sigma ** 2))
 
 
 class PoissonLikelihood(Analytical1DLikelihood):
@@ -188,6 +199,9 @@ class PoissonLikelihood(Analytical1DLikelihood):
 
         Analytical1DLikelihood.__init__(self, x=x, y=y, func=func)
 
+    def __repr__(self):
+        return Analytical1DLikelihood.__repr__(self)
+
     @property
     def y(self):
         """ Property assures that y-value is a positive integer. """
@@ -235,6 +249,9 @@ class ExponentialLikelihood(Analytical1DLikelihood):
         """
         Analytical1DLikelihood.__init__(self, x=x, y=y, func=func)
 
+    def __repr__(self):
+        return Analytical1DLikelihood.__repr__(self)
+
     @property
     def y(self):
         """ Property assures that y-value is positive. """
@@ -294,6 +311,10 @@ class StudentTLikelihood(Analytical1DLikelihood):
         if self.nu is None:
             self.parameters['nu'] = None
 
+    def __repr__(self):
+        return self.__class__.__name__ + '(x={}, y={}, func={}, nu={}, sigma={})'\
+            .format(self.x, self.y, self.func.__name__, self.nu, self.sigma)
+
     @property
     def lam(self):
         """ Converts 'scale' to 'precision' """
@@ -313,5 +334,63 @@ class StudentTLikelihood(Analytical1DLikelihood):
         return self.parameters.get('nu', self.nu)
 
     def __summed_log_likelihood(self, nu):
-        return self.n * (gammaln((nu + 1.0) / 2.0) + .5 * np.log(self.lam / (nu * np.pi)) - gammaln(nu / 2.0)) \
-               - (nu + 1.0) / 2.0 * np.sum(np.log1p(self.lam * self.residual ** 2 / nu))
+        return (
+            self.n * (gammaln((nu + 1.0) / 2.0) + .5 * np.log(self.lam / (nu * np.pi)) -
+                      gammaln(nu / 2.0)) -
+            (nu + 1.0) / 2.0 * np.sum(np.log1p(self.lam * self.residual ** 2 / nu)))
+
+
+class JointLikelihood(Likelihood):
+    def __init__(self, *likelihoods):
+        """
+        A likelihood for combining pre-defined likelihoods.
+        The parameters dict is automagically combined through parameters dicts
+        of the given likelihoods. If parameters have different values have
+        initially different values across different likelihoods, the value
+        of the last given likelihood is chosen. This does not matter when
+        using the JointLikelihood for sampling, because the parameters will be
+        set consistently
+
+        Parameters
+        ----------
+        *likelihoods: tupak.core.likelihood.Likelihood
+            likelihoods to be combined parsed as arguments
+        """
+        self.likelihoods = likelihoods
+        Likelihood.__init__(self, parameters={})
+        self.__sync_parameters()
+
+    def __sync_parameters(self):
+        """ Synchronizes parameters between the likelihoods
+        so that all likelihoods share a single parameter dict."""
+        for likelihood in self.likelihoods:
+            self.parameters.update(likelihood.parameters)
+        for likelihood in self.likelihoods:
+            likelihood.parameters = self.parameters
+
+    @property
+    def likelihoods(self):
+        """ The list of likelihoods """
+        return self.__likelihoods
+
+    @likelihoods.setter
+    def likelihoods(self, likelihoods):
+        likelihoods = copy.deepcopy(likelihoods)
+        if isinstance(likelihoods, tuple) or isinstance(likelihoods, list):
+            if all(isinstance(likelihood, Likelihood) for likelihood in likelihoods):
+                self.__likelihoods = list(likelihoods)
+            else:
+                raise ValueError('Try setting the JointLikelihood like this\n'
+                                 'JointLikelihood(first_likelihood, second_likelihood, ...)')
+        elif isinstance(likelihoods, Likelihood):
+            self.__likelihoods = [likelihoods]
+        else:
+            raise ValueError('Input likelihood is not a list or tuple. You need to set multiple likelihoods.')
+
+    def log_likelihood(self):
+        """ This is just the sum of the log likelihoods of all parts of the joint likelihood"""
+        return sum([likelihood.log_likelihood() for likelihood in self.likelihoods])
+
+    def noise_log_likelihood(self):
+        """ This is just the sum of the noise likelihoods of all parts of the joint likelihood"""
+        return sum([likelihood.noise_log_likelihood() for likelihood in self.likelihoods])
diff --git a/tupak/core/prior.py b/tupak/core/prior.py
index 151883c6b66f850a818b60709e09fe9fe361df64..b42516afab61b7f8c2c97bce85d72cd05cae7b9d 100644
--- a/tupak/core/prior.py
+++ b/tupak/core/prior.py
@@ -10,7 +10,7 @@ from collections import OrderedDict
 
 from tupak.core.utils import logger
 from tupak.core import utils
-import tupak
+import tupak  # noqa
 
 
 class PriorSet(OrderedDict):
@@ -28,8 +28,8 @@ class PriorSet(OrderedDict):
         if isinstance(dictionary, dict):
             self.update(dictionary)
         elif type(dictionary) is str:
-            logger.debug('Argument "dictionary" is a string.'
-                         + ' Assuming it is intended as a file name.')
+            logger.debug('Argument "dictionary" is a string.' +
+                         ' Assuming it is intended as a file name.')
             self.read_in_file(dictionary)
         elif type(filename) is str:
             self.read_in_file(filename)
@@ -265,7 +265,8 @@ class Prior(object):
 
     _default_latex_labels = dict()
 
-    def __init__(self, name=None, latex_label=None, minimum=-np.inf, maximum=np.inf):
+    def __init__(self, name=None, latex_label=None, unit=None, minimum=-np.inf,
+                 maximum=np.inf):
         """ Implements a Prior object
 
         Parameters
@@ -274,6 +275,8 @@ class Prior(object):
             Name associated with prior.
         latex_label: str, optional
             Latex label associated with prior, used for plotting.
+        unit: str, optional
+            If given, a Latex string describing the units of the parameter.
         minimum: float, optional
             Minimum of the domain, default=-np.inf
         maximum: float, optional
@@ -282,6 +285,7 @@ class Prior(object):
         """
         self.name = name
         self.latex_label = latex_label
+        self.unit = unit
         self.minimum = minimum
         self.maximum = maximum
 
@@ -398,7 +402,7 @@ class Prior(object):
 
         """
         prior_name = self.__class__.__name__
-        args = ['name', 'latex_label', 'minimum', 'maximum']
+        args = ['name', 'latex_label', 'unit', 'minimum', 'maximum']
         args.extend(subclass_args)
 
         property_names = [p for p in dir(self.__class__) if isinstance(getattr(self.__class__, p), property)]
@@ -443,6 +447,22 @@ class Prior(object):
         else:
             self.__latex_label = latex_label
 
+    @property
+    def unit(self):
+        return self.__unit
+
+    @unit.setter
+    def unit(self, unit):
+        self.__unit = unit
+
+    @property
+    def latex_label_with_unit(self):
+        """ If a unit is specified, returns a string of the latex label and unit """
+        if self.unit is not None:
+            return "{} [{}]".format(self.latex_label, self.unit)
+        else:
+            return self.latex_label
+
     @property
     def minimum(self):
         return self.__minimum
@@ -470,7 +490,7 @@ class Prior(object):
 
 class DeltaFunction(Prior):
 
-    def __init__(self, peak, name=None, latex_label=None):
+    def __init__(self, peak, name=None, latex_label=None, unit=None):
         """Dirac delta function prior, this always returns peak.
 
         Parameters
@@ -481,9 +501,12 @@ class DeltaFunction(Prior):
             See superclass
         latex_label: str
             See superclass
+        unit: str
+            See superclass
 
         """
-        Prior.__init__(self, name, latex_label, minimum=peak, maximum=peak)
+        Prior.__init__(self, name=name, latex_label=latex_label, unit=unit,
+                       minimum=peak, maximum=peak)
         self.peak = peak
 
     def rescale(self, val):
@@ -524,7 +547,8 @@ class DeltaFunction(Prior):
 
 class PowerLaw(Prior):
 
-    def __init__(self, alpha, minimum, maximum, name=None, latex_label=None):
+    def __init__(self, alpha, minimum, maximum, name=None, latex_label=None,
+                 unit=None):
         """Power law with bounds and alpha, spectral index
 
         Parameters
@@ -539,8 +563,11 @@ class PowerLaw(Prior):
             See superclass
         latex_label: str
             See superclass
+        unit: str
+            See superclass
         """
-        Prior.__init__(self, name, latex_label, minimum, maximum)
+        Prior.__init__(self, name=name, latex_label=latex_label,
+                       minimum=minimum, maximum=maximum, unit=unit)
         self.alpha = alpha
 
     def rescale(self, val):
@@ -580,8 +607,9 @@ class PowerLaw(Prior):
         if self.alpha == -1:
             return np.nan_to_num(1 / val / np.log(self.maximum / self.minimum)) * in_prior
         else:
-            return np.nan_to_num(val ** self.alpha * (1 + self.alpha) / (self.maximum ** (1 + self.alpha)
-                                                                         - self.minimum ** (1 + self.alpha))) * in_prior
+            return np.nan_to_num(val ** self.alpha * (1 + self.alpha) /
+                                 (self.maximum ** (1 + self.alpha) -
+                                  self.minimum ** (1 + self.alpha))) * in_prior
 
     def ln_prob(self, val):
         """Return the logarithmic prior probability of val
@@ -600,11 +628,10 @@ class PowerLaw(Prior):
         if self.alpha == -1:
             normalising = 1. / np.log(self.maximum / self.minimum)
         else:
-            normalising = (1 + self.alpha) / (self.maximum ** (1 + self.alpha)
-                                              - self.minimum ** (
-                                                          1 + self.alpha))
+            normalising = (1 + self.alpha) / (self.maximum ** (1 + self.alpha) -
+                                              self.minimum ** (1 + self.alpha))
 
-        return (self.alpha * np.log(val) + np.log(normalising)) + np.log(1.*in_prior)
+        return (self.alpha * np.log(val) + np.log(normalising)) + np.log(1. * in_prior)
 
     def __repr__(self):
         """Call to helper method in the super class."""
@@ -613,7 +640,8 @@ class PowerLaw(Prior):
 
 class Uniform(Prior):
 
-    def __init__(self, minimum, maximum, name=None, latex_label=None):
+    def __init__(self, minimum, maximum, name=None, latex_label=None,
+                 unit=None):
         """Uniform prior with bounds
 
         Parameters
@@ -626,8 +654,11 @@ class Uniform(Prior):
             See superclass
         latex_label: str
             See superclass
+        unit: str
+            See superclass
         """
-        Prior.__init__(self, name, latex_label, minimum, maximum)
+        Prior.__init__(self, name=name, latex_label=latex_label,
+                       minimum=minimum, maximum=maximum, unit=unit)
 
     def rescale(self, val):
         Prior.test_valid_for_rescaling(val)
@@ -645,7 +676,7 @@ class Uniform(Prior):
         float: Prior probability of val
         """
         return scipy.stats.uniform.pdf(val, loc=self.minimum,
-                                       scale=self.maximum-self.minimum)
+                                       scale=self.maximum - self.minimum)
 
     def ln_prob(self, val):
         """Return the log prior probability of val
@@ -659,12 +690,13 @@ class Uniform(Prior):
         float: log probability of val
         """
         return scipy.stats.uniform.logpdf(val, loc=self.minimum,
-                                          scale=self.maximum-self.minimum)
+                                          scale=self.maximum - self.minimum)
 
 
 class LogUniform(PowerLaw):
 
-    def __init__(self, minimum, maximum, name=None, latex_label=None):
+    def __init__(self, minimum, maximum, name=None, latex_label=None,
+                 unit=None):
         """Log-Uniform prior with bounds
 
         Parameters
@@ -677,8 +709,11 @@ class LogUniform(PowerLaw):
             See superclass
         latex_label: str
             See superclass
+        unit: str
+            See superclass
         """
-        PowerLaw.__init__(self, name=name, latex_label=latex_label, minimum=minimum, maximum=maximum, alpha=-1)
+        PowerLaw.__init__(self, name=name, latex_label=latex_label, unit=unit,
+                          minimum=minimum, maximum=maximum, alpha=-1)
         if self.minimum <= 0:
             logger.warning('You specified a uniform-in-log prior with minimum={}'.format(self.minimum))
 
@@ -689,7 +724,8 @@ class LogUniform(PowerLaw):
 
 class Cosine(Prior):
 
-    def __init__(self, name=None, latex_label=None, minimum=-np.pi / 2, maximum=np.pi / 2):
+    def __init__(self, name=None, latex_label=None, unit=None,
+                 minimum=-np.pi / 2, maximum=np.pi / 2):
         """Cosine prior with bounds
 
         Parameters
@@ -702,8 +738,11 @@ class Cosine(Prior):
             See superclass
         latex_label: str
             See superclass
+        unit: str
+            See superclass
         """
-        Prior.__init__(self, name, latex_label, minimum, maximum)
+        Prior.__init__(self, name=name, latex_label=latex_label, unit=unit,
+                       minimum=minimum, maximum=maximum)
 
     def rescale(self, val):
         """
@@ -731,7 +770,8 @@ class Cosine(Prior):
 
 class Sine(Prior):
 
-    def __init__(self, name=None, latex_label=None, minimum=0, maximum=np.pi):
+    def __init__(self, name=None, latex_label=None, unit=None, minimum=0,
+                 maximum=np.pi):
         """Sine prior with bounds
 
         Parameters
@@ -744,8 +784,11 @@ class Sine(Prior):
             See superclass
         latex_label: str
             See superclass
+        unit: str
+            See superclass
         """
-        Prior.__init__(self, name, latex_label, minimum, maximum)
+        Prior.__init__(self, name=name, latex_label=latex_label, unit=unit,
+                       minimum=minimum, maximum=maximum)
 
     def rescale(self, val):
         """
@@ -772,7 +815,7 @@ class Sine(Prior):
 
 
 class Gaussian(Prior):
-    def __init__(self, mu, sigma, name=None, latex_label=None):
+    def __init__(self, mu, sigma, name=None, latex_label=None, unit=None):
         """Gaussian prior with mean mu and width sigma
 
         Parameters
@@ -785,8 +828,10 @@ class Gaussian(Prior):
             See superclass
         latex_label: str
             See superclass
+        unit: str
+            See superclass
         """
-        Prior.__init__(self, name, latex_label)
+        Prior.__init__(self, name=name, latex_label=latex_label, unit=unit)
         self.mu = mu
         self.sigma = sigma
 
@@ -821,8 +866,8 @@ class Gaussian(Prior):
 
 
 class Normal(Gaussian):
-    
-    def __init__(self, mu, sigma, name=None, latex_label=None):
+
+    def __init__(self, mu, sigma, name=None, latex_label=None, unit=None):
         """A synonym for the Gaussian distribution.
 
         Parameters
@@ -835,13 +880,17 @@ class Normal(Gaussian):
             See superclass
         latex_label: str
             See superclass
+        unit: str
+            See superclass
         """
-        Gaussian.__init__(self, mu, sigma, name=name, latex_label=latex_label)
+        Gaussian.__init__(self, mu=mu, sigma=sigma, name=name,
+                          latex_label=latex_label, unit=unit)
 
 
 class TruncatedGaussian(Prior):
 
-    def __init__(self, mu, sigma, minimum, maximum, name=None, latex_label=None):
+    def __init__(self, mu, sigma, minimum, maximum, name=None,
+                 latex_label=None, unit=None):
         """Truncated Gaussian prior with mean mu and width sigma
 
         https://en.wikipedia.org/wiki/Truncated_normal_distribution
@@ -860,8 +909,11 @@ class TruncatedGaussian(Prior):
             See superclass
         latex_label: str
             See superclass
+        unit: str
+            See superclass
         """
-        Prior.__init__(self, name=name, latex_label=latex_label, minimum=minimum, maximum=maximum)
+        Prior.__init__(self, name=name, latex_label=latex_label, unit=unit,
+                       minimum=minimum, maximum=maximum)
         self.mu = mu
         self.sigma = sigma
 
@@ -899,7 +951,7 @@ class TruncatedGaussian(Prior):
         """
         in_prior = (val >= self.minimum) & (val <= self.maximum)
         return np.exp(-(self.mu - val) ** 2 / (2 * self.sigma ** 2)) / (
-                2 * np.pi) ** 0.5 / self.sigma / self.normalisation * in_prior
+            2 * np.pi) ** 0.5 / self.sigma / self.normalisation * in_prior
 
     def __repr__(self):
         """Call to helper method in the super class."""
@@ -907,8 +959,9 @@ class TruncatedGaussian(Prior):
 
 
 class TruncatedNormal(TruncatedGaussian):
-    
-    def __init__(self, mu, sigma, minimum, maximum, name=None, latex_label=None):
+
+    def __init__(self, mu, sigma, minimum, maximum, name=None,
+                 latex_label=None, unit=None):
         """A synonym for the TruncatedGaussian distribution.
 
         Parameters
@@ -925,12 +978,16 @@ class TruncatedNormal(TruncatedGaussian):
             See superclass
         latex_label: str
             See superclass
+        unit: str
+            See superclass
         """
-        TruncatedGaussian.__init__(self, mu, sigma, minimum, maximum, name=name, latex_label=latex_label)
+        TruncatedGaussian.__init__(self, mu=mu, sigma=sigma, minimum=minimum,
+                                   maximum=maximum, name=name,
+                                   latex_label=latex_label, unit=unit)
 
 
 class HalfGaussian(TruncatedGaussian):
-    def __init__(self, sigma, name=None, latex_label=None):
+    def __init__(self, sigma, name=None, latex_label=None, unit=None):
         """A Gaussian with its mode at zero, and truncated to only be positive.
 
         Parameters
@@ -941,16 +998,20 @@ class HalfGaussian(TruncatedGaussian):
             See superclass
         latex_label: str
             See superclass
+        unit: str
+            See superclass
         """
-        TruncatedGaussian.__init__(self, 0., sigma, minimum=0., maximum=np.inf, name=name, latex_label=latex_label)
- 
+        TruncatedGaussian.__init__(self, 0., sigma, minimum=0., maximum=np.inf,
+                                   name=name, latex_label=latex_label,
+                                   unit=unit)
+
     def __repr__(self):
         """Call to helper method in the super class."""
         return Prior._subclass_repr_helper(self, subclass_args=['sigma'])
 
 
 class HalfNormal(HalfGaussian):
-    def __init__(self, sigma, name=None, latex_label=None):
+    def __init__(self, sigma, name=None, latex_label=None, unit=None):
         """A synonym for the HalfGaussian distribution.
 
         Parameters
@@ -961,12 +1022,16 @@ class HalfNormal(HalfGaussian):
             See superclass
         latex_label: str
             See superclass
+        unit: str
+            See superclass
+
         """
-        HalfGaussian.__init__(self, sigma, name=name, latex_label=latex_label)
+        HalfGaussian.__init__(self, sigma=sigma, name=name,
+                              latex_label=latex_label, unit=unit)
 
 
 class LogNormal(Prior):
-    def __init__(self, mu, sigma, name=None, latex_label=None):
+    def __init__(self, mu, sigma, name=None, latex_label=None, unit=None):
         """Log-normal prior with mean mu and width sigma
 
         https://en.wikipedia.org/wiki/Log-normal_distribution
@@ -981,8 +1046,12 @@ class LogNormal(Prior):
             See superclass
         latex_label: str
             See superclass
+        unit: str
+            See superclass
+
         """
-        Prior.__init__(self, name=name, minimum=0., latex_label=latex_label)
+        Prior.__init__(self, name=name, minimum=0., latex_label=latex_label,
+                       unit=unit)
 
         if sigma <= 0.:
             raise ValueError("For the LogGaussian prior the standard deviation must be positive")
@@ -1022,7 +1091,7 @@ class LogNormal(Prior):
 
 
 class LogGaussian(LogNormal):
-    def __init__(self, mu, sigma, name=None, latex_label=None):
+    def __init__(self, mu, sigma, name=None, latex_label=None, unit=None):
         """Synonym of LogNormal prior
 
         https://en.wikipedia.org/wiki/Log-normal_distribution
@@ -1037,12 +1106,16 @@ class LogGaussian(LogNormal):
             See superclass
         latex_label: str
             See superclass
+        unit: str
+            See superclass
+
         """
-        LogNormal.__init__(self, mu, sigma, name, latex_label)
+        LogNormal.__init__(self, mu=mu, sigma=sigma, name=name,
+                           latex_label=latex_label, unit=unit)
 
 
 class Exponential(Prior):
-    def __init__(self, mu, name=None, latex_label=None):
+    def __init__(self, mu, name=None, latex_label=None, unit=None):
         """Exponential prior with mean mu
 
         Parameters
@@ -1053,8 +1126,12 @@ class Exponential(Prior):
             See superclass
         latex_label: str
             See superclass
+        unit: str
+            See superclass
+
         """
-        Prior.__init__(self, name=name, minimum=0., latex_label=latex_label)
+        Prior.__init__(self, name=name, minimum=0., latex_label=latex_label,
+                       unit=unit)
         self.mu = mu
 
     def rescale(self, val):
@@ -1089,7 +1166,8 @@ class Exponential(Prior):
 
 
 class StudentT(Prior):
-    def __init__(self, df, mu=0., scale=1., name=None, latex_label=None):
+    def __init__(self, df, mu=0., scale=1., name=None, latex_label=None,
+                 unit=None):
         """Student's t-distribution prior with number of degrees of freedom df,
         mean mu and scale
 
@@ -1107,9 +1185,11 @@ class StudentT(Prior):
             See superclass
         latex_label: str
             See superclass
+        unit: str
+            See superclass
         """
-        Prior.__init__(self, name, latex_label)
-        
+        Prior.__init__(self, name=name, latex_label=latex_label, unit=unit)
+
         if df <= 0. or scale <= 0.:
             raise ValueError("For the StudentT prior the number of degrees of freedom and scale must be positive")
 
@@ -1150,7 +1230,7 @@ class StudentT(Prior):
 
 
 class Beta(Prior):
-    def __init__(self, alpha, beta, name=None, latex_label=None):
+    def __init__(self, alpha, beta, name=None, latex_label=None, unit=None):
         """Beta distribution
 
         https://en.wikipedia.org/wiki/Beta_distribution
@@ -1165,8 +1245,12 @@ class Beta(Prior):
             See superclass
         latex_label: str
             See superclass
+        unit: str
+            See superclass
+
         """
-        Prior.__init__(self, minimum=0., maximum=1., name=name, latex_label=latex_label)
+        Prior.__init__(self, minimum=0., maximum=1., name=name,
+                       latex_label=latex_label, unit=unit)
 
         if alpha <= 0. or beta <= 0.:
             raise ValueError("alpha and beta must both be positive values")
@@ -1215,7 +1299,7 @@ class Beta(Prior):
             return spdf
 
         if isinstance(val, np.ndarray):
-            pdf = -np.inf*np.ones(len(val))
+            pdf = -np.inf * np.ones(len(val))
             pdf[np.isfinite(spdf)] = spdf[np.isfinite]
             return spdf
         else:
@@ -1227,7 +1311,7 @@ class Beta(Prior):
 
 
 class Logistic(Prior):
-    def __init__(self, mu, scale, name=None, latex_label=None):
+    def __init__(self, mu, scale, name=None, latex_label=None, unit=None):
         """Logistic distribution
 
         https://en.wikipedia.org/wiki/Logistic_distribution
@@ -1242,8 +1326,10 @@ class Logistic(Prior):
             See superclass
         latex_label: str
             See superclass
+        unit: str
+            See superclass
         """
-        Prior.__init__(self, name=name, latex_label=latex_label)
+        Prior.__init__(self, name=name, latex_label=latex_label, unit=unit)
 
         if scale <= 0.:
             raise ValueError("For the Logistic prior the scale must be positive")
@@ -1284,7 +1370,7 @@ class Logistic(Prior):
 
 
 class Cauchy(Prior):
-    def __init__(self, alpha, beta, name=None, latex_label=None):
+    def __init__(self, alpha, beta, name=None, latex_label=None, unit=None):
         """Cauchy distribution
 
         https://en.wikipedia.org/wiki/Cauchy_distribution
@@ -1299,8 +1385,10 @@ class Cauchy(Prior):
             See superclass
         latex_label: str
             See superclass
+        unit: str
+            See superclass
         """
-        Prior.__init__(self, name=name, latex_label=latex_label)
+        Prior.__init__(self, name=name, latex_label=latex_label, unit=unit)
 
         if beta <= 0.:
             raise ValueError("For the Cauchy prior the scale must be positive")
@@ -1341,7 +1429,7 @@ class Cauchy(Prior):
 
 
 class Lorentzian(Cauchy):
-    def __init__(self, alpha, beta, name=None, latex_label=None):
+    def __init__(self, alpha, beta, name=None, latex_label=None, unit=None):
         """Synonym for the Cauchy distribution
 
         https://en.wikipedia.org/wiki/Cauchy_distribution
@@ -1356,12 +1444,15 @@ class Lorentzian(Cauchy):
             See superclass
         latex_label: str
             See superclass
+        unit: str
+            See superclass
         """
-        Cauchy.__init__(self, alpha, beta, name=name, latex_label=latex_label)
+        Cauchy.__init__(self, alpha=alpha, beta=beta, name=name,
+                        latex_label=latex_label, unit=unit)
 
 
 class Gamma(Prior):
-    def __init__(self, k, theta=1., name=None, latex_label=None):
+    def __init__(self, k, theta=1., name=None, latex_label=None, unit=None):
         """Gamma distribution
 
         https://en.wikipedia.org/wiki/Gamma_distribution
@@ -1376,8 +1467,11 @@ class Gamma(Prior):
             See superclass
         latex_label: str
             See superclass
+        unit: str
+            See superclass
         """
-        Prior.__init__(self, name=name, minimum=0., latex_label=latex_label)
+        Prior.__init__(self, name=name, minimum=0., latex_label=latex_label,
+                       unit=unit)
 
         if k <= 0 or theta <= 0:
             raise ValueError("For the Gamma prior the shape and scale must be positive")
@@ -1419,7 +1513,7 @@ class Gamma(Prior):
 
 
 class ChiSquared(Gamma):
-    def __init__(self, nu, name=None, latex_label=None):
+    def __init__(self, nu, name=None, latex_label=None, unit=None):
         """Chi-squared distribution
 
         https://en.wikipedia.org/wiki/Chi-squared_distribution
@@ -1432,17 +1526,21 @@ class ChiSquared(Gamma):
             See superclass
         latex_label: str
             See superclass
+        unit: str
+            See superclass
         """
 
         if nu <= 0 or not isinstance(nu, int):
             raise ValueError("For the ChiSquared prior the number of degrees of freedom must be a positive integer")
 
-        Gamma.__init__(self, name=name, k=nu/2., theta=2., latex_label=latex_label)
+        Gamma.__init__(self, name=name, k=nu / 2., theta=2.,
+                       latex_label=latex_label, unit=unit)
 
 
 class Interped(Prior):
 
-    def __init__(self, xx, yy, minimum=np.nan, maximum=np.nan, name=None, latex_label=None):
+    def __init__(self, xx, yy, minimum=np.nan, maximum=np.nan, name=None,
+                 latex_label=None, unit=None):
         """Creates an interpolated prior function from arrays of xx and yy=p(xx)
 
         Parameters
@@ -1459,6 +1557,8 @@ class Interped(Prior):
             See superclass
         latex_label: str
             See superclass
+        unit: str
+            See superclass
 
         Attributes
         -------
@@ -1475,7 +1575,7 @@ class Interped(Prior):
         self.xx = xx
         self.yy = yy
         self.__all_interpolated = interp1d(x=xx, y=yy, bounds_error=False, fill_value=0)
-        Prior.__init__(self, name, latex_label,
+        Prior.__init__(self, name=name, latex_label=latex_label, unit=unit,
                        minimum=np.nanmax(np.array((min(xx), minimum))),
                        maximum=np.nanmin(np.array((max(xx), maximum))))
         self.__initialize_attributes()
@@ -1566,7 +1666,8 @@ class Interped(Prior):
 
 class FromFile(Interped):
 
-    def __init__(self, file_name, minimum=None, maximum=None, name=None, latex_label=None):
+    def __init__(self, file_name, minimum=None, maximum=None, name=None,
+                 latex_label=None, unit=None):
         """Creates an interpolated prior function from arrays of xx and yy=p(xx) extracted from a file
 
         Parameters
@@ -1581,6 +1682,8 @@ class FromFile(Interped):
             See superclass
         latex_label: str
             See superclass
+        unit: str
+            See superclass
 
         Attributes
         -------
@@ -1591,7 +1694,9 @@ class FromFile(Interped):
         try:
             self.id = file_name
             xx, yy = np.genfromtxt(self.id).T
-            Interped.__init__(self, xx=xx, yy=yy, minimum=minimum, maximum=maximum, name=name, latex_label=latex_label)
+            Interped.__init__(self, xx=xx, yy=yy, minimum=minimum,
+                              maximum=maximum, name=name,
+                              latex_label=latex_label, unit=unit)
         except IOError:
             logger.warning("Can't load {}.".format(self.id))
             logger.warning("Format should be:")
diff --git a/tupak/core/result.py b/tupak/core/result.py
index 254c385837bcc505249aff14197bd1e3dcfe3ebd..064fb6774522709ac8112ebe0c24ad770c260c68 100644
--- a/tupak/core/result.py
+++ b/tupak/core/result.py
@@ -198,7 +198,7 @@ class Result(dict):
         for k in keys:
             if k in self.search_parameter_keys:
                 idx = self.search_parameter_keys.index(k)
-                latex_labels.append(self.parameter_labels[idx])
+                latex_labels.append(self.parameter_labels_with_unit[idx])
             elif k in self.parameter_labels:
                 latex_labels.append(k)
             else:
@@ -358,14 +358,18 @@ class Result(dict):
             Function which adds in extra parameters to the data frame,
             should take the data_frame, likelihood and prior as arguments.
         """
-        data_frame = pd.DataFrame(
-            self.samples, columns=self.search_parameter_keys)
-        data_frame['log_likelihood'] = getattr(self, 'log_likelihood_evaluations', np.nan)
+        if hasattr(self, 'posterior') is False:
+            data_frame = pd.DataFrame(
+                self.samples, columns=self.search_parameter_keys)
+            data_frame['log_likelihood'] = getattr(
+                self, 'log_likelihood_evaluations', np.nan)
+            # We save the samples in the posterior and remove the array of samples
+            del self.samples
+        else:
+            data_frame = self.posterior
         if conversion_function is not None:
             data_frame = conversion_function(data_frame, likelihood, priors)
         self.posterior = data_frame
-        # We save the samples in the posterior and remove the array of samples
-        del self.samples
 
     def store_prior_values(self, priors=None):
         """
@@ -387,18 +391,36 @@ class Result(dict):
 
     def construct_cbc_derived_parameters(self):
         """ Construct widely used derived parameters of CBCs """
-        self.posterior['mass_chirp'] = (self.posterior.mass_1 * self.posterior.mass_2) ** 0.6 / (
-                self.posterior.mass_1 + self.posterior.mass_2) ** 0.2
+        self.posterior['mass_chirp'] = (
+            (self.posterior.mass_1 * self.posterior.mass_2) ** 0.6 / (
+                self.posterior.mass_1 + self.posterior.mass_2) ** 0.2)
+        self.search_parameter_keys.append('mass_chirp')
+        self.parameter_labels.append(r'$\mathcal{M}$')
+
         self.posterior['q'] = self.posterior.mass_2 / self.posterior.mass_1
-        self.posterior['eta'] = (self.posterior.mass_1 * self.posterior.mass_2) / (
-                self.posterior.mass_1 + self.posterior.mass_2) ** 2
-
-        self.posterior['chi_eff'] = (self.posterior.a_1 * np.cos(self.posterior.tilt_1)
-                                     + self.posterior.q * self.posterior.a_2 * np.cos(self.posterior.tilt_2)) / (
-                                            1 + self.posterior.q)
-        self.posterior['chi_p'] = max(self.posterior.a_1 * np.sin(self.posterior.tilt_1),
-                                      (4 * self.posterior.q + 3) / (3 * self.posterior.q + 4) * self.posterior.q
-                                      * self.posterior.a_2 * np.sin(self.posterior.tilt_2))
+        self.search_parameter_keys.append('q')
+        self.parameter_labels.append('$q$')
+
+        self.posterior['eta'] = (
+            (self.posterior.mass_1 * self.posterior.mass_2) / (
+                self.posterior.mass_1 + self.posterior.mass_2) ** 2)
+        self.search_parameter_keys.append('eta')
+        self.parameter_labels.append(r'$\eta$')
+
+        self.posterior['chi_eff'] = (
+            (self.posterior.a_1 * np.cos(self.posterior.tilt_1) +
+                self.posterior.q * self.posterior.a_2 *
+                np.cos(self.posterior.tilt_2)) / (1 + self.posterior.q))
+        self.search_parameter_keys.append('chi_eff')
+        self.parameter_labels.append(r'$\chi_{\mathrm eff}$')
+
+        self.posterior['chi_p'] = (
+            np.maximum(self.posterior.a_1 * np.sin(self.posterior.tilt_1),
+                       (4 * self.posterior.q + 3) / (3 * self.posterior.q + 4) *
+                       self.posterior.q * self.posterior.a_2 *
+                       np.sin(self.posterior.tilt_2)))
+        self.search_parameter_keys.append('chi_p')
+        self.parameter_labels.append(r'$\chi_{\mathrm p}$')
 
     def check_attribute_match_to_other_object(self, name, other_object):
         """ Check attribute name exists in other_object and is the same
diff --git a/tupak/core/sampler.py b/tupak/core/sampler.py
index 6449b7a8c4a592d89447925d6a9db0dd0fac8193..0206f803fa4984c1d1e05ff42359d600c0699912 100644
--- a/tupak/core/sampler.py
+++ b/tupak/core/sampler.py
@@ -192,6 +192,9 @@ class Sampler(object):
         result.parameter_labels = [
             self.priors[k].latex_label for k in
             self.__search_parameter_keys]
+        result.parameter_labels_with_unit = [
+            self.priors[k].latex_label_with_unit for k in
+            self.__search_parameter_keys]
         result.label = self.label
         result.outdir = self.outdir
         result.kwargs = self.kwargs
@@ -385,6 +388,48 @@ class Sampler(object):
             logger.info("Using sampler {} with kwargs {}".format(
                 self.__class__.__name__, kwargs_print))
 
+    def setup_nburn(self):
+        """ Handles calculating nburn, either from a given value or inferred """
+        if type(self.nburn) in [float, int]:
+            self.nburn = int(self.nburn)
+            logger.info("Discarding {} steps for burn-in".format(self.nburn))
+        elif self.result.max_autocorrelation_time is None:
+            self.nburn = int(self.burn_in_fraction * self.nsteps)
+            logger.info("Autocorrelation time not calculated, discarding {} "
+                        "steps for burn-in".format(self.nburn))
+        else:
+            self.nburn = int(
+                self.burn_in_act * self.result.max_autocorrelation_time)
+            logger.info("Discarding {} steps for burn-in, estimated from "
+                        "autocorr".format(self.nburn))
+
+    def calculate_autocorrelation(self, samples, c=3):
+        """ Uses the `emcee.autocorr` module to estimate the autocorrelation
+
+        Parameters
+        ----------
+        samples: array_like
+            A chain of samples.
+        c: float
+            The minimum number of autocorrelation times needed to trust the
+            estimate (default: `3`). See `emcee.autocorr.integrated_time`.
+        """
+
+        try:
+            import emcee
+        except ImportError:
+            self.result.max_autocorrelation_time = None
+            logger.info("emcee not available, so unable to calculate autocorr time")
+            return  # without this, the emcee references below raise NameError
+        try:
+            self.result.max_autocorrelation_time = int(np.max(
+                emcee.autocorr.integrated_time(samples, c=c)))
+            logger.info("Max autocorr time = {}".format(
+                self.result.max_autocorrelation_time))
+        except emcee.autocorr.AutocorrError as e:
+            self.result.max_autocorrelation_time = None
+            logger.info("Unable to calculate autocorr time: {}".format(e))
+
 
 class Nestle(Sampler):
 
@@ -472,14 +517,14 @@ class Dynesty(Sampler):
                              resume=True, walks=self.ndim * 5, verbose=True,
                              check_point_delta_t=60 * 10, nlive=250)
 
-        # Overwrite default values with user specified values
-        self.__kwargs.update(kwargs)
-
         # Check if nlive was instead given by another name
-        if 'nlive' not in self.__kwargs:
+        if 'nlive' not in kwargs:
             for equiv in ['nlives', 'n_live_points', 'npoint', 'npoints']:
-                if equiv in self.__kwargs:
-                    self.__kwargs['nlive'] = self.__kwargs.pop(equiv)
+                if equiv in kwargs:
+                    kwargs['nlive'] = kwargs.pop(equiv)
+
+        # Overwrite default values with user specified values
+        self.__kwargs.update(kwargs)
 
         # Set the update interval
         if 'update_interval' not in self.__kwargs:
@@ -493,8 +538,8 @@ class Dynesty(Sampler):
 
         # If n_check_point is not already set, set it checkpoint every 10 mins
         if 'n_check_point' not in self.__kwargs:
-            n_check_point_raw = (self.__kwargs['check_point_delta_t']
-                                 / self._log_likelihood_eval_time)
+            n_check_point_raw = (self.__kwargs['check_point_delta_t'] /
+                                 self._log_likelihood_eval_time)
             n_check_point_rnd = int(float("{:1.0g}".format(n_check_point_raw)))
             self.__kwargs['n_check_point'] = n_check_point_rnd
 
@@ -555,6 +600,9 @@ class Dynesty(Sampler):
         self.result.log_likelihood_evaluations = out.logl
         self.result.log_evidence = out.logz[-1]
         self.result.log_evidence_err = out.logzerr[-1]
+        self.result.nested_samples = pd.DataFrame(
+            out.samples, columns=self.search_parameter_keys)
+        self.result.nested_samples['weights'] = weights
 
         if self.plot:
             self.generate_trace_plots(out)
@@ -808,6 +856,80 @@ class Pymultinest(Sampler):
         return self.result
 
 
+class Cpnest(Sampler):
+    """
+    https://github.com/johnveitch/cpnest
+
+    TODO:
+        - allow custom jump proposals to be specified, ideally by parameter name
+        - figure out how to resume from a previous run
+    """
+
+    @property
+    def kwargs(self):
+        return self.__kwargs
+
+    @kwargs.setter
+    def kwargs(self, kwargs):
+        # Check if nlive was instead given by another name
+        if 'Nlive' not in kwargs:
+            for equiv in ['nlives', 'n_live_points', 'npoint', 'npoints',
+                          'nlive']:
+                if equiv in kwargs:
+                    kwargs['Nlive'] = kwargs.pop(equiv)
+        if 'seed' not in kwargs:
+            logger.warning('No seed provided, cpnest will use 1234.')
+
+        # Set some default values
+        self.__kwargs = dict(verbose=1, Nthreads=1, Nlive=250, maxmcmc=1000)
+
+        # Overwrite default values with user specified values
+        self.__kwargs.update(kwargs)
+
+    def _run_external_sampler(self):
+        cpnest = self.external_sampler
+        import cpnest.model
+
+        class Model(cpnest.model.Model):
+            """ A wrapper class to pass our log_likelihood into cpnest """
+            def __init__(self, names, bounds):
+                self.names = names
+                self.bounds = bounds
+                self._check_bounds()
+
+            @staticmethod
+            def log_likelihood(x):
+                theta = [x[n] for n in self.search_parameter_keys]
+                return self.log_likelihood(theta)
+
+            @staticmethod
+            def log_prior(x):
+                theta = [x[n] for n in self.search_parameter_keys]
+                return self.log_prior(theta)
+
+            def _check_bounds(self):
+                for bound in self.bounds:
+                    if not all(np.isfinite(bound)):
+                        raise ValueError(
+                            'CPNest requires priors to have finite bounds.')
+
+        bounds = [[self.priors[key].minimum, self.priors[key].maximum]
+                  for key in self.search_parameter_keys]
+        model = Model(self.search_parameter_keys, bounds)
+        out = cpnest.CPNest(model, output=self.outdir, **self.kwargs)
+        out.run()
+
+        if self.plot:
+            out.plot()
+
+        # Since the output is not just samples, but log_likelihood as well,
+        # we turn this into a dataframe here. The index [0] here may be wrong
+        self.result.posterior = pd.DataFrame(out.posterior_samples[0])
+        self.result.log_evidence = out.NS.state.logZ
+        self.result.log_evidence_err = np.nan
+        return self.result
+
+
 class Emcee(Sampler):
     """ https://github.com/dfm/emcee """
 
@@ -849,7 +971,7 @@ class Emcee(Sampler):
             pass
 
         self.result.sampler_output = np.nan
-        self.calculate_autocorrelation(sampler)
+        self.calculate_autocorrelation(sampler.chain[:, :, :].reshape((-1, self.ndim)))
         self.setup_nburn()
         self.result.nburn = self.nburn
         self.result.samples = sampler.chain[:, self.nburn:, :].reshape(
@@ -866,41 +988,6 @@ class Emcee(Sampler):
         else:
             return self.log_likelihood(theta) + p
 
-    def setup_nburn(self):
-        """ Handles calculating nburn, either from a given value or inferred """
-        if type(self.nburn) in [float, int]:
-            self.nburn = int(self.nburn)
-            logger.info("Discarding {} steps for burn-in".format(self.nburn))
-        elif self.result.max_autocorrelation_time is None:
-            self.nburn = int(self.burn_in_fraction * self.nsteps)
-            logger.info("Autocorrelation time not calculated, discarding {} "
-                        " steps for burn-in".format(self.nburn))
-        else:
-            self.nburn = int(
-                self.burn_in_act * self.result.max_autocorrelation_time)
-            logger.info("Discarding {} steps for burn-in, estimated from "
-                        "autocorr".format(self.nburn))
-
-    def calculate_autocorrelation(self, sampler, c=3):
-        """ Uses the `emcee.autocorr` module to estimate the autocorrelation
-
-        Parameters
-        ----------
-        c: float
-            The minimum number of autocorrelation times needed to trust the
-            estimate (default: `3`). See `emcee.autocorr.integrated_time`.
-        """
-
-        import emcee
-        try:
-            self.result.max_autocorrelation_time = int(np.max(
-                sampler.get_autocorr_time(c=c)))
-            logger.info("Max autocorr time = {}".format(
-                self.result.max_autocorrelation_time))
-        except emcee.autocorr.AutocorrError as e:
-            self.result.max_autocorrelation_time = None
-            logger.info("Unable to calculate autocorr time: {}".format(e))
-
 
 class Ptemcee(Emcee):
     """ https://github.com/willvousden/ptemcee """
@@ -1033,36 +1120,61 @@ class Pymc3(Sampler):
 
         prior_map = {}
         self.prior_map = prior_map
-        
-        # predefined PyMC3 distributions 
-        prior_map['Gaussian'] =          {'pymc3': 'Normal',
-                                          'argmap': {'mu': 'mu', 'sigma': 'sd'}}
-        prior_map['TruncatedGaussian'] = {'pymc3': 'TruncatedNormal',
-                                          'argmap': {'mu': 'mu', 'sigma': 'sd', 'minimum': 'lower', 'maximum': 'upper'}}
-        prior_map['HalfGaussian'] =      {'pymc3': 'HalfNormal',
-                                          'argmap': {'sigma': 'sd'}}
-        prior_map['Uniform'] =           {'pymc3': 'Uniform',
-                                          'argmap': {'minimum': 'lower', 'maximum': 'upper'}}
-        prior_map['LogNormal'] =         {'pymc3': 'Lognormal',
-                                          'argmap': {'mu': 'mu', 'sigma': 'sd'}}
-        prior_map['Exponential'] =       {'pymc3': 'Exponential',
-                                          'argmap': {'mu': 'lam'},
-                                          'argtransform': {'mu': lambda mu: 1./mu}}
-        prior_map['StudentT'] =          {'pymc3': 'StudentT',
-                                          'argmap': {'df': 'nu', 'mu': 'mu', 'scale': 'sd'}}
-        prior_map['Beta'] =              {'pymc3': 'Beta',
-                                          'argmap': {'alpha': 'alpha', 'beta': 'beta'}}
-        prior_map['Logistic'] =          {'pymc3': 'Logistic',
-                                          'argmap': {'mu': 'mu', 'scale': 's'}}
-        prior_map['Cauchy'] =            {'pymc3': 'Cauchy',
-                                          'argmap': {'alpha': 'alpha', 'beta': 'beta'}}
-        prior_map['Gamma'] =             {'pymc3': 'Gamma',
-                                          'argmap': {'k': 'alpha', 'theta': 'beta'},
-                                          'argtransform': {'theta': lambda theta: 1./theta}}
-        prior_map['ChiSquared'] =        {'pymc3': 'ChiSquared',
-                                          'argmap': {'nu': 'nu'}}
-        prior_map['Interped'] =          {'pymc3': 'Interpolated',
-                                          'argmap': {'xx': 'x_points', 'yy': 'pdf_points'}}
+
+        # predefined PyMC3 distributions
+        prior_map['Gaussian'] = {
+            'pymc3': 'Normal',
+            'argmap': {'mu': 'mu', 'sigma': 'sd'}}
+        prior_map['TruncatedGaussian'] = {
+            'pymc3': 'TruncatedNormal',
+            'argmap': {'mu': 'mu',
+                       'sigma': 'sd',
+                       'minimum': 'lower',
+                       'maximum': 'upper'}}
+        prior_map['HalfGaussian'] = {
+            'pymc3': 'HalfNormal',
+            'argmap': {'sigma': 'sd'}}
+        prior_map['Uniform'] = {
+            'pymc3': 'Uniform',
+            'argmap': {'minimum': 'lower',
+                       'maximum': 'upper'}}
+        prior_map['LogNormal'] = {
+            'pymc3': 'Lognormal',
+            'argmap': {'mu': 'mu',
+                       'sigma': 'sd'}}
+        prior_map['Exponential'] = {
+            'pymc3': 'Exponential',
+            'argmap': {'mu': 'lam'},
+            'argtransform': {'mu': lambda mu: 1. / mu}}
+        prior_map['StudentT'] = {
+            'pymc3': 'StudentT',
+            'argmap': {'df': 'nu',
+                       'mu': 'mu',
+                       'scale': 'sd'}}
+        prior_map['Beta'] = {
+            'pymc3': 'Beta',
+            'argmap': {'alpha': 'alpha',
+                       'beta': 'beta'}}
+        prior_map['Logistic'] = {
+            'pymc3': 'Logistic',
+            'argmap': {'mu': 'mu',
+                       'scale': 's'}}
+        prior_map['Cauchy'] = {
+            'pymc3': 'Cauchy',
+            'argmap': {'alpha': 'alpha',
+                       'beta': 'beta'}}
+        prior_map['Gamma'] = {
+            'pymc3': 'Gamma',
+            'argmap': {'k': 'alpha',
+                       'theta': 'beta'},
+            'argtransform': {'theta': lambda theta: 1. / theta}}
+        prior_map['ChiSquared'] = {
+            'pymc3': 'ChiSquared',
+            'argmap': {'nu': 'nu'}}
+        prior_map['Interped'] = {
+            'pymc3': 'Interpolated',
+            'argmap': {'xx': 'x_points',
+                       'yy': 'pdf_points'}}
         prior_map['Normal'] = prior_map['Gaussian']
         prior_map['TruncatedNormal'] = prior_map['TruncatedGaussian']
         prior_map['HalfNormal'] = prior_map['HalfGaussian']
@@ -1070,12 +1182,15 @@ class Pymc3(Sampler):
         prior_map['Lorentzian'] = prior_map['Cauchy']
         prior_map['FromFile'] = prior_map['Interped']
 
+        # GW specific priors
+        prior_map['UniformComovingVolume'] = prior_map['Interped']
+
         # internally defined mappings for tupak priors
         prior_map['DeltaFunction'] = {'internal': self._deltafunction_prior}
-        prior_map['Sine'] =          {'internal': self._sine_prior}
-        prior_map['Cosine'] =        {'internal': self._cosine_prior}
-        prior_map['PowerLaw'] =      {'internal': self._powerlaw_prior}
-        prior_map['LogUniform'] =    {'internal': self._powerlaw_prior}
+        prior_map['Sine'] = {'internal': self._sine_prior}
+        prior_map['Cosine'] = {'internal': self._cosine_prior}
+        prior_map['PowerLaw'] = {'internal': self._powerlaw_prior}
+        prior_map['LogUniform'] = {'internal': self._powerlaw_prior}
 
     def _deltafunction_prior(self, key, **kwargs):
         """
@@ -1094,7 +1209,7 @@ class Pymc3(Sampler):
         """
         Map the tupak Sine prior to a PyMC3 style function
         """
- 
+
         from tupak.core.prior import Sine
 
         # check prior is a Sine
@@ -1116,7 +1231,9 @@ class Pymc3(Sampler):
                     self.lower = lower = tt.as_tensor_variable(floatX(lower))
                     self.upper = upper = tt.as_tensor_variable(floatX(upper))
                     self.norm = (tt.cos(lower) - tt.cos(upper))
-                    self.mean = (tt.sin(upper)+lower*tt.cos(lower) - tt.sin(lower) - upper*tt.cos(upper))/self.norm
+                    self.mean = (
+                        tt.sin(upper) + lower * tt.cos(lower) - tt.sin(lower) -
+                        upper * tt.cos(upper)) / self.norm
 
                     transform = pymc3.distributions.transforms.interval(lower, upper)
 
@@ -1125,7 +1242,9 @@ class Pymc3(Sampler):
                 def logp(self, value):
                     upper = self.upper
                     lower = self.lower
-                    return pymc3.distributions.dist_math.bound(tt.log(tt.sin(value)/self.norm), lower <= value, value <= upper)
+                    return pymc3.distributions.dist_math.bound(
+                        tt.log(tt.sin(value) / self.norm),
+                        lower <= value, value <= upper)
 
             return Pymc3Sine(key, lower=self.priors[key].minimum, upper=self.priors[key].maximum)
         else:
@@ -1135,7 +1254,7 @@ class Pymc3(Sampler):
         """
         Map the tupak Cosine prior to a PyMC3 style function
         """
- 
+
         from tupak.core.prior import Cosine
 
         # check prior is a Cosine
@@ -1150,14 +1269,16 @@ class Pymc3(Sampler):
                 raise ImportError("You must have Theano installed to use PyMC3")
 
             class Pymc3Cosine(pymc3.Continuous):
-                def __init__(self, lower=-np.pi/2., upper=np.pi/2.):
+                def __init__(self, lower=-np.pi / 2., upper=np.pi / 2.):
                     if lower >= upper:
                         raise ValueError("Lower bound is above upper bound!")
 
                     self.lower = lower = tt.as_tensor_variable(floatX(lower))
                     self.upper = upper = tt.as_tensor_variable(floatX(upper))
                     self.norm = (tt.sin(upper) - tt.sin(lower))
-                    self.mean = (upper*tt.sin(upper) + tt.cos(upper)-lower*tt.sin(lower)-tt.cos(lower))/self.norm
+                    self.mean = (
+                        upper * tt.sin(upper) + tt.cos(upper) -
+                        lower * tt.sin(lower) - tt.cos(lower)) / self.norm
 
                     transform = pymc3.distributions.transforms.interval(lower, upper)
 
@@ -1166,7 +1287,9 @@ class Pymc3(Sampler):
                 def logp(self, value):
                     upper = self.upper
                     lower = self.lower
-                    return pymc3.distributions.dist_math.bound(tt.log(tt.cos(value)/self.norm), lower <= value, value <= upper)
+                    return pymc3.distributions.dist_math.bound(
+                        tt.log(tt.cos(value) / self.norm),
+                        lower <= value, value <= upper)
 
             return Pymc3Cosine(key, lower=self.priors[key].minimum, upper=self.priors[key].maximum)
         else:
@@ -1176,7 +1299,7 @@ class Pymc3(Sampler):
         """
         Map the tupak PowerLaw prior to a PyMC3 style function
         """
- 
+
         from tupak.core.prior import PowerLaw
 
         # check prior is a PowerLaw
@@ -1208,11 +1331,11 @@ class Pymc3(Sampler):
                         self.alpha = alpha = tt.as_tensor_variable(floatX(alpha))
 
                         if falpha == -1:
-                            self.norm = 1./(tt.log(self.upper/self.lower))
+                            self.norm = 1. / (tt.log(self.upper / self.lower))
                         else:
                             beta = (1. + self.alpha)
-                            self.norm = 1. /(beta * (tt.pow(self.upper, beta) 
-                                          - tt.pow(self.lower, beta)))
+                            self.norm = 1. / (beta * (tt.pow(self.upper, beta) -
+                                                      tt.pow(self.lower, beta)))
 
                         transform = pymc3.distributions.transforms.interval(lower, upper)
 
@@ -1223,7 +1346,9 @@ class Pymc3(Sampler):
                         lower = self.lower
                         alpha = self.alpha
 
-                        return pymc3.distributions.dist_math.bound(self.alpha*tt.log(value) + tt.log(self.norm), lower <= value, value <= upper)
+                        return pymc3.distributions.dist_math.bound(
+                            alpha * tt.log(value) + tt.log(self.norm),
+                            lower <= value, value <= upper)
 
                 return Pymc3PowerLaw(key, lower=self.priors[key].minimum, upper=self.priors[key].maximum, alpha=self.priors[key].alpha)
         else:
@@ -1237,22 +1362,41 @@ class Pymc3(Sampler):
 
         step_methods = {m.__name__.lower(): m.__name__ for m in STEP_METHODS}
         if 'step' in self.__kwargs:
-            step_method = self.__kwargs.pop('step').lower()
+            self.step_method = self.__kwargs.pop('step')
+
+            # 'step' could be a dictionary of methods for different parameters, so check for this
+            if isinstance(self.step_method, (dict, OrderedDict)):
+                for key in self.step_method:
+                    if key not in self.__search_parameter_keys:
+                        raise ValueError("Setting a step method for an unknown parameter '{}'".format(key))
+                    else:
+                        if self.step_method[key].lower() not in step_methods:
+                            raise ValueError("Using invalid step method '{}'".format(self.step_method[key]))
+            else:
+                self.step_method = self.step_method.lower()
 
-            if step_method not in step_methods:
-                raise ValueError("Using invalid step method '{}'".format(step_method))
+                if self.step_method not in step_methods:
+                    raise ValueError("Using invalid step method '{}'".format(self.step_method))
         else:
-            step_method = None
+            self.step_method = None
 
         # initialise the PyMC3 model
         self.pymc3_model = pymc3.Model()
 
-        # set the step method
-        sm = None if step_method is None else pymc3.__dict__[step_methods[step_method]]()
-
         # set the prior
         self.set_prior()
 
+        # set the step method
+        if isinstance(self.step_method, (dict, OrderedDict)):
+            # create list of step methods (any not given will default to NUTS)
+            sm = []
+            with self.pymc3_model:
+                for key in self.step_method:
+                    curmethod = self.step_method[key].lower()
+                    sm.append(pymc3.__dict__[step_methods[curmethod]]([self.pymc3_priors[key]]))
+        else:
+            sm = None if self.step_method is None else pymc3.__dict__[step_methods[self.step_method]]()
+
         # if a custom log_likelihood function requires a `sampler` argument
         # then use that log_likelihood function, with the assumption that it
         # takes in a Pymc3 Sampler, with a pymc3_model attribute, and defines
@@ -1269,13 +1413,13 @@ class Pymc3(Sampler):
             trace = pymc3.sample(self.draws, step=sm, **self.kwargs)
 
         nparams = len([key for key in self.priors.keys() if self.priors[key].__class__.__name__ != 'DeltaFunction'])
-        nsamples = len(trace)*self.chains
+        nsamples = len(trace) * self.chains
 
         self.result.samples = np.zeros((nsamples, nparams))
         count = 0
         for key in self.priors.keys():
-            if self.priors[key].__class__.__name__ != 'DeltaFunction': # ignore DeltaFunction variables
-                self.result.samples[:,count] = trace[key]
+            if self.priors[key].__class__.__name__ != 'DeltaFunction':  # ignore DeltaFunction variables
+                self.result.samples[:, count] = trace[key]
                 count += 1
 
         self.result.sampler_output = np.nan
@@ -1291,7 +1435,7 @@ class Pymc3(Sampler):
 
         self.setup_prior_mapping()
 
-        self.pymc3_priors = dict()
+        self.pymc3_priors = OrderedDict()
 
         pymc3 = self.external_sampler
 
@@ -1306,7 +1450,7 @@ class Pymc3(Sampler):
                         self.pymc3_priors[key] = self.priors[key].ln_prob(sampler=self)
                     except RuntimeError:
                         raise RuntimeError(("Problem setting PyMC3 prior for ",
-                            "'{}'".format(key)))
+                                            "'{}'".format(key)))
                 else:
                     # use Prior distribution name
                     distname = self.priors[key].__class__.__name__
@@ -1331,9 +1475,11 @@ class Pymc3(Sampler):
                                             if targ in self.prior_map[distname]['argtransform']:
                                                 tfunc = self.prior_map[distname]['argtransform'][targ]
                                             else:
-                                                tfunc = lambda x: x
+                                                def tfunc(x):
+                                                    return x
                                         else:
-                                            tfunc = lambda x: x
+                                            def tfunc(x):
+                                                return x
 
                                         priorkwargs[parg] = tfunc(getattr(self.priors[key], targ))
                                     else:
@@ -1354,19 +1500,87 @@ class Pymc3(Sampler):
         Convert any tupak likelihoods to PyMC3 distributions.
         """
 
+        try:
+            import theano  # noqa
+            import theano.tensor as tt
+            from theano.compile.ops import as_op  # noqa
+        except ImportError:
+            raise ImportError("Could not import theano")
+
+        from tupak.core.likelihood import GaussianLikelihood, PoissonLikelihood, ExponentialLikelihood, StudentTLikelihood
+        from tupak.gw.likelihood import BasicGravitationalWaveTransient, GravitationalWaveTransient
+
+        # create theano Op for the log likelihood if not using a predefined model
+        class LogLike(tt.Op):
+
+            itypes = [tt.dvector]
+            otypes = [tt.dscalar]
+
+            def __init__(self, parameters, loglike, priors):
+                self.parameters = parameters
+                self.likelihood = loglike
+                self.priors = priors
+
+                # set the fixed parameters
+                for key in self.priors.keys():
+                    if isinstance(self.priors[key], float):
+                        self.likelihood.parameters[key] = self.priors[key]
+
+                self.logpgrad = LogLikeGrad(self.parameters, self.likelihood, self.priors)
+
+            def perform(self, node, inputs, outputs):
+                theta, = inputs
+                for i, key in enumerate(self.parameters):
+                    self.likelihood.parameters[key] = theta[i]
+
+                outputs[0][0] = np.array(self.likelihood.log_likelihood())
+
+            def grad(self, inputs, g):
+                theta, = inputs
+                return [g[0] * self.logpgrad(theta)]
+
+        # create theano Op for calculating the gradient of the log likelihood
+        class LogLikeGrad(tt.Op):
+
+            itypes = [tt.dvector]
+            otypes = [tt.dvector]
+
+            def __init__(self, parameters, loglike, priors):
+                self.parameters = parameters
+                self.Nparams = len(parameters)
+                self.likelihood = loglike
+                self.priors = priors
+
+                # set the fixed parameters
+                for key in self.priors.keys():
+                    if isinstance(self.priors[key], float):
+                        self.likelihood.parameters[key] = self.priors[key]
+
+            def perform(self, node, inputs, outputs):
+                theta, = inputs
+
+                # define version of likelihood function to pass to derivative function
+                def lnlike(values):
+                    for i, key in enumerate(self.parameters):
+                        self.likelihood.parameters[key] = values[i]
+                    return self.likelihood.log_likelihood()
+
+                # calculate gradients
+                grads = utils.derivatives(theta, lnlike, abseps=1e-5, mineps=1e-12, reltol=1e-2)
+
+                outputs[0][0] = grads
+
         pymc3 = self.external_sampler
 
         with self.pymc3_model:
             #  check if it is a predefined likelhood function
-            if self.likelihood.__class__.__name__ == 'GaussianLikelihood':
+            if isinstance(self.likelihood, GaussianLikelihood):
                 # check required attributes exist
                 if (not hasattr(self.likelihood, 'sigma') or
                     not hasattr(self.likelihood, 'x') or
-                    not hasattr(self.likelihood, 'y') or
-                    not hasattr(self.likelihood, 'function') or
-                    not hasattr(self.likelihood, 'function_keys')):
+                    not hasattr(self.likelihood, 'y')):
                     raise ValueError("Gaussian Likelihood does not have all the correct attributes!")
-                
+
                 if 'sigma' in self.pymc3_priors:
                     # if sigma is suppled use that value
                     if self.likelihood.sigma is None:
@@ -1378,34 +1592,30 @@ class Pymc3(Sampler):
                     if key not in self.likelihood.function_keys:
                         raise ValueError("Prior key '{}' is not a function key!".format(key))
 
-                model = self.likelihood.function(self.likelihood.x, **self.pymc3_priors)
+                model = self.likelihood.func(self.likelihood.x, **self.pymc3_priors)
 
                 # set the distribution
                 pymc3.Normal('likelihood', mu=model, sd=self.likelihood.sigma,
                              observed=self.likelihood.y)
-            elif self.likelihood.__class__.__name__ == 'PoissonLikelihood':
+            elif isinstance(self.likelihood, PoissonLikelihood):
                 # check required attributes exist
                 if (not hasattr(self.likelihood, 'x') or
-                    not hasattr(self.likelihood, 'y') or
-                    not hasattr(self.likelihood, 'function') or
-                    not hasattr(self.likelihood, 'function_keys')):
+                    not hasattr(self.likelihood, 'y')):
                     raise ValueError("Poisson Likelihood does not have all the correct attributes!")
-                
+
                 for key in self.pymc3_priors:
                     if key not in self.likelihood.function_keys:
                         raise ValueError("Prior key '{}' is not a function key!".format(key))
 
                 # get rate function
-                model = self.likelihood.function(self.likelihood.x, **self.pymc3_priors)
+                model = self.likelihood.func(self.likelihood.x, **self.pymc3_priors)
 
                 # set the distribution
                 pymc3.Poisson('likelihood', mu=model, observed=self.likelihood.y)
-            elif self.likelihood.__class__.__name__ == 'ExponentialLikelihood':
+            elif isinstance(self.likelihood, ExponentialLikelihood):
                 # check required attributes exist
                 if (not hasattr(self.likelihood, 'x') or
-                    not hasattr(self.likelihood, 'y') or
-                    not hasattr(self.likelihood, 'function') or
-                    not hasattr(self.likelihood, 'function_keys')):
+                    not hasattr(self.likelihood, 'y')):
                     raise ValueError("Exponential Likelihood does not have all the correct attributes!")
 
                 for key in self.pymc3_priors:
@@ -1413,18 +1623,16 @@ class Pymc3(Sampler):
                         raise ValueError("Prior key '{}' is not a function key!".format(key))
 
                 # get mean function
-                model = self.likelihood.function(self.likelihood.x, **self.pymc3_priors)
+                model = self.likelihood.func(self.likelihood.x, **self.pymc3_priors)
 
                 # set the distribution
-                pymc3.Exponential('likelihood', lam=1./model, observed=self.likelihood.y)
-            elif self.likelihood.__class__.__name__ == 'StudentTLikelihood':
+                pymc3.Exponential('likelihood', lam=1. / model, observed=self.likelihood.y)
+            elif isinstance(self.likelihood, StudentTLikelihood):
                 # check required attributes exist
                 if (not hasattr(self.likelihood, 'x') or
                     not hasattr(self.likelihood, 'y') or
                     not hasattr(self.likelihood, 'nu') or
-                    not hasattr(self.likelihood, 'sigma') or
-                    not hasattr(self.likelihood, 'function') or
-                    not hasattr(self.likelihood, 'function_keys')):
+                    not hasattr(self.likelihood, 'sigma')):
                     raise ValueError("StudentT Likelihood does not have all the correct attributes!")
 
                 if 'nu' in self.pymc3_priors:
@@ -1438,32 +1646,27 @@ class Pymc3(Sampler):
                     if key not in self.likelihood.function_keys:
                         raise ValueError("Prior key '{}' is not a function key!".format(key))
 
-                model = self.likelihood.function(self.likelihood.x, **self.pymc3_priors)
+                model = self.likelihood.func(self.likelihood.x, **self.pymc3_priors)
 
                 # set the distribution
                 pymc3.StudentT('likelihood', nu=self.likelihood.nu, mu=model, sd=self.likelihood.sigma, observed=self.likelihood.y)
-            else:
-                raise ValueError("Unknown likelihood has been provided")
+            elif isinstance(self.likelihood, (GravitationalWaveTransient, BasicGravitationalWaveTransient)):
+                # set theano Op - pass __search_parameter_keys, which only contains non-fixed variables
+                logl = LogLike(self.__search_parameter_keys, self.likelihood, self.pymc3_priors)
 
-    def calculate_autocorrelation(self, samples, c=3):
-        """ Uses the `emcee.autocorr` module to estimate the autocorrelation
+                parameters = OrderedDict()
+                for key in self.__search_parameter_keys:
+                    try:
+                        parameters[key] = self.pymc3_priors[key]
+                    except KeyError:
+                        raise KeyError("Unknown key '{}' when setting GravitationalWaveTransient likelihood".format(key))
 
-        Parameters
-        ----------
-        c: float
-            The minimum number of autocorrelation times needed to trust the
-            estimate (default: `3`). See `emcee.autocorr.integrated_time`.
-        """
+                # convert to theano tensor variable
+                values = tt.as_tensor_variable(list(parameters.values()))
 
-        import emcee
-        try:
-            self.result.max_autocorrelation_time = int(np.max(
-                emcee.autocorr.integrated_time(samples, c=c)))
-            logger.info("Max autocorr time = {}".format(
-                self.result.max_autocorrelation_time))
-        except emcee.autocorr.AutocorrError as e:
-            self.result.max_autocorrelation_time = None
-            logger.info("Unable to calculate autocorr time: {}".format(e))
+                pymc3.DensityDist('likelihood', lambda v: logl(v), observed={'v': values})
+            else:
+                raise ValueError("Unknown likelihood has been provided")
 
 
 def run_sampler(likelihood, priors=None, label='label', outdir='outdir',
diff --git a/tupak/core/utils.py b/tupak/core/utils.py
index 82d3af182c1853c515230e9d825863aa676a89da..9891bd6585f6244d1b05e8fa998b5eb07f6142ed 100644
--- a/tupak/core/utils.py
+++ b/tupak/core/utils.py
@@ -104,7 +104,7 @@ def create_time_series(sampling_frequency, duration, starting_time=0.):
     float: An equidistant time series given the parameters
 
     """
-    return np.arange(starting_time, starting_time+duration, 1./sampling_frequency)
+    return np.arange(starting_time, starting_time + duration, 1. / sampling_frequency)
 
 
 def ra_dec_to_theta_phi(ra, dec, gmst):
@@ -175,8 +175,8 @@ def create_frequency_series(sampling_frequency, duration):
     number_of_samples = int(np.round(number_of_samples))
 
     # prepare for FFT
-    number_of_frequencies = (number_of_samples-1)//2
-    delta_freq = 1./duration
+    number_of_frequencies = (number_of_samples - 1) // 2
+    delta_freq = 1. / duration
 
     frequencies = delta_freq * np.linspace(1, number_of_frequencies, number_of_frequencies)
 
@@ -207,14 +207,14 @@ def create_white_noise(sampling_frequency, duration):
     number_of_samples = duration * sampling_frequency
     number_of_samples = int(np.round(number_of_samples))
 
-    delta_freq = 1./duration
+    delta_freq = 1. / duration
 
     frequencies = create_frequency_series(sampling_frequency, duration)
 
-    norm1 = 0.5*(1./delta_freq)**0.5
+    norm1 = 0.5 * (1. / delta_freq)**0.5
     re1 = np.random.normal(0, norm1, len(frequencies))
     im1 = np.random.normal(0, norm1, len(frequencies))
-    htilde1 = re1 + 1j*im1
+    htilde1 = re1 + 1j * im1
 
     # convolve data with instrument transfer function
     otilde1 = htilde1 * 1.
@@ -260,7 +260,7 @@ def nfft(time_domain_strain, sampling_frequency):
         time_domain_strain = np.append(time_domain_strain, 0)
     LL = len(time_domain_strain)
     # frequency range
-    frequency_array = sampling_frequency / 2 * np.linspace(0, 1, int(LL/2+1))
+    frequency_array = sampling_frequency / 2 * np.linspace(0, 1, int(LL / 2 + 1))
 
     # calculate FFT
     # rfft computes the fft for real inputs
@@ -435,10 +435,6 @@ def set_up_command_line_arguments():
                         help="Force clean data, never use cached data")
     parser.add_argument("-u", "--use-cached", action="store_true",
                         help="Force cached data and do not check its validity")
-    parser.add_argument("-d", "--detectors",  nargs='+',
-                        default=['H1', 'L1', 'V1'],
-                        help=("List of detectors to use in open data calls, "
-                              "e.g. -d H1 L1 for H1 and L1"))
     parser.add_argument("-t", "--test", action="store_true",
                         help=("Used for testing only: don't run full PE, but"
                               " just check nothing breaks"))
@@ -454,6 +450,141 @@ def set_up_command_line_arguments():
     return args
 
 
+def derivatives(vals, func, releps=1e-3, abseps=None, mineps=1e-9, reltol=1e-3,
+                epsscale=0.5, nonfixedidx=None):
+    """
+    Calculate the partial derivatives of a function at a set of values. The
+    derivatives are calculated using the central difference, using an iterative
+    method to check that the values converge as step size decreases.
+
+    Parameters
+    ----------
+    vals: array_like
+        A set of values, that are passed to a function, at which to calculate
+        the gradient of that function
+    func:
+        A function that takes in an array of values.
+    releps: float, array_like, 1e-3
+        The initial relative step size for calculating the derivative.
+    abseps: float, array_like, None
+        The initial absolute step size for calculating the derivative.
+        This overrides `releps` if set.
+        If `abseps` is not set then `releps` is used.
+    mineps: float, 1e-9
+        The minimum relative step size at which to stop iterations if no
+        convergence is achieved.
+    epsscale: float, 0.5
+        The factor by which releps if scaled in each iteration.
+    nonfixedidx: array_like, None
+        An array of indices in `vals` that are _not_ fixed values and therefore
+        can have derivatives taken. If `None` then derivatives of all values
+        are calculated.
+
+    Returns
+    -------
+    grads: array_like
+        An array of gradients for each non-fixed value.
+    """
+
+    if nonfixedidx is None:
+        nonfixedidx = range(len(vals))
+
+    if len(nonfixedidx) > len(vals):
+        raise ValueError("To many non-fixed values")
+
+    if max(nonfixedidx) >= len(vals) or min(nonfixedidx) < 0:
+        raise ValueError("Non-fixed indexes contain non-existant indices")
+
+    grads = np.zeros(len(nonfixedidx))
+
+    # maximum number of times the gradient can change sign
+    flipflopmax = 10.
+
+    # set steps
+    if abseps is None:
+        if isinstance(releps, float):
+            eps = np.abs(vals) * releps
+            eps[eps == 0.] = releps  # if any values are zero set eps to releps
+            teps = releps * np.ones(len(vals))
+        elif isinstance(releps, (list, np.ndarray)):
+            if len(releps) != len(vals):
+                raise ValueError("Problem with input relative step sizes")
+            eps = np.multiply(np.abs(vals), releps)
+            eps[eps == 0.] = np.array(releps)[eps == 0.]
+            teps = releps
+        else:
+            raise RuntimeError("Relative step sizes are not a recognised type!")
+    else:
+        if isinstance(abseps, float):
+            eps = abseps * np.ones(len(vals))
+        elif isinstance(abseps, (list, np.ndarray)):
+            if len(abseps) != len(vals):
+                raise ValueError("Problem with input absolute step sizes")
+            eps = np.array(abseps)
+        else:
+            raise RuntimeError("Absolute step sizes are not a recognised type!")
+        teps = eps
+
+    # for each value in vals calculate the gradient
+    count = 0
+    for i in nonfixedidx:
+        # initial parameter diffs
+        leps = eps[i]
+        cureps = teps[i]
+
+        flipflop = 0
+
+        # get central finite difference
+        fvals = np.copy(vals)
+        bvals = np.copy(vals)
+
+        # central difference
+        fvals[i] += 0.5 * leps  # change forwards distance to half eps
+        bvals[i] -= 0.5 * leps  # change backwards distance to half eps
+        cdiff = (func(fvals) - func(bvals)) / leps
+
+        while 1:
+            fvals[i] -= 0.5 * leps  # remove old step
+            bvals[i] += 0.5 * leps
+
+            # change the difference by a factor of two
+            cureps *= epsscale
+            if cureps < mineps or flipflop > flipflopmax:
+                # if no convergence set flat derivative (TODO: check if there is a better thing to do instead)
+                logger.warning("Derivative calculation did not converge: setting flat derivative.")
+                grads[count] = 0.
+                break
+            leps *= epsscale
+
+            # central difference
+            fvals[i] += 0.5 * leps  # change forwards distance to half eps
+            bvals[i] -= 0.5 * leps  # change backwards distance to half eps
+            cdiffnew = (func(fvals) - func(bvals)) / leps
+
+            if cdiffnew == cdiff:
+                grads[count] = cdiff
+                break
+
+            # check whether previous diff and current diff are the same within reltol
+            rat = (cdiff / cdiffnew)
+            if np.isfinite(rat) and rat > 0.:
+                # gradient has not changed sign
+                if np.abs(1. - rat) < reltol:
+                    grads[count] = cdiffnew
+                    break
+                else:
+                    cdiff = cdiffnew
+                    continue
+            else:
+                cdiff = cdiffnew
+                flipflop += 1
+                continue
+
+        count += 1
+
+    return grads
+
+
 command_line_args = set_up_command_line_arguments()
 setup_logger(print_version=True)
 
diff --git a/tupak/gw/calibration.py b/tupak/gw/calibration.py
index e54757a86a9c60bfa029271e176b75a23c714112..1ed90538a15d683d698ed6358197d16ffedfd067 100644
--- a/tupak/gw/calibration.py
+++ b/tupak/gw/calibration.py
@@ -21,6 +21,9 @@ class Recalibrate(object):
         self.params = dict()
         self.prefix = prefix
 
+    def __repr__(self):
+        return self.__class__.__name__ + '(prefix=\'{}\')'.format(self.prefix)
+
     def get_calibration_factor(self, frequency_array, **params):
         """Apply calibration model
 
@@ -75,7 +78,17 @@ class CubicSpline(Recalibrate):
         if n_points < 4:
             raise ValueError('Cubic spline calibration requires at least 4 spline nodes.')
         self.n_points = n_points
-        self.spline_points = np.logspace(np.log10(minimum_frequency), np.log10(maximum_frequency), n_points)
+        self.minimum_frequency = minimum_frequency
+        self.maximum_frequency = maximum_frequency
+        self.__spline_points = np.logspace(np.log10(minimum_frequency), np.log10(maximum_frequency), n_points)
+
+    @property
+    def spline_points(self):
+        return self.__spline_points
+
+    def __repr__(self):
+        return self.__class__.__name__ + '(prefix=\'{}\', minimum_frequency={}, maximum_frequency={}, n_points={})'\
+            .format(self.prefix, self.minimum_frequency, self.maximum_frequency, self.n_points)
 
     def get_calibration_factor(self, frequency_array, **params):
         """Apply calibration model
@@ -107,4 +120,3 @@ class CubicSpline(Recalibrate):
         calibration_factor = (1 + delta_amplitude) * (2 + 1j * delta_phase) / (2 - 1j * delta_phase)
 
         return calibration_factor
-
diff --git a/tupak/gw/conversion.py b/tupak/gw/conversion.py
index 8a8c88444a98cd50aace5c5dc4862ed184f6b66e..28c5b057da7fc99ad74d53729fb53db4c4e41f06 100644
--- a/tupak/gw/conversion.py
+++ b/tupak/gw/conversion.py
@@ -128,8 +128,10 @@ def convert_to_lal_binary_black_hole_parameters(parameters, search_keys, remove=
             converted_parameters['mass_ratio'] = \
                 mass_1_and_chirp_mass_to_mass_ratio(parameters['mass_1'], parameters['chirp_mass'])
             temp = (parameters['chirp_mass'] / parameters['mass_1']) ** 5
-            parameters['mass_ratio'] = (2 * temp / 3 / ((51 * temp ** 2 - 12 * temp ** 3) ** 0.5 + 9 * temp)) ** (
-                    1 / 3) + (((51 * temp ** 2 - 12 * temp ** 3) ** 0.5 + 9 * temp) / 9 / 2 ** 0.5) ** (1 / 3)
+            parameters['mass_ratio'] = (
+                (2 * temp / 3 / (
+                    (51 * temp ** 2 - 12 * temp ** 3) ** 0.5 + 9 * temp)) ** (1 / 3) +
+                (((51 * temp ** 2 - 12 * temp ** 3) ** 0.5 + 9 * temp) / 9 / 2 ** 0.5) ** (1 / 3))
             if remove:
                 added_keys.append('chirp_mass')
         elif 'symmetric_mass_ratio' in converted_parameters.keys() and 'symmetric_mass_ratio' not in added_keys:
@@ -169,6 +171,71 @@ def convert_to_lal_binary_black_hole_parameters(parameters, search_keys, remove=
     return converted_parameters, added_keys
 
 
+def convert_to_lal_binary_neutron_star_parameters(parameters, search_keys, remove=True):
+    """
+    Convert parameters we have into parameters we need.
+
+    This is defined by the parameters of tupak.source.lal_binary_neutron_star()
+
+
+    Mass: mass_1, mass_2
+    Spin: a_1, a_2, tilt_1, tilt_2, phi_12, phi_jl
+    Extrinsic: luminosity_distance, theta_jn, phase, ra, dec, geocent_time, psi
+
+    This involves popping a lot of things from parameters.
+    The keys in added_keys should be popped after evaluating the waveform.
+
+    Parameters
+    ----------
+    parameters: dict
+        dictionary of parameter values to convert into the required parameters
+    search_keys: list
+        parameters which are needed for the waveform generation
+    remove: bool, optional
+        Whether or not to remove the extra key, necessary for sampling, default=True.
+
+    Return
+    ------
+    converted_parameters: dict
+        dict of the required parameters
+    added_keys: list
+        keys which are added to parameters during function call
+    """
+    converted_parameters = parameters.copy()
+    converted_parameters, added_keys = convert_to_lal_binary_black_hole_parameters(
+        converted_parameters, search_keys, remove=remove)
+
+    if 'lambda_1' not in search_keys and 'lambda_2' not in search_keys:
+        if 'delta_lambda' in converted_parameters.keys():
+            converted_parameters['lambda_1'], converted_parameters['lambda_2'] =\
+                lambda_tilde_delta_lambda_to_lambda_1_lambda_2(
+                    converted_parameters['lambda_tilde'], parameters['delta_lambda'],
+                    converted_parameters['mass_1'], converted_parameters['mass_2'])
+            added_keys.append('lambda_1')
+            added_keys.append('lambda_2')
+        elif 'lambda_tilde' in converted_parameters.keys():
+            converted_parameters['lambda_1'], converted_parameters['lambda_2'] =\
+                lambda_tilde_to_lambda_1_lambda_2(
+                    converted_parameters['lambda_tilde'],
+                    converted_parameters['mass_1'], converted_parameters['mass_2'])
+            added_keys.append('lambda_1')
+            added_keys.append('lambda_2')
+    if 'lambda_2' not in converted_parameters.keys():
+        converted_parameters['lambda_2'] =\
+            converted_parameters['lambda_1']\
+            * converted_parameters['mass_1']**5\
+            / converted_parameters['mass_2']**5
+        added_keys.append('lambda_2')
+    elif converted_parameters['lambda_2'] is None:
+        converted_parameters['lambda_2'] =\
+            converted_parameters['lambda_1']\
+            * converted_parameters['mass_1']**5\
+            / converted_parameters['mass_2']**5
+        added_keys.append('lambda_2')
+
+    return converted_parameters, added_keys
+
+
 def total_mass_and_mass_ratio_to_component_masses(mass_ratio, total_mass):
     """
     Convert total mass and mass ratio of a binary to its component masses.
@@ -358,6 +425,84 @@ def mass_1_and_chirp_mass_to_mass_ratio(mass_1, chirp_mass):
     return mass_ratio
 
 
+def lambda_tilde_delta_lambda_to_lambda_1_lambda_2(
+        lambda_tilde, delta_lambda, mass_1, mass_2):
+    """
+    Convert from dominant tidal terms to individual tidal parameters.
+
+    See, e.g., Wade et al., https://arxiv.org/pdf/1402.5156.pdf.
+
+    Parameters
+    ----------
+    lambda_tilde: float
+        Dominant tidal term.
+    delta_lambda: float
+        Secondary tidal term.
+    mass_1: float
+        Mass of more massive neutron star.
+    mass_2: float
+        Mass of less massive neutron star.
+
+    Return
+    ------
+    lambda_1: float
+        Tidal parameter of more massive neutron star.
+    lambda_2: float
+        Tidal parameter of less massive neutron star.
+    """
+    eta = component_masses_to_symmetric_mass_ratio(mass_1, mass_2)
+    coefficient_1 = (1 + 7 * eta - 31 * eta**2)
+    coefficient_2 = (1 - 4 * eta)**0.5 * (1 + 9 * eta - 11 * eta**2)
+    coefficient_3 = (1 - 4 * eta)**0.5 *\
+                    (1 - 13272 / 1319 * eta + 8944 / 1319 * eta**2)
+    coefficient_4 = (1 - 15910 / 1319 * eta + 32850 / 1319 * eta**2 +
+                     3380 / 1319 * eta**3)
+    lambda_1 =\
+        (13 * lambda_tilde / 8 * (coefficient_3 - coefficient_4) -
+         2 * delta_lambda * (coefficient_1 - coefficient_2))\
+        / ((coefficient_1 + coefficient_2) * (coefficient_3 - coefficient_4) -
+           (coefficient_1 - coefficient_2) * (coefficient_3 + coefficient_4))
+    lambda_2 =\
+        (13 * lambda_tilde / 8 * (coefficient_3 + coefficient_4) -
+         2 * delta_lambda * (coefficient_1 + coefficient_2)) \
+        / ((coefficient_1 - coefficient_2) * (coefficient_3 + coefficient_4) -
+           (coefficient_1 + coefficient_2) * (coefficient_3 - coefficient_4))
+    return lambda_1, lambda_2
+
+
+def lambda_tilde_to_lambda_1_lambda_2(
+        lambda_tilde, mass_1, mass_2):
+    """
+    Convert from dominant tidal term to individual tidal parameters
+    assuming lambda_1 * mass_1**5 = lambda_2 * mass_2**5.
+
+    See, e.g., Wade et al., https://arxiv.org/pdf/1402.5156.pdf.
+
+    Parameters
+    ----------
+    lambda_tilde: float
+        Dominant tidal term.
+    mass_1: float
+        Mass of more massive neutron star.
+    mass_2: float
+        Mass of less massive neutron star.
+
+    Return
+    ------
+    lambda_1: float
+        Tidal parameter of more massive neutron star.
+    lambda_2: float
+        Tidal parameter of less massive neutron star.
+    """
+    eta = component_masses_to_symmetric_mass_ratio(mass_1, mass_2)
+    q = mass_2 / mass_1
+    lambda_1 = 13 / 8 * lambda_tilde / (
+        (1 + 7 * eta - 31 * eta**2) * (1 + q**-5) +
+        (1 - 4 * eta)**0.5 * (1 + 9 * eta - 11 * eta**2) * (1 - q**-5))
+    lambda_2 = lambda_1 / q**5
+    return lambda_1, lambda_2
+
+
 def generate_all_bbh_parameters(sample, likelihood=None, priors=None):
     """
     From either a single sample or a set of samples fill in all missing BBH parameters, in place.
@@ -462,12 +607,12 @@ def generate_component_spins(sample):
         output_sample['iota'], output_sample['spin_1x'], output_sample['spin_1y'], output_sample['spin_1z'], \
             output_sample['spin_2x'], output_sample['spin_2y'], output_sample['spin_2z'] = \
             lalsim.SimInspiralTransformPrecessingNewInitialConditions(
-                    output_sample['iota'], output_sample['phi_jl'],
-                    output_sample['tilt_1'], output_sample['tilt_2'],
-                    output_sample['phi_12'], output_sample['a_1'], output_sample['a_2'],
-                    output_sample['mass_1'] * tupak.core.utils.solar_mass,
-                    output_sample['mass_2'] * tupak.core.utils.solar_mass,
-                    output_sample['reference_frequency'], output_sample['phase'])
+                output_sample['iota'], output_sample['phi_jl'],
+                output_sample['tilt_1'], output_sample['tilt_2'],
+                output_sample['phi_12'], output_sample['a_1'], output_sample['a_2'],
+                output_sample['mass_1'] * tupak.core.utils.solar_mass,
+                output_sample['mass_2'] * tupak.core.utils.solar_mass,
+                output_sample['reference_frequency'], output_sample['phase'])
 
         output_sample['phi_1'] = np.arctan(output_sample['spin_1y'] / output_sample['spin_1x'])
         output_sample['phi_2'] = np.arctan(output_sample['spin_2y'] / output_sample['spin_2x'])
diff --git a/tupak/gw/detector.py b/tupak/gw/detector.py
index e597ab89d638b4c3dde7071e652ff81f49d3981f..83f74aa020f0f16abb1fb8c8d8fdc5c13d96912e 100644
--- a/tupak/gw/detector.py
+++ b/tupak/gw/detector.py
@@ -73,7 +73,9 @@ class InterferometerList(list):
 
         """
         for interferometer in self:
-            interferometer.set_strain_data_from_power_spectral_density(sampling_frequency, duration, start_time)
+            interferometer.set_strain_data_from_power_spectral_density(sampling_frequency=sampling_frequency,
+                                                                       duration=duration,
+                                                                       start_time=start_time)
 
     def inject_signal(self, parameters=None, injection_polarizations=None, waveform_generator=None):
         """ Inject a signal into noise in each of the three detectors.
@@ -129,7 +131,7 @@ class InterferometerList(list):
             The string labelling the data
         """
         for interferometer in self:
-            interferometer.save_data(outdir, label)
+            interferometer.save_data(outdir=outdir, label=label)
 
     def plot_data(self, signal=None, outdir='.', label=None):
         if utils.command_line_args.test:
@@ -397,15 +399,16 @@ class InterferometerStrainData(object):
                 self.alpha, self.roll_off))
             # self.low_pass_filter()
             window = self.time_domain_window()
-            frequency_domain_strain, self.frequency_array = utils.nfft(
+            self._frequency_domain_strain, self.frequency_array = utils.nfft(
                 self._time_domain_strain * window, self.sampling_frequency)
-            self._frequency_domain_strain = frequency_domain_strain
             return self._frequency_domain_strain * self.frequency_mask
         else:
             raise ValueError("frequency domain strain data not yet set")
 
     @frequency_domain_strain.setter
     def frequency_domain_strain(self, frequency_domain_strain):
+        if not len(self.frequency_array) == len(frequency_domain_strain):
+            raise ValueError("The frequency_array and the set strain have different lengths")
         self._frequency_domain_strain = frequency_domain_strain
 
     def add_to_frequency_domain_strain(self, x):
@@ -424,7 +427,7 @@ class InterferometerStrainData(object):
             logger.info(
                 "Low pass filter frequency of {}Hz requested, this is equal"
                 " or greater than the Nyquist frequency so no filter applied"
-                    .format(filter_freq))
+                .format(filter_freq))
             return
 
         logger.debug("Applying low pass filter with filter frequency {}".format(filter_freq))
@@ -805,6 +808,15 @@ class Interferometer(object):
             minimum_frequency=minimum_frequency,
             maximum_frequency=maximum_frequency)
 
+    def __repr__(self):
+        return self.__class__.__name__ + '(name=\'{}\', power_spectral_density={}, minimum_frequency={}, ' \
+                                         'maximum_frequency={}, length={}, latitude={}, longitude={}, elevation={}, ' \
+                                         'xarm_azimuth={}, yarm_azimuth={}, xarm_tilt={}, yarm_tilt={})' \
+            .format(self.name, self.power_spectral_density, float(self.minimum_frequency),
+                    float(self.maximum_frequency), float(self.length), float(self.latitude), float(self.longitude),
+                    float(self.elevation), float(self.xarm_azimuth), float(self.yarm_azimuth), float(self.xarm_tilt),
+                    float(self.yarm_tilt))
+
     @property
     def minimum_frequency(self):
         return self.strain_data.minimum_frequency
@@ -1187,7 +1199,7 @@ class Interferometer(object):
             parameters['ra'],
             parameters['dec'],
             self.strain_data.start_time)
-        dt = self.strain_data.start_time - (parameters['geocent_time'] - time_shift)
+        dt = parameters['geocent_time'] + time_shift - self.strain_data.start_time
 
         signal_ifo = signal_ifo * np.exp(
             -1j * 2 * np.pi * dt * self.frequency_array)
@@ -1244,7 +1256,7 @@ class Interferometer(object):
         if not self.strain_data.time_within_data(parameters['geocent_time']):
             logger.warning(
                 'Injecting signal outside segment, start_time={}, merger time={}.'
-                    .format(self.strain_data.start_time, parameters['geocent_time']))
+                .format(self.strain_data.start_time, parameters['geocent_time']))
 
         signal_ifo = self.get_detector_response(injection_polarizations, parameters)
         if np.shape(self.frequency_domain_strain).__eq__(np.shape(signal_ifo)):
@@ -1302,9 +1314,9 @@ class Interferometer(object):
         e_h = np.array([np.cos(self.__latitude) * np.cos(self.__longitude),
                         np.cos(self.__latitude) * np.sin(self.__longitude), np.sin(self.__latitude)])
 
-        return np.cos(arm_tilt) * np.cos(arm_azimuth) * e_long + \
-               np.cos(arm_tilt) * np.sin(arm_azimuth) * e_lat + \
-               np.sin(arm_tilt) * e_h
+        return (np.cos(arm_tilt) * np.cos(arm_azimuth) * e_long +
+                np.cos(arm_tilt) * np.sin(arm_azimuth) * e_lat +
+                np.sin(arm_tilt) * e_h)
 
     @property
     def amplitude_spectral_density_array(self):
@@ -1328,8 +1340,8 @@ class Interferometer(object):
         array_like: An array representation of the PSD
 
         """
-        return self.power_spectral_density.power_spectral_density_interpolated(self.frequency_array) \
-               * self.strain_data.window_factor
+        return (self.power_spectral_density.power_spectral_density_interpolated(self.frequency_array) *
+                self.strain_data.window_factor)
 
     @property
     def frequency_array(self):
@@ -1509,61 +1521,54 @@ class TriangularInterferometer(InterferometerList):
 
 class PowerSpectralDensity(object):
 
-    def __init__(self, **kwargs):
+    def __init__(self, frequency_array=None, psd_array=None, asd_array=None,
+                 psd_file=None, asd_file=None):
         """
         Instantiate a new PowerSpectralDensity object.
 
-        If called with no argument, `PowerSpectralDensity()` will return an
-        empty instance which can be filled with one of the `set_from` methods.
-        You can also initialise a new PowerSpectralDensity object giving the
-        arguments of any `set_from` method and an attempt will be made to use
-        this information to load/create the power spectral density.
-
         Example
         -------
-        Using the `set_from` method directly (here `psd_file` is a string
+        Using the `from_power_spectral_density_file` method directly (here `psd_file` is a string
         containing the path to the file to load):
-        >>> power_spectral_density = PowerSpectralDensity()
-        >>> power_spectral_density.set_from_power_spectral_density_file(psd_file)
+        >>> power_spectral_density = PowerSpectralDensity.from_power_spectral_density_file(psd_file)
 
         Alternatively (and equivalently) setting the psd_file directly:
         >>> power_spectral_density = PowerSpectralDensity(psd_file=psd_file)
 
-        Note: for the "direct" method to work, you must provide the input
-        as a keyword argument as above.
-
         Attributes
         ----------
-        amplitude_spectral_density: array_like
+        asd_array: array_like
             Array representation of the ASD
-        amplitude_spectral_density_file: str
+        asd_file: str
             Name of the ASD file
         frequency_array: array_like
             Array containing the frequencies of the ASD/PSD values
-        power_spectral_density: array_like
+        psd_array: array_like
             Array representation of the PSD
-        power_spectral_density_file: str
+        psd_file: str
             Name of the PSD file
         power_spectral_density_interpolated: scipy.interpolated.interp1d
             Interpolated function of the PSD
 
         """
-        self.__power_spectral_density = None
-        self.__amplitude_spectral_density = None
+        self.frequency_array = np.array(frequency_array)
+        if psd_array is not None:
+            self.psd_array = psd_array
+        if asd_array is not None:
+            self.asd_array = asd_array
+        self.psd_file = psd_file
+        self.asd_file = asd_file
 
-        self.frequency_array = []
-        self.power_spectral_density_interpolated = None
-
-        for key in kwargs:
-            try:
-                expanded_key = (key.replace('psd', 'power_spectral_density')
-                                .replace('asd', 'amplitude_spectral_density'))
-                m = getattr(self, 'set_from_{}'.format(expanded_key))
-                m(**kwargs)
-            except AttributeError:
-                logger.info("Tried setting PSD from init kwarg {} and failed".format(key))
+    def __repr__(self):
+        if self.asd_file is not None or self.psd_file is not None:
+            return self.__class__.__name__ + '(psd_file=\'{}\', asd_file=\'{}\')' \
+                .format(self.psd_file, self.asd_file)
+        else:
+            return self.__class__.__name__ + '(frequency_array={}, psd_array={}, asd_array={})' \
+                .format(self.frequency_array, self.psd_array, self.asd_array)
 
-    def set_from_amplitude_spectral_density_file(self, asd_file):
+    @staticmethod
+    def from_amplitude_spectral_density_file(asd_file):
         """ Set the amplitude spectral density from a given file
 
         Parameters
@@ -1572,18 +1577,10 @@ class PowerSpectralDensity(object):
             File containing amplitude spectral density, format 'f h_f'
 
         """
+        return PowerSpectralDensity(asd_file=asd_file)
 
-        self.amplitude_spectral_density_file = asd_file
-        self.import_amplitude_spectral_density()
-        if min(self.amplitude_spectral_density) < 1e-30:
-            logger.warning("You specified an amplitude spectral density file.")
-            logger.warning("{} WARNING {}".format("*" * 30, "*" * 30))
-            logger.warning("The minimum of the provided curve is {:.2e}.".format(
-                min(self.amplitude_spectral_density)))
-            logger.warning(
-                "You may have intended to provide this as a power spectral density.")
-
-    def set_from_power_spectral_density_file(self, psd_file):
+    @staticmethod
+    def from_power_spectral_density_file(psd_file):
         """ Set the power spectral density from a given file
 
         Parameters
@@ -1592,20 +1589,12 @@ class PowerSpectralDensity(object):
             File containing power spectral density, format 'f h_f'
 
         """
+        return PowerSpectralDensity(psd_file=psd_file)
 
-        self.power_spectral_density_file = psd_file
-        self.import_power_spectral_density()
-        if min(self.power_spectral_density) > 1e-30:
-            logger.warning("You specified a power spectral density file.")
-            logger.warning("{} WARNING {}".format("*" * 30, "*" * 30))
-            logger.warning("The minimum of the provided curve is {:.2e}.".format(
-                min(self.power_spectral_density)))
-            logger.warning(
-                "You may have intended to provide this as an amplitude spectral density.")
-
-    def set_from_frame_file(self, frame_file, psd_start_time, psd_duration,
-                            fft_length=4, sampling_frequency=4096, roll_off=0.2,
-                            channel=None):
+    @staticmethod
+    def from_frame_file(frame_file, psd_start_time, psd_duration,
+                        fft_length=4, sampling_frequency=4096, roll_off=0.2,
+                        channel=None):
         """ Generate power spectral density from a frame file
 
         Parameters
@@ -1627,103 +1616,123 @@ class PowerSpectralDensity(object):
             Name of channel to use to generate PSD.
 
         """
-
         strain = InterferometerStrainData(roll_off=roll_off)
         strain.set_from_frame_file(
             frame_file, start_time=psd_start_time, duration=psd_duration,
             channel=channel, sampling_frequency=sampling_frequency)
+        frequency_array, psd_array = strain.create_power_spectral_density(fft_length=fft_length)
+        return PowerSpectralDensity(frequency_array=frequency_array, psd_array=psd_array)
 
-        f, psd = strain.create_power_spectral_density(fft_length=fft_length)
-        self.frequency_array = f
-        self.power_spectral_density = psd
-
-    def set_from_amplitude_spectral_density_array(self, frequency_array,
-                                                  asd_array):
-        self.frequency_array = frequency_array
-        self.amplitude_spectral_density = asd_array
+    @staticmethod
+    def from_amplitude_spectral_density_array(frequency_array, asd_array):
+        return PowerSpectralDensity(frequency_array=frequency_array, asd_array=asd_array)
 
-    def set_from_power_spectral_density_array(self, frequency_array, psd_array):
-        self.frequency_array = frequency_array
-        self.power_spectral_density = psd_array
+    @staticmethod
+    def from_power_spectral_density_array(frequency_array, psd_array):
+        return PowerSpectralDensity(frequency_array=frequency_array, psd_array=psd_array)
 
-    def set_from_aLIGO(self):
-        psd_file = 'aLIGO_ZERO_DET_high_P_psd.txt'
+    @staticmethod
+    def from_aligo():
         logger.info("No power spectral density provided, using aLIGO,"
                     "zero detuning, high power.")
-        self.set_from_power_spectral_density_file(psd_file)
+        return PowerSpectralDensity.from_power_spectral_density_file(psd_file='aLIGO_ZERO_DET_high_P_psd.txt')
 
     @property
-    def power_spectral_density(self):
-        if self.__power_spectral_density is not None:
-            return self.__power_spectral_density
-        else:
-            self.set_to_aLIGO()
-            return self.__power_spectral_density
+    def psd_array(self):
+        return self.__psd_array
 
-    @power_spectral_density.setter
-    def power_spectral_density(self, power_spectral_density):
-        self._check_frequency_array_matches_density_array(power_spectral_density)
-        self.__power_spectral_density = power_spectral_density
-        self._interpolate_power_spectral_density()
-        self.__amplitude_spectral_density = power_spectral_density ** 0.5
+    @psd_array.setter
+    def psd_array(self, psd_array):
+        self.__check_frequency_array_matches_density_array(psd_array)
+        self.__psd_array = np.array(psd_array)
+        self.__asd_array = psd_array ** 0.5
+        self.__interpolate_power_spectral_density()
 
     @property
-    def amplitude_spectral_density(self):
-        return self.__amplitude_spectral_density
-
-    @amplitude_spectral_density.setter
-    def amplitude_spectral_density(self, amplitude_spectral_density):
-        self._check_frequency_array_matches_density_array(amplitude_spectral_density)
-        self.__amplitude_spectral_density = amplitude_spectral_density
-        self.__power_spectral_density = amplitude_spectral_density ** 2
-        self._interpolate_power_spectral_density()
-
-    def import_amplitude_spectral_density(self):
+    def asd_array(self):
+        return self.__asd_array
+
+    @asd_array.setter
+    def asd_array(self, asd_array):
+        self.__check_frequency_array_matches_density_array(asd_array)
+        self.__asd_array = np.array(asd_array)
+        self.__psd_array = asd_array ** 2
+        self.__interpolate_power_spectral_density()
+
+    def __check_frequency_array_matches_density_array(self, density_array):
+        if len(self.frequency_array) != len(density_array):
+            raise ValueError('Provided spectral density does not match frequency array. Not updating.\n'
+                             'Length spectral density {}\n Length frequency array {}\n'
+                             .format(len(density_array), len(self.frequency_array)))
+
+    def __interpolate_power_spectral_density(self):
+        """Interpolate the loaded power spectral density so it can be resampled
+           for arbitrary frequency arrays.
         """
-        Automagically load one of the amplitude spectral density curves
-        contained in the noise_curves directory.
+        self.__power_spectral_density_interpolated = interp1d(self.frequency_array,
+                                                              self.psd_array,
+                                                              bounds_error=False,
+                                                              fill_value=np.inf)
 
-        Test if the file contains a path (i.e., contains '/').
-        If not assume the file is in the default directory.
-        """
+    @property
+    def power_spectral_density_interpolated(self):
+        return self.__power_spectral_density_interpolated
 
-        if '/' not in self.amplitude_spectral_density_file:
-            self.amplitude_spectral_density_file = os.path.join(
-                os.path.dirname(__file__), 'noise_curves',
-                self.amplitude_spectral_density_file)
+    @property
+    def asd_file(self):
+        return self.__asd_file
+
+    @asd_file.setter
+    def asd_file(self, asd_file):
+        asd_file = self.__validate_file_name(file=asd_file)
+        self.__asd_file = asd_file
+        if asd_file is not None:
+            self.__import_amplitude_spectral_density()
+            self.__check_file_was_asd_file()
+
+    def __check_file_was_asd_file(self):
+        if min(self.asd_array) < 1e-30:
+            logger.warning("You specified an amplitude spectral density file.")
+            logger.warning("{} WARNING {}".format("*" * 30, "*" * 30))
+            logger.warning("The minimum of the provided curve is {:.2e}.".format(min(self.asd_array)))
+            logger.warning("You may have intended to provide this as a power spectral density.")
 
-        self.frequency_array, self.amplitude_spectral_density = np.genfromtxt(
-            self.amplitude_spectral_density_file).T
+    @property
+    def psd_file(self):
+        return self.__psd_file
+
+    @psd_file.setter
+    def psd_file(self, psd_file):
+        psd_file = self.__validate_file_name(file=psd_file)
+        self.__psd_file = psd_file
+        if psd_file is not None:
+            self.__import_power_spectral_density()
+            self.__check_file_was_psd_file()
+
+    def __check_file_was_psd_file(self):
+        if min(self.psd_array) > 1e-30:
+            logger.warning("You specified a power spectral density file.")
+            logger.warning("{} WARNING {}".format("*" * 30, "*" * 30))
+            logger.warning("The minimum of the provided curve is {:.2e}.".format(min(self.psd_array)))
+            logger.warning("You may have intended to provide this as an amplitude spectral density.")
 
-    def import_power_spectral_density(self):
+    @staticmethod
+    def __validate_file_name(file):
         """
-        Automagically load one of the power spectral density curves contained
-        in the noise_curves directory.
-
         Test if the file contains a path (i.e., contains '/').
         If not assume the file is in the default directory.
         """
-        if '/' not in self.power_spectral_density_file:
-            self.power_spectral_density_file = os.path.join(
-                os.path.dirname(__file__), 'noise_curves',
-                self.power_spectral_density_file)
-        self.frequency_array, self.power_spectral_density = np.genfromtxt(
-            self.power_spectral_density_file).T
+        if file is not None and '/' not in file:
+            file = os.path.join(os.path.dirname(__file__), 'noise_curves', file)
+        return file
 
-    def _check_frequency_array_matches_density_array(self, density_array):
-        """Check the provided frequency and spectral density arrays match."""
-        try:
-            self.frequency_array - density_array
-        except ValueError as e:
-            raise (e, 'Provided spectral density does not match frequency array. Not updating.')
+    def __import_amplitude_spectral_density(self):
+        """ Automagically load an amplitude spectral density curve """
+        self.frequency_array, self.asd_array = np.genfromtxt(self.asd_file).T
 
-    def _interpolate_power_spectral_density(self):
-        """Interpolate the loaded power spectral density so it can be resampled
-           for arbitrary frequency arrays.
-        """
-        self.power_spectral_density_interpolated = interp1d(
-            self.frequency_array, self.power_spectral_density, bounds_error=False,
-            fill_value=np.inf)
+    def __import_power_spectral_density(self):
+        """ Automagically load a power spectral density curve """
+        self.frequency_array, self.psd_array = np.genfromtxt(self.psd_file).T
 
     def get_noise_realisation(self, sampling_frequency, duration):
         """
@@ -1743,7 +1752,7 @@ class PowerSpectralDensity(object):
 
         """
         white_noise, frequencies = utils.create_white_noise(sampling_frequency, duration)
-        frequency_domain_strain = self.power_spectral_density_interpolated(frequencies) ** 0.5 * white_noise
+        frequency_domain_strain = self.__power_spectral_density_interpolated(frequencies) ** 0.5 * white_noise
         out_of_bounds = (frequencies < min(self.frequency_array)) | (frequencies > max(self.frequency_array))
         frequency_domain_strain[out_of_bounds] = 0 * (1 + 1j)
         return frequency_domain_strain, frequencies
@@ -1783,7 +1792,7 @@ def get_empty_interferometer(name):
     try:
         interferometer = load_interferometer(filename)
         return interferometer
-    except FileNotFoundError:
+    except OSError:
         logger.warning('Interferometer {} not implemented'.format(name))
 
 
@@ -1951,7 +1960,7 @@ def get_interferometer_with_fake_noise_and_injection(
         start_time = injection_parameters['geocent_time'] + 2 - duration
 
     interferometer = get_empty_interferometer(name)
-    interferometer.power_spectral_density.set_from_aLIGO()
+    interferometer.power_spectral_density = PowerSpectralDensity.from_aligo()
     if zero_noise:
         interferometer.set_strain_data_from_zero_noise(
             sampling_frequency=sampling_frequency, duration=duration,
@@ -2023,10 +2032,7 @@ def get_event_data(
     interferometers = []
 
     if interferometer_names is None:
-        if utils.command_line_args.detectors:
-            interferometer_names = utils.command_line_args.detectors
-        else:
-            interferometer_names = ['H1', 'L1', 'V1']
+        interferometer_names = ['H1', 'L1', 'V1']
 
     for name in interferometer_names:
         try:
diff --git a/tupak/gw/likelihood.py b/tupak/gw/likelihood.py
index 7faf696d818808fea17e7be9b7ccae5b04460231..ec89d11f08655f85b845b8c24135b1fb47a1a4d4 100644
--- a/tupak/gw/likelihood.py
+++ b/tupak/gw/likelihood.py
@@ -82,6 +82,12 @@ class GravitationalWaveTransient(likelihood.Likelihood):
             self._setup_distance_marginalization()
             prior['luminosity_distance'] = float(self._ref_dist)
 
+    def __repr__(self):
+        return self.__class__.__name__ + '(interferometers={},\n\twaveform_generator={},\n\ttime_marginalization={}, ' \
+                                         'distance_marginalization={}, phase_marginalization={}, prior={})'\
+            .format(self.interferometers, self.waveform_generator, self.time_marginalization,
+                    self.distance_marginalization, self.phase_marginalization, self.prior)
+
     def _check_set_duration_and_sampling_frequency_of_waveform_generator(self):
         """ Check the waveform_generator has the same duration and
         sampling_frequency as the interferometers. If they are unset, then
@@ -110,8 +116,8 @@ class GravitationalWaveTransient(likelihood.Likelihood):
                 'Prior not provided for {}, using the BBH default.'.format(key))
             if key == 'geocent_time':
                 self.prior[key] = Uniform(
-                        self.interferometers.start_time,
-                        self.interferometers.start_time + self.interferometers.duration)
+                    self.interferometers.start_time,
+                    self.interferometers.start_time + self.interferometers.duration)
             else:
                 self.prior[key] = BBHPriorSet()[key]
 
@@ -172,9 +178,9 @@ class GravitationalWaveTransient(likelihood.Likelihood):
             if self.time_marginalization:
                 matched_filter_snr_squared_tc_array +=\
                     4 / self.waveform_generator.duration * np.fft.fft(
-                        signal_ifo.conjugate()[0:-1]
-                        * interferometer.frequency_domain_strain[0:-1]
-                        / interferometer.power_spectral_density_array[0:-1])
+                        signal_ifo.conjugate()[0:-1] *
+                        interferometer.frequency_domain_strain[0:-1] /
+                        interferometer.power_spectral_density_array[0:-1])
 
         if self.time_marginalization:
 
@@ -190,11 +196,12 @@ class GravitationalWaveTransient(likelihood.Likelihood):
                         rho_mf_ref_tc_array.real, rho_opt_ref)
                     log_l = logsumexp(dist_marged_log_l_tc_array) + self.tc_log_norm
             elif self.phase_marginalization:
-                log_l = logsumexp(self._bessel_function_interped(abs(matched_filter_snr_squared_tc_array)))\
-                        - optimal_snr_squared / 2 + self.tc_log_norm
+                log_l = (
+                    logsumexp(self._bessel_function_interped(abs(matched_filter_snr_squared_tc_array))) -
+                    optimal_snr_squared / 2 + self.tc_log_norm)
             else:
-                log_l = logsumexp(matched_filter_snr_squared_tc_array.real) + self.tc_log_norm\
-                        - optimal_snr_squared / 2
+                log_l = (logsumexp(matched_filter_snr_squared_tc_array.real) +
+                         self.tc_log_norm - optimal_snr_squared / 2)
 
         elif self.distance_marginalization:
             rho_mf_ref, rho_opt_ref = self._setup_rho(matched_filter_snr_squared, optimal_snr_squared)
@@ -212,9 +219,9 @@ class GravitationalWaveTransient(likelihood.Likelihood):
         return log_l.real
 
     def _setup_rho(self, matched_filter_snr_squared, optimal_snr_squared):
-        rho_opt_ref = optimal_snr_squared.real * \
-                      self.waveform_generator.parameters['luminosity_distance'] ** 2 \
-                      / self._ref_dist ** 2.
+        rho_opt_ref = (optimal_snr_squared.real *
+                       self.waveform_generator.parameters['luminosity_distance'] ** 2 /
+                       self._ref_dist ** 2.)
         rho_mf_ref = matched_filter_snr_squared * \
             self.waveform_generator.parameters['luminosity_distance'] / self._ref_dist
         return rho_mf_ref, rho_opt_ref
@@ -273,8 +280,8 @@ class GravitationalWaveTransient(likelihood.Likelihood):
 
     def _setup_phase_marginalization(self):
         self._bessel_function_interped = interp1d(
-            np.logspace(-5, 10, int(1e6)), np.log([i0e(snr) for snr in np.logspace(-5, 10, int(1e6))])
-            + np.logspace(-5, 10, int(1e6)), bounds_error=False, fill_value=(0, np.nan))
+            np.logspace(-5, 10, int(1e6)), np.log([i0e(snr) for snr in np.logspace(-5, 10, int(1e6))]) +
+            np.logspace(-5, 10, int(1e6)), bounds_error=False, fill_value=(0, np.nan))
 
     def _setup_time_marginalization(self):
         delta_tc = 2 / self.waveform_generator.sampling_frequency
@@ -307,6 +314,10 @@ class BasicGravitationalWaveTransient(likelihood.Likelihood):
         self.interferometers = interferometers
         self.waveform_generator = waveform_generator
 
+    def __repr__(self):
+        return self.__class__.__name__ + '(interferometers={},\n\twaveform_generator={})'\
+            .format(self.interferometers, self.waveform_generator)
+
     def noise_log_likelihood(self):
         """ Calculates the real part of noise log-likelihood
 
@@ -360,8 +371,8 @@ class BasicGravitationalWaveTransient(likelihood.Likelihood):
 
         log_l = - 2. / self.waveform_generator.duration * np.vdot(
             interferometer.frequency_domain_strain - signal_ifo,
-            (interferometer.frequency_domain_strain - signal_ifo)
-            / interferometer.power_spectral_density_array)
+            (interferometer.frequency_domain_strain - signal_ifo) /
+            interferometer.power_spectral_density_array)
         return log_l.real
 
 
diff --git a/tupak/gw/prior.py b/tupak/gw/prior.py
index a9bbc9f8c94118c9a5e41e7458768c98e5544f65..e876b25440788d4b645ca78541ab74eb03ffbaa3 100644
--- a/tupak/gw/prior.py
+++ b/tupak/gw/prior.py
@@ -23,7 +23,7 @@ class UniformComovingVolume(FromFile):
         """
         file_name = os.path.join(os.path.dirname(__file__), 'prior_files', 'comoving.txt')
         FromFile.__init__(self, file_name=file_name, minimum=minimum, maximum=maximum, name=name,
-                          latex_label=latex_label)
+                          latex_label=latex_label, unit='Mpc')
 
 
 class BBHPriorSet(PriorSet):
@@ -96,6 +96,45 @@ class BBHPriorSet(PriorSet):
         return redundant
 
 
+class BNSPriorSet(PriorSet):
+
+    def __init__(self, dictionary=None, filename=None):
+        """ Initialises a Prior set for Binary Neutron Stars
+
+        Parameters
+        ----------
+        dictionary: dict, optional
+            See superclass
+        filename: str, optional
+            See superclass
+        """
+        if dictionary is None and filename is None:
+            filename = os.path.join(os.path.dirname(__file__), 'prior_files', 'binary_neutron_stars.prior')
+            logger.info('No prior given, using default BNS priors in {}.'.format(filename))
+        elif filename is not None:
+            if not os.path.isfile(filename):
+                filename = os.path.join(os.path.dirname(__file__), 'prior_files', filename)
+        PriorSet.__init__(self, dictionary=dictionary, filename=filename)
+
+    def test_redundancy(self, key):
+        bbh_redundancy = BBHPriorSet().test_redundancy(key)
+        if bbh_redundancy:
+            return True
+        redundant = False
+
+        tidal_parameters =\
+            {'lambda_1', 'lambda_2', 'lambda_tilde', 'delta_lambda'}
+
+        if key in tidal_parameters:
+            if len(tidal_parameters.intersection(self)) > 2:
+                redundant = True
+                logger.warning('{} in prior. This may lead to unexpected behaviour.'.format(
+                    tidal_parameters.intersection(self)))
+            elif len(tidal_parameters.intersection(self)) == 2:
+                redundant = True
+        return redundant
+
+
 Prior._default_latex_labels = {
     'mass_1': '$m_1$',
     'mass_2': '$m_2$',
@@ -118,7 +157,11 @@ Prior._default_latex_labels = {
     'cos_iota': '$\cos\iota$',
     'psi': '$\psi$',
     'phase': '$\phi$',
-    'geocent_time': '$t_c$'}
+    'geocent_time': '$t_c$',
+    'lambda_1': '$\\Lambda_1$',
+    'lambda_2': '$\\Lambda_2$',
+    'lambda_tilde': '$\\tilde{\\Lambda}$',
+    'delta_lambda': '$\\delta\\Lambda$'}
 
 
 class CalibrationPriorSet(PriorSet):
diff --git a/tupak/gw/prior_files/binary_neutron_stars.prior b/tupak/gw/prior_files/binary_neutron_stars.prior
new file mode 100644
index 0000000000000000000000000000000000000000..f7b427c3c324c00ea8246d5150e6df9362f709c9
--- /dev/null
+++ b/tupak/gw/prior_files/binary_neutron_stars.prior
@@ -0,0 +1,23 @@
+# These are the default priors we use for BNS systems.
+# Note that you may wish to use more specific mass and distance parameters.
+# These commands are all known to tupak.gw.prior.
+# Lines beginning "#" are ignored.
+mass_1 = Uniform(name='mass_1', minimum=1, maximum=2)
+mass_2 = Uniform(name='mass_2', minimum=1, maximum=2)
+# chirp_mass = Uniform(name='chirp_mass', minimum=0.87, maximum=1.74)
+# total_mass =  Uniform(name='total_mass', minimum=2, maximum=4)
+# mass_ratio =  Uniform(name='mass_ratio', minimum=0.5, maximum=1)
+# symmetric_mass_ratio =  Uniform(name='symmetric_mass_ratio', minimum=0.22, maximum=0.25)
+a_1 =  Uniform(name='a_1', minimum= -0.05, maximum= 0.05)
+a_2 =  Uniform(name='a_2', minimum= -0.05, maximum= 0.05)
+luminosity_distance =  tupak.gw.prior.UniformComovingVolume(name='luminosity_distance', minimum=10, maximum=500)
+dec =  Cosine(name='dec')
+ra =  Uniform(name='ra', minimum=0, maximum=2 * np.pi)
+iota =  Sine(name='iota')
+# cos_iota =  Uniform(name='cos_iota', minimum=-1, maximum=1)
+psi =  Uniform(name='psi', minimum=0, maximum=np.pi)
+phase =  Uniform(name='phase', minimum=0, maximum=2 * np.pi)
+lambda_1 = Uniform(name='lambda_1', minimum=0, maximum=3000 )
+lambda_2 = Uniform(name='lambda_2', minimum=0, maximum=3000 )
+# lambda_tilde = Uniform(name='lambda_tilde', minimum=0, maximum=5000)
+# delta_lambda = Uniform(name='delta_lambda', minimum=-5000, maximum=5000)
diff --git a/tupak/gw/source.py b/tupak/gw/source.py
index fa4fb6cd5b1163727cf312ce4f5799dc704aee6c..85641ab26d61b7fc8963e5dda18c4045bc4279de 100644
--- a/tupak/gw/source.py
+++ b/tupak/gw/source.py
@@ -7,6 +7,7 @@ from tupak.core import utils
 
 try:
     import lalsimulation as lalsim
+    import lal
 except ImportError:
     logger.warning("You do not have lalsuite installed currently. You will "
                    " not be able to use some of the prebuilt functions.")
@@ -111,9 +112,10 @@ def lal_binary_black_hole(
 
     return {'plus': h_plus, 'cross': h_cross}
 
+
 def lal_eccentric_binary_black_hole_no_spins(
-        frequency_array, mass_1, mass_2, eccentricity, luminosity_distance, iota, phase, ra, dec, 
-        geocent_time, psi, **kwargs):        
+        frequency_array, mass_1, mass_2, eccentricity, luminosity_distance, iota, phase, ra, dec,
+        geocent_time, psi, **kwargs):
     """ Eccentric binary black hole waveform model using lalsimulation (EccentricFD)
 
     Parameters
@@ -147,7 +149,7 @@ def lal_eccentric_binary_black_hole_no_spins(
     -------
     dict: A dictionary with the plus and cross polarisation strain modes
     """
-    
+
     waveform_kwargs = dict(waveform_approximant='EccentricFD', reference_frequency=10.0,
                            minimum_frequency=10.0)
     waveform_kwargs.update(kwargs)
@@ -161,14 +163,14 @@ def lal_eccentric_binary_black_hole_no_spins(
     luminosity_distance = luminosity_distance * 1e6 * utils.parsec
     mass_1 = mass_1 * utils.solar_mass
     mass_2 = mass_2 * utils.solar_mass
-    
+
     spin_1x = 0.0
     spin_1y = 0.0
-    spin_1z = 0.0 
+    spin_1z = 0.0
     spin_2x = 0.0
     spin_2y = 0.0
     spin_2z = 0.0
-    
+
     longitude_ascending_nodes = 0.0
     mean_per_ano = 0.0
 
@@ -193,21 +195,20 @@ def lal_eccentric_binary_black_hole_no_spins(
 
 
 def sinegaussian(frequency_array, hrss, Q, frequency, ra, dec, geocent_time, psi):
-    tau = Q / (np.sqrt(2.0)*np.pi*frequency)
-    temp = Q / (4.0*np.sqrt(np.pi)*frequency)
-    t = geocent_time
+    tau = Q / (np.sqrt(2.0) * np.pi * frequency)
+    temp = Q / (4.0 * np.sqrt(np.pi) * frequency)
     fm = frequency_array - frequency
     fp = frequency_array + frequency
 
-    h_plus = ((hrss / np.sqrt(temp * (1+np.exp(-Q**2))))
-              * ((np.sqrt(np.pi)*tau)/2.0)
-              * (np.exp(-fm**2 * np.pi**2 * tau**2)
-                  + np.exp(-fp**2 * np.pi**2 * tau**2)))
+    h_plus = ((hrss / np.sqrt(temp * (1 + np.exp(-Q**2)))) *
+              ((np.sqrt(np.pi) * tau) / 2.0) *
+              (np.exp(-fm**2 * np.pi**2 * tau**2) +
+              np.exp(-fp**2 * np.pi**2 * tau**2)))
 
-    h_cross = (-1j*(hrss / np.sqrt(temp * (1-np.exp(-Q**2))))
-               * ((np.sqrt(np.pi)*tau)/2.0)
-               * (np.exp(-fm**2 * np.pi**2 * tau**2)
-                  - np.exp(-fp**2 * np.pi**2 * tau**2)))
+    h_cross = (-1j * (hrss / np.sqrt(temp * (1 - np.exp(-Q**2)))) *
+               ((np.sqrt(np.pi) * tau) / 2.0) *
+               (np.exp(-fm**2 * np.pi**2 * tau**2) -
+               np.exp(-fp**2 * np.pi**2 * tau**2)))
 
     return{'plus': h_plus, 'cross': h_cross}
 
@@ -223,8 +224,8 @@ def supernova(
     # waveform in file at 10kpc
     scaling = 1e-3 * (10.0 / luminosity_distance)
 
-    h_plus = scaling * (realhplus + 1.0j*imaghplus)
-    h_cross = scaling * (realhcross + 1.0j*imaghcross)
+    h_plus = scaling * (realhplus + 1.0j * imaghplus)
+    h_cross = scaling * (realhcross + 1.0j * imaghcross)
     return {'plus': h_plus, 'cross': h_cross}
 
 
@@ -236,18 +237,112 @@ def supernova_pca_model(
     realPCs = kwargs['realPCs']
     imagPCs = kwargs['imagPCs']
 
-    pc1 = realPCs[:, 0] + 1.0j*imagPCs[:, 0]
-    pc2 = realPCs[:, 1] + 1.0j*imagPCs[:, 1]
-    pc3 = realPCs[:, 2] + 1.0j*imagPCs[:, 2]
-    pc4 = realPCs[:, 3] + 1.0j*imagPCs[:, 3]
-    pc5 = realPCs[:, 4] + 1.0j*imagPCs[:, 5]
+    pc1 = realPCs[:, 0] + 1.0j * imagPCs[:, 0]
+    pc2 = realPCs[:, 1] + 1.0j * imagPCs[:, 1]
+    pc3 = realPCs[:, 2] + 1.0j * imagPCs[:, 2]
+    pc4 = realPCs[:, 3] + 1.0j * imagPCs[:, 3]
+    pc5 = realPCs[:, 4] + 1.0j * imagPCs[:, 4]
 
     # file at 10kpc
     scaling = 1e-23 * (10.0 / luminosity_distance)
 
-    h_plus = scaling * (pc_coeff1*pc1 + pc_coeff2*pc2 + pc_coeff3*pc3
-                        + pc_coeff4*pc4 + pc_coeff5*pc5)
-    h_cross = scaling * (pc_coeff1*pc1 + pc_coeff2*pc2 + pc_coeff3*pc3
-                         + pc_coeff4*pc4 + pc_coeff5*pc5)
+    h_plus = scaling * (pc_coeff1 * pc1 + pc_coeff2 * pc2 + pc_coeff3 * pc3 +
+                        pc_coeff4 * pc4 + pc_coeff5 * pc5)
+    h_cross = scaling * (pc_coeff1 * pc1 + pc_coeff2 * pc2 + pc_coeff3 * pc3 +
+                         pc_coeff4 * pc4 + pc_coeff5 * pc5)
+
+    return {'plus': h_plus, 'cross': h_cross}
+
+
+def lal_binary_neutron_star(
+        frequency_array, mass_1, mass_2, luminosity_distance, a_1, a_2,
+        iota, phase, lambda_1, lambda_2, ra, dec, geocent_time, psi, **kwargs):
+    """ A Binary Neutron Star waveform model using lalsimulation
+
+    Parameters
+    ----------
+    frequency_array: array_like
+        The frequencies at which we want to calculate the strain
+    mass_1: float
+        The mass of the heavier object in solar masses
+    mass_2: float
+        The mass of the lighter object in solar masses
+    luminosity_distance: float
+        The luminosity distance in megaparsec
+    a_1: float
+        Dimensionless spin magnitude
+    a_2: float
+        Dimensionless secondary spin magnitude
+    iota: float
+        Orbital inclination
+    phase: float
+        The phase at coalescence
+    ra: float
+        The right ascension of the binary
+    dec: float
+        The declination of the object
+    geocent_time: float
+        The time at coalescence
+    psi: float
+        Orbital polarisation
+    lambda_1: float
+        Dimensionless tidal deformability of mass_1
+    lambda_2: float
+        Dimensionless tidal deformability of mass_2
+
+    kwargs: dict
+        Optional keyword arguments
+
+    Returns
+    -------
+    dict: A dictionary with the plus and cross polarisation strain modes
+    """
+
+    waveform_kwargs = dict(waveform_approximant='TaylorF2', reference_frequency=50.0,
+                           minimum_frequency=20.0)
+    waveform_kwargs.update(kwargs)
+    waveform_approximant = waveform_kwargs['waveform_approximant']
+    reference_frequency = waveform_kwargs['reference_frequency']
+    minimum_frequency = waveform_kwargs['minimum_frequency']
+
+    if mass_2 > mass_1:
+        return None
+
+    luminosity_distance = luminosity_distance * 1e6 * utils.parsec
+    mass_1 = mass_1 * utils.solar_mass
+    mass_2 = mass_2 * utils.solar_mass
+
+    spin_1x = 0
+    spin_1y = 0
+    spin_1z = a_1
+    spin_2x = 0
+    spin_2y = 0
+    spin_2z = a_2
+
+    longitude_ascending_nodes = 0.0
+    eccentricity = 0.0
+    mean_per_ano = 0.0
+
+    waveform_dictionary = lal.CreateDict()
+    lalsim.SimInspiralWaveformParamsInsertTidalLambda1(waveform_dictionary, lambda_1)
+    lalsim.SimInspiralWaveformParamsInsertTidalLambda2(waveform_dictionary, lambda_2)
+
+    approximant = lalsim.GetApproximantFromString(waveform_approximant)
+
+    maximum_frequency = frequency_array[-1]
+    delta_frequency = frequency_array[1] - frequency_array[0]
+
+    hplus, hcross = lalsim.SimInspiralChooseFDWaveform(
+        mass_1, mass_2, spin_1x, spin_1y, spin_1z, spin_2x, spin_2y,
+        spin_2z, luminosity_distance, iota, phase,
+        longitude_ascending_nodes, eccentricity, mean_per_ano, delta_frequency,
+        minimum_frequency, maximum_frequency, reference_frequency,
+        waveform_dictionary, approximant)
+
+    h_plus = hplus.data.data
+    h_cross = hcross.data.data
+
+    h_plus = h_plus[:len(frequency_array)]
+    h_cross = h_cross[:len(frequency_array)]
 
     return {'plus': h_plus, 'cross': h_cross}
diff --git a/tupak/gw/utils.py b/tupak/gw/utils.py
index 880921fd9cb7ac125f705abef34290c6263080e5..60fb7d150270bb7130f6f93d8605e3d632aca32c 100644
--- a/tupak/gw/utils.py
+++ b/tupak/gw/utils.py
@@ -3,11 +3,9 @@ import os
 
 import numpy as np
 
-from ..core.utils import (gps_time_to_gmst, ra_dec_to_theta_phi, speed_of_light,
-                          nfft, logger)
+from ..core.utils import (gps_time_to_gmst, ra_dec_to_theta_phi, speed_of_light, logger)
 
 try:
-    from gwpy.signal import filter_design
     from gwpy.timeseries import TimeSeries
 except ImportError:
     logger.warning("You do not have gwpy installed currently. You will "
@@ -158,8 +156,8 @@ def get_vertex_position_geocentric(latitude, longitude, elevation):
     """
     semi_major_axis = 6378137  # for ellipsoid model of Earth, in m
     semi_minor_axis = 6356752.314  # in m
-    radius = semi_major_axis**2 * (semi_major_axis**2 * np.cos(latitude)**2
-                                   + semi_minor_axis**2 * np.sin(latitude)**2)**(-0.5)
+    radius = semi_major_axis**2 * (semi_major_axis**2 * np.cos(latitude)**2 +
+                                   semi_minor_axis**2 * np.sin(latitude)**2)**(-0.5)
     x_comp = (radius + elevation) * np.cos(latitude) * np.cos(longitude)
     y_comp = (radius + elevation) * np.cos(latitude) * np.sin(longitude)
     z_comp = ((semi_minor_axis / semi_major_axis)**2 * radius + elevation) * np.sin(latitude)
@@ -282,8 +280,12 @@ def get_event_time(event):
     event_time: float
         Merger time
     """
-    event_times = {'150914': 1126259462.422, '151012': 1128678900.4443,  '151226': 1135136350.65,
-                   '170104': 1167559936.5991, '170608': 1180922494.4902, '170814': 1186741861.5268,
+    event_times = {'150914': 1126259462.422,
+                   '151012': 1128678900.4443,
+                   '151226': 1135136350.65,
+                   '170104': 1167559936.5991,
+                   '170608': 1180922494.4902,
+                   '170814': 1186741861.5268,
                    '170817': 1187008882.4457}
     if 'GW' or 'LVT' in event:
         event = event[-6:]
@@ -319,8 +321,10 @@ def get_open_strain_data(
         Passed to `gwpy.timeseries.TimeSeries.fetch_open_data`
 
     Returns
-    -----------
+    -------
     strain: gwpy.timeseries.TimeSeries
+        The object containing the strain data. If the connection to the open-data server
+        fails, this function returns `None`.
 
     """
     filename = '{}/{}_{}_{}.txt'.format(outdir, name, start_time, end_time)
@@ -336,9 +340,16 @@ def get_open_strain_data(
     else:
         logger.info('Fetching open data from {} to {} with buffer time {}'
                     .format(start_time, end_time, buffer_time))
-        strain = TimeSeries.fetch_open_data(name, start_time, end_time, **kwargs)
-        logger.info('Saving data to {}'.format(filename))
-        strain.write(filename)
+        try:
+            strain = TimeSeries.fetch_open_data(name, start_time, end_time, **kwargs)
+            logger.info('Saving cache of data to {}'.format(filename))
+            strain.write(filename)
+        except Exception as e:
+            logger.info("Unable to fetch open data, see debug for detailed info")
+            logger.info("Call to gwpy.timeseries.TimeSeries.fetch_open_data returned {}"
+                        .format(e))
+            strain = None
+
     return strain
 
 
diff --git a/tupak/gw/waveform_generator.py b/tupak/gw/waveform_generator.py
index 88c892abae44137e04bac4941fbd544523f5e521..1c0f785823ac373a1dd1c8d2306e524ae0d17b51 100644
--- a/tupak/gw/waveform_generator.py
+++ b/tupak/gw/waveform_generator.py
@@ -6,7 +6,7 @@ class WaveformGenerator(object):
 
     def __init__(self, duration=None, sampling_frequency=None, start_time=0, frequency_domain_source_model=None,
                  time_domain_source_model=None, parameters=None,
-                 parameter_conversion=lambda parameters, search_keys: (parameters, []),
+                 parameter_conversion=None,
                  non_standard_sampling_parameter_keys=None,
                  waveform_arguments=None):
         """ A waveform generator
@@ -52,7 +52,10 @@ class WaveformGenerator(object):
         self.__parameters_from_source_model()
         self.duration = duration
         self.sampling_frequency = sampling_frequency
-        self.parameter_conversion = parameter_conversion
+        if parameter_conversion is None:
+            self.parameter_conversion = lambda params, search_keys: (params, [])
+        else:
+            self.parameter_conversion = parameter_conversion
         self.non_standard_sampling_parameter_keys = non_standard_sampling_parameter_keys
         self.parameters = parameters
         if waveform_arguments is not None:
@@ -66,6 +69,27 @@ class WaveformGenerator(object):
         self.__full_source_model_keyword_arguments.update(self.parameters)
         self.__added_keys = []
 
+    def __repr__(self):
+        if self.frequency_domain_source_model is not None:
+            fdsm_name = self.frequency_domain_source_model.__name__
+        else:
+            fdsm_name = None
+        if self.time_domain_source_model is not None:
+            tdsm_name = self.time_domain_source_model.__name__
+        else:
+            tdsm_name = None
+        if self.parameter_conversion.__name__ == '<lambda>':
+            param_conv_name = None
+        else:
+            param_conv_name = self.parameter_conversion.__name__
+
+        return self.__class__.__name__ + '(duration={}, sampling_frequency={}, start_time={}, ' \
+                                         'frequency_domain_source_model={}, time_domain_source_model={}, ' \
+                                         'parameters={}, parameter_conversion={}, ' \
+                                         'non_standard_sampling_parameter_keys={}, waveform_arguments={})'\
+            .format(self.duration, self.sampling_frequency, self.start_time, fdsm_name, tdsm_name, self.parameters,
+                    param_conv_name, self.non_standard_sampling_parameter_keys, self.waveform_arguments)
+
     def frequency_domain_strain(self):
         """ Rapper to source_model.
 
diff --git a/tupak/hyper/likelihood.py b/tupak/hyper/likelihood.py
index c65c1db6a56b5c752075518fc83e0f5d1b5f32df..e911d480aaa1fff714f53d11eff8fd42713b435a 100644
--- a/tupak/hyper/likelihood.py
+++ b/tupak/hyper/likelihood.py
@@ -43,8 +43,8 @@ class HyperparameterLikelihood(Likelihood):
 
     def log_likelihood(self):
         self.hyper_prior.parameters.update(self.parameters)
-        log_l = np.sum(np.log(np.sum(self.hyper_prior.prob(self.data)
-                                     / self.sampling_prior.prob(self.data), axis=-1))) + self.log_factor
+        log_l = np.sum(np.log(np.sum(self.hyper_prior.prob(self.data) /
+                       self.sampling_prior.prob(self.data), axis=-1))) + self.log_factor
         return np.nan_to_num(log_l)
 
     def resample_posteriors(self, max_samples=None):