diff --git a/examples/injection_examples/using_gwin.py b/examples/injection_examples/using_gwin.py
new file mode 100644
index 0000000000000000000000000000000000000000..0dc3a315953a62328d3962e4158758b34e985d65
--- /dev/null
+++ b/examples/injection_examples/using_gwin.py
@@ -0,0 +1,92 @@
+#!/usr/bin/env python
+"""
+An example of how to use tupak with `gwin` (https://github.com/gwastro/gwin) to
+perform CBC parameter estimation.
+
+To run this example, the pip-installable pycbc package is sufficient, but
+gwin must be installed from source. You can do this by cloning the
+repository (https://github.com/gwastro/gwin) and running
+
+$ python setup.py install
+
+A practical difference between gwin and tupak is that while fixed parameters
+are specified via the prior in tupak, in gwin, these are fixed at instantiation
+of the model. So, in the following, we only create priors for the parameters
+to be searched over.
+
+"""
+from __future__ import division, print_function
+import numpy as np
+import tupak
+
+import gwin
+from pycbc import psd as pypsd
+from pycbc.waveform.generator import (FDomainDetFrameGenerator,
+                                      FDomainCBCGenerator)
+
+label = 'using_gwin'
+
+# Search priors
+priors = dict()
+priors['distance'] = tupak.core.prior.Uniform(500, 2000, 'distance')
+priors['polarization'] = tupak.core.prior.Uniform(0, np.pi, 'polarization')
+
+# Data variables
+seglen = 4
+sample_rate = 2048
+N = seglen * sample_rate / 2 + 1
+fmin = 30.
+
+# Injected signal variables
+injection_parameters = dict(mass1=38.6, mass2=29.3, spin1z=0, spin2z=0,
+                            tc=0, ra=3.1, dec=1.37, polarization=2.76,
+                            distance=1500)
+
+# These lines figure out which parameters are to be searched over
+variable_parameters = list(priors.keys())
+fixed_parameters = injection_parameters.copy()
+for key in priors:
+    fixed_parameters.pop(key)
+
+# These lines generate the `model` object - see https://gwin.readthedocs.io/en/latest/api/gwin.models.gaussian_noise.html
+generator = FDomainDetFrameGenerator(
+    FDomainCBCGenerator, 0.,
+    variable_args=variable_parameters, detectors=['H1', 'L1'],
+    delta_f=1. / seglen, f_lower=fmin,
+    approximant='IMRPhenomPv2', **fixed_parameters)
+signal = generator.generate(**injection_parameters)
+psd = pypsd.aLIGOZeroDetHighPower(int(N), 1. / seglen, 20.)
+psds = {'H1': psd, 'L1': psd}
+model = gwin.models.GaussianNoise(
+    variable_parameters, signal, generator, fmin, psds=psds)
+model.update(**injection_parameters)
+
+
+# This creates a wrapper class to convert the model into a tupak.likelihood object
+class GWINLikelihood(tupak.core.likelihood.Likelihood):
+    def __init__(self, model):
+        """ A likelihood to wrap around GWIN model objects
+
+        Parameters
+        ----------
+        model: gwin.model.GaussianNoise
+            A gwin model
+
+        """
+        self.model = model
+        self.parameters = {x: None for x in self.model.variable_params}
+
+    def log_likelihood(self):
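+        # push the parameter values proposed by the sampler into the gwin
+        # model, then evaluate and return its log likelihood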
+        self.model.update(**self.parameters)
+        return self.model.loglikelihood
+
+
+likelihood = GWINLikelihood(model)
+
+
+# Now run the inference
+result = tupak.run_sampler(
+    likelihood=likelihood, priors=priors, sampler='dynesty', npoints=500,
+    label=label)
+result.plot_corner()
+
diff --git a/examples/other_examples/linear_regression_pymc3_custom_likelihood.py b/examples/other_examples/linear_regression_pymc3_custom_likelihood.py
index 3b3ab8af138ead0dc1428e1737eb46ed1ea255ca..4d87b8110bf6c4490378c29b61df2e81656df6b9 100644
--- a/examples/other_examples/linear_regression_pymc3_custom_likelihood.py
+++ b/examples/other_examples/linear_regression_pymc3_custom_likelihood.py
@@ -99,7 +99,7 @@ class GaussianLikelihoodPyMC3(tupak.Likelihood):
         with sampler.pymc3_model:
             mdist = sampler.pymc3_priors['m']
             cdist = sampler.pymc3_priors['c']
-            
+
             mu = model(time, mdist, cdist)
 
             # set the likelihood distribution
diff --git a/test/calibration_tests.py b/test/calibration_tests.py
index b481e2936ed88ec6d4cf68136c495f0ef4d25539..dce70e53f192fb1a34fbee6dfc6319d82ff4218e 100644
--- a/test/calibration_tests.py
+++ b/test/calibration_tests.py
@@ -11,6 +11,11 @@ class TestBaseClass(unittest.TestCase):
     def tearDown(self):
         del self.model
 
+    def test_repr(self):
+        expected = 'Recalibrate(prefix=\'{}\')'.format('recalib_')
+        actual = repr(self.model)
+        self.assertEqual(expected, actual)
+
     def test_calibration_factor(self):
         frequency_array = np.linspace(20, 1024, 1000)
         cal_factor = self.model.get_calibration_factor(frequency_array)
@@ -20,14 +25,22 @@ class TestBaseClass(unittest.TestCase):
 class TestCubicSpline(unittest.TestCase):
 
     def setUp(self):
+        self.prefix = 'recalib_'
+        self.minimum_frequency = 20
+        self.maximum_frequency = 1024
+        self.n_points = 5
         self.model = calibration.CubicSpline(
-            prefix='recalib_', minimum_frequency=20, maximum_frequency=1024,
-            n_points=5)
+            prefix=self.prefix, minimum_frequency=self.minimum_frequency,
+            maximum_frequency=self.maximum_frequency, n_points=self.n_points)
         self.parameters = {'recalib_{}_{}'.format(param, ii): 0.0
                            for ii in range(5)
                            for param in ['amplitude', 'phase']}
 
     def tearDown(self):
+        del self.prefix
+        del self.minimum_frequency
+        del self.maximum_frequency
+        del self.n_points
         del self.model
         del self.parameters
 
@@ -37,6 +50,12 @@ class TestCubicSpline(unittest.TestCase):
                                                        **self.parameters)
         assert np.alltrue(cal_factor.real == np.ones_like(frequency_array))
 
+    def test_repr(self):
+        expected = 'CubicSpline(prefix=\'{}\', minimum_frequency={}, maximum_frequency={}, n_points={})'\
+            .format(self.prefix, self.minimum_frequency, self.maximum_frequency, self.n_points)
+        actual = repr(self.model)
+        self.assertEqual(expected, actual)
+
 
 class TestCubicSplineRequiresFourNodes(unittest.TestCase):
 
diff --git a/test/detector_tests.py b/test/detector_tests.py
index aefff9ef2ab074fca50c1223a4a96735684921f6..a69ca162378220c3149148d31784c46972f2f69c 100644
--- a/test/detector_tests.py
+++ b/test/detector_tests.py
@@ -8,6 +8,8 @@ from mock import patch
 import numpy as np
 import scipy.signal.windows
 import gwpy
+import os
+import logging
 
 
 class TestDetector(unittest.TestCase):
@@ -191,14 +193,6 @@ class TestDetector(unittest.TestCase):
             self.ifo.yarm_azimuth = 12
             self.assertEqual(self.ifo.detector_tensor, 0)
 
-    def test_amplitude_spectral_density_array(self):
-        self.ifo.power_spectral_density.power_spectral_density_interpolated = MagicMock(return_value=np.array([1, 4]))
-        self.assertTrue(np.array_equal(self.ifo.amplitude_spectral_density_array, np.array([1, 2])))
-
-    def test_power_spectral_density_array(self):
-        self.ifo.power_spectral_density.power_spectral_density_interpolated = MagicMock(return_value=np.array([1, 4]))
-        self.assertTrue(np.array_equal(self.ifo.power_spectral_density_array, np.array([1, 4])))
-
     def test_antenna_response_default(self):
         with mock.patch('tupak.gw.utils.get_polarization_tensor') as m:
             with mock.patch('numpy.einsum') as n:
@@ -310,6 +304,17 @@ class TestDetector(unittest.TestCase):
             self.assertTrue(np.array_equal(expected[0], actual[0]))  # array-like element has to be evaluated separately
             self.assertListEqual(expected[1], actual[1])
 
+    def test_repr(self):
+        expected = 'Interferometer(name=\'{}\', power_spectral_density={}, minimum_frequency={}, ' \
+                   'maximum_frequency={}, length={}, latitude={}, longitude={}, elevation={}, xarm_azimuth={}, ' \
+                   'yarm_azimuth={}, xarm_tilt={}, yarm_tilt={})' \
+            .format(self.name, self.power_spectral_density, float(self.minimum_frequency),
+                    float(self.maximum_frequency), float(self.length), float(self.latitude), float(self.longitude),
+                    float(self.elevation), float(self.xarm_azimuth), float(self.yarm_azimuth), float(self.xarm_tilt),
+                    float(self.yarm_tilt))
+        self.assertEqual(expected, repr(self.ifo))
+
 
 class TestInterferometerStrainData(unittest.TestCase):
 
@@ -542,10 +547,10 @@ class TestInterferometerStrainData(unittest.TestCase):
     def test_frequency_domain_strain_when_set(self):
         self.ifosd.sampling_frequency = 200
         self.ifosd.duration = 4
-        expected_strain = self.ifosd.frequency_array*self.ifosd.frequency_mask
+        expected_strain = self.ifosd.frequency_array * self.ifosd.frequency_mask
         self.ifosd._frequency_domain_strain = expected_strain
         self.assertTrue(np.array_equal(expected_strain,
-                                        self.ifosd.frequency_domain_strain))
+                                       self.ifosd.frequency_domain_strain))
 
     @patch('tupak.core.utils.nfft')
     def test_frequency_domain_strain_from_frequency_domain_strain(self, m):
@@ -790,5 +795,201 @@ class TestInterferometerList(unittest.TestCase):
         self.assertListEqual([self.ifo1.name, new_ifo.name, self.ifo2.name], names)
 
 
+class TestPowerSpectralDensityWithoutFiles(unittest.TestCase):
+
+    def setUp(self):
+        self.frequency_array = np.array([1., 2., 3.])
+        self.psd_array = np.array([16., 25., 36.])
+        self.asd_array = np.array([4., 5., 6.])
+
+    def tearDown(self):
+        del self.frequency_array
+        del self.psd_array
+        del self.asd_array
+
+    def test_init_with_asd_array(self):
+        psd = tupak.gw.detector.PowerSpectralDensity(frequency_array=self.frequency_array, asd_array=self.asd_array)
+        self.assertTrue(np.array_equal(self.frequency_array, psd.frequency_array))
+        self.assertTrue(np.array_equal(self.asd_array, psd.asd_array))
+        self.assertTrue(np.array_equal(self.psd_array, psd.psd_array))
+
+    def test_init_with_psd_array(self):
+        psd = tupak.gw.detector.PowerSpectralDensity(frequency_array=self.frequency_array, psd_array=self.psd_array)
+        self.assertTrue(np.array_equal(self.frequency_array, psd.frequency_array))
+        self.assertTrue(np.array_equal(self.asd_array, psd.asd_array))
+        self.assertTrue(np.array_equal(self.psd_array, psd.psd_array))
+
+    def test_setting_asd_array_after_init(self):
+        psd = tupak.gw.detector.PowerSpectralDensity(frequency_array=self.frequency_array)
+        psd.asd_array = self.asd_array
+        self.assertTrue(np.array_equal(self.frequency_array, psd.frequency_array))
+        self.assertTrue(np.array_equal(self.asd_array, psd.asd_array))
+        self.assertTrue(np.array_equal(self.psd_array, psd.psd_array))
+
+    def test_setting_psd_array_after_init(self):
+        psd = tupak.gw.detector.PowerSpectralDensity(frequency_array=self.frequency_array)
+        psd.psd_array = self.psd_array
+        self.assertTrue(np.array_equal(self.frequency_array, psd.frequency_array))
+        self.assertTrue(np.array_equal(self.asd_array, psd.asd_array))
+        self.assertTrue(np.array_equal(self.psd_array, psd.psd_array))
+
+    def test_power_spectral_density_interpolated_from_asd_array(self):
+        expected = np.array([25.])
+        psd = tupak.gw.detector.PowerSpectralDensity(frequency_array=self.frequency_array, asd_array=self.asd_array)
+        self.assertEqual(expected, psd.power_spectral_density_interpolated(2))
+
+    def test_power_spectral_density_interpolated_from_psd_array(self):
+        expected = np.array([25.])
+        psd = tupak.gw.detector.PowerSpectralDensity(frequency_array=self.frequency_array, psd_array=self.psd_array)
+        self.assertEqual(expected, psd.power_spectral_density_interpolated(2))
+
+    def test_from_amplitude_spectral_density_array(self):
+        actual = tupak.gw.detector.PowerSpectralDensity.from_amplitude_spectral_density_array(
+            frequency_array=self.frequency_array, asd_array=self.asd_array)
+        self.assertTrue(np.array_equal(self.psd_array, actual.psd_array))
+        self.assertTrue(np.array_equal(self.asd_array, actual.asd_array))
+
+    def test_from_power_spectral_density_array(self):
+        actual = tupak.gw.detector.PowerSpectralDensity.from_power_spectral_density_array(
+            frequency_array=self.frequency_array, psd_array=self.psd_array)
+        self.assertTrue(np.array_equal(self.psd_array, actual.psd_array))
+        self.assertTrue(np.array_equal(self.asd_array, actual.asd_array))
+
+    def test_repr(self):
+        psd = tupak.gw.detector.PowerSpectralDensity(frequency_array=self.frequency_array, psd_array=self.psd_array)
+        expected = 'PowerSpectralDensity(frequency_array={}, psd_array={}, asd_array={})'.format(self.frequency_array,
+                                                                                                 self.psd_array,
+                                                                                                 self.asd_array)
+        self.assertEqual(expected, repr(psd))
+
+
+class TestPowerSpectralDensityWithFiles(unittest.TestCase):
+
+    def setUp(self):
+        self.dir = os.path.join(os.path.dirname(__file__), 'noise_curves')
+        os.mkdir(self.dir)
+        self.asd_file = os.path.join(os.path.dirname(__file__), 'noise_curves', 'asd_test_file.txt')
+        self.psd_file = os.path.join(os.path.dirname(__file__), 'noise_curves', 'psd_test_file.txt')
+        with open(self.asd_file, 'w') as f:
+            f.write('1.\t1.0e-21\n2.\t2.0e-21\n3.\t3.0e-21')
+        with open(self.psd_file, 'w') as f:
+            f.write('1.\t1.0e-42\n2.\t4.0e-42\n3.\t9.0e-42')
+        self.frequency_array = np.array([1.0, 2.0, 3.0])
+        self.asd_array = np.array([1.0e-21, 2.0e-21, 3.0e-21])
+        self.psd_array = np.array([1.0e-42, 4.0e-42, 9.0e-42])
+
+    def tearDown(self):
+        os.remove(self.asd_file)
+        os.remove(self.psd_file)
+        os.rmdir(self.dir)
+        del self.dir
+        del self.asd_array
+        del self.psd_array
+        del self.asd_file
+        del self.psd_file
+
+    def test_init_with_psd_file(self):
+        psd = tupak.gw.detector.PowerSpectralDensity(frequency_array=self.frequency_array, psd_file=self.psd_file)
+        self.assertEqual(self.psd_file, psd.psd_file)
+        self.assertTrue(np.array_equal(self.psd_array, psd.psd_array))
+        self.assertTrue(np.allclose(self.asd_array, psd.asd_array, atol=1e-30))
+
+    def test_init_with_asd_file(self):
+        psd = tupak.gw.detector.PowerSpectralDensity(frequency_array=self.frequency_array, asd_file=self.asd_file)
+        self.assertEqual(self.asd_file, psd.asd_file)
+        self.assertTrue(np.allclose(self.psd_array, psd.psd_array, atol=1e-60))
+        self.assertTrue(np.array_equal(self.asd_array, psd.asd_array))
+
+    def test_setting_psd_array_after_init(self):
+        psd = tupak.gw.detector.PowerSpectralDensity(frequency_array=self.frequency_array)
+        psd.psd_file = self.psd_file
+        self.assertEqual(self.psd_file, psd.psd_file)
+        self.assertTrue(np.array_equal(self.psd_array, psd.psd_array))
+        self.assertTrue(np.allclose(self.asd_array, psd.asd_array, atol=1e-30))
+
+    def test_init_with_asd_array_after_init(self):
+        psd = tupak.gw.detector.PowerSpectralDensity(frequency_array=self.frequency_array)
+        psd.asd_file = self.asd_file
+        self.assertEqual(self.asd_file, psd.asd_file)
+        self.assertTrue(np.allclose(self.psd_array, psd.psd_array, atol=1e-60))
+        self.assertTrue(np.array_equal(self.asd_array, psd.asd_array))
+
+    def test_power_spectral_density_interpolated_from_asd_file(self):
+        expected = np.array([4.0e-42])
+        psd = tupak.gw.detector.PowerSpectralDensity(frequency_array=self.frequency_array, asd_file=self.asd_file)
+        self.assertTrue(np.allclose(expected, psd.power_spectral_density_interpolated(2), atol=1e-60))
+
+    def test_power_spectral_density_interpolated_from_psd_file(self):
+        expected = np.array([4.0e-42])
+        psd = tupak.gw.detector.PowerSpectralDensity(frequency_array=self.frequency_array, psd_file=self.psd_file)
+        self.assertTrue(np.allclose(expected, psd.power_spectral_density_interpolated(2), atol=1e-60))
+
+    def test_from_amplitude_spectral_density_file(self):
+        psd = tupak.gw.detector.PowerSpectralDensity.from_amplitude_spectral_density_file(asd_file=self.asd_file)
+        self.assertEqual(self.asd_file, psd.asd_file)
+        self.assertTrue(np.allclose(self.psd_array, psd.psd_array, atol=1e-60))
+        self.assertTrue(np.array_equal(self.asd_array, psd.asd_array))
+
+    def test_from_power_spectral_density_file(self):
+        psd = tupak.gw.detector.PowerSpectralDensity.from_power_spectral_density_file(psd_file=self.psd_file)
+        self.assertEqual(self.psd_file, psd.psd_file)
+        self.assertTrue(np.array_equal(self.psd_array, psd.psd_array))
+        self.assertTrue(np.allclose(self.asd_array, psd.asd_array, atol=1e-30))
+
+    def test_from_aligo(self):
+        psd = tupak.gw.detector.PowerSpectralDensity.from_aligo()
+        expected_filename = 'aLIGO_ZERO_DET_high_P_psd.txt'
+        expected = tupak.gw.detector.PowerSpectralDensity(psd_file=expected_filename)
+        actual_filename = psd.psd_file.split('/')[-1]
+        self.assertEqual(expected_filename, actual_filename)
+        self.assertTrue(np.allclose(expected.psd_array, psd.psd_array, atol=1e-60))
+        self.assertTrue(np.array_equal(expected.asd_array, psd.asd_array))
+
+    def test_check_file_psd_file_set_to_asd_file(self):
+        logger = logging.getLogger('tupak')
+        m = MagicMock()
+        logger.warning = m
+        psd = tupak.gw.detector.PowerSpectralDensity(psd_file=self.asd_file)
+        self.assertEqual(4, m.call_count)
+
+    def test_check_file_not_called_psd_file_set_to_psd_file(self):
+        logger = logging.getLogger('tupak')
+        m = MagicMock()
+        logger.warning = m
+        psd = tupak.gw.detector.PowerSpectralDensity(psd_file=self.psd_file)
+        self.assertEqual(0, m.call_count)
+
+    def test_check_file_asd_file_set_to_psd_file(self):
+        logger = logging.getLogger('tupak')
+        m = MagicMock()
+        logger.warning = m
+        psd = tupak.gw.detector.PowerSpectralDensity(asd_file=self.psd_file)
+        self.assertEqual(4, m.call_count)
+
+    def test_check_file_not_called_asd_file_set_to_asd_file(self):
+        logger = logging.getLogger('tupak')
+        m = MagicMock()
+        logger.warning = m
+        psd = tupak.gw.detector.PowerSpectralDensity(asd_file=self.asd_file)
+        self.assertEqual(0, m.call_count)
+
+    def test_from_frame_file(self):
+        expected_frequency_array = np.array([1., 2., 3.])
+        expected_psd_array = np.array([16., 25., 36.])
+        with mock.patch('tupak.gw.detector.InterferometerStrainData.set_from_frame_file') as m:
+            with mock.patch('tupak.gw.detector.InterferometerStrainData.create_power_spectral_density') as n:
+                n.return_value = expected_frequency_array, expected_psd_array
+                psd = tupak.gw.detector.PowerSpectralDensity.from_frame_file(frame_file=self.asd_file,
+                                                                             psd_start_time=0,
+                                                                             psd_duration=4)
+                self.assertTrue(np.array_equal(expected_frequency_array, psd.frequency_array))
+                self.assertTrue(np.array_equal(expected_psd_array, psd.psd_array))
+
+    def test_repr(self):
+        psd = tupak.gw.detector.PowerSpectralDensity(psd_file=self.psd_file)
+        expected = 'PowerSpectralDensity(psd_file=\'{}\', asd_file=\'{}\')'.format(self.psd_file, None)
+        self.assertEqual(expected, repr(psd))
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/test/gw_likelihood_tests.py b/test/gw_likelihood_tests.py
index c603a59e033e653b9595d6392872361411cc2c12..fa8e53b6ec255b5defd1ad98ccd7da0f9f39b46b 100644
--- a/test/gw_likelihood_tests.py
+++ b/test/gw_likelihood_tests.py
@@ -61,6 +61,11 @@ class TestBasicGWTransient(unittest.TestCase):
                          np.nan_to_num(-np.inf))
         self.likelihood.waveform_generator.parameters['mass_2'] = 29
 
+    def test_repr(self):
+        expected = 'BasicGravitationalWaveTransient(interferometers={},\n\twaveform_generator={})'.format(
+            self.interferometers, self.waveform_generator)
+        self.assertEqual(expected, repr(self.likelihood))
+
 
 class TestGWTransient(unittest.TestCase):
 
@@ -133,6 +138,12 @@ class TestGWTransient(unittest.TestCase):
                          np.nan_to_num(-np.inf))
         self.likelihood.waveform_generator.parameters['mass_2'] = 29
 
+    def test_repr(self):
+        expected = 'GravitationalWaveTransient(interferometers={},\n\twaveform_generator={},\n\t' \
+                   'time_marginalization={}, distance_marginalization={}, phase_marginalization={}, ' \
+                   'prior={})'.format(self.interferometers, self.waveform_generator, False, False, False, self.prior)
+        self.assertEqual(expected, repr(self.likelihood))
+
 
 class TestTimeMarginalization(unittest.TestCase):
 
diff --git a/test/likelihood_tests.py b/test/likelihood_tests.py
index 731d14532e3ef56bd2adce5aef7ad17303b7d3f5..9146cb3ac037d72522bdd6866c81f72a77626d0d 100644
--- a/test/likelihood_tests.py
+++ b/test/likelihood_tests.py
@@ -19,6 +19,11 @@ class TestLikelihoodBase(unittest.TestCase):
     def tearDown(self):
         del self.likelihood
 
+    def test_repr(self):
+        self.likelihood = tupak.core.likelihood.Likelihood(parameters=['a', 'b'])
+        expected = 'Likelihood(parameters=[\'a\', \'b\'])'
+        self.assertEqual(expected, repr(self.likelihood))
+
     def test_base_log_likelihood(self):
         self.assertTrue(np.isnan(self.likelihood.log_likelihood()))
 
@@ -125,6 +130,10 @@ class TestAnalytical1DLikelihood(unittest.TestCase):
                                          parameter2=self.parameter2_value)
         self.assertDictEqual(expected_model_parameters, self.analytical_1d_likelihood.model_parameters)
 
+    def test_repr(self):
+        expected = 'Analytical1DLikelihood(x={}, y={}, func={})'.format(self.x, self.y, self.func.__name__)
+        self.assertEqual(expected, repr(self.analytical_1d_likelihood))
+
 
 class TestGaussianLikelihood(unittest.TestCase):
 
@@ -182,6 +191,13 @@ class TestGaussianLikelihood(unittest.TestCase):
         likelihood.log_likelihood()
         self.assertTrue(likelihood.sigma is None)
 
+    def test_repr(self):
+        likelihood = tupak.core.likelihood.GaussianLikelihood(
+            self.x, self.y, self.function, sigma=self.sigma)
+        expected = 'GaussianLikelihood(x={}, y={}, func={}, sigma={})' \
+            .format(self.x, self.y, self.function.__name__, self.sigma)
+        self.assertEqual(expected, repr(likelihood))
+
 
 class TestStudentTLikelihood(unittest.TestCase):
 
@@ -258,6 +274,15 @@ class TestStudentTLikelihood(unittest.TestCase):
 
         self.assertAlmostEqual(4.0, likelihood.lam)
 
+    def test_repr(self):
+        nu = 0
+        sigma = 0.5
+        likelihood = tupak.core.likelihood.StudentTLikelihood(
+            self.x, self.y, self.function, nu=nu, sigma=sigma)
+        expected = 'StudentTLikelihood(x={}, y={}, func={}, nu={}, sigma={})' \
+            .format(self.x, self.y, self.function.__name__, nu, sigma)
+        self.assertEqual(expected, repr(likelihood))
+
 
 class TestPoissonLikelihood(unittest.TestCase):
 
@@ -357,6 +382,12 @@ class TestPoissonLikelihood(unittest.TestCase):
             m.return_value = 1
             self.assertEqual(0, poisson_likelihood.log_likelihood())
 
+    def test_repr(self):
+        likelihood = tupak.core.likelihood.PoissonLikelihood(
+            self.x, self.y, self.function)
+        expected = 'PoissonLikelihood(x={}, y={}, func={})'.format(self.x, self.y, self.function.__name__)
+        self.assertEqual(expected, repr(likelihood))
+
 
 class TestExponentialLikelihood(unittest.TestCase):
 
diff --git a/test/waveform_generator_tests.py b/test/waveform_generator_tests.py
index 4d67cc47d16967dc167f5f6ede0f02bde7a9f9f7..99dd5d3f005f3dd39503c2fbdc747d63f45a8e2e 100644
--- a/test/waveform_generator_tests.py
+++ b/test/waveform_generator_tests.py
@@ -32,6 +32,21 @@ class TestWaveformGeneratorInstantiationWithoutOptionalParameters(unittest.TestC
         del self.waveform_generator
         del self.simulation_parameters
 
+    def test_repr(self):
+        expected = 'WaveformGenerator(duration={}, sampling_frequency={}, start_time={}, ' \
+                   'frequency_domain_source_model={}, time_domain_source_model={}, parameters={}, ' \
+                   'parameter_conversion={}, non_standard_sampling_parameter_keys={}, waveform_arguments={})'\
+            .format(self.waveform_generator.duration,
+                    self.waveform_generator.sampling_frequency,
+                    self.waveform_generator.start_time,
+                    self.waveform_generator.frequency_domain_source_model.__name__,
+                    self.waveform_generator.time_domain_source_model,
+                    self.waveform_generator.parameters,
+                    None,
+                    self.waveform_generator.non_standard_sampling_parameter_keys,
+                    self.waveform_generator.waveform_arguments)
+        self.assertEqual(expected, repr(self.waveform_generator))
+
     def test_duration(self):
         self.assertEqual(self.waveform_generator.duration, 1)
 
diff --git a/tupak/core/likelihood.py b/tupak/core/likelihood.py
index 7776a6d5fce5f80fcc65885894bf4df7609a2989..d49513604523abeb67f4608057de7069b20095ee 100644
--- a/tupak/core/likelihood.py
+++ b/tupak/core/likelihood.py
@@ -17,6 +17,9 @@ class Likelihood(object):
         """
         self.parameters = parameters
 
+    def __repr__(self):
+        return self.__class__.__name__ + '(parameters={})'.format(self.parameters)
+
     def log_likelihood(self):
         """
 
@@ -69,6 +72,9 @@ class Analytical1DLikelihood(Likelihood):
         self.__func = func
         self.__function_keys = list(self.parameters.keys())
 
+    def __repr__(self):
+        return self.__class__.__name__ + '(x={}, y={}, func={})'.format(self.x, self.y, self.func.__name__)
+
     @property
     def func(self):
         """ Make func read-only """
@@ -147,6 +153,10 @@ class GaussianLikelihood(Analytical1DLikelihood):
         if self.sigma is None:
             self.parameters['sigma'] = None
 
+    def __repr__(self):
+        return self.__class__.__name__ + '(x={}, y={}, func={}, sigma={})'\
+            .format(self.x, self.y, self.func.__name__, self.sigma)
+
     def log_likelihood(self):
         return self.__summed_log_likelihood(sigma=self.__get_sigma())
 
@@ -189,6 +199,9 @@ class PoissonLikelihood(Analytical1DLikelihood):
 
         Analytical1DLikelihood.__init__(self, x=x, y=y, func=func)
 
+    def __repr__(self):
+        return Analytical1DLikelihood.__repr__(self)
+
     @property
     def y(self):
         """ Property assures that y-value is a positive integer. """
@@ -236,6 +249,9 @@ class ExponentialLikelihood(Analytical1DLikelihood):
         """
         Analytical1DLikelihood.__init__(self, x=x, y=y, func=func)
 
+    def __repr__(self):
+        return Analytical1DLikelihood.__repr__(self)
+
     @property
     def y(self):
         """ Property assures that y-value is positive. """
@@ -295,6 +311,10 @@ class StudentTLikelihood(Analytical1DLikelihood):
         if self.nu is None:
             self.parameters['nu'] = None
 
+    def __repr__(self):
+        return self.__class__.__name__ + '(x={}, y={}, func={}, nu={}, sigma={})'\
+            .format(self.x, self.y, self.func.__name__, self.nu, self.sigma)
+
     @property
     def lam(self):
         """ Converts 'scale' to 'precision' """
diff --git a/tupak/core/result.py b/tupak/core/result.py
index dc99e0946ff61aebfe98796fdbf2493475181c1a..091058fedeae886aebb494c5c12eb8983acd3745 100644
--- a/tupak/core/result.py
+++ b/tupak/core/result.py
@@ -372,24 +372,31 @@ class Result(dict):
 
     def construct_cbc_derived_parameters(self):
         """ Construct widely used derived parameters of CBCs """
-        self.posterior['mass_chirp'] = (
-            (self.posterior.mass_1 * self.posterior.mass_2) ** 0.6 / (
-                self.posterior.mass_1 + self.posterior.mass_2) ** 0.2)
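+        # chirp mass: (m1 * m2)**(3/5) / (m1 + m2)**(1/5)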
+        self.posterior['mass_chirp'] = (self.posterior.mass_1 * self.posterior.mass_2) ** 0.6 / (
+                self.posterior.mass_1 + self.posterior.mass_2) ** 0.2
+        self.search_parameter_keys.append('mass_chirp')
+        self.parameter_labels.append(r'$\mathcal{M}$')
+
         self.posterior['q'] = self.posterior.mass_2 / self.posterior.mass_1
-        self.posterior['eta'] = (
-            (self.posterior.mass_1 * self.posterior.mass_2) / (
-                self.posterior.mass_1 + self.posterior.mass_2) ** 2)
-
-        self.posterior['chi_eff'] = (
-            self.posterior.a_1 * np.cos(self.posterior.tilt_1) +
-            self.posterior.q * self.posterior.a_2 *
-            np.cos(self.posterior.tilt_2)) / (1 + self.posterior.q)
-
-        self.posterior['chi_p'] = np.maximum(
-            self.posterior.a_1 * np.sin(self.posterior.tilt_1),
-            (4 * self.posterior.q + 3) / (3 * self.posterior.q + 4) *
-            self.posterior.q * self.posterior.a_2 *
-            np.sin(self.posterior.tilt_2))
+        self.search_parameter_keys.append('q')
+        self.parameter_labels.append('$q$')
+
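+        # symmetric mass ratio: eta = m1 * m2 / (m1 + m2)**2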
+        self.posterior['eta'] = (self.posterior.mass_1 * self.posterior.mass_2) / (
+                self.posterior.mass_1 + self.posterior.mass_2) ** 2
+        self.search_parameter_keys.append('eta')
+        self.parameter_labels.append(r'$\eta$')
+
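+        # effective aligned spin: chi_eff = (a_1 cos(tilt_1) + q a_2 cos(tilt_2)) / (1 + q)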
+        self.posterior['chi_eff'] = (self.posterior.a_1 * np.cos(self.posterior.tilt_1) +
+                                     self.posterior.q * self.posterior.a_2 *
+                                     np.cos(self.posterior.tilt_2)) / (1 + self.posterior.q)
+        self.search_parameter_keys.append('chi_eff')
+        self.parameter_labels.append(r'$\chi_{\mathrm{eff}}$')
+
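+        # effective precessing spin: chi_p = max(a_1 sin(tilt_1), (4q + 3) / (3q + 4) q a_2 sin(tilt_2))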
+        self.posterior['chi_p'] = np.maximum(
+            self.posterior.a_1 * np.sin(self.posterior.tilt_1),
+            (4 * self.posterior.q + 3) / (3 * self.posterior.q + 4) *
+            self.posterior.q * self.posterior.a_2 * np.sin(self.posterior.tilt_2))
+        self.search_parameter_keys.append('chi_p')
+        self.parameter_labels.append(r'$\chi_{\mathrm{p}}$')
 
     def check_attribute_match_to_other_object(self, name, other_object):
         """ Check attribute name exists in other_object and is the same
diff --git a/tupak/core/sampler.py b/tupak/core/sampler.py
index e9700762e07618dca28051733d04f09175b9ae52..5d64bc2d05442d133c6fcad848e38e79ba0efca5 100644
--- a/tupak/core/sampler.py
+++ b/tupak/core/sampler.py
@@ -514,14 +514,14 @@ class Dynesty(Sampler):
                              resume=True, walks=self.ndim * 5, verbose=True,
                              check_point_delta_t=60 * 10, nlive=250)
 
-        # Overwrite default values with user specified values
-        self.__kwargs.update(kwargs)
-
         # Check if nlive was instead given by another name
-        if 'nlive' not in self.__kwargs:
+        if 'nlive' not in kwargs:
             for equiv in ['nlives', 'n_live_points', 'npoint', 'npoints']:
-                if equiv in self.__kwargs:
-                    self.__kwargs['nlive'] = self.__kwargs.pop(equiv)
+                if equiv in kwargs:
+                    kwargs['nlive'] = kwargs.pop(equiv)
+
+        # Overwrite default values with user specified values
+        self.__kwargs.update(kwargs)
 
         # Set the update interval
         if 'update_interval' not in self.__kwargs:
@@ -597,6 +597,9 @@ class Dynesty(Sampler):
         self.result.log_likelihood_evaluations = out.logl
         self.result.log_evidence = out.logz[-1]
         self.result.log_evidence_err = out.logzerr[-1]
+        self.result.nested_samples = pd.DataFrame(
+            out.samples, columns=self.search_parameter_keys)
+        self.result.nested_samples['weights'] = weights
 
         if self.plot:
             self.generate_trace_plots(out)
@@ -1176,6 +1179,9 @@ class Pymc3(Sampler):
         prior_map['Lorentzian'] = prior_map['Cauchy']
         prior_map['FromFile'] = prior_map['Interped']
 
+        # GW specific priors
+        prior_map['UniformComovingVolume'] = prior_map['Interped']
+
         # internally defined mappings for tupak priors
         prior_map['DeltaFunction'] = {'internal': self._deltafunction_prior}
         prior_map['Sine'] = {'internal': self._sine_prior}
@@ -1353,22 +1359,41 @@ class Pymc3(Sampler):
 
         step_methods = {m.__name__.lower(): m.__name__ for m in STEP_METHODS}
         if 'step' in self.__kwargs:
-            step_method = self.__kwargs.pop('step').lower()
+            self.step_method = self.__kwargs.pop('step')
+
+            # 'step' could be a dictionary of methods for different parameters, so check for this
+            if isinstance(self.step_method, (dict, OrderedDict)):
+                for key in self.step_method:
+                    if key not in self.__search_parameter_keys:
+                        raise ValueError("Setting a step method for an unknown parameter '{}'".format(key))
+                    else:
+                        if self.step_method[key].lower() not in step_methods:
+                            raise ValueError("Using invalid step method '{}'".format(self.step_method[key]))
+            else:
+                self.step_method = self.step_method.lower()
 
-            if step_method not in step_methods:
-                raise ValueError("Using invalid step method '{}'".format(step_method))
+                if self.step_method not in step_methods:
+                    raise ValueError("Using invalid step method '{}'".format(self.step_method))
         else:
-            step_method = None
+            self.step_method = None
 
         # initialise the PyMC3 model
         self.pymc3_model = pymc3.Model()
 
-        # set the step method
-        sm = None if step_method is None else pymc3.__dict__[step_methods[step_method]]()
-
         # set the prior
         self.set_prior()
 
+        # set the step method
+        if isinstance(self.step_method, (dict, OrderedDict)):
+            # create list of step methods (any not given will default to NUTS)
+            sm = []
+            with self.pymc3_model:
+                for key in self.step_method:
+                    curmethod = self.step_method[key].lower()
+                    sm.append(pymc3.__dict__[step_methods[curmethod]]([self.pymc3_priors[key]]))
+        else:
+            sm = None if self.step_method is None else pymc3.__dict__[step_methods[self.step_method]]()
+
         # if a custom log_likelihood function requires a `sampler` argument
         # then use that log_likelihood function, with the assumption that it
         # takes in a Pymc3 Sampler, with a pymc3_model attribute, and defines
@@ -1407,7 +1432,7 @@ class Pymc3(Sampler):
 
         self.setup_prior_mapping()
 
-        self.pymc3_priors = dict()
+        self.pymc3_priors = OrderedDict()
 
         pymc3 = self.external_sampler
 
@@ -1472,17 +1497,85 @@ class Pymc3(Sampler):
         Convert any tupak likelihoods to PyMC3 distributions.
         """
 
+        try:
+            import theano
+            import theano.tensor as tt
+            from theano.compile.ops import as_op
+        except ImportError:
+            raise ImportError("Could not import theano")
+
+        from tupak.core.likelihood import (GaussianLikelihood, PoissonLikelihood,
+                                           ExponentialLikelihood, StudentTLikelihood)
+        from tupak.gw.likelihood import (BasicGravitationalWaveTransient,
+                                         GravitationalWaveTransient)
+
+        # create theano Op for the log likelihood if not using a predefined model
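+        # (a custom Op is used because tupak likelihoods are plain Python
+        # callables rather than theano graph expressions)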
+        class LogLike(tt.Op):
+
+            itypes = [tt.dvector]
+            otypes = [tt.dscalar]
+
+            def __init__(self, parameters, loglike, priors):
+                self.parameters = parameters
+                self.likelihood = loglike
+                self.priors = priors
+
+                # set the fixed parameters
+                for key in self.priors.keys():
+                    if isinstance(self.priors[key], float):
+                        self.likelihood.parameters[key] = self.priors[key]
+
+                self.logpgrad = LogLikeGrad(self.parameters, self.likelihood, self.priors)
+
+            def perform(self, node, inputs, outputs):
+                theta, = inputs
+                for i, key in enumerate(self.parameters):
+                    self.likelihood.parameters[key] = theta[i]
+
+                outputs[0][0] = np.array(self.likelihood.log_likelihood())
+
+            def grad(self, inputs, g):
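+                # chain rule: scale the numerically-computed gradient of the
+                # log likelihood (from LogLikeGrad) by the upstream gradient g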
+                theta, = inputs
+                return [g[0]*self.logpgrad(theta)]
+
+        # create theano Op for calculating the gradient of the log likelihood
+        class LogLikeGrad(tt.Op):
+
+            itypes = [tt.dvector]
+            otypes = [tt.dvector]
+
+            def __init__(self, parameters, loglike, priors):
+                self.parameters = parameters
+                self.Nparams = len(parameters)
+                self.likelihood = loglike
+                self.priors = priors
+
+                # set the fixed parameters
+                for key in self.priors.keys():
+                    if isinstance(self.priors[key], float):
+                        self.likelihood.parameters[key] = self.priors[key]
+
+            def perform(self, node, inputs, outputs):
+                theta, = inputs
+
+                # define version of likelihood function to pass to derivative function
+                def lnlike(values):
+                    for i, key in enumerate(self.parameters):
+                        self.likelihood.parameters[key] = values[i]
+                    return self.likelihood.log_likelihood()
+
+                # calculate gradients
+                grads = utils.derivatives(theta, lnlike, abseps=1e-5, mineps=1e-12, reltol=1e-2)
+
+                outputs[0][0] = grads
+
         pymc3 = self.external_sampler
 
         with self.pymc3_model:
             #  check if it is a predefined likelhood function
-            if self.likelihood.__class__.__name__ == 'GaussianLikelihood':
+            if isinstance(self.likelihood, GaussianLikelihood):
                 # check required attributes exist
                 if (not hasattr(self.likelihood, 'sigma') or
                     not hasattr(self.likelihood, 'x') or
-                    not hasattr(self.likelihood, 'y') or
-                    not hasattr(self.likelihood, 'function') or
-                    not hasattr(self.likelihood, 'function_keys')):
+                    not hasattr(self.likelihood, 'y')):
                     raise ValueError("Gaussian Likelihood does not have all the correct attributes!")
 
                 if 'sigma' in self.pymc3_priors:
@@ -1496,17 +1589,15 @@ class Pymc3(Sampler):
                     if key not in self.likelihood.function_keys:
                         raise ValueError("Prior key '{}' is not a function key!".format(key))
 
-                model = self.likelihood.function(self.likelihood.x, **self.pymc3_priors)
+                model = self.likelihood.func(self.likelihood.x, **self.pymc3_priors)
 
                 # set the distribution
                 pymc3.Normal('likelihood', mu=model, sd=self.likelihood.sigma,
                              observed=self.likelihood.y)
-            elif self.likelihood.__class__.__name__ == 'PoissonLikelihood':
+            elif isinstance(self.likelihood, PoissonLikelihood):
                 # check required attributes exist
                 if (not hasattr(self.likelihood, 'x') or
-                    not hasattr(self.likelihood, 'y') or
-                    not hasattr(self.likelihood, 'function') or
-                    not hasattr(self.likelihood, 'function_keys')):
+                    not hasattr(self.likelihood, 'y')):
                     raise ValueError("Poisson Likelihood does not have all the correct attributes!")
 
                 for key in self.pymc3_priors:
@@ -1514,16 +1605,14 @@ class Pymc3(Sampler):
                         raise ValueError("Prior key '{}' is not a function key!".format(key))
 
                 # get rate function
-                model = self.likelihood.function(self.likelihood.x, **self.pymc3_priors)
+                model = self.likelihood.func(self.likelihood.x, **self.pymc3_priors)
 
                 # set the distribution
                 pymc3.Poisson('likelihood', mu=model, observed=self.likelihood.y)
-            elif self.likelihood.__class__.__name__ == 'ExponentialLikelihood':
+            elif isinstance(self.likelihood, ExponentialLikelihood):
                 # check required attributes exist
                 if (not hasattr(self.likelihood, 'x') or
-                    not hasattr(self.likelihood, 'y') or
-                    not hasattr(self.likelihood, 'function') or
-                    not hasattr(self.likelihood, 'function_keys')):
+                    not hasattr(self.likelihood, 'y')):
                     raise ValueError("Exponential Likelihood does not have all the correct attributes!")
 
                 for key in self.pymc3_priors:
@@ -1531,18 +1620,16 @@ class Pymc3(Sampler):
                         raise ValueError("Prior key '{}' is not a function key!".format(key))
 
                 # get mean function
-                model = self.likelihood.function(self.likelihood.x, **self.pymc3_priors)
+                model = self.likelihood.func(self.likelihood.x, **self.pymc3_priors)
 
                 # set the distribution
                 pymc3.Exponential('likelihood', lam=1. / model, observed=self.likelihood.y)
-            elif self.likelihood.__class__.__name__ == 'StudentTLikelihood':
+            elif isinstance(self.likelihood, StudentTLikelihood):
                 # check required attributes exist
                 if (not hasattr(self.likelihood, 'x') or
                     not hasattr(self.likelihood, 'y') or
                     not hasattr(self.likelihood, 'nu') or
-                    not hasattr(self.likelihood, 'sigma') or
-                    not hasattr(self.likelihood, 'function') or
-                    not hasattr(self.likelihood, 'function_keys')):
+                    not hasattr(self.likelihood, 'sigma')):
                     raise ValueError("StudentT Likelihood does not have all the correct attributes!")
 
                 if 'nu' in self.pymc3_priors:
@@ -1556,10 +1643,25 @@ class Pymc3(Sampler):
                     if key not in self.likelihood.function_keys:
                         raise ValueError("Prior key '{}' is not a function key!".format(key))
 
-                model = self.likelihood.function(self.likelihood.x, **self.pymc3_priors)
+                model = self.likelihood.func(self.likelihood.x, **self.pymc3_priors)
 
                 # set the distribution
                 pymc3.StudentT('likelihood', nu=self.likelihood.nu, mu=model, sd=self.likelihood.sigma, observed=self.likelihood.y)
+            elif isinstance(self.likelihood, (GravitationalWaveTransient, BasicGravitationalWaveTransient)):
+                # set theano Op - pass __search_parameter_keys, which only contains non-fixed variables
+                logl = LogLike(self.__search_parameter_keys, self.likelihood, self.pymc3_priors)
+
+                parameters = OrderedDict()
+                for key in self.__search_parameter_keys:
+                    try:
+                        parameters[key] = self.pymc3_priors[key]
+                    except KeyError:
+                        raise KeyError("Unknown key '{}' when setting GravitationalWaveTransient likelihood".format(key))
+
+                # convert to theano tensor variable
+                values = tt.as_tensor_variable(list(parameters.values()))
+
+                pymc3.DensityDist('likelihood', lambda v: logl(v), observed={'v': values})
             else:
                 raise ValueError("Unknown likelihood has been provided")
 
diff --git a/tupak/core/utils.py b/tupak/core/utils.py
index 3b7fcd611be714155aa51a39824419adcf17c9a0..71ae1c4894233cb17eacd4e08268a1d242b6e157 100644
--- a/tupak/core/utils.py
+++ b/tupak/core/utils.py
@@ -450,6 +450,141 @@ def set_up_command_line_arguments():
     return args
 
 
+def derivatives(vals, func, releps=1e-3, abseps=None, mineps=1e-9, reltol=1e-3,
+                epsscale=0.5, nonfixedidx=None):
+    """
+    Calculate the partial derivatives of a function at a set of values. The
+    derivatives are calculated using the central difference, using an iterative
+    method to check that the values converge as step size decreases.
+
+    Parameters
+    ----------
+    vals: array_like
+        A set of values, that are passed to a function, at which to calculate
+        the gradient of that function
+    func:
+        A function that takes in an array of values.
+    releps: float, array_like, 1e-3
+        The initial relative step size for calculating the derivative.
+    abseps: float, array_like, None
+        The initial absolute step size for calculating the derivative.
+        This overrides `releps` if set; if it is not set then `releps` is used.
+    mineps: float, 1e-9
+        The minimum relative step size at which to stop iterations if no
+        convergence is achieved.
+    reltol: float, 1e-3
+        The relative tolerance to which consecutive derivative estimates must
+        agree for the iteration to be considered converged.
+    epsscale: float, 0.5
+        The factor by which releps is scaled in each iteration.
+    nonfixedidx: array_like, None
+        An array of indices in `vals` that are _not_ fixed values and therefore
+        can have derivatives taken. If `None` then derivatives of all values
+        are calculated.
+
+    Returns
+    -------
+    grads: array_like
+        An array of gradients for each non-fixed value.
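+
+    Example
+    -------
+    A minimal illustrative sketch (hypothetical function and values): the
+    partial derivatives of f(x, y) = x**2 + 3*y at (1, 2) are (2, 3), so
+
+    >>> func = lambda vals: vals[0] ** 2 + 3. * vals[1]
+    >>> grads = derivatives([1., 2.], func)
+
+    gives `grads` approximately equal to `array([2., 3.])`.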
+    """
+
+    if nonfixedidx is None:
+        nonfixedidx = range(len(vals))
+
+    if len(nonfixedidx) > len(vals):
+        raise ValueError("Too many non-fixed values")
+
+    if max(nonfixedidx) >= len(vals) or min(nonfixedidx) < 0:
+        raise ValueError("Non-fixed indexes contain non-existent indices")
+
+    grads = np.zeros(len(nonfixedidx))
+
+    # maximum number of times the gradient can change sign
+    flipflopmax = 10.
+
+    # set steps
+    if abseps is None:
+        if isinstance(releps, float):
+            eps = np.abs(vals)*releps
+            eps[eps == 0.] = releps  # if any values are zero set eps to releps
+            teps = releps*np.ones(len(vals))
+        elif isinstance(releps, (list, np.ndarray)):
+            if len(releps) != len(vals):
+                raise ValueError("Problem with input relative step sizes")
+            eps = np.multiply(np.abs(vals), releps)
+            eps[eps == 0.] = np.array(releps)[eps == 0.]
+            teps = releps
+        else:
+            raise RuntimeError("Relative step sizes are not a recognised type!")
+    else:
+        if isinstance(abseps, float):
+            eps = abseps*np.ones(len(vals))
+        elif isinstance(abseps, (list, np.ndarray)):
+            if len(abseps) != len(vals):
+                raise ValueError("Problem with input absolute step sizes")
+            eps = np.array(abseps)
+        else:
+            raise RuntimeError("Absolute step sizes are not a recognised type!")
+        teps = eps
+
+    # for each value in vals calculate the gradient
+    count = 0
+    for i in nonfixedidx:
+        # initial parameter diffs
+        leps = eps[i]
+        cureps = teps[i]
+
+        flipflop = 0
+
+        # get central finite difference
+        fvals = np.copy(vals)
+        bvals = np.copy(vals)
+
+        # central difference
+        fvals[i] += 0.5*leps  # change forwards distance to half eps
+        bvals[i] -= 0.5*leps  # change backwards distance to half eps
+        cdiff = (func(fvals)-func(bvals))/leps
+
+        while 1:
+            fvals[i] -= 0.5*leps  # remove old step
+            bvals[i] += 0.5*leps
+
+            # change the difference by a factor of two
+            cureps *= epsscale
+            if cureps < mineps or flipflop > flipflopmax:
+                # if no convergence set flat derivative (TODO: check if there is a better thing to do instead)
+                logger.warning("Derivative calculation did not converge: setting flat derivative.")
+                grads[count] = 0.
+                break
+            leps *= epsscale
+
+            # central difference
+            fvals[i] += 0.5*leps  # change forwards distance to half eps
+            bvals[i] -= 0.5*leps  # change backwards distance to half eps
+            cdiffnew = (func(fvals)-func(bvals))/leps
+
+            if cdiffnew == cdiff:
+                grads[count] = cdiff
+                break
+
+            # check whether previous diff and current diff are the same within reltol
+            rat = (cdiff/cdiffnew)
+            if np.isfinite(rat) and rat > 0.:
+                # gradient has not changed sign
+                if np.abs(1.-rat) < reltol:
+                    grads[count] = cdiffnew
+                    break
+                else:
+                    cdiff = cdiffnew
+                    continue
+            else:
+                cdiff = cdiffnew
+                flipflop += 1
+                continue
+
+        count += 1
+
+    return grads
+
+
 command_line_args = set_up_command_line_arguments()
 setup_logger(print_version=True)
 
diff --git a/tupak/gw/calibration.py b/tupak/gw/calibration.py
index 31692ab43bdd50140faf6b66ec2a1b2d81cef495..1ed90538a15d683d698ed6358197d16ffedfd067 100644
--- a/tupak/gw/calibration.py
+++ b/tupak/gw/calibration.py
@@ -21,6 +21,9 @@ class Recalibrate(object):
         self.params = dict()
         self.prefix = prefix
 
+    def __repr__(self):
+        return self.__class__.__name__ + '(prefix=\'{}\')'.format(self.prefix)
+
     def get_calibration_factor(self, frequency_array, **params):
         """Apply calibration model
 
@@ -75,7 +78,17 @@ class CubicSpline(Recalibrate):
         if n_points < 4:
             raise ValueError('Cubic spline calibration requires at least 4 spline nodes.')
         self.n_points = n_points
-        self.spline_points = np.logspace(np.log10(minimum_frequency), np.log10(maximum_frequency), n_points)
+        self.minimum_frequency = minimum_frequency
+        self.maximum_frequency = maximum_frequency
+        self.__spline_points = np.logspace(np.log10(minimum_frequency), np.log10(maximum_frequency), n_points)
+
+    @property
+    def spline_points(self):
+        return self.__spline_points
+
+    def __repr__(self):
+        return self.__class__.__name__ + '(prefix=\'{}\', minimum_frequency={}, maximum_frequency={}, n_points={})'\
+            .format(self.prefix, self.minimum_frequency, self.maximum_frequency, self.n_points)
 
     def get_calibration_factor(self, frequency_array, **params):
         """Apply calibration model
diff --git a/tupak/gw/detector.py b/tupak/gw/detector.py
index b60496365152a8391662f2ffaa13a43cb0f22074..8e73c713a91025d676dd91f4a3c949da57b49fb3 100644
--- a/tupak/gw/detector.py
+++ b/tupak/gw/detector.py
@@ -427,7 +427,7 @@ class InterferometerStrainData(object):
             logger.info(
                 "Low pass filter frequency of {}Hz requested, this is equal"
                 " or greater than the Nyquist frequency so no filter applied"
-                .format(filter_freq))
+                    .format(filter_freq))
             return
 
         logger.debug("Applying low pass filter with filter frequency {}".format(filter_freq))
@@ -808,6 +808,16 @@ class Interferometer(object):
             minimum_frequency=minimum_frequency,
             maximum_frequency=maximum_frequency)
 
+    def __repr__(self):
+        return self.__class__.__name__ + '(name=\'{}\', power_spectral_density={}, minimum_frequency={}, ' \
+                                         'maximum_frequency={}, length={}, latitude={}, longitude={}, elevation={}, ' \
+                                         'xarm_azimuth={}, yarm_azimuth={}, xarm_tilt={}, yarm_tilt={})' \
+            .format(self.name, self.power_spectral_density, float(self.minimum_frequency),
+                    float(self.maximum_frequency), float(self.length), float(self.latitude), float(self.longitude),
+                    float(self.elevation), float(self.xarm_azimuth), float(self.yarm_azimuth), float(self.xarm_tilt),
+                    float(self.yarm_tilt))
+
     @property
     def minimum_frequency(self):
         return self.strain_data.minimum_frequency
@@ -1512,61 +1522,54 @@ class TriangularInterferometer(InterferometerList):
 
 class PowerSpectralDensity(object):
 
-    def __init__(self, **kwargs):
+    def __init__(self, frequency_array=None, psd_array=None, asd_array=None,
+                 psd_file=None, asd_file=None):
         """
         Instantiate a new PowerSpectralDensity object.
 
-        If called with no argument, `PowerSpectralDensity()` will return an
-        empty instance which can be filled with one of the `set_from` methods.
-        You can also initialise a new PowerSpectralDensity object giving the
-        arguments of any `set_from` method and an attempt will be made to use
-        this information to load/create the power spectral density.
-
         Example
         -------
-        Using the `set_from` method directly (here `psd_file` is a string
+        Using a `from_*` static method directly (here `psd_file` is a string
         containing the path to the file to load):
-        >>> power_spectral_density = PowerSpectralDensity()
-        >>> power_spectral_density.set_from_power_spectral_density_file(psd_file)
+        >>> power_spectral_density = PowerSpectralDensity.from_power_spectral_density_file(psd_file)
 
         Alternatively (and equivalently) setting the psd_file directly:
         >>> power_spectral_density = PowerSpectralDensity(psd_file=psd_file)
 
-        Note: for the "direct" method to work, you must provide the input
-        as a keyword argument as above.
-
         Attributes
         ----------
-        amplitude_spectral_density: array_like
+        asd_array: array_like
             Array representation of the ASD
-        amplitude_spectral_density_file: str
+        asd_file: str
             Name of the ASD file
         frequency_array: array_like
             Array containing the frequencies of the ASD/PSD values
-        power_spectral_density: array_like
+        psd_array: array_like
             Array representation of the PSD
-        power_spectral_density_file: str
+        psd_file: str
             Name of the PSD file
         power_spectral_density_interpolated: scipy.interpolated.interp1d
             Interpolated function of the PSD
 
         """
-        self.__power_spectral_density = None
-        self.__amplitude_spectral_density = None
+        self.frequency_array = np.array(frequency_array)
+        if psd_array is not None:
+            self.psd_array = psd_array
+        if asd_array is not None:
+            self.asd_array = asd_array
+        self.psd_file = psd_file
+        self.asd_file = asd_file
 
-        self.frequency_array = []
-        self.power_spectral_density_interpolated = None
-
-        for key in kwargs:
-            try:
-                expanded_key = (key.replace('psd', 'power_spectral_density')
-                                .replace('asd', 'amplitude_spectral_density'))
-                m = getattr(self, 'set_from_{}'.format(expanded_key))
-                m(**kwargs)
-            except AttributeError:
-                logger.info("Tried setting PSD from init kwarg {} and failed".format(key))
+    def __repr__(self):
+        if self.asd_file is not None or self.psd_file is not None:
+            return self.__class__.__name__ + '(psd_file=\'{}\', asd_file=\'{}\')'\
+                .format(self.psd_file, self.asd_file)
+        else:
+            return self.__class__.__name__ + '(frequency_array={}, psd_array={}, asd_array={})' \
+                .format(self.frequency_array, self.psd_array, self.asd_array)
 
-    def set_from_amplitude_spectral_density_file(self, asd_file):
+    @staticmethod
+    def from_amplitude_spectral_density_file(asd_file):
         """ Set the amplitude spectral density from a given file
 
         Parameters
@@ -1575,18 +1578,10 @@ class PowerSpectralDensity(object):
             File containing amplitude spectral density, format 'f h_f'
 
         """
+        return PowerSpectralDensity(asd_file=asd_file)
 
-        self.amplitude_spectral_density_file = asd_file
-        self.import_amplitude_spectral_density()
-        if min(self.amplitude_spectral_density) < 1e-30:
-            logger.warning("You specified an amplitude spectral density file.")
-            logger.warning("{} WARNING {}".format("*" * 30, "*" * 30))
-            logger.warning("The minimum of the provided curve is {:.2e}.".format(
-                min(self.amplitude_spectral_density)))
-            logger.warning(
-                "You may have intended to provide this as a power spectral density.")
-
-    def set_from_power_spectral_density_file(self, psd_file):
+    @staticmethod
+    def from_power_spectral_density_file(psd_file):
         """ Set the power spectral density from a given file
 
         Parameters
@@ -1595,20 +1590,12 @@ class PowerSpectralDensity(object):
             File containing power spectral density, format 'f h_f'
 
         """
+        return PowerSpectralDensity(psd_file=psd_file)
 
-        self.power_spectral_density_file = psd_file
-        self.import_power_spectral_density()
-        if min(self.power_spectral_density) > 1e-30:
-            logger.warning("You specified a power spectral density file.")
-            logger.warning("{} WARNING {}".format("*" * 30, "*" * 30))
-            logger.warning("The minimum of the provided curve is {:.2e}.".format(
-                min(self.power_spectral_density)))
-            logger.warning(
-                "You may have intended to provide this as an amplitude spectral density.")
-
-    def set_from_frame_file(self, frame_file, psd_start_time, psd_duration,
-                            fft_length=4, sampling_frequency=4096, roll_off=0.2,
-                            channel=None):
+    @staticmethod
+    def from_frame_file(frame_file, psd_start_time, psd_duration,
+                        fft_length=4, sampling_frequency=4096, roll_off=0.2,
+                        channel=None):
         """ Generate power spectral density from a frame file
 
         Parameters
@@ -1630,103 +1617,123 @@ class PowerSpectralDensity(object):
             Name of channel to use to generate PSD.
 
         """
-
         strain = InterferometerStrainData(roll_off=roll_off)
         strain.set_from_frame_file(
             frame_file, start_time=psd_start_time, duration=psd_duration,
             channel=channel, sampling_frequency=sampling_frequency)
+        frequency_array, psd_array = strain.create_power_spectral_density(fft_length=fft_length)
+        return PowerSpectralDensity(frequency_array=frequency_array, psd_array=psd_array)
 
-        f, psd = strain.create_power_spectral_density(fft_length=fft_length)
-        self.frequency_array = f
-        self.power_spectral_density = psd
-
-    def set_from_amplitude_spectral_density_array(self, frequency_array,
-                                                  asd_array):
-        self.frequency_array = frequency_array
-        self.amplitude_spectral_density = asd_array
+    @staticmethod
+    def from_amplitude_spectral_density_array(frequency_array, asd_array):
+        return PowerSpectralDensity(frequency_array=frequency_array, asd_array=asd_array)
 
-    def set_from_power_spectral_density_array(self, frequency_array, psd_array):
-        self.frequency_array = frequency_array
-        self.power_spectral_density = psd_array
+    @staticmethod
+    def from_power_spectral_density_array(frequency_array, psd_array):
+        return PowerSpectralDensity(frequency_array=frequency_array, psd_array=psd_array)
 
-    def set_from_aLIGO(self):
-        psd_file = 'aLIGO_ZERO_DET_high_P_psd.txt'
+    @staticmethod
+    def from_aligo():
         logger.info("No power spectral density provided, using aLIGO, "
                     "zero detuning, high power.")
-        self.set_from_power_spectral_density_file(psd_file)
+        return PowerSpectralDensity.from_power_spectral_density_file(psd_file='aLIGO_ZERO_DET_high_P_psd.txt')
 
     @property
-    def power_spectral_density(self):
-        if self.__power_spectral_density is not None:
-            return self.__power_spectral_density
-        else:
-            self.set_to_aLIGO()
-            return self.__power_spectral_density
+    def psd_array(self):
+        return self.__psd_array
 
-    @power_spectral_density.setter
-    def power_spectral_density(self, power_spectral_density):
-        self._check_frequency_array_matches_density_array(power_spectral_density)
-        self.__power_spectral_density = power_spectral_density
-        self._interpolate_power_spectral_density()
-        self.__amplitude_spectral_density = power_spectral_density ** 0.5
+    @psd_array.setter
+    def psd_array(self, psd_array):
+        self.__check_frequency_array_matches_density_array(psd_array)
+        self.__psd_array = np.array(psd_array)
+        self.__asd_array = self.__psd_array ** 0.5
+        self.__interpolate_power_spectral_density()
 
     @property
-    def amplitude_spectral_density(self):
-        return self.__amplitude_spectral_density
-
-    @amplitude_spectral_density.setter
-    def amplitude_spectral_density(self, amplitude_spectral_density):
-        self._check_frequency_array_matches_density_array(amplitude_spectral_density)
-        self.__amplitude_spectral_density = amplitude_spectral_density
-        self.__power_spectral_density = amplitude_spectral_density ** 2
-        self._interpolate_power_spectral_density()
-
-    def import_amplitude_spectral_density(self):
+    def asd_array(self):
+        return self.__asd_array
+
+    @asd_array.setter
+    def asd_array(self, asd_array):
+        self.__check_frequency_array_matches_density_array(asd_array)
+        self.__asd_array = np.array(asd_array)
+        self.__psd_array = self.__asd_array ** 2
+        self.__interpolate_power_spectral_density()
+
+    def __check_frequency_array_matches_density_array(self, density_array):
+        if len(self.frequency_array) != len(density_array):
+            raise ValueError('Provided spectral density does not match frequency array. Not updating.\n'
+                             'Length spectral density {}\n Length frequency array {}\n'
+                             .format(len(density_array), len(self.frequency_array)))
+
+    def __interpolate_power_spectral_density(self):
+        """Interpolate the loaded power spectral density so it can be resampled
+           for arbitrary frequency arrays.
         """
-        Automagically load one of the amplitude spectral density curves
-        contained in the noise_curves directory.
+        self.__power_spectral_density_interpolated = interp1d(self.frequency_array,
+                                                              self.psd_array,
+                                                              bounds_error=False,
+                                                              fill_value=np.inf)
 
-        Test if the file contains a path (i.e., contains '/').
-        If not assume the file is in the default directory.
-        """
+    @property
+    def power_spectral_density_interpolated(self):
+        return self.__power_spectral_density_interpolated
 
-        if '/' not in self.amplitude_spectral_density_file:
-            self.amplitude_spectral_density_file = os.path.join(
-                os.path.dirname(__file__), 'noise_curves',
-                self.amplitude_spectral_density_file)
+    @property
+    def asd_file(self):
+        return self.__asd_file
+
+    @asd_file.setter
+    def asd_file(self, asd_file):
+        asd_file = self.__validate_file_name(file=asd_file)
+        self.__asd_file = asd_file
+        if asd_file is not None:
+            self.__import_amplitude_spectral_density()
+            self.__check_file_was_asd_file()
+
+    def __check_file_was_asd_file(self):
+        if min(self.asd_array) < 1e-30:
+            logger.warning("You specified an amplitude spectral density file.")
+            logger.warning("{} WARNING {}".format("*" * 30, "*" * 30))
+            logger.warning("The minimum of the provided curve is {:.2e}.".format(min(self.asd_array)))
+            logger.warning("You may have intended to provide this as a power spectral density.")
 
-        self.frequency_array, self.amplitude_spectral_density = np.genfromtxt(
-            self.amplitude_spectral_density_file).T
+    @property
+    def psd_file(self):
+        return self.__psd_file
+
+    @psd_file.setter
+    def psd_file(self, psd_file):
+        psd_file = self.__validate_file_name(file=psd_file)
+        self.__psd_file = psd_file
+        if psd_file is not None:
+            self.__import_power_spectral_density()
+            self.__check_file_was_psd_file()
+
+    def __check_file_was_psd_file(self):
+        if min(self.psd_array) > 1e-30:
+            logger.warning("You specified a power spectral density file.")
+            logger.warning("{} WARNING {}".format("*" * 30, "*" * 30))
+            logger.warning("The minimum of the provided curve is {:.2e}.".format(min(self.psd_array)))
+            logger.warning("You may have intended to provide this as an amplitude spectral density.")
 
-    def import_power_spectral_density(self):
+    @staticmethod
+    def __validate_file_name(file):
         """
-        Automagically load one of the power spectral density curves contained
-        in the noise_curves directory.
-
         Test if the file contains a path (i.e., contains '/').
         If not assume the file is in the default directory.
         """
-        if '/' not in self.power_spectral_density_file:
-            self.power_spectral_density_file = os.path.join(
-                os.path.dirname(__file__), 'noise_curves',
-                self.power_spectral_density_file)
-        self.frequency_array, self.power_spectral_density = np.genfromtxt(
-            self.power_spectral_density_file).T
+        if file is not None and '/' not in file:
+            file = os.path.join(os.path.dirname(__file__), 'noise_curves', file)
+        return file
 
-    def _check_frequency_array_matches_density_array(self, density_array):
-        """Check the provided frequency and spectral density arrays match."""
-        try:
-            self.frequency_array - density_array
-        except ValueError as e:
-            raise (e, 'Provided spectral density does not match frequency array. Not updating.')
+    def __import_amplitude_spectral_density(self):
+        """ Automagically load an amplitude spectral density curve """
+        self.frequency_array, self.asd_array = np.genfromtxt(self.asd_file).T
 
-    def _interpolate_power_spectral_density(self):
-        """Interpolate the loaded power spectral density so it can be resampled
-           for arbitrary frequency arrays.
-        """
-        self.power_spectral_density_interpolated = interp1d(
-            self.frequency_array, self.power_spectral_density, bounds_error=False,
-            fill_value=np.inf)
+    def __import_power_spectral_density(self):
+        """ Automagically load a power spectral density curve """
+        self.frequency_array, self.psd_array = np.genfromtxt(self.psd_file).T
 
     def get_noise_realisation(self, sampling_frequency, duration):
         """
@@ -1746,7 +1753,7 @@ class PowerSpectralDensity(object):
 
         """
         white_noise, frequencies = utils.create_white_noise(sampling_frequency, duration)
-        frequency_domain_strain = self.power_spectral_density_interpolated(frequencies) ** 0.5 * white_noise
+        frequency_domain_strain = self.power_spectral_density_interpolated(frequencies) ** 0.5 * white_noise
         out_of_bounds = (frequencies < min(self.frequency_array)) | (frequencies > max(self.frequency_array))
         frequency_domain_strain[out_of_bounds] = 0 * (1 + 1j)
         return frequency_domain_strain, frequencies
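
The hunks above replace the mutating `set_from_*` methods with `from_*` constructors that return a ready-to-use object. A minimal sketch of the intended usage (assuming the class is importable from tupak.gw.detector, as elsewhere in the package; the array values below are placeholders):

    import numpy as np
    from tupak.gw.detector import PowerSpectralDensity

    # Build directly from arrays; setting either array keeps psd_array and
    # asd_array consistent and refreshes the interpolant.
    frequencies = np.linspace(20, 2048, 1000)
    asd = 1e-23 * np.ones_like(frequencies)  # placeholder ASD values
    psd = PowerSpectralDensity.from_amplitude_spectral_density_array(
        frequency_array=frequencies, asd_array=asd)
    assert np.allclose(psd.psd_array, asd ** 2)

    # Or load a bundled curve; bare file names resolve against noise_curves/.
    design = PowerSpectralDensity.from_power_spectral_density_file(
        psd_file='aLIGO_ZERO_DET_high_P_psd.txt')
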
@@ -1954,7 +1961,7 @@ def get_interferometer_with_fake_noise_and_injection(
         start_time = injection_parameters['geocent_time'] + 2 - duration
 
     interferometer = get_empty_interferometer(name)
-    interferometer.power_spectral_density.set_from_aLIGO()
+    interferometer.power_spectral_density = PowerSpectralDensity.from_aligo()
     if zero_noise:
         interferometer.set_strain_data_from_zero_noise(
             sampling_frequency=sampling_frequency, duration=duration,
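
The final hunk above swaps the in-place `set_from_aLIGO()` call for assignment of a freshly constructed object. The same pattern applies outside this helper; a sketch, assuming `get_empty_interferometer` lives in the same module as PowerSpectralDensity:

    from tupak.gw.detector import PowerSpectralDensity, get_empty_interferometer

    ifo = get_empty_interferometer('H1')
    ifo.power_spectral_density = PowerSpectralDensity.from_aligo()

    # Draw a frequency-domain noise realisation coloured by this PSD.
    frequency_domain_strain, frequencies = \
        ifo.power_spectral_density.get_noise_realisation(
            sampling_frequency=2048, duration=4)
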
diff --git a/tupak/gw/likelihood.py b/tupak/gw/likelihood.py
index 66011890fd00256773015c9074380e84d3dc92a3..498358c305e49106605144d7df94f5dce0ed531b 100644
--- a/tupak/gw/likelihood.py
+++ b/tupak/gw/likelihood.py
@@ -82,6 +82,12 @@ class GravitationalWaveTransient(likelihood.Likelihood):
             self._setup_distance_marginalization()
             prior['luminosity_distance'] = float(self._ref_dist)
 
+    def __repr__(self):
+        return self.__class__.__name__ + '(interferometers={},\n\twaveform_generator={},\n\ttime_marginalization={}, ' \
+                                         'distance_marginalization={}, phase_marginalization={}, prior={})'\
+            .format(self.interferometers, self.waveform_generator, self.time_marginalization,
+                    self.distance_marginalization, self.phase_marginalization, self.prior)
+
     def _check_set_duration_and_sampling_frequency_of_waveform_generator(self):
         """ Check the waveform_generator has the same duration and
         sampling_frequency as the interferometers. If they are unset, then
@@ -308,6 +314,10 @@ class BasicGravitationalWaveTransient(likelihood.Likelihood):
         self.interferometers = interferometers
         self.waveform_generator = waveform_generator
 
+    def __repr__(self):
+        return self.__class__.__name__ + '(interferometers={},\n\twaveform_generator={})'\
+            .format(self.interferometers, self.waveform_generator)
+
     def noise_log_likelihood(self):
         """ Calculates the real part of noise log-likelihood
 
diff --git a/tupak/gw/waveform_generator.py b/tupak/gw/waveform_generator.py
index 88c892abae44137e04bac4941fbd544523f5e521..1c0f785823ac373a1dd1c8d2306e524ae0d17b51 100644
--- a/tupak/gw/waveform_generator.py
+++ b/tupak/gw/waveform_generator.py
@@ -6,7 +6,7 @@ class WaveformGenerator(object):
 
     def __init__(self, duration=None, sampling_frequency=None, start_time=0, frequency_domain_source_model=None,
                  time_domain_source_model=None, parameters=None,
-                 parameter_conversion=lambda parameters, search_keys: (parameters, []),
+                 parameter_conversion=None,
                  non_standard_sampling_parameter_keys=None,
                  waveform_arguments=None):
         """ A waveform generator
@@ -52,7 +52,10 @@ class WaveformGenerator(object):
         self.__parameters_from_source_model()
         self.duration = duration
         self.sampling_frequency = sampling_frequency
-        self.parameter_conversion = parameter_conversion
+        if parameter_conversion is None:
+            self.parameter_conversion = lambda params, search_keys: (params, [])
+        else:
+            self.parameter_conversion = parameter_conversion
         self.non_standard_sampling_parameter_keys = non_standard_sampling_parameter_keys
         self.parameters = parameters
         if waveform_arguments is not None:
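
Replacing the lambda default with `None` plus an in-body fallback keeps a callable out of the default arguments and lets the `__repr__` added below report `parameter_conversion=None` when the anonymous default is in use. A standalone sketch of the pattern (the names are illustrative, not tupak API):

    def identity_conversion(parameters, search_keys):
        # Same behaviour as the old lambda default: no conversion, no added keys.
        return parameters, []

    class Generator(object):
        def __init__(self, parameter_conversion=None):
            if parameter_conversion is None:
                parameter_conversion = lambda params, search_keys: (params, [])
            self.parameter_conversion = parameter_conversion

        def conversion_name(self):
            # Mirrors the __repr__ logic below: report None for the anonymous default.
            name = self.parameter_conversion.__name__
            return None if name == '<lambda>' else name

    print(Generator().conversion_name())                     # None
    print(Generator(identity_conversion).conversion_name())  # identity_conversion
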
@@ -66,6 +69,27 @@ class WaveformGenerator(object):
         self.__full_source_model_keyword_arguments.update(self.parameters)
         self.__added_keys = []
 
+    def __repr__(self):
+        if self.frequency_domain_source_model is not None:
+            fdsm_name = self.frequency_domain_source_model.__name__
+        else:
+            fdsm_name = None
+        if self.time_domain_source_model is not None:
+            tdsm_name = self.time_domain_source_model.__name__
+        else:
+            tdsm_name = None
+        if self.parameter_conversion.__name__ == '<lambda>':
+            param_conv_name = None
+        else:
+            param_conv_name = self.parameter_conversion.__name__
+
+        return self.__class__.__name__ + '(duration={}, sampling_frequency={}, start_time={}, ' \
+                                         'frequency_domain_source_model={}, time_domain_source_model={}, ' \
+                                         'parameters={}, parameter_conversion={}, ' \
+                                         'non_standard_sampling_parameter_keys={}, waveform_arguments={})'\
+            .format(self.duration, self.sampling_frequency, self.start_time, fdsm_name, tdsm_name, self.parameters,
+                    param_conv_name, self.non_standard_sampling_parameter_keys, self.waveform_arguments)
+
     def frequency_domain_strain(self):
         """ Rapper to source_model.