diff --git a/bilby/core/prior.py b/bilby/core/prior.py
index 2df4b7d16981898d6be0002bbe59198f745d5316..8bc6e4e5daf9324868b700d96ea03443dd9f5218 100644
--- a/bilby/core/prior.py
+++ b/bilby/core/prior.py
@@ -210,20 +210,24 @@ class PriorDict(OrderedDict):
         """
         return np.product([self[key].prob(sample[key]) for key in sample], **kwargs)
 
-    def ln_prob(self, sample):
+    def ln_prob(self, sample, axis=None):
         """
 
         Parameters
         ----------
         sample: dict
-            Dictionary of the samples of which we want to have the log probability of
+            Dictionary of samples for which to calculate the log probability
+        axis: None or int
+            Axis along which the summation is performed
 
         Returns
         -------
-        float: Joint log probability of all the individual sample probabilities
+        float or ndarray:
+            Joint log probability, the sum of the individual log probabilities
 
         """
-        return np.sum([self[key].ln_prob(sample[key]) for key in sample])
+        return np.sum([self[key].ln_prob(sample[key]) for key in sample],
+                      axis=axis)
 
     def rescale(self, keys, theta):
         """Rescale samples from unit cube to prior
diff --git a/bilby/core/result.py b/bilby/core/result.py
index 84cf24ca01ad78f78085017a47f9caed8ad9105d..26a456680f14f7892779dd228b4b1f936e7a94ff 100644
--- a/bilby/core/result.py
+++ b/bilby/core/result.py
@@ -74,9 +74,10 @@ class Result(object):
                  nested_samples=None, log_evidence=np.nan,
                  log_evidence_err=np.nan, log_noise_evidence=np.nan,
                  log_bayes_factor=np.nan, log_likelihood_evaluations=None,
-                 sampling_time=None, nburn=None, walkers=None,
-                 max_autocorrelation_time=None, parameter_labels=None,
-                 parameter_labels_with_unit=None, version=None):
+                 log_prior_evaluations=None, sampling_time=None, nburn=None,
+                 walkers=None, max_autocorrelation_time=None,
+                 parameter_labels=None, parameter_labels_with_unit=None,
+                 version=None):
         """ A class to store the results of the sampling run
 
         Parameters
@@ -102,6 +103,8 @@ class Result(object):
             Natural log evidences
         log_likelihood_evaluations: array_like
             The evaluations of the likelihood for each sample point
+        log_prior_evaluations: array_like
+            The evaluations of the log prior for each sample point
         sampling_time: float
             The time taken to complete the sampling
         nburn: int
@@ -143,6 +146,7 @@ class Result(object):
         self.log_noise_evidence = log_noise_evidence
         self.log_bayes_factor = log_bayes_factor
         self.log_likelihood_evaluations = log_likelihood_evaluations
+        self.log_prior_evaluations = log_prior_evaluations
         self.sampling_time = sampling_time
         self.version = version
         self.max_autocorrelation_time = max_autocorrelation_time
@@ -269,8 +273,8 @@ class Result(object):
             'log_noise_evidence', 'log_bayes_factor', 'priors', 'posterior',
             'injection_parameters', 'meta_data', 'search_parameter_keys',
             'fixed_parameter_keys', 'sampling_time', 'sampler_kwargs',
-            'log_likelihood_evaluations', 'samples', 'nested_samples',
-            'walkers', 'nburn', 'parameter_labels',
+            'log_likelihood_evaluations', 'log_prior_evaluations', 'samples',
+            'nested_samples', 'walkers', 'nburn', 'parameter_labels',
             'parameter_labels_with_unit', 'version']
         dictionary = OrderedDict()
         for attr in save_attrs:
@@ -855,6 +859,11 @@ class Result(object):
                     data_frame[key] = priors[key]
             data_frame['log_likelihood'] = getattr(
                 self, 'log_likelihood_evaluations', np.nan)
+            if self.log_prior_evaluations is None:
+                data_frame['log_prior'] = self.priors.ln_prob(
+                    data_frame[self.search_parameter_keys], axis=0)
+            else:
+                data_frame['log_prior'] = self.log_prior_evaluations
         if conversion_function is not None:
             data_frame = conversion_function(data_frame, likelihood, priors)
         self.posterior = data_frame
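
(A note on the fallback branch above: iterating a pandas DataFrame yields
its column labels, so `data_frame[self.search_parameter_keys]` can stand
in for the `sample` dict expected by PriorDict.ln_prob. A minimal sketch,
with the parameter name `x` illustrative only.)

import pandas as pd
from bilby.core.prior import PriorDict, Uniform

priors = PriorDict(dict(x=Uniform(0, 1, 'x')))
df = pd.DataFrame(dict(x=[0.1, 0.5, 0.9]))

# `for key in df` iterates column labels, so this is equivalent to
# priors.ln_prob({'x': df['x']}, axis=0): one log prior per row.
log_prior = priors.ln_prob(df[['x']], axis=0)
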
diff --git a/bilby/core/sampler/cpnest.py b/bilby/core/sampler/cpnest.py
index 951bff7883a607a1e59c23c3d8e6b75d186cedc5..0936900f555d0f6ede2405f059b21837100c4409 100644
--- a/bilby/core/sampler/cpnest.py
+++ b/bilby/core/sampler/cpnest.py
@@ -84,6 +84,8 @@ class Cpnest(NestedSampler):
             out.plot()
 
         self.result.posterior = DataFrame(out.posterior_samples)
+        self.result.posterior.rename(columns=dict(
+            logL='log_likelihood', logPrior='log_prior'), inplace=True)
         self.result.log_evidence = out.NS.state.logZ
         self.result.log_evidence_err = np.nan
         return self.result
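
(The rename above maps cpnest's native posterior column names onto the
labels bilby uses elsewhere; a standalone sketch of the pandas call.)

import pandas as pd

df = pd.DataFrame({'logL': [-1.2], 'logPrior': [-0.7]})
df.rename(columns=dict(logL='log_likelihood', logPrior='log_prior'),
          inplace=True)
# df now has columns 'log_likelihood' and 'log_prior'.
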
diff --git a/bilby/core/sampler/emcee.py b/bilby/core/sampler/emcee.py
index cec198d11bbe3933acc878ce585919d6d06846fc..9f028c9fc91901344434aaf434c48d190daa50cf 100644
--- a/bilby/core/sampler/emcee.py
+++ b/bilby/core/sampler/emcee.py
@@ -121,6 +121,10 @@ class Emcee(MCMCSampler):
                 "The run has finished, but the chain is not burned in: "
                 "`nburn < nsteps`. Try increasing the number of steps.")
         self.result.samples = sampler.chain[:, self.nburn:, :].reshape((-1, self.ndim))
+        blobs_flat = np.array(sampler.blobs)[self.nburn:, :, :].reshape((-1, 2))
+        log_likelihoods, log_priors = blobs_flat.T
+        self.result.log_likelihood_evaluations = log_likelihoods
+        self.result.log_prior_evaluations = log_priors
         self.result.walkers = sampler.chain
         self.result.log_evidence = np.nan
         self.result.log_evidence_err = np.nan
@@ -146,8 +150,9 @@ class Emcee(MCMCSampler):
                          for _ in range(self.nwalkers)]
 
     def lnpostfn(self, theta):
-        p = self.log_prior(theta)
-        if np.isinf(p):
-            return -np.inf
+        log_prior = self.log_prior(theta)
+        if np.isinf(log_prior):
+            return -np.inf, [np.nan, np.nan]
         else:
-            return self.log_likelihood(theta) + p
+            log_likelihood = self.log_likelihood(theta)
+            return log_likelihood + log_prior, [log_likelihood, log_prior]
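
(The per-sample values above are recovered via emcee's "blobs" mechanism:
when the log-posterior function returns a (value, blob) pair, the sampler
records each blob, indexed as sampler.blobs[step][walker]. A minimal
self-contained sketch against the emcee 2.x API; the flat prior and
Gaussian likelihood are illustrative stand-ins.)

import numpy as np
import emcee

def lnpostfn(theta):
    # Toy flat (unnormalised) prior on (-10, 10).
    log_prior = 0.0 if -10 < theta[0] < 10 else -np.inf
    if np.isinf(log_prior):
        return -np.inf, [np.nan, np.nan]
    # Toy unit-Gaussian log likelihood.
    log_likelihood = -0.5 * theta[0] ** 2
    return log_likelihood + log_prior, [log_likelihood, log_prior]

nwalkers, ndim, nsteps = 10, 1, 5
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnpostfn)
sampler.run_mcmc(np.random.uniform(-1, 1, (nwalkers, ndim)), nsteps)

# Blobs come back indexed [step][walker], so this has shape
# (nsteps, nwalkers, 2), matching the slicing in run_sampler above.
blobs = np.array(sampler.blobs)
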
diff --git a/bilby/core/sampler/ptemcee.py b/bilby/core/sampler/ptemcee.py
index ce1fa17b564a64518551a85d7f193d79e27c1db6..8ea4165cd4a2f600700f18c81d46bfe762a07d91 100644
--- a/bilby/core/sampler/ptemcee.py
+++ b/bilby/core/sampler/ptemcee.py
@@ -65,9 +65,12 @@ class Ptemcee(Emcee):
                       for _ in range(self.nwalkers)]
                      for _ in range(self.kwargs['ntemps'])]
 
-        for _ in tqdm(
+        log_likelihood_evaluations = []
+        log_prior_evaluations = []
+        for _, logpost, loglike in tqdm(
                 sampler.sample(self.pos0, **self.sampler_function_kwargs),
                 total=self.nsteps):
+            log_likelihood_evaluations.append(loglike)
+            log_prior_evaluations.append(logpost - loglike)
-            pass
 
         self.calculate_autocorrelation(sampler.chain.reshape((-1, self.ndim)))
@@ -80,6 +83,10 @@ class Ptemcee(Emcee):
                 "`nburn < nsteps`. Try increasing the number of steps.")
         self.result.samples = sampler.chain[0, :, self.nburn:, :].reshape(
             (-1, self.ndim))
+        self.result.log_likelihood_evaluations = np.array(
+            log_likelihood_evaluations)[self.nburn:, 0, :].reshape(-1)
+        self.result.log_prior_evaluations = np.array(
+            log_prior_evaluations)[self.nburn:, 0, :].reshape(-1)
         self.result.betas = sampler.betas
         self.result.log_evidence, self.result.log_evidence_err =\
             sampler.log_evidence_estimate(
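
(Why `logpost - loglike` recovers the log prior only for the `[:, 0, :]`
slice above: ptemcee's sample() yields the tempered log posterior
beta * log_likelihood + log_prior for each temperature, and betas[0] == 1
for the cold chain. A worked check:)

import numpy as np

loglike, logprior = -3.0, -1.0

# For a hot chain (beta != 1) the difference is biased by
# (beta - 1) * loglike ...
logpost = 0.5 * loglike + logprior
assert not np.isclose(logpost - loglike, logprior)

# ... but the cold chain has beta == 1, so the subtraction is exact,
# which is why only temperature index 0 is kept.
logpost = 1.0 * loglike + logprior
assert np.isclose(logpost - loglike, logprior)
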