diff --git a/bilby/core/result.py b/bilby/core/result.py
index df91621d55c0a4fc17acd72866056ca25d84ea14..d604e52d1e2aa3990f777f306f352b78b08a660b 100644
--- a/bilby/core/result.py
+++ b/bilby/core/result.py
@@ -1644,24 +1644,26 @@ class ResultList(list):
             The result object with the combined evidences.
         """
         self.check_nested_samples()
-        if result.use_ratio:
-            log_bayes_factors = np.array([res.log_bayes_factor for res in self])
-            result.log_bayes_factor = logsumexp(log_bayes_factors, b=1. / len(self))
-            result.log_evidence = result.log_bayes_factor + result.log_noise_evidence
-            result_weights = np.exp(log_bayes_factors - np.max(log_bayes_factors))
-        else:
-            log_evidences = np.array([res.log_evidence for res in self])
-            result.log_evidence = logsumexp(log_evidences, b=1. / len(self))
-            result_weights = np.exp(log_evidences - np.max(log_evidences))
+
+        # Combine evidences
+        log_evidences = np.array([res.log_evidence for res in self])
+        result.log_evidence = logsumexp(log_evidences, b=1. / len(self))
+        result.log_bayes_factor = result.log_evidence - result.log_noise_evidence
+
+        # Propagate uncertainty in combined evidence
         log_errs = [res.log_evidence_err for res in self if np.isfinite(res.log_evidence_err)]
         if len(log_errs) > 0:
             result.log_evidence_err = 0.5 * logsumexp(2 * np.array(log_errs), b=1. / len(self))
         else:
             result.log_evidence_err = np.nan
+
+        # Combine posteriors with a weighting
+        result_weights = np.exp(log_evidences - np.max(log_evidences))
         posteriors = list()
         for res, frac in zip(self, result_weights):
             selected_samples = (np.random.uniform(size=len(res.posterior)) < frac)
             posteriors.append(res.posterior[selected_samples])
+
         # remove original nested_samples
         result.nested_samples = None
         result.sampler_kwargs = None