From fe1b075d50e29bf61e5e60524d68eb531e468dc2 Mon Sep 17 00:00:00 2001
From: John Veitch <john.veitch@ligo.org>
Date: Thu, 16 Mar 2023 12:05:02 +0000
Subject: [PATCH] flake8 style fixes in bilby/core/result.py

---
 bilby/core/result.py | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)
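
Note on the likelihood hunk below: it only re-wraps the existing
multiprocessing.Pool.imap / tqdm call to satisfy flake8; behaviour is
unchanged. For reference, here is a minimal self-contained sketch of the
same pattern. The names toy_log_likelihood and evaluate_in_parallel (and
the toy samples) are illustrative stand-ins, not bilby API; only the
max(100, n // (2 * npool)) chunk-size heuristic and the imap/tqdm wiring
mirror the patched code.

    import multiprocessing
    from functools import partial

    from tqdm import tqdm


    def toy_log_likelihood(offset, params):
        # Stand-in for __eval_l(likelihood, params): a cheap, picklable,
        # module-level callable that Pool workers can import.
        return -sum((value - offset) ** 2 for value in params.values())


    def evaluate_in_parallel(samples, npool=2, starting_index=0):
        n = len(samples) - starting_index
        with multiprocessing.Pool(processes=npool) as pool:
            # Large chunks keep inter-process overhead low; the floor of 100
            # matches the value used in the patched eval_pool helper.
            chunksize = max(100, n // (2 * npool))
            return list(tqdm(
                pool.imap(partial(toy_log_likelihood, 0.0),
                          samples[starting_index:], chunksize=chunksize),
                desc='Computing likelihoods',
                total=n))


    if __name__ == '__main__':
        toy_samples = [{'x': float(i)} for i in range(500)]
        print(evaluate_in_parallel(toy_samples)[:3])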

diff --git a/bilby/core/result.py b/bilby/core/result.py
index bb895090a..7d22109e2 100644
--- a/bilby/core/result.py
+++ b/bilby/core/result.py
@@ -30,10 +30,12 @@ from .prior import Prior, PriorDict, DeltaFunction, ConditionalDeltaFunction
 
 EXTENSIONS = ["json", "hdf5", "h5", "pickle", "pkl"]
 
+
 def __eval_l(likelihood, params):
     likelihood.parameters.update(params)
     return likelihood.log_likelihood()
 
+
 def result_file_name(outdir, label, extension='json', gzip=False):
     """ Returns the standard filename used for a result file
 
@@ -149,19 +151,20 @@ def get_weights_for_reweighting(
 
         starting_index = 0
 
-    dict_samples = [{key: sample[key] for key in result.posterior} for i,sample in result.posterior.iterrows()]
+    dict_samples = [{key: sample[key] for key in result.posterior}
+                    for _, sample in result.posterior.iterrows()]
     n = len(dict_samples) - starting_index
 
     # Helper function to compute likelihoods in parallel
     def eval_pool(this_logl):
         with multiprocessing.Pool(processes=npool) as pool:
-            chunksize = max(100,n//(2*npool))
+            chunksize = max(100, n // (2 * npool))
             return list(tqdm(
-                    pool.imap(partial(__eval_l,this_logl),
-                            dict_samples[starting_index:], chunksize=chunksize),
-                    desc = 'Computing likelihoods',
-                    total = n
-                ))
+                pool.imap(partial(__eval_l, this_logl),
+                          dict_samples[starting_index:], chunksize=chunksize),
+                desc='Computing likelihoods',
+                total=n)
+            )
 
     if old_likelihood is None:
         old_log_likelihood_array[starting_index:] = \
@@ -177,9 +180,9 @@ def get_weights_for_reweighting(
 
     # Compute priors
     for ii, sample in enumerate(tqdm(dict_samples[starting_index:],
-                                     desc = 'Computing priors',
-                                     total = n),
-                                start = starting_index):
+                                     desc='Computing priors',
+                                     total=n),
+                                start=starting_index):
         if old_prior is not None:
             old_log_prior_array[ii] = old_prior.ln_prob(dict_samples[ii])
         else:
-- 
GitLab