diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 7e477d9239529a5f67ab572c1a364db3cf98245e..a9043a8071ce0767b5894479259257bcba477885 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -42,6 +42,9 @@ containers:
     # Fail if differences exist. If this fails, you may need to run
     # write_dockerfiles.py and commit the changes.
     - git diff --exit-code
+    - cp env-template.yml env.yml
+    - echo "  - python=3.10" >> env.yml
+    - mamba env create -f env.yml -n test --dry-run
 
 .test-python: &test-python
   stage: initial
@@ -75,12 +78,16 @@ basic-3.10:
   <<: *test-python
   image: python:3.10
 
+basic-3.11:
+  <<: *test-python
+  image: python:3.11
+
 .test-samplers-import: &test-samplers-import
   stage: initial
   script:
     - python -m pip install .
     - *list-env
-    - python test/test_samplers_import.py
+    - pytest test/test_samplers_import.py -v
 
 import-samplers-3.9:
   <<: *test-samplers-import
@@ -90,6 +97,10 @@ import-samplers-3.10:
   <<: *test-samplers-import
   image: containers.ligo.org/lscsoft/bilby/v2-bilby-python310
 
+import-samplers-3.11:
+  <<: *test-samplers-import
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python311
+
 .precommits: &precommits
   stage: initial
   script:
@@ -98,17 +109,9 @@ import-samplers-3.10:
     - pip install --upgrade pip
     - pip --cache-dir=$CACHE_DIR install --upgrade bilby
     - pip --cache-dir=$CACHE_DIR install .
-    - pip --cache-dir=$CACHE_DIR install pre-commit
     # Run precommits (flake8, spellcheck, isort, no merge conflicts, etc)
     - pre-commit run --all-files --verbose --show-diff-on-failure
 
-precommits-py3.9:
-  <<: *precommits
-  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python39
-  variables:
-    CACHE_DIR: ".pip39"
-    PYVERSION: "python39"
-
 precommits-py3.10:
   <<: *precommits
   image: containers.ligo.org/lscsoft/bilby/v2-bilby-python310
@@ -137,7 +140,7 @@ install:
 
 python-3.9:
   <<: *unit-test
-  needs: ["basic-3.9", "precommits-py3.9"]
+  needs: ["basic-3.9"]
   image: containers.ligo.org/lscsoft/bilby/v2-bilby-python39
 
 python-3.10:
@@ -157,6 +160,11 @@ python-3.10:
       - htmlcov/
     expire_in: 30 days
 
+python-3.11:
+  <<: *unit-test
+  needs: ["basic-3.11"]
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python311
+
 .test-sampler: &test-sampler
   stage: test
   script:
@@ -166,7 +174,7 @@ python-3.10:
 
 python-3.9-samplers:
   <<: *test-sampler
-  needs: ["basic-3.9", "precommits-py3.9"]
+  needs: ["basic-3.9"]
   image: containers.ligo.org/lscsoft/bilby/v2-bilby-python39
 
 python-3.10-samplers:
@@ -174,6 +182,11 @@ python-3.10-samplers:
   needs: ["basic-3.10", "precommits-py3.10"]
   image: containers.ligo.org/lscsoft/bilby/v2-bilby-python310
 
+python-3.11-samplers:
+  <<: *test-sampler
+  needs: ["basic-3.11"]
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python311
+
 integration-tests-python-3.10:
   stage: test
   image: containers.ligo.org/lscsoft/bilby/v2-bilby-python310
@@ -198,25 +211,29 @@ integration-tests-python-3.10:
 plotting-python-3.9:
   <<: *plotting
   image: containers.ligo.org/lscsoft/bilby/v2-bilby-python39
-  needs: ["basic-3.9", "precommits-py3.9"]
+  needs: ["basic-3.9"]
 
 plotting-python-3.10:
   <<: *plotting
   image: containers.ligo.org/lscsoft/bilby/v2-bilby-python310
   needs: ["basic-3.10", "precommits-py3.10"]
 
+plotting-python-3.11:
+  <<: *plotting
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python311
+  needs: ["basic-3.11"]
+
 # ------------------- Docs stage -------------------------------------------
 
 docs:
   stage: docs
   image: containers.ligo.org/lscsoft/bilby/v2-bilby-python310
   before_script:
-    - conda install -c conda-forge pandoc ipython jupyter nbconvert
-    - python -m pip install ipykernel
     - python -m ipykernel install
   script:
     # Make the documentation
     - python -m pip install .
+    - python -m pip install myst_parser # only for testing purposes - remove once test image is generating correctly
     - cd examples/tutorials
     - jupyter nbconvert --to notebook --execute *.ipynb --output-dir ../../docs
     - cd ../../docs
@@ -258,7 +275,8 @@ pages:
   script:
     - cd containers
     - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
-    - docker build --tag v3-bilby-$PYVERSION - < v3-dockerfile-test-suite-$PYVERSION
+    - cp v3-dockerfile-test-suite-$PYVERSION Dockerfile
+    - docker build --tag v3-bilby-$PYVERSION .
     - docker image tag v3-bilby-$PYVERSION containers.ligo.org/lscsoft/bilby/v2-bilby-$PYVERSION:latest
     - docker image push containers.ligo.org/lscsoft/bilby/v2-bilby-$PYVERSION:latest
 
@@ -272,6 +290,11 @@ build-python310-container:
   variables:
     PYVERSION: "python310"
 
+build-python311-container:
+  <<: *build-container
+  variables:
+    PYVERSION: "python311"
+
 pypi-release:
   stage: deploy
   image: containers.ligo.org/lscsoft/bilby/v2-bilby-python310
@@ -279,7 +302,6 @@ pypi-release:
     TWINE_USERNAME: $PYPI_USERNAME
     TWINE_PASSWORD: $PYPI_PASSWORD
   before_script:
-    - python -m pip install twine setuptools_scm build
     - python -m build --sdist --wheel --outdir dist/ .
   script:
     - twine upload dist/*
diff --git a/AUTHORS.md b/AUTHORS.md
index 6d9f4353b6e0139e9c7bba58c36ec6c14cab248d..8c227ed6fe28947d34bf93e400af3837634bd689 100644
--- a/AUTHORS.md
+++ b/AUTHORS.md
@@ -11,6 +11,7 @@ Andrew Miller
 Antoni Ramos-Buades
 Apratim Ganguly
 Avi Vajpeyi
+Ben Patterson
 Bruce Edelman
 Carl-Johan Haster
 Cecilio Garcia-Quiros
@@ -28,6 +29,7 @@ Gregory Ashton
 Hank Hua
 Hector Estelles
 Ignacio Magaña Hernandez
+Isaac McMahon
 Isobel Marguarethe Romero-Shaw
 Jack Heinzel
 Jacob Golomb
@@ -43,6 +45,7 @@ Kaylee de Soto
 Khun Sang Phukon
 Kruthi Krishna
 Kshipraa Athar
+Kyle Wong
 Leslie Wade
 Liting Xiao
 Maite Mateu-Lucena
@@ -56,6 +59,7 @@ Michael Puerrer
 Michael Williams
 Monica Rizzo
 Moritz Huebner
+Nico Gerardo Bers
 Nicola De Lillo
 Nikhil Sarin
 Nirban Bose
@@ -71,6 +75,8 @@ Roberto Cotesta
 Rory Smith
 S. H. Oh
 Sacha Husa
+Sama Al-Shammari
+Samson Leong
 Scott Coughlin
 Serguei Ossokine
 Shanika Galaudage
@@ -78,10 +84,12 @@ Sharan Banagiri
 Shichao Wu
 Simon Stevenson
 Soichiro Morisaki
+Soumen Roy
 Stephen R Green
 Sumeet Kulkarni
 Sylvia Biscoveanu
 Tathagata Ghosh
+Teagan Clarke
 Tomasz Baka
 Will M. Farr
 Virginia d'Emilio
@@ -91,4 +99,6 @@ Isaac Legred
 Marc Penuliar
 Andrew Fowlie
 Martin White
-Peter T. H. Pang
+Peter Tsun-Ho Pang
+Alexandre Sebastien Goettel
+Ann-Kristin Malz
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 21fc144202458a8b8e541ea325ba96bc343cc0e9..564b6a37f35deddb79b25ed3af1c75180e1f9cde 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,96 @@
 # All notable changes will be documented in this file
 
-## [2.1.2] 2023-07-17
+## [Unreleased]
+
+
+## [2.3.0] - 2024-05-30
+
+### Added
+
+- Add support for sampler plugins via entry points (!1340, !1355)
+- Add `bilby.core.sampler.get_implemented_samplers` and `bilby.core.get_sampler_class` (!1340)
+- Add `bilby.core.utils.entry_points.get_entry_points` (!1340)
+- Add support for reading results from PathLike objects (!1342)
+- Add `snrs_as_sample` property to `bilby.gw.likelihood.base.GravitationalWaveTransient` (!1344)
+- Add `get_expected_outputs` method to the sampler classes (!1336)
+
+### Changed
+
+- Change `bilby_mcmc` to use `glasflow` instead of `nflows` (!1332)
+- Sampler classes are no longer imported in `bilby.core.sampler` (!1340)
+- Sampler classes in `bilby.core.sampler.IMPLEMENTED_SAMPLERS` must now be loaded before use (!1340)
+- `bilby.core.sampler.IMPLEMENTED_SAMPLERS` is now an instance of `bilby.core.sampler.ImplementedSampler` instead of a dictionary (!1355)
+- Updates to support numpy v2 (!1362)
+
+### Fixed
+
+- Include final frequency point in relative binning integration (!1310)
+- Address various deprecation warnings and deprecated keyword arguments (!1316, !1326, !1343)
+- Fix typo in logging statement in `bilby.gw.source` (!1325)
+- Fix missing import in `bilby.gw.detector.load_data_from_cache_file` (!1327)
+- Fix bug where `linestyle` was ignored in `bilby.core.result.plot_multiple` (!1238)
+- Fix `soft_init` sampler keyword argument with `dynesty` (!1335)
+- Fix ZeroDivisionError when using the `dynesty` with `act-walk` and large values of `nact` (!1346)
+- Fix custom prior loading from result file (!1360)
+
+
+## [2.2.3] - 2024-02-24
+Version 2.2.3 release of Bilby
+
+This is a bugfix release 
+
+There are also a number of testing/infrastructure updates.
+
+### Changes
+- Fix a bug when the specified maximum frequency is too low for the multibanding likelihood (!1279)
+- Allow the `DirichletElement` prior to be pickled (!1312)
+- Add the ability to change the pool size when resuming a `dynesty` job (!1315)
+- Fix how the random seed is passed to `dynesty` (!1319)
+
+## [2.2.2] - 2023-11-29
+Version 2.2.2 release of Bilby
+
+This is a bugfix release reverting a change from 2.2.1
+
+### Changes
+- Revert !1284 (!1306)
+
+## [2.2.1] - 2023-11-11
+Version 2.2.1 release of Bilby
+
+This release is a bugfix release.
+
+### Changes
+- Ensure interferometer metadata is not empty (!1281)
+- Make interrupted pools exit more quickly (!1284)
+- Fix conditional sampling with DeltaFunction conditions (!1289)
+- The triangular prior raised an error with numpy (!1294)
+- Make sure strain data resampling works (!1295)
+- Dynesty logging (!1296)
+- A bug with saving lists that contain None (!1301)
+- Preparatory fix for an upcoming change in dynesty (!1302)
+
+## [2.2.0] - 2023-07-24
+Version 2.2.0 release of Bilby
+
+This release contains one new feature and drops support for Python 3.8.
+
+### Added
+- New waveform interface to support the SEOBNRv5 family of waveforms (!1218)
+- Enable default noise + injection function for non-CBC signals (!1263)
+- Fallback to result pickle loading to match result writing (!1291)
+
+### Changes
+- Additional error catching for plotting (!1261, !1271)
+- Improve plotting options for corner plots (!1270)
+- Fix bugs in closing the pool for emcee (!1274)
+- Generalize MPI support (!1278)
+- Fix a bug with saving hdf5 results when conda isn't present (!1290)
+
+### Deprecated
+- Drop support for py38 (!1277)
+
+## [2.1.2] - 2023-07-17
 Version 2.1.2 release of Bilby
 
 This is a bugfix release.
@@ -14,7 +104,7 @@ Where users have previously used `np.random.seed` they should now call
 - Enable cosmological priors to be written/read in our plain text format (!1258)
 - Allow posterior reweighting to be performed when changing the likelihood and the prior (!1260)
 
-## [2.1.1] 2023-04-28
+## [2.1.1] - 2023-04-28
 Version 2.1.1 release of Bilby
 
 Bugfix release
@@ -24,7 +114,7 @@ Bugfix release
 - Bugfix for Fisher matrix proposals in `bilby_mcmc` (!1251)
 - Make the changes to the spline calibration backward compatible, 2.0.2 resume files can't be read with 2.1.0 (!1250)
 
-## [2.1.0] 2023-04-12
+## [2.1.0] - 2023-04-12
 Version 2.1.0 release of Bilby
 
 Minor feature improvements and bug fixes
@@ -47,7 +137,7 @@ Minor feature improvements and bug fixes
 ### Deprecated
 - Reading/writing ROQ weights to json (!1232)
 
-## [2.0.2] 2023-03-21
+## [2.0.2] - 2023-03-21
 Version 2.0.2 release of Bilby
 
 This is a bugfix release after the last major update.
@@ -57,7 +147,7 @@ This is a bugfix release after the last major update.
 - Fix to time calibration (!1234)
 - Fix nessai sampling time (!1236)
 
-## [2.0.1] 2023-03-13
+## [2.0.1] - 2023-03-13
 Version 2.0.1 release of Bilby
 
 This is a bugfix release after the last major update.
@@ -70,7 +160,7 @@ Users may notice changes in inferred binary neutron star masses after updating t
 - Update value for the solar mass (!1229).
 - Make `scikit-learn` an explicit dependence of `bilby[GW]` (!1230).
 
-## [2.0.0] 2023-02-29
+## [2.0.0] - 2023-02-29
 Version 2.0.0 release of Bilby
 
 This major version release has a significant change to the behaviour of the `dynesty` wrapper.
@@ -96,7 +186,7 @@ There are also a number of bugfixes and some new features in sampling and GW uti
 - Optimize ROQ waveform and calibration calls (!1216)
 - Add different proposal distribution and MCMC length for `dynesty` (!1187, !1222)
 
-## [1.4.1] 2022-12-07
+## [1.4.1] - 2022-12-07
 Version 1.4.1 release of Bilby
 
 This is a bugfix release to address some minor issues identified after v1.4.0.
@@ -110,7 +200,7 @@ This is a bugfix release to address some minor issues identified after v1.4.0.
 - Make sure that all dumping pickle files is done safely (!1189)
 - Make error catching for `dynesty` checkpointing more robust (!1190)
 
-## [1.4.0] 2022-11-18
+## [1.4.0] - 2022-11-18
 Version 1.4.0 release of Bilby
 
 The main changes in this release are support for more recent versions of `dynesty` (!1138)
@@ -132,7 +222,7 @@ and `nessai` (!1161) and adding the
 - Allow prior arguments read from a string to be functions (!1144)
 - Support `dynesty>=1.1.0` (!1138)
 
-## [1.3.0] 2022-10-23
+## [1.3.0] - 2022-10-23
 Version 1.3.0 release of Bilby
 
 This release has a major change to a sampler interface, `pymc3` is no longer supported, users should switch to `pymc>=4`.
@@ -156,7 +246,7 @@ This release also contains various documentation improvements.
 - Fix issue when specifying distance and redshfit independently (!1154)
 - Fix a bug in the storage of likelihood/prior samples for `bilby_mcmc` (!1156)
 
-## [1.2.1] 2022-09-05
+## [1.2.1] - 2022-09-05
 Version 1.2.1 release of Bilby
 
 This release contains a few bug fixes following 1.2.0.
@@ -174,7 +264,7 @@ This release contains a few bug fixes following 1.2.0.
 - Extend mass conversions to include source-frame parameters (!1131)
 - Fix prior ranges for GW150914 example (!1129)
 
-## [1.2.0] 2022-08-15
+## [1.2.0] - 2022-08-15
 Version 1.2.0 release of Bilby
 
 This is the first release that drops support for `Python<3.8`.
@@ -205,7 +295,7 @@ with multiprocessing.
 - `bilby.core.utils.progress`
 - Deepdish IO for `Result`, `Interferometer`, and `InterferometerList`
 
-## [1.1.5] 2022-01-14
+## [1.1.5] - 2022-01-14
 Version 1.1.5 release of Bilby
 
 ### Added
@@ -223,7 +313,7 @@ Version 1.1.5 release of Bilby
 - Improvements to the multi-banded GWT likelihood (!1026)
 - Improve meta data comparison (!1035)
 
-## [1.1.4] 2021-10-08
+## [1.1.4] - 2021-10-08
 Version 1.1.4 release of bilby
 
 ### Added
@@ -243,7 +333,7 @@ Version 1.1.4 release of bilby
 - Typo fix in eart light crossing (!1003)
 - Fix zero spin conversion (!1002)
 
-## [1.1.3] 2021-07-02
+## [1.1.3] - 2021-07-02
 Version 1.1.3 release of bilby
 
 ### Added
@@ -273,7 +363,7 @@ Version 1.1.3 release of bilby
 - Restructured utils module into several submodules. API remains backwards compatible (!873)
 - Changed number of default walks in `dynesty` from `10*self.ndim` to `100` (!961)
 
-## [1.1.2] 2021-05-05
+## [1.1.2] - 2021-05-05
 Version 1.1.2 release of bilby
 
 ### Added
@@ -303,13 +393,13 @@ Version 1.1.2 release of bilby
 - Fixed issues with pickle saving and loading (!932)
 - Fixed an issue with the `_base_roq_waveform` (!959)
 
-## [1.1.1] 2021-03-16
+## [1.1.1] - 2021-03-16
 Version 1.1.1 release of bilby
 
 ### Changes
 - Added `include requirements.txt` in `MANIFEST.in` to stop the pip installation from breaking
 
-## [1.1.0] 2021-03-15
+## [1.1.0] - 2021-03-15
 Version 1.1.0 release of bilby
 
 ### Added
@@ -348,7 +438,7 @@ Version 1.1.0 release of bilby
 - Fixed the likelihood count in `dynesty` (!853)
 - Changed the ordering of keyword arguments for the `Sine` and `Cosine` constructors (!892)
 
-## [1.0.4] 2020-11-23
+## [1.0.4] - 2020-11-23
 Version 1.0.4 release of bilby
 
 ### Added
@@ -357,7 +447,7 @@ Version 1.0.4 release of bilby
 ### Changes
 - Fixed issue in the CI
 
-## [1.0.3] 2020-10-23
+## [1.0.3] - 2020-10-23
 
 Version 1.0.3 release of bilby
 
@@ -376,7 +466,7 @@ Version 1.0.3 release of bilby
 - Typo fixes (!878, !887, !879)
 - Minor bug fixes (!888)
 
-## [1.0.2] 2020-09-14
+## [1.0.2] - 2020-09-14
 
 Version 1.0.2 release of bilby
 
@@ -398,7 +488,7 @@ Version 1.0.2 release of bilby
 - Clean up of code (!854)
 - Various minor bug, test and plotting fixes (!859, !874, !872, !865)
 
-## [1.0.1] 2020-08-29
+## [1.0.1] - 2020-08-29
 
 Version 1.0.1 release of bilby
 
@@ -423,7 +513,7 @@ Version 1.0.1 release of bilby
 - Various minor bug fixes and improvements to the documentation (!820)(!823)(!837)
 - Various testing improvements (!833)(!847)(!855)(!852)
 
-## [1.0.0] 2020-07-06
+## [1.0.0] - 2020-07-06
 
 Version 1.0 release of bilby
 
@@ -974,3 +1064,33 @@ First `pip` installable version https://pypi.org/project/BILBY/ .
 
 ### Removed
 - All chainconsumer dependency as this was causing issues.
+
+
+[Unreleased]: https://git.ligo.org/lscsoft/bilby/-/compare/v2.3.0...master
+[2.3.0]: https://git.ligo.org/lscsoft/bilby/-/compare/v2.2.3...v2.3.0
+[2.2.3]: https://git.ligo.org/lscsoft/bilby/-/compare/v2.2.2...v2.2.3
+[2.2.2]: https://git.ligo.org/lscsoft/bilby/-/compare/v2.2.1...v2.2.2
+[2.2.1]: https://git.ligo.org/lscsoft/bilby/-/compare/v2.2.0...v2.2.1
+[2.2.0]: https://git.ligo.org/lscsoft/bilby/-/compare/v2.1.2...v2.2.0
+[2.1.2]: https://git.ligo.org/lscsoft/bilby/-/compare/v2.1.1...v2.1.2
+[2.1.1]: https://git.ligo.org/lscsoft/bilby/-/compare/v2.1.0...v2.1.1
+[2.1.0]: https://git.ligo.org/lscsoft/bilby/-/compare/v2.0.2...v2.1.0
+[2.0.2]: https://git.ligo.org/lscsoft/bilby/-/compare/v2.0.1...v2.0.2
+[2.0.1]: https://git.ligo.org/lscsoft/bilby/-/compare/v2.0.0...v2.0.1
+[2.0.0]: https://git.ligo.org/lscsoft/bilby/-/compare/v1.4.1...v2.0.0
+[1.4.1]: https://git.ligo.org/lscsoft/bilby/-/compare/v1.4.0...v1.4.1
+[1.4.0]: https://git.ligo.org/lscsoft/bilby/-/compare/1.3.0...v1.4.0
+[1.3.0]: https://git.ligo.org/lscsoft/bilby/-/compare/1.2.1...1.3.0
+[1.2.1]: https://git.ligo.org/lscsoft/bilby/-/compare/1.2.0...1.2.1
+[1.2.0]: https://git.ligo.org/lscsoft/bilby/-/compare/1.1.5...1.2.0
+[1.1.5]: https://git.ligo.org/lscsoft/bilby/-/compare/1.1.4...1.1.5
+[1.1.4]: https://git.ligo.org/lscsoft/bilby/-/compare/1.1.3...1.1.4
+[1.1.3]: https://git.ligo.org/lscsoft/bilby/-/compare/1.1.2...1.1.3
+[1.1.2]: https://git.ligo.org/lscsoft/bilby/-/compare/1.1.1...1.1.2
+[1.1.1]: https://git.ligo.org/lscsoft/bilby/-/compare/1.1.0...1.1.1
+[1.1.0]: https://git.ligo.org/lscsoft/bilby/-/compare/1.0.4...1.1.0
+[1.0.4]: https://git.ligo.org/lscsoft/bilby/-/compare/1.0.3...1.0.4
+[1.0.3]: https://git.ligo.org/lscsoft/bilby/-/compare/1.0.2...1.0.3
+[1.0.2]: https://git.ligo.org/lscsoft/bilby/-/compare/1.0.1...1.0.2
+[1.0.1]: https://git.ligo.org/lscsoft/bilby/-/compare/1.0.0...1.0.1
+[1.0.0]: https://git.ligo.org/lscsoft/bilby/-/compare/0.6.9...1.0.0
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 4643073bacbfd9dcef23c11254e89c02d495be2d..9d151d79cb11033081e7ecc04c9713808d59aa65 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,15 +1,17 @@
 # Contributing to bilby
 
-This is a short guide to contributing to bilby aimed at general LVC members who
+This is a short guide to contributing to bilby aimed at general LVK members who
 have some familiarity with python and git.  
 
 1. [Code of conduct](#code-of-conduct)
 2. [Code style](#code-style)
-3. [Code relevance](#code-relevance)
-4. [Merge requests](#merge-requests)
-5. [Typical workflow](#typical-workflow)
-6. [Hints and tips](#hints-and-tips)
-7. [Code overview](#code-overview)
+3. [Automated Code Checking](#automated-code-checking)
+4. [Unit Testing](#unit-testing)
+5. [Code relevance](#code-relevance)
+6. [Merge requests](#merge-requests)
+7. [Typical workflow](#typical-workflow)
+8. [Hints and tips](#hints-and-tips)
+9. [Code overview](#code-overview)
 
 
 ## Code of Conduct
@@ -17,7 +19,7 @@ have some familiarity with python and git.
 Everyone participating in the bilby community, and in particular in our issue
 tracker, merge requests, and chat channels, is expected to treat other people
 with respect and follow the guidelines articulated in the [Python Community
-Code of Conduct](https://www.python.org/psf/codeofconduct/).
+Code of Conduct](https://www.python.org/psf/codeofconduct/). Furthermore, members of the LVK collaboration must follow the [LVK Code of Conduct](https://dcc.ligo.org/LIGO-M1900037/public).
 
 ## Code style
 
@@ -46,8 +48,8 @@ def my_new_function(x, y, print=False):
 ```
 3. Avoid inline comments unless necessary. Ideally, the code should make it obvious what is going on, if not the docstring, only in subtle cases use comments
 4. Name variables sensibly. Avoid using single-letter variables, it is better to name something `power_spectral_density_array` than `psda`.
-5. Don't repeat yourself. If code is repeated in multiple places, wrap it up into a function.
-6. Add tests. The C.I. is there to do the work of "checking" the code, both now and into the future. Use it.
+5. Don't repeat yourself. If code is repeated in multiple places, wrap it up into a function. This also helps with the writing of robust unit tests (see below).
+
 
 ## Automated code checking
 
@@ -76,6 +78,50 @@ If you experience any issues with pre-commit, please ask for support on the
 usual help channels.
 
 
+## Unit Testing
+
+Unit tests are an important part of code development, helping to minimize the number of undetected bugs which may be present in a merge request. They also greatly expedite the review of code, and can even help during the initial development if used properly. Accordingly, bilby requires unit testing for any changes with machine readable inputs and outputs (i.e. pretty much everything except plotting). 
+
+Unit testing is integrated into the CI/CD pipeline, and uses the builtin unittest package. Tests should be written into the `test/` directory which corresponds to their location within the package, such that, for example, a change to `bilby/gw/conversion.py` should go into `test/gw/conversion_test.py`. To run a single test locally, one may simply do `pytest /path/to/test_file.py::TestClass::test_name`, whereas to run all the tests in a given test file one may omit the class and function.
+
+For an example of what a test looks like, consider this test for the fft utils in bilby:
+
+```python
+class TestFFT(unittest.TestCase):
+    def setUp(self):
+        self.sampling_frequency = 10
+
+    def tearDown(self):
+        del self.sampling_frequency
+
+    def test_nfft_sine_function(self):
+        injected_frequency = 2.7324
+        duration = 100
+        times = utils.create_time_series(self.sampling_frequency, duration)
+
+        time_domain_strain = np.sin(2 * np.pi * times * injected_frequency + 0.4)
+
+        frequency_domain_strain, frequencies = bilby.core.utils.nfft(
+            time_domain_strain, self.sampling_frequency
+        )
+        frequency_at_peak = frequencies[np.argmax(np.abs(frequency_domain_strain))]
+        self.assertAlmostEqual(injected_frequency, frequency_at_peak, places=1)
+
+    def test_nfft_infft(self):
+        time_domain_strain = np.random.normal(0, 1, 10)
+        frequency_domain_strain, _ = bilby.core.utils.nfft(
+            time_domain_strain, self.sampling_frequency
+        )
+        new_time_domain_strain = bilby.core.utils.infft(
+            frequency_domain_strain, self.sampling_frequency
+        )
+        self.assertTrue(np.allclose(time_domain_strain, new_time_domain_strain))
+```
+
+`setUp` and `tearDown` handle construction and deconstruction of the test, such that each of the other test functions may be run independently, in any order. The other two functions each make an intuitive test of the functionality of an fft/ifft function: that the fft of a sine wave should be a delta function, and that an ifft should be an inverse of an fft.
+
+For more information on how to write effective tests, see [this guide](https://docs.python-guide.org/writing/tests/), and many others.
+
 ## Code relevance
 
 The bilby code base is intended to be highly modular and flexible. We encourage
@@ -153,7 +199,19 @@ $ git clone git@git.ligo.org:albert.einstein/bilby.git
 ```
 
 replacing the SSH url to that of your fork. This will create a directory
-`/bilby` containing a local copy of the code. From this directory, you can run
+`/bilby` containing a local copy of the code.
+
+It is strongly advised to perform development with a dedicated conda environment.
+In depth instructions for creating a conda environment may be found at the relevant
+[conda docs](https://conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#activating-an-environment),
+but for most purposes the commands
+
+```bash
+$ conda create -n my-environment-name python=3.X
+$ conda activate my-environment-name
+```
+
+will produce the desired results. Once this is completed, one may proceed to the `/bilby` directory and run
 
 ```bash
 $ pip install -e .
@@ -202,8 +260,25 @@ $ git checkout -b my-new-feature lscsoft/master
 ### Step d) Hack away
 
 1. Develop the changes you would like to introduce, using `git add` to add files with changes. Ideally commit small units of change often, rather than creating one large commit at the end, this will simplify review and make modifying any changes easier.
-2. Commit the changes using `git commit`. This will prompt you for a commit message. Commit messages should be clear, identifying which code was changed, and why. Common practice (see e.g. [this blog](https://chris.beams.io/posts/git-commit/)) is to use a short summary line (<50 characters), followed by a blank line, then more information in longer lines.
-3. Push your changes to the remote copy of your fork on git.ligo.org
+2. Commit the changes using `git commit`. This will prompt you for a commit message. Commit messages should be clear, identifying which code was changed, and why. Bilby is adopting the use of scipy commit format, specified [here](https://docs.scipy.org/doc/scipy/dev/contributor/development_workflow.html#writing-the-commit-message). Commit messages take a standard format of `ACRONYM: Summary message` followed by a more detailed description. For example, an enhancement would look like:
+
+```
+ENH: Add my awesome new feature
+
+This is a very cool change that makes parameter estimation run 10x faster by changing a single line.
+```
+
+Similarly a bugfix:
+
+```
+BUG: Fix type error in my_awesome_feature.py
+
+Correct a typo at L1 of /bilby/my_awesome_feature.py which returned a dictionary instead of a string.
+```
+
+For more discussion of best practices, see e.g. [this blog](https://chris.beams.io/posts/git-commit/).
+
+3. Push your changes to the remote copy of your fork on git.ligo.org
 
 ```bash
 git push origin my-new-feature
diff --git a/README.rst b/README.rst
index 70bc2efa539a21c4614e3245a4ad7540f4d4bd6d..d14c44fea7869dae74744628408ed03c085ac39c 100644
--- a/README.rst
+++ b/README.rst
@@ -30,47 +30,7 @@ us directly. For advice on contributing, see `the contributing guide <https://gi
 Citation guide
 --------------
 
-If you use :code:`bilby` in a scientific publication, please cite
-
-* `Bilby: A user-friendly Bayesian inference library for gravitational-wave
-  astronomy
-  <https://ui.adsabs.harvard.edu/#abs/2018arXiv181102042A/abstract>`__
-* `Bayesian inference for compact binary coalescences with BILBY: validation and application to the first LIGO-Virgo gravitational-wave transient catalogue <https://ui.adsabs.harvard.edu/abs/2020MNRAS.499.3295R/abstract>`__
-
-The first of these papers introduces the software, while the second introduces advances in the sampling approaches and validation of the software.
-If you use the :code:`bilby_mcmc` sampler, please additionally cite
-
-* `BILBY-MCMC: an MCMC sampler for gravitational-wave inference <https://ui.adsabs.harvard.edu/abs/2021MNRAS.507.2037A/abstract>`__
-
-Additionally, :code:`bilby` builds on a number of open-source packages. If you
-make use of this functionality in your publications, we recommend you cite them
-as requested in their associated documentation.
-
-**Samplers**
-
-* `dynesty <https://github.com/joshspeagle/dynesty>`__
-* `nestle <https://github.com/kbarbary/nestle>`__
-* `pymultinest <https://github.com/JohannesBuchner/PyMultiNest>`__
-* `cpnest <https://github.com/johnveitch/cpnest>`__
-* `emcee <https://github.com/dfm/emcee>`__
-* `nessai <https://github.com/mj-will/nessai>`_
-* `ptemcee <https://github.com/willvousden/ptemcee>`__
-* `ptmcmcsampler <https://github.com/jellis18/PTMCMCSampler>`__
-* `pypolychord <https://github.com/PolyChord/PolyChordLite>`__
-* `PyMC3 <https://github.com/pymc-devs/pymc3>`_
-
-
-**Gravitational-wave tools**
-
-* `gwpy <https://github.com/gwpy/gwpy>`__
-* `lalsuite <https://git.ligo.org/lscsoft/lalsuite>`__
-* `astropy <https://github.com/astropy/astropy>`__
-
-**Plotting**
-
-* `corner <https://github.com/dfm/corner.py>`__ for generating corner plot
-* `matplotlib <https://github.com/matplotlib/matplotlib>`__ for general plotting routines
-
+Please refer to the `Acknowledging/citing bilby guide <https://lscsoft.docs.ligo.org/bilby/citing-bilby.html>`__.
 
 .. |pipeline status| image:: https://git.ligo.org/lscsoft/bilby/badges/master/pipeline.svg
    :target: https://git.ligo.org/lscsoft/bilby/commits/master
diff --git a/bilby/bilby_mcmc/flows.py b/bilby/bilby_mcmc/flows.py
index 5fbaf196b38859660307554e8bc371ce0b03edf8..b08ea3a9386e8f69b6ec7644f896b17c287956be 100644
--- a/bilby/bilby_mcmc/flows.py
+++ b/bilby/bilby_mcmc/flows.py
@@ -1,17 +1,17 @@
 import torch
-from nflows.distributions.normal import StandardNormal
-from nflows.flows.base import Flow
-from nflows.nn import nets as nets
-from nflows.transforms import (
+from glasflow.nflows.distributions.normal import StandardNormal
+from glasflow.nflows.flows.base import Flow
+from glasflow.nflows.nn import nets as nets
+from glasflow.nflows.transforms import (
     CompositeTransform,
     MaskedAffineAutoregressiveTransform,
     RandomPermutation,
 )
-from nflows.transforms.coupling import (
+from glasflow.nflows.transforms.coupling import (
     AdditiveCouplingTransform,
     AffineCouplingTransform,
 )
-from nflows.transforms.normalization import BatchNorm
+from glasflow.nflows.transforms.normalization import BatchNorm
 from torch.nn import functional as F
 
 # Turn off parallelism
diff --git a/bilby/bilby_mcmc/proposals.py b/bilby/bilby_mcmc/proposals.py
index 17892e050b33b89d8b391203b72140056ea29254..6100d75f89b68be8d0b0bf165cef3aaee53db5a7 100644
--- a/bilby/bilby_mcmc/proposals.py
+++ b/bilby/bilby_mcmc/proposals.py
@@ -754,10 +754,10 @@ class NormalizingFlowProposal(DensityEstimateProposal):
 
     @staticmethod
     def check_dependencies(warn=True):
-        if importlib.util.find_spec("nflows") is None:
+        if importlib.util.find_spec("glasflow") is None:
             if warn:
                 logger.warning(
-                    "Unable to utilise NormalizingFlowProposal as nflows is not installed"
+                    "Unable to utilise NormalizingFlowProposal as glasflow is not installed"
                 )
             return False
         else:
diff --git a/bilby/bilby_mcmc/sampler.py b/bilby/bilby_mcmc/sampler.py
index 1b528ec78cad6bd929feda0658b4b7f1d5c222d7..6676c76d37ac89ab912a824c18e0d3b175b785c1 100644
--- a/bilby/bilby_mcmc/sampler.py
+++ b/bilby/bilby_mcmc/sampler.py
@@ -127,6 +127,9 @@ class Bilby_MCMC(MCMCSampler):
     initial_sample_dict: dict
         A dictionary of the initial sample value. If incomplete, will overwrite
         the initial_sample drawn using initial_sample_method.
+    normalize_prior: bool
+        When False, disables calculation of constraint normalization factor
+        during prior probability computation. Default value is True.
     verbose: bool
         Whether to print diagnostic output during the run.
 
@@ -175,6 +178,7 @@ class Bilby_MCMC(MCMCSampler):
         resume=True,
         exit_code=130,
         verbose=True,
+        normalize_prior=True,
         **kwargs,
     ):
 
@@ -194,6 +198,7 @@ class Bilby_MCMC(MCMCSampler):
         self.kwargs["target_nsamples"] = self.kwargs["nsamples"]
         self.L1steps = self.kwargs["L1steps"]
         self.L2steps = self.kwargs["L2steps"]
+        self.normalize_prior = normalize_prior
         self.pt_inputs = ParallelTemperingInputs(
             **{key: self.kwargs[key] for key in ParallelTemperingInputs._fields}
         )
@@ -309,6 +314,7 @@ class Bilby_MCMC(MCMCSampler):
             evidence_method=self.evidence_method,
             initial_sample_method=self.initial_sample_method,
             initial_sample_dict=self.initial_sample_dict,
+            normalize_prior=self.normalize_prior,
         )
 
     def get_setup_string(self):
@@ -388,7 +394,7 @@ class Bilby_MCMC(MCMCSampler):
 
         with open(self.resume_file, "rb") as file:
             ptsampler = dill.load(file)
-            if type(ptsampler) != BilbyPTMCMCSampler:
+            if not isinstance(ptsampler, BilbyPTMCMCSampler):
                 logger.debug("Malformed resume file, ignoring")
                 return False
             self.ptsampler = ptsampler
@@ -547,6 +553,29 @@ class Bilby_MCMC(MCMCSampler):
                         all_samples=ptsampler.samples,
                     )
 
+    @classmethod
+    def get_expected_outputs(cls, outdir=None, label=None):
+        """Get lists of the expected outputs directories and files.
+
+        These are used by :code:`bilby_pipe` when transferring files via HTCondor.
+
+        Parameters
+        ----------
+        outdir : str
+            The output directory.
+        label : str
+            The label for the run.
+
+        Returns
+        -------
+        list
+            List of file names.
+        list
+            List of directory names. Will always be empty for bilby_mcmc.
+        """
+        filenames = [os.path.join(outdir, f"{label}_resume.pickle")]
+        return filenames, []
+
 
 class BilbyPTMCMCSampler(object):
     def __init__(
@@ -560,11 +589,13 @@ class BilbyPTMCMCSampler(object):
         evidence_method,
         initial_sample_method,
         initial_sample_dict,
+        normalize_prior=True,
     ):
         self.set_pt_inputs(pt_inputs)
         self.use_ratio = use_ratio
         self.initial_sample_method = initial_sample_method
         self.initial_sample_dict = initial_sample_dict
+        self.normalize_prior = normalize_prior
         self.setup_sampler_dictionary(convergence_inputs, proposal_cycle)
         self.set_convergence_inputs(convergence_inputs)
         self.pt_rejection_sample = pt_rejection_sample
@@ -635,6 +666,7 @@ class BilbyPTMCMCSampler(object):
                     use_ratio=self.use_ratio,
                     initial_sample_method=self.initial_sample_method,
                     initial_sample_dict=self.initial_sample_dict,
+                    normalize_prior=self.normalize_prior,
                 )
                 for Eindex in range(n)
             ]
@@ -1129,12 +1161,13 @@ class BilbyMCMCSampler(object):
         use_ratio=False,
         initial_sample_method="prior",
         initial_sample_dict=None,
+        normalize_prior=True,
     ):
         self.beta = beta
         self.Tindex = Tindex
         self.Eindex = Eindex
         self.use_ratio = use_ratio
-
+        self.normalize_prior = normalize_prior
         self.parameters = _sampling_convenience_dump.priors.non_fixed_keys
         self.ndim = len(self.parameters)
 
@@ -1209,7 +1242,10 @@ class BilbyMCMCSampler(object):
         return logl
 
     def log_prior(self, sample):
-        return _sampling_convenience_dump.priors.ln_prob(sample.parameter_only_dict)
+        return _sampling_convenience_dump.priors.ln_prob(
+            sample.parameter_only_dict,
+            normalized=self.normalize_prior,
+        )
 
     def accept_proposal(self, prop, proposal):
         self.chain.append(prop)
diff --git a/bilby/core/prior/analytical.py b/bilby/core/prior/analytical.py
index dd503c98911ac4abfc8264d0a710116b35b275ce..5e7b3099f4a26c5ab75cc827d9300b43dfca1adc 100644
--- a/bilby/core/prior/analytical.py
+++ b/bilby/core/prior/analytical.py
@@ -28,6 +28,7 @@ class DeltaFunction(Prior):
                                             minimum=peak, maximum=peak, check_range_nonzero=False)
         self.peak = peak
         self._is_fixed = True
+        self.least_recently_sampled = peak
 
     def rescale(self, val):
         """Rescale everything to the peak with the correct shape.
@@ -1608,7 +1609,7 @@ class Triangular(Prior):
 
         """
         return (
-            + (val > self.mode)
+            (val > self.mode)
             + (val > self.minimum)
             * (val <= self.maximum)
             / (self.scale)
diff --git a/bilby/core/prior/base.py b/bilby/core/prior/base.py
index 4ac924f74639837a2c323e5769e53c3e0a6a2871..0dcac2c0a6f4fbb28bf1cb9bea4312381bffe12c 100644
--- a/bilby/core/prior/base.py
+++ b/bilby/core/prior/base.py
@@ -168,14 +168,14 @@ class Prior(object):
 
     def cdf(self, val):
         """ Generic method to calculate CDF, can be overwritten in subclass """
-        from scipy.integrate import cumtrapz
+        from scipy.integrate import cumulative_trapezoid
         if np.any(np.isinf([self.minimum, self.maximum])):
             raise ValueError(
                 "Unable to use the generic CDF calculation for priors with"
                 "infinite support")
         x = np.linspace(self.minimum, self.maximum, 1000)
         pdf = self.prob(x)
-        cdf = cumtrapz(pdf, x, initial=0)
+        cdf = cumulative_trapezoid(pdf, x, initial=0)
         interp = interp1d(x, cdf, assume_sorted=True, bounds_error=False,
                           fill_value=(0, 1))
         return interp(val)
diff --git a/bilby/core/prior/conditional.py b/bilby/core/prior/conditional.py
index cbc99f269f6dcb7acbaa75ef9ea377dc2510cbcf..797cbd1c45a1bc895cfd383615130e28de1e95a1 100644
--- a/bilby/core/prior/conditional.py
+++ b/bilby/core/prior/conditional.py
@@ -372,6 +372,7 @@ class DirichletElement(ConditionalBeta):
             label + str(ii) for ii in range(order)
         ]
         self.__class__.__name__ = 'DirichletElement'
+        self.__class__.__qualname__ = 'DirichletElement'
 
     def dirichlet_condition(self, reference_parms, **kwargs):
         remaining = 1 - sum(
diff --git a/bilby/core/prior/dict.py b/bilby/core/prior/dict.py
index d25ca6487560bbb946b8930ba8c625e9d0f2d766..6b532cc4d8ea4228dafb63b6141b864bd3e12582 100644
--- a/bilby/core/prior/dict.py
+++ b/bilby/core/prior/dict.py
@@ -510,14 +510,14 @@ class PriorDict(dict):
         sample: dict
             Dictionary of the samples of which we want to have the probability of
         kwargs:
-            The keyword arguments are passed directly to `np.product`
+            The keyword arguments are passed directly to `np.prod`
 
         Returns
         =======
         float: Joint probability of all individual sample probabilities
 
         """
-        prob = np.product([self[key].prob(sample[key]) for key in sample], **kwargs)
+        prob = np.prod([self[key].prob(sample[key]) for key in sample], **kwargs)
 
         return self.check_prob(sample, prob)
 
@@ -537,7 +537,7 @@ class PriorDict(dict):
                 constrained_prob[keep] = prob[keep] * ratio
                 return constrained_prob
 
-    def ln_prob(self, sample, axis=None):
+    def ln_prob(self, sample, axis=None, normalized=True):
         """
 
         Parameters
@@ -546,6 +546,9 @@ class PriorDict(dict):
             Dictionary of the samples of which to calculate the log probability
         axis: None or int
             Axis along which the summation is performed
+        normalized: bool
+            When False, disables calculation of constraint normalization factor
+            during prior probability computation. Default value is True.
 
         Returns
         =======
@@ -554,10 +557,14 @@ class PriorDict(dict):
 
         """
         ln_prob = np.sum([self[key].ln_prob(sample[key]) for key in sample], axis=axis)
-        return self.check_ln_prob(sample, ln_prob)
+        return self.check_ln_prob(sample, ln_prob,
+                                  normalized=normalized)
 
-    def check_ln_prob(self, sample, ln_prob):
-        ratio = self.normalize_constraint_factor(tuple(sample.keys()))
+    def check_ln_prob(self, sample, ln_prob, normalized=True):
+        if normalized:
+            ratio = self.normalize_constraint_factor(tuple(sample.keys()))
+        else:
+            ratio = 1
         if np.all(np.isinf(ln_prob)):
             return ln_prob
         else:
@@ -711,16 +718,22 @@ class ConditionalPriorDict(PriorDict):
 
     def sample_subset(self, keys=iter([]), size=None):
         self.convert_floats_to_delta_functions()
-        subset_dict = ConditionalPriorDict({key: self[key] for key in keys})
+        add_delta_keys = [
+            key
+            for key in self.keys()
+            if key not in keys and isinstance(self[key], DeltaFunction)
+        ]
+        use_keys = add_delta_keys + list(keys)
+        subset_dict = ConditionalPriorDict({key: self[key] for key in use_keys})
         if not subset_dict._resolved:
             raise IllegalConditionsException(
                 "The current set of priors contains unresolvable conditions."
             )
         samples = dict()
         for key in subset_dict.sorted_keys:
-            if isinstance(self[key], Constraint):
+            if key not in keys or isinstance(self[key], Constraint):
                 continue
-            elif isinstance(self[key], Prior):
+            if isinstance(self[key], Prior):
                 try:
                     samples[key] = subset_dict[key].sample(
                         size=size, **subset_dict.get_required_variables(key)
@@ -764,7 +777,7 @@ class ConditionalPriorDict(PriorDict):
         sample: dict
             Dictionary of the samples of which we want to have the probability of
         kwargs:
-            The keyword arguments are passed directly to `np.product`
+            The keyword arguments are passed directly to `np.prod`
 
         Returns
         =======
@@ -776,10 +789,10 @@ class ConditionalPriorDict(PriorDict):
             self[key].prob(sample[key], **self.get_required_variables(key))
             for key in sample
         ]
-        prob = np.product(res, **kwargs)
+        prob = np.prod(res, **kwargs)
         return self.check_prob(sample, prob)
 
-    def ln_prob(self, sample, axis=None):
+    def ln_prob(self, sample, axis=None, normalized=True):
         """
 
         Parameters
@@ -788,6 +801,9 @@ class ConditionalPriorDict(PriorDict):
             Dictionary of the samples of which we want to have the log probability of
         axis: Union[None, int]
             Axis along which the summation is performed
+        normalized: bool
+            When False, disables calculation of constraint normalization factor
+            during prior probability computation. Default value is True.
 
         Returns
         =======
@@ -800,7 +816,8 @@ class ConditionalPriorDict(PriorDict):
             for key in sample
         ]
         ln_prob = np.sum(res, axis=axis)
-        return self.check_ln_prob(sample, ln_prob)
+        return self.check_ln_prob(sample, ln_prob,
+                                  normalized=normalized)
 
     def cdf(self, sample):
         self._prepare_evaluation(*zip(*sample.items()))
diff --git a/bilby/core/prior/interpolated.py b/bilby/core/prior/interpolated.py
index 187e8a60a3742c3ad8a6c42b436acdc82c0c9f43..2cee669d9fec44e1ed7722a3729f91743729f9c6 100644
--- a/bilby/core/prior/interpolated.py
+++ b/bilby/core/prior/interpolated.py
@@ -162,11 +162,11 @@ class Interped(Prior):
         self._initialize_attributes()
 
     def _initialize_attributes(self):
-        from scipy.integrate import cumtrapz
+        from scipy.integrate import cumulative_trapezoid
         if np.trapz(self._yy, self.xx) != 1:
             logger.debug('Supplied PDF for {} is not normalised, normalising.'.format(self.name))
         self._yy /= np.trapz(self._yy, self.xx)
-        self.YY = cumtrapz(self._yy, self.xx, initial=0)
+        self.YY = cumulative_trapezoid(self._yy, self.xx, initial=0)
         # Need last element of cumulative distribution to be exactly one.
         self.YY[-1] = 1
         self.probability_density = interp1d(x=self.xx, y=self._yy, bounds_error=False, fill_value=0)
diff --git a/bilby/core/result.py b/bilby/core/result.py
index 1c9914dc42f693d402a6070c5f7ccb0b6c91a7c0..c5d27bfcc827461515b6dd8978c6677484ff33fb 100644
--- a/bilby/core/result.py
+++ b/bilby/core/result.py
@@ -69,7 +69,11 @@ def result_file_name(outdir, label, extension='json', gzip=False):
 def _determine_file_name(filename, outdir, label, extension, gzip):
     """ Helper method to determine the filename """
     if filename is not None:
-        return filename
+        if isinstance(filename, os.PathLike):
+            # convert PathLike object to string
+            return str(filename)
+        else:
+            return filename
     else:
         if (outdir is None) and (label is None):
             raise ValueError("No information given to load file")
@@ -134,6 +138,16 @@ def read_in_result_list(filename_list, invalid="warning"):
     """
     results_list = []
     for filename in filename_list:
+        if (
+            not os.path.exists(filename)
+            and os.path.exists(f"{os.path.splitext(filename)[0]}.pkl")
+        ):
+            pickle_path = f"{os.path.splitext(filename)[0]}.pkl"
+            logger.warning(
+                f"Result file {filename} doesn't exist but {pickle_path} does. "
+                f"Using {pickle_path}."
+            )
+            filename = pickle_path
         try:
             results_list.append(read_in_result(filename=filename))
         except Exception as e:
@@ -1785,7 +1799,7 @@ class ResultList(list):
 
         if isinstance(result, Result):
             super(ResultList, self).append(result)
-        elif isinstance(result, str):
+        elif isinstance(result, (str, os.PathLike)):
             super(ResultList, self).append(read_in_result(result))
         else:
             raise TypeError("Could not append a non-Result type")
@@ -2047,7 +2061,7 @@ def plot_multiple(results, filename=None, labels=None, colours=None,
         hist_kwargs['color'] = c
         hist_kwargs["linestyle"] = linestyle
         kwargs["hist_kwargs"] = hist_kwargs
-        fig = result.plot_corner(fig=fig, save=False, color=c, contour_kwargs={"linestyle": linestyle}, **kwargs)
+        fig = result.plot_corner(fig=fig, save=False, color=c, contour_kwargs={"linestyles": linestyle}, **kwargs)
         default_filename += '_{}'.format(result.label)
         lines.append(mpllines.Line2D([0], [0], color=c, linestyle=linestyle))
         default_labels.append(result.label)
diff --git a/bilby/core/sampler/__init__.py b/bilby/core/sampler/__init__.py
index ab0c8f15480d83c6bcb11e5afba4cec6e584b0e6..ef65ce1dddd70b2d57aaf07c15cfe999a47ba901 100644
--- a/bilby/core/sampler/__init__.py
+++ b/bilby/core/sampler/__init__.py
@@ -2,54 +2,129 @@ import datetime
 import inspect
 import sys
 
-import bilby
-from bilby.bilby_mcmc import Bilby_MCMC
-
 from ..prior import DeltaFunction, PriorDict
-from ..utils import command_line_args, env_package_list, loaded_modules_dict, logger
+from ..utils import (
+    command_line_args,
+    env_package_list,
+    get_entry_points,
+    loaded_modules_dict,
+    logger,
+)
 from . import proposal
 from .base_sampler import Sampler, SamplingMarginalisedParameterError
-from .cpnest import Cpnest
-from .dnest4 import DNest4
-from .dynamic_dynesty import DynamicDynesty
-from .dynesty import Dynesty
-from .emcee import Emcee
-from .fake_sampler import FakeSampler
-from .kombine import Kombine
-from .nessai import Nessai
-from .nestle import Nestle
-from .polychord import PyPolyChord
-from .ptemcee import Ptemcee
-from .ptmcmc import PTMCMCSampler
-from .pymc import Pymc
-from .pymultinest import Pymultinest
-from .ultranest import Ultranest
-from .zeus import Zeus
-
-IMPLEMENTED_SAMPLERS = {
-    "bilby_mcmc": Bilby_MCMC,
-    "cpnest": Cpnest,
-    "dnest4": DNest4,
-    "dynamic_dynesty": DynamicDynesty,
-    "dynesty": Dynesty,
-    "emcee": Emcee,
-    "kombine": Kombine,
-    "nessai": Nessai,
-    "nestle": Nestle,
-    "ptemcee": Ptemcee,
-    "ptmcmcsampler": PTMCMCSampler,
-    "pymc": Pymc,
-    "pymultinest": Pymultinest,
-    "pypolychord": PyPolyChord,
-    "ultranest": Ultranest,
-    "zeus": Zeus,
-    "fake_sampler": FakeSampler,
-}
+
+
+class ImplementedSamplers:
+    """Dictionary-like object that contains implemented samplers.
+
+    This class is singleton and only one instance can exist.
+    """
+
+    _instance = None
+
+    _samplers = get_entry_points("bilby.samplers")
+
+    def keys(self):
+        """Iterator of available samplers by name.
+
+        Reduces the list to its simplest. This includes removing the 'bilby.'
+        prefix from native samplers if a corresponding plugin is not available.
+        """
+        keys = []
+        for key in self._samplers.keys():
+            name = key.replace("bilby.", "")
+            if name in self._samplers.keys():
+                keys.append(key)
+            else:
+                keys.append(name)
+        return iter(keys)
+
+    def values(self):
+        """Iterator of sampler classes.
+
+        Note: the classes need to be loaded using :code:`.load()` before being
+        called.
+        """
+        return iter(self._samplers.values())
+
+    def items(self):
+        """Iterator of tuples containing keys (sampler names) and classes.
+
+        Note: the classes need to be loaded using :code:`.load()` before being
+        called.
+        """
+        return iter(((k, v) for k, v in zip(self.keys(), self.values())))
+
+    def valid_keys(self):
+        """All valid keys including bilby.<sampler name>."""
+        keys = set(self._samplers.keys())
+        return iter(keys.union({k.replace("bilby.", "") for k in keys}))
+
+    def __getitem__(self, key):
+        if key in self._samplers:
+            return self._samplers[key]
+        elif f"bilby.{key}" in self._samplers:
+            return self._samplers[f"bilby.{key}"]
+        else:
+            raise ValueError(
+                f"Sampler {key} is not implemented! "
+                f"Available samplers are: {list(self.keys())}"
+            )
+
+    def __contains__(self, value):
+        return value in self.valid_keys()
+
+    def __new__(cls):
+        if cls._instance is None:
+            cls._instance = super().__new__(cls)
+        return cls._instance
+
+
+IMPLEMENTED_SAMPLERS = ImplementedSamplers()
+
+
+def get_implemented_samplers():
+    """Get a list of the names of the implemented samplers.
+
+    This includes natively supported samplers (e.g. dynesty) and any additional
+    samplers that are supported through the sampler plugins.
+
+    Returns
+    -------
+    list
+        The list of implemented samplers.
+    """
+    return list(IMPLEMENTED_SAMPLERS.keys())
+
+
+def get_sampler_class(sampler):
+    """Get the class for a sampler from its name.
+
+    This includes natively supported samplers (e.g. dynesty) and any additional
+    samplers that are supported through the sampler plugins.
+
+    Parameters
+    ----------
+    sampler : str
+        The name of the sampler.
+
+    Returns
+    -------
+    Sampler
+        The sampler class.
+
+    Raises
+    ------
+    ValueError
+        Raised if the sampler is not implemented.
+    """
+    return IMPLEMENTED_SAMPLERS[sampler.lower()].load()
+
 
 if command_line_args.sampler_help:
     sampler = command_line_args.sampler_help
     if sampler in IMPLEMENTED_SAMPLERS:
-        sampler_class = IMPLEMENTED_SAMPLERS[sampler]
+        sampler_class = IMPLEMENTED_SAMPLERS[sampler].load()
         print(f'Help for sampler "{sampler}":')
         print(sampler_class.__doc__)
     else:
@@ -60,7 +135,7 @@ if command_line_args.sampler_help:
             )
         else:
             print(f"Requested sampler {sampler} not implemented")
-        print(f"Available samplers = {IMPLEMENTED_SAMPLERS}")
+        print(f"Available samplers = {get_implemented_samplers()}")
 
     sys.exit()
 
@@ -185,24 +260,20 @@ def run_sampler(
     if isinstance(sampler, Sampler):
         pass
     elif isinstance(sampler, str):
-        if sampler.lower() in IMPLEMENTED_SAMPLERS:
-            sampler_class = IMPLEMENTED_SAMPLERS[sampler.lower()]
-            sampler = sampler_class(
-                likelihood,
-                priors=priors,
-                outdir=outdir,
-                label=label,
-                injection_parameters=injection_parameters,
-                meta_data=meta_data,
-                use_ratio=use_ratio,
-                plot=plot,
-                result_class=result_class,
-                npool=npool,
-                **kwargs,
-            )
-        else:
-            print(IMPLEMENTED_SAMPLERS)
-            raise ValueError(f"Sampler {sampler} not yet implemented")
+        sampler_class = get_sampler_class(sampler)
+        sampler = sampler_class(
+            likelihood,
+            priors=priors,
+            outdir=outdir,
+            label=label,
+            injection_parameters=injection_parameters,
+            meta_data=meta_data,
+            use_ratio=use_ratio,
+            plot=plot,
+            result_class=result_class,
+            npool=npool,
+            **kwargs,
+        )
     elif inspect.isclass(sampler):
         sampler = sampler.__init__(
             likelihood,
@@ -219,7 +290,7 @@ def run_sampler(
     else:
         raise ValueError(
             "Provided sampler should be a Sampler object or name of a known "
-            f"sampler: {', '.join(IMPLEMENTED_SAMPLERS.keys())}."
+            f"sampler: {get_implemented_samplers()}."
         )
 
     if sampler.cached_result:
diff --git a/bilby/core/sampler/base_sampler.py b/bilby/core/sampler/base_sampler.py
index 36c14de033449511d5f5b76970ff2afc4cd59cfd..3c573afe2b1ec3e5d73d8b035fab2a83767f4baa 100644
--- a/bilby/core/sampler/base_sampler.py
+++ b/bilby/core/sampler/base_sampler.py
@@ -173,6 +173,13 @@ class Sampler(object):
         Whether the implemented sampler exits hard (:code:`os._exit` rather
         than :code:`sys.exit`). The latter can be escaped as :code:`SystemExit`.
         The former cannot.
+    sampler_name : str
+        Name of the sampler. This is used when creating the output directory for
+        the sampler.
+    abbreviation : str
+        Abbreviated name of the sampler. Does not have to be specified in child
+        classes. If set to a value other than :code:`None`, this will be used
+        instead of :code:`sampler_name` when creating the output directory.
 
     Raises
     ======
@@ -187,6 +194,8 @@ class Sampler(object):
 
     """
 
+    sampler_name = "sampler"
+    abbreviation = None
     default_kwargs = dict()
     npool_equiv_kwargs = [
         "npool",
@@ -248,9 +257,10 @@ class Sampler(object):
 
         self.exit_code = exit_code
 
+        self._log_likelihood_eval_time = np.nan
         if not soft_init:
             self._verify_parameters()
-            self._time_likelihood()
+            self._log_likelihood_eval_time = self._time_likelihood()
             self._verify_use_ratio()
 
         self.kwargs = kwargs
@@ -433,6 +443,10 @@ class Sampler(object):
         n_evaluations: int
             The number of evaluations to estimate the evaluation time from
 
+        Returns
+        =======
+        log_likelihood_eval_time: float
+            The time (in s) it took for one likelihood evaluation
         """
 
         t1 = datetime.datetime.now()
@@ -442,15 +456,16 @@ class Sampler(object):
             )[:, 0]
             self.log_likelihood(theta)
         total_time = (datetime.datetime.now() - t1).total_seconds()
-        self._log_likelihood_eval_time = total_time / n_evaluations
+        log_likelihood_eval_time = total_time / n_evaluations
 
-        if self._log_likelihood_eval_time == 0:
-            self._log_likelihood_eval_time = np.nan
+        if log_likelihood_eval_time == 0:
+            log_likelihood_eval_time = np.nan
             logger.info("Unable to measure single likelihood time")
         else:
             logger.info(
-                f"Single likelihood evaluation took {self._log_likelihood_eval_time:.3e} s"
+                f"Single likelihood evaluation took {log_likelihood_eval_time:.3e} s"
             )
+        return log_likelihood_eval_time
 
     def _verify_use_ratio(self):
         """
@@ -682,11 +697,11 @@ class Sampler(object):
         if self.cached_result is None:
             kwargs_print = self.kwargs.copy()
             for k in kwargs_print:
-                if type(kwargs_print[k]) in (list, np.ndarray):
+                if isinstance(kwargs_print[k], (list, np.ndarray)):
                     array_repr = np.array(kwargs_print[k])
                     if array_repr.size > 10:
                         kwargs_print[k] = f"array_like, shape={array_repr.shape}"
-                elif type(kwargs_print[k]) == DataFrame:
+                elif isinstance(kwargs_print[k], DataFrame):
                     kwargs_print[k] = f"DataFrame, shape={kwargs_print[k].shape}"
             logger.info(
                 f"Using sampler {self.__class__.__name__} with kwargs {kwargs_print}"
@@ -773,8 +788,37 @@ class Sampler(object):
     def write_current_state(self):
         raise NotImplementedError()
 
+    @classmethod
+    def get_expected_outputs(cls, outdir=None, label=None):
+        """Get lists of the expected outputs directories and files.
+
+        These are used by :code:`bilby_pipe` when transferring files via HTCondor.
+        Both can be empty. Defaults to a single directory:
+        :code:`"{outdir}/{name}_{label}/"`, where :code:`name`
+        is :code:`abbreviation` if it is defined for the sampler class, otherwise
+        it defaults to :code:`sampler_name`.
+
+        Parameters
+        ----------
+        outdir : str
+            The output directory.
+        label : str
+            The label for the run.
+
+        Returns
+        -------
+        list
+            List of file names.
+        list
+            List of directory names.
+        """
+        name = cls.abbreviation or cls.sampler_name
+        dirname = os.path.join(outdir, f"{name}_{label}", "")
+        return [], [dirname]
+
 
 class NestedSampler(Sampler):
+    sampler_name = "nested_sampler"
     npoints_equiv_kwargs = [
         "nlive",
         "nlives",
@@ -848,6 +892,7 @@ class NestedSampler(Sampler):
 
 
 class MCMCSampler(Sampler):
+    sampler_name = "mcmc_sampler"
     nwalkers_equiv_kwargs = ["nwalker", "nwalkers", "draws", "Niter"]
     nburn_equiv_kwargs = ["burn", "nburn"]
 
diff --git a/bilby/core/sampler/cpnest.py b/bilby/core/sampler/cpnest.py
index bc3b364656d26bcff0c14e3852bbbd394c5887cf..e777ebc67a5143fa8f41469b727df748f54735e1 100644
--- a/bilby/core/sampler/cpnest.py
+++ b/bilby/core/sampler/cpnest.py
@@ -40,6 +40,7 @@ class Cpnest(NestedSampler):
 
     """
 
+    sampler_name = "cpnest"
     default_kwargs = dict(
         verbose=3,
         nthreads=1,
@@ -182,7 +183,7 @@ class Cpnest(NestedSampler):
         if "proposals" in self.kwargs:
             if self.kwargs["proposals"] is None:
                 return
-            if type(self.kwargs["proposals"]) == JumpProposalCycle:
+            if isinstance(self.kwargs["proposals"], JumpProposalCycle):
                 self.kwargs["proposals"] = dict(
                     mhs=self.kwargs["proposals"], hmc=self.kwargs["proposals"]
                 )
diff --git a/bilby/core/sampler/dnest4.py b/bilby/core/sampler/dnest4.py
index a767ef89d47a013b3cba9dc3360c903ee5552832..87717f6fd6346b7c89548a5b579b6e3c0f365141 100644
--- a/bilby/core/sampler/dnest4.py
+++ b/bilby/core/sampler/dnest4.py
@@ -99,6 +99,7 @@ class DNest4(_TemporaryFileSamplerMixin, NestedSampler):
         If True, prints information during run
     """
 
+    sampler_name = "d4nest"
     default_kwargs = dict(
         max_num_levels=20,
         num_steps=500,
diff --git a/bilby/core/sampler/dynamic_dynesty.py b/bilby/core/sampler/dynamic_dynesty.py
index 294d8fd6d9ceee2e878ba0239cf64bb5fe8c94cf..8c7f2966ee6ea4e8934ade73b6ccd2582c019f1b 100644
--- a/bilby/core/sampler/dynamic_dynesty.py
+++ b/bilby/core/sampler/dynamic_dynesty.py
@@ -14,6 +14,7 @@ class DynamicDynesty(Dynesty):
     """
 
     external_sampler_name = "dynesty"
+    sampler_name = "dynamic_dynesty"
 
     @property
     def nlive(self):
diff --git a/bilby/core/sampler/dynesty.py b/bilby/core/sampler/dynesty.py
index 2a4d73d738effde9265e0e8a450b8e5a17d4f69a..852fb88c192195c7fb08d7bd9654dda9126e7ca5 100644
--- a/bilby/core/sampler/dynesty.py
+++ b/bilby/core/sampler/dynesty.py
@@ -123,6 +123,9 @@ class Dynesty(NestedSampler):
         The proposal methods to use during MCMC. This can be some combination
         of :code:`"diff", "volumetric"`. See the dynesty guide in the Bilby docs
         for more details. default=:code:`["diff"]`.
+    rstate: numpy.random.Generator (None)
+        Instance of a numpy random generator for generating random numbers.
+        Also see :code:`seed` in 'Other Parameters'.
 
     Other Parameters
     ================
@@ -143,8 +146,14 @@ class Dynesty(NestedSampler):
         has no impact on the sampling.
     dlogz: float, (0.1)
         Stopping criteria
+    seed: int (None)
+        Use to seed the random number generator if :code:`rstate` is not
+        specified.
     """
 
+    sampler_name = "dynesty"
+    sampling_seed_key = "seed"
+
     @property
     def _dynesty_init_kwargs(self):
         params = inspect.signature(self.sampler_init).parameters
@@ -176,6 +185,7 @@ class Dynesty(NestedSampler):
     def default_kwargs(self):
         kwargs = self._dynesty_init_kwargs
         kwargs.update(self._dynesty_sampler_kwargs)
+        kwargs["seed"] = None
         return kwargs
 
     def __init__(
@@ -230,8 +240,12 @@ class Dynesty(NestedSampler):
         self.nestcheck = nestcheck
 
         if self.n_check_point is None:
-            self.n_check_point = max(
-                int(check_point_delta_t / self._log_likelihood_eval_time / 10), 10
+            self.n_check_point = (
+                10
+                if np.isnan(self._log_likelihood_eval_time)
+                else max(
+                    int(check_point_delta_t / self._log_likelihood_eval_time / 10), 10
+                )
             )
         self.check_point_delta_t = check_point_delta_t
         logger.info(f"Checkpoint every check_point_delta_t = {check_point_delta_t}s")
@@ -265,6 +279,14 @@ class Dynesty(NestedSampler):
             for equiv in self.npool_equiv_kwargs:
                 if equiv in kwargs:
                     kwargs["queue_size"] = kwargs.pop(equiv)
+        if "seed" in kwargs:
+            seed = kwargs.get("seed")
+            if "rstate" not in kwargs:
+                kwargs["rstate"] = np.random.default_rng(seed)
+            else:
+                logger.warning(
+                    "Kwargs contain both 'rstate' and 'seed', ignoring 'seed'."
+                )
 
     def _verify_kwargs_against_default_kwargs(self):
         if not self.kwargs["walks"]:
@@ -278,6 +300,32 @@ class Dynesty(NestedSampler):
                 )
         Sampler._verify_kwargs_against_default_kwargs(self)
 
+    @classmethod
+    def get_expected_outputs(cls, outdir=None, label=None):
+        """Get lists of the expected outputs directories and files.
+
+        These are used by :code:`bilby_pipe` when transferring files via HTCondor.
+
+        Parameters
+        ----------
+        outdir : str
+            The output directory.
+        label : str
+            The label for the run.
+
+        Returns
+        -------
+        list
+            List of file names.
+        list
+            List of directory names. Will always be empty for dynesty.
+        """
+        filenames = []
+        for kind in ["resume", "dynesty"]:
+            filename = os.path.join(outdir, f"{label}_{kind}.pickle")
+            filenames.append(filename)
+        return filenames, []
+
     def _print_func(
         self,
         results,
@@ -446,7 +494,7 @@ class Dynesty(NestedSampler):
 
         if sample == "rwalk":
             logger.info(
-                "Using the bilby-implemented rwalk sample method with ACT estimated walks. "
+                f"Using the bilby-implemented {sample} sample method with ACT estimated walks. "
                 f"An average of {2 * self.nact} steps will be accepted up to chain length "
                 f"{self.maxmcmc}."
             )
@@ -460,7 +508,7 @@ class Dynesty(NestedSampler):
             dynesty.nestedsamplers._SAMPLING["rwalk"] = AcceptanceTrackingRWalk()
         elif sample == "acceptance-walk":
             logger.info(
-                "Using the bilby-implemented rwalk sampling with an average of "
+                f"Using the bilby-implemented {sample} sampling with an average of "
                 f"{self.naccept} accepted steps per MCMC and maximum length {self.maxmcmc}"
             )
             from .dynesty_utils import FixedRWalk
@@ -468,7 +516,7 @@ class Dynesty(NestedSampler):
             dynesty.nestedsamplers._SAMPLING["acceptance-walk"] = FixedRWalk()
         elif sample == "act-walk":
             logger.info(
-                "Using the bilby-implemented rwalk sampling tracking the "
+                f"Using the bilby-implemented {sample} sampling tracking the "
                 f"autocorrelation function and thinning by "
                 f"{self.nact} with maximum length {self.nact * self.maxmcmc}"
             )
@@ -604,6 +652,7 @@ class Dynesty(NestedSampler):
             sampling_time_s=self.sampling_time.seconds,
             ncores=self.kwargs.get("queue_size", 1),
         )
+        self.kwargs["rstate"] = None
 
     def _update_sampling_time(self):
         end_time = datetime.datetime.now()
@@ -717,6 +766,7 @@ class Dynesty(NestedSampler):
                 self.sampler.nqueue = -1
                 self.start_time = self.sampler.kwargs.pop("start_time")
                 self.sampling_time = self.sampler.kwargs.pop("sampling_time")
+                self.sampler.queue_size = self.kwargs["queue_size"]
                 self.sampler.pool = self.pool
                 if self.pool is not None:
                     self.sampler.M = self.pool.map
diff --git a/bilby/core/sampler/dynesty_utils.py b/bilby/core/sampler/dynesty_utils.py
index 1d959911236ddcaa56c458b1f5e85cf37de3f49b..fb7b8d20fc128b5d949ed0cee1e604c615276a47 100644
--- a/bilby/core/sampler/dynesty_utils.py
+++ b/bilby/core/sampler/dynesty_utils.py
@@ -68,7 +68,7 @@ class LivePointSampler(UnitCubeSampler):
         self.kwargs["live"] = self.live_u
         i = self.rstate.integers(self.nlive)
         u = self.live_u[i, :]
-        return u, np.identity(self.npdim)
+        return u, np.identity(self.ncdim)
 
 
 class MultiEllipsoidLivePointSampler(MultiEllipsoidSampler):
@@ -300,6 +300,8 @@ class ACTTrackingRWalk:
         )
         reject += nfail
         blob = {"accept": accept, "reject": reject, "scale": args.scale}
+        iact = int(np.ceil(self.act))
+        thin = self.thin * iact
 
         if accept == 0:
             logger.debug(
@@ -314,11 +316,9 @@ class ACTTrackingRWalk:
                 "Unable to find a new point using walk: try increasing maxmcmc"
             )
             self._cache.append((current_u, current_v, logl, ncall, blob))
-        elif self.thin == -1:
+        elif (self.thin == -1) or (len(u_list) <= thin):
             self._cache.append((current_u, current_v, logl, ncall, blob))
         else:
-            iact = int(np.ceil(self.act))
-            thin = self.thin * iact
             u_list = u_list[thin::thin]
             v_list = v_list[thin::thin]
             logl_list = logl_list[thin::thin]
diff --git a/bilby/core/sampler/emcee.py b/bilby/core/sampler/emcee.py
index 7253a0fa4c49affc236454e703310d55f45fc066..db88ee5a2207f85616f081aa3ee31585c5ad2da1 100644
--- a/bilby/core/sampler/emcee.py
+++ b/bilby/core/sampler/emcee.py
@@ -45,6 +45,7 @@ class Emcee(MCMCSampler):
 
     """
 
+    sampler_name = "emcee"
     default_kwargs = dict(
         nwalkers=500,
         a=2,
diff --git a/bilby/core/sampler/fake_sampler.py b/bilby/core/sampler/fake_sampler.py
index 5f375fdbad8055e6a5bdaf7dd7e99caabe330f33..9795631fb4951398bfaf474d2d338ae55d94d997 100644
--- a/bilby/core/sampler/fake_sampler.py
+++ b/bilby/core/sampler/fake_sampler.py
@@ -17,6 +17,8 @@ class FakeSampler(Sampler):
         A string pointing to the posterior data file to be loaded.
     """
 
+    sampler_name = "fake_sampler"
+
     default_kwargs = dict(
         verbose=True, logl_args=None, logl_kwargs=None, print_progress=True
     )
diff --git a/bilby/core/sampler/kombine.py b/bilby/core/sampler/kombine.py
index 1f09387cc33520a7c8408db7cd7af2a924fa85cf..bda7c6d4f06686bf48c142acf726a145da05c760 100644
--- a/bilby/core/sampler/kombine.py
+++ b/bilby/core/sampler/kombine.py
@@ -39,6 +39,7 @@ class Kombine(Emcee):
 
     """
 
+    sampler_name = "kombine"
     default_kwargs = dict(
         nwalkers=500,
         args=[],
diff --git a/bilby/core/sampler/nessai.py b/bilby/core/sampler/nessai.py
index b6f40f8aaca0c9e0bfe8f3a361aa54b1e2be4e60..65a650efd33f99fbbff4211c96c2b4647e53a5ae 100644
--- a/bilby/core/sampler/nessai.py
+++ b/bilby/core/sampler/nessai.py
@@ -20,6 +20,7 @@ class Nessai(NestedSampler):
     Documentation: https://nessai.readthedocs.io/
     """
 
+    sampler_name = "nessai"
     _default_kwargs = None
     _run_kwargs_list = None
     sampling_seed_key = "seed"
@@ -300,5 +301,30 @@ class Nessai(NestedSampler):
         self._log_interruption(signum=signum)
         sys.exit(self.exit_code)
 
+    @classmethod
+    def get_expected_outputs(cls, outdir=None, label=None):
+        """Get lists of the expected outputs directories and files.
+
+        These are used by :code:`bilby_pipe` when transferring files via HTCondor.
+
+        Parameters
+        ----------
+        outdir : str
+            The output directory.
+        label : str
+            The label for the run.
+
+        Returns
+        -------
+        list
+            List of file names. This will be empty for nessai.
+        list
+            List of directory names.
+        """
+        dirs = [os.path.join(outdir, f"{label}_{cls.sampler_name}", "")]
+        dirs += [os.path.join(dirs[0], d, "") for d in ["proposal", "diagnostics"]]
+        filenames = []
+        return filenames, dirs
+
     def _setup_pool(self):
         pass
diff --git a/bilby/core/sampler/nestle.py b/bilby/core/sampler/nestle.py
index ebd955376050c80bcb43277ff95688c6fb572be8..75d93bf69b242463fbfce9ce64011e5864796185 100644
--- a/bilby/core/sampler/nestle.py
+++ b/bilby/core/sampler/nestle.py
@@ -24,6 +24,7 @@ class Nestle(NestedSampler):
 
     """
 
+    sampler_name = "nestle"
     default_kwargs = dict(
         verbose=True,
         method="multi",
diff --git a/bilby/core/sampler/polychord.py b/bilby/core/sampler/polychord.py
index e43c5d50b248ba0fb12cd8d5bca97b0fee726c45..9391dd2026d2256df2cb9b89efc870c6a40b557c 100644
--- a/bilby/core/sampler/polychord.py
+++ b/bilby/core/sampler/polychord.py
@@ -1,3 +1,5 @@
+import os
+
 import numpy as np
 
 from .base_sampler import NestedSampler, signal_wrapper
@@ -20,6 +22,7 @@ class PyPolyChord(NestedSampler):
     To see what the keyword arguments are for, see the docstring of PyPolyChordSettings
     """
 
+    sampler_name = "pypolychord"
     default_kwargs = dict(
         use_polychord_defaults=False,
         nlive=None,
@@ -130,6 +133,28 @@ class PyPolyChord(NestedSampler):
         physical_parameters = samples[:, -self.ndim :]
         return log_likelihoods, physical_parameters
 
+    @classmethod
+    def get_expected_outputs(cls, outdir=None, label=None):
+        """Get lists of the expected outputs directories and files.
+
+        These are used by :code:`bilby_pipe` when transferring files via HTCondor.
+
+        Parameters
+        ----------
+        outdir : str
+            The output directory.
+        label : str
+            Ignored for pypolychord.
+
+        Returns
+        -------
+        list
+            List of file names. This will always be empty for pypolychord.
+        list
+            List of directory names.
+        """
+        return [], [os.path.join(outdir, "chains", "")]
+
     @property
     def _sample_file_directory(self):
         return self.outdir + "/chains"
diff --git a/bilby/core/sampler/ptemcee.py b/bilby/core/sampler/ptemcee.py
index 531fb102a0614015f4053e7326b87c5ec923dbfe..fd927235d370bbb0800ea805b64f8e9e495c9b78 100644
--- a/bilby/core/sampler/ptemcee.py
+++ b/bilby/core/sampler/ptemcee.py
@@ -128,6 +128,7 @@ class Ptemcee(MCMCSampler):
 
     """
 
+    sampler_name = "ptemcee"
     # Arguments used by ptemcee
     default_kwargs = dict(
         ntemps=10,
@@ -710,6 +711,29 @@ class Ptemcee(MCMCSampler):
             except Exception as e:
                 logger.info(f"mean_logl plot failed with exception {e}")
 
+    @classmethod
+    def get_expected_outputs(cls, outdir=None, label=None):
+        """Get lists of the expected outputs directories and files.
+
+        These are used by :code:`bilby_pipe` when transferring files via HTCondor.
+
+        Parameters
+        ----------
+        outdir : str
+            The output directory.
+        label : str
+            The label for the run.
+
+        Returns
+        -------
+        list
+            List of file names.
+        list
+            List of directory names. Will always be empty for ptemcee.
+        """
+        filenames = [f"{outdir}/{label}_checkpoint_resume.pickle"]
+        return filenames, []
+
 
 def get_minimum_stable_itertion(mean_array, frac, nsteps_min=10):
     nsteps = mean_array.shape[1]
diff --git a/bilby/core/sampler/ptmcmc.py b/bilby/core/sampler/ptmcmc.py
index 42279e018ed124cd117118e75949b60d74e3a302..f2a771cb0e24758b977926760e820f096268ba43 100644
--- a/bilby/core/sampler/ptmcmc.py
+++ b/bilby/core/sampler/ptmcmc.py
@@ -41,6 +41,8 @@ class PTMCMCSampler(MCMCSampler):
 
     """
 
+    sampler_name = "ptmcmcsampler"
+    abbreviation = "ptmcmc_temp"
     default_kwargs = {
         "p0": None,
         "Niter": 2 * 10**4 + 1,
diff --git a/bilby/core/sampler/pymc.py b/bilby/core/sampler/pymc.py
index e72aace4948c8b46e6b4d68e74940d378f42ff99..fd138b985e7a2e5628066426987704fca1625d6a 100644
--- a/bilby/core/sampler/pymc.py
+++ b/bilby/core/sampler/pymc.py
@@ -52,6 +52,7 @@ class Pymc(MCMCSampler):
 
     """
 
+    sampler_name = "pymc"
     default_kwargs = dict(
         draws=500,
         step=None,
diff --git a/bilby/core/sampler/pymultinest.py b/bilby/core/sampler/pymultinest.py
index 303acb705d3ed3d912ded1a132ebb2f4e138e8fe..0a9bb0aaf7c00bb376f377be3d652b1e5154d4be 100644
--- a/bilby/core/sampler/pymultinest.py
+++ b/bilby/core/sampler/pymultinest.py
@@ -34,6 +34,8 @@ class Pymultinest(_TemporaryFileSamplerMixin, NestedSampler):
 
     """
 
+    sampler_name = "pymultinest"
+    abbreviation = "pm"
     default_kwargs = dict(
         importance_nested_sampling=False,
         resume=True,
diff --git a/bilby/core/sampler/ultranest.py b/bilby/core/sampler/ultranest.py
index 542f862468c290d007f556ca6033bb30e78f2729..6aacfa99999b00e13dfafa4f69cd3657af1f6ec5 100644
--- a/bilby/core/sampler/ultranest.py
+++ b/bilby/core/sampler/ultranest.py
@@ -38,6 +38,8 @@ class Ultranest(_TemporaryFileSamplerMixin, NestedSampler):
         stepping behaviour is used.
     """
 
+    sampler_name = "ultranest"
+    abbreviation = "ultra"
     default_kwargs = dict(
         resume=True,
         show_status=True,
diff --git a/bilby/core/sampler/zeus.py b/bilby/core/sampler/zeus.py
index c7ae40da222201e5b29c53635c16c3edc94744f0..ad6e7edb8b1c91c694ab0a3e06382c3864e5fb5e 100644
--- a/bilby/core/sampler/zeus.py
+++ b/bilby/core/sampler/zeus.py
@@ -38,6 +38,7 @@ class Zeus(Emcee):
 
     """
 
+    sampler_name = "zeus"
     default_kwargs = dict(
         nwalkers=500,
         args=[],
diff --git a/bilby/core/utils/__init__.py b/bilby/core/utils/__init__.py
index 25d1eda934151fdcb85806dd336406a9f5ea7a90..bb59915642f7b144533437a8a0f7e41cc4d15b66 100644
--- a/bilby/core/utils/__init__.py
+++ b/bilby/core/utils/__init__.py
@@ -6,6 +6,7 @@ from .constants import *
 from .conversion import *
 from .counter import *
 from .docs import *
+from .entry_points import *
 from .introspection import *
 from .io import *
 from .log import *
diff --git a/bilby/core/utils/entry_points.py b/bilby/core/utils/entry_points.py
new file mode 100644
index 0000000000000000000000000000000000000000..305fc57040dfdbbbc1abec8c1aa82cbf8f63c25c
--- /dev/null
+++ b/bilby/core/utils/entry_points.py
@@ -0,0 +1,18 @@
+import sys
+if sys.version_info < (3, 10):
+    from importlib_metadata import entry_points
+else:
+    from importlib.metadata import entry_points
+
+
+def get_entry_points(group):
+    """Return a dictionary of entry points for a given group
+
+    Parameters
+    ----------
+    group: str
+        Entry points you wish to query
+    """
+    return {
+        custom.name: custom for custom in entry_points(group=group)
+    }
diff --git a/bilby/core/utils/io.py b/bilby/core/utils/io.py
index c55837bfded43ec3673e1114000e61b07b9821fa..57a2cd2dde0f7195fde00ecc6941eb2151751d60 100644
--- a/bilby/core/utils/io.py
+++ b/bilby/core/utils/io.py
@@ -154,13 +154,16 @@ def decode_bilby_json(dct):
         try:
             cls = getattr(import_module(dct["__module__"]), dct["__name__"])
         except AttributeError:
-            logger.debug(
+            logger.warning(
                 "Unknown prior class for parameter {}, defaulting to base Prior object".format(
                     dct["kwargs"]["name"]
                 )
             )
             from ..prior import Prior
 
+            for key in list(dct["kwargs"].keys()):
+                if key not in ["name", "latex_label", "unit", "minimum", "maximum", "boundary"]:
+                    dct["kwargs"].pop(key)
             cls = Prior
         obj = cls(**dct["kwargs"])
         return obj
@@ -264,9 +267,9 @@ def encode_for_hdf5(key, item):
 
     if isinstance(item, np.int_):
         item = int(item)
-    elif isinstance(item, np.float_):
+    elif isinstance(item, np.float64):
         item = float(item)
-    elif isinstance(item, np.complex_):
+    elif isinstance(item, np.complex128):
         item = complex(item)
     if isinstance(item, np.ndarray):
         # Numpy's wide unicode strings are not supported by hdf5
@@ -278,19 +281,22 @@ def encode_for_hdf5(key, item):
     elif item is None:
         output = "__none__"
     elif isinstance(item, list):
+        item_array = np.array(item)
         if len(item) == 0:
             output = item
-        elif isinstance(item[0], (str, bytes)) or item[0] is None:
+        elif np.issubdtype(item_array.dtype, np.number):
+            output = np.array(item)
+        elif issubclass(item_array.dtype.type, str) or None in item:
             output = list()
             for value in item:
                 if isinstance(value, str):
                     output.append(value.encode("utf-8"))
                 elif isinstance(value, bytes):
                     output.append(value)
-                else:
+                elif value is None:
                     output.append(b"__none__")
-        elif isinstance(item[0], (int, float, complex)):
-            output = np.array(item)
+                else:
+                    output.append(str(value).encode("utf-8"))
         else:
             raise ValueError(f'Cannot save {key}: {type(item)} type')
     elif isinstance(item, PriorDict):
diff --git a/bilby/core/utils/log.py b/bilby/core/utils/log.py
index 4884eba9cc76cf408fdcf8e5a794b6521b9ca5c2..ca86b814b3f583aea8aac2fdba3013644bd8cb00 100644
--- a/bilby/core/utils/log.py
+++ b/bilby/core/utils/log.py
@@ -22,7 +22,7 @@ def setup_logger(outdir='.', label=None, log_level='INFO', print_version=False):
         If true, print version information
     """
 
-    if type(log_level) is str:
+    if isinstance(log_level, str):
         try:
             level = getattr(logging, log_level.upper())
         except AttributeError:
@@ -34,14 +34,14 @@ def setup_logger(outdir='.', label=None, log_level='INFO', print_version=False):
     logger.propagate = False
     logger.setLevel(level)
 
-    if any([type(h) == logging.StreamHandler for h in logger.handlers]) is False:
+    if not any([isinstance(h, logging.StreamHandler) for h in logger.handlers]):
         stream_handler = logging.StreamHandler()
         stream_handler.setFormatter(logging.Formatter(
             '%(asctime)s %(name)s %(levelname)-8s: %(message)s', datefmt='%H:%M'))
         stream_handler.setLevel(level)
         logger.addHandler(stream_handler)
 
-    if any([type(h) == logging.FileHandler for h in logger.handlers]) is False:
+    if not any([isinstance(h, logging.FileHandler) for h in logger.handlers]):
         if label:
             Path(outdir).mkdir(parents=True, exist_ok=True)
             log_file = '{}/{}.log'.format(outdir, label)
diff --git a/bilby/gw/conversion.py b/bilby/gw/conversion.py
index 3e23caa8d0ba832e243b52891544bc13c6edcf9c..73f720f0d0e07c8a5db8b37785bb1fe4ed5c56a7 100644
--- a/bilby/gw/conversion.py
+++ b/bilby/gw/conversion.py
@@ -2263,17 +2263,15 @@ def compute_snrs(sample, likelihood, npool=1):
                 new_samples = [_compute_snrs(xx) for xx in tqdm(fill_args, file=sys.stdout)]
 
             for ii, ifo in enumerate(likelihood.interferometers):
-                matched_filter_snrs = list()
-                optimal_snrs = list()
-                mf_key = '{}_matched_filter_snr'.format(ifo.name)
-                optimal_key = '{}_optimal_snr'.format(ifo.name)
+                snr_updates = dict()
+                for key in new_samples[0][ii].snrs_as_sample.keys():
+                    snr_updates[f"{ifo.name}_{key}"] = list()
                 for new_sample in new_samples:
-                    matched_filter_snrs.append(new_sample[ii].complex_matched_filter_snr)
-                    optimal_snrs.append(new_sample[ii].optimal_snr_squared.real ** 0.5)
-
-                sample[mf_key] = matched_filter_snrs
-                sample[optimal_key] = optimal_snrs
-
+                    snr_update = new_sample[ii].snrs_as_sample
+                    for key, val in snr_update.items():
+                        snr_updates[f"{ifo.name}_{key}"].append(val)
+                for k, v in snr_updates.items():
+                    sample[k] = v
     else:
         logger.debug('Not computing SNRs.')
 
diff --git a/bilby/gw/detector/__init__.py b/bilby/gw/detector/__init__.py
index b3f8c2fe1d4c928e127cf88c782e0976bb882a65..3c7e9ad955e2c9e732ea922cd98f2880128247a0 100644
--- a/bilby/gw/detector/__init__.py
+++ b/bilby/gw/detector/__init__.py
@@ -239,7 +239,7 @@ def load_data_from_cache_file(
         An initialised interferometer object with strain data set to the
         appropriate data in the cache file and a PSD.
     """
-
+    import lal
     data_set = False
     psd_set = False
 
diff --git a/bilby/gw/detector/detectors/NEMO.interferometer b/bilby/gw/detector/detectors/NEMO.interferometer
new file mode 100644
index 0000000000000000000000000000000000000000..9b5b11101a36b366bb9e28b338d4b075d2b32c8a
--- /dev/null
+++ b/bilby/gw/detector/detectors/NEMO.interferometer
@@ -0,0 +1,18 @@
+# The proposed NEMO high frequency detector 
+# location here set to Gingin Western Australia
+# https://arxiv.org/abs/2007.03128 Figure 1 
+
+name = 'NEMO'
+power_spectral_density = PowerSpectralDensity(psd_file='NEMO_psd.txt')
+minimum_frequency = 10
+maximum_frequency = 2048
+
+length = 4
+latitude = -31.34
+longitude = 115.91
+elevation = 0.0
+xarm_azimuth = 2.0
+yarm_azimuth = 125.0
+
+
+
diff --git a/bilby/gw/detector/interferometer.py b/bilby/gw/detector/interferometer.py
index c150aa863b0c843afbc64e9a51d910dc40d35c67..01d01d1e660d17d22564597dbecda762e18ca945 100644
--- a/bilby/gw/detector/interferometer.py
+++ b/bilby/gw/detector/interferometer.py
@@ -90,7 +90,7 @@ class Interferometer(object):
         self.strain_data = InterferometerStrainData(
             minimum_frequency=minimum_frequency,
             maximum_frequency=maximum_frequency)
-        self.meta_data = dict()
+        self.meta_data = dict(name=name)
 
         if psi4_analysis:
             self.convert_to_psi4_analysis()
@@ -618,7 +618,7 @@ class Interferometer(object):
 
         Returns
         =======
-        float: The matched filter signal to noise ratio squared
+        complex: The matched filter signal to noise ratio
 
         """
         return gwutils.matched_filter_snr(
diff --git a/bilby/gw/detector/networks.py b/bilby/gw/detector/networks.py
index bb7b689a55f16a6f4c79251e5f8d227c9a7cd3b0..2d25cc30baf44ea241a9a8447be7c2325ec7f43a 100644
--- a/bilby/gw/detector/networks.py
+++ b/bilby/gw/detector/networks.py
@@ -25,12 +25,12 @@ class InterferometerList(list):
         """
 
         super(InterferometerList, self).__init__()
-        if type(interferometers) == str:
+        if isinstance(interferometers, str):
             raise TypeError("Input must not be a string")
         for ifo in interferometers:
-            if type(ifo) == str:
+            if isinstance(ifo, str):
                 ifo = get_empty_interferometer(ifo)
-            if type(ifo) not in [Interferometer, TriangularInterferometer]:
+            if not isinstance(ifo, (Interferometer, TriangularInterferometer)):
                 raise TypeError(
                     "Input list of interferometers are not all Interferometer objects"
                 )
diff --git a/bilby/gw/detector/noise_curves/highf_psd.txt b/bilby/gw/detector/noise_curves/NEMO_psd.txt
similarity index 100%
rename from bilby/gw/detector/noise_curves/highf_psd.txt
rename to bilby/gw/detector/noise_curves/NEMO_psd.txt
diff --git a/bilby/gw/eos/eos.py b/bilby/gw/eos/eos.py
index 5693fb33e927153ba3a577a592bc6fe557b5944e..2e962aa0c1822ab9f9a0b2db0b5be28f77891616 100644
--- a/bilby/gw/eos/eos.py
+++ b/bilby/gw/eos/eos.py
@@ -55,17 +55,17 @@ class TabularEOS(object):
     """
 
     def __init__(self, eos, sampling_flag=False, warning_flag=False):
-        from scipy.integrate import cumtrapz
+        from scipy.integrate import cumulative_trapezoid
 
         self.sampling_flag = sampling_flag
         self.warning_flag = warning_flag
 
-        if type(eos) == str:
+        if isinstance(eos, str):
             if eos in valid_eos_dict.keys():
                 table = np.loadtxt(valid_eos_dict[eos])
             else:
                 table = np.loadtxt(eos)
-        elif type(eos) == np.ndarray:
+        elif isinstance(eos, np.ndarray):
             table = eos
         else:
             raise ValueError("eos provided is invalid type please supply a str name, str path to ASCII file, "
@@ -83,7 +83,7 @@ class TabularEOS(object):
             self.warning_flag = True
         else:
             integrand = self.pressure / (self.energy_density + self.pressure)
-            self.pseudo_enthalpy = cumtrapz(integrand, np.log(self.pressure), initial=0) + integrand[0]
+            self.pseudo_enthalpy = cumulative_trapezoid(integrand, np.log(self.pressure), initial=0) + integrand[0]
 
             self.interp_energy_density_from_pressure = CubicSpline(np.log10(self.pressure),
                                                                    np.log10(self.energy_density),
diff --git a/bilby/gw/likelihood/base.py b/bilby/gw/likelihood/base.py
index 256237bfb9f8395a67a7152adb66fbd18892b9d1..e0a09c1e934eb8886e27831839602307d6aba532 100644
--- a/bilby/gw/likelihood/base.py
+++ b/bilby/gw/likelihood/base.py
@@ -128,6 +128,20 @@ class GravitationalWaveTransient(Likelihood):
                     setattr(self, key, other)
             return self
 
+        @property
+        def snrs_as_sample(self) -> dict:
+            """Get the SNRs of this object as a sample dictionary
+
+            Returns
+            =======
+            dict
+                The dictionary of SNRs labelled accordingly
+            """
+            return {
+                "matched_filter_snr" : self.complex_matched_filter_snr,
+                "optimal_snr" : self.optimal_snr_squared.real ** 0.5
+            }
+
     def __init__(
             self, interferometers, waveform_generator, time_marginalization=False,
             distance_marginalization=False, phase_marginalization=False, calibration_marginalization=False, priors=None,
diff --git a/bilby/gw/likelihood/multiband.py b/bilby/gw/likelihood/multiband.py
index 4e338e19a14a3bcd466f7f66546f9003aaca9be5..bec333e716dc10375a82d1db903ba30636de0e7b 100644
--- a/bilby/gw/likelihood/multiband.py
+++ b/bilby/gw/likelihood/multiband.py
@@ -517,7 +517,7 @@ class MBGravitationalWaveTransient(GravitationalWaveTransient):
         for ifo in self.interferometers:
             logger.info("Pre-computing linear coefficients for {}".format(ifo.name))
             fddata = np.zeros(N // 2 + 1, dtype=complex)
-            fddata[:len(ifo.frequency_domain_strain)][ifo.frequency_mask] += \
+            fddata[:len(ifo.frequency_domain_strain)][ifo.frequency_mask[:len(fddata)]] += \
                 ifo.frequency_domain_strain[ifo.frequency_mask] / ifo.power_spectral_density_array[ifo.frequency_mask]
             for b in range(self.number_of_bands):
                 Ks, Ke = self.Ks_Ke[b]
@@ -606,7 +606,7 @@ class MBGravitationalWaveTransient(GravitationalWaveTransient):
         for ifo in self.interferometers:
             logger.info("Pre-computing quadratic coefficients for {}".format(ifo.name))
             full_inv_psds = np.zeros(N // 2 + 1)
-            full_inv_psds[:len(ifo.power_spectral_density_array)][ifo.frequency_mask] = (
+            full_inv_psds[:len(ifo.power_spectral_density_array)][ifo.frequency_mask[:len(full_inv_psds)]] = (
                 1 / ifo.power_spectral_density_array[ifo.frequency_mask]
             )
             for b in range(self.number_of_bands):
diff --git a/bilby/gw/likelihood/relative.py b/bilby/gw/likelihood/relative.py
index 64fd81ab130753b079eaa9f6694088a3c6ef4667..2345b25c0e2cb28c3564fed285d187c676dfdc92 100644
--- a/bilby/gw/likelihood/relative.py
+++ b/bilby/gw/likelihood/relative.py
@@ -142,10 +142,6 @@ class RelativeBinningGravitationalWaveTransient(GravitationalWaveTransient):
         self.fiducial_polarizations = None
         self.per_detector_fiducial_waveforms = dict()
         self.per_detector_fiducial_waveform_points = dict()
-        self.bin_freqs = dict()
-        self.bin_inds = dict()
-        self.bin_widths = dict()
-        self.bin_centers = dict()
         self.set_fiducial_waveforms(self.fiducial_parameters)
         logger.info("Initial fiducial waveforms set up")
         self.setup_bins()
@@ -219,6 +215,8 @@ class RelativeBinningGravitationalWaveTransient(GravitationalWaveTransient):
             bin_inds.append(bin_index)
             bin_freqs.append(bin_freq)
         self.bin_inds = np.array(bin_inds, dtype=int)
+        self.bin_sizes = np.diff(bin_inds)
+        self.bin_sizes[-1] += 1
         self.bin_freqs = np.array(bin_freqs)
         self.number_of_bins = len(self.bin_inds) - 1
         logger.debug(
@@ -319,6 +317,10 @@ class RelativeBinningGravitationalWaveTransient(GravitationalWaveTransient):
             for edge in self.bin_freqs:
                 index = np.where(masked_frequency_array == edge)[0][0]
                 masked_bin_inds.append(index)
+            # For the last bin, make sure to include
+            # the last point in the frequency array
+            masked_bin_inds[-1] += 1
+
             masked_strain = interferometer.frequency_domain_strain[mask]
             masked_h0 = self.per_detector_fiducial_waveforms[interferometer.name][mask]
             masked_psd = interferometer.power_spectral_density_array[mask]
@@ -370,16 +372,15 @@ class RelativeBinningGravitationalWaveTransient(GravitationalWaveTransient):
             waveform_polarizations=signal_polarizations,
             interferometer=interferometer,
         )
-        f = interferometer.frequency_array
-        duplicated_r0, duplicated_r1, duplicated_fm = np.zeros((3, f.shape[0]), dtype=complex)
 
-        for i in range(self.number_of_bins):
-            idxs = slice(self.bin_inds[i], self.bin_inds[i + 1])
-            duplicated_fm[idxs] = self.bin_centers[i]
-            duplicated_r0[idxs] = r0[i]
-            duplicated_r1[idxs] = r1[i]
+        idxs = slice(self.bin_inds[0], self.bin_inds[-1] + 1)
+        duplicated_r0 = np.repeat(r0, self.bin_sizes)
+        duplicated_r1 = np.repeat(r1, self.bin_sizes)
+        duplicated_fm = np.repeat(self.bin_centers, self.bin_sizes)
 
-        full_waveform_ratio = duplicated_r0 + duplicated_r1 * (f - duplicated_fm)
+        f = interferometer.frequency_array
+        full_waveform_ratio = np.zeros(f.shape[0], dtype=complex)
+        full_waveform_ratio[idxs] = duplicated_r0 + duplicated_r1 * (f[idxs] - duplicated_fm)
         return fiducial_waveform * full_waveform_ratio
 
     def calculate_snrs(self, waveform_polarizations, interferometer, return_array=True):
diff --git a/bilby/gw/prior.py b/bilby/gw/prior.py
index 63b51790eaa5f05be5fe11f3c7b2c592f1abc32f..2176741113110de29a0499ba4ee2d525094c6d80 100644
--- a/bilby/gw/prior.py
+++ b/bilby/gw/prior.py
@@ -1362,6 +1362,7 @@ class HealPixMapPriorDist(BaseJointPriorDist):
         else:
             self.distance = False
             self.prob = self.hp.read_map(hp_file)
+        self.prob = self._check_norm(self.prob)
 
         super(HealPixMapPriorDist, self).__init__(names=names, bounds=bounds)
         self.distname = "hpmap"
@@ -1385,10 +1386,10 @@ class HealPixMapPriorDist(BaseJointPriorDist):
         """
         Method that builds the inverse cdf of the P(pixel) distribution for rescaling
         """
-        from scipy.integrate import cumtrapz
+        from scipy.integrate import cumulative_trapezoid
         yy = self._all_interped(self.pix_xx)
         yy /= np.trapz(yy, self.pix_xx)
-        YY = cumtrapz(yy, self.pix_xx, initial=0)
+        YY = cumulative_trapezoid(yy, self.pix_xx, initial=0)
         YY[-1] = 1
         self.inverse_cdf = interp1d(x=YY, y=self.pix_xx, bounds_error=True)
 
@@ -1436,7 +1437,7 @@ class HealPixMapPriorDist(BaseJointPriorDist):
                 self.update_distance(int(round(val)))
                 dist_samples[i] = self.distance_icdf(dist_samp[i])
         if self.distance:
-            sample = np.row_stack([sample[:, 0], sample[:, 1], dist_samples])
+            sample = np.vstack([sample[:, 0], sample[:, 1], dist_samples])
         return sample.reshape((-1, self.num_vars))
 
     def update_distance(self, pix_idx):
@@ -1501,9 +1502,7 @@ class HealPixMapPriorDist(BaseJointPriorDist):
         sample : array_like
             sample of ra, and dec (and distance if 3D=True)
         """
-        pixel_choices = np.arange(self.npix)
-        pixel_probs = self._check_norm(self.prob)
-        sample_pix = random.rng.choice(pixel_choices, size=size, p=pixel_probs, replace=True)
+        sample_pix = random.rng.choice(self.npix, size=size, p=self.prob, replace=True)
         sample = np.empty((size, self.num_vars))
         for samp in range(size):
             theta, ra = self.hp.pix2ang(self.nside, sample_pix[samp])
diff --git a/bilby/gw/source.py b/bilby/gw/source.py
index 84b1a3e9d93f23ee9b510b02106216aecc36297c..9c6dff23613549cd7788b05ee5c571af79091179 100644
--- a/bilby/gw/source.py
+++ b/bilby/gw/source.py
@@ -205,7 +205,7 @@ def gwsignal_binary_black_hole(frequency_array, mass_1, mass_2, luminosity_dista
             ]
             if EDOM:
                 failed_parameters = dict(mass_1=mass_1, mass_2=mass_2,
-                                         spin_1=(spin_1x, spin_2y, spin_1z),
+                                         spin_1=(spin_1x, spin_1y, spin_1z),
                                          spin_2=(spin_2x, spin_2y, spin_2z),
                                          luminosity_distance=luminosity_distance,
                                          iota=iota, phase=phase,
@@ -614,7 +614,7 @@ def _base_lal_cbc_fd_waveform(
             EDOM = (e.args[0] == 'Internal function call failed: Input domain error')
             if EDOM:
                 failed_parameters = dict(mass_1=mass_1, mass_2=mass_2,
-                                         spin_1=(spin_1x, spin_2y, spin_1z),
+                                         spin_1=(spin_1x, spin_1y, spin_1z),
                                          spin_2=(spin_2x, spin_2y, spin_2z),
                                          luminosity_distance=luminosity_distance,
                                          iota=iota, phase=phase,
@@ -1124,7 +1124,7 @@ def _base_waveform_frequency_sequence(
             EDOM = (e.args[0] == 'Internal function call failed: Input domain error')
             if EDOM:
                 failed_parameters = dict(mass_1=mass_1, mass_2=mass_2,
-                                         spin_1=(spin_1x, spin_2y, spin_1z),
+                                         spin_1=(spin_1x, spin_1y, spin_1z),
                                          spin_2=(spin_2x, spin_2y, spin_2z),
                                          luminosity_distance=luminosity_distance,
                                          iota=iota, phase=phase)
diff --git a/bilby/gw/utils.py b/bilby/gw/utils.py
index 37f2cd77a90094b300f22efd0bc976668f63f69c..b42f9034719b0c76b9fe84f8c6f9c034d3d21464 100644
--- a/bilby/gw/utils.py
+++ b/bilby/gw/utils.py
@@ -398,7 +398,7 @@ def read_frame_file(file_name, start_time, end_time, resample=None, channel=None
             strain = TimeSeries.read(source=file_name, channel=channel, start=start_time, end=end_time, **kwargs)
             loaded = True
             logger.info('Successfully loaded {}.'.format(channel))
-        except RuntimeError:
+        except (RuntimeError, ValueError):
             logger.warning('Channel {} not found. Trying preset channel names'.format(channel))
 
     if loaded is False:
@@ -418,12 +418,12 @@ def read_frame_file(file_name, start_time, end_time, resample=None, channel=None
                                              **kwargs)
                     loaded = True
                     logger.info('Successfully read strain data for channel {}.'.format(channel))
-                except RuntimeError:
+                except (RuntimeError, ValueError):
                     pass
 
     if loaded:
         if resample and (strain.sample_rate.value != resample):
-            strain.resample(resample)
+            strain = strain.resample(resample)
         return strain
     else:
         logger.warning('No data loaded.')
diff --git a/containers/dockerfile-template b/containers/dockerfile-template
index 4c52c15c849aa0276094b98e3841154d7b897347..f0ef592f07338d09fd488eed7b6209fde646d44f 100644
--- a/containers/dockerfile-template
+++ b/containers/dockerfile-template
@@ -1,52 +1,18 @@
 FROM containers.ligo.org/docker/base:conda
 LABEL name="bilby CI testing" \
-maintainer="Gregory Ashton <gregory.ashton@ligo.org>"
-
-RUN conda update -n base -c defaults conda
+maintainer="Gregory Ashton <gregory.ashton@ligo.org>, Colm Talbot <colm.talbot@ligo.org>"
 
+COPY env-template.yml env.yml
+RUN echo "  - python={python_major_version}.{python_minor_version}" >> env.yml
 ENV conda_env python{python_major_version}{python_minor_version}
 
-RUN conda create -n ${{conda_env}} python={python_major_version}.{python_minor_version}
+RUN mamba env create -f env.yml -n ${{conda_env}}
 RUN echo "source activate ${{conda_env}}" > ~/.bashrc
 ENV PATH /opt/conda/envs/${{conda_env}}/bin:$PATH
 RUN /bin/bash -c "source activate ${{conda_env}}"
-RUN conda info
+RUN mamba info
 RUN python --version
 
-# Install conda-installable programs
-RUN conda install -n ${{conda_env}} -y matplotlib numpy scipy pandas astropy flake8
-RUN conda install -n ${{conda_env}} -c anaconda coverage configargparse future dill
-RUN conda install -n ${{conda_env}} -c conda-forge black pytest-cov deepdish arviz
-
-# Install pip-requirements
-RUN pip install --upgrade pip
-RUN pip install --upgrade setuptools coverage-badge parameterized
-
-# Install documentation requirements
-RUN pip install sphinx numpydoc nbsphinx sphinx_rtd_theme sphinx-tabs autodoc
-
-# Install testing requirements
-RUN conda install -n ${{conda_env}} -c conda-forge scikit-image celerite george
-
-# Install dependencies and samplers
-RUN pip install corner healpy cython tables
-RUN conda install -n ${{conda_env}} {conda_samplers} -c conda-forge -c pytorch
-
-# Install Polychord
-RUN apt-get update --allow-releaseinfo-change
-RUN apt-get install -y build-essential
-RUN apt-get install -y libblas3 libblas-dev
-RUN apt-get install -y liblapack3 liblapack-dev
-RUN apt-get install -y libatlas3-base libatlas-base-dev
-RUN apt-get install -y gfortran
-
-RUN git clone https://github.com/PolyChord/PolyChordLite.git \
-&& (cd PolyChordLite && python setup.py --no-mpi install)
-
-# Install GW packages
-RUN conda install -n ${{conda_env}} -c conda-forge python-lalsimulation bilby.cython pyseobnr
-RUN pip install ligo-gracedb gwpy ligo.skymap
-
 # Add the ROQ data to the image
 RUN mkdir roq_basis \
     && cd roq_basis \
diff --git a/containers/env-template.yml b/containers/env-template.yml
new file mode 100644
index 0000000000000000000000000000000000000000..647709a2734f27eb1a7e973bf21285ac3b7abe52
--- /dev/null
+++ b/containers/env-template.yml
@@ -0,0 +1,76 @@
+# This is a template yaml file for the test image
+# The python version should be added before creating the env
+channels:
+  - conda-forge
+  - defaults
+  - pytorch
+dependencies:
+  - pip
+  - setuptools
+  - setuptools_scm
+  - matplotlib
+  - numpy
+  - scipy
+  - pandas
+  - astropy
+  - flake8
+  - anaconda
+  - coverage
+  - configargparse
+  - future
+  - dill
+  - black
+  - pytest-cov
+  - pytest-requires
+  - conda-forge::arviz
+  - parameterized
+  - scikit-image
+  - celerite
+  - george
+  - corner
+  - healpy
+  - cython
+  - pytables
+  - pytorch
+  - cpuonly
+  - python-lalsimulation
+  - bilby.cython
+  - pyseobnr
+  - ligo-gracedb
+  - gwpy
+  - ligo.skymap
+  - sphinx
+  - numpydoc
+  - nbsphinx
+  - sphinx_rtd_theme
+  - sphinx-tabs
+  - dynesty
+  - emcee
+  - nestle
+  - ptemcee
+  - pymultinest
+  - ultranest
+  - cpnest
+  - kombine
+  - dnest4
+  - zeus-mcmc
+  - pytorch
+  - pymc>=5.9
+  - nessai
+  - ptmcmcsampler
+  - jaxlib>=0.4
+  - jax>=0.4
+  - numba>0.53.1
+  - make
+  - pre-commit
+  - pandoc
+  - ipython
+  - jupyter
+  - nbconvert
+  - twine
+  - glasflow
+  - myst-parser
+  - pip:
+    - autodoc
+    - ipykernel
+    - build
diff --git a/containers/v3-dockerfile-test-suite-python310 b/containers/v3-dockerfile-test-suite-python310
index c6af1c7410153c9f370954b7b702ded473b13431..b7cbab298e60d68919d71d45639afacd003942fb 100644
--- a/containers/v3-dockerfile-test-suite-python310
+++ b/containers/v3-dockerfile-test-suite-python310
@@ -2,53 +2,19 @@
 
 FROM containers.ligo.org/docker/base:conda
 LABEL name="bilby CI testing" \
-maintainer="Gregory Ashton <gregory.ashton@ligo.org>"
-
-RUN conda update -n base -c defaults conda
+maintainer="Gregory Ashton <gregory.ashton@ligo.org>, Colm Talbot <colm.talbot@ligo.org>"
 
+COPY env-template.yml env.yml
+RUN echo "  - python=3.10" >> env.yml
 ENV conda_env python310
 
-RUN conda create -n ${conda_env} python=3.10
+RUN mamba env create -f env.yml -n ${conda_env}
 RUN echo "source activate ${conda_env}" > ~/.bashrc
 ENV PATH /opt/conda/envs/${conda_env}/bin:$PATH
 RUN /bin/bash -c "source activate ${conda_env}"
-RUN conda info
+RUN mamba info
 RUN python --version
 
-# Install conda-installable programs
-RUN conda install -n ${conda_env} -y matplotlib numpy scipy pandas astropy flake8
-RUN conda install -n ${conda_env} -c anaconda coverage configargparse future dill
-RUN conda install -n ${conda_env} -c conda-forge black pytest-cov deepdish arviz
-
-# Install pip-requirements
-RUN pip install --upgrade pip
-RUN pip install --upgrade setuptools coverage-badge parameterized
-
-# Install documentation requirements
-RUN pip install sphinx numpydoc nbsphinx sphinx_rtd_theme sphinx-tabs autodoc
-
-# Install testing requirements
-RUN conda install -n ${conda_env} -c conda-forge scikit-image celerite george
-
-# Install dependencies and samplers
-RUN pip install corner healpy cython tables
-RUN conda install -n ${conda_env} dynesty emcee nestle ptemcee pymultinest ultranest cpnest kombine dnest4 zeus-mcmc pytorch 'pymc>=4' nessai ptmcmcsampler -c conda-forge -c pytorch
-
-# Install Polychord
-RUN apt-get update --allow-releaseinfo-change
-RUN apt-get install -y build-essential
-RUN apt-get install -y libblas3 libblas-dev
-RUN apt-get install -y liblapack3 liblapack-dev
-RUN apt-get install -y libatlas3-base libatlas-base-dev
-RUN apt-get install -y gfortran
-
-RUN git clone https://github.com/PolyChord/PolyChordLite.git \
-&& (cd PolyChordLite && python setup.py --no-mpi install)
-
-# Install GW packages
-RUN conda install -n ${conda_env} -c conda-forge python-lalsimulation bilby.cython pyseobnr
-RUN pip install ligo-gracedb gwpy ligo.skymap
-
 # Add the ROQ data to the image
 RUN mkdir roq_basis \
     && cd roq_basis \
diff --git a/containers/v3-dockerfile-test-suite-python311 b/containers/v3-dockerfile-test-suite-python311
new file mode 100644
index 0000000000000000000000000000000000000000..85d27d405a58558629bde0e13d924a6fb9ac81ec
--- /dev/null
+++ b/containers/v3-dockerfile-test-suite-python311
@@ -0,0 +1,27 @@
+# This dockerfile is written automatically and should not be modified by hand.
+
+FROM containers.ligo.org/docker/base:conda
+LABEL name="bilby CI testing" \
+maintainer="Gregory Ashton <gregory.ashton@ligo.org>, Colm Talbot <colm.talbot@ligo.org>"
+
+COPY env-template.yml env.yml
+RUN echo "  - python=3.11" >> env.yml
+ENV conda_env python311
+
+RUN mamba env create -f env.yml -n ${conda_env}
+RUN echo "source activate ${conda_env}" > ~/.bashrc
+ENV PATH /opt/conda/envs/${conda_env}/bin:$PATH
+RUN /bin/bash -c "source activate ${conda_env}"
+RUN mamba info
+RUN python --version
+
+# Add the ROQ data to the image
+RUN mkdir roq_basis \
+    && cd roq_basis \
+    && wget https://git.ligo.org/lscsoft/ROQ_data/raw/master/IMRPhenomPv2/4s/B_linear.npy \
+    && wget https://git.ligo.org/lscsoft/ROQ_data/raw/master/IMRPhenomPv2/4s/B_quadratic.npy \
+    && wget https://git.ligo.org/lscsoft/ROQ_data/raw/master/IMRPhenomPv2/4s/fnodes_linear.npy \
+    && wget https://git.ligo.org/lscsoft/ROQ_data/raw/master/IMRPhenomPv2/4s/fnodes_quadratic.npy \
+    && wget https://git.ligo.org/lscsoft/ROQ_data/raw/master/IMRPhenomPv2/4s/params.dat \
+    && wget https://git.ligo.org/soichiro.morisaki/roq_basis/raw/main/IMRPhenomD/16s_nospins/basis_addcal.hdf5 \
+    && wget https://git.ligo.org/soichiro.morisaki/roq_basis/raw/main/IMRPhenomD/16s_nospins/basis_multiband_addcal.hdf5
diff --git a/containers/v3-dockerfile-test-suite-python39 b/containers/v3-dockerfile-test-suite-python39
index 1cff66c50a1067d200efcad3c3cb433d3f173b20..9c1d5a04c4e70181911aad76a70592cca63e0d43 100644
--- a/containers/v3-dockerfile-test-suite-python39
+++ b/containers/v3-dockerfile-test-suite-python39
@@ -2,53 +2,19 @@
 
 FROM containers.ligo.org/docker/base:conda
 LABEL name="bilby CI testing" \
-maintainer="Gregory Ashton <gregory.ashton@ligo.org>"
-
-RUN conda update -n base -c defaults conda
+maintainer="Gregory Ashton <gregory.ashton@ligo.org>, Colm Talbot <colm.talbot@ligo.org>"
 
+COPY env-template.yml env.yml
+RUN echo "  - python=3.9" >> env.yml
 ENV conda_env python39
 
-RUN conda create -n ${conda_env} python=3.9
+RUN mamba env create -f env.yml -n ${conda_env}
 RUN echo "source activate ${conda_env}" > ~/.bashrc
 ENV PATH /opt/conda/envs/${conda_env}/bin:$PATH
 RUN /bin/bash -c "source activate ${conda_env}"
-RUN conda info
+RUN mamba info
 RUN python --version
 
-# Install conda-installable programs
-RUN conda install -n ${conda_env} -y matplotlib numpy scipy pandas astropy flake8
-RUN conda install -n ${conda_env} -c anaconda coverage configargparse future dill
-RUN conda install -n ${conda_env} -c conda-forge black pytest-cov deepdish arviz
-
-# Install pip-requirements
-RUN pip install --upgrade pip
-RUN pip install --upgrade setuptools coverage-badge parameterized
-
-# Install documentation requirements
-RUN pip install sphinx numpydoc nbsphinx sphinx_rtd_theme sphinx-tabs autodoc
-
-# Install testing requirements
-RUN conda install -n ${conda_env} -c conda-forge scikit-image celerite george
-
-# Install dependencies and samplers
-RUN pip install corner healpy cython tables
-RUN conda install -n ${conda_env} dynesty emcee nestle ptemcee pymultinest ultranest cpnest kombine dnest4 zeus-mcmc pytorch 'pymc>=4' nessai ptmcmcsampler -c conda-forge -c pytorch
-
-# Install Polychord
-RUN apt-get update --allow-releaseinfo-change
-RUN apt-get install -y build-essential
-RUN apt-get install -y libblas3 libblas-dev
-RUN apt-get install -y liblapack3 liblapack-dev
-RUN apt-get install -y libatlas3-base libatlas-base-dev
-RUN apt-get install -y gfortran
-
-RUN git clone https://github.com/PolyChord/PolyChordLite.git \
-&& (cd PolyChordLite && python setup.py --no-mpi install)
-
-# Install GW packages
-RUN conda install -n ${conda_env} -c conda-forge python-lalsimulation bilby.cython pyseobnr
-RUN pip install ligo-gracedb gwpy ligo.skymap
-
 # Add the ROQ data to the image
 RUN mkdir roq_basis \
     && cd roq_basis \
diff --git a/containers/write_dockerfiles.py b/containers/write_dockerfiles.py
index 60dbf751f080e546ddab7dd63941e4f07b199526..f12c071ef711ac76ec4c5f6093073b4875e25bdf 100644
--- a/containers/write_dockerfiles.py
+++ b/containers/write_dockerfiles.py
@@ -3,15 +3,9 @@ from datetime import date
 with open("dockerfile-template", "r") as ff:
     template = ff.read()
 
-python_versions = [(3, 9), (3, 10)]
+python_versions = [(3, 9), (3, 10), (3, 11)]
 today = date.today().strftime("%Y%m%d")
 
-samplers = [
-    "dynesty", "emcee", "nestle", "ptemcee", "pymultinest", "ultranest",
-    "cpnest", "kombine", "dnest4", "zeus-mcmc",
-    "pytorch", "'pymc>=4'", "nessai", "ptmcmcsampler",
-]
-
 for python_major_version, python_minor_version in python_versions:
     key = f"python{python_major_version}{python_minor_version}"
     with open(
@@ -26,5 +20,4 @@ for python_major_version, python_minor_version in python_versions:
             date=today,
             python_major_version=python_major_version,
             python_minor_version=python_minor_version,
-            conda_samplers=" ".join(samplers)
         ))
diff --git a/docs/.gitignore b/docs/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..8c6492ca8e2ce80865b32f8c59e1f717a1d46353
--- /dev/null
+++ b/docs/.gitignore
@@ -0,0 +1 @@
+api/
diff --git a/docs/citing-bilby.txt b/docs/citing-bilby.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d91e289b155592ccc593889cbfcb8af52a527784
--- /dev/null
+++ b/docs/citing-bilby.txt
@@ -0,0 +1,190 @@
+=======================================
+Acknowledging/Citing Bilby
+=======================================
+
+If you have used Bilby in your scientific work, please acknowledge us in your papers/proposals. 
+
+ .. code:: bibtex
+
+    @article{bilby_paper,
+        author = "Ashton, Gregory and others",
+        title = "{BILBY: A user-friendly Bayesian inference library for gravitational-wave astronomy}",
+        eprint = "1811.02042",
+        archivePrefix = "arXiv",
+        primaryClass = "astro-ph.IM",
+        doi = "10.3847/1538-4365/ab06fc",
+        journal = "Astrophys. J. Suppl.",
+        volume = "241",
+        number = "2",
+        pages = "27",
+        year = "2019"
+    }
+
+Additionally, if you used `bilby_pipe` or `parallel_bilby`, please cite the appropriate papers mentioned below:
+
+ .. code:: bibtex
+
+    @article{bilby_pipe_paper,
+        author = "Romero-Shaw, I. M. and others",
+        title = "{Bayesian inference for compact binary coalescences with bilby: validation and application to the first LIGO\textendash{}Virgo gravitational-wave transient catalogue}",
+        eprint = "2006.00714",
+        archivePrefix = "arXiv",
+        primaryClass = "astro-ph.IM",
+        doi = "10.1093/mnras/staa2850",
+        journal = "Mon. Not. Roy. Astron. Soc.",
+        volume = "499",
+        number = "3",
+        pages = "3295--3319",
+        year = "2020"
+    }
+
+    @article{pbilby_paper,
+        author = "Smith, Rory J. E. and Ashton, Gregory and Vajpeyi, Avi and Talbot, Colm",
+        title = "{Massively parallel Bayesian inference for transient gravitational-wave astronomy}",
+        eprint = "1909.11873",
+        archivePrefix = "arXiv",
+        primaryClass = "gr-qc",
+        reportNumber = "LIGO Document P1900255-v1",
+        doi = "10.1093/mnras/staa2483",
+        journal = "Mon. Not. Roy. Astron. Soc.",
+        volume = "498",
+        number = "3",
+        pages = "4492--4502",
+        year = "2020"
+    }
+
+If you use any of the accelerated likelihoods like `ROQGravitationalWaveTransient`, `MBGravitationalWaveTransient`, `RelativeBinningGravitationalWaveTransient` etc., please cite the following papers in addition to the above.
+
+- `ROQGravitationalWaveTransient`
+ .. code:: bibtex
+
+     @article{roq_paper_1,
+        author = {Smith, Rory and Field, Scott E. and Blackburn, Kent and Haster, Carl-Johan and P\"urrer, Michael and Raymond, Vivien and Schmidt, Patricia},
+        title = "{Fast and accurate inference on gravitational waves from precessing compact binaries}",
+        eprint = "1604.08253",
+        archivePrefix = "arXiv",
+        primaryClass = "gr-qc",
+        reportNumber = "LIGO-DOCUMENT-NUMBER-P1600096, LIGO-P1600096",
+        doi = "10.1103/PhysRevD.94.044031",
+        journal = "Phys. Rev. D",
+        volume = "94",
+        number = "4",
+        pages = "044031",
+        year = "2016"
+    }
+
+    @article{roq_paper_2,
+        author = "Morisaki, Soichiro and Smith, Rory and Tsukada, Leo and Sachdev, Surabhi and Stevenson, Simon and Talbot, Colm and Zimmerman, Aaron",
+        title = "{Rapid localization and inference on compact binary coalescences with the Advanced LIGO-Virgo-KAGRA gravitational-wave detector network}",
+        eprint = "2307.13380",
+        archivePrefix = "arXiv",
+        primaryClass = "gr-qc",
+        doi = "10.1103/PhysRevD.108.123040",
+        journal = "Phys. Rev. D",
+        volume = "108",
+        number = "12",
+        pages = "123040",
+        year = "2023"
+    }
+
+
+- `MBGravitationalWaveTransient`
+ .. code:: bibtex
+
+    @article{mb_paper,
+        author = "Morisaki, Soichiro",
+        title = "{Accelerating parameter estimation of gravitational waves from compact binary coalescence using adaptive frequency resolutions}",
+        eprint = "2104.07813",
+        archivePrefix = "arXiv",
+        primaryClass = "gr-qc",
+        doi = "10.1103/PhysRevD.104.044062",
+        journal = "Phys. Rev. D",
+        volume = "104",
+        number = "4",
+        pages = "044062",
+        year = "2021"
+    }
+
+
+- `RelativeBinningGravitationalWaveTransient`
+ .. code:: bibtex
+    
+    @article{relbin_bilby,
+        author = "Krishna, Kruthi and Vijaykumar, Aditya and Ganguly, Apratim and Talbot, Colm and Biscoveanu, Sylvia and George, Richard N. and Williams, Natalie and Zimmerman, Aaron",
+        title = "{Accelerated parameter estimation in Bilby with relative binning}",
+        eprint = "2312.06009",
+        archivePrefix = "arXiv",
+        primaryClass = "gr-qc",
+        month = "12",
+        year = "2023"
+    }
+
+    @article{relbin_cornish,
+        author = "Cornish, Neil J.",
+        title = "{Fast Fisher Matrices and Lazy Likelihoods}",
+        eprint = "1007.4820",
+        archivePrefix = "arXiv",
+        primaryClass = "gr-qc",
+        month = "7",
+        year = "2010"
+    }
+
+    @article{relbin_zackay,
+        author = "Zackay, Barak and Dai, Liang and Venumadhav, Tejaswi",
+        title = "{Relative Binning and Fast Likelihood Evaluation for Gravitational Wave Parameter Estimation}",
+        eprint = "1806.08792",
+        archivePrefix = "arXiv",
+        primaryClass = "astro-ph.IM",
+        month = "6",
+        year = "2018"
+    } 
+
+If you use the :code:`bilby_mcmc` sampler, please additionally cite the following paper
+
+ .. code:: bibtex
+
+    @article{bilby_mcmc_paper,
+        author = "Ashton, Gregory and Talbot, Colm",
+        title = "{B\,ilby-MCMC: an MCMC sampler for gravitational-wave inference}",
+        eprint = "2106.08730",
+        archivePrefix = "arXiv",
+        primaryClass = "gr-qc",
+        doi = "10.1093/mnras/stab2236",
+        journal = "Mon. Not. Roy. Astron. Soc.",
+        volume = "507",
+        number = "2",
+        pages = "2037--2051",
+        year = "2021"
+    }
+
+Additionally, :code:`bilby` builds on a number of open-source packages. If you
+make use of this functionality in your publications, we recommend you cite them
+as requested in their associated documentation.
+
+**Samplers**
+* `cpnest <https://github.com/johnveitch/cpnest>`__
+* `dnest4 <https://github.com/eggplantbren/DNest4>`__
+* `dynesty <https://github.com/joshspeagle/dynesty>`__
+* `emcee <https://github.com/dfm/emcee>`__
+* `kombine <https://github.com/bfarr/kombine>`__
+* `nestle <https://github.com/kbarbary/nestle>`__
+* `nessai <https://github.com/mj-will/nessai>`__
+* `PyMC3 <https://github.com/pymc-devs/pymc3>`__
+* `pymultinest <https://github.com/JohannesBuchner/PyMultiNest>`__
+* `pypolychord <https://github.com/PolyChord/PolyChordLite>`__
+* `ptemcee <https://github.com/willvousden/ptemcee>`__
+* `ptmcmcsampler <https://github.com/jellis18/PTMCMCSampler>`__
+* `ultranest <https://github.com/JohannesBuchner/UltraNest>`__
+* `zeus <https://github.com/minaskar/zeus>`_
+
+
+**Gravitational-wave tools**
+
+* `gwpy <https://github.com/gwpy/gwpy>`__
+* `lalsuite <https://git.ligo.org/lscsoft/lalsuite>`__
+* `astropy <https://github.com/astropy/astropy>`__
+
+**Plotting**
+
+* `corner <https://github.com/dfm/corner.py>`__ for generating corner plot
+* `matplotlib <https://github.com/matplotlib/matplotlib>`__ for general plotting routines
diff --git a/docs/conf.py b/docs/conf.py
index 1384090b85fe8acbd576a0c994abca7d59e1206e..e4641bc4cc0171d6afcb9c36285a4d4b250d7dc6 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -66,6 +66,7 @@ extensions = [
     'sphinx.ext.autosectionlabel',
     'sphinx_tabs.tabs',
     "sphinx.ext.linkcode",
+    'myst_parser'
 ]
 autosummary_generate = True
 
diff --git a/docs/contribution-guide.md b/docs/contribution-guide.md
new file mode 100644
index 0000000000000000000000000000000000000000..78caf34e38b3ffedd4c57ce35333f5c8888a5ad1
--- /dev/null
+++ b/docs/contribution-guide.md
@@ -0,0 +1,2 @@
+```{include} ../CONTRIBUTING.md
+```
diff --git a/docs/dynesty-guide.txt b/docs/dynesty-guide.txt
index 61b6a33be7efd56097a9c64cc573ca95c5d2e031..b04a6d40d4f64877a8bf7df7fd5a2a2df86b8159 100644
--- a/docs/dynesty-guide.txt
+++ b/docs/dynesty-guide.txt
@@ -37,9 +37,10 @@ new point from the constrained prior. These can be specified using the
    iterations of :code:`dynesty`. With this method, :code:`nact=2` often gives good
    results, however, in some cases, a larger value may be required.
 
-2. :code:`sample="acceptance-walk"`: with this method, the length of each MCMC
-   chain is predetermined. The specific length evolves during the run to yield an
-   average of :code:`naccept` accepted jumps during each chain. This method is well
+2. :code:`sample="acceptance-walk"`: with this method, at each iteration all MCMC chains
+   are set to the same length. The specific length evolves during the run so that the
+   number of accepted steps follows a Poisson distribution with mean :code:`naccept`
+   during each chain. This method is well
    suited to parallelised applications, as each MCMC chain will run for the same
    amount of time. The value of :code:`naccept` should be tuned based on the
    application. For example, one could run a single analysis with
@@ -62,7 +63,9 @@ There are a number of keyword arguments that influence these sampling methods:
    for consistency with other codes. Default is 5000. If this limit is reached,
    a warning will be printed during sampling.
 
-#. :code:`proposals`: a list of the desired proposals. The allowed values are
+#. :code:`proposals`: a list of the desired proposals.
+   Each of these proposals can be used with any of the sampling methods described
+   above. The allowed values are
 
   * :code:`diff`: `ter Braak + (2006) <https://doi.org/10.1007/s11222-006-8769-1>`_
     differential evolution. This is the default for :code:`bound="live"` and
@@ -70,7 +73,9 @@ There are a number of keyword arguments that influence these sampling methods:
 
   * :code:`volumetric`: sample from an ellipsoid centered on the current point.
     This is the proposal distribution implemented in :code:`dynesty` and the
-    default for all other :code:`bound` options.
+    default for all other :code:`bound` options. This was the default proposal
+    distribution for :code:`Bilby<2`, however, in many applications it leads to
+    longer autocorrelation times and struggles to explore multi-modal distributions.
 
 Finally, we implement two custom :code:`dynesty.sampler.Sampler` classes to
 facilitate the differential evolution proposal and average acceptance tracking.
diff --git a/docs/faq.txt b/docs/faq.txt
index 0818b3e4a0ba402316d2a6dcf67dbfd9eaf4f045..904898448106c79a89a1149fe9fa687b0dc599e7 100644
--- a/docs/faq.txt
+++ b/docs/faq.txt
@@ -1,6 +1,6 @@
-=========================
-Frequency Asked Questions
-=========================
+==========================
+Frequently Asked Questions
+==========================
 
 Plotting questions
 ------------------
diff --git a/docs/index.txt b/docs/index.txt
index 2259e08b4bf12eb2fe4e25997cdac364a3746541..3a02a207a0ba3a5e2559b50f8b4a275cab0ccbc8 100644
--- a/docs/index.txt
+++ b/docs/index.txt
@@ -9,6 +9,7 @@ Welcome to bilby's documentation!
    :caption: Contents:
 
    installation
+   citing-bilby
    code-overview
    examples
    basics-of-parameter-estimation
@@ -17,6 +18,7 @@ Welcome to bilby's documentation!
    samplers
    dynesty-guide
    bilby-mcmc-guide
+   plugins
    bilby-output
    compact-binary-coalescence-parameter-estimation
    transient-gw-data
@@ -27,6 +29,7 @@ Welcome to bilby's documentation!
    writing-documentation
    hyperparameters
    containers
+   contribution-guide
    faq
 
 
diff --git a/docs/installation.txt b/docs/installation.txt
index 045980afa15f2296b880d35e30679a0e3f7866bb..adfbe199922830b868fa7df23634b83085b13a03 100644
--- a/docs/installation.txt
+++ b/docs/installation.txt
@@ -10,7 +10,7 @@ Installation
 
           $ conda install -c conda-forge bilby
 
-      Supported python versions: 3.8-3.10.
+      Supported python versions: 3.9-3.11.
 
    .. tab:: Pip
 
@@ -18,7 +18,7 @@ Installation
 
           $ pip install bilby
 
-      Supported python versions: 3.8-3.10.
+      Supported python versions: 3.9-3.11.
 
 
 This will install all requirements for running :code:`bilby` for general
@@ -47,7 +47,7 @@ wave inference, please additionally run the following commands.
 Install bilby from source
 -------------------------
 
-:code:`bilby` is developed and tested with Python 3.8-3.10. In the
+:code:`bilby` is developed and tested with Python 3.9-3.11. In the
 following, we assume you have a working python installation, `python pip
 <https://packaging.python.org/tutorials/installing-packages/#use-pip-for-installing)>`_,
 and `git <https://git-scm.com/>`_. See :ref:`installing-python` for our
diff --git a/docs/plugins.txt b/docs/plugins.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a73f0ce4931e2c6a0457342e98117b8c84963a2c
--- /dev/null
+++ b/docs/plugins.txt
@@ -0,0 +1,93 @@
+=======
+Plugins
+=======
+
+----------------
+Defining plugins
+----------------
+
+:code:`bilby` allows for additional customizations/extra features via plugins.
+This allows users to add new functionality without the need to modify the main
+:code:`bilby` codebase, for example to add a new sampler.
+
+To make your plugins discoverable to :code:`bilby`, you need to specify a plugin
+group (which :code:`bilby` knows to search for), a name for the plugin, and the
+python path to your function/class within your package metadata, see `here
+<https://packaging.python.org/en/latest/guides/creating-and-discovering-plugins/#using-package-metadata>`_
+for details. For example, if you have a package called :code:`mypackage` and
+you wish to add a plugin called :code:`my_awesome_plugin` within the group
+:code:`bilby.plugin`, you would specify the following in your `pyproject.toml
+<https://packaging.python.org/en/latest/guides/writing-pyproject-toml/>`_
+file::
+
+    [project.entry-points."bilby.plugin"]
+    my_awesome_plugin = "mypackage.plugin"
+
+Currently :code:`bilby` allows for the following plugin groups:
+
+- :code:`"bilby.samplers"`: group for adding samplers to :code:`bilby`. See :ref:`Sampler plugins` for more details.
+
+
+---------------
+Sampler plugins
+---------------
+
+Sampler plugins can be specified via the :code:`"bilby.samplers"` group and these
+are automatically added to the 'known' samplers in :code:`bilby`.
+This allows users to add support for new samplers without having to modify the
+core :code:`bilby` codebase.
+Sampler plugins should implement a sampler class that inherits from one of
+the following classes:
+
+- :py:class:`bilby.core.sampler.base_sampler.Sampler`
+- :py:class:`bilby.core.sampler.base_sampler.NestedSampler`
+- :py:class:`bilby.core.sampler.base_sampler.MCMCSampler`
+
+We provide a `template <https://github.com/bilby-plugins/sampler-template>`_
+for creating sampler plugins on GitHub.
+
+.. note::
+    When implementing a new sampler plugin, please avoid using a generic name for
+    the plugin (e.g. 'nest', 'mcmc') as this may lead to naming conflicts.
+
+
+Sampler plugin library
+----------------------
+
+This is a list of known sampler plugins. If you don't see your plugin listed
+here, we encourage you to open a
+`merge request <https://git.ligo.org/lscsoft/bilby/-/merge_requests/new>`_ to add it.
+
+- This could be your sampler
+
+
+Bilby-native samplers
+---------------------
+
+Some samplers are implemented directly in :code:`bilby` and these are advertised
+under two possible names:
+
+- :code:`bilby.<sampler name>`: always available, indicates the sampler is implemented in bilby,
+- :code:`<sampler name>`: only refers to the native bilby implementation if an external plugin does not already provide this sampler.
+
+This allows for an external plugin to provide a sampler without introducing
+namespace conflicts.
+
+
+--------------------------------
+Information for bilby developers
+--------------------------------
+
+Using plugins within bilby
+--------------------------
+
+Within :code:`bilby`, plugins are discovered with the
+:py:func:`bilby.core.utils.get_entry_points` function,
+and can be used throughout the :code:`bilby` infrastructure.
+
+Adding a new plugin group
+-------------------------
+
+If you want to add support for a new plugin group, please
+`open an issue <https://git.ligo.org/lscsoft/bilby/-/issues/new>`_
+to discuss the details with other developers.
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 01869f484f73c9c8d5e666431ea014c28ca8a2d4..2708ecc94ff0331ffbe0fecec88c13df76b8a96f 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -5,3 +5,4 @@ numpydoc
 nbsphinx
 autodoc
 sphinx_rtd_theme
+myst_parser
diff --git a/docs/samplers.txt b/docs/samplers.txt
index 69699dad0e3b22725b4d3f3f6d942763406df0da..7b4a190f1b972257ec05a51ee45ca3070350edcd 100644
--- a/docs/samplers.txt
+++ b/docs/samplers.txt
@@ -73,6 +73,18 @@ MCMC samplers
 - zeus :code:`bilby.core.sampler.zeus.Zeus`
 
 
+--------------------------
+Listing available samplers
+--------------------------
+
+A list of available samplers can be produced using
+:py:func:`bilby.core.sampler.get_implemented_samplers`.
+This will list native bilby samplers and any samplers available via a plugin.
+If a plugin provides a sampler that is also implemented in bilby, the bilby
+implementation will be labeled with the prefix `bilby.` to distinguish it from
+the plugin version. See `sampler plugins`_ for more details.
+
+
 -------------------
 Installing samplers
 -------------------
diff --git a/examples/core_examples/alternative_samplers/linear_regression_pymc_custom_likelihood.py b/examples/core_examples/alternative_samplers/linear_regression_pymc_custom_likelihood.py
index e9763770c8ad96b32adbf256cc44aabf0ceffb1e..1fccbcde6e3707738fb2e18bffe0b7f687571ae0 100644
--- a/examples/core_examples/alternative_samplers/linear_regression_pymc_custom_likelihood.py
+++ b/examples/core_examples/alternative_samplers/linear_regression_pymc_custom_likelihood.py
@@ -12,6 +12,7 @@ import bilby
 import matplotlib.pyplot as plt
 import numpy as np
 import pymc as pm
+from bilby.core.sampler.pymc import Pymc
 
 # A few simple setup steps
 label = "linear_regression_pymc_custom_likelihood"
@@ -76,13 +77,11 @@ class GaussianLikelihoodPyMC(bilby.core.likelihood.GaussianLikelihood):
         ----------
         sampler: :class:`bilby.core.sampler.Pymc`
             A Sampler object must be passed containing the prior distributions
-            and PyMC3 :class:`~pymc3.Model` to use as a context manager.
+            and PyMC :class:`~pymc.Model` to use as a context manager.
             If this is not passed, the super class is called and the regular
             likelihood is evaluated.
         """
 
-        from bilby.core.sampler import Pymc
-
         if not isinstance(sampler, Pymc):
             return super(GaussianLikelihoodPyMC, self).log_likelihood()
 
@@ -116,15 +115,12 @@ class PyMCUniformPrior(bilby.core.prior.Uniform):
 
     def ln_prob(self, sampler=None):
         """
-        Change ln_prob method to take in a Sampler and return a PyMC3
+        Change ln_prob method to take in a Sampler and return a PyMC
         distribution.
 
-        If the passed argument is not a `Pymc3` sampler, assume that it is a
+        If the passed argument is not a `Pymc` sampler, assume that it is a
         float or array to be passed to the superclass.
         """
-
-        from bilby.core.sampler import Pymc
-
         if not isinstance(sampler, Pymc):
             return super(PyMCUniformPrior, self).ln_prob(sampler)
 
diff --git a/examples/core_examples/multivariate_gaussian_prior.py b/examples/core_examples/multivariate_gaussian_prior.py
index e60c7c42e90c480ef8839488d37594185ce9c136..d9250e43cba1bb4f67b805f3d1ea9f316341db6d 100644
--- a/examples/core_examples/multivariate_gaussian_prior.py
+++ b/examples/core_examples/multivariate_gaussian_prior.py
@@ -87,10 +87,10 @@ for i in range(nmodes):
     angle = np.arctan(u[1] / u[0])
     angle = 180.0 * angle / np.pi  # convert to degrees
     ell = mpl.patches.Ellipse(
-        mus[i],
-        v[0],
-        v[1],
-        180.0 + angle,
+        xy=mus[i],
+        width=v[0],
+        height=v[1],
+        angle=180.0 + angle,
         edgecolor="black",
         facecolor="none",
         lw=2,
diff --git a/examples/gw_examples/injection_examples/binary_neutron_star_example.py b/examples/gw_examples/injection_examples/binary_neutron_star_example.py
index e9c1bb70ad687b953d7c1ebe488623f296917265..bba394bcf01c42fefa400f72d80704fbec9f8575 100644
--- a/examples/gw_examples/injection_examples/binary_neutron_star_example.py
+++ b/examples/gw_examples/injection_examples/binary_neutron_star_example.py
@@ -78,7 +78,7 @@ interferometers.inject_signal(
 
 # Load the default prior for binary neutron stars.
 # We're going to sample in chirp_mass, symmetric_mass_ratio, lambda_tilde, and
-# delta_lambda rather than mass_1, mass_2, lambda_1, and lambda_2.
+# delta_lambda_tilde rather than mass_1, mass_2, lambda_1, and lambda_2.
 # BNS have aligned spins by default, if you want to allow precessing spins
 # pass aligned_spin=False to the BNSPriorDict
 priors = bilby.gw.prior.BNSPriorDict()
@@ -102,8 +102,9 @@ priors["symmetric_mass_ratio"] = bilby.core.prior.Uniform(
     0.1, 0.25, name="symmetric_mass_ratio"
 )
 priors["lambda_tilde"] = bilby.core.prior.Uniform(0, 5000, name="lambda_tilde")
-priors["delta_lambda"] = bilby.core.prior.Uniform(-500, 1000, name="delta_lambda")
-
+priors["delta_lambda_tilde"] = bilby.core.prior.Uniform(
+    -500, 1000, name="delta_lambda_tilde"
+)
 priors["lambda_1"] = bilby.core.prior.Constraint(
     name="lambda_1", minimum=0, maximum=10000
 )
diff --git a/mcmc_requirements.txt b/mcmc_requirements.txt
index 6f5678c045b2d668402f1be6191c6b1622f8644e..441ba479c762e775111a2fe9e1bb48594406bedc 100644
--- a/mcmc_requirements.txt
+++ b/mcmc_requirements.txt
@@ -1,2 +1,2 @@
 scikit-learn
-nflows
+glasflow
diff --git a/optional_requirements.txt b/optional_requirements.txt
index 60e3fb4bac5cf62f9d4c92dcc277686efe4ac0d3..07934750c2813dda21a70b5982f855a3496cb91e 100644
--- a/optional_requirements.txt
+++ b/optional_requirements.txt
@@ -1,3 +1,4 @@
 celerite
 george
 plotly
+pytest-requires
diff --git a/requirements.txt b/requirements.txt
index c668c9d4a5b12fe7b1459dc3e575b48233307446..05c8c4fad1b0bbec698dfa22f9627bbf31ca5e0e 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,12 +1,15 @@
 bilby.cython>=0.3.0
-dynesty>=2.0.1
+# remove pin after https://git.ligo.org/lscsoft/bilby/-/merge_requests/1368
+dynesty>=2.0.1,<2.1.4
 emcee
 corner
 numpy
-matplotlib>=2.1
-scipy>=1.5
+matplotlib
+# remove pin after https://git.ligo.org/lscsoft/bilby/-/merge_requests/1368
+scipy>=1.5,<1.14
 pandas
 dill
 tqdm
 h5py
 attrs
+importlib-metadata>=3.6; python_version < '3.10'
diff --git a/setup.py b/setup.py
index 4ddb9744230089dcddb86a10e967316ee51a9737..353e92ee443d1e6e90eb104e07c15ec063229020 100644
--- a/setup.py
+++ b/setup.py
@@ -66,7 +66,7 @@ setup(
         "bilby.gw.detector": ["noise_curves/*.txt", "detectors/*"],
         "bilby.gw.eos": ["eos_tables/*.dat"],
     },
-    python_requires=">=3.8",
+    python_requires=">=3.9",
     install_requires=get_requirements(),
     extras_require={
         "gw": get_requirements("gw"),
@@ -82,11 +82,31 @@ setup(
         "console_scripts": [
             "bilby_plot=cli_bilby.plot_multiple_posteriors:main",
             "bilby_result=cli_bilby.bilby_result:main",
-        ]
+        ],
+        "bilby.samplers": [
+            "bilby.bilby_mcmc=bilby.bilby_mcmc.sampler:Bilby_MCMC",
+            "bilby.cpnest=bilby.core.sampler.cpnest:Cpnest",
+            "bilby.dnest4=bilby.core.sampler.dnest4:DNest4",
+            "bilby.dynesty=bilby.core.sampler.dynesty:Dynesty",
+            "bilby.dynamic_dynesty=bilby.core.sampler.dynamic_dynesty:DynamicDynesty",
+            "bilby.emcee=bilby.core.sampler.emcee:Emcee",
+            "bilby.kombine=bilby.core.sampler.kombine:Kombine",
+            "bilby.nessai=bilby.core.sampler.nessai:Nessai",
+            "bilby.nestle=bilby.core.sampler.nestle:Nestle",
+            "bilby.ptemcee=bilby.core.sampler.ptemcee:Ptemcee",
+            "bilby.ptmcmcsampler=bilby.core.sampler.ptmcmc:PTMCMCSampler",
+            "bilby.pymc=bilby.core.sampler.pymc:Pymc",
+            "bilby.pymultinest=bilby.core.sampler.pymultinest:Pymultinest",
+            "bilby.pypolychord=bilby.core.sampler.polychord:PyPolyChord",
+            "bilby.ultranest=bilby.core.sampler.ultranest:Ultranest",
+            "bilby.zeus=bilby.core.sampler.zeus:Zeus",
+            "bilby.fake_sampler=bilby.core.sampler.fake_sampler:FakeSampler",
+        ],
     },
     classifiers=[
         "Programming Language :: Python :: 3.9",
         "Programming Language :: Python :: 3.10",
+        "Programming Language :: Python :: 3.11",
         "License :: OSI Approved :: MIT License",
         "Operating System :: OS Independent",
     ],
diff --git a/test/bilby_mcmc/test_proposals.py b/test/bilby_mcmc/test_proposals.py
index 37fa0a0fe3b3327b161df4d82c12d46466709431..cd36f94fa371030afe6e917a597b601843fcfb1f 100644
--- a/test/bilby_mcmc/test_proposals.py
+++ b/test/bilby_mcmc/test_proposals.py
@@ -11,6 +11,7 @@ from bilby.bilby_mcmc.chain import Chain, Sample
 from bilby.bilby_mcmc import proposals
 from bilby.bilby_mcmc.utils import LOGLKEY, LOGPKEY
 import numpy as np
+import pytest
 
 
 class GivenProposal(proposals.BaseProposal):
@@ -164,36 +165,32 @@ class TestProposals(TestBaseProposals):
         else:
             print("Unable to test GMM as sklearn is not installed")
 
+    @pytest.mark.requires("glasflow")
     def test_NF_proposal(self):
         priors = self.create_priors()
         chain = self.create_chain(10000)
-        if proposals.NormalizingFlowProposal.check_dependencies():
-            prop = proposals.NormalizingFlowProposal(priors, first_fit=10000)
-            prop.steps_since_refit = 9999
-            start = time.time()
-            p, w = prop(chain)
-            dt = time.time() - start
-            print(f"Training for {prop.__class__.__name__} took dt~{dt:0.2g} [s]")
-            self.assertTrue(prop.trained)
-            self.proposal_check(prop)
-        else:
-            print("nflows not installed, unable to test NormalizingFlowProposal")
+        prop = proposals.NormalizingFlowProposal(priors, first_fit=10000)
+        prop.steps_since_refit = 9999
+        start = time.time()
+        p, w = prop(chain)
+        dt = time.time() - start
+        print(f"Training for {prop.__class__.__name__} took dt~{dt:0.2g} [s]")
+        self.assertTrue(prop.trained)
+        self.proposal_check(prop)
 
+    @pytest.mark.requires("glasflow")
     def test_NF_proposal_15D(self):
         ndim = 15
         priors = self.create_priors(ndim)
         chain = self.create_chain(10000, ndim=ndim)
-        if proposals.NormalizingFlowProposal.check_dependencies():
-            prop = proposals.NormalizingFlowProposal(priors, first_fit=10000)
-            prop.steps_since_refit = 9999
-            start = time.time()
-            p, w = prop(chain)
-            dt = time.time() - start
-            print(f"Training for {prop.__class__.__name__} took dt~{dt:0.2g} [s]")
-            self.assertTrue(prop.trained)
-            self.proposal_check(prop, ndim=ndim)
-        else:
-            print("nflows not installed, unable to test NormalizingFlowProposal")
+        prop = proposals.NormalizingFlowProposal(priors, first_fit=10000)
+        prop.steps_since_refit = 9999
+        start = time.time()
+        p, w = prop(chain)
+        dt = time.time() - start
+        print(f"Training for {prop.__class__.__name__} took dt~{dt:0.2g} [s]")
+        self.assertTrue(prop.trained)
+        self.proposal_check(prop, ndim=ndim)
 
 
 if __name__ == "__main__":
diff --git a/test/bilby_mcmc/test_sampler.py b/test/bilby_mcmc/test_sampler.py
index 746eb1a9e1150732e93d5c31664751040e7b639c..7e636e1ab07c06449a254433ded0014a210f2e19 100644
--- a/test/bilby_mcmc/test_sampler.py
+++ b/test/bilby_mcmc/test_sampler.py
@@ -85,5 +85,16 @@ class TestBilbyMCMCSampler(unittest.TestCase):
         self.assertTrue(isinstance(sampler.samples, pd.DataFrame))
 
 
+def test_get_expected_outputs():
+    label = "par0"
+    outdir = os.path.join("some", "bilby_pipe", "dir")
+    filenames, directories = Bilby_MCMC.get_expected_outputs(
+        outdir=outdir, label=label
+    )
+    assert len(filenames) == 1
+    assert len(directories) == 0
+    assert os.path.join(outdir, f"{label}_resume.pickle") in filenames
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/test/check_author_list.py b/test/check_author_list.py
index c04932d5eaa945867eb4be4bf2e5f328d40d5946..95d5be1ca2ac9cbd0a601b24460435e1f69731b2 100644
--- a/test/check_author_list.py
+++ b/test/check_author_list.py
@@ -3,7 +3,7 @@
 import re
 import subprocess
 
-special_cases = ["plasky", "thomas", "mj-will", "richard", "douglas"]
+special_cases = ["plasky", "thomas", "mj-will", "richard", "douglas", "nixnyxnyx"]
 AUTHORS_list = []
 with open("AUTHORS.md", "r") as f:
     AUTHORS_list = " ".join([line for line in f]).lower()
diff --git a/test/conftest.py b/test/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..d08c38604998b615cddce21c2271f4275a88fa11
--- /dev/null
+++ b/test/conftest.py
@@ -0,0 +1,19 @@
+import pytest
+
+
+def pytest_addoption(parser):
+    parser.addoption(
+        "--skip-roqs", action="store_true", default=False, help="Skip all tests that require ROQs"
+    )
+
+
+def pytest_configure(config):
+    config.addinivalue_line("markers", "requires_roqs: mark a test that requires ROQs")
+
+
+def pytest_collection_modifyitems(config, items):
+    if config.getoption("--skip-roqs"):
+        skip_roqs = pytest.mark.skip(reason="Skipping tests that require ROQs")
+        for item in items:
+            if "requires_roqs" in item.keywords:
+                item.add_marker(skip_roqs)
diff --git a/test/core/likelihood_test.py b/test/core/likelihood_test.py
index fb6ffa0c9d29eba66e19b82b4c1daaf5a6da9fe3..a7eb4a1c80ea99eadb3148597b74b55bac854770 100644
--- a/test/core/likelihood_test.py
+++ b/test/core/likelihood_test.py
@@ -193,7 +193,7 @@ class TestGaussianLikelihood(unittest.TestCase):
         likelihood.parameters["m"] = 2
         likelihood.parameters["c"] = 0
         likelihood.log_likelihood()
-        self.assertTrue(type(likelihood.sigma) == type(sigma_array))
+        self.assertTrue(type(likelihood.sigma) == type(sigma_array))  # noqa: E721
         self.assertTrue(all(likelihood.sigma == sigma_array))
 
     def test_set_sigma_None(self):
diff --git a/test/core/prior/conditional_test.py b/test/core/prior/conditional_test.py
index 5ee4efd60618f89547ec4731407ca97aebf062a7..20c0cda93f71a86177bf0d8cc74c3985882109f8 100644
--- a/test/core/prior/conditional_test.py
+++ b/test/core/prior/conditional_test.py
@@ -5,6 +5,7 @@ from unittest import mock
 
 import numpy as np
 import pandas as pd
+import pickle
 
 import bilby
 
@@ -412,6 +413,38 @@ class TestConditionalPriorDict(unittest.TestCase):
         res = priors.rescale(["a", "b", "d", "c"], [0.5, 0.5, 0.5, 0.5])
         print(res)
 
+    def test_subset_sampling(self):
+        def _tp_conditional_uniform(ref_params, period):
+            min_ref, max_ref = ref_params["minimum"], ref_params["maximum"]
+            max_ref = np.minimum(max_ref, min_ref + period)
+            return {"minimum": min_ref, "maximum": max_ref}
+
+        p0 = 68400.0
+        prior = bilby.core.prior.ConditionalPriorDict(
+            {
+                "tp": bilby.core.prior.ConditionalUniform(
+                    condition_func=_tp_conditional_uniform, minimum=0, maximum=2 * p0
+                )
+            }
+        )
+
+        # ---------- 0. Sanity check: sample full prior
+        prior["period"] = p0
+        samples2d = prior.sample(1000)
+        assert samples2d["tp"].max() < p0
+
+        # ---------- 1. Subset sampling with external delta-prior
+        print("Test 1: Subset-sampling conditionals for fixed 'externals':")
+        prior["period"] = p0
+        samples1d = prior.sample_subset(["tp"], 1000)
+        self.assertLess(samples1d["tp"].max(), p0)
+
+        # ---------- 2. Subset sampling with external uniform prior
+        prior["period"] = bilby.core.prior.Uniform(minimum=p0, maximum=2 * p0)
+        print("Test 2: Subset-sampling conditionals for 'external' uncertainties:")
+        with self.assertRaises(bilby.core.prior.IllegalConditionsException):
+            prior.sample_subset(["tp"], 1000)
+
 
 class TestDirichletPrior(unittest.TestCase):
 
@@ -440,6 +473,10 @@ class TestDirichletPrior(unittest.TestCase):
         test = bilby.core.prior.PriorDict.from_json(filename="priors/test_prior.json")
         self.assertEqual(self.priors, test)
 
+    def test_pickle(self):
+        """Assert can be pickled (needed for use with bilby_pipe)"""
+        pickle.dumps(self.priors)
+
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/test/core/prior/joint_test.py b/test/core/prior/joint_test.py
index ebadfcfaeb96ed1cd017a212097c8ff65f5c477c..c99373b00a8494b5d0a3b67e44aa40c0860db7ca 100644
--- a/test/core/prior/joint_test.py
+++ b/test/core/prior/joint_test.py
@@ -32,7 +32,7 @@ MultivariateGaussianDist(
                 self.assertTrue(item == fromstr.__getattribute__(key))
             elif key == "mvn":
                 for d1, d2 in zip(fromstr.__getattribute__(key), item):
-                    self.assertTrue(type(d1) == type(d2))
+                    self.assertTrue(type(d1) == type(d2))  # noqa: E721
             elif isinstance(item, (list, tuple, np.ndarray)):
                 self.assertTrue(
                     np.all(np.array(item) == np.array(fromstr.__getattribute__(key)))
diff --git a/test/core/result_test.py b/test/core/result_test.py
index 36e50aa365d9547236506f67dbfa38844215ee38..a2acec375100a524dc3dd2af32aa6f694b07228b 100644
--- a/test/core/result_test.py
+++ b/test/core/result_test.py
@@ -751,5 +751,71 @@ class TestPPPlots(unittest.TestCase):
             )
 
 
+class SimpleGaussianLikelihood(bilby.core.likelihood.Likelihood):
+    def __init__(self, mean=0, sigma=1):
+        """
+        A very simple Gaussian likelihood for testing
+        """
+        from scipy.stats import norm
+        super().__init__(parameters=dict())
+        self.mean = mean
+        self.sigma = sigma
+        self.dist = norm(loc=mean, scale=sigma)
+
+    def log_likelihood(self):
+        return self.dist.logpdf(self.parameters["mu"])
+
+
+class TestReweight(unittest.TestCase):
+
+    def setUp(self):
+        self.priors = bilby.core.prior.PriorDict(dict(
+            mu=bilby.core.prior.TruncatedNormal(0, 1, minimum=-5, maximum=5),
+        ))
+        self.result = bilby.core.result.Result(
+            search_parameter_keys=list(self.priors.keys()),
+            priors=self.priors,
+            posterior=pd.DataFrame(self.priors.sample(1000)),
+            log_evidence=-np.log(10),
+        )
+
+    def _run_reweighting(self, sigma):
+        likelihood_1 = SimpleGaussianLikelihood()
+        likelihood_2 = SimpleGaussianLikelihood(sigma=sigma)
+        original_ln_likelihoods = list()
+        for ii in range(len(self.result.posterior)):
+            likelihood_1.parameters = self.result.posterior.iloc[ii]
+            original_ln_likelihoods.append(likelihood_1.log_likelihood())
+        self.result.posterior["log_prior"] = self.priors.ln_prob(self.result.posterior)
+        self.result.posterior["log_likelihood"] = original_ln_likelihoods
+        self.original_ln_likelihoods = original_ln_likelihoods
+        return bilby.core.result.reweight(
+            self.result, likelihood_1, likelihood_2, verbose_output=True
+        )
+
+    def test_reweight_same_likelihood_weights_1(self):
+        """
+        When the likelihoods are the same, the weights should be 1.
+        """
+        _, weights, _, _, _, _ = self._run_reweighting(sigma=1)
+        self.assertLess(min(abs(weights - 1)), 1e-10)
+
+    def test_reweight_different_likelihood_weights_correct(self):
+        """
+        Test the known case where the target likelihood is a Gaussian with
+        sigma=0.5. The weights can be calculated analytically and the evidence
+        should be close to the original evidence within statistical error.
+        """
+        from scipy.stats import norm
+        new, weights, _, _, _, _ = self._run_reweighting(sigma=0.5)
+        expected_weights = (
+            norm(0, 0.5).pdf(self.result.posterior["mu"])
+            / norm(0, 1).pdf(self.result.posterior["mu"])
+        )
+        self.assertLess(min(abs(weights - expected_weights)), 1e-10)
+        self.assertLess(abs(new.log_evidence - self.result.log_evidence), 0.05)
+        self.assertNotEqual(new.log_evidence, self.result.log_evidence)
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/test/core/sampler/base_sampler_test.py b/test/core/sampler/base_sampler_test.py
index 4856a9e7df4922bf94fe26746972bb1864774535..d20ee978a3bb1fdbd88bc4656ae66690a2ed2ae4 100644
--- a/test/core/sampler/base_sampler_test.py
+++ b/test/core/sampler/base_sampler_test.py
@@ -10,9 +10,11 @@ import numpy as np
 import bilby
 from bilby.core import prior
 
+loaded_samplers = {k: v.load() for k, v in bilby.core.sampler.IMPLEMENTED_SAMPLERS.items()}
+
 
 class TestSampler(unittest.TestCase):
-    def setUp(self):
+    def setUp(self, soft_init=False):
         likelihood = bilby.core.likelihood.Likelihood()
         likelihood.parameters = dict(a=1, b=2, c=3)
         delta_prior = prior.DeltaFunction(peak=0)
@@ -36,11 +38,16 @@ class TestSampler(unittest.TestCase):
             outdir=test_directory,
             use_ratio=False,
             skip_import_verification=True,
+            soft_init=soft_init
         )
 
     def tearDown(self):
         del self.sampler
 
+    def test_softinit(self):
+        self.setUp(soft_init=True)
+        self.assertTrue(hasattr(self.sampler, "_log_likelihood_eval_time"))
+
     def test_search_parameter_keys(self):
         expected_search_parameter_keys = ["c"]
         self.assertListEqual(
@@ -141,6 +148,30 @@ class TestSampler(unittest.TestCase):
         )
 
 
+def test_get_expected_outputs():
+    outdir = os.path.join("some", "bilby_pipe", "dir")
+    label = "par0"
+    filenames, directories = bilby.core.sampler.Sampler.get_expected_outputs(
+        outdir=outdir, label=label
+    )
+    assert len(filenames) == 0
+    assert len(directories) == 1
+    assert directories[0] == os.path.join(outdir, f"sampler_{label}", "")
+
+
+def test_get_expected_outputs_abbreviation():
+    outdir = os.path.join("some", "bilby_pipe", "dir")
+    label = "par0"
+    bilby.core.sampler.Sampler.abbreviation = "abbr"
+    filenames, directories = bilby.core.sampler.Sampler.get_expected_outputs(
+        outdir=outdir, label=label
+    )
+    assert len(filenames) == 0
+    assert len(directories) == 1
+    assert directories[0] == os.path.join(outdir, f"abbr_{label}", "")
+    bilby.core.sampler.Sampler.abbreviation = None
+
+
 samplers = [
     "bilby_mcmc",
     "dynamic_dynesty",
@@ -165,9 +196,7 @@ class GenericSamplerTest(unittest.TestCase):
 
     @parameterized.expand(samplers)
     def test_pool_creates_properly_no_pool(self, sampler_name):
-        sampler = bilby.core.sampler.IMPLEMENTED_SAMPLERS[sampler_name](
-            self.likelihood, self.priors
-        )
+        sampler = loaded_samplers[sampler_name](self.likelihood, self.priors)
         sampler._setup_pool()
         if sampler_name == "kombine":
             from kombine import SerialPool
@@ -179,7 +208,7 @@ class GenericSamplerTest(unittest.TestCase):
 
     @parameterized.expand(samplers)
     def test_pool_creates_properly_pool(self, sampler):
-        sampler = bilby.core.sampler.IMPLEMENTED_SAMPLERS[sampler](
+        sampler = loaded_samplers[sampler](
             self.likelihood, self.priors, npool=2
         )
         sampler._setup_pool()
diff --git a/test/core/sampler/cpnest_test.py b/test/core/sampler/cpnest_test.py
index 08a23b0a85276818423f83c583765ac5f6721547..d56412d7aa918eb09df56d73cfd0ddd19f70c325 100644
--- a/test/core/sampler/cpnest_test.py
+++ b/test/core/sampler/cpnest_test.py
@@ -2,6 +2,7 @@ import unittest
 from unittest.mock import MagicMock
 
 import bilby
+import bilby.core.sampler.cpnest
 
 
 class TestCPNest(unittest.TestCase):
@@ -10,7 +11,7 @@ class TestCPNest(unittest.TestCase):
         self.priors = bilby.core.prior.PriorDict(
             dict(a=bilby.core.prior.Uniform(0, 1), b=bilby.core.prior.Uniform(0, 1))
         )
-        self.sampler = bilby.core.sampler.Cpnest(
+        self.sampler = bilby.core.sampler.cpnest.Cpnest(
             self.likelihood,
             self.priors,
             outdir="outdir",
diff --git a/test/core/sampler/dnest4_test.py b/test/core/sampler/dnest4_test.py
index dac5289f68ec775e1a00189a85244200b85891f8..4ce7fc62d5cc251f0008101a38de6dd5a0681306 100644
--- a/test/core/sampler/dnest4_test.py
+++ b/test/core/sampler/dnest4_test.py
@@ -2,6 +2,7 @@ import unittest
 from unittest.mock import MagicMock
 
 import bilby
+import bilby.core.sampler.dnest4
 
 
 class TestDnest4(unittest.TestCase):
@@ -10,7 +11,7 @@ class TestDnest4(unittest.TestCase):
         self.priors = bilby.core.prior.PriorDict(
             dict(a=bilby.core.prior.Uniform(0, 1), b=bilby.core.prior.Uniform(0, 1))
         )
-        self.sampler = bilby.core.sampler.DNest4(
+        self.sampler = bilby.core.sampler.dnest4.DNest4(
             self.likelihood,
             self.priors,
             outdir="outdir",
diff --git a/test/core/sampler/dynamic_dynesty_test.py b/test/core/sampler/dynamic_dynesty_test.py
index f5119affcf63a1d6d23de69a6325e63c487bc8c4..36d7e6b088e4ea882f0eae859cd72ee31226c9ca 100644
--- a/test/core/sampler/dynamic_dynesty_test.py
+++ b/test/core/sampler/dynamic_dynesty_test.py
@@ -2,6 +2,7 @@ import unittest
 from unittest.mock import MagicMock
 
 import bilby
+import bilby.core.sampler.dynamic_dynesty
 
 
 class TestDynamicDynesty(unittest.TestCase):
@@ -10,7 +11,7 @@ class TestDynamicDynesty(unittest.TestCase):
         self.priors = bilby.core.prior.PriorDict(
             dict(a=bilby.core.prior.Uniform(0, 1), b=bilby.core.prior.Uniform(0, 1))
         )
-        self.sampler = bilby.core.sampler.DynamicDynesty(
+        self.sampler = bilby.core.sampler.dynamic_dynesty.DynamicDynesty(
             self.likelihood,
             self.priors,
             outdir="outdir",
diff --git a/test/core/sampler/dynesty_test.py b/test/core/sampler/dynesty_test.py
index 9640495bc8bc55c500f3d15a75877408adc2f706..d33cc2e23d9f4219f3cb114688e1d5eff9af3ad7 100644
--- a/test/core/sampler/dynesty_test.py
+++ b/test/core/sampler/dynesty_test.py
@@ -5,8 +5,11 @@ from attr import define
 import bilby
 import numpy as np
 import parameterized
+import bilby.core.sampler.dynesty
 from bilby.core.sampler import dynesty_utils
 from scipy.stats import gamma, ks_1samp, uniform, powerlaw
+import shutil
+import os
 
 
 @define
@@ -40,7 +43,7 @@ class TestDynesty(unittest.TestCase):
         self.priors = bilby.core.prior.PriorDict(
             dict(a=bilby.core.prior.Uniform(0, 1), b=bilby.core.prior.Uniform(0, 1))
         )
-        self.sampler = bilby.core.sampler.Dynesty(
+        self.sampler = bilby.core.sampler.dynesty.Dynesty(
             self.likelihood,
             self.priors,
             outdir="outdir",
@@ -83,7 +86,7 @@ class TestDynesty(unittest.TestCase):
         self.priors["c"] = bilby.core.prior.Prior(boundary=None)
         self.priors["d"] = bilby.core.prior.Prior(boundary="reflective")
         self.priors["e"] = bilby.core.prior.Prior(boundary="periodic")
-        self.sampler = bilby.core.sampler.Dynesty(
+        self.sampler = bilby.core.sampler.dynesty.Dynesty(
             self.likelihood,
             self.priors,
             outdir="outdir",
@@ -99,6 +102,18 @@ class TestDynesty(unittest.TestCase):
         self.sampler._run_test()
 
 
+def test_get_expected_outputs():
+    label = "par0"
+    outdir = os.path.join("some", "bilby_pipe", "dir")
+    filenames, directories = bilby.core.sampler.dynesty.Dynesty.get_expected_outputs(
+        outdir=outdir, label=label
+    )
+    assert len(filenames) == 2
+    assert len(directories) == 0
+    assert os.path.join(outdir, f"{label}_resume.pickle") in filenames
+    assert os.path.join(outdir, f"{label}_dynesty.pickle") in filenames
+
+
 class ProposalsTest(unittest.TestCase):
 
     def test_boundaries(self):
@@ -271,5 +286,78 @@ class TestEstimateNMCMC(unittest.TestCase):
             self.assertAlmostEqual(estimated, expected)
 
 
+class TestReproducibility(unittest.TestCase):
+
+    @staticmethod
+    def model(x, m, c):
+        return m * x + c
+
+    def setUp(self):
+        bilby.core.utils.random.seed(42)
+        bilby.core.utils.command_line_args.bilby_test_mode = False
+        rng = bilby.core.utils.random.rng
+        self.x = np.linspace(0, 1, 11)
+        self.injection_parameters = dict(m=0.5, c=0.2)
+        self.sigma = 0.1
+        self.y = self.model(self.x, **self.injection_parameters) + rng.normal(
+            0, self.sigma, len(self.x)
+        )
+        self.likelihood = bilby.likelihood.GaussianLikelihood(
+            self.x, self.y, self.model, self.sigma
+        )
+
+        self.priors = bilby.core.prior.PriorDict()
+        self.priors["m"] = bilby.core.prior.Uniform(0, 5, boundary="periodic")
+        self.priors["c"] = bilby.core.prior.Uniform(-2, 2, boundary="reflective")
+        # Evaluate prior once to ensure normalization constant have been set
+        theta = self.priors.sample()
+        self.priors.ln_prob(theta)
+        self._remove_tree()
+        bilby.core.utils.check_directory_exists_and_if_not_mkdir("outdir")
+
+    def tearDown(self):
+        del self.likelihood
+        del self.priors
+        bilby.core.utils.command_line_args.bilby_test_mode = False
+        self._remove_tree()
+
+    def _remove_tree(self):
+        try:
+            shutil.rmtree("outdir")
+        except OSError:
+            pass
+
+    def _run_sampler(self, **kwargs):
+        bilby.core.utils.random.seed(42)
+        return bilby.run_sampler(
+            likelihood=self.likelihood,
+            priors=self.priors,
+            sampler="dynesty",
+            save=False,
+            resume=False,
+            dlogz=1.0,
+            nlive=20,
+            **kwargs,
+        )
+
+    def test_reproducibility_seed(self):
+        res0 = self._run_sampler(seed=1234)
+        res1 = self._run_sampler(seed=1234)
+        assert res0.log_evidence == res1.log_evidence
+
+    def test_reproducibility_state(self):
+        rstate = np.random.default_rng(1234)
+        res0 = self._run_sampler(rstate=rstate)
+        rstate = np.random.default_rng(1234)
+        res1 = self._run_sampler(rstate=rstate)
+        assert res0.log_evidence == res1.log_evidence
+
+    def test_reproducibility_state_and_seed(self):
+        rstate = np.random.default_rng(1234)
+        res0 = self._run_sampler(rstate=rstate)
+        res1 = self._run_sampler(seed=1234)
+        assert res0.log_evidence == res1.log_evidence
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/test/core/sampler/emcee_test.py b/test/core/sampler/emcee_test.py
index 66265e51e44797123a90b77193137c8e1fba4c2b..e22d891d51e848e5e7e98f17def490eee9e5b000 100644
--- a/test/core/sampler/emcee_test.py
+++ b/test/core/sampler/emcee_test.py
@@ -2,6 +2,7 @@ import unittest
 from unittest.mock import MagicMock
 
 import bilby
+import bilby.core.sampler.emcee
 
 
 class TestEmcee(unittest.TestCase):
@@ -10,7 +11,7 @@ class TestEmcee(unittest.TestCase):
         self.priors = bilby.core.prior.PriorDict(
             dict(a=bilby.core.prior.Uniform(0, 1), b=bilby.core.prior.Uniform(0, 1))
         )
-        self.sampler = bilby.core.sampler.Emcee(
+        self.sampler = bilby.core.sampler.emcee.Emcee(
             self.likelihood,
             self.priors,
             outdir="outdir",
diff --git a/test/core/sampler/general_sampler_tests.py b/test/core/sampler/general_sampler_tests.py
new file mode 100644
index 0000000000000000000000000000000000000000..38700c632d3312a0d8202e93a36081c46be436a2
--- /dev/null
+++ b/test/core/sampler/general_sampler_tests.py
@@ -0,0 +1,32 @@
+from bilby.core.sampler import (
+    get_implemented_samplers,
+    get_sampler_class,
+)
+import pytest
+
+
+def test_get_implemented_samplers():
+    """Assert the function returns a list of the correct length"""
+    from bilby.core.sampler import IMPLEMENTED_SAMPLERS
+
+    out = get_implemented_samplers()
+    assert isinstance(out, list)
+    assert len(out) == len(IMPLEMENTED_SAMPLERS)
+    assert "dynesty" in out
+
+
+def test_get_sampler_class():
+    """Assert the function returns the correct class"""
+    from bilby.core.sampler.dynesty import Dynesty
+
+    sampler_class = get_sampler_class("dynesty")
+    assert sampler_class is Dynesty
+
+
+def test_get_sampler_class_not_implemented():
+    """Assert an error is raised if the sampler is not recognized"""
+    with pytest.raises(
+        ValueError,
+        match=r"Sampler not_a_valid_sampler not yet implemented"
+    ):
+        get_sampler_class("not_a_valid_sampler")
diff --git a/test/core/sampler/implemented_samplers_test.py b/test/core/sampler/implemented_samplers_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..c34398185051997efef52c2f8e967bb866072d39
--- /dev/null
+++ b/test/core/sampler/implemented_samplers_test.py
@@ -0,0 +1,39 @@
+from bilby.core.sampler import IMPLEMENTED_SAMPLERS, ImplementedSamplers
+import pytest
+
+
+def test_singleton():
+    assert ImplementedSamplers() is IMPLEMENTED_SAMPLERS
+
+
+def test_keys():
+    # The fake sampler should never have a plugin, so this should always work
+    assert "fake_sampler" in IMPLEMENTED_SAMPLERS.keys()
+    assert "bilby.fake_sampler" not in IMPLEMENTED_SAMPLERS.keys()
+
+
+def test_allowed_keys():
+    # The fake sampler should never have a plugin, so this should always work
+    assert "fake_sampler" in IMPLEMENTED_SAMPLERS.valid_keys()
+    assert "bilby.fake_sampler" in IMPLEMENTED_SAMPLERS.valid_keys()
+
+
+def test_values():
+    # Values and keys should have the same lengths
+    assert len(list(IMPLEMENTED_SAMPLERS.values())) \
+        == len(list(IMPLEMENTED_SAMPLERS.keys()))
+    assert len(list(IMPLEMENTED_SAMPLERS.values())) \
+        == len(list(IMPLEMENTED_SAMPLERS._samplers.values()))
+
+
+def test_items():
+    keys, values = list(zip(*IMPLEMENTED_SAMPLERS.items()))
+    assert len(keys) == len(values)
+    # Keys and values should be the same as the individual methods
+    assert list(keys) == list(IMPLEMENTED_SAMPLERS.keys())
+    assert list(values) == list(IMPLEMENTED_SAMPLERS.values())
+
+
+@pytest.mark.parametrize("sampler", ["fake_sampler", "bilby.fake_sampler"])
+def test_in_operator(sampler):
+    assert sampler in IMPLEMENTED_SAMPLERS
diff --git a/test/core/sampler/kombine_test.py b/test/core/sampler/kombine_test.py
index d16eb8c90c7f11f6cf4280e00f8ca0c8e5ebb1a3..0423520561d90215a8c2b5615fd3aa832dbad97f 100644
--- a/test/core/sampler/kombine_test.py
+++ b/test/core/sampler/kombine_test.py
@@ -2,6 +2,7 @@ import unittest
 from unittest.mock import MagicMock
 
 import bilby
+import bilby.core.sampler.kombine
 
 
 class TestKombine(unittest.TestCase):
@@ -10,7 +11,7 @@ class TestKombine(unittest.TestCase):
         self.priors = bilby.core.prior.PriorDict(
             dict(a=bilby.core.prior.Uniform(0, 1), b=bilby.core.prior.Uniform(0, 1))
         )
-        self.sampler = bilby.core.sampler.Kombine(
+        self.sampler = bilby.core.sampler.kombine.Kombine(
             self.likelihood,
             self.priors,
             outdir="outdir",
diff --git a/test/core/sampler/nessai_test.py b/test/core/sampler/nessai_test.py
index 0cac7a45b24e9174336ed454e11908fd0e0e6555..3246c74e752a060725b0a0accfcb08fc9ebe345d 100644
--- a/test/core/sampler/nessai_test.py
+++ b/test/core/sampler/nessai_test.py
@@ -2,6 +2,8 @@ import unittest
 from unittest.mock import MagicMock, patch, mock_open
 
 import bilby
+import bilby.core.sampler.nessai
+import os
 
 
 class TestNessai(unittest.TestCase):
@@ -12,7 +14,7 @@ class TestNessai(unittest.TestCase):
         self.priors = bilby.core.prior.PriorDict(
             dict(a=bilby.core.prior.Uniform(0, 1), b=bilby.core.prior.Uniform(0, 1))
         )
-        self.sampler = bilby.core.sampler.Nessai(
+        self.sampler = bilby.core.sampler.nessai.Nessai(
             self.likelihood,
             self.priors,
             outdir="outdir",
@@ -83,5 +85,19 @@ class TestNessai(unittest.TestCase):
         self.assertDictEqual(expected, self.sampler.kwargs)
 
 
+def test_get_expected_outputs():
+    label = "par0"
+    outdir = os.path.join("some", "bilby_pipe", "dir")
+    filenames, directories = bilby.core.sampler.nessai.Nessai.get_expected_outputs(
+        outdir=outdir, label=label
+    )
+    assert len(filenames) == 0
+    assert len(directories) == 3
+    base_dir = os.path.join(outdir, f"{label}_nessai", "")
+    assert base_dir in directories
+    assert os.path.join(base_dir, "proposal", "") in directories
+    assert os.path.join(base_dir, "diagnostics", "") in directories
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/test/core/sampler/nestle_test.py b/test/core/sampler/nestle_test.py
index e5623ef336552a0b3ba8936c49d8704fad22ee0d..f6f8a698cc3274715cb2453e97aeb157535149fe 100644
--- a/test/core/sampler/nestle_test.py
+++ b/test/core/sampler/nestle_test.py
@@ -2,6 +2,7 @@ import unittest
 from unittest.mock import MagicMock
 
 import bilby
+import bilby.core.sampler.nestle
 
 
 class TestNestle(unittest.TestCase):
@@ -10,7 +11,7 @@ class TestNestle(unittest.TestCase):
         self.priors = bilby.core.prior.PriorDict(
             dict(a=bilby.core.prior.Uniform(0, 1), b=bilby.core.prior.Uniform(0, 1))
         )
-        self.sampler = bilby.core.sampler.Nestle(
+        self.sampler = bilby.core.sampler.nestle.Nestle(
             self.likelihood,
             self.priors,
             outdir="outdir",
diff --git a/test/core/sampler/polychord_test.py b/test/core/sampler/polychord_test.py
deleted file mode 100644
index 88193a83a0cf3b0c644f63f728a1356f79ccfb46..0000000000000000000000000000000000000000
--- a/test/core/sampler/polychord_test.py
+++ /dev/null
@@ -1,103 +0,0 @@
-import unittest
-from unittest.mock import MagicMock
-
-import numpy as np
-
-import bilby
-
-
-class TestPolyChord(unittest.TestCase):
-    def setUp(self):
-        self.likelihood = MagicMock()
-        self.priors = bilby.core.prior.PriorDict(
-            dict(a=bilby.core.prior.Uniform(0, 1), b=bilby.core.prior.Uniform(0, 1))
-        )
-        self.sampler = bilby.core.sampler.PyPolyChord(
-            self.likelihood,
-            self.priors,
-            outdir="outdir",
-            label="polychord",
-            use_ratio=False,
-            plot=False,
-            skip_import_verification=True,
-        )
-
-    def tearDown(self):
-        del self.likelihood
-        del self.priors
-        del self.sampler
-
-    def test_default_kwargs(self):
-        expected = dict(
-            use_polychord_defaults=False,
-            nlive=self.sampler.ndim * 25,
-            num_repeats=self.sampler.ndim * 5,
-            nprior=-1,
-            do_clustering=True,
-            feedback=1,
-            precision_criterion=0.001,
-            logzero=-1e30,
-            max_ndead=-1,
-            boost_posterior=0.0,
-            posteriors=True,
-            equals=True,
-            cluster_posteriors=True,
-            write_resume=True,
-            write_paramnames=False,
-            read_resume=True,
-            write_stats=True,
-            write_live=True,
-            write_dead=True,
-            write_prior=True,
-            compression_factor=np.exp(-1),
-            base_dir="outdir",
-            file_root="polychord",
-            seed=-1,
-            grade_dims=list([self.sampler.ndim]),
-            grade_frac=list([1.0] * len([self.sampler.ndim])),
-            nlives={},
-        )
-        self.sampler._setup_dynamic_defaults()
-        self.assertDictEqual(expected, self.sampler.kwargs)
-
-    def test_translate_kwargs(self):
-        expected = dict(
-            use_polychord_defaults=False,
-            nlive=123,
-            num_repeats=self.sampler.ndim * 5,
-            nprior=-1,
-            do_clustering=True,
-            feedback=1,
-            precision_criterion=0.001,
-            logzero=-1e30,
-            max_ndead=-1,
-            boost_posterior=0.0,
-            posteriors=True,
-            equals=True,
-            cluster_posteriors=True,
-            write_resume=True,
-            write_paramnames=False,
-            read_resume=True,
-            write_stats=True,
-            write_live=True,
-            write_dead=True,
-            write_prior=True,
-            compression_factor=np.exp(-1),
-            base_dir="outdir",
-            file_root="polychord",
-            seed=-1,
-            grade_dims=list([self.sampler.ndim]),
-            grade_frac=list([1.0] * len([self.sampler.ndim])),
-            nlives={},
-        )
-        self.sampler._setup_dynamic_defaults()
-        for equiv in bilby.core.sampler.base_sampler.NestedSampler.npoints_equiv_kwargs:
-            new_kwargs = self.sampler.kwargs.copy()
-            del new_kwargs["nlive"]
-            new_kwargs[equiv] = 123
-            self.sampler.kwargs = new_kwargs
-            self.assertDictEqual(expected, self.sampler.kwargs)
-
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/test/core/sampler/ptemcee_test.py b/test/core/sampler/ptemcee_test.py
index 65c49c4e28ea1529d0a879a2106b8ad8f3da0bb9..4708a12b09bbb310a708727e0482b6b80d415633 100644
--- a/test/core/sampler/ptemcee_test.py
+++ b/test/core/sampler/ptemcee_test.py
@@ -2,9 +2,10 @@ import unittest
 
 from bilby.core.likelihood import GaussianLikelihood
 from bilby.core.prior import Uniform, PriorDict
-from bilby.core.sampler import Ptemcee
+from bilby.core.sampler.ptemcee import Ptemcee
 from bilby.core.sampler.base_sampler import MCMCSampler
 import numpy as np
+import os
 
 
 class TestPTEmcee(unittest.TestCase):
@@ -89,5 +90,16 @@ class TestPTEmcee(unittest.TestCase):
         self.assertEqual(old, new)
 
 
+def test_get_expected_outputs():
+    label = "par0"
+    outdir = os.path.join("some", "bilby_pipe", "dir")
+    filenames, directories = Ptemcee.get_expected_outputs(
+        outdir=outdir, label=label
+    )
+    assert len(filenames) == 1
+    assert len(directories) == 0
+    assert os.path.join(outdir, f"{label}_checkpoint_resume.pickle") in filenames
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/test/core/sampler/pymc_test.py b/test/core/sampler/pymc_test.py
index 3ef4fac80826a53768ce0e876dceb300ec48784a..15e3275f44efc9852220fa451ce99b9494c28b38 100644
--- a/test/core/sampler/pymc_test.py
+++ b/test/core/sampler/pymc_test.py
@@ -2,6 +2,7 @@ import unittest
 from unittest.mock import MagicMock
 
 import bilby
+import bilby.core.sampler.pymc
 
 
 class TestPyMC(unittest.TestCase):
@@ -10,7 +11,7 @@ class TestPyMC(unittest.TestCase):
         self.priors = bilby.core.prior.PriorDict(
             dict(a=bilby.core.prior.Uniform(0, 1), b=bilby.core.prior.Uniform(0, 1))
         )
-        self.sampler = bilby.core.sampler.Pymc(
+        self.sampler = bilby.core.sampler.pymc.Pymc(
             self.likelihood,
             self.priors,
             outdir="outdir",
diff --git a/test/core/sampler/pymultinest_test.py b/test/core/sampler/pymultinest_test.py
index 8ffcef6745b89ed350dd1bc00a784b5e9999585d..7ec64b4867f47dee216770559505e2ebebcd4f9b 100644
--- a/test/core/sampler/pymultinest_test.py
+++ b/test/core/sampler/pymultinest_test.py
@@ -2,6 +2,7 @@ import unittest
 from unittest.mock import MagicMock
 
 import bilby
+import bilby.core.sampler.pymultinest
 
 
 class TestPymultinest(unittest.TestCase):
@@ -12,7 +13,7 @@ class TestPymultinest(unittest.TestCase):
         )
         self.priors["a"] = bilby.core.prior.Prior(boundary="periodic")
         self.priors["b"] = bilby.core.prior.Prior(boundary="reflective")
-        self.sampler = bilby.core.sampler.Pymultinest(
+        self.sampler = bilby.core.sampler.pymultinest.Pymultinest(
             self.likelihood,
             self.priors,
             outdir="outdir",
diff --git a/test/core/sampler/ultranest_test.py b/test/core/sampler/ultranest_test.py
index be22c1a1f50b8d304000fcb8d0e4816e57c9c1b9..c0219295bc78c74de302d7a5334da1ccc9bb5173 100644
--- a/test/core/sampler/ultranest_test.py
+++ b/test/core/sampler/ultranest_test.py
@@ -3,6 +3,7 @@ import unittest
 from unittest.mock import MagicMock
 
 import bilby
+import bilby.core.sampler.ultranest
 
 
 class TestUltranest(unittest.TestCase):
@@ -15,10 +16,15 @@ class TestUltranest(unittest.TestCase):
                  b=bilby.core.prior.Uniform(0, 1)))
         self.priors["a"] = bilby.core.prior.Prior(boundary="periodic")
         self.priors["b"] = bilby.core.prior.Prior(boundary="reflective")
-        self.sampler = bilby.core.sampler.Ultranest(self.likelihood, self.priors,
-                                                    outdir="outdir", label="label",
-                                                    use_ratio=False, plot=False,
-                                                    skip_import_verification=True)
+        self.sampler = bilby.core.sampler.ultranest.Ultranest(
+            self.likelihood,
+            self.priors,
+            outdir="outdir",
+            label="label",
+            use_ratio=False,
+            plot=False,
+            skip_import_verification=True,
+        )
 
     def tearDown(self):
         del self.likelihood
diff --git a/test/core/sampler/zeus_test.py b/test/core/sampler/zeus_test.py
index 2b3e2b5dea14dfe19c6b8930bb353d6871d73409..0f8dea9b1269fd43191e12a5bf89d49bee8d9cee 100644
--- a/test/core/sampler/zeus_test.py
+++ b/test/core/sampler/zeus_test.py
@@ -2,6 +2,7 @@ import unittest
 from unittest.mock import MagicMock
 
 import bilby
+import bilby.core.sampler.zeus
 
 
 class TestZeus(unittest.TestCase):
@@ -10,7 +11,7 @@ class TestZeus(unittest.TestCase):
         self.priors = bilby.core.prior.PriorDict(
             dict(a=bilby.core.prior.Uniform(0, 1), b=bilby.core.prior.Uniform(0, 1))
         )
-        self.sampler = bilby.core.sampler.Zeus(
+        self.sampler = bilby.core.sampler.zeus.Zeus(
             self.likelihood,
             self.priors,
             outdir="outdir",
diff --git a/test/gw/conversion_test.py b/test/gw/conversion_test.py
index f484019c194c6c9c9f9e1f3a1196f2f1bc2a2f2e..8ce40e3b887135202a10beb5e770efd3b754e1af 100644
--- a/test/gw/conversion_test.py
+++ b/test/gw/conversion_test.py
@@ -475,7 +475,7 @@ class TestGenerateAllParameters(unittest.TestCase):
     def test_generate_bbh_parameters_with_likelihood(self):
         priors = bilby.gw.prior.BBHPriorDict()
         priors["geocent_time"] = bilby.core.prior.Uniform(0.4, 0.6)
-        ifos = bilby.gw.detector.InterferometerList(["H1"])
+        ifos = bilby.gw.detector.InterferometerList(["H1", "L1"])
         ifos.set_strain_data_from_power_spectral_densities(duration=1, sampling_frequency=256)
         wfg = bilby.gw.waveform_generator.WaveformGenerator(
             frequency_domain_source_model=bilby.gw.source.lal_binary_black_hole
@@ -501,6 +501,8 @@ class TestGenerateAllParameters(unittest.TestCase):
             "phase",
             "H1_optimal_snr",
             "H1_matched_filter_snr",
+            "L1_optimal_snr",
+            "L1_matched_filter_snr",
             "ra",
             "dec",
         ]
@@ -864,8 +866,8 @@ class TestEquationOfStateConversions(unittest.TestCase):
                     self.mass_2_source_polytrope[i],
                     0
                 )
-            self.assertAlmostEqual(self.lambda_1_polytrope[i], lambda_1, places=3)
-            self.assertAlmostEqual(self.lambda_2_polytrope[i], lambda_2, places=3)
+            self.assertAlmostEqual(self.lambda_1_polytrope[i], lambda_1, places=2)
+            self.assertAlmostEqual(self.lambda_2_polytrope[i], lambda_2, places=1)
             self.assertAlmostEqual(self.eos_check_polytrope[i], eos_check)
 
 
diff --git a/test/gw/likelihood/relative_binning_test.py b/test/gw/likelihood/relative_binning_test.py
index 6eeca464366bba63dfc9292719b8483e7bea5a83..3f4af1e21a94a7435edb5918ed0a4a852e68470f 100644
--- a/test/gw/likelihood/relative_binning_test.py
+++ b/test/gw/likelihood/relative_binning_test.py
@@ -78,13 +78,13 @@ class TestRelativeBinningLikelihood(unittest.TestCase):
             duration=duration, sampling_frequency=sampling_frequency,
             frequency_domain_source_model=bilby.gw.source.lal_binary_black_hole,
             waveform_arguments=dict(
-                reference_frequency=fmin, minimum_frequency=fmin, approximant=approximant)
+                reference_frequency=fmin, minimum_frequency=fmin, waveform_approximant=approximant)
         )
         bin_wfg = bilby.gw.waveform_generator.WaveformGenerator(
             duration=duration, sampling_frequency=sampling_frequency,
             frequency_domain_source_model=bilby.gw.source.lal_binary_black_hole_relative_binning,
             waveform_arguments=dict(
-                reference_frequency=fmin, approximant=approximant, minimum_frequency=fmin)
+                reference_frequency=fmin, waveform_approximant=approximant, minimum_frequency=fmin)
         )
         ifos.inject_signal(
             parameters=self.test_parameters,
diff --git a/test/gw/likelihood_test.py b/test/gw/likelihood_test.py
index 87a007072f7dff2f0ee00976356eef43a3d5d15b..0c1575945247360f39ddeaa24acf0d48ba18430e 100644
--- a/test/gw/likelihood_test.py
+++ b/test/gw/likelihood_test.py
@@ -3,6 +3,7 @@ import unittest
 import tempfile
 from itertools import product
 from parameterized import parameterized
+import pytest
 
 import h5py
 import numpy as np
@@ -189,10 +190,7 @@ class TestGWTransient(unittest.TestCase):
         self.assertListEqual(
             bilby.gw.detector.InterferometerList(ifos), self.likelihood.interferometers
         )
-        self.assertTrue(
-            type(self.likelihood.interferometers)
-            == bilby.gw.detector.InterferometerList
-        )
+        self.assertIsInstance(self.likelihood.interferometers, bilby.gw.detector.InterferometerList)
 
     def test_interferometers_setting_interferometer_list(self):
         ifos = bilby.gw.detector.InterferometerList(
@@ -205,10 +203,7 @@ class TestGWTransient(unittest.TestCase):
         self.assertListEqual(
             bilby.gw.detector.InterferometerList(ifos), self.likelihood.interferometers
         )
-        self.assertTrue(
-            type(self.likelihood.interferometers)
-            == bilby.gw.detector.InterferometerList
-        )
+        self.assertIsInstance(self.likelihood.interferometers, bilby.gw.detector.InterferometerList)
 
     def test_meta_data(self):
         expected = dict(
@@ -279,6 +274,7 @@ class TestGWTransient(unittest.TestCase):
         )
 
 
+@pytest.mark.requires_roqs
 class TestROQLikelihood(unittest.TestCase):
     def setUp(self):
         self.duration = 4
@@ -347,7 +343,7 @@ class TestROQLikelihood(unittest.TestCase):
             waveform_arguments=dict(
                 reference_frequency=20.0,
                 minimum_frequency=20.0,
-                approximant="IMRPhenomPv2",
+                waveform_approximant="IMRPhenomPv2",
             ),
         )
 
@@ -366,7 +362,7 @@ class TestROQLikelihood(unittest.TestCase):
                 frequency_nodes_quadratic=fnodes_quadratic,
                 reference_frequency=20.0,
                 minimum_frequency=20.0,
-                approximant="IMRPhenomPv2",
+                waveform_approximant="IMRPhenomPv2",
             ),
         )
 
@@ -544,6 +540,7 @@ class TestROQLikelihood(unittest.TestCase):
             )
 
 
+@pytest.mark.requires_roqs
 class TestRescaledROQLikelihood(unittest.TestCase):
     def test_rescaling(self):
 
@@ -603,7 +600,7 @@ class TestRescaledROQLikelihood(unittest.TestCase):
                 frequency_nodes_quadratic=fnodes_quadratic,
                 reference_frequency=20.0,
                 minimum_frequency=20.0,
-                approximant="IMRPhenomPv2",
+                waveform_approximant="IMRPhenomPv2",
             ),
         )
 
@@ -618,6 +615,7 @@ class TestRescaledROQLikelihood(unittest.TestCase):
         )
 
 
+@pytest.mark.requires_roqs
 class TestROQLikelihoodHDF5(unittest.TestCase):
     """
     Test ROQ likelihood constructed from .hdf5 basis
@@ -738,6 +736,32 @@ class TestROQLikelihoodHDF5(unittest.TestCase):
     )
     def test_likelihood_accuracy(self, basis_linear, basis_quadratic, mc_range, roq_scale_factor, add_cal_errors):
         "Compare with log likelihood ratios computed by the non-ROQ likelihood"
+        # The maximum error of log likelihood ratio. It is set to be larger for roq_scale_factor=1 as the injected SNR
+        # is higher.
+        if roq_scale_factor == 1:
+            max_llr_error = 5e-1
+        elif roq_scale_factor == 2:
+            max_llr_error = 5e-2
+        else:
+            raise
+
+        self.assertLess_likelihood_errors(
+            basis_linear, basis_quadratic, mc_range, roq_scale_factor, add_cal_errors, max_llr_error
+        )
+
+    @parameterized.expand([(_path_to_basis_mb, 100, 1024), (_path_to_basis_mb, 20, 200), (_path_to_basis_mb, 100, 200)])
+    def test_likelihood_accuracy_narrower_frequency_range(self, basis, minimum_frequency, maximum_frequency):
+        """Compare with log likelihood ratios computed by the non-ROQ likelihood in the case where analyzed frequency
+        range is narrower than the basis frequency range"""
+        self.assertLess_likelihood_errors(
+            basis, basis, (8, 9), 1, False, 1.5e-1,
+            minimum_frequency=minimum_frequency, maximum_frequency=maximum_frequency
+        )
+
+    def assertLess_likelihood_errors(
+        self, basis_linear, basis_quadratic, mc_range, roq_scale_factor, add_cal_errors, max_llr_error,
+        minimum_frequency=None, maximum_frequency=None
+    ):
         self.minimum_frequency *= roq_scale_factor
         self.sampling_frequency *= roq_scale_factor
         self.duration /= roq_scale_factor
@@ -751,7 +775,12 @@ class TestROQLikelihoodHDF5(unittest.TestCase):
 
         interferometers = bilby.gw.detector.InterferometerList(["H1", "L1"])
         for ifo in interferometers:
-            ifo.minimum_frequency = self.minimum_frequency
+            if minimum_frequency is None:
+                ifo.minimum_frequency = self.minimum_frequency
+            else:
+                ifo.minimum_frequency = minimum_frequency
+            if maximum_frequency is not None:
+                ifo.maximum_frequency = maximum_frequency
         interferometers.set_strain_data_from_zero_noise(
             sampling_frequency=self.sampling_frequency,
             duration=self.duration,
@@ -811,14 +840,6 @@ class TestROQLikelihoodHDF5(unittest.TestCase):
             quadratic_matrix=basis_quadratic,
             roq_scale_factor=roq_scale_factor
         )
-        # The maximum error of log likelihood ratio. It is set to be larger for roq_scale_factor=1 as the injected SNR
-        # is higher.
-        if roq_scale_factor == 1:
-            max_llr_error = 5e-1
-        elif roq_scale_factor == 2:
-            max_llr_error = 5e-2
-        else:
-            raise
         for mc in np.linspace(self.priors["chirp_mass"].minimum, self.priors["chirp_mass"].maximum, 11):
             parameters = self.injection_parameters.copy()
             parameters["chirp_mass"] = mc
@@ -829,6 +850,7 @@ class TestROQLikelihoodHDF5(unittest.TestCase):
             self.assertLess(np.abs(llr - llr_roq), max_llr_error)
 
 
+@pytest.mark.requires_roqs
 class TestCreateROQLikelihood(unittest.TestCase):
     """
     Test if ROQ likelihood is constructed without any errors from .hdf5 or .npy basis
@@ -949,6 +971,7 @@ class TestCreateROQLikelihood(unittest.TestCase):
         )
 
 
+@pytest.mark.requires_roqs
 class TestInOutROQWeights(unittest.TestCase):
 
     @parameterized.expand(['npz', 'hdf5'])
@@ -1223,7 +1246,7 @@ class TestMBLikelihood(unittest.TestCase):
         ("IMRPhenomHM", False, 4, True, 1e-3)
     ])
     def test_matches_original_likelihood(
-        self, approximant, linear_interpolation, highest_mode, add_cal_errors, tolerance
+        self, waveform_approximant, linear_interpolation, highest_mode, add_cal_errors, tolerance
     ):
         """
         Check if multi-band likelihood values match original likelihood values
@@ -1232,7 +1255,7 @@ class TestMBLikelihood(unittest.TestCase):
             duration=self.duration, sampling_frequency=self.sampling_frequency,
             frequency_domain_source_model=bilby.gw.source.lal_binary_black_hole,
             waveform_arguments=dict(
-                reference_frequency=self.fmin, waveform_approximant=approximant
+                reference_frequency=self.fmin, waveform_approximant=waveform_approximant
             )
         )
         self.ifos.inject_signal(parameters=self.test_parameters, waveform_generator=wfg)
@@ -1241,7 +1264,7 @@ class TestMBLikelihood(unittest.TestCase):
             duration=self.duration, sampling_frequency=self.sampling_frequency,
             frequency_domain_source_model=bilby.gw.source.binary_black_hole_frequency_sequence,
             waveform_arguments=dict(
-                reference_frequency=self.fmin, waveform_approximant=approximant
+                reference_frequency=self.fmin, waveform_approximant=waveform_approximant
             )
         )
         likelihood = bilby.gw.likelihood.GravitationalWaveTransient(
@@ -1267,12 +1290,12 @@ class TestMBLikelihood(unittest.TestCase):
         """
         Check if larger accuracy factor increases the accuracy.
         """
-        approximant = "IMRPhenomD"
+        waveform_approximant = "IMRPhenomD"
         wfg = bilby.gw.WaveformGenerator(
             duration=self.duration, sampling_frequency=self.sampling_frequency,
             frequency_domain_source_model=bilby.gw.source.lal_binary_black_hole,
             waveform_arguments=dict(
-                reference_frequency=self.fmin, waveform_approximant=approximant
+                reference_frequency=self.fmin, waveform_approximant=waveform_approximant
             )
         )
         self.ifos.inject_signal(parameters=self.test_parameters, waveform_generator=wfg)
@@ -1281,7 +1304,7 @@ class TestMBLikelihood(unittest.TestCase):
             duration=self.duration, sampling_frequency=self.sampling_frequency,
             frequency_domain_source_model=bilby.gw.source.binary_black_hole_frequency_sequence,
             waveform_arguments=dict(
-                reference_frequency=self.fmin, waveform_approximant=approximant
+                reference_frequency=self.fmin, waveform_approximant=waveform_approximant
             )
         )
         likelihood = bilby.gw.likelihood.GravitationalWaveTransient(
@@ -1313,7 +1336,7 @@ class TestMBLikelihood(unittest.TestCase):
             duration=self.duration, sampling_frequency=self.sampling_frequency,
             frequency_domain_source_model=bilby.gw.source.binary_black_hole_frequency_sequence,
             waveform_arguments=dict(
-                reference_frequency=self.fmin, approximant="IMRPhenomD"
+                reference_frequency=self.fmin, waveform_approximant="IMRPhenomD"
             )
         )
         likelihood1 = bilby.gw.likelihood.MBGravitationalWaveTransient(
@@ -1335,7 +1358,7 @@ class TestMBLikelihood(unittest.TestCase):
             duration=self.duration, sampling_frequency=self.sampling_frequency,
             frequency_domain_source_model=bilby.gw.source.binary_black_hole_frequency_sequence,
             waveform_arguments=dict(
-                reference_frequency=self.fmin, approximant="IMRPhenomD"
+                reference_frequency=self.fmin, waveform_approximant="IMRPhenomD"
             )
         )
         with self.assertRaises(TypeError):
@@ -1351,7 +1374,7 @@ class TestMBLikelihood(unittest.TestCase):
             duration=self.duration, sampling_frequency=self.sampling_frequency,
             frequency_domain_source_model=bilby.gw.source.binary_black_hole_frequency_sequence,
             waveform_arguments=dict(
-                reference_frequency=self.fmin, approximant="IMRPhenomD"
+                reference_frequency=self.fmin, waveform_approximant="IMRPhenomD"
             )
         )
         for key in ["chirp_mass", "mass_1", "mass_2"]:
@@ -1368,12 +1391,12 @@ class TestMBLikelihood(unittest.TestCase):
         Check if multiband weights can be saved as a file, and a likelihood object constructed from the weights file
         produces the same likelihood value.
         """
-        approximant = "IMRPhenomD"
+        waveform_approximant = "IMRPhenomD"
         wfg = bilby.gw.WaveformGenerator(
             duration=self.duration, sampling_frequency=self.sampling_frequency,
             frequency_domain_source_model=bilby.gw.source.lal_binary_black_hole,
             waveform_arguments=dict(
-                reference_frequency=self.fmin, approximant=approximant
+                reference_frequency=self.fmin, waveform_approximant=waveform_approximant
             )
         )
         self.ifos.inject_signal(
@@ -1384,7 +1407,7 @@ class TestMBLikelihood(unittest.TestCase):
             duration=self.duration, sampling_frequency=self.sampling_frequency,
             frequency_domain_source_model=bilby.gw.source.binary_black_hole_frequency_sequence,
             waveform_arguments=dict(
-                reference_frequency=self.fmin, approximant=approximant
+                reference_frequency=self.fmin, waveform_approximant=waveform_approximant
             )
         )
         likelihood_mb = bilby.gw.likelihood.MBGravitationalWaveTransient(
@@ -1407,7 +1430,7 @@ class TestMBLikelihood(unittest.TestCase):
                 duration=self.duration, sampling_frequency=self.sampling_frequency,
                 frequency_domain_source_model=bilby.gw.source.binary_black_hole_frequency_sequence,
                 waveform_arguments=dict(
-                    reference_frequency=self.fmin, approximant=approximant
+                    reference_frequency=self.fmin, waveform_approximant=waveform_approximant
                 )
             )
             likelihood_mb_from_weights = bilby.gw.likelihood.MBGravitationalWaveTransient(
@@ -1424,12 +1447,12 @@ class TestMBLikelihood(unittest.TestCase):
         """
         Check if a likelihood object constructed from dictionary-like weights produce the same likelihood value
         """
-        approximant = "IMRPhenomD"
+        waveform_approximant = "IMRPhenomD"
         wfg = bilby.gw.WaveformGenerator(
             duration=self.duration, sampling_frequency=self.sampling_frequency,
             frequency_domain_source_model=bilby.gw.source.lal_binary_black_hole,
             waveform_arguments=dict(
-                reference_frequency=self.fmin, approximant=approximant
+                reference_frequency=self.fmin, waveform_approximant=waveform_approximant
             )
         )
         self.ifos.inject_signal(
@@ -1440,7 +1463,7 @@ class TestMBLikelihood(unittest.TestCase):
             duration=self.duration, sampling_frequency=self.sampling_frequency,
             frequency_domain_source_model=bilby.gw.source.binary_black_hole_frequency_sequence,
             waveform_arguments=dict(
-                reference_frequency=self.fmin, approximant=approximant
+                reference_frequency=self.fmin, waveform_approximant=waveform_approximant
             )
         )
         likelihood_mb = bilby.gw.likelihood.MBGravitationalWaveTransient(
@@ -1457,7 +1480,7 @@ class TestMBLikelihood(unittest.TestCase):
             duration=self.duration, sampling_frequency=self.sampling_frequency,
             frequency_domain_source_model=bilby.gw.source.binary_black_hole_frequency_sequence,
             waveform_arguments=dict(
-                reference_frequency=self.fmin, approximant=approximant
+                reference_frequency=self.fmin, waveform_approximant=waveform_approximant
             )
         )
         weights = likelihood_mb.weights
@@ -1469,6 +1492,55 @@ class TestMBLikelihood(unittest.TestCase):
 
         self.assertAlmostEqual(llr, llr_from_weights)
 
+    @parameterized.expand([
+        ("IMRPhenomD", True, 2, False, 1e-2),
+        ("IMRPhenomD", True, 2, True, 1e-2),
+        ("IMRPhenomHM", False, 4, False, 5e-3),
+    ])
+    def test_matches_original_likelihood_low_maximum_frequency(
+        self, waveform_approximant, linear_interpolation, highest_mode, add_cal_errors, tolerance
+    ):
+        """
+        Test for maximum frequency < sampling frequency / 2
+        """
+        for ifo in self.ifos:
+            ifo.maximum_frequency = self.sampling_frequency / 8
+
+        wfg = bilby.gw.WaveformGenerator(
+            duration=self.duration, sampling_frequency=self.sampling_frequency,
+            frequency_domain_source_model=bilby.gw.source.lal_binary_black_hole,
+            waveform_arguments=dict(
+                reference_frequency=self.fmin, waveform_approximant=waveform_approximant
+            )
+        )
+        self.ifos.inject_signal(parameters=self.test_parameters, waveform_generator=wfg)
+
+        wfg_mb = bilby.gw.WaveformGenerator(
+            duration=self.duration, sampling_frequency=self.sampling_frequency,
+            frequency_domain_source_model=bilby.gw.source.binary_black_hole_frequency_sequence,
+            waveform_arguments=dict(
+                reference_frequency=self.fmin, waveform_approximant=waveform_approximant
+            )
+        )
+        likelihood = bilby.gw.likelihood.GravitationalWaveTransient(
+            interferometers=self.ifos, waveform_generator=wfg
+        )
+        likelihood_mb = bilby.gw.likelihood.MBGravitationalWaveTransient(
+            interferometers=self.ifos, waveform_generator=wfg_mb,
+            reference_chirp_mass=self.test_parameters['chirp_mass'],
+            priors=self.priors.copy(), linear_interpolation=linear_interpolation,
+            highest_mode=highest_mode
+        )
+        likelihood.parameters.update(self.test_parameters)
+        likelihood_mb.parameters.update(self.test_parameters)
+        if add_cal_errors:
+            likelihood.parameters.update(self.calibration_parameters)
+            likelihood_mb.parameters.update(self.calibration_parameters)
+        self.assertLess(
+            abs(likelihood.log_likelihood_ratio() - likelihood_mb.log_likelihood_ratio()),
+            tolerance
+        )
+
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/test/gw/source_test.py b/test/gw/source_test.py
index 979699b34ead35eeb742e6c9a35282230894fcbd..5f5199fd10698993036f21fdfe5517cbf7dcbff7 100644
--- a/test/gw/source_test.py
+++ b/test/gw/source_test.py
@@ -306,7 +306,7 @@ class TestROQBBH(unittest.TestCase):
             frequency_nodes_quadratic=fnodes_quadratic,
             reference_frequency=50.0,
             minimum_frequency=20.0,
-            approximant="IMRPhenomPv2",
+            waveform_approximant="IMRPhenomPv2",
         )
         self.frequency_array = bilby.core.utils.create_frequency_series(2048, 4)
 
diff --git a/test/gw/utils_test.py b/test/gw/utils_test.py
index f2aeb1c78ad3b1c0c2a4c5b22f2a35cac9d632ea..b67d72d5dc399bcb847ca52cd33ba462d4f37285 100644
--- a/test/gw/utils_test.py
+++ b/test/gw/utils_test.py
@@ -1,6 +1,7 @@
 import unittest
 import os
 from shutil import rmtree
+from importlib.metadata import version
 
 import numpy as np
 import lal
@@ -89,12 +90,28 @@ class TestGWUtils(unittest.TestCase):
         with self.assertRaises(ValueError):
             gwutils.get_event_time("GW010290")
 
+    @pytest.mark.skipif(version("gwpy") < "3.0.8", reason="GWpy version < 3.0.8")
     def test_read_frame_file(self):
+        """
+        Test that reading a frame file works as expected
+        for a few conditions.
+
+        1. Reading without time limits returns the full data
+        2. Reading with time limits returns the expected data
+           (inclusive of start time if present, exclusive of end time)
+        3. Reading without the channel name provided finds a standard name
+        4. Reading without the channel with a non-standard name returns None.
+
+        Notes
+        =====
+        There was a longstanding bug in gwpy that we previously tested for
+        here, but this has been fixed in gwpy 3.0.8.
+        """
         start_time = 0
         end_time = 10
         channel = "H1:GDS-CALIB_STRAIN"
         N = 100
-        times = np.linspace(start_time, end_time, N)
+        times = np.linspace(start_time, end_time, N, endpoint=False)
         data = np.random.normal(0, 1, N)
         ts = TimeSeries(data=data, times=times, t0=0)
         ts.channel = Channel(channel)
@@ -107,7 +124,7 @@ class TestGWUtils(unittest.TestCase):
             filename, start_time=None, end_time=None, channel=channel
         )
         self.assertEqual(strain.name, channel)
-        self.assertTrue(np.all(strain.value == data[:-1]))
+        self.assertTrue(np.all(strain.value == data))
 
         # Check reading with time limits
         start_cut = 2
@@ -115,19 +132,18 @@ class TestGWUtils(unittest.TestCase):
         strain = gwutils.read_frame_file(
             filename, start_time=start_cut, end_time=end_cut, channel=channel
         )
-        idxs = (times > start_cut) & (times < end_cut)
-        # Dropping the last element - for some reason gwpy drops the last element when reading in data
-        self.assertTrue(np.all(strain.value == data[idxs][:-1]))
+        idxs = (times >= start_cut) & (times < end_cut)
+        self.assertTrue(np.all(strain.value == data[idxs]))
 
         # Check reading with unknown channels
         strain = gwutils.read_frame_file(filename, start_time=None, end_time=None)
-        self.assertTrue(np.all(strain.value == data[:-1]))
+        self.assertTrue(np.all(strain.value == data))
 
         # Check reading with incorrect channel
         strain = gwutils.read_frame_file(
             filename, start_time=None, end_time=None, channel="WRONG"
         )
-        self.assertTrue(np.all(strain.value == data[:-1]))
+        self.assertTrue(np.all(strain.value == data))
 
         ts = TimeSeries(data=data, times=times, t0=0)
         ts.name = "NOT-A-KNOWN-CHANNEL"
diff --git a/test/gw/waveform_generator_test.py b/test/gw/waveform_generator_test.py
index c4bd5729f32257cecea81d5731c2e6238e16b88c..ce809140d54f0146f736e78dbc3ee3565142f62d 100644
--- a/test/gw/waveform_generator_test.py
+++ b/test/gw/waveform_generator_test.py
@@ -438,42 +438,42 @@ class TestFrequencyDomainStrainMethod(unittest.TestCase):
     def test_frequency_domain_caching_and_using_time_domain_strain_without_parameters(
         self,
     ):
-        original_waveform = self.waveform_generator.frequency_domain_strain(
-            parameters=self.simulation_parameters
-        )
-        new_waveform = self.waveform_generator.time_domain_strain()
-        self.assertNotEqual(original_waveform, new_waveform)
+        self.assertFalse(_test_caching_different_domain(
+            self.waveform_generator.frequency_domain_strain,
+            self.waveform_generator.time_domain_strain,
+            self.simulation_parameters,
+            None,
+        ))
 
     def test_frequency_domain_caching_and_using_time_domain_strain_with_parameters(
         self,
     ):
-        original_waveform = self.waveform_generator.frequency_domain_strain(
-            parameters=self.simulation_parameters
-        )
-        new_waveform = self.waveform_generator.time_domain_strain(
-            parameters=self.simulation_parameters
-        )
-        self.assertNotEqual(original_waveform, new_waveform)
+        self.assertFalse(_test_caching_different_domain(
+            self.waveform_generator.frequency_domain_strain,
+            self.waveform_generator.time_domain_strain,
+            self.simulation_parameters,
+            self.simulation_parameters,
+        ))
 
     def test_time_domain_caching_and_using_frequency_domain_strain_without_parameters(
         self,
     ):
-        original_waveform = self.waveform_generator.time_domain_strain(
-            parameters=self.simulation_parameters
-        )
-        new_waveform = self.waveform_generator.frequency_domain_strain()
-        self.assertNotEqual(original_waveform, new_waveform)
+        self.assertFalse(_test_caching_different_domain(
+            self.waveform_generator.time_domain_strain,
+            self.waveform_generator.frequency_domain_strain,
+            self.simulation_parameters,
+            None,
+        ))
 
     def test_time_domain_caching_and_using_frequency_domain_strain_with_parameters(
         self,
     ):
-        original_waveform = self.waveform_generator.time_domain_strain(
-            parameters=self.simulation_parameters
-        )
-        new_waveform = self.waveform_generator.frequency_domain_strain(
-            parameters=self.simulation_parameters
-        )
-        self.assertNotEqual(original_waveform, new_waveform)
+        self.assertFalse(_test_caching_different_domain(
+            self.waveform_generator.time_domain_strain,
+            self.waveform_generator.frequency_domain_strain,
+            self.simulation_parameters,
+            self.simulation_parameters,
+        ))
 
     def test_frequency_domain_caching_changing_model(self):
         original_waveform = self.waveform_generator.frequency_domain_strain(
@@ -648,42 +648,51 @@ class TestTimeDomainStrainMethod(unittest.TestCase):
     def test_frequency_domain_caching_and_using_time_domain_strain_without_parameters(
         self,
     ):
-        original_waveform = self.waveform_generator.frequency_domain_strain(
-            parameters=self.simulation_parameters
-        )
-        new_waveform = self.waveform_generator.time_domain_strain()
-        self.assertNotEqual(original_waveform, new_waveform)
+        self.assertFalse(_test_caching_different_domain(
+            self.waveform_generator.frequency_domain_strain,
+            self.waveform_generator.time_domain_strain,
+            self.simulation_parameters,
+            None,
+        ))
 
     def test_frequency_domain_caching_and_using_time_domain_strain_with_parameters(
         self,
     ):
-        original_waveform = self.waveform_generator.frequency_domain_strain(
-            parameters=self.simulation_parameters
-        )
-        new_waveform = self.waveform_generator.time_domain_strain(
-            parameters=self.simulation_parameters
-        )
-        self.assertNotEqual(original_waveform, new_waveform)
+        self.assertFalse(_test_caching_different_domain(
+            self.waveform_generator.frequency_domain_strain,
+            self.waveform_generator.time_domain_strain,
+            self.simulation_parameters,
+            self.simulation_parameters,
+        ))
 
     def test_time_domain_caching_and_using_frequency_domain_strain_without_parameters(
         self,
     ):
-        original_waveform = self.waveform_generator.time_domain_strain(
-            parameters=self.simulation_parameters
-        )
-        new_waveform = self.waveform_generator.frequency_domain_strain()
-        self.assertNotEqual(original_waveform, new_waveform)
+        self.assertFalse(_test_caching_different_domain(
+            self.waveform_generator.time_domain_strain,
+            self.waveform_generator.frequency_domain_strain,
+            self.simulation_parameters,
+            None,
+        ))
 
     def test_time_domain_caching_and_using_frequency_domain_strain_with_parameters(
         self,
     ):
-        original_waveform = self.waveform_generator.time_domain_strain(
-            parameters=self.simulation_parameters
-        )
-        new_waveform = self.waveform_generator.frequency_domain_strain(
-            parameters=self.simulation_parameters
-        )
-        self.assertNotEqual(original_waveform, new_waveform)
+        self.assertFalse(_test_caching_different_domain(
+            self.waveform_generator.time_domain_strain,
+            self.waveform_generator.frequency_domain_strain,
+            self.simulation_parameters,
+            self.simulation_parameters,
+        ))
+
+
+def _test_caching_different_domain(func1, func2, params1, params2):
+    original_waveform = func1(parameters=params1)
+    new_waveform = func2(parameters=params2)
+    output = True
+    for key in original_waveform:
+        output &= np.array_equal(original_waveform[key], new_waveform[key])
+    return output
 
 
 if __name__ == "__main__":
diff --git a/test/integration/sampler_run_test.py b/test/integration/sampler_run_test.py
index 4495593ab46aaf0ec34073aaff3938eae05030ff..79a61a7b42f57a10ab07476029ba222f2978439c 100644
--- a/test/integration/sampler_run_test.py
+++ b/test/integration/sampler_run_test.py
@@ -55,7 +55,6 @@ _sampler_kwargs = dict(
     PTMCMCSampler=dict(Niter=101, burn=100, covUpdate=100, isave=100),
     pymc=dict(draws=50, tune=50, n_init=250),
     pymultinest=dict(nlive=100),
-    pypolychord=dict(nlive=100),
     ultranest=dict(nlive=100, temporary_directory=False),
     zeus=dict(nwalkers=10, iterations=100)
 )
@@ -65,7 +64,9 @@ sampler_imports = dict(
     dynamic_dynesty="dynesty"
 )
 
-no_pool_test = ["dnest4", "pymultinest", "nestle", "ptmcmcsampler", "pypolychord", "ultranest", "pymc"]
+no_pool_test = ["dnest4", "pymultinest", "nestle", "ptmcmcsampler", "ultranest", "pymc"]
+
+loaded_samplers = {k: v.load() for k, v in bilby.core.sampler.IMPLEMENTED_SAMPLERS.items()}
 
 
 def slow_func(x, m, c):
@@ -155,7 +156,7 @@ class TestRunningSamplers(unittest.TestCase):
 
     def _run_with_signal_handling(self, sampler, pool_size=1):
         pytest.importorskip(sampler_imports.get(sampler, sampler))
-        if bilby.core.sampler.IMPLEMENTED_SAMPLERS[sampler.lower()].hard_exit:
+        if loaded_samplers[sampler.lower()].hard_exit:
             pytest.skip(f"{sampler} hard exits, can't test signal handling.")
         if pool_size > 1 and sampler.lower() in no_pool_test:
             pytest.skip(f"{sampler} cannot be parallelized")
diff --git a/test/test_samplers_import.py b/test/test_samplers_import.py
index acc8baa23ff7cef0bdac200b5b8a6289bb41c1b6..1cab28a76b229bd6e8001be7ba9ef7b7118f3ab3 100644
--- a/test/test_samplers_import.py
+++ b/test/test_samplers_import.py
@@ -1,17 +1,21 @@
-"""
-Tests that all of the implemented samplers can be initialized.
-
-The :code:`FakeSampler` is omitted as that doesn't require importing
-any package.
-"""
 import bilby
+import pytest
+
+
+@pytest.mark.parametrize(
+    "sampler_name", bilby.core.sampler.IMPLEMENTED_SAMPLERS.keys()
+)
+def test_sampler_import(sampler_name):
+    """
+    Tests that all of the implemented samplers can be initialized.
 
-bilby.core.utils.logger.setLevel("ERROR")
-IMPLEMENTED_SAMPLERS = bilby.core.sampler.IMPLEMENTED_SAMPLERS
-likelihood = bilby.core.likelihood.Likelihood(dict())
-priors = bilby.core.prior.PriorDict(dict(a=bilby.core.prior.Uniform(0, 1)))
-for sampler in IMPLEMENTED_SAMPLERS:
-    if sampler == "fake_sampler":
-        continue
-    sampler_class = IMPLEMENTED_SAMPLERS[sampler]
+    Do not test :code:`FakeSampler` since it requires an additional argument.
+    """
+    if sampler_name in ["fake_sampler", "pypolychord"]:
+        pytest.skip(f"Skipping import test for {sampler_name}")
+    bilby.core.utils.logger.setLevel("ERROR")
+    likelihood = bilby.core.likelihood.Likelihood(dict())
+    priors = bilby.core.prior.PriorDict(dict(a=bilby.core.prior.Uniform(0, 1)))
+    sampler_class = bilby.core.sampler.IMPLEMENTED_SAMPLERS[sampler_name].load()
     sampler = sampler_class(likelihood=likelihood, priors=priors)
+    assert sampler is not None