Compare revisions

Changes are shown as if the source revision was being merged into the target revision.
Commits on Source (608)
Showing with 1586 additions and 495 deletions
......@@ -3,3 +3,4 @@ omit =
test/integration/example_test.py
test/integration/noise_realisation_test.py
test/integration/other_test.py
bilby/_version.py
......@@ -13,5 +13,6 @@ MANIFEST
*.dat
*.version
*.ipynb_checkpoints
outdir/*
**/outdir
.idea/*
bilby/_version.py
......@@ -17,39 +17,51 @@ stages:
# ------------------- Initial stage -------------------------------------------
.list-env: &list-env
- PREFIX="$(dirname $(which python))/.."
- if [ -d "${PREFIX}/conda-meta" ]; then
conda list --prefix "${PREFIX}" --show-channel-urls;
else
python -m pip list installed;
fi
# Check author list is up to date
authors:
stage: initial
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python310
script:
- python test/check_author_list.py
# Test containers scripts are up to date
containers:
stage: initial
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python311
script:
- cd containers
- python write_dockerfiles.py #HACK
# Fail if differences exist. If this fails, you may need to run
# write_dockerfiles.py and commit the changes.
- git diff --exit-code
- cp env-template.yml env.yml
- echo " - python=3.11" >> env.yml
- mamba env create -f env.yml -n test --dry-run
.test-python: &test-python
stage: initial
image: python
before_script:
# this is required because pytables doesn't use a wheel on py37
- apt-get -yqq update
- apt-get -yqq install libhdf5-dev
script:
- python -m pip install .
- *list-env
- python -c "import bilby"
- python -c "import bilby.bilby_mcmc"
- python -c "import bilby.core"
- python -c "import bilby.core.prior"
- python -c "import bilby.core.sampler"
- python -c "import bilby.core.utils"
- python -c "import bilby.gw"
- python -c "import bilby.gw.detector"
- python -c "import bilby.gw.eos"
- python -c "import bilby.gw.likelihood"
- python -c "import bilby.gw.sampler"
- python -c "import bilby.hyper"
- python -c "import cli_bilby"
......@@ -58,173 +70,173 @@ containers:
${script} --help;
done
# Test basic setup on python 3.10
basic-3.7:
basic-3.10:
<<: *test-python
image: python:3.7
image: python:3.10
# Test basic setup on python 3.11
basic-3.8:
basic-3.11:
<<: *test-python
image: python:3.8
image: python:3.11
# Test basic setup on python 3.12
basic-3.9:
basic-3.12:
<<: *test-python
image: python:3.9
image: python:3.12
precommits-py3.7:
.test-samplers-import: &test-samplers-import
stage: initial
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
script:
- source activate python37
- mkdir -p .pip37
- pip install --upgrade pip
- pip --cache-dir=.pip37 install --upgrade bilby
- pip --cache-dir=.pip37 install .
- pip --cache-dir=.pip37 install pre-commit
# Run precommits (flake8, spellcheck, isort, no merge conflicts, etc)
- pre-commit run --all-files --verbose --show-diff-on-failure
- python -m pip install .
- *list-env
- pytest test/test_samplers_import.py -v
import-samplers-3.10:
<<: *test-samplers-import
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python310
precommits-py3.8:
import-samplers-3.11:
<<: *test-samplers-import
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python311
import-samplers-3.12:
<<: *test-samplers-import
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python312
.precommits: &precommits
stage: initial
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python38
script:
- source activate python38
- mkdir -p .pip38
- source activate $PYVERSION
- mkdir -p $CACHE_DIR
- pip install --upgrade pip
- pip --cache-dir=.pip38 install --upgrade bilby
- pip --cache-dir=.pip38 install .
- pip --cache-dir=.pip38 install pre-commit
- pip --cache-dir=$CACHE_DIR install --upgrade bilby
- pip --cache-dir=$CACHE_DIR install .
# Run precommits (flake8, spellcheck, isort, no merge conflicts, etc)
- pre-commit run --all-files --verbose --show-diff-on-failure
precommits-py3.9:
precommits-py3.11:
<<: *precommits
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python311
variables:
CACHE_DIR: ".pip311"
PYVERSION: "python311"
install:
stage: initial
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python39
parallel:
matrix:
- EXTRA: [gw, mcmc, all]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python311
script:
- source activate python39
- mkdir -p .pip38
- pip install --upgrade pip
- pip --cache-dir=.pip39 install --upgrade bilby
- pip --cache-dir=.pip39 install .
- pip --cache-dir=.pip39 install pre-commit
# Run precommits (flake8, spellcheck, isort, no merge conflicts, etc)
- pre-commit run --all-files --verbose --show-diff-on-failure
- pip install .[$EXTRA]
# ------------------- Test stage -------------------------------------------
# run unit tests and build coverage
python-3.7:
.unit-tests: &unit-test
stage: test
needs: ["basic-3.7", "precommits-py3.7"]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
script:
- python -m pip install .
- *list-env
# Run pyflakes
- flake8 .
# Run tests and collect coverage data
- pytest --cov=bilby --durations 10
- coverage html
- coverage-badge -o coverage_badge.svg -f
python-3.10:
<<: *unit-test
needs: ["basic-3.10"]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python310
python-3.11:
<<: *unit-test
needs: ["basic-3.11", "precommits-py3.11"]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python311
after_script:
- coverage html
- coverage xml
coverage: '/(?i)total.*? (100(?:\.0+)?\%|[1-9]?\d(?:\.\d+)?\%)$/'
artifacts:
reports:
coverage_report:
coverage_format: cobertura
path: coverage.xml
paths:
- coverage_badge.svg
- htmlcov/
expire_in: 30 days
# test example on python 3.8
python-3.8:
stage: test
needs: ["basic-3.8", "precommits-py3.8"]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python38
script:
- python -m pip install .
- pytest
# test example on python 3.9
python-3.9:
stage: test
needs: ["basic-3.9", "precommits-py3.9"]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python39
script:
- python -m pip install .
- pytest
# test samplers on python 3.7
python-3.7-samplers:
stage: test
needs: ["basic-3.7", "precommits-py3.7"]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
script:
- python -m pip install .
- pytest test/integration/sampler_run_test.py --durations 10
# test samplers on python 3.8
python-3.8-samplers:
stage: test
needs: ["basic-3.8", "precommits-py3.8"]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python38
script:
- python -m pip install .
- pytest test/integration/sampler_run_test.py --durations 10
python-3.12:
<<: *unit-test
needs: ["basic-3.12"]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python312
# test samplers on each supported python version
python-3.9-samplers:
.test-sampler: &test-sampler
stage: test
needs: ["basic-3.9", "precommits-py3.9"]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python39
script:
- python -m pip install .
- pytest test/integration/sampler_run_test.py --durations 10
integration-tests-python-3.7:
- python -m pip install .[all]
- *list-env
- pytest test/integration/sampler_run_test.py --durations 10 -v
python-3.10-samplers:
<<: *test-sampler
needs: ["basic-3.10"]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python310
python-3.11-samplers:
<<: *test-sampler
needs: ["basic-3.11", "precommits-py3.11"]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python311
python-3.12-samplers:
<<: *test-sampler
needs: ["basic-3.12"]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python312
integration-tests-python-3.11:
stage: test
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
needs: ["basic-3.7", "precommits-py3.7"]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python311
needs: ["basic-3.11", "precommits-py3.11"]
only:
- schedules
script:
- python -m pip install .
- *list-env
# Run tests which are only done on schedule
- pytest test/integration/example_test.py
plotting-python-3.7:
.plotting: &plotting
stage: test
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
needs: ["basic-3.7", "precommits-py3.7"]
only:
- schedules
script:
- python -m pip install .
- python -m pip install ligo.skymap
- *list-env
- pytest test/gw/plot_test.py
plotting-python-3.10:
<<: *plotting
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python310
needs: ["basic-3.10"]
plotting-python-3.11:
<<: *plotting
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python311
needs: ["basic-3.11", "precommits-py3.11"]
plotting-python-3.12:
<<: *plotting
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python312
needs: ["basic-3.12"]
# ------------------- Docs stage -------------------------------------------
docs:
stage: docs
needs: ["python-3.7"]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python311
before_script:
- python -m ipykernel install
script:
# Make the documentation
- apt-get -yqq install pandoc
- python -m pip install .
- cd docs
- pip install ipykernel ipython jupyter
- cp ../examples/tutorials/*.ipynb ./
- rm basic_ptmcmc_tutorial.ipynb
- rm compare_samplers.ipynb
- rm visualising_the_results.ipynb
- jupyter nbconvert --to notebook --execute *.ipynb --inplace
- python -m pip install myst_parser # only for testing purposes - remove once test image is generating correctly
- cd examples/tutorials
- jupyter nbconvert --to notebook --execute *.ipynb --output-dir ../../docs
- cd ../../docs
- make clean
- make html
......@@ -234,13 +246,12 @@ docs:
# ------------------- Deploy stage -------------------------------------------
deploy-docs:
pages:
stage: deploy
needs: ["docs", "python-3.7"]
needs: ["docs", "python-3.11"]
script:
- mkdir public/
- mv htmlcov/ public/
- mv coverage_badge.svg public/
- mv docs/_build/html/* public/
artifacts:
paths:
......@@ -249,56 +260,50 @@ deploy-docs:
only:
- master
# Build the containers
build-python37-container:
.build-container: &build-container
stage: deploy
image: docker:19.03.12
image: docker:20.10.23
needs: ["containers"]
only:
- schedules
rules:
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
changes:
compare_to: 'refs/heads/main'
paths:
- containers/*
when: manual
- if: $CI_PIPELINE_SOURCE == "schedule"
script:
- cd containers
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
- docker build --tag v3-bilby-python37 - < v3-dockerfile-test-suite-python37
- docker image tag v3-bilby-python37 containers.ligo.org/lscsoft/bilby/v2-bilby-python37:latest
- docker image push containers.ligo.org/lscsoft/bilby/v2-bilby-python37:latest
- cp v3-dockerfile-test-suite-$PYVERSION Dockerfile
- docker build --tag v3-bilby-$PYVERSION .
- docker image tag v3-bilby-$PYVERSION containers.ligo.org/lscsoft/bilby/v2-bilby-$PYVERSION:latest
- docker image push containers.ligo.org/lscsoft/bilby/v2-bilby-$PYVERSION:latest
build-python38-container:
stage: deploy
image: docker:19.03.12
needs: ["containers"]
only:
- schedules
script:
- cd containers
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
- docker build --tag v3-bilby-python38 - < v3-dockerfile-test-suite-python38
- docker image tag v3-bilby-python38 containers.ligo.org/lscsoft/bilby/v2-bilby-python38:latest
- docker image push containers.ligo.org/lscsoft/bilby/v2-bilby-python38:latest
build-python310-container:
<<: *build-container
variables:
PYVERSION: "python310"
build-python39-container:
stage: deploy
image: docker:19.03.12
needs: ["containers"]
only:
- schedules
script:
- cd containers
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
- docker build --tag v3-bilby-python39 - < v3-dockerfile-test-suite-python39
- docker image tag v3-bilby-python39 containers.ligo.org/lscsoft/bilby/v2-bilby-python39:latest
- docker image push containers.ligo.org/lscsoft/bilby/v2-bilby-python39:latest
build-python311-container:
<<: *build-container
variables:
PYVERSION: "python311"
build-python312-container:
<<: *build-container
variables:
PYVERSION: "python312"
pypi-release:
stage: deploy
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python310
variables:
TWINE_USERNAME: $PYPI_USERNAME
TWINE_PASSWORD: $PYPI_PASSWORD
before_script:
- pip install twine
- python setup.py sdist
- python -m build --sdist --wheel --outdir dist/ .
script:
- twine upload dist/*
only:
- tags
- tags
......@@ -5,25 +5,38 @@ repos:
- id: check-merge-conflict # prevent committing files with merge conflicts
- id: flake8 # checks for flake8 errors
- repo: https://github.com/psf/black
rev: 20.8b1
rev: 22.3.0
hooks:
- id: black
language_version: python3
files: ^bilby/bilby_mcmc/
files: '^(bilby/bilby_mcmc/|bilby/core/sampler/|examples/)'
- repo: https://github.com/codespell-project/codespell
rev: v1.16.0
rev: v2.1.0
hooks:
- id: codespell
args: [--ignore-words=.dictionary.txt]
- repo: https://github.com/asottile/seed-isort-config
rev: v1.3.0
hooks:
- id: seed-isort-config
args: [--application-directories, 'bilby/']
files: ^bilby/bilby_mcmc/
- repo: https://github.com/pre-commit/mirrors-isort
rev: v4.3.21
rev: v5.10.1
hooks:
- id: isort # sorts imports alphabetically and separates imports into sections
args: [-w=88, -m=3, -tc, -sp=setup.cfg ]
files: ^bilby/bilby_mcmc/
files: '^(bilby/bilby_mcmc/|bilby/core/sampler/|examples/)'
- repo: https://github.com/datarootsio/databooks
rev: 0.1.14
hooks:
- id: databooks
name: databooks
description:
"Remove notebook metadata using `databooks`."
entry: databooks meta
language: python
minimum_pre_commit_version: 2.9.2
types: [ jupyter ]
args: [-w]
- repo: local
hooks:
- id: jupyter-nb-clear-output
name: jupyter-nb-clear-output
files: \.ipynb$
language: system
entry: jupyter nbconvert --ClearOutputPreprocessor.enabled=True --inplace
......@@ -9,11 +9,14 @@ Aditya Vijaykumar
Andrew Kim
Andrew Miller
Antoni Ramos-Buades
Apratim Ganguly
Avi Vajpeyi
Ben Patterson
Bruce Edelman
Carl-Johan Haster
Cecilio Garcia-Quiros
Charlie Hoy
Chentao Yang
Christopher Philip Luke Berry
Christos Karathanasis
Colm Talbot
......@@ -24,18 +27,26 @@ Eric Thrane
Ethan Payne
Francisco Javier Hernandez
Gregory Ashton
Hank Hua
Hector Estelles
Ignacio Magaña Hernandez
Isaac McMahon
Isobel Marguarethe Romero-Shaw
Jack Heinzel
Jacob Golomb
Jade Powell
James A Clark
Jeremy G Baier
John Veitch
Joshua Brandt
Josh Willis
Karl Wette
Katerina Chatziioannou
Kaylee de Soto
Khun Sang Phukon
Kruthi Krishna
Kshipraa Athar
Kyle Wong
Leslie Wade
Liting Xiao
Maite Mateu-Lucena
......@@ -49,19 +60,24 @@ Michael Puerrer
Michael Williams
Monica Rizzo
Moritz Huebner
Nico Gerardo Bers
Nicola De Lillo
Nikhil Sarin
Nirban Bose
Noah Wolfe
Olivia Wilk
Paul Easter
Paul Lasky
Philip Relton
Rhys Green
Rhiannon Udall
Rico Lo
Roberto Cotesta
Rory Smith
S. H. Oh
Sacha Husa
Sama Al-Shammari
Samson Leong
Scott Coughlin
Serguei Ossokine
Shanika Galaudage
......@@ -69,9 +85,21 @@ Sharan Banagiri
Shichao Wu
Simon Stevenson
Soichiro Morisaki
Soumen Roy
Stephen R Green
Sumeet Kulkarni
Sylvia Biscoveanu
Tathagata Ghosh
Teagan Clarke
Tomasz Baka
Will M. Farr
Virginia d'Emilio
Vivien Raymond
Ka-Lok Lo
Isaac Legred
Marc Penuliar
Andrew Fowlie
Martin White
Peter Tsun-Ho Pang
Alexandre Sebastien Goettel
Ann-Kristin Malz
# All notable changes will be documented in this file
## [1.1.4] 2021-10-08
## [Unreleased]
## [2.3.0] - 2024-05-30
### Added
- Add support for sampler plugins via entry points (!1340, !1355)
- Add `bilby.core.sampler.get_implemented_samplers` and `bilby.core.get_sampler_class` (!1340)
- Add `bilby.core.utils.entry_points.get_entry_points` (!1340)
- Add support for reading results from PathLike objects (!1342)
- Add `snrs_as_sample` property to `bilby.gw.likelihood.base.GravitationalWaveTransient` (!1344)
- Add `get_expected_outputs` method to the sampler classes (!1336)
### Changed
- Change `bilby_mcmc` to use `glasflow` instead of `nflows` (!1332)
- Sampler classes are no longer imported in `bilby.core.sampler` (!1340)
- Sampler classes in `bilby.core.sampler.IMPLEMENTED_SAMPLERS` must now be loaded before use (!1340)
- `bilby.core.sampler.IMPLEMENTED_SAMPLERS` is now an instance of `bilby.core.sampler.ImplementedSampler` instead of a dictionary (!1355); see the sketch below
- Updates to support numpy v2 (!1362)
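As a hedged illustration of the new plugin interface (the function names are taken from the entries above; the exact signatures and return values are assumptions):
```python
import bilby

# list the samplers bilby knows about, including any registered by
# third-party packages through the new entry-point mechanism
print(bilby.core.sampler.get_implemented_samplers())

# sampler classes are no longer imported eagerly; look one up on demand
sampler_class = bilby.core.sampler.get_sampler_class("dynesty")
```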
### Fixed
- Include final frequency point in relative binning integration (!1310)
- Address various deprecation warnings and deprecated keyword arguments (!1316, !1326, !1343)
- Fix typo in logging statement in `bilby.gw.source` (!1325)
- Fix missing import in `bilby.gw.detector.load_data_from_cache_file` (!1327)
- Fix bug where `linestyle` was ignored in `bilby.core.result.plot_multiple` (!1238)
- Fix `soft_init` sampler keyword argument with `dynesty` (!1335)
- Fix ZeroDivisionError when using `dynesty` with `act-walk` and large values of `nact` (!1346)
- Fix custom prior loading from result file (!1360)
## [2.2.3] - 2024-02-24
Version 2.2.3 release of Bilby
This is a bugfix release
There are also a number of testing/infrastructure updates.
### Changes
- Fix a bug when the specified maximum frequency is too low for the multibanding likelihood (!1279)
- Allow the `DirichletElement` prior to be pickled (!1312)
- Add the ability to change the pool size when resuming a `dynesty` job (!1315)
- Fix how the random seed is passed to `dynesty` (!1319)
## [2.2.2] - 2023-11-29
Version 2.2.2 release of Bilby
This is a bugfix release reverting a change from 2.2.1
### Changes
- Revert !1284 (!1306)
## [2.2.1] - 2023-11-11
Version 2.2.1 release of Bilby
This release is a bugfix release.
### Changes
- Ensure interferometer metadata is not empty (!1281)
- Make interrupted pools exit more quickly (!1284)
- Fix conditional sampling with DeltaFunction conditions (!1289)
- Fix an error raised by the triangular prior with numpy (!1294)
- Make sure strain data resampling works (!1295)
- Fix dynesty logging (!1296)
- Fix a bug with saving lists that contain None (!1301)
- Preparatory fix for an upcoming change in dynesty (!1302)
## [2.2.0] - 2023-07-24
Version 2.2.0 release of Bilby
This release contains one new feature and drops support for Python 3.8.
### Added
- New waveform interface to support the SEOBNRv5 family of waveforms (!1218)
- Enable default noise + injection function for non-CBC signals (!1263)
- Fallback to result pickle loading to match result writing (!1291)
### Changes
- Additional error catching for plotting (!1261, !1271)
- Improve plotting options for corner plots (!1270)
- Fix bugs in closing the pool for emcee (!1274)
- Generalize MPI support (!1278)
- Fix a bug with saving hdf5 results when conda isn't present (!1290)
### Deprecated
- Drop support for py38 (!1277)
## [2.1.2] - 2023-07-17
Version 2.1.2 release of Bilby
This is a bugfix release.
Note that one of the changes will have a significant impact on scripts that rely on
a seed for random data generation.
Where users have previously used `np.random.seed` they should now call
`bilby.core.utils.random.seed`.
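For example, a minimal sketch of the migration (assuming bilby >= 2.1.2):
```python
import bilby

# before: np.random.seed(1234)
# after: seed bilby's dedicated generator rather than numpy's global state
bilby.core.utils.random.seed(1234)

# bilby then draws from bilby.core.utils.random.rng (a numpy Generator)
```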
### Changes
- Fix issues related to random number generation with multiprocessing (!1273)
- Enable cosmological priors to be written/read in our plain text format (!1258)
- Allow posterior reweighting to be performed when changing the likelihood and the prior (!1260)
## [2.1.1] - 2023-04-28
Version 2.1.1 release of Bilby
Bugfix release
### Changes
- Fix the matched filter SNR phase for the multiband likelihood (!1253)
- Bugfix for Fisher matrix proposals in `bilby_mcmc` (!1251)
- Make the changes to the spline calibration backward compatible, 2.0.2 resume files can't be read with 2.1.0 (!1250)
## [2.1.0] - 2023-04-12
Version 2.1.0 release of Bilby
Minor feature improvements and bug fixes
### Additions
- Additional parameterizations for equation-of-state inference (!1083, !1240)
- Add Fisher matrix posterior estimator (!1242)
### Changes
- Improvements to the bilby-mcmc sampler including a Fisher Information Matrix proposal (!1242)
- Optimize spline interpolation of calibration uncertainties (!1241)
- Update LIGO India coordinates record to public DCC (!1246)
- Make logger disabling work in redundancy test (!1245)
- Make sure nested samples are data frame (!1244)
- Minor improvements to the result methods including moving to top level imports (!1243)
- Fix a bug in the slabspike prior (!1235)
- Reduce verbosity when setting strain data (!1233)
- Fix issue with cached results class (!1223)
### Deprecated
- Reading/writing ROQ weights to json (!1232)
## [2.0.2] - 2023-03-21
Version 2.0.2 release of Bilby
This is a bugfix release after the last major update.
### Changes
- Fix to bilby-MCMC implementation of prior boundary (!1237)
- Fix to time calibration (!1234)
- Fix nessai sampling time (!1236)
## [2.0.1] - 2023-03-13
Version 2.0.1 release of Bilby
This is a bugfix release after the last major update.
Users may notice changes in inferred binary neutron star masses after updating to match [lalsuite](https://git.ligo.org/lscsoft/lalsuite/-/merge_requests/1658).
### Changes
- Make sure quantities that need to be conserved between dynesty iterations are class-level attributes (!1225).
- Fix massive memory usage in post-processing calculation of SNRs (!1227).
- Update value for the solar mass (!1229).
- Make `scikit-learn` an explicit dependence of `bilby[GW]` (!1230).
## [2.0.0] - 2023-02-29
Version 2.0.0 release of Bilby
This major version release has a significant change to the behaviour of the `dynesty` wrapper.
There are also a number of bugfixes and some new features in sampling and GW utilities.
### Added
- Add marginalized time reconstruction for the ROQ likelihood (!1196)
- Generate the `dynesty` posterior using rejection sampling by default (!1203)
- Add optimization for mass ratio prior peaking at equal masses (!1204)
- Add option to sample over a number of precomputed calibration curves (!1215)
### Changes
- Optimize weight calculation for `MultiBandGravitationalWaveTransient` (!1171)
- Add compatibility with pymc 5 (!1191)
- A bug fix of the stored prior when using a marginalized likelihood (!1193)
- Various bug fixes to improve the reliability of the `RelativeBinningGravitationalWaveTransient` (!1198, !1211)
- A hack fix for samplers that are not compatible with `numpy>1.23` (!1194)
- Updates to some reference noise curves (!1206, !1207)
- Fix the broken time+calibration marginalization (!1201)
- Fix a bug when reading GW frame files (!1202)
- Fix the normalization of the whitened strain attribute of `Interferometer` (!1205)
- Optimize ROQ waveform and calibration calls (!1216)
- Add different proposal distribution and MCMC length for `dynesty` (!1187, !1222)
## [1.4.1] - 2022-12-07
Version 1.4.1 release of Bilby
This is a bugfix release to address some minor issues identified after v1.4.0.
### Changes
- Documentation updates (!1181, !1183)
- Fix some of the examples in the repository (!1182)
- Make sure conversion to symmetric mass ratio always returns a valid value (!1184)
- Provide a default nlive for dynamic dynesty (!1185)
- Enable the relative binning likelihood to be initialized with ra/dec when sampling in a different sky parameterization (!1186)
- Make sure that all dumping pickle files is done safely (!1189)
- Make error catching for `dynesty` checkpointing more robust (!1190)
## [1.4.0] - 2022-11-18
Version 1.4.0 release of Bilby
The main changes in this release are support for more recent versions of `dynesty` (!1138)
and `nessai` (!1161) and adding the
`RelativeBinningGravitationalWaveTransientLikelihood` (!1105)
(see [arXiv:1806.08792](https://arxiv.org/abs/1806.08792) for details).
### Added
- Per-detector likelihood calculations (!1149)
- `bilby.gw.likelihood.relative.RelativeBinningGravitationalWaveTransient` (!1105)
### Changes
- Reset the timer for `PyMultiNest` when overwriting an existing checkpoint directory (!1163)
- Cache the computed noise log likelihood for the `GravitationalWaveTransient` (!1179)
- Set the reference chirp mass for the multi-banded likelihood from the prior when not specified (!1169)
- Bugfix in the name of the saved ASD file in `Interferometer.save_data` (!1176)
- Modify the window length for stationarity tests for `ptemcee` (!1146)
- Explicit support for `nessai>=0.7.0` (!1161)
- Allow prior arguments read from a string to be functions (!1144)
- Support `dynesty>=1.1.0` (!1138)
## [1.3.0] - 2022-10-23
Version 1.3.0 release of Bilby
This release has a major change to a sampler interface: `pymc3` is no longer supported; users should switch to `pymc>=4`.
This release also adds a new top-level dependency, `bilby-cython`.
This release also contains various documentation improvements.
### Added
- Improved logging of likelihood information when starting sampling (!1148)
- Switch some geometric calculations to use optimized bilby-cython package (!1053)
- Directly specify the starting point for `bilby_mcmc` (!1155)
- Allow a signal to be specified to only be present in a specific `Interferometer` (!1164)
- Store time domain model function in CBCResult metadata (!1165)
### Changes
- Switch from `pymc3` to `pymc` (!1117)
- Relax equality check for distance marginalization lookup to allow cross-platform use (!1150)
- Fix to deal with non-checkpointing `bilby_mcmc` analyses (!1151)
- Allow result objects with different analysis configurations to be combined (!1153)
- Improve the storing of environment information (!166)
- Fix issue when specifying distance and redshift independently (!1154)
- Fix a bug in the storage of likelihood/prior samples for `bilby_mcmc` (!1156)
## [1.2.1] - 2022-09-05
Version 1.2.1 release of Bilby
This release contains a few bug fixes following 1.2.0.
### Changes
- Improve how sampling seed is handled across samplers (!1134)
- Make sure labels are included when evidences are in corner plot legend (!1135)
- Remove calls to `getargspec` (!1136)
- Make sure parameter reconstruction cache is not mangled when reading (!1126)
- Enable the constant uncertainty calibration spline to have a specifiable boundary condition (!1137)
- Fix a bug in checkpointing for `bilby_mcmc` (!1141)
- Fix the `LALCBCWaveformGenerator` (!1140)
- Switch to automatic versioning with `setuptools_scm` (!1125)
- Improve the stability of the multivariate normal prior (!1142)
- Extend mass conversions to include source-frame parameters (!1131)
- Fix prior ranges for GW150914 example (!1129)
## [1.2.0] - 2022-08-15
Version 1.2.0 release of Bilby
This is the first release that drops support for `Python<3.8`.
This release involves major refactoring, especially of the sampler implementations.
Additionally, there are a range of improvements to how information is passed
with multiprocessing.
### Added
- Time marginalized ROQ likelihood (!1040)
- Multiple and multi-banded ROQ likelihood (!1093)
- Gaussian process likelihoods (!1086)
- `CBCWaveformGenerator` added with CBC specific defaults (!1080)
### Changes
- Fixes and improvements to multi-processing (!1084, !1043, !1096)
- Major refactoring of sampler implementations (!1043)
- Fixes for reading/writing priors (!1103, !1127, !1128)
- Fixes/updates to example scripts (!1050, !1031, !1076, !1081, !1074)
- Fixes to calibration correction in GW likelihoods (!1114, !1120, !1119)
### Deprecated/removed
- Require `Python>=3.8`
- Require `astropy>=5`
- `bilby.core.utils.conversion.gps_time_to_gmst`
- `bilby.core.utils.spherical_to_cartesian`
- `bilby.core.utils.progress`
- Deepdish IO for `Result`, `Interferometer`, and `InterferometerList`
## [1.1.5] - 2022-01-14
Version 1.1.5 release of Bilby
### Added
- Option to enforce that a GW signal fits into the segment duration (!1041)
- Remove saving the `.dat` samples file with `dynesty` (!1028)
- Catch corrupted `json` result files being passed (!1034)
### Changes
- Fixes to conversion function for `nessai` and `cpnest` (!1055)
- Workaround for `astropy` v5 (!1054)
- Various fixes to testing system (!1038, !1044, !1045, !1048)
- Updated defaults for `nessai` (!1042)
- Small bug fixes (!1032, !1036, !1039, !1046, !1052)
- Bug fix in the definition of some standard interferometers (!1037)
- Improvements to the multi-banded GWT likelihood (!1026)
- Improve meta data comparison (!1035)
## [1.1.4] - 2021-10-08
Version 1.1.4 release of bilby
### Added
......@@ -20,7 +333,7 @@ Version 1.1.4 release of bilby
- Typo fix in earth light crossing (!1003)
- Fix zero spin conversion (!1002)
## [1.1.3] 2021-07-02
## [1.1.3] - 2021-07-02
Version 1.1.3 release of bilby
### Added
......@@ -50,7 +363,7 @@ Version 1.1.3 release of bilby
- Restructured utils module into several submodules. API remains backwards compatible (!873)
- Changed number of default walks in `dynesty` from `10*self.ndim` to `100` (!961)
## [1.1.2] 2021-05-05
## [1.1.2] - 2021-05-05
Version 1.1.2 release of bilby
### Added
......@@ -80,13 +393,13 @@ Version 1.1.2 release of bilby
- Fixed issues with pickle saving and loading (!932)
- Fixed an issue with the `_base_roq_waveform` (!959)
## [1.1.1] 2021-03-16
## [1.1.1] - 2021-03-16
Version 1.1.1 release of bilby
### Changes
- Added `include requirements.txt` in `MANIFEST.in` to stop the pip installation from breaking
## [1.1.0] 2021-03-15
## [1.1.0] - 2021-03-15
Version 1.1.0 release of bilby
### Added
......@@ -125,7 +438,7 @@ Version 1.1.0 release of bilby
- Fixed the likelihood count in `dynesty` (!853)
- Changed the ordering of keyword arguments for the `Sine` and `Cosine` constructors (!892)
## [1.0.4] 2020-11-23
## [1.0.4] - 2020-11-23
Version 1.0.4 release of bilby
### Added
......@@ -134,7 +447,7 @@ Version 1.0.4 release of bilby
### Changes
- Fixed issue in the CI
## [1.0.3] 2020-10-23
## [1.0.3] - 2020-10-23
Version 1.0.3 release of bilby
......@@ -153,7 +466,7 @@ Version 1.0.3 release of bilby
- Typo fixes (!878, !887, !879)
- Minor bug fixes (!888)
## [1.0.2] 2020-09-14
## [1.0.2] - 2020-09-14
Version 1.0.2 release of bilby
......@@ -175,7 +488,7 @@ Version 1.0.2 release of bilby
- Clean up of code (!854)
- Various minor bug, test and plotting fixes (!859, !874, !872, !865)
## [1.0.1] 2020-08-29
## [1.0.1] - 2020-08-29
Version 1.0.1 release of bilby
......@@ -200,7 +513,7 @@ Version 1.0.1 release of bilby
- Various minor bug fixes and improvements to the documentation (!820)(!823)(!837)
- Various testing improvements (!833)(!847)(!855)(!852)
## [1.0.0] 2020-07-06
## [1.0.0] - 2020-07-06
Version 1.0 release of bilby
......@@ -751,3 +1064,33 @@ First `pip` installable version https://pypi.org/project/BILBY/ .
### Removed
- All chainconsumer dependency as this was causing issues.
[Unreleased]: https://git.ligo.org/lscsoft/bilby/-/compare/v2.3.0...master
[2.3.0]: https://git.ligo.org/lscsoft/bilby/-/compare/v2.2.3...v2.3.0
[2.2.3]: https://git.ligo.org/lscsoft/bilby/-/compare/v2.2.2...v2.2.3
[2.2.2]: https://git.ligo.org/lscsoft/bilby/-/compare/v2.2.1...v2.2.2
[2.2.1]: https://git.ligo.org/lscsoft/bilby/-/compare/v2.2.0...v2.2.1
[2.2.0]: https://git.ligo.org/lscsoft/bilby/-/compare/v2.1.2...v2.2.0
[2.1.2]: https://git.ligo.org/lscsoft/bilby/-/compare/v2.1.1...v2.1.2
[2.1.1]: https://git.ligo.org/lscsoft/bilby/-/compare/v2.1.0...v2.1.1
[2.1.0]: https://git.ligo.org/lscsoft/bilby/-/compare/v2.0.2...v2.1.0
[2.0.2]: https://git.ligo.org/lscsoft/bilby/-/compare/v2.0.1...v2.0.2
[2.0.1]: https://git.ligo.org/lscsoft/bilby/-/compare/v2.0.0...v2.0.1
[2.0.0]: https://git.ligo.org/lscsoft/bilby/-/compare/v1.4.1...v2.0.0
[1.4.1]: https://git.ligo.org/lscsoft/bilby/-/compare/v1.4.0...v1.4.1
[1.4.0]: https://git.ligo.org/lscsoft/bilby/-/compare/1.3.0...v1.4.0
[1.3.0]: https://git.ligo.org/lscsoft/bilby/-/compare/1.2.1...1.3.0
[1.2.1]: https://git.ligo.org/lscsoft/bilby/-/compare/1.2.0...1.2.1
[1.2.0]: https://git.ligo.org/lscsoft/bilby/-/compare/1.1.5...1.2.0
[1.1.5]: https://git.ligo.org/lscsoft/bilby/-/compare/1.1.4...1.1.5
[1.1.4]: https://git.ligo.org/lscsoft/bilby/-/compare/1.1.3...1.1.4
[1.1.3]: https://git.ligo.org/lscsoft/bilby/-/compare/1.1.2...1.1.3
[1.1.2]: https://git.ligo.org/lscsoft/bilby/-/compare/1.1.1...1.1.2
[1.1.1]: https://git.ligo.org/lscsoft/bilby/-/compare/1.1.0...1.1.1
[1.1.0]: https://git.ligo.org/lscsoft/bilby/-/compare/1.0.4...1.1.0
[1.0.4]: https://git.ligo.org/lscsoft/bilby/-/compare/1.0.3...1.0.4
[1.0.3]: https://git.ligo.org/lscsoft/bilby/-/compare/1.0.2...1.0.3
[1.0.2]: https://git.ligo.org/lscsoft/bilby/-/compare/1.0.1...1.0.2
[1.0.1]: https://git.ligo.org/lscsoft/bilby/-/compare/1.0.0...1.0.1
[1.0.0]: https://git.ligo.org/lscsoft/bilby/-/compare/0.6.9...1.0.0
# Contributing to bilby
This is a short guide to contributing to bilby aimed at general LVC members who
This is a short guide to contributing to bilby aimed at general LVK members who
have some familiarity with python and git.
1. [Code of conduct](#code-of-conduct)
2. [Code style](#code-style)
3. [Code relevance](#code-relevance)
4. [Merge requests](#merge-requests)
5. [Typical workflow](#typical-workflow)
6. [Hints and tips](#hints-and-tips)
7. [Code overview](#code-overview)
3. [Automated Code Checking](#automated-code-checking)
4. [Unit Testing](#unit-testing)
5. [Code relevance](#code-relevance)
6. [Merge requests](#merge-requests)
7. [Typical workflow](#typical-workflow)
8. [Hints and tips](#hints-and-tips)
9. [Code overview](#code-overview)
## Code of Conduct
......@@ -17,7 +19,7 @@ have some familiarity with python and git.
Everyone participating in the bilby community, and in particular in our issue
tracker, merge requests, and chat channels, is expected to treat other people
with respect and follow the guidelines articulated in the [Python Community
Code of Conduct](https://www.python.org/psf/codeofconduct/).
Code of Conduct](https://www.python.org/psf/codeofconduct/). Furthermore, members of the LVK collaboration must follow the [LVK Code of Conduct](https://dcc.ligo.org/LIGO-M1900037/public).
## Code style
......@@ -46,8 +48,8 @@ def my_new_function(x, y, print=False):
```
3. Avoid inline comments unless necessary. Ideally, the code (or, failing that, the docstring) should make it obvious what is going on; use comments only in subtle cases.
4. Name variables sensibly. Avoid using single-letter variables, it is better to name something `power_spectral_density_array` than `psda`.
5. Don't repeat yourself. If code is repeated in multiple places, wrap it up into a function.
6. Add tests. The C.I. is there to do the work of "checking" the code, both now and into the future. Use it.
5. Don't repeat yourself. If code is repeated in multiple places, wrap it up into a function. This also helps with the writing of robust unit tests (see below).
## Automated code checking
......@@ -76,6 +78,50 @@ If you experience any issues with pre-commit, please ask for support on the
usual help channels.
## Unit Testing
Unit tests are an important part of code development, helping to minimize the number of undetected bugs which may be present in a merge request. They also greatly expedite the review of code, and can even help during the initial development if used properly. Accordingly, bilby requires unit testing for any changes with machine readable inputs and outputs (i.e. pretty much everything except plotting).
Unit testing is integrated into the CI/CD pipeline and uses the built-in unittest package. Tests should be written into the `test/` directory which corresponds to their location within the package, such that, for example, a change to `bilby/gw/conversion.py` should go into `test/gw/conversion_test.py`. To run a single test locally, one may do `pytest path/to/test_file.py::TestClass::test_name`, whereas to run all the tests in a given test file one may omit the `::TestClass::test_name` suffix.
For an example of what a test looks like, consider this test for the fft utils in bilby:
```python
import unittest

import numpy as np

import bilby
from bilby.core import utils


class TestFFT(unittest.TestCase):
    def setUp(self):
        self.sampling_frequency = 10

    def tearDown(self):
        del self.sampling_frequency

    def test_nfft_sine_function(self):
        injected_frequency = 2.7324
        duration = 100
        times = utils.create_time_series(self.sampling_frequency, duration)
        time_domain_strain = np.sin(2 * np.pi * times * injected_frequency + 0.4)
        frequency_domain_strain, frequencies = bilby.core.utils.nfft(
            time_domain_strain, self.sampling_frequency
        )
        frequency_at_peak = frequencies[np.argmax(np.abs(frequency_domain_strain))]
        self.assertAlmostEqual(injected_frequency, frequency_at_peak, places=1)

    def test_nfft_infft(self):
        time_domain_strain = np.random.normal(0, 1, 10)
        frequency_domain_strain, _ = bilby.core.utils.nfft(
            time_domain_strain, self.sampling_frequency
        )
        new_time_domain_strain = bilby.core.utils.infft(
            frequency_domain_strain, self.sampling_frequency
        )
        self.assertTrue(np.allclose(time_domain_strain, new_time_domain_strain))
```
`setUp` and `tearDown` handle construction and deconstruction of the test, such that each of the other test functions may be run independently, in any order. The other two functions each make an intuitive test of the functionality of an fft/ifft function: that the fft of a sine wave should be a delta function, and that an ifft should be an inverse of an fft.
For more information on how to write effective tests, see [this guide](https://docs.python-guide.org/writing/tests/), and many others.
## Code relevance
The bilby code base is intended to be highly modular and flexible. We encourage
......@@ -153,7 +199,19 @@ $ git clone git@git.ligo.org:albert.einstein/bilby.git
```
replacing the SSH url with that of your fork. This will create a directory
`/bilby` containing a local copy of the code. From this directory, you can run
`/bilby` containing a local copy of the code.
It is strongly advised to perform development with a dedicated conda environment.
In-depth instructions for creating a conda environment may be found in the relevant
[conda docs](https://conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#activating-an-environment),
but for most purposes the commands
```bash
$ conda create -n my-environment-name python=3.X
$ conda activate my-environment-name
```
will produce the desired results. Once this is completed, one may proceed to the `/bilby` directory and run
```bash
$ pip install -e .
......@@ -202,8 +260,25 @@ $ git checkout -b my-new-feature lscsoft/master
### Step d) Hack away
1. Develop the changes you would like to introduce, using `git add` to add files with changes. Ideally, commit small units of change often rather than creating one large commit at the end; this will simplify review and make modifying any changes easier.
2. Commit the changes using `git commit`. This will prompt you for a commit message. Commit messages should be clear, identifying which code was changed, and why. Common practice (see e.g. [this blog](https://chris.beams.io/posts/git-commit/)) is to use a short summary line (<50 characters), followed by a blank line, then more information in longer lines.
3. Push your changes to the remote copy of your fork on git.ligo.org
2. Commit the changes using `git commit`. This will prompt you for a commit message. Commit messages should be clear, identifying which code was changed, and why. Bilby is adopting the scipy commit message format, specified [here](https://docs.scipy.org/doc/scipy/dev/contributor/development_workflow.html#writing-the-commit-message). Commit messages take a standard format of `ACRONYM: Summary message` followed by a more detailed description. For example, an enhancement would look like:
```
ENH: Add my awesome new feature
This is a very cool change that makes parameter estimation run 10x faster by changing a single line.
```
Similarly a bugfix:
```
BUG: Fix type error in my_awesome_feature.py
Correct a typo at L1 of /bilby/my_awesome_feature.py which returned a dictionary instead of a string.
```
For more discussion of best practices, see e.g. [this blog](https://chris.beams.io/posts/git-commit/).
4. Push your changes to the remote copy of your fork on git.ligo.org
```bash
git push origin my-new-feature
......
include README.rst
include LICENSE.md
include requirements.txt
include gw_requirements.txt
include mcmc_requirements.txt
include optional_requirements.txt
include sampler_requirements.txt
include bilby/_version.py
recursive-include test *.py *.prior
|pipeline status| |coverage report| |pypi| |conda| |version|
=====
Bilby
Bilby development has moved to `GitHub <https://github.com/bilby-dev/bilby>`__!
=====
Please open any new issues or pull requests there. A full migration guide will be provided soon. Links below here may no longer be active.
====
A user-friendly Bayesian inference library.
Fulfilling all your Bayesian dreams.
......@@ -30,50 +32,11 @@ us directly. For advice on contributing, see `the contributing guide <https://gi
Citation guide
--------------
If you use :code:`bilby` in a scientific publication, please cite
* `Bilby: A user-friendly Bayesian inference library for gravitational-wave
astronomy
<https://ui.adsabs.harvard.edu/#abs/2018arXiv181102042A/abstract>`__
* `Bayesian inference for compact binary coalescences with BILBY: validation and application to the first LIGO-Virgo gravitational-wave transient catalogue <https://ui.adsabs.harvard.edu/abs/2020MNRAS.499.3295R/abstract>`__
The first of these papers introduces the software, while the second introduces advances in the sampling approaches and validation of the software.
If you use the :code:`bilby_mcmc` sampler, please additionally cite
* `BILBY-MCMC: an MCMC sampler for gravitational-wave inference <https://ui.adsabs.harvard.edu/abs/2021MNRAS.507.2037A/abstract>`__
Additionally, :code:`bilby` builds on a number of open-source packages. If you
make use of this functionality in your publications, we recommend you cite them
as requested in their associated documentation.
**Samplers**
* `dynesty <https://github.com/joshspeagle/dynesty>`__
* `nestle <https://github.com/kbarbary/nestle>`__
* `pymultinest <https://github.com/JohannesBuchner/PyMultiNest>`__
* `cpnest <https://github.com/johnveitch/cpnest>`__
* `emcee <https://github.com/dfm/emcee>`__
* `ptemcee <https://github.com/willvousden/ptemcee>`__
* `ptmcmcsampler <https://github.com/jellis18/PTMCMCSampler>`__
* `pypolychord <https://github.com/PolyChord/PolyChordLite>`__
* `PyMC3 <https://github.com/pymc-devs/pymc3>`_
**Gravitational-wave tools**
* `gwpy <https://github.com/gwpy/gwpy>`__
* `lalsuite <https://git.ligo.org/lscsoft/lalsuite>`__
* `astropy <https://github.com/astropy/astropy>`__
**Plotting**
* `corner <https://github.com/dfm/corner.py>`__ for generating corner plot
* `matplotlib <https://github.com/matplotlib/matplotlib>`__ for general plotting routines
Please refer to the `Acknowledging/citing bilby guide <https://lscsoft.docs.ligo.org/bilby/citing-bilby.html>`__.
.. |pipeline status| image:: https://git.ligo.org/lscsoft/bilby/badges/master/pipeline.svg
:target: https://git.ligo.org/lscsoft/bilby/commits/master
.. |coverage report| image:: https://lscsoft.docs.ligo.org/bilby/coverage_badge.svg
.. |coverage report| image:: https://git.ligo.org/lscsoft/bilby/badges/master/coverage.svg
:target: https://lscsoft.docs.ligo.org/bilby/htmlcov/
.. |pypi| image:: https://badge.fury.io/py/bilby.svg
:target: https://pypi.org/project/bilby/
......
......@@ -23,8 +23,12 @@ from . import core, gw, hyper
from .core import utils, likelihood, prior, result, sampler
from .core.sampler import run_sampler
from .core.likelihood import Likelihood
from .core.result import read_in_result, read_in_result_list
__version__ = utils.get_version_information()
try:
from ._version import version as __version__
except ModuleNotFoundError: # development mode
__version__ = 'unknown'
if sys.version_info < (3,):
......
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from packaging import version
from ..core.sampler.base_sampler import SamplerError
from ..core.utils import logger
......@@ -137,12 +136,14 @@ class Chain(object):
@property
def _random_idx(self):
from ..core.utils.random import rng
mindex = self._last_minimum_index[1]
# Check if mindex exceeds current position by 10 ACT: if so use a random sample
# otherwise we draw only from the chain past the minimum_index
if np.isinf(self.tau_last) or self.position - mindex < 10 * self.tau_last:
mindex = 0
return np.random.randint(mindex, self.position + 1)
return rng.integers(mindex, self.position + 1)
@property
def random_sample(self):
......@@ -158,7 +159,7 @@ class Chain(object):
@property
def minimum_index(self):
"""This calculated a minimum index from which to discard samples
"""This calculates a minimum index from which to discard samples
A number of methods are provided for the calculation. A subset are
switched off (by `if False` statements) for future development
......@@ -253,7 +254,7 @@ class Chain(object):
@property
def tau(self):
""" The maximum ACT over all parameters """
"""The maximum ACT over all parameters"""
if self.position in self.max_tau_dict:
# If we have the ACT at the current position, return it
......@@ -272,7 +273,7 @@ class Chain(object):
@property
def tau_nocache(self):
""" Calculate tau forcing a recalculation (no cached tau) """
"""Calculate tau forcing a recalculation (no cached tau)"""
tau = max(self.tau_dict.values())
self.max_tau_dict[self.position] = tau
self.cached_tau_count = 0
......@@ -280,7 +281,7 @@ class Chain(object):
@property
def tau_last(self):
""" Return the last-calculated tau if it exists, else inf """
"""Return the last-calculated tau if it exists, else inf"""
if len(self.max_tau_dict) > 0:
return list(self.max_tau_dict.values())[-1]
else:
......@@ -288,7 +289,7 @@ class Chain(object):
@property
def _tau_for_full_chain(self):
""" The maximum ACT over all parameters """
"""The maximum ACT over all parameters"""
return max(self._tau_dict_for_full_chain.values())
@property
......@@ -297,11 +298,11 @@ class Chain(object):
@property
def tau_dict(self):
""" Calculate a dictionary of tau (ACT) for every parameter """
"""Calculate a dictionary of tau (ACT) for every parameter"""
return self._calculate_tau_dict(self.minimum_index)
def _calculate_tau_dict(self, minimum_index):
""" Calculate a dictionary of tau (ACT) for every parameter """
"""Calculate a dictionary of tau (ACT) for every parameter"""
logger.debug(f"Calculating tau_dict {self}")
# If there are too few samples to calculate tau
......@@ -343,7 +344,12 @@ class Chain(object):
@property
def nsamples(self):
nuseable_steps = self.position - self.minimum_index
return int(nuseable_steps / (self.thin_by_nact * self.tau))
n_independent_samples = nuseable_steps / self.tau
nsamples = int(n_independent_samples / self.thin_by_nact)
if nuseable_steps >= nsamples:
return nsamples
else:
return 0
@property
def nsamples_last(self):
......@@ -408,12 +414,20 @@ class Chain(object):
for ax, key in zip(axes[:, 1], self.keys):
if all_samples is not None:
yy_all = all_samples[key]
ax.hist(yy_all, bins=50, alpha=0.6, density=True, color="k")
if np.any(np.isinf(yy_all)):
logger.warning(
f"Could not plot histogram for parameter {key} due to infinite values"
)
else:
ax.hist(yy_all, bins=50, alpha=0.6, density=True, color="k")
yy = self.get_1d_array(key)[nburn : self.position : self.thin]
ax.hist(yy, bins=50, alpha=0.8, density=True)
ax.set_xlabel(self._get_plot_label_by_key(key, priors))
if np.any(np.isinf(yy)):
logger.warning(
f"Could not plot histogram for parameter {key} due to infinite values"
)
else:
ax.hist(yy, bins=50, alpha=0.8, density=True)
ax.set_xlabel(self._get_plot_label_by_key(key, priors))
# Add x-axes labels to the traceplots
axes[-1, 0].set_xlabel(r"Iteration $[\times 10^{3}]$")
......@@ -512,7 +526,7 @@ class Sample(object):
def calculate_tau(x, autocorr_c=5):
import emcee
if LooseVersion(emcee.__version__) < LooseVersion("3"):
if version.parse(emcee.__version__) < version.parse("3"):
raise SamplerError("bilby-mcmc requires emcee > 3.0 for autocorr analysis")
if np.all(np.diff(x) == 0):
......
import torch
from nflows.distributions.normal import StandardNormal
from nflows.flows.base import Flow
from nflows.nn import nets as nets
from nflows.transforms import (
from glasflow.nflows.distributions.normal import StandardNormal
from glasflow.nflows.flows.base import Flow
from glasflow.nflows.nn import nets as nets
from glasflow.nflows.transforms import (
CompositeTransform,
MaskedAffineAutoregressiveTransform,
RandomPermutation,
)
from nflows.transforms.coupling import (
from glasflow.nflows.transforms.coupling import (
AdditiveCouplingTransform,
AffineCouplingTransform,
)
from nflows.transforms.normalization import BatchNorm
from glasflow.nflows.transforms.normalization import BatchNorm
from torch.nn import functional as F
# Turn off parallelism
......
......@@ -6,9 +6,10 @@ import numpy as np
from scipy.spatial.distance import jensenshannon
from scipy.stats import gaussian_kde
from ..core.fisher import FisherMatrixPosteriorEstimator
from ..core.prior import PriorDict
from ..core.sampler.base_sampler import SamplerError
from ..core.utils import logger, reflect
from ..core.utils import logger, random, reflect
from ..gw.source import PARAMETER_SETS
......@@ -18,7 +19,7 @@ class ProposalCycle(object):
self.weights = [prop.weight for prop in self.proposal_list]
self.normalized_weights = [w / sum(self.weights) for w in self.weights]
self.weighted_proposal_list = [
np.random.choice(self.proposal_list, p=self.normalized_weights)
random.rng.choice(self.proposal_list, p=self.normalized_weights)
for _ in range(10 * int(1 / min(self.normalized_weights)))
]
self.nproposals = len(self.weighted_proposal_list)
......@@ -61,6 +62,9 @@ class BaseProposal(object):
self.parameters = [p for p in self.parameters if p in subset]
self._str_attrs.append("parameters")
if len(self.parameters) == 0:
raise ValueError("Proposal requested with zero parameters")
self.ndim = len(self.parameters)
self.prior_boundary_dict = {key: priors[key].boundary for key in priors}
......@@ -129,9 +133,16 @@ class BaseProposal(object):
val_normalised_reflected = reflect(np.array(val_normalised))
return minimum + width * val_normalised_reflected
def __call__(self, chain):
sample, log_factor = self.propose(chain)
sample = self.apply_boundaries(sample)
def __call__(self, chain, likelihood=None, priors=None):
if getattr(self, "needs_likelihood_and_priors", False):
sample, log_factor = self.propose(chain, likelihood, priors)
else:
sample, log_factor = self.propose(chain)
if log_factor == 0:
sample = self.apply_boundaries(sample)
return sample, log_factor
@abstractmethod
......@@ -208,7 +219,7 @@ class FixedGaussianProposal(BaseProposal):
sample = chain.current_sample
for key in self.parameters:
sigma = self.prior_width_dict[key] * self.sigmas[key]
sample[key] += sigma * np.random.randn()
sample[key] += sigma * random.rng.normal(0, 1)
log_factor = 0
return sample, log_factor
......@@ -245,15 +256,15 @@ class AdaptiveGaussianProposal(BaseProposal):
def propose(self, chain):
sample = chain.current_sample
self.update_scale(chain)
if np.random.random() < 1e-3:
if random.rng.uniform(0, 1) < 1e-3:
factor = 1e1
elif np.random.random() < 1e-4:
elif random.rng.uniform(0, 1) < 1e-4:
factor = 1e2
else:
factor = 1
for key in self.parameters:
sigma = factor * self.scale * self.prior_width_dict[key] * self.sigmas[key]
sample[key] += sigma * np.random.randn()
sample[key] += sigma * random.rng.normal(0, 1)
log_factor = 0
return sample, log_factor
......@@ -295,13 +306,13 @@ class DifferentialEvolutionProposal(BaseProposal):
theta = chain.current_sample
theta1 = chain.random_sample
theta2 = chain.random_sample
if np.random.rand() > self.mode_hopping_frac:
if random.rng.uniform(0, 1) > self.mode_hopping_frac:
gamma = 1
else:
# Base jump size
gamma = np.random.normal(0, 2.38 / np.sqrt(2 * self.ndim))
gamma = random.rng.normal(0, 2.38 / np.sqrt(2 * self.ndim))
# Scale uniformly in log between 0.1 and 10 times
gamma *= np.exp(np.log(0.1) + np.log(100.0) * np.random.rand())
gamma *= np.exp(np.log(0.1) + np.log(100.0) * random.rng.uniform(0, 1))
for key in self.parameters:
theta[key] += gamma * (theta2[key] - theta1[key])
......@@ -336,7 +347,7 @@ class UniformProposal(BaseProposal):
for key in self.parameters:
width = self.prior_width_dict[key]
if np.isinf(width) is False:
sample[key] = np.random.uniform(
sample[key] = random.rng.uniform(
self.prior_minimum_dict[key], self.prior_maximum_dict[key]
)
else:
......@@ -458,7 +469,7 @@ class DensityEstimateProposal(BaseProposal):
# Print a log message
took = time.time() - start
logger.info(
logger.debug(
f"{self.density_name} construction at {self.steps_since_refit} finished"
f" for length {chain.position} chain, took {took:0.2f}s."
f" Current accept-ratio={self.acceptance_ratio:0.2f}"
......@@ -479,7 +490,7 @@ class DensityEstimateProposal(BaseProposal):
fail_parameters.append(key)
if len(fail_parameters) > 0:
logger.info(
logger.debug(
f"{self.density_name} construction failed verification and is discarded"
)
self.density = current_density
......@@ -492,7 +503,10 @@ class DensityEstimateProposal(BaseProposal):
# Check if we refit
testA = self.steps_since_refit >= self.next_refit_time
if testA:
self.refit(chain)
try:
self.refit(chain)
except Exception as e:
logger.warning(f"Failed to refit chain due to error {e}")
# If KDE is yet to be fitted, use the fallback
if self.trained is False:
......@@ -547,6 +561,7 @@ class GMMProposal(DensityEstimateProposal):
def _sample(self, nsamples=None):
return np.squeeze(self.density.sample(n_samples=nsamples)[0])
@staticmethod
def check_dependencies(warn=True):
if importlib.util.find_spec("sklearn") is None:
if warn:
......@@ -593,12 +608,15 @@ class NormalizingFlowProposal(DensityEstimateProposal):
fallback=fallback,
scale_fits=scale_fits,
)
self.setup_flow()
self.setup_optimizer()
self.initialised = False
self.max_training_epochs = max_training_epochs
self.js_factor = js_factor
def initialise(self):
self.setup_flow()
self.setup_optimizer()
self.initialised = True
def setup_flow(self):
if self.ndim < 3:
self.setup_basic_flow()
......@@ -651,7 +669,7 @@ class NormalizingFlowProposal(DensityEstimateProposal):
return np.power(max_js, 2)
def train(self, chain):
logger.info("Starting NF training")
logger.debug("Starting NF training")
import torch
@@ -682,14 +700,14 @@ class NormalizingFlowProposal(DensityEstimateProposal):
validation_samples, training_samples_draw
)
if max_js_bits < max_js_threshold:
logger.info(
logger.debug(
f"Training complete after {epoch} steps, "
f"max_js_bits={max_js_bits:0.5f}<{max_js_threshold}"
)
break
took = time.time() - start
logger.info(
logger.debug(
f"Flow training step ({self.steps_since_refit}) finished"
f" for length {chain.position} chain, took {took:0.2f}s."
f" Current accept-ratio={self.acceptance_ratio:0.2f}"
@@ -699,6 +717,9 @@ class NormalizingFlowProposal(DensityEstimateProposal):
self.trained = True
def propose(self, chain):
if self.initialised is False:
self.initialise()
import torch
self.steps_since_refit += 1
@@ -707,7 +728,10 @@ class NormalizingFlowProposal(DensityEstimateProposal):
# Check if we retrain the NF
testA = self.steps_since_refit >= self.next_refit_time
if testA:
self.train(chain)
try:
self.train(chain)
except Exception as e:
logger.warning(f"Failed to retrain chain due to error {e}")
if self.trained is False:
return self.fallback.propose(chain)
@@ -728,11 +752,12 @@ class NormalizingFlowProposal(DensityEstimateProposal):
return theta, float(log_factor)
@staticmethod
def check_dependencies(warn=True):
if importlib.util.find_spec("nflows") is None:
if importlib.util.find_spec("glasflow") is None:
if warn:
logger.warning(
"Unable to utilise NormalizingFlowProposal as nflows is not installed"
"Unable to utilise NormalizingFlowProposal as glasflow is not installed"
)
return False
else:
@@ -753,14 +778,72 @@ class FixedJumpProposal(BaseProposal):
def propose(self, chain):
sample = chain.current_sample
for key, jump in self.jumps.items():
sign = np.random.randint(2) * 2 - 1
sign = random.rng.integers(2) * 2 - 1
sample[key] += sign * jump + self.epsilon * self.prior_width_dict[key]
log_factor = 0
return sample, log_factor
@property
def epsilon(self):
return self.scale * np.random.normal()
return self.scale * random.rng.normal()
class FisherMatrixProposal(AdaptiveGaussianProposal):
"""Fisher Matrix Proposals
Uses a finite differencing approach motivated by BayesWave (see, e.g.
https://arxiv.org/abs/1410.3835). The inverse Fisher Information Matrix
is calculated from the current sample, then proposals are drawn from a
multivariate Gaussian and scaled by an adaptive parameter.
"""
needs_likelihood_and_priors = True
def __init__(
self,
priors,
subset=None,
weight=1,
update_interval=100,
scale_init=1e0,
fd_eps=1e-4,
adapt=False,
):
super(FisherMatrixProposal, self).__init__(
priors, weight, subset, scale_init=scale_init
)
self.update_interval = update_interval
self.steps_since_update = update_interval
self.adapt = adapt
self.mean = np.zeros(len(self.parameters))
self.fd_eps = fd_eps
def propose(self, chain, likelihood, priors):
sample = chain.current_sample
if self.adapt:
self.update_scale(chain)
if self.steps_since_update >= self.update_interval:
fmp = FisherMatrixPosteriorEstimator(
likelihood, priors, parameters=self.parameters, fd_eps=self.fd_eps
)
try:
self.iFIM = fmp.calculate_iFIM(sample.dict)
except (RuntimeError, ValueError, np.linalg.LinAlgError) as e:
logger.warning(f"FisherMatrixProposal failed with {e}")
if hasattr(self, "iFIM") is False:
# No past iFIM exists, return sample
return sample, 0
self.steps_since_update = 0
jump = self.scale * random.rng.multivariate_normal(
self.mean, self.iFIM, check_valid="ignore"
)
for key, val in zip(self.parameters, jump):
sample[key] += val
log_factor = 0
self.steps_since_update += 1
return sample, log_factor
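For intuition on the class above: the proposal covariance is the inverse of a finite-difference estimate of the Fisher information at the current sample. An illustrative check (not bilby code): for a Gaussian log-likelihood the central second difference is exact, so the inverse Fisher matrix recovers the posterior variance.

import numpy as np

mu, sigma, eps = 0.0, 2.0, 1e-4

def lnL(x):
    # Gaussian log-likelihood up to a constant; Fisher information is 1 / sigma**2
    return -0.5 * ((x - mu) / sigma) ** 2

x0 = 0.3
fim = -(lnL(x0 + eps) - 2 * lnL(x0) + lnL(x0 - eps)) / eps**2
print(np.isclose(1 / fim, sigma**2))  # True: iFIM equals the posterior variance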
class BaseGravitationalWaveTransientProposal(BaseProposal):
@@ -814,11 +897,11 @@ class CorrelatedPolarisationPhaseJump(BaseGravitationalWaveTransientProposal):
alpha = sample["psi"] + phase
beta = sample["psi"] - phase
draw = np.random.random()
draw = random.rng.random()
if draw < 0.5:
alpha = 3.0 * np.pi * np.random.random()
alpha = 3.0 * np.pi * random.rng.random()
else:
beta = 3.0 * np.pi * np.random.random() - 2 * np.pi
beta = 3.0 * np.pi * random.rng.random() - 2 * np.pi
# Update
sample["psi"] = (alpha + beta) * 0.5
@@ -853,7 +936,7 @@ class PhaseReversalProposal(BaseGravitationalWaveTransientProposal):
@property
def epsilon(self):
if self.fuzz:
return np.random.normal(0, self.fuzz_sigma)
return random.rng.normal(0, self.fuzz_sigma)
else:
return 0
@@ -918,7 +1001,7 @@ class StretchProposal(BaseProposal):
def _stretch_move(sample, complement, scale, ndim, parameters):
# Draw z
u = np.random.rand()
u = random.rng.uniform(0, 1)
z = (u * (scale - 1) + 1) ** 2 / scale
log_factor = (ndim - 1) * np.log(z)
@@ -930,14 +1013,15 @@ def _stretch_move(sample, complement, scale, ndim, parameters):
class EnsembleProposal(BaseProposal):
""" Base EnsembleProposal class for ensemble-based swap proposals """
"""Base EnsembleProposal class for ensemble-based swap proposals"""
def __init__(self, priors, weight=1):
super(EnsembleProposal, self).__init__(priors, weight)
def __call__(self, chain, chain_complement):
sample, log_factor = self.propose(chain, chain_complement)
sample = self.apply_boundaries(sample)
if log_factor == 0:
sample = self.apply_boundaries(sample)
return sample, log_factor
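(A hedged note on the change above: boundary re-mapping is now applied only to symmetric draws. For a move with a non-trivial Hastings factor, such as the stretch move, wrapping the sample after the factor has been computed would break detailed balance.)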
@@ -961,7 +1045,7 @@ class EnsembleStretch(EnsembleProposal):
def propose(self, chain, chain_complement):
sample = chain.current_sample
complement = chain_complement[
np.random.randint(len(chain_complement))
random.rng.integers(len(chain_complement))
].current_sample
return _stretch_move(
sample, complement, self.scale, self.ndim, self.parameters
@@ -975,7 +1059,7 @@ def get_default_ensemble_proposal_cycle(priors):
def get_proposal_cycle(string, priors, L1steps=1, warn=True):
big_weight = 10
small_weight = 5
tiny_weight = 0.1
tiny_weight = 0.5
if "gwA" in string:
# Parameters for learning proposals
@@ -983,9 +1067,12 @@ def get_proposal_cycle(string, priors, L1steps=1, warn=True):
first_fit=1000, nsamples_for_density=10000, fit_multiplier=2
)
all_but_cal = [key for key in priors if "recalib" not in key]
plist = [
AdaptiveGaussianProposal(priors, weight=small_weight),
DifferentialEvolutionProposal(priors, weight=small_weight),
AdaptiveGaussianProposal(priors, weight=small_weight, subset=all_but_cal),
DifferentialEvolutionProposal(
priors, weight=small_weight, subset=all_but_cal
),
]
if GMMProposal.check_dependencies(warn=warn) is False:
@@ -996,15 +1083,15 @@ def get_proposal_cycle(string, priors, L1steps=1, warn=True):
if priors.intrinsic:
intrinsic = PARAMETER_SETS["intrinsic"]
plist += [
AdaptiveGaussianProposal(priors, weight=big_weight, subset=intrinsic),
AdaptiveGaussianProposal(priors, weight=small_weight, subset=intrinsic),
DifferentialEvolutionProposal(
priors, weight=big_weight, subset=intrinsic
priors, weight=small_weight, subset=intrinsic
),
KDEProposal(
priors, weight=big_weight, subset=intrinsic, **learning_kwargs
priors, weight=small_weight, subset=intrinsic, **learning_kwargs
),
GMMProposal(
priors, weight=big_weight, subset=intrinsic, **learning_kwargs
priors, weight=small_weight, subset=intrinsic, **learning_kwargs
),
]
@@ -1013,13 +1100,13 @@ def get_proposal_cycle(string, priors, L1steps=1, warn=True):
plist += [
AdaptiveGaussianProposal(priors, weight=small_weight, subset=extrinsic),
DifferentialEvolutionProposal(
priors, weight=big_weight, subset=extrinsic
priors, weight=small_weight, subset=extrinsic
),
KDEProposal(
priors, weight=big_weight, subset=extrinsic, **learning_kwargs
priors, weight=small_weight, subset=extrinsic, **learning_kwargs
),
GMMProposal(
priors, weight=big_weight, subset=extrinsic, **learning_kwargs
priors, weight=small_weight, subset=extrinsic, **learning_kwargs
),
]
@@ -1030,6 +1117,11 @@ def get_proposal_cycle(string, priors, L1steps=1, warn=True):
GMMProposal(
priors, weight=small_weight, subset=mass, **learning_kwargs
),
FisherMatrixProposal(
priors,
weight=small_weight,
subset=mass,
),
]
if priors.spin:
@@ -1039,13 +1131,23 @@ def get_proposal_cycle(string, priors, L1steps=1, warn=True):
GMMProposal(
priors, weight=small_weight, subset=spin, **learning_kwargs
),
FisherMatrixProposal(
priors,
weight=big_weight,
subset=spin,
),
]
if priors.precession:
measured_spin = ["chi_1", "chi_2", "a_1", "a_2", "chi_1_in_plane"]
if priors.measured_spin:
measured_spin = PARAMETER_SETS["measured_spin"]
plist += [
AdaptiveGaussianProposal(
priors, weight=small_weight, subset=measured_spin
),
FisherMatrixProposal(
priors,
weight=small_weight,
subset=measured_spin,
),
]
if priors.mass and priors.spin:
@@ -1073,6 +1175,21 @@ def get_proposal_cycle(string, priors, L1steps=1, warn=True):
CorrelatedPolarisationPhaseJump(priors, weight=tiny_weight),
PhasePolarisationReversalProposal(priors, weight=tiny_weight),
]
if priors.sky:
sky = PARAMETER_SETS["sky"]
plist += [
FisherMatrixProposal(
priors,
weight=small_weight,
subset=sky,
),
GMMProposal(
priors,
weight=small_weight,
subset=sky,
**learning_kwargs,
),
]
for key in ["time_jitter", "psi", "phi_12", "tilt_2", "lambda_1", "lambda_2"]:
if key in priors.non_fixed_keys:
plist.append(PriorProposal(priors, subset=[key], weight=tiny_weight))
@@ -1088,13 +1205,10 @@ def get_proposal_cycle(string, priors, L1steps=1, warn=True):
DifferentialEvolutionProposal(priors, weight=big_weight),
UniformProposal(priors, weight=tiny_weight),
KDEProposal(priors, weight=big_weight, scale_fits=L1steps),
FisherMatrixProposal(priors, weight=big_weight),
]
if GMMProposal.check_dependencies(warn=warn):
plist.append(GMMProposal(priors, weight=big_weight, scale_fits=L1steps))
if NormalizingFlowProposal.check_dependencies(warn=warn):
plist.append(
NormalizingFlowProposal(priors, weight=big_weight, scale_fits=L1steps)
)
plist = remove_proposals_using_string(plist, string)
return ProposalCycle(plist)
@@ -1111,6 +1225,7 @@ def remove_proposals_using_string(plist, string):
GM=GMMProposal,
PR=PriorProposal,
UN=UniformProposal,
FM=FisherMatrixProposal,
)
for element in string.split("no")[1:]:
......
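The abbreviation table above enables opt-out cycle strings: each token after a "no" names a proposal class to drop, with the matching done in the elided loop body. The splitting itself is plain Python:

# Hedged illustration of the token splitting used above
print("gwAnoFMnoUN".split("no")[1:])  # ['FM', 'UN'] -> drop FisherMatrix and Uniform proposals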
from . import grid, likelihood, prior, result, sampler, series, utils
from . import grid, likelihood, prior, result, sampler, series, utils, fisher
import numpy as np
import pandas as pd
import scipy.linalg
from scipy.optimize import minimize
class FisherMatrixPosteriorEstimator(object):
def __init__(self, likelihood, priors, parameters=None, fd_eps=1e-6, n_prior_samples=100):
""" A class to estimate posteriors using the Fisher Matrix approach
Parameters
----------
likelihood: bilby.core.likelihood.Likelihood
A bilby likelihood object
priors: bilby.core.prior.PriorDict
A bilby prior object
parameters: list
Names of parameters to sample in
fd_eps: float
A parameter to control the size of perturbation used when finite
differencing the likelihood
n_prior_samples: int
The number of prior samples to draw and use to attempt estimation
of the maximum likelihood sample.
"""
self.likelihood = likelihood
if parameters is None:
self.parameter_names = priors.non_fixed_keys
else:
self.parameter_names = parameters
self.fd_eps = fd_eps
self.n_prior_samples = n_prior_samples
self.N = len(self.parameter_names)
self.prior_samples = [
priors.sample_subset(self.parameter_names) for _ in range(n_prior_samples)
]
self.prior_bounds = [(priors[key].minimum, priors[key].maximum) for key in self.parameter_names]
self.prior_width_dict = {}
for key in self.parameter_names:
width = priors[key].width
if np.isnan(width):
raise ValueError(f"Prior width is ill-formed for {key}")
self.prior_width_dict[key] = width
def log_likelihood(self, sample):
self.likelihood.parameters.update(sample)
return self.likelihood.log_likelihood()
def calculate_iFIM(self, sample):
FIM = self.calculate_FIM(sample)
iFIM = scipy.linalg.inv(FIM)
# Ensure iFIM is positive definite
min_eig = np.min(np.real(np.linalg.eigvals(iFIM)))
if min_eig < 0:
iFIM -= 10 * min_eig * np.eye(*iFIM.shape)
return iFIM
def sample_array(self, sample, n=1):
from .utils.random import rng
if sample == "maxL":
sample = self.get_maximum_likelihood_sample()
self.mean = np.array(list(sample.values()))
self.iFIM = self.calculate_iFIM(sample)
return rng.multivariate_normal(self.mean, self.iFIM, n)
def sample_dataframe(self, sample, n=1):
samples = self.sample_array(sample, n)
return pd.DataFrame(samples, columns=self.parameter_names)
def calculate_FIM(self, sample):
FIM = np.zeros((self.N, self.N))
for ii, ii_key in enumerate(self.parameter_names):
for jj, jj_key in enumerate(self.parameter_names):
FIM[ii, jj] = -self.get_second_order_derivative(sample, ii_key, jj_key)
return FIM
def get_second_order_derivative(self, sample, ii, jj):
if ii == jj:
return self.get_finite_difference_xx(sample, ii)
else:
return self.get_finite_difference_xy(sample, ii, jj)
def get_finite_difference_xx(self, sample, ii):
# Sample grid
p = self.shift_sample_x(sample, ii, 1)
m = self.shift_sample_x(sample, ii, -1)
dx = .5 * (p[ii] - m[ii])
loglp = self.log_likelihood(p)
logl = self.log_likelihood(sample)
loglm = self.log_likelihood(m)
return (loglp - 2 * logl + loglm) / dx ** 2
def get_finite_difference_xy(self, sample, ii, jj):
# Sample grid
pp = self.shift_sample_xy(sample, ii, 1, jj, 1)
pm = self.shift_sample_xy(sample, ii, 1, jj, -1)
mp = self.shift_sample_xy(sample, ii, -1, jj, 1)
mm = self.shift_sample_xy(sample, ii, -1, jj, -1)
dx = .5 * (pp[ii] - mm[ii])
dy = .5 * (pp[jj] - mm[jj])
loglpp = self.log_likelihood(pp)
loglpm = self.log_likelihood(pm)
loglmp = self.log_likelihood(mp)
loglmm = self.log_likelihood(mm)
return (loglpp - loglpm - loglmp + loglmm) / (4 * dx * dy)
def shift_sample_x(self, sample, x_key, x_coef):
vx = sample[x_key]
dvx = self.fd_eps * self.prior_width_dict[x_key]
shift_sample = sample.copy()
shift_sample[x_key] = vx + x_coef * dvx
return shift_sample
def shift_sample_xy(self, sample, x_key, x_coef, y_key, y_coef):
vx = sample[x_key]
vy = sample[y_key]
dvx = self.fd_eps * self.prior_width_dict[x_key]
dvy = self.fd_eps * self.prior_width_dict[y_key]
shift_sample = sample.copy()
shift_sample[x_key] = vx + x_coef * dvx
shift_sample[y_key] = vy + y_coef * dvy
return shift_sample
def get_maximum_likelihood_sample(self, initial_sample=None):
""" A method to attempt optimization of the maximum likelihood
This uses a simple scipy optimization approach, starting from a number
of draws from the prior to avoid problems with local optimization.
Note: this approach works well in low-dimensional problems when the
posterior is narrow relative to the prior. If the number of dimensions
is large, or the posterior is wide relative to the prior, the method
may fail to find the global maximum.
"""
minlogL = np.inf
for i in range(self.n_prior_samples):
initial_sample = self.prior_samples[i]
x0 = list(initial_sample.values())
def neg_log_like(x, self, T=1):
sample = {key: val for key, val in zip(self.parameter_names, x)}
return - 1 / T * self.log_likelihood(sample)
out = minimize(
neg_log_like,
x0,
args=(self, 1),
bounds=self.prior_bounds,
method="L-BFGS-B",
)
# Track the best (lowest) negative log-likelihood over the restarts
if out.fun < minlogL:
minlogL = out.fun
minout = out
return {key: val for key, val in zip(self.parameter_names, minout.x)}
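A hedged usage sketch for the estimator above; `GaussianLikelihood`, `PriorDict`, and `Uniform` are standard bilby API, and the `bilby.core.fisher` path follows the `from . import ..., fisher` hunk earlier in this diff.

import numpy as np
import bilby

def model(x, m, c):
    return m * x + c

x = np.linspace(0, 1, 100)
y = model(x, m=2.0, c=1.0) + np.random.default_rng(0).normal(0, 0.1, len(x))
likelihood = bilby.core.likelihood.GaussianLikelihood(x, y, model, sigma=0.1)
priors = bilby.core.prior.PriorDict(dict(
    m=bilby.core.prior.Uniform(0, 5, "m"),
    c=bilby.core.prior.Uniform(0, 5, "c"),
))

fmp = bilby.core.fisher.FisherMatrixPosteriorEstimator(likelihood, priors)
samples = fmp.sample_dataframe("maxL", n=1000)  # approximate posterior draws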
import json
import os
from collections import OrderedDict
import numpy as np
@@ -352,7 +351,7 @@ class Grid(object):
'label', 'outdir', 'parameter_names', 'n_dims', 'priors',
'sample_points', 'ln_likelihood', 'ln_evidence',
'ln_noise_evidence']
dictionary = OrderedDict()
dictionary = dict()
for attr in save_attrs:
try:
dictionary[attr] = getattr(self, attr)
@@ -437,7 +436,7 @@ class Grid(object):
grid: bilby.core.grid.Grid
Raises
=======
======
ValueError: If no filename is given and either outdir or label is None
If no bilby.core.grid.Grid is found in the path
......
@@ -4,7 +4,7 @@ import numpy as np
from scipy.special import gammaln, xlogy
from scipy.stats import multivariate_normal
from .utils import infer_parameters_from_function
from .utils import infer_parameters_from_function, infer_args_from_function_except_n_args
class Likelihood(object):
......@@ -108,13 +108,14 @@ class Analytical1DLikelihood(Likelihood):
value is given).
"""
def __init__(self, x, y, func):
def __init__(self, x, y, func, **kwargs):
parameters = infer_parameters_from_function(func)
super(Analytical1DLikelihood, self).__init__(dict.fromkeys(parameters))
super(Analytical1DLikelihood, self).__init__(dict())
self.x = x
self.y = y
self._func = func
self._function_keys = list(self.parameters.keys())
self._function_keys = [key for key in parameters if key not in kwargs]
self.kwargs = kwargs
def __repr__(self):
return self.__class__.__name__ + '(x={}, y={}, func={})'.format(self.x, self.y, self.func.__name__)
@@ -164,11 +165,11 @@ class Analytical1DLikelihood(Likelihood):
@property
def residual(self):
""" Residual of the function against the data. """
return self.y - self.func(self.x, **self.model_parameters)
return self.y - self.func(self.x, **self.model_parameters, **self.kwargs)
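A hedged sketch of what the new `**kwargs` plumbing enables: model arguments supplied at construction are held fixed and excluded from `_function_keys`, so they are not sampled (shown here with the `GaussianLikelihood` subclass updated below).

import numpy as np
from bilby.core.likelihood import GaussianLikelihood

def model(x, m, c):
    return m * x + c

x = np.linspace(0, 1, 50)
y = model(x, m=3.0, c=1.0)
# `c` is fixed via kwargs, so only `m` remains a free model parameter
likelihood = GaussianLikelihood(x, y, model, sigma=0.1, c=1.0)
print(likelihood._function_keys)  # ['m']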
class GaussianLikelihood(Analytical1DLikelihood):
def __init__(self, x, y, func, sigma=None):
def __init__(self, x, y, func, sigma=None, **kwargs):
"""
A general Gaussian likelihood for known or unknown noise - the model
parameters are inferred from the arguments of function
@@ -190,7 +191,7 @@ class GaussianLikelihood(Analytical1DLikelihood):
to that for `x` and `y`.
"""
super(GaussianLikelihood, self).__init__(x=x, y=y, func=func)
super(GaussianLikelihood, self).__init__(x=x, y=y, func=func, **kwargs)
self.sigma = sigma
# Check if sigma was provided, if not it is a parameter
@@ -229,7 +230,7 @@ class PoissonLikelihood(Analytical1DLikelihood):
class PoissonLikelihood(Analytical1DLikelihood):
def __init__(self, x, y, func):
def __init__(self, x, y, func, **kwargs):
"""
A general Poisson likelihood for a rate - the model parameters are
inferred from the arguments of function, which provides a rate.
@@ -251,10 +252,10 @@ class PoissonLikelihood(Analytical1DLikelihood):
fixed value is given).
"""
super(PoissonLikelihood, self).__init__(x=x, y=y, func=func)
super(PoissonLikelihood, self).__init__(x=x, y=y, func=func, **kwargs)
def log_likelihood(self):
rate = self.func(self.x, **self.model_parameters)
rate = self.func(self.x, **self.model_parameters, **self.kwargs)
if not isinstance(rate, np.ndarray):
raise ValueError(
"Poisson rate function returns wrong value type! "
@@ -286,7 +287,7 @@ class PoissonLikelihood(Analytical1DLikelihood):
class ExponentialLikelihood(Analytical1DLikelihood):
def __init__(self, x, y, func):
def __init__(self, x, y, func, **kwargs):
"""
An exponential likelihood function.
@@ -302,10 +303,10 @@ class ExponentialLikelihood(Analytical1DLikelihood):
value is given). The model should return the expected mean of
the exponential distribution for each data point.
"""
super(ExponentialLikelihood, self).__init__(x=x, y=y, func=func)
super(ExponentialLikelihood, self).__init__(x=x, y=y, func=func, **kwargs)
def log_likelihood(self):
mu = self.func(self.x, **self.model_parameters)
mu = self.func(self.x, **self.model_parameters, **self.kwargs)
if np.any(mu < 0.):
return -np.inf
return -np.sum(np.log(mu) + (self.y / mu))
@@ -328,7 +329,7 @@ class ExponentialLikelihood(Analytical1DLikelihood):
class StudentTLikelihood(Analytical1DLikelihood):
def __init__(self, x, y, func, nu=None, sigma=1.):
def __init__(self, x, y, func, nu=None, sigma=1, **kwargs):
"""
A general Student's t-likelihood for known or unknown number of degrees
of freedom, and known or unknown scale (which tends toward the standard
@@ -357,7 +358,7 @@ class StudentTLikelihood(Analytical1DLikelihood):
Set the scale of the distribution. If not given then this defaults
to 1, which specifies a standard (central) Student's t-distribution
"""
super(StudentTLikelihood, self).__init__(x=x, y=y, func=func)
super(StudentTLikelihood, self).__init__(x=x, y=y, func=func, **kwargs)
self.nu = nu
self.sigma = sigma
@@ -406,7 +407,7 @@ class Multinomial(Likelihood):
Likelihood for system with N discrete possibilities.
"""
def __init__(self, data, n_dimensions, label="parameter_"):
def __init__(self, data, n_dimensions, base="parameter_"):
"""
Parameters
@@ -415,19 +416,21 @@ class Multinomial(Likelihood):
The number of objects in each class
n_dimensions: int
The number of classes
base: str
The base of the parameter labels
"""
self.data = np.array(data)
self._total = np.sum(self.data)
super(Multinomial, self).__init__(dict())
self.n = n_dimensions
self.label = label
self.base = base
self._nll = None
def log_likelihood(self):
"""
Since n - 1 parameters are sampled, the last parameter is 1 - the rest
"""
probs = [self.parameters[self.label + str(ii)]
probs = [self.parameters[self.base + str(ii)]
for ii in range(self.n - 1)]
probs.append(1 - sum(probs))
return self._multinomial_ln_pdf(probs=probs)
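With the rename above, parameter names are built from `base`; only the first n - 1 probabilities are sampled and the last is fixed by normalisation. An illustrative sketch:

from bilby.core.likelihood import Multinomial

# three classes, so two free probabilities p_0 and p_1; p_2 = 1 - p_0 - p_1
likelihood = Multinomial(data=[10, 20, 30], n_dimensions=3, base="p_")
likelihood.parameters.update({"p_0": 1 / 6, "p_1": 1 / 3})
print(likelihood.log_likelihood())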
@@ -567,5 +570,166 @@ class JointLikelihood(Likelihood):
return sum([likelihood.noise_log_likelihood() for likelihood in self.likelihoods])
def function_to_celerite_mean_model(func):
from celerite.modeling import Model as CeleriteModel
return _function_to_gp_model(func, CeleriteModel)
def function_to_george_mean_model(func):
from george.modeling import Model as GeorgeModel
return _function_to_gp_model(func, GeorgeModel)
def _function_to_gp_model(func, cls):
class MeanModel(cls):
parameter_names = tuple(infer_args_from_function_except_n_args(func=func, n=1))
def get_value(self, t):
params = {name: getattr(self, name) for name in self.parameter_names}
return func(t, **params)
def compute_gradient(self, *args, **kwargs):
pass
return MeanModel
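A hedged sketch of the factory above: any plain function of `t` plus named parameters becomes a mean-model class whose `parameter_names` are inferred from the signature (the first argument is skipped). `celerite` is an optional dependency here.

def line(t, m, c):
    return m * t + c

# parameter_names is inferred as ("m", "c")
LineMeanModel = function_to_celerite_mean_model(line)
mean_model = LineMeanModel(m=0.5, c=1.0)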
class _GPLikelihood(Likelihood):
def __init__(self, kernel, mean_model, t, y, yerr=1e-6, gp_class=None):
"""
Basic Gaussian Process likelihood interface for `celerite` and `george`.
For `celerite` documentation see: https://celerite.readthedocs.io/en/stable/
For `george` documentation see: https://george.readthedocs.io/en/latest/
Parameters
==========
kernel: Union[celerite.term.Term, george.kernels.Kernel]
`celerite` or `george` kernel. See the respective package documentation about the usage.
mean_model: Union[celerite.modeling.Model, george.modeling.Model]
Mean model
t: array_like
The `times` or `x` values of the data set.
y: array_like
The `y` values of the data set.
yerr: float, int, array_like, optional
The error values on the y-values. If a single value is given, it is assumed that the value
applies for all y-values. Default is 1e-6, effectively assuming that no y-errors are present.
gp_class: type, None, optional
GPClass to use. This is determined by the child class used to instantiate the GP. Should usually
not be given by the user; it is mostly used for testing.
"""
self.kernel = kernel
self.mean_model = mean_model
self.t = np.array(t)
self.y = np.array(y)
self.yerr = np.array(yerr)
self.GPClass = gp_class
self.gp = self.GPClass(kernel=self.kernel, mean=self.mean_model, fit_mean=True, fit_white_noise=True)
self.gp.compute(self.t, yerr=self.yerr)
super().__init__(parameters=self.gp.get_parameter_dict())
def set_parameters(self, parameters):
"""
Safely set a set of parameters to the internal instances of the `gp` and `mean_model`, as well as the
`parameters` dict.
Parameters
==========
parameters: dict, pandas.DataFrame
The set of parameters we would like to set.
"""
for name, value in parameters.items():
try:
self.gp.set_parameter(name=name, value=value)
except ValueError:
pass
self.parameters[name] = value
class CeleriteLikelihood(_GPLikelihood):
def __init__(self, kernel, mean_model, t, y, yerr=1e-6):
"""
Basic Gaussian Process likelihood interface for `celerite` and `george`.
For `celerite` documentation see: https://celerite.readthedocs.io/en/stable/
For `george` documentation see: https://george.readthedocs.io/en/latest/
Parameters
==========
kernel: celerite.term.Term
`celerite` or `george` kernel. See the respective package documentation about the usage.
mean_model: celerite.modeling.Model
Mean model
t: array_like
The `times` or `x` values of the data set.
y: array_like
The `y` values of the data set.
yerr: float, int, array_like, optional
The error values on the y-values. If a single value is given, it is assumed that the value
applies for all y-values. Default is 1e-6, effectively assuming that no y-errors are present.
"""
import celerite
super().__init__(kernel=kernel, mean_model=mean_model, t=t, y=y, yerr=yerr, gp_class=celerite.GP)
def log_likelihood(self):
"""
Calculate the log-likelihood for the Gaussian process given the current parameters.
Returns
=======
float: The log-likelihood value.
"""
self.gp.set_parameter_vector(vector=np.array(list(self.parameters.values())))
try:
return self.gp.log_likelihood(self.y)
except Exception:
return -np.inf
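A hedged end-to-end sketch for the class above (requires the optional `celerite` package; the kernel choice is illustrative):

import celerite.terms
import numpy as np

t = np.sort(np.random.default_rng(0).uniform(0, 10, 100))
y = np.sin(t) + 0.1 * np.random.default_rng(1).normal(size=len(t))

def mean(t, amplitude):
    return amplitude * np.sin(t)

likelihood = CeleriteLikelihood(
    kernel=celerite.terms.RealTerm(log_a=0.0, log_c=0.0),
    mean_model=function_to_celerite_mean_model(mean)(amplitude=1.0),
    t=t, y=y, yerr=0.1,
)
print(likelihood.log_likelihood())  # evaluated at the initial parameter values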
class GeorgeLikelihood(_GPLikelihood):
def __init__(self, kernel, mean_model, t, y, yerr=1e-6):
"""
Basic Gaussian Process likelihood interface for `celerite` and `george`.
For `celerite` documentation see: https://celerite.readthedocs.io/en/stable/
For `george` documentation see: https://george.readthedocs.io/en/latest/
Parameters
==========
kernel: george.kernels.Kernel
`celerite` or `george` kernel. See the respective package documentation about the usage.
mean_model: george.modeling.Model
Mean model
t: array_like
The `times` or `x` values of the data set.
y: array_like
The `y` values of the data set.
yerr: float, int, array_like, optional
The error values on the y-values. If a single value is given, it is assumed that the value
applies for all y-values. Default is 1e-6, effectively assuming that no y-errors are present.
"""
import george
super().__init__(kernel=kernel, mean_model=mean_model, t=t, y=y, yerr=yerr, gp_class=george.GP)
def log_likelihood(self):
"""
Calculate the log-likelihood for the Gaussian process given the current parameters.
Returns
=======
float: The log-likelihood value.
"""
for name, value in self.parameters.items():
try:
self.gp.set_parameter(name=name, value=value)
except ValueError:
raise ValueError(f"Parameter {name} not a valid parameter for the GP.")
try:
return self.gp.log_likelihood(self.y)
except Exception:
return -np.inf
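Usage mirrors `CeleriteLikelihood` with a george kernel instead, e.g. (hedged; `george` is optional):

import george.kernels

kernel = 2.0 * george.kernels.Matern32Kernel(1.0)  # amplitude * Matern-3/2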
class MarginalizedLikelihoodReconstructionError(Exception):
pass
@@ -28,6 +28,7 @@ class DeltaFunction(Prior):
minimum=peak, maximum=peak, check_range_nonzero=False)
self.peak = peak
self._is_fixed = True
self.least_recently_sampled = peak
def rescale(self, val):
"""Rescale everything to the peak with the correct shape.
@@ -1430,8 +1431,8 @@ class Categorical(Prior):
unit=None, boundary="periodic"):
""" An equal-weighted Categorical prior
Parameters:
-----------
Parameters
==========
ncategories: int
The number of available categories. The prior mass support is then
integers [0, ncategories - 1].
@@ -1517,3 +1518,107 @@ class Categorical(Prior):
idxs = np.isin(val, self.categories)
probs[idxs] = self.lnp
return probs
class Triangular(Prior):
"""
A prior which draws from a triangular distribution.
For distribution details see: wikipedia.org/wiki/Triangular_distribution
Here, minimum <= mode <= maximum,
where the mode has the highest pdf value.
"""
def __init__(self, mode, minimum, maximum, name=None, latex_label=None, unit=None):
super(Triangular, self).__init__(
name=name,
latex_label=latex_label,
unit=unit,
minimum=minimum,
maximum=maximum,
)
self.mode = mode
self.fractional_mode = (self.mode - self.minimum) / (
self.maximum - self.minimum
)
self.scale = self.maximum - self.minimum
self.rescaled_minimum = self.minimum - (self.minimum == self.mode) * self.scale
self.rescaled_maximum = self.maximum + (self.maximum == self.mode) * self.scale
def rescale(self, val):
"""
'Rescale' a sample from standard uniform to a triangular distribution.
This maps the uniform draw through the inverse CDF, which has an analytic form for this distribution.
Parameters
==========
val: Union[float, int, array_like]
Uniform probability
Returns
=======
Union[float, array_like]: Rescaled probability
"""
below_mode = (val * self.scale * (self.mode - self.minimum)) ** 0.5
above_mode = ((1 - val) * self.scale * (self.maximum - self.mode)) ** 0.5
return (self.minimum + below_mode) * (val < self.fractional_mode) + (
self.maximum - above_mode
) * (val >= self.fractional_mode)
def prob(self, val):
"""
Return the prior probability of val
Parameters
==========
val: Union[float, int, array_like]
Returns
=======
float: Prior probability of val
"""
between_minimum_and_mode = (
(val < self.mode)
* (self.minimum <= val)
* (val - self.rescaled_minimum)
/ (self.mode - self.rescaled_minimum)
)
between_mode_and_maximum = (
(self.mode <= val)
* (val <= self.maximum)
* (self.rescaled_maximum - val)
/ (self.rescaled_maximum - self.mode)
)
return 2.0 * (between_minimum_and_mode + between_mode_and_maximum) / self.scale
def cdf(self, val):
"""
Return the prior cumulative probability at val
Parameters
==========
val: Union[float, int, array_like]
Returns
=======
float: prior cumulative probability at val
"""
return (
(val > self.mode)
+ (val > self.minimum)
* (val <= self.maximum)
/ (self.scale)
* (
(val > self.mode)
* (self.rescaled_maximum - val) ** 2.0
/ (self.mode - self.rescaled_maximum)
+ (val <= self.mode)
* (val - self.rescaled_minimum) ** 2.0
/ (self.mode - self.rescaled_minimum)
)
)
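A quick consistency check of the class above (a sketch; the import path assumes the prior is exported from `bilby.core.prior` like the other analytical priors):

import numpy as np
from bilby.core.prior import Triangular

prior = Triangular(mode=0.5, minimum=0.0, maximum=1.0)
u = np.linspace(0, 1, 11)
samples = prior.rescale(u)                 # inverse-CDF draws
print(np.allclose(prior.cdf(samples), u))  # True: cdf inverts rescale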
@@ -7,8 +7,13 @@ import numpy as np
import scipy.stats
from scipy.interpolate import interp1d
from bilby.core.utils import infer_args_from_method, BilbyJsonEncoder, decode_bilby_json, logger, \
get_dict_with_properties
from ..utils import (
infer_args_from_method,
BilbyJsonEncoder,
decode_bilby_json,
logger,
get_dict_with_properties,
)
class Prior(object):
@@ -124,7 +129,9 @@ class Prior(object):
float: A random number between 0 and 1, rescaled to match the distribution of this Prior
"""
self.least_recently_sampled = self.rescale(np.random.uniform(0, 1, size))
from ..utils.random import rng
self.least_recently_sampled = self.rescale(rng.uniform(0, 1, size))
return self.least_recently_sampled
def rescale(self, val):
@@ -161,14 +168,14 @@ class Prior(object):
def cdf(self, val):
""" Generic method to calculate CDF, can be overwritten in subclass """
from scipy.integrate import cumtrapz
from scipy.integrate import cumulative_trapezoid
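# SciPy renamed cumtrapz to cumulative_trapezoid; the old alias has been removed in recent SciPy releases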
if np.any(np.isinf([self.minimum, self.maximum])):
raise ValueError(
"Unable to use the generic CDF calculation for priors with"
"infinite support")
x = np.linspace(self.minimum, self.maximum, 1000)
pdf = self.prob(x)
cdf = cumtrapz(pdf, x, initial=0)
cdf = cumulative_trapezoid(pdf, x, initial=0)
interp = interp1d(x, cdf, assume_sorted=True, bounds_error=False,
fill_value=(0, 1))
return interp(val)
@@ -214,22 +221,13 @@ class Prior(object):
"""
prior_name = self.__class__.__name__
prior_module = self.__class__.__module__
instantiation_dict = self.get_instantiation_dict()
args = ', '.join(['{}={}'.format(key, repr(instantiation_dict[key]))
for key in instantiation_dict])
return "{}({})".format(prior_name, args)
@property
def _repr_dict(self):
"""
Get a dictionary containing the arguments needed to reproduce this object.
"""
property_names = {p for p in dir(self.__class__) if isinstance(getattr(self.__class__, p), property)}
subclass_args = infer_args_from_method(self.__init__)
dict_with_properties = self.__dict__.copy()
for key in property_names.intersection(subclass_args):
dict_with_properties[key] = getattr(self, key)
return {key: dict_with_properties[key] for key in subclass_args}
args = ', '.join([f'{key}={repr(instantiation_dict[key])}' for key in instantiation_dict])
if "bilby.core.prior" in prior_module:
return f"{prior_name}({args})"
else:
return f"{prior_module}.{prior_name}({args})"
@property
def is_fixed(self):
@@ -333,7 +331,7 @@ class Prior(object):
@classmethod
def from_repr(cls, string):
"""Generate the prior from it's __repr__"""
"""Generate the prior from its __repr__"""
return cls._from_repr(string)
@classmethod
@@ -405,6 +403,10 @@ class Prior(object):
The string is interpreted as a call to instantiate another prior
class, Bilby will attempt to recursively construct that prior,
e.g., Uniform(minimum=0, maximum=1), my.custom.PriorClass(**kwargs).
- Else if the string contains a ".":
It is treated as a path to a Python function and imported, e.g.,
"some_module.some_function" returns
:code:`import some_module; return some_module.some_function`
- Else:
Try to evaluate the string using `eval`. Only built-in functions
and numpy methods can be used, e.g., np.pi / 2, 1.57.
@@ -429,9 +431,9 @@ class Prior(object):
val = None
elif re.sub(r'\'.*\'', '', val) in ['r', 'u']:
val = val[2:-1]
elif "'" in val:
elif val.startswith("'") and val.endswith("'"):
val = val.strip("'")
elif '(' in val:
elif '(' in val and not val.startswith(("[", "{")):
other_cls = val.split('(')[0]
vals = '('.join(val.split('(')[1:])[:-1]
if "." in other_cls:
@@ -445,10 +447,17 @@ class Prior(object):
try:
val = eval(val, dict(), dict(np=np, inf=np.inf, pi=np.pi))
except NameError:
raise TypeError(
"Cannot evaluate prior, "
"failed to parse argument {}".format(val)
)
if "." in val:
module = '.'.join(val.split('.')[:-1])
func = val.split('.')[-1]
new_val = getattr(import_module(module), func, val)
if val == new_val:
raise TypeError(
"Cannot evaluate prior, "
f"failed to parse argument {val}"
)
else:
val = new_val
return val
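A hedged illustration of the new "." branch above: a dotted name that fails `eval` is now imported and resolved to the object itself (assuming the enclosing classmethod is bilby's `_parse_argument_string` helper, whose body this hunk patches).

from bilby.core.prior import Prior

func = Prior._parse_argument_string("numpy.exp")  # resolves to the numpy.exp function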
......