Compare revisions: lscsoft/bilby
Commits on Source (2086)
Showing with 4910 additions and 874 deletions
.coveragerc

@@
 [run]
 omit =
-    test/example_test.py
-    test/gw_example_test.py
-    test/noise_realisation_test.py
-    test/other_test.py
+    test/integration/example_test.py
+    test/integration/noise_realisation_test.py
+    test/integration/other_test.py
+    bilby/_version.py
.dictionary.txt (codespell ignore-word list referenced by the pre-commit config below)

+hist
+livetime
+iff
+amin
+amax
.gitignore

@@ -13,4 +13,6 @@ MANIFEST
 *.dat
 *.version
 *.ipynb_checkpoints
-outdir/*
\ No newline at end of file
+**/outdir
+.idea/*
+bilby/_version.py
.gitlab-ci.yml

@@ -10,91 +10,248 @@
 # before the next stage begins
 stages:
+  - initial
   - test
+  - docs
   - deploy

+# ------------------- Initial stage -------------------------------------------
+
+.list-env: &list-env
+  - PREFIX="$(dirname $(which python))/.."
+  - if [ -d "${PREFIX}/conda-meta" ]; then
+      conda list --prefix "${PREFIX}" --show-channel-urls;
+    else
+      python -m pip list installed;
+    fi
+
+# Check author list is up to date
+authors:
+  stage: initial
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python310
+  script:
+    - python test/check_author_list.py
+
+# Test containers scripts are up to date
+containers:
+  stage: initial
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python311
+  script:
+    - cd containers
+    - python write_dockerfiles.py
+    # Fail if differences exist. If this fails, you may need to run
+    # write_dockerfiles.py and commit the changes.
+    - git diff --exit-code
+    - cp env-template.yml env.yml
+    - echo "  - python=3.11" >> env.yml
+    - mamba env create -f env.yml -n test --dry-run
+
 .test-python: &test-python
-  stage: test
+  stage: initial
   image: python
-  before_script:
-    # this is required because pytables doesn't use a wheel on py37
-    - apt-get -yqq update
-    - apt-get -yqq install libhdf5-dev
   script:
     - python -m pip install .
+    - *list-env
     - python -c "import bilby"
+    - python -c "import bilby.bilby_mcmc"
     - python -c "import bilby.core"
+    - python -c "import bilby.core.prior"
+    - python -c "import bilby.core.sampler"
+    - python -c "import bilby.core.utils"
     - python -c "import bilby.gw"
+    - python -c "import bilby.gw.detector"
+    - python -c "import bilby.gw.eos"
+    - python -c "import bilby.gw.likelihood"
+    - python -c "import bilby.gw.sampler"
     - python -c "import bilby.hyper"
     - python -c "import cli_bilby"
+    - python test/import_test.py
+    - for script in $(pip show -f bilby | grep "bin\/" | xargs -I {} basename {}); do
+        ${script} --help;
+      done

-# test basic setup on python2
-basic-2.7:
+basic-3.10:
   <<: *test-python
-  image: python:2.7
+  image: python:3.10

-# test basic setup on python3
-basic-3.7:
+basic-3.11:
   <<: *test-python
-  image: python:3.7
+  image: python:3.11
+
+basic-3.12:
+  <<: *test-python
+  image: python:3.12

-# test example on python 2
-python-2.7:
-  stage: test
-  image: bilbydev/bilby-test-suite-python27
-  script:
-    - python setup.py install
-    # Run tests without finding coverage
-    - pytest --ignore=test/utils_py3_test.py
+.test-samplers-import: &test-samplers-import
+  stage: initial
+  script:
+    - python -m pip install .
+    - *list-env
+    - pytest test/test_samplers_import.py -v

-# test example on python 3
-python-3.7:
-  stage: test
-  image: bilbydev/bilby-test-suite-python37
-  script:
-    - python setup.py install
+import-samplers-3.10:
+  <<: *test-samplers-import
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python310
+
+import-samplers-3.11:
+  <<: *test-samplers-import
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python311
+
+import-samplers-3.12:
+  <<: *test-samplers-import
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python312
+
+.precommits: &precommits
+  stage: initial
+  script:
+    - source activate $PYVERSION
+    - mkdir -p $CACHE_DIR
+    - pip install --upgrade pip
+    - pip --cache-dir=$CACHE_DIR install --upgrade bilby
+    - pip --cache-dir=$CACHE_DIR install .
+    # Run precommits (flake8, spellcheck, isort, no merge conflicts, etc.)
+    - pre-commit run --all-files --verbose --show-diff-on-failure

-    # Run pyflakes
-    - flake8 .
+precommits-py3.11:
+  <<: *precommits
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python311
+  variables:
+    CACHE_DIR: ".pip311"
+    PYVERSION: "python311"

-    # Run tests and collect coverage data
-    - pytest --cov=bilby
-    - coverage html
-    - coverage-badge -o coverage_badge.svg -f
+install:
+  stage: initial
+  parallel:
+    matrix:
+      - EXTRA: [gw, mcmc, all]
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python311
+  script:
+    - pip install .[$EXTRA]

-    # Make the documentation
-    - cd docs
-    - make clean
-    - make html
+# ------------------- Test stage -------------------------------------------
+
+.unit-tests: &unit-test
+  stage: test
+  script:
+    - python -m pip install .
+    - *list-env
+    - pytest --cov=bilby --durations 10
+
+python-3.10:
+  <<: *unit-test
+  needs: ["basic-3.10"]
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python310
+
+python-3.11:
+  <<: *unit-test
+  needs: ["basic-3.11", "precommits-py3.11"]
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python311
+  after_script:
+    - coverage html
+    - coverage xml
+  coverage: '/(?i)total.*? (100(?:\.0+)?\%|[1-9]?\d(?:\.\d+)?\%)$/'
   artifacts:
+    reports:
+      coverage_report:
+        coverage_format: cobertura
+        path: coverage.xml
     paths:
       - htmlcov/
-      - coverage_badge.svg
-      - docs/_build/html/
+    expire_in: 30 days
+
+python-3.12:
+  <<: *unit-test
+  needs: ["basic-3.12"]
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python312
+
+.test-sampler: &test-sampler
+  stage: test
+  script:
+    - python -m pip install .[all]
+    - *list-env
+    - pytest test/integration/sampler_run_test.py --durations 10 -v
+
+python-3.10-samplers:
+  <<: *test-sampler
+  needs: ["basic-3.10"]
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python310
+
+python-3.11-samplers:
+  <<: *test-sampler
+  needs: ["basic-3.11", "precommits-py3.11"]
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python311
+
+python-3.12-samplers:
+  <<: *test-sampler
+  needs: ["basic-3.12"]
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python312

-# Tests run at a fixed schedule rather than on push
-scheduled-python-3.7:
+integration-tests-python-3.11:
   stage: test
-  image: bilbydev/bilby-test-suite-python37
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python311
+  needs: ["basic-3.11", "precommits-py3.11"]
   only:
     - schedules
   script:
-    - python setup.py install
+    - python -m pip install .
+    - *list-env
     # Run tests which are only done on schedule
-    - pytest test/example_test.py
-    - pytest test/gw_example_test.py
+    - pytest test/integration/example_test.py
+
+.plotting: &plotting
+  stage: test
+  only:
+    - schedules
+  script:
+    - python -m pip install .
+    - *list-env
+    - pytest test/gw/plot_test.py
+
+plotting-python-3.10:
+  <<: *plotting
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python310
+  needs: ["basic-3.10"]
+
+plotting-python-3.11:
+  <<: *plotting
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python311
+  needs: ["basic-3.11", "precommits-py3.11"]
+
+plotting-python-3.12:
+  <<: *plotting
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python312
+  needs: ["basic-3.12"]
+
+# ------------------- Docs stage -------------------------------------------
+
+docs:
+  stage: docs
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python311
+  before_script:
+    - python -m ipykernel install
+  script:
+    # Make the documentation
+    - python -m pip install .
+    - python -m pip install myst_parser  # only for testing purposes - remove once test image is generating correctly
+    - cd examples/tutorials
+    - jupyter nbconvert --to notebook --execute *.ipynb --output-dir ../../docs
+    - cd ../../docs
+    - make clean
+    - make html
+  artifacts:
+    paths:
+      - docs/_build/html/
+
+# ------------------- Deploy stage -------------------------------------------

 pages:
   stage: deploy
-  dependencies:
-    - python-3.7
-    - python-2.7
+  needs: ["docs", "python-3.11"]
   script:
     - mkdir public/
     - mv htmlcov/ public/
-    - mv coverage_badge.svg public/
     - mv docs/_build/html/* public/
   artifacts:
     paths:
@@ -102,3 +259,51 @@ pages:
     expire_in: 30 days
   only:
     - master
+
+.build-container: &build-container
+  stage: deploy
+  image: docker:20.10.23
+  needs: ["containers"]
+  rules:
+    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
+      changes:
+        compare_to: 'refs/heads/main'
+        paths:
+          - containers/*
+      when: manual
+    - if: $CI_PIPELINE_SOURCE == "schedule"
+  script:
+    - cd containers
+    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
+    - cp v3-dockerfile-test-suite-$PYVERSION Dockerfile
+    - docker build --tag v3-bilby-$PYVERSION .
+    - docker image tag v3-bilby-$PYVERSION containers.ligo.org/lscsoft/bilby/v2-bilby-$PYVERSION:latest
+    - docker image push containers.ligo.org/lscsoft/bilby/v2-bilby-$PYVERSION:latest
+
+build-python310-container:
+  <<: *build-container
+  variables:
+    PYVERSION: "python310"
+
+build-python311-container:
+  <<: *build-container
+  variables:
+    PYVERSION: "python311"
+
+build-python312-container:
+  <<: *build-container
+  variables:
+    PYVERSION: "python312"
+
+pypi-release:
+  stage: deploy
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python310
+  variables:
+    TWINE_USERNAME: $PYPI_USERNAME
+    TWINE_PASSWORD: $PYPI_PASSWORD
+  before_script:
+    - python -m build --sdist --wheel --outdir dist/ .
+  script:
+    - twine upload dist/*
+  only:
+    - tags
.pre-commit-config.yaml

repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v2.3.0
    hooks:
      - id: check-merge-conflict  # prevent committing files with merge conflicts
      - id: flake8  # checks for flake8 errors
  - repo: https://github.com/psf/black
    rev: 22.3.0
    hooks:
      - id: black
        language_version: python3
        files: '^(bilby/bilby_mcmc/|bilby/core/sampler/|examples/)'
  - repo: https://github.com/codespell-project/codespell
    rev: v2.1.0
    hooks:
      - id: codespell
        args: [--ignore-words=.dictionary.txt]
  - repo: https://github.com/pre-commit/mirrors-isort
    rev: v5.10.1
    hooks:
      - id: isort  # sorts imports alphabetically and separates them into sections
        args: [-w=88, -m=3, -tc, -sp=setup.cfg]
        files: '^(bilby/bilby_mcmc/|bilby/core/sampler/|examples/)'
  - repo: https://github.com/datarootsio/databooks
    rev: 0.1.14
    hooks:
      - id: databooks
        name: databooks
        description: "Remove notebook metadata using `databooks`."
        entry: databooks meta
        language: python
        minimum_pre_commit_version: 2.9.2
        types: [jupyter]
        args: [-w]
  - repo: local
    hooks:
      - id: jupyter-nb-clear-output
        name: jupyter-nb-clear-output
        files: \.ipynb$
        language: system
        entry: jupyter nbconvert --ClearOutputPreprocessor.enabled=True --inplace
# Authors
This file lists, in first-name alphabetical order, all of the authors who have
contributed, whether directly through code or indirectly. If your name is not
listed here, please contact anyone on this list and raise your concern.
Abhirup Ghosh
Aditya Vijaykumar
Andrew Kim
Andrew Miller
Antoni Ramos-Buades
Apratim Ganguly
Avi Vajpeyi
Ben Patterson
Bruce Edelman
Carl-Johan Haster
Cecilio Garcia-Quiros
Charlie Hoy
Chentao Yang
Christopher Philip Luke Berry
Christos Karathanasis
Colm Talbot
Daniel Williams
David Keitel
Duncan Macleod
Eric Thrane
Ethan Payne
Francisco Javier Hernandez
Gregory Ashton
Hank Hua
Hector Estelles
Ignacio Magaña Hernandez
Isaac McMahon
Isobel Marguarethe Romero-Shaw
Jack Heinzel
Jacob Golomb
Jade Powell
James A Clark
Jeremy G Baier
John Veitch
Joshua Brandt
Josh Willis
Karl Wette
Katerina Chatziioannou
Kaylee de Soto
Khun Sang Phukon
Kruthi Krishna
Kshipraa Athar
Kyle Wong
Leslie Wade
Liting Xiao
Maite Mateu-Lucena
Marc Arene
Marcus Edward Lower
Margaret Millhouse
Marta Colleoni
Matthew Carney
Matthew David Pitkin
Michael Puerrer
Michael Williams
Monica Rizzo
Moritz Huebner
Nico Gerardo Bers
Nicola De Lillo
Nikhil Sarin
Nirban Bose
Noah Wolfe
Olivia Wilk
Paul Easter
Paul Lasky
Philip Relton
Rhys Green
Rhiannon Udall
Rico Lo
Roberto Cotesta
Rory Smith
S. H. Oh
Sacha Husa
Sama Al-Shammari
Samson Leong
Scott Coughlin
Serguei Ossokine
Shanika Galaudage
Sharan Banagiri
Shichao Wu
Simon Stevenson
Soichiro Morisaki
Soumen Roy
Stephen R Green
Sumeet Kulkarni
Sylvia Biscoveanu
Tathagata Ghosh
Teagan Clarke
Tomasz Baka
Will M. Farr
Virginia d'Emilio
Vivien Raymond
Ka-Lok Lo
Isaac Legred
Marc Penuliar
Andrew Fowlie
Martin White
Peter Tsun-Ho Pang
Alexandre Sebastien Goettel
Ann-Kristin Malz
MANIFEST.in

@@
 include README.rst
 include LICENSE.md
-include requirements.txt
-include gw_requirements.txt
-include mcmc_requirements.txt
-include optional_requirements.txt
-include sampler_requirements.txt
+include bilby/_version.py
+recursive-include test *.py *.prior
Pipfile

[[source]]
url = "https://pypi.org/simple"
verify_ssl = true
name = "pypi"
[packages]
future = "*"
corner = "*"
numpy = "==1.15.2"
ligotimegps = "<=1.2.3"
matplotlib = "<3"
scipy = ">=0.16"
pandas = "==0.23.0"
deepdish = "==0.3.6"
mock = "*"
astropy = "<3"
gwpy = "*"
theano = "*"
lalsuite = "*"
# cpnest = "*"
dynesty = "*"
emcee = "*"
nestle = "*"
ptemcee = "*"
pymc3 = "*"
[requires]
[dev-packages]
[pipenv]
allow_prereleases = false
README.rst

 |pipeline status| |coverage report| |pypi| |conda| |version|

 =====
-Bilby
+Bilby development has moved to `GitHub <https://github.com/bilby-dev/bilby>`__!
 =====
+Please open any new issues or pull requests there. A full migration guide will be provided soon. Links below here may no longer be active.
+====

 A user-friendly Bayesian inference library.
 Fulfilling all your Bayesian dreams.

@@ -14,59 +16,27 @@ Online material to help you get started:

 If you need help, find an issue, or just have a question/suggestion you can

+- Join our `Slack workspace <https://bilby-code.slack.com/>`__ (you may need to email the support desk to request an invite)
 - Email our support desk: contact+lscsoft-bilby-1846-issue-@support.ligo.org
-- Join our `Slack workspace <https://bilby-code.slack.com/>`__
 - Ask questions (or search through other users questions and answers) on `StackOverflow <https://stackoverflow.com/questions/tagged/bilby>`__ using the bilby tag
 - For www.git.ligo.org users, submit issues directly through `the issue tracker <https://git.ligo.org/lscsoft/bilby/issues>`__
-- For www.chat.ligo.org users, join the `#bilby-help <https://chat.ligo.org/ligo/channels/bilby-help>`__ or `#bilby-devel <https://chat.ligo.org/ligo/channels/bilby-devel>`__ channels

 We encourage you to contribute to the development of bilby. This is done via a merge request. For
 help in creating a merge request, see `this page
 <https://docs.gitlab.com/ee/gitlab-basics/add-merge-request.html>`__ or contact
-us directly. For advice on contributing, see `this help page <https://git.ligo.org/lscsoft/bilby/blob/master/CONTRIBUTING.md>`__.
+us directly. For advice on contributing, see `the contributing guide <https://git.ligo.org/lscsoft/bilby/blob/master/CONTRIBUTING.md>`__.

 --------------
 Citation guide
 --------------

-If you use :code:`bilby` in a scientific publication, please cite
-
-* `Bilby: A user-friendly Bayesian inference library for gravitational-wave
-  astronomy
-  <https://ui.adsabs.harvard.edu/#abs/2018arXiv181102042A/abstract>`__
-
-Additionally, :code:`bilby` builds on a number of open-source packages. If you
-make use of this functionality in your publications, we recommend you cite them
-as requested in their associated documentation.
-
-**Samplers**
-
-* `dynesty <https://github.com/joshspeagle/dynesty>`__
-* `nestle <https://github.com/kbarbary/nestle>`__
-* `pymultinest <https://github.com/JohannesBuchner/PyMultiNest>`__
-* `cpnest <https://github.com/johnveitch/cpnest>`__
-* `emcee <https://github.com/dfm/emcee>`__
-* `ptemcee <https://github.com/willvousden/ptemcee>`__
-* `ptmcmcsampler <https://github.com/jellis18/PTMCMCSampler>`__
-* `pypolychord <https://github.com/vhaasteren/pypolychord>`__
-* `PyMC3 <https://github.com/pymc-devs/pymc3>`_
-
-**Gravitational-wave tools**
-
-* `gwpy <https://github.com/gwpy/gwpy>`__
-* `lalsuite <https://git.ligo.org/lscsoft/lalsuite>`__
-* `astropy <https://github.com/astropy/astropy>`__
-
-**Plotting**
-
-* `corner <https://github.com/dfm/corner.py>`__ for generating corner plots
-* `matplotlib <https://github.com/matplotlib/matplotlib>`__ for general plotting routines
+Please refer to the `Acknowledging/citing bilby guide <https://lscsoft.docs.ligo.org/bilby/citing-bilby.html>`__.

 .. |pipeline status| image:: https://git.ligo.org/lscsoft/bilby/badges/master/pipeline.svg
    :target: https://git.ligo.org/lscsoft/bilby/commits/master
-.. |coverage report| image:: https://lscsoft.docs.ligo.org/bilby/coverage_badge.svg
+.. |coverage report| image:: https://git.ligo.org/lscsoft/bilby/badges/master/coverage.svg
    :target: https://lscsoft.docs.ligo.org/bilby/htmlcov/
 .. |pypi| image:: https://badge.fury.io/py/bilby.svg
    :target: https://pypi.org/project/bilby/
......
bilby/__init__.py

@@ -4,7 +4,7 @@ Bilby
 Bilby: a user-friendly Bayesian inference library.

-The aim of bilby is to provide user friendly interface to perform parameter
+The aim of bilby is to provide a user-friendly interface to perform parameter
 estimation. It is primarily designed and built for inference of compact
 binary coalescence events in interferometric data, but it can also be used for
 more general problems.
@@ -16,12 +16,38 @@ https://lscsoft.docs.ligo.org/bilby/installation.html.
 """

-from __future__ import absolute_import
+import sys

 from . import core, gw, hyper
 from .core import utils, likelihood, prior, result, sampler
 from .core.sampler import run_sampler
 from .core.likelihood import Likelihood
+from .core.result import read_in_result, read_in_result_list

-__version__ = utils.get_version_information()
+try:
+    from ._version import version as __version__
+except ModuleNotFoundError:  # development mode
+    __version__ = 'unknown'
+
+if sys.version_info < (3,):
+    raise ImportError(
+        """You are running bilby >= 0.6.4 on Python 2
+
+Bilby 0.6.4 and above are no longer compatible with Python 2, and you still
+ended up with this version installed. That's unfortunate; sorry about that.
+It should not have happened. Make sure you have pip >= 9.0 to avoid this kind
+of issue, as well as setuptools >= 24.2:
+
+ $ pip install pip setuptools --upgrade
+
+Your choices:
+
+- Upgrade to Python 3.
+
+- Install an older version of bilby:
+
+ $ pip install 'bilby<0.6.4'
+""")
bilby/bilby_mcmc/__init__.py

from .sampler import Bilby_MCMC

bilby/bilby_mcmc/chain.py

import numpy as np
import pandas as pd
from packaging import version

from ..core.sampler.base_sampler import SamplerError
from ..core.utils import logger
from .utils import LOGLKEY, LOGLLATEXKEY, LOGPKEY, LOGPLATEXKEY


class Chain(object):
    def __init__(
        self,
        initial_sample,
        burn_in_nact=1,
        thin_by_nact=1,
        fixed_discard=0,
        autocorr_c=5,
        min_tau=1,
        fixed_tau=None,
        tau_window=None,
        block_length=100000,
    ):
        """Object to store a single MCMC chain

        Parameters
        ----------
        initial_sample: bilby.bilby_mcmc.chain.Sample
            The starting point of the chain
        burn_in_nact, thin_by_nact : int (1, 1)
            The number of autocorrelation times (tau) to discard for burn-in
            and the multiplicative factor to thin by (thin_by_nact < 1). For
            example, burn_in_nact=10 and thin_by_nact=1 will discard 10*tau
            samples from the start of the chain, then thin the final chain by
            a factor of 1*tau (resulting in independent samples).
        fixed_discard: int (0)
            A fixed minimum number of samples to discard (can be used to
            override the burn_in_nact if it is too small).
        autocorr_c: float (5)
            The step size of the window search used by emcee.autocorr when
            estimating the autocorrelation time.
        min_tau: int (1)
            A minimum value for the autocorrelation time.
        fixed_tau: int (None)
            A fixed value for the autocorrelation (overrides the automated
            autocorrelation time estimation). Used in testing.
        tau_window: int (None)
            Only calculate the autocorrelation time in a trailing window. If
            None (default) this method is not used.
        block_length: int
            The incremental size to extend the array by when it runs out of
            space.
        """
        self.autocorr_c = autocorr_c
        self.min_tau = min_tau
        self.burn_in_nact = burn_in_nact
        self.thin_by_nact = thin_by_nact
        self.block_length = block_length
        self.fixed_discard = int(fixed_discard)
        self.fixed_tau = fixed_tau
        self.tau_window = tau_window

        self.ndim = initial_sample.ndim
        self.current_sample = initial_sample
        self.keys = self.current_sample.keys
        self.parameter_keys = self.current_sample.parameter_keys

        # Initialize chain
        self._chain_array = self._get_zero_chain_array()
        self._chain_array_length = block_length
        self.position = -1
        self.max_log_likelihood = -np.inf
        self.max_tau_dict = {}
        self.converged = False
        self.cached_tau_count = 0
        self._minimum_index_proposal = 0
        self._minimum_index_adapt = 0
        self._last_minimum_index = (0, 0, "I")
        self.last_full_tau_dict = {key: np.inf for key in self.parameter_keys}

        # Append the initial sample
        self.append(self.current_sample)
    def _get_zero_chain_array(self):
        return np.zeros((self.block_length, self.ndim + 2), dtype=np.float64)

    def _extend_chain_array(self):
        self._chain_array = np.concatenate(
            (self._chain_array, self._get_zero_chain_array()), axis=0
        )
        self._chain_array_length = len(self._chain_array)

    @property
    def current_sample(self):
        return self._current_sample.copy()

    @current_sample.setter
    def current_sample(self, current_sample):
        self._current_sample = current_sample

    def append(self, sample):
        self.position += 1

        # Extend the array if needed
        if self.position >= self._chain_array_length:
            self._extend_chain_array()

        # Store the current sample and append to the array
        self.current_sample = sample
        self._chain_array[self.position] = sample.list

        # Update the maximum log_likelihood
        if sample[LOGLKEY] > self.max_log_likelihood:
            self.max_log_likelihood = sample[LOGLKEY]

    def __getitem__(self, index):
        if index < 0:
            index = index + self.position + 1

        if index <= self.position:
            values = self._chain_array[index]
            return Sample({k: v for k, v in zip(self.keys, values)})
        else:
            raise SamplerError(f"Requested index {index} out of bounds")

    def __setitem__(self, index, sample):
        if index < 0:
            index = index + self.position + 1

        self._chain_array[index] = sample.list

    def key_to_idx(self, key):
        return self.keys.index(key)

    def get_1d_array(self, key):
        return self._chain_array[: 1 + self.position, self.key_to_idx(key)]

    @property
    def _random_idx(self):
        from ..core.utils.random import rng

        mindex = self._last_minimum_index[1]
        # Check if mindex exceeds current position by 10 ACT: if so use a random sample
        # otherwise we draw only from the chain past the minimum_index
        if np.isinf(self.tau_last) or self.position - mindex < 10 * self.tau_last:
            mindex = 0
        return rng.integers(mindex, self.position + 1)

    @property
    def random_sample(self):
        return self[self._random_idx]

    @property
    def fixed_discard(self):
        return self._fixed_discard

    @fixed_discard.setter
    def fixed_discard(self, fixed_discard):
        self._fixed_discard = int(fixed_discard)
    @property
    def minimum_index(self):
        """This calculates a minimum index from which to discard samples

        A number of methods are provided for the calculation. A subset are
        switched off (by `if False` statements) for future development
        """
        position = self.position

        # Return cached minimum index
        last_minimum_index = self._last_minimum_index
        if position == last_minimum_index[0]:
            return int(last_minimum_index[1])

        # If fixed discard is not yet reached, just return that
        if position < self.fixed_discard:
            self.minimum_index_method = "FD"
            return self.fixed_discard

        # Initialize list of minimum index methods with the fixed discard (FD)
        minimum_index_list = [self.fixed_discard]
        minimum_index_method_list = ["FD"]

        # Calculate minimum index from tau
        if self.tau_last < np.inf:
            tau = self.tau_last
        elif len(self.max_tau_dict) == 0:
            # Bootstrap calculating tau when minimum index has not yet been calculated
            tau = self._tau_for_full_chain
        else:
            tau = np.inf

        if tau < np.inf:
            minimum_index_list.append(self.burn_in_nact * tau)
            minimum_index_method_list.append(f"{self.burn_in_nact}tau")

        # Calculate points when log-posterior is within z std of the mean
        if True:
            zfactor = 1
            N = 100
            delta_lnP = zfactor * self.ndim / 2
            logl = self.get_1d_array(LOGLKEY)
            log_prior = self.get_1d_array(LOGPKEY)
            log_posterior = logl + log_prior
            max_posterior = np.max(log_posterior)

            ave = pd.Series(log_posterior).rolling(window=N).mean().iloc[N - 1 :]
            delta = max_posterior - ave
            passes = ave[delta < delta_lnP]
            if len(passes) > 0:
                minimum_index_list.append(passes.index[0] + 1)
                minimum_index_method_list.append(f"z{zfactor}")

        # Add last minimum_index_method
        if False:
            minimum_index_list.append(last_minimum_index[1])
            minimum_index_method_list.append(last_minimum_index[2])

        # Minimum index set by proposals
        minimum_index_list.append(self.minimum_index_proposal)
        minimum_index_method_list.append("PR")

        # Minimum index set by temperature adaptation
        minimum_index_list.append(self.minimum_index_adapt)
        minimum_index_method_list.append("AD")

        # Calculate the maximum minimum index and associated method (reporting)
        minimum_index = int(np.max(minimum_index_list))
        minimum_index_method = minimum_index_method_list[np.argmax(minimum_index_list)]

        # Cache the method
        self._last_minimum_index = (position, minimum_index, minimum_index_method)
        self.minimum_index_method = minimum_index_method

        return minimum_index
    @property
    def minimum_index_proposal(self):
        return self._minimum_index_proposal

    @minimum_index_proposal.setter
    def minimum_index_proposal(self, minimum_index_proposal):
        if minimum_index_proposal > self._minimum_index_proposal:
            self._minimum_index_proposal = minimum_index_proposal

    @property
    def minimum_index_adapt(self):
        return self._minimum_index_adapt

    @minimum_index_adapt.setter
    def minimum_index_adapt(self, minimum_index_adapt):
        if minimum_index_adapt > self._minimum_index_adapt:
            self._minimum_index_adapt = minimum_index_adapt

    @property
    def tau(self):
        """The maximum ACT over all parameters"""
        if self.position in self.max_tau_dict:
            # If we have the ACT at the current position, return it
            return self.max_tau_dict[self.position]
        elif (
            self.tau_last < np.inf
            and self.cached_tau_count < 50
            and self.nsamples_last > 50
        ):
            # If we have a recent ACT return it
            self.cached_tau_count += 1
            return self.tau_last
        else:
            # Calculate the ACT
            return self.tau_nocache

    @property
    def tau_nocache(self):
        """Calculate tau forcing a recalculation (no cached tau)"""
        tau = max(self.tau_dict.values())
        self.max_tau_dict[self.position] = tau
        self.cached_tau_count = 0
        return tau

    @property
    def tau_last(self):
        """Return the last-calculated tau if it exists, else inf"""
        if len(self.max_tau_dict) > 0:
            return list(self.max_tau_dict.values())[-1]
        else:
            return np.inf

    @property
    def _tau_for_full_chain(self):
        """The maximum ACT over all parameters"""
        return max(self._tau_dict_for_full_chain.values())

    @property
    def _tau_dict_for_full_chain(self):
        return self._calculate_tau_dict(minimum_index=0)

    @property
    def tau_dict(self):
        """Calculate a dictionary of tau (ACT) for every parameter"""
        return self._calculate_tau_dict(self.minimum_index)

    def _calculate_tau_dict(self, minimum_index):
        """Calculate a dictionary of tau (ACT) for every parameter"""
        logger.debug(f"Calculating tau_dict {self}")

        # If there are too few samples to calculate tau
        if (self.position - minimum_index) < 2 * self.autocorr_c:
            return {key: np.inf for key in self.parameter_keys}

        # Choose minimum index for the ACT calculation
        last_tau = self.tau_last
        if self.tau_window is not None and last_tau < np.inf:
            minimum_index_for_act = max(
                minimum_index, int(self.position - self.tau_window * last_tau)
            )
        else:
            minimum_index_for_act = minimum_index

        # Calculate a dictionary of tau's for each parameter
        taus = {}
        for key in self.parameter_keys:
            if self.fixed_tau is None:
                x = self.get_1d_array(key)[minimum_index_for_act:]
                tau = calculate_tau(x, self.autocorr_c)
                taux = round(tau, 1)
            else:
                taux = self.fixed_tau
            taus[key] = max(taux, self.min_tau)

        # Cache the last tau dictionary for future use
        self.last_full_tau_dict = taus

        return taus
    @property
    def thin(self):
        if np.isfinite(self.tau):
            return np.max([1, int(self.thin_by_nact * self.tau)])
        else:
            return 1

    @property
    def nsamples(self):
        nuseable_steps = self.position - self.minimum_index
        n_independent_samples = nuseable_steps / self.tau
        nsamples = int(n_independent_samples / self.thin_by_nact)
        if nuseable_steps >= nsamples:
            return nsamples
        else:
            return 0

    @property
    def nsamples_last(self):
        nuseable_steps = self.position - self.minimum_index
        return int(nuseable_steps / (self.thin_by_nact * self.tau_last))

    @property
    def samples(self):
        samples = self._chain_array[self.minimum_index : self.position : self.thin]
        return pd.DataFrame(samples, columns=self.keys)
    def plot(self, outdir=".", label="label", priors=None, all_samples=None):
        import matplotlib.pyplot as plt

        fig, axes = plt.subplots(
            nrows=self.ndim + 3, ncols=2, figsize=(8, 9 + 3 * (self.ndim))
        )
        scatter_kwargs = dict(
            lw=0,
            marker="o",
        )
        K = 1000

        nburn = self.minimum_index
        plot_setups = zip(
            [0, nburn, nburn],
            [nburn, self.position, self.position],
            [1, 1, self.thin],  # Thin-by factor
            ["tab:red", "tab:grey", "tab:blue"],  # Color
            [0.5, 0.05, 0.5],  # Alpha
            [1, 1, 1],  # Marker size
        )

        position_indexes = np.arange(self.position + 1)

        # Plot the traceplots
        for (start, stop, thin, color, alpha, ms) in plot_setups:
            for ax, key in zip(axes[:, 0], self.keys):
                xx = position_indexes[start:stop:thin] / K
                yy = self.get_1d_array(key)[start:stop:thin]

                # Downsample plots to max_pts: avoid memory issues
                max_pts = 10000
                while len(xx) > max_pts:
                    xx = xx[::2]
                    yy = yy[::2]

                ax.plot(
                    xx,
                    yy,
                    color=color,
                    alpha=alpha,
                    ms=ms,
                    **scatter_kwargs,
                )
                ax.set_ylabel(self._get_plot_label_by_key(key, priors))
                if key not in [LOGLKEY, LOGPKEY]:
                    msg = r"$\tau=$" + f"{self.last_full_tau_dict[key]:0.1f}"
                    ax.set_title(msg)

        # Plot the histograms
        for ax, key in zip(axes[:, 1], self.keys):
            if all_samples is not None:
                yy_all = all_samples[key]
                if np.any(np.isinf(yy_all)):
                    logger.warning(
                        f"Could not plot histogram for parameter {key} due to infinite values"
                    )
                else:
                    ax.hist(yy_all, bins=50, alpha=0.6, density=True, color="k")
            yy = self.get_1d_array(key)[nburn : self.position : self.thin]
            if np.any(np.isinf(yy)):
                logger.warning(
                    f"Could not plot histogram for parameter {key} due to infinite values"
                )
            else:
                ax.hist(yy, bins=50, alpha=0.8, density=True)
            ax.set_xlabel(self._get_plot_label_by_key(key, priors))

        # Add x-axes labels to the traceplots
        axes[-1, 0].set_xlabel(r"Iteration $[\times 10^{3}]$")

        # Plot the calculated ACT
        ax = axes[-1, 0]
        tausit = np.array(list(self.max_tau_dict.keys()) + [self.position]) / K
        taus = list(self.max_tau_dict.values()) + [self.tau_last]
        ax.plot(tausit, taus, color="C3")
        ax.set(ylabel=r"Maximum $\tau$")

        axes[-1, 1].set_axis_off()

        filename = "{}/{}_checkpoint_trace.png".format(outdir, label)
        msg = [
            r"Maximum $\tau$" + f"={self.tau:0.1f} ",
            r"$n_{\rm samples}=$" + f"{self.nsamples} ",
        ]
        if self.thin_by_nact != 1:
            msg += [
                r"$n_{\rm samples}^{\rm eff}=$"
                + f"{int(self.nsamples * self.thin_by_nact)} "
            ]
        fig.suptitle(
            "| ".join(msg),
            y=1,
        )
        fig.tight_layout()
        fig.savefig(filename, dpi=200)
        plt.close(fig)

    @staticmethod
    def _get_plot_label_by_key(key, priors=None):
        if priors is not None and key in priors:
            return priors[key].latex_label
        elif key == LOGLKEY:
            return LOGLLATEXKEY
        elif key == LOGPKEY:
            return LOGPLATEXKEY
        else:
            return key
class Sample(object):
    def __init__(self, sample_dict):
        """A single sample

        Parameters
        ----------
        sample_dict: dict
            A dictionary of the sample
        """
        self.sample_dict = sample_dict
        self.keys = list(sample_dict.keys())
        self.parameter_keys = [k for k in self.keys if k not in [LOGPKEY, LOGLKEY]]
        self.ndim = len(self.parameter_keys)

    def __getitem__(self, key):
        return self.sample_dict[key]

    def __setitem__(self, key, value):
        self.sample_dict[key] = value
        if key not in self.keys:
            self.keys = list(self.sample_dict.keys())

    @property
    def list(self):
        return list(self.sample_dict.values())

    def __repr__(self):
        return str(self.sample_dict)

    @property
    def parameter_only_dict(self):
        return {key: self.sample_dict[key] for key in self.parameter_keys}

    @property
    def dict(self):
        return {key: self.sample_dict[key] for key in self.keys}

    def as_dict(self, keys=None):
        sdict = self.dict
        if keys is None:
            return sdict
        else:
            return {key: sdict[key] for key in keys}

    def __eq__(self, other_sample):
        return self.list == other_sample.list

    def copy(self):
        return Sample(self.sample_dict.copy())
def calculate_tau(x, autocorr_c=5):
    import emcee

    if version.parse(emcee.__version__) < version.parse("3"):
        raise SamplerError("bilby-mcmc requires emcee > 3.0 for autocorr analysis")

    if np.all(np.diff(x) == 0):
        return np.inf
    try:
        # Hard code tol=1: we perform this check internally
        tau = emcee.autocorr.integrated_time(x, c=autocorr_c, tol=1)[0]
        if np.isnan(tau):
            tau = np.inf
        return tau
    except emcee.autocorr.AutocorrError:
        return np.inf
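A hypothetical end-to-end sketch of how Chain, Sample, and calculate_tau fit together (the parameter names, random-walk proposal, and all numbers are invented for illustration; emcee >= 3 must be installed for the tau calculation):

import numpy as np

from bilby.bilby_mcmc.chain import Chain, Sample, calculate_tau
from bilby.bilby_mcmc.utils import LOGLKEY, LOGPKEY

# A Sample stores parameters plus the log-likelihood and log-prior entries
start = Sample({"a": 0.0, "b": 0.0, LOGLKEY: 0.0, LOGPKEY: 0.0})
chain = Chain(initial_sample=start, burn_in_nact=1, thin_by_nact=1)

rng = np.random.default_rng(42)
for _ in range(1000):
    new = chain.current_sample          # copy of the most recent sample
    new["a"] += rng.normal(scale=0.1)   # toy random-walk updates
    new["b"] += rng.normal(scale=0.1)
    new[LOGLKEY] = -(new["a"] ** 2 + new["b"] ** 2) / 2
    chain.append(new)

print(chain.tau)       # maximum autocorrelation time over the parameters
print(chain.nsamples)  # independent samples after burn-in and thinning
print(chain.samples)   # thinned pandas DataFrame of the chain

# calculate_tau can also be applied directly to any 1D array
print(calculate_tau(chain.get_1d_array("a")))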
bilby/bilby_mcmc/flows.py

import torch
from glasflow.nflows.distributions.normal import StandardNormal
from glasflow.nflows.flows.base import Flow
from glasflow.nflows.nn import nets as nets
from glasflow.nflows.transforms import (
    CompositeTransform,
    MaskedAffineAutoregressiveTransform,
    RandomPermutation,
)
from glasflow.nflows.transforms.coupling import (
    AdditiveCouplingTransform,
    AffineCouplingTransform,
)
from glasflow.nflows.transforms.normalization import BatchNorm
from torch.nn import functional as F

# Turn off parallelism
torch.set_num_threads(1)
torch.set_num_interop_threads(1)


class NVPFlow(Flow):
    """A simplified version of Real NVP for 1-dim inputs.

    This implementation uses 1-dim checkerboard masking but doesn't use
    multi-scaling.

    Reference:
    > L. Dinh et al., Density estimation using Real NVP, ICLR 2017.

    This class has been modified from the example found at:
    https://github.com/bayesiains/nflows/blob/master/nflows/flows/realnvp.py
    """

    def __init__(
        self,
        features,
        hidden_features,
        num_layers,
        num_blocks_per_layer,
        use_volume_preserving=False,
        activation=F.relu,
        dropout_probability=0.0,
        batch_norm_within_layers=False,
        batch_norm_between_layers=False,
        random_permutation=True,
    ):
        if use_volume_preserving:
            coupling_constructor = AdditiveCouplingTransform
        else:
            coupling_constructor = AffineCouplingTransform

        mask = torch.ones(features)
        mask[::2] = -1

        def create_resnet(in_features, out_features):
            return nets.ResidualNet(
                in_features,
                out_features,
                hidden_features=hidden_features,
                num_blocks=num_blocks_per_layer,
                activation=activation,
                dropout_probability=dropout_probability,
                use_batch_norm=batch_norm_within_layers,
            )

        layers = []
        for _ in range(num_layers):
            transform = coupling_constructor(
                mask=mask, transform_net_create_fn=create_resnet
            )
            layers.append(transform)
            mask *= -1
            if batch_norm_between_layers:
                layers.append(BatchNorm(features=features))

        if random_permutation:
            layers.append(RandomPermutation(features=features))

        super().__init__(
            transform=CompositeTransform(layers),
            distribution=StandardNormal([features]),
        )


class BasicFlow(Flow):
    def __init__(self, features):
        transform = CompositeTransform(
            [
                MaskedAffineAutoregressiveTransform(
                    features=features, hidden_features=2 * features
                ),
                RandomPermutation(features=features),
            ]
        )
        distribution = StandardNormal(shape=[features])
        super().__init__(
            transform=transform,
            distribution=distribution,
        )
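A hypothetical sketch of driving one of these flows (the shapes, batch size, and single optimisation step are illustrative; log_prob and sample come from the underlying nflows Flow base class):

import torch

from bilby.bilby_mcmc.flows import NVPFlow

flow = NVPFlow(
    features=4,               # dimensionality of the samples
    hidden_features=16,
    num_layers=2,
    num_blocks_per_layer=2,
)

x = torch.randn(128, 4)       # a toy batch of 4-dimensional samples
opt = torch.optim.Adam(flow.parameters(), lr=1e-3)

# One maximum-likelihood training step on the batch
opt.zero_grad()
loss = -flow.log_prob(x).mean()
loss.backward()
opt.step()

draws = flow.sample(10)       # 10 new samples drawn from the flow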
bilby/bilby_mcmc/utils.py

from collections import namedtuple

LOGLKEY = "logl"
LOGLLATEXKEY = r"$\log\mathcal{L}$"
LOGPKEY = "logp"
LOGPLATEXKEY = r"$\log\pi$"

ConvergenceInputs = namedtuple(
    "ConvergenceInputs",
    [
        "autocorr_c",
        "burn_in_nact",
        "thin_by_nact",
        "fixed_discard",
        "target_nsamples",
        "stop_after_convergence",
        "L1steps",
        "L2steps",
        "min_tau",
        "fixed_tau",
        "tau_window",
    ],
)

ParallelTemperingInputs = namedtuple(
    "ParallelTemperingInputs",
    [
        "ntemps",
        "nensemble",
        "Tmax",
        "Tmax_from_SNR",
        "initial_betas",
        "adapt",
        "adapt_t0",
        "adapt_nu",
        "pt_ensemble",
    ],
)
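These namedtuples only bundle sampler settings; a hypothetical construction follows (the values are illustrative, not defaults taken from Bilby_MCMC):

from bilby.bilby_mcmc.utils import ConvergenceInputs

inputs = ConvergenceInputs(
    autocorr_c=5,
    burn_in_nact=1,
    thin_by_nact=1,
    fixed_discard=0,
    target_nsamples=1000,
    stop_after_convergence=False,
    L1steps=100,
    L2steps=2,
    min_tau=1,
    fixed_tau=None,
    tau_window=None,
)
print(inputs.target_nsamples)  # fields are accessed by name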
bilby/core/__init__.py

-from __future__ import absolute_import
-from . import grid, likelihood, prior, result, sampler, series, utils
+from . import grid, likelihood, prior, result, sampler, series, utils, fisher