Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (854)
Showing with 4414 additions and 207 deletions
@@ -3,3 +3,4 @@ omit =
test/integration/example_test.py
test/integration/noise_realisation_test.py
test/integration/other_test.py
bilby/_version.py
hist
livetime
iff
amin
amax
@@ -13,5 +13,6 @@ MANIFEST
*.dat
*.version
*.ipynb_checkpoints
outdir/*
**/outdir
.idea/*
bilby/_version.py
@@ -10,148 +10,248 @@
# before the next stage begins
stages:
- initial
- test
- docs
- deploy
# ------------------- Initial stage -------------------------------------------
.list-env: &list-env
- PREFIX="$(dirname $(which python))/.."
- if [ -d "${PREFIX}/conda-meta" ]; then
conda list --prefix "${PREFIX}" --show-channel-urls;
else
python -m pip list installed;
fi
# Check author list is up to date
authors:
stage: initial
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python310
script:
- python test/check_author_list.py
# Test containers scripts are up to date
containers:
stage: initial
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python311
script:
- cd containers
- python write_dockerfiles.py #HACK
# Fail if differences exist. If this fails, you may need to run
# write_dockerfiles.py and commit the changes.
- git diff --exit-code
- cp env-template.yml env.yml
- echo " - python=3.11" >> env.yml
- mamba env create -f env.yml -n test --dry-run
.test-python: &test-python
stage: test
stage: initial
image: python
before_script:
# this is required because pytables doesn't use a wheel on py37
- apt-get -yqq update
- apt-get -yqq install libhdf5-dev
script:
- python -m pip install .
- *list-env
- python -c "import bilby"
- python -c "import bilby.bilby_mcmc"
- python -c "import bilby.core"
- python -c "import bilby.core.prior"
- python -c "import bilby.core.sampler"
- python -c "import bilby.core.utils"
- python -c "import bilby.gw"
- python -c "import bilby.gw.detector"
- python -c "import bilby.gw.eos"
- python -c "import bilby.gw.likelihood"
- python -c "import bilby.gw.sampler"
- python -c "import bilby.hyper"
- python -c "import cli_bilby"
- python test/import_test.py
- for script in $(pip show -f bilby | grep "bin\/" | xargs -I {} basename {}); do
${script} --help;
done
# test basic setup on python3
basic-3.7:
basic-3.10:
<<: *test-python
image: python:3.7
image: python:3.10
# test example on python 3.7
python-3.7:
stage: test
image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
basic-3.11:
<<: *test-python
image: python:3.11
basic-3.12:
<<: *test-python
image: python:3.12
.test-samplers-import: &test-samplers-import
stage: initial
script:
- python -m pip install .
- *list-env
- pytest test/test_samplers_import.py -v
# Run pyflakes
- flake8 .
import-samplers-3.10:
<<: *test-samplers-import
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python310
# Run tests and collect coverage data
- pytest --cov=bilby --durations 10
- coverage html
- coverage-badge -o coverage_badge.svg -f
import-samplers-3.11:
<<: *test-samplers-import
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python311
# Make the documentation
- cd docs
- make clean
- make html
artifacts:
paths:
- htmlcov/
- coverage_badge.svg
- docs/_build/html/
import-samplers-3.12:
<<: *test-samplers-import
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python312
# test example on python 3.8
python-3.8:
stage: test
image: quay.io/bilbydev/v2-dockerfile-test-suite-python38
.precommits: &precommits
stage: initial
script:
- python -m pip install .
- source activate $PYVERSION
- mkdir -p $CACHE_DIR
- pip install --upgrade pip
- pip --cache-dir=$CACHE_DIR install --upgrade bilby
- pip --cache-dir=$CACHE_DIR install .
# Run precommits (flake8, spellcheck, isort, no merge conflicts, etc)
- pre-commit run --all-files --verbose --show-diff-on-failure
- pytest
precommits-py3.11:
<<: *precommits
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python311
variables:
CACHE_DIR: ".pip311"
PYVERSION: "python311"
# test example on python 3.6
python-3.6:
stage: test
image: quay.io/bilbydev/v2-dockerfile-test-suite-python36
install:
stage: initial
parallel:
matrix:
- EXTRA: [gw, mcmc, all]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python311
script:
- python -m pip install .
- pip install .[$EXTRA]
- pytest
# ------------------- Test stage -------------------------------------------
# test samplers on python 3.7
python-3.7-samplers:
.unit-tests: &unit-test
stage: test
image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
script:
- python -m pip install .
- *list-env
- pytest test/integration/sampler_run_test.py --durations 10
- pytest test/integration/sample_from_the_prior_test.py
- pytest --cov=bilby --durations 10
# test samplers on python 3.6
python-3.6-samplers:
stage: test
image: quay.io/bilbydev/v2-dockerfile-test-suite-python36
script:
- python -m pip install .
python-3.10:
<<: *unit-test
needs: ["basic-3.10"]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python310
- pytest test/integration/sampler_run_test.py
python-3.11:
<<: *unit-test
needs: ["basic-3.11", "precommits-py3.11"]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python311
after_script:
- coverage html
- coverage xml
coverage: '/(?i)total.*? (100(?:\.0+)?\%|[1-9]?\d(?:\.\d+)?\%)$/'
artifacts:
reports:
coverage_report:
coverage_format: cobertura
path: coverage.xml
paths:
- htmlcov/
expire_in: 30 days
# Test containers are up to date
containers:
python-3.12:
<<: *unit-test
needs: ["basic-3.12"]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python312
.test-sampler: &test-sampler
stage: test
image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
script:
- cd containers
- python write_dockerfiles.py
# Fail if differences exist. If this fails, you may need to run
# write_dockerfiles.py and commit the changes.
- git diff --exit-code
- python -m pip install .[all]
- *list-env
- pytest test/integration/sampler_run_test.py --durations 10 -v
# Tests run at a fixed schedule rather than on push
scheduled-python-3.7:
python-3.10-samplers:
<<: *test-sampler
needs: ["basic-3.10"]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python310
python-3.11-samplers:
<<: *test-sampler
needs: ["basic-3.11", "precommits-py3.11"]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python311
python-3.12-samplers:
<<: *test-sampler
needs: ["basic-3.12"]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python312
integration-tests-python-3.11:
stage: test
image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python311
needs: ["basic-3.11", "precommits-py3.11"]
only:
- schedules
script:
- python -m pip install .
- *list-env
# Run tests which are only done on schedule
- pytest test/integration/example_test.py
- pytest test/integration/sample_from_the_prior_test.py
plotting:
.plotting: &plotting
stage: test
image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
only:
- schedules
script:
- python -m pip install .
- python -m pip install ligo.skymap
- *list-env
- pytest test/gw/plot_test.py
authors:
stage: test
image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
plotting-python-3.10:
<<: *plotting
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python310
needs: ["basic-3.10"]
plotting-python-3.11:
<<: *plotting
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python311
needs: ["basic-3.11", "precommits-py3.11"]
plotting-python-3.12:
<<: *plotting
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python312
needs: ["basic-3.12"]
# ------------------- Docs stage -------------------------------------------
docs:
stage: docs
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python311
before_script:
- python -m ipykernel install
script:
- python test/check_author_list.py
# Make the documentation
- python -m pip install .
- python -m pip install myst_parser # only for testing purposes - remove once test image is generating correctly
- cd examples/tutorials
- jupyter nbconvert --to notebook --execute *.ipynb --output-dir ../../docs
- cd ../../docs
- make clean
- make html
artifacts:
paths:
- docs/_build/html/
# ------------------- Deploy stage -------------------------------------------
pages:
stage: deploy
dependencies:
- python-3.7
needs: ["docs", "python-3.11"]
script:
- mkdir public/
- mv htmlcov/ public/
- mv coverage_badge.svg public/
- mv docs/_build/html/* public/
artifacts:
paths:
@@ -160,31 +260,50 @@ pages:
only:
- master
deploy_release:
.build-container: &build-container
stage: deploy
image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
image: docker:20.10.23
needs: ["containers"]
rules:
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
changes:
compare_to: 'refs/heads/main'
paths:
- containers/*
when: manual
- if: $CI_PIPELINE_SOURCE == "schedule"
script:
- cd containers
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
- cp v3-dockerfile-test-suite-$PYVERSION Dockerfile
- docker build --tag v3-bilby-$PYVERSION .
- docker image tag v3-bilby-$PYVERSION containers.ligo.org/lscsoft/bilby/v2-bilby-$PYVERSION:latest
- docker image push containers.ligo.org/lscsoft/bilby/v2-bilby-$PYVERSION:latest
build-python310-container:
<<: *build-container
variables:
PYVERSION: "python310"
build-python311-container:
<<: *build-container
variables:
PYVERSION: "python311"
build-python312-container:
<<: *build-container
variables:
PYVERSION: "python312"
pypi-release:
stage: deploy
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python310
variables:
TWINE_USERNAME: $PYPI_USERNAME
TWINE_PASSWORD: $PYPI_PASSWORD
before_script:
- pip install twine
- python setup.py sdist
- python -m build --sdist --wheel --outdir dist/ .
script:
- twine upload dist/*
only:
- tags
precommits-py3.7:
stage: test
image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
script:
- source activate python37
- mkdir -p .pip37
- pip install --upgrade pip
- pip --cache-dir=.pip37 install --upgrade bilby
- pip --cache-dir=.pip37 install .
- pip --cache-dir=.pip37 install pre-commit
# Run precommits (flake8, spellcheck, isort, no merge conflicts, etc)
- pre-commit run --all-files --verbose --show-diff-on-failure
- tags
@@ -4,19 +4,39 @@ repos:
hooks:
- id: check-merge-conflict # prevent committing files with merge conflicts
- id: flake8 # checks for flake8 errors
#- repo: https://github.com/codespell-project/codespell
# rev: v1.16.0
# hooks:
# - id: codespell # Spellchecker
# args: [-L, nd, --skip, "*.ipynb,*.html", --ignore-words=.dictionary.txt]
# exclude: ^examples/tutorials/
#- repo: https://github.com/asottile/seed-isort-config
# rev: v1.3.0
# hooks:
# - id: seed-isort-config
# args: [--application-directories, 'bilby/']
#- repo: https://github.com/pre-commit/mirrors-isort
# rev: v4.3.21
# hooks:
# - id: isort # sort imports alphabetically and separates import into sections
# args: [-w=88, -m=3, -tc, -sp=setup.cfg ]
- repo: https://github.com/psf/black
rev: 22.3.0
hooks:
- id: black
language_version: python3
files: '^(bilby/bilby_mcmc/|bilby/core/sampler/|examples/)'
- repo: https://github.com/codespell-project/codespell
rev: v2.1.0
hooks:
- id: codespell
args: [--ignore-words=.dictionary.txt]
- repo: https://github.com/pre-commit/mirrors-isort
rev: v5.10.1
hooks:
- id: isort # sort imports alphabetically and separates import into sections
args: [-w=88, -m=3, -tc, -sp=setup.cfg ]
files: '^(bilby/bilby_mcmc/|bilby/core/sampler/|examples/)'
- repo: https://github.com/datarootsio/databooks
rev: 0.1.14
hooks:
- id: databooks
name: databooks
description:
"Remove notebook metadata using `databooks`."
entry: databooks meta
language: python
minimum_pre_commit_version: 2.9.2
types: [ jupyter ]
args: [-w]
- repo: local
hooks:
- id: jupyter-nb-clear-output
name: jupyter-nb-clear-output
files: \.ipynb$
language: system
entry: jupyter nbconvert --ClearOutputPreprocessor.enabled=True --inplace
@@ -9,12 +9,15 @@ Aditya Vijaykumar
Andrew Kim
Andrew Miller
Antoni Ramos-Buades
Apratim Ganguly
Avi Vajpeyi
Ben Patterson
Bruce Edelman
Carl-Johan Haster
Cecilio Garcia-Quiros
Charlie Hoy
Christopher Berry
Chentao Yang
Christopher Philip Luke Berry
Christos Karathanasis
Colm Talbot
Daniel Williams
@@ -24,16 +27,27 @@ Eric Thrane
Ethan Payne
Francisco Javier Hernandez
Gregory Ashton
Hank Hua
Hector Estelles
Ignacio Magaña Hernandez
Isaac McMahon
Isobel Marguarethe Romero-Shaw
Jack Heinzel
Jacob Golomb
Jade Powell
James A Clark
Jeremy G Baier
John Veitch
Joshua Brandt
Josh Willis
Karl Wette
Katerina Chatziioannou
Kaylee de Soto
Khun Sang Phukon
Kruthi Krishna
Kshipraa Athar
Kyle Wong
Leslie Wade
Liting Xiao
Maite Mateu-Lucena
Marc Arene
@@ -46,17 +60,24 @@ Michael Puerrer
Michael Williams
Monica Rizzo
Moritz Huebner
Nico Gerardo Bers
Nicola De Lillo
Nikhil Sarin
Nirban Bose
Noah Wolfe
Olivia Wilk
Paul Easter
Paul Lasky
Philip Relton
Rhys Green
Rhiannon Udall
Rico Lo
Roberto Cotesta
Rory Smith
S. H. Oh
Sacha Husa
Sama Al-Shammari
Samson Leong
Scott Coughlin
Serguei Ossokine
Shanika Galaudage
@@ -64,8 +85,21 @@ Sharan Banagiri
Shichao Wu
Simon Stevenson
Soichiro Morisaki
Soumen Roy
Stephen R Green
Sumeet Kulkarni
Sylvia Biscoveanu
Tathagata Ghosh
Teagan Clarke
Tomasz Baka
Will M. Farr
Virginia d'Emilio
Vivien Raymond
Ka-Lok Lo
Isaac Legred
Marc Penuliar
Andrew Fowlie
Martin White
Peter Tsun-Ho Pang
Alexandre Sebastien Goettel
Ann-Kristin Malz
This diff is collapsed.
# Contributing to bilby
This is a short guide to contributing to bilby aimed at general LVC members who
This is a short guide to contributing to bilby aimed at general LVK members who
have some familiarity with python and git.
1. [Code of conduct](#code-of-conduct)
2. [Code style](#code-style)
3. [Code relevance](#code-relevance)
4. [Merge requests](#merge-requests)
5. [Typical workflow](#typical-workflow)
6. [Hints and tips](#hints-and-tips)
7. [Code overview](#code-overview)
3. [Automated Code Checking](#automated-code-checking)
4. [Unit Testing](#unit-testing)
5. [Code relevance](#code-relevance)
6. [Merge requests](#merge-requests)
7. [Typical workflow](#typical-workflow)
8. [Hints and tips](#hints-and-tips)
9. [Code overview](#code-overview)
## Code of Conduct
@@ -17,7 +19,7 @@ have some familiarity with python and git.
Everyone participating in the bilby community, and in particular in our issue
tracker, merge requests, and chat channels, is expected to treat other people
with respect and follow the guidelines articulated in the [Python Community
Code of Conduct](https://www.python.org/psf/codeofconduct/).
Code of Conduct](https://www.python.org/psf/codeofconduct/). Furthermore, members of the LVK collaboration must follow the [LVK Code of Conduct](https://dcc.ligo.org/LIGO-M1900037/public).
## Code style
@@ -46,8 +48,8 @@ def my_new_function(x, y, print=False):
```
3. Avoid inline comments unless necessary. Ideally, the code itself, or failing that the docstring, should make it obvious what is going on; use comments only for subtle cases
4. Name variables sensibly. Avoid single-letter variables; it is better to name something `power_spectral_density_array` than `psda`.
5. Don't repeat yourself. If code is repeated in multiple places, wrap it up into a function.
6. Add tests. The C.I. is there to do the work of "checking" the code, both now and into the future. Use it.
5. Don't repeat yourself. If code is repeated in multiple places, wrap it up into a function. This also helps with the writing of robust unit tests (see below).
## Automated code checking
@@ -76,6 +78,50 @@ If you experience any issues with pre-commit, please ask for support on the
usual help channels.
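For reference, the typical local workflow is only a few commands (a minimal sketch; the hooks themselves are defined in `.pre-commit-config.yaml`):

```bash
$ pip install pre-commit      # install the pre-commit tool
$ pre-commit install          # register the hooks in .git/hooks
$ pre-commit run --all-files  # run every hook against the full repository
```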
## Unit Testing
Unit tests are an important part of code development: they help minimize the number of undetected bugs in a merge request, greatly expedite code review, and, used properly, can even help during initial development. Accordingly, bilby requires unit tests for any change with machine-readable inputs and outputs (i.e. pretty much everything except plotting).
Unit testing is integrated into the CI/CD pipeline and uses the built-in unittest package. Tests should be written in the part of the `test/` directory that mirrors their location within the package; for example, a change to `bilby/gw/conversion.py` should be tested in `test/gw/conversion_test.py`. To run a single test locally, one may do `pytest /path/to/test_file.py::TestClass::test_name`, whereas to run all the tests in a given test file one may omit the class and test name.
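For example, to run the `TestFFT` tests shown below one might do the following (the test-file path is illustrative; substitute the file's actual location within `test/`):

```bash
$ pytest test/core/utils_test.py::TestFFT::test_nfft_sine_function   # a single test method
$ pytest test/core/utils_test.py                                      # every test in the file
```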
For an example of what a test looks like, consider this test for the fft utils in bilby:
```
import unittest

import numpy as np

import bilby
from bilby.core import utils


class TestFFT(unittest.TestCase):
    def setUp(self):
        self.sampling_frequency = 10

    def tearDown(self):
        del self.sampling_frequency

    def test_nfft_sine_function(self):
        injected_frequency = 2.7324
        duration = 100
        times = utils.create_time_series(self.sampling_frequency, duration)
        time_domain_strain = np.sin(2 * np.pi * times * injected_frequency + 0.4)
        frequency_domain_strain, frequencies = bilby.core.utils.nfft(
            time_domain_strain, self.sampling_frequency
        )
        frequency_at_peak = frequencies[np.argmax(np.abs(frequency_domain_strain))]
        self.assertAlmostEqual(injected_frequency, frequency_at_peak, places=1)

    def test_nfft_infft(self):
        time_domain_strain = np.random.normal(0, 1, 10)
        frequency_domain_strain, _ = bilby.core.utils.nfft(
            time_domain_strain, self.sampling_frequency
        )
        new_time_domain_strain = bilby.core.utils.infft(
            frequency_domain_strain, self.sampling_frequency
        )
        self.assertTrue(np.allclose(time_domain_strain, new_time_domain_strain))
```
`setUp` and `tearDown` handle construction and deconstruction of the test, so that each of the other test functions may be run independently and in any order. The other two functions each make an intuitive test of the FFT/IFFT functionality: that the FFT of a sine wave should peak at the injected frequency (a delta function in the frequency domain), and that an IFFT should invert an FFT.
For more information on how to write effective tests, see [this guide](https://docs.python-guide.org/writing/tests/), among many others.
## Code relevance
The bilby code base is intended to be highly modular and flexible. We encourage
@@ -153,7 +199,19 @@ $ git clone git@git.ligo.org:albert.einstein/bilby.git
```
replacing the SSH url to that of your fork. This will create a directory
`/bilby` containing a local copy of the code. From this directory, you can run
`/bilby` containing a local copy of the code.
It is strongly advised to perform development with a dedicated conda environment.
In-depth instructions for creating a conda environment may be found in the relevant
[conda docs](https://conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#activating-an-environment),
but for most purposes the commands
```bash
$ conda create -n my-environment-name python=3.X
$ conda activate my-environment-name
```
will create and activate a suitable environment. Once this is done, one may proceed to the `/bilby` directory and run
```bash
$ pip install -e .
@@ -202,8 +260,25 @@ $ git checkout -b my-new-feature lscsoft/master
### Step d) Hack away
1. Develop the changes you would like to introduce, using `git add` to add files with changes. Ideally commit small units of change often, rather than creating one large commit at the end, this will simplify review and make modifying any changes easier.
2. Commit the changes using `git commit`. This will prompt you for a commit message. Commit messages should be clear, identifying which code was changed, and why. Common practice (see e.g. [this blog](https://chris.beams.io/posts/git-commit/)) is to use a short summary line (<50 characters), followed by a blank line, then more information in longer lines.
3. Push your changes to the remote copy of your fork on git.ligo.org
2. Commit the changes using `git commit`. This will prompt you for a commit message. Commit messages should be clear, identifying which code was changed, and why. Bilby is adopting the scipy commit message format, specified [here](https://docs.scipy.org/doc/scipy/dev/contributor/development_workflow.html#writing-the-commit-message). Commit messages take the standard form of `ACRONYM: Summary message` followed by a more detailed description. For example, an enhancement would look like:
```
ENH: Add my awesome new feature
This is a very cool change that makes parameter estimation run 10x faster by changing a single line.
```
Similarly a bugfix:
```
BUG: Fix type error in my_awesome_feature.py
Correct a typo at L1 of /bilby/my_awesome_feature.py which returned a dictionary instead of a string.
```
For more discussion of best practices, see e.g. [this blog](https://chris.beams.io/posts/git-commit/).
4. Push your changes to the remote copy of your fork on git.ligo.org
```bash
git push origin my-new-feature
@@ -275,7 +350,7 @@ help orient users and make it easier to contribute. The layout is intended to
define the logic of the code and new merge requests should aim to fit within
this logic (unless there is a good argument to change it). For example, code
which adds a new sampler should not affect the gravitational-wave specific
parts of the code. Note that this document is not programatically generated and
parts of the code. Note that this document is not programmatically generated and
so may get out of date with time. If you notice something wrong, please open an
issue.
......
include README.rst
include LICENSE.md
include requirements.txt
include gw_requirements.txt
include mcmc_requirements.txt
include optional_requirements.txt
include sampler_requirements.txt
include bilby/_version.py
recursive-include test *.py *.prior
|pipeline status| |coverage report| |pypi| |conda| |version|
=====
Bilby
Bilby development has moved to `GitHub <https://github.com/bilby-dev/bilby>`__!
=====
Please open any new issues or pull requests there. A full migration guide will be provided soon. Links below here may no longer be active.
====
A user-friendly Bayesian inference library.
Fulfilling all your Bayesian dreams.
@@ -30,44 +32,11 @@ us directly. For advice on contributing, see `the contributing guide <https://gi
Citation guide
--------------
If you use :code:`bilby` in a scientific publication, please cite
* `Bilby: A user-friendly Bayesian inference library for gravitational-wave
astronomy
<https://ui.adsabs.harvard.edu/#abs/2018arXiv181102042A/abstract>`__
Additionally, :code:`bilby` builds on a number of open-source packages. If you
make use of this functionality in your publications, we recommend you cite them
as requested in their associated documentation.
**Samplers**
* `dynesty <https://github.com/joshspeagle/dynesty>`__
* `nestle <https://github.com/kbarbary/nestle>`__
* `pymultinest <https://github.com/JohannesBuchner/PyMultiNest>`__
* `cpnest <https://github.com/johnveitch/cpnest>`__
* `emcee <https://github.com/dfm/emcee>`__
* `ptemcee <https://github.com/willvousden/ptemcee>`__
* `ptmcmcsampler <https://github.com/jellis18/PTMCMCSampler>`__
* `pypolychord <https://github.com/PolyChord/PolyChordLite>`__
* `PyMC3 <https://github.com/pymc-devs/pymc3>`_
**Gravitational-wave tools**
* `gwpy <https://github.com/gwpy/gwpy>`__
* `lalsuite <https://git.ligo.org/lscsoft/lalsuite>`__
* `astropy <https://github.com/astropy/astropy>`__
**Plotting**
* `corner <https://github.com/dfm/corner.py>`__ for generating corner plot
* `matplotlib <https://github.com/matplotlib/matplotlib>`__ for general plotting routines
Please refer to the `Acknowledging/citing bilby guide <https://lscsoft.docs.ligo.org/bilby/citing-bilby.html>`__.
.. |pipeline status| image:: https://git.ligo.org/lscsoft/bilby/badges/master/pipeline.svg
:target: https://git.ligo.org/lscsoft/bilby/commits/master
.. |coverage report| image:: https://lscsoft.docs.ligo.org/bilby/coverage_badge.svg
.. |coverage report| image:: https://git.ligo.org/lscsoft/bilby/badges/master/coverage.svg
:target: https://lscsoft.docs.ligo.org/bilby/htmlcov/
.. |pypi| image:: https://badge.fury.io/py/bilby.svg
:target: https://pypi.org/project/bilby/
......
@@ -16,7 +16,6 @@ https://lscsoft.docs.ligo.org/bilby/installation.html.
"""
from __future__ import absolute_import
import sys
from . import core, gw, hyper
@@ -24,13 +23,17 @@ from . import core, gw, hyper
from .core import utils, likelihood, prior, result, sampler
from .core.sampler import run_sampler
from .core.likelihood import Likelihood
from .core.result import read_in_result, read_in_result_list
__version__ = utils.get_version_information()
try:
from ._version import version as __version__
except ModuleNotFoundError: # development mode
__version__ = 'unknown'
if sys.version_info < (3,):
raise ImportError(
"""You are running bilby 0.6.4 on Python 2
"""You are running bilby >= 0.6.4 on Python 2
Bilby 0.6.4 and above are no longer compatible with Python 2, and you still
ended up with this version installed. That's unfortunate; sorry about that.
......
from .sampler import Bilby_MCMC
import numpy as np
import pandas as pd
from packaging import version
from ..core.sampler.base_sampler import SamplerError
from ..core.utils import logger
from .utils import LOGLKEY, LOGLLATEXKEY, LOGPKEY, LOGPLATEXKEY
class Chain(object):
def __init__(
self,
initial_sample,
burn_in_nact=1,
thin_by_nact=1,
fixed_discard=0,
autocorr_c=5,
min_tau=1,
fixed_tau=None,
tau_window=None,
block_length=100000,
):
"""Object to store a single mcmc chain
Parameters
----------
initial_sample: bilby.bilby_mcmc.chain.Sample
The starting point of the chain
burn_in_nact, thin_by_nact : int (1, 1)
The number of autocorrelation times (tau) to discard for burn-in
and the multiplicative factor to thin by (thin_by_nact < 1). I.e
burn_in_nact=10 and thin_by_nact=1 will discard 10*tau samples from
the start of the chain, then thin the final chain by a factor
of 1*tau (resulting in independent samples).
fixed_discard: int (0)
A fixed minimum number of samples to discard (can be used to
override the burn_in_nact if it is too small).
autocorr_c: float (5)
The step size of the window search used by emcee.autocorr when
estimating the autocorrelation time.
min_tau: int (1)
A minimum value for the autocorrelation time.
fixed_tau: int (None)
A fixed value for the autocorrelation (overrides the automated
autocorrelation time estimation). Used in testing.
tau_window: int (None)
Only calculate the autocorrelation time in a trailing window. If
None (default) this method is not used.
block_length: int
The incremental size to extend the array by when it runs out of
space.
"""
self.autocorr_c = autocorr_c
self.min_tau = min_tau
self.burn_in_nact = burn_in_nact
self.thin_by_nact = thin_by_nact
self.block_length = block_length
self.fixed_discard = int(fixed_discard)
self.fixed_tau = fixed_tau
self.tau_window = tau_window
self.ndim = initial_sample.ndim
self.current_sample = initial_sample
self.keys = self.current_sample.keys
self.parameter_keys = self.current_sample.parameter_keys
# Initialize chain
self._chain_array = self._get_zero_chain_array()
self._chain_array_length = block_length
self.position = -1
self.max_log_likelihood = -np.inf
self.max_tau_dict = {}
self.converged = False
self.cached_tau_count = 0
self._minimum_index_proposal = 0
self._minimum_index_adapt = 0
self._last_minimum_index = (0, 0, "I")
self.last_full_tau_dict = {key: np.inf for key in self.parameter_keys}
# Append the initial sample
self.append(self.current_sample)
def _get_zero_chain_array(self):
return np.zeros((self.block_length, self.ndim + 2), dtype=np.float64)
def _extend_chain_array(self):
self._chain_array = np.concatenate(
(self._chain_array, self._get_zero_chain_array()), axis=0
)
self._chain_array_length = len(self._chain_array)
@property
def current_sample(self):
return self._current_sample.copy()
@current_sample.setter
def current_sample(self, current_sample):
self._current_sample = current_sample
def append(self, sample):
self.position += 1
# Extend the array if needed
if self.position >= self._chain_array_length:
self._extend_chain_array()
# Store the current sample and append to the array
self.current_sample = sample
self._chain_array[self.position] = sample.list
# Update the maximum log_likelihood
if sample[LOGLKEY] > self.max_log_likelihood:
self.max_log_likelihood = sample[LOGLKEY]
def __getitem__(self, index):
if index < 0:
index = index + self.position + 1
if index <= self.position:
values = self._chain_array[index]
return Sample({k: v for k, v in zip(self.keys, values)})
else:
raise SamplerError(f"Requested index {index} out of bounds")
def __setitem__(self, index, sample):
if index < 0:
index = index + self.position + 1
self._chain_array[index] = sample.list
def key_to_idx(self, key):
return self.keys.index(key)
def get_1d_array(self, key):
return self._chain_array[: 1 + self.position, self.key_to_idx(key)]
@property
def _random_idx(self):
from ..core.utils.random import rng
mindex = self._last_minimum_index[1]
# If tau is unknown, or fewer than 10 ACTs of samples lie past the minimum
# index, draw from the whole chain; otherwise draw only from the chain past
# the minimum index
if np.isinf(self.tau_last) or self.position - mindex < 10 * self.tau_last:
mindex = 0
return rng.integers(mindex, self.position + 1)
@property
def random_sample(self):
return self[self._random_idx]
@property
def fixed_discard(self):
return self._fixed_discard
@fixed_discard.setter
def fixed_discard(self, fixed_discard):
self._fixed_discard = int(fixed_discard)
@property
def minimum_index(self):
"""This calculates a minimum index from which to discard samples
A number of methods are provided for the calculation. A subset are
switched off (by `if False` statements) for future development
"""
position = self.position
# Return cached minimum index
last_minimum_index = self._last_minimum_index
if position == last_minimum_index[0]:
return int(last_minimum_index[1])
# If fixed discard is not yet reached, just return that
if position < self.fixed_discard:
self.minimum_index_method = "FD"
return self.fixed_discard
# Initialize list of minimum index methods with the fixed discard (FD)
minimum_index_list = [self.fixed_discard]
minimum_index_method_list = ["FD"]
# Calculate minimum index from tau
if self.tau_last < np.inf:
tau = self.tau_last
elif len(self.max_tau_dict) == 0:
# Bootstrap calculating tau when minimum index has not yet been calculated
tau = self._tau_for_full_chain
else:
tau = np.inf
if tau < np.inf:
minimum_index_list.append(self.burn_in_nact * tau)
minimum_index_method_list.append(f"{self.burn_in_nact}tau")
# Calculate points when log-posterior is within z std of the mean
if True:
zfactor = 1
N = 100
delta_lnP = zfactor * self.ndim / 2
logl = self.get_1d_array(LOGLKEY)
log_prior = self.get_1d_array(LOGPKEY)
log_posterior = logl + log_prior
max_posterior = np.max(log_posterior)
ave = pd.Series(log_posterior).rolling(window=N).mean().iloc[N - 1 :]
delta = max_posterior - ave
passes = ave[delta < delta_lnP]
if len(passes) > 0:
minimum_index_list.append(passes.index[0] + 1)
minimum_index_method_list.append(f"z{zfactor}")
# Add last minimum_index_method
if False:
minimum_index_list.append(last_minimum_index[1])
minimum_index_method_list.append(last_minimum_index[2])
# Minimum index set by proposals
minimum_index_list.append(self.minimum_index_proposal)
minimum_index_method_list.append("PR")
# Minimum index set by temperature adaptation
minimum_index_list.append(self.minimum_index_adapt)
minimum_index_method_list.append("AD")
# Calculate the maximum minimum index and associated method (reporting)
minimum_index = int(np.max(minimum_index_list))
minimum_index_method = minimum_index_method_list[np.argmax(minimum_index_list)]
# Cache the method
self._last_minimum_index = (position, minimum_index, minimum_index_method)
self.minimum_index_method = minimum_index_method
return minimum_index
@property
def minimum_index_proposal(self):
return self._minimum_index_proposal
@minimum_index_proposal.setter
def minimum_index_proposal(self, minimum_index_proposal):
if minimum_index_proposal > self._minimum_index_proposal:
self._minimum_index_proposal = minimum_index_proposal
@property
def minimum_index_adapt(self):
return self._minimum_index_adapt
@minimum_index_adapt.setter
def minimum_index_adapt(self, minimum_index_adapt):
if minimum_index_adapt > self._minimum_index_adapt:
self._minimum_index_adapt = minimum_index_adapt
@property
def tau(self):
"""The maximum ACT over all parameters"""
if self.position in self.max_tau_dict:
# If we have the ACT at the current position, return it
return self.max_tau_dict[self.position]
elif (
self.tau_last < np.inf
and self.cached_tau_count < 50
and self.nsamples_last > 50
):
# If we have a recent ACT return it
self.cached_tau_count += 1
return self.tau_last
else:
# Calculate the ACT
return self.tau_nocache
@property
def tau_nocache(self):
"""Calculate tau forcing a recalculation (no cached tau)"""
tau = max(self.tau_dict.values())
self.max_tau_dict[self.position] = tau
self.cached_tau_count = 0
return tau
@property
def tau_last(self):
"""Return the last-calculated tau if it exists, else inf"""
if len(self.max_tau_dict) > 0:
return list(self.max_tau_dict.values())[-1]
else:
return np.inf
@property
def _tau_for_full_chain(self):
"""The maximum ACT over all parameters"""
return max(self._tau_dict_for_full_chain.values())
@property
def _tau_dict_for_full_chain(self):
return self._calculate_tau_dict(minimum_index=0)
@property
def tau_dict(self):
"""Calculate a dictionary of tau (ACT) for every parameter"""
return self._calculate_tau_dict(self.minimum_index)
def _calculate_tau_dict(self, minimum_index):
"""Calculate a dictionary of tau (ACT) for every parameter"""
logger.debug(f"Calculating tau_dict {self}")
# If there are too few samples to calculate tau
if (self.position - minimum_index) < 2 * self.autocorr_c:
return {key: np.inf for key in self.parameter_keys}
# Choose the minimum index for the ACT calculation
last_tau = self.tau_last
if self.tau_window is not None and last_tau < np.inf:
minimum_index_for_act = max(
minimum_index, int(self.position - self.tau_window * last_tau)
)
else:
minimum_index_for_act = minimum_index
# Calculate a dictionary of tau's for each parameter
taus = {}
for key in self.parameter_keys:
if self.fixed_tau is None:
x = self.get_1d_array(key)[minimum_index_for_act:]
tau = calculate_tau(x, self.autocorr_c)
taux = round(tau, 1)
else:
taux = self.fixed_tau
taus[key] = max(taux, self.min_tau)
# Cache the last tau dictionary for future use
self.last_full_tau_dict = taus
return taus
@property
def thin(self):
if np.isfinite(self.tau):
return np.max([1, int(self.thin_by_nact * self.tau)])
else:
return 1
@property
def nsamples(self):
nuseable_steps = self.position - self.minimum_index
n_independent_samples = nuseable_steps / self.tau
nsamples = int(n_independent_samples / self.thin_by_nact)
if nuseable_steps >= nsamples:
return nsamples
else:
return 0
@property
def nsamples_last(self):
nuseable_steps = self.position - self.minimum_index
return int(nuseable_steps / (self.thin_by_nact * self.tau_last))
@property
def samples(self):
samples = self._chain_array[self.minimum_index : self.position : self.thin]
return pd.DataFrame(samples, columns=self.keys)
def plot(self, outdir=".", label="label", priors=None, all_samples=None):
import matplotlib.pyplot as plt
fig, axes = plt.subplots(
nrows=self.ndim + 3, ncols=2, figsize=(8, 9 + 3 * (self.ndim))
)
scatter_kwargs = dict(
lw=0,
marker="o",
)
K = 1000
nburn = self.minimum_index
plot_setups = zip(
[0, nburn, nburn],
[nburn, self.position, self.position],
[1, 1, self.thin], # Thin-by factor
["tab:red", "tab:grey", "tab:blue"], # Color
[0.5, 0.05, 0.5], # Alpha
[1, 1, 1], # Marker size
)
position_indexes = np.arange(self.position + 1)
# Plot the traceplots
for (start, stop, thin, color, alpha, ms) in plot_setups:
for ax, key in zip(axes[:, 0], self.keys):
xx = position_indexes[start:stop:thin] / K
yy = self.get_1d_array(key)[start:stop:thin]
# Downsample plots to max_pts: avoid memory issues
max_pts = 10000
while len(xx) > max_pts:
xx = xx[::2]
yy = yy[::2]
ax.plot(
xx,
yy,
color=color,
alpha=alpha,
ms=ms,
**scatter_kwargs,
)
ax.set_ylabel(self._get_plot_label_by_key(key, priors))
if key not in [LOGLKEY, LOGPKEY]:
msg = r"$\tau=$" + f"{self.last_full_tau_dict[key]:0.1f}"
ax.set_title(msg)
# Plot the histograms
for ax, key in zip(axes[:, 1], self.keys):
if all_samples is not None:
yy_all = all_samples[key]
if np.any(np.isinf(yy_all)):
logger.warning(
f"Could not plot histogram for parameter {key} due to infinite values"
)
else:
ax.hist(yy_all, bins=50, alpha=0.6, density=True, color="k")
yy = self.get_1d_array(key)[nburn : self.position : self.thin]
if np.any(np.isinf(yy)):
logger.warning(
f"Could not plot histogram for parameter {key} due to infinite values"
)
else:
ax.hist(yy, bins=50, alpha=0.8, density=True)
ax.set_xlabel(self._get_plot_label_by_key(key, priors))
# Add x-axes labels to the traceplots
axes[-1, 0].set_xlabel(r"Iteration $[\times 10^{3}]$")
# Plot the calculated ACT
ax = axes[-1, 0]
tausit = np.array(list(self.max_tau_dict.keys()) + [self.position]) / K
taus = list(self.max_tau_dict.values()) + [self.tau_last]
ax.plot(tausit, taus, color="C3")
ax.set(ylabel=r"Maximum $\tau$")
axes[-1, 1].set_axis_off()
filename = "{}/{}_checkpoint_trace.png".format(outdir, label)
msg = [
r"Maximum $\tau$" + f"={self.tau:0.1f} ",
r"$n_{\rm samples}=$" + f"{self.nsamples} ",
]
if self.thin_by_nact != 1:
msg += [
r"$n_{\rm samples}^{\rm eff}=$"
+ f"{int(self.nsamples * self.thin_by_nact)} "
]
fig.suptitle(
"| ".join(msg),
y=1,
)
fig.tight_layout()
fig.savefig(filename, dpi=200)
plt.close(fig)
@staticmethod
def _get_plot_label_by_key(key, priors=None):
if priors is not None and key in priors:
return priors[key].latex_label
elif key == LOGLKEY:
return LOGLLATEXKEY
elif key == LOGPKEY:
return LOGPLATEXKEY
else:
return key
class Sample(object):
def __init__(self, sample_dict):
"""A single sample
Parameters
----------
sample_dict: dict
A dictionary of the sample
"""
self.sample_dict = sample_dict
self.keys = list(sample_dict.keys())
self.parameter_keys = [k for k in self.keys if k not in [LOGPKEY, LOGLKEY]]
self.ndim = len(self.parameter_keys)
def __getitem__(self, key):
return self.sample_dict[key]
def __setitem__(self, key, value):
self.sample_dict[key] = value
if key not in self.keys:
self.keys = list(self.sample_dict.keys())
@property
def list(self):
return list(self.sample_dict.values())
def __repr__(self):
return str(self.sample_dict)
@property
def parameter_only_dict(self):
return {key: self.sample_dict[key] for key in self.parameter_keys}
@property
def dict(self):
return {key: self.sample_dict[key] for key in self.keys}
def as_dict(self, keys=None):
sdict = self.dict
if keys is None:
return sdict
else:
return {key: sdict[key] for key in keys}
def __eq__(self, other_sample):
return self.list == other_sample.list
def copy(self):
return Sample(self.sample_dict.copy())
def calculate_tau(x, autocorr_c=5):
import emcee
if version.parse(emcee.__version__) < version.parse("3"):
raise SamplerError("bilby-mcmc requires emcee > 3.0 for autocorr analysis")
if np.all(np.diff(x) == 0):
return np.inf
try:
# Hard code tol=1: we perform this check internally
tau = emcee.autocorr.integrated_time(x, c=autocorr_c, tol=1)[0]
if np.isnan(tau):
tau = np.inf
return tau
except emcee.autocorr.AutocorrError:
return np.inf
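To make the intended workflow concrete, here is a minimal usage sketch of the `Chain` and `Sample` objects defined above. It assumes they are importable from `bilby.bilby_mcmc.chain` and sets `fixed_tau` so the example does not depend on emcee's autocorrelation estimate; in real use the appended samples come from the MCMC proposals rather than a random number generator.

```
import numpy as np

from bilby.bilby_mcmc.chain import Chain, Sample

rng = np.random.default_rng(42)

# A sample holds the model parameters plus the "logl" and "logp" entries
initial = Sample(dict(x=0.0, y=0.0, logl=-10.0, logp=0.0))
chain = Chain(initial_sample=initial, burn_in_nact=1, thin_by_nact=1, fixed_tau=5)

# Append some fake MCMC steps (normally these come from the sampler proposals)
for _ in range(500):
    chain.append(
        Sample(dict(x=rng.normal(), y=rng.normal(), logl=rng.normal(-10, 1), logp=0.0))
    )

print(chain.tau)         # maximum autocorrelation time over parameters (here the fixed value)
print(chain.nsamples)    # number of independent samples after burn-in and thinning
samples = chain.samples  # pandas DataFrame of the burnt-in, thinned chain
```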
import torch
from glasflow.nflows.distributions.normal import StandardNormal
from glasflow.nflows.flows.base import Flow
from glasflow.nflows.nn import nets as nets
from glasflow.nflows.transforms import (
CompositeTransform,
MaskedAffineAutoregressiveTransform,
RandomPermutation,
)
from glasflow.nflows.transforms.coupling import (
AdditiveCouplingTransform,
AffineCouplingTransform,
)
from glasflow.nflows.transforms.normalization import BatchNorm
from torch.nn import functional as F
# Turn off parallelism
torch.set_num_threads(1)
torch.set_num_interop_threads(1)
class NVPFlow(Flow):
"""A simplified version of Real NVP for 1-dim inputs.
This implementation uses 1-dim checkerboard masking but doesn't use
multi-scaling.
Reference:
> L. Dinh et al., Density estimation using Real NVP, ICLR 2017.
This class has been modified from the example found at:
https://github.com/bayesiains/nflows/blob/master/nflows/flows/realnvp.py
"""
def __init__(
self,
features,
hidden_features,
num_layers,
num_blocks_per_layer,
use_volume_preserving=False,
activation=F.relu,
dropout_probability=0.0,
batch_norm_within_layers=False,
batch_norm_between_layers=False,
random_permutation=True,
):
if use_volume_preserving:
coupling_constructor = AdditiveCouplingTransform
else:
coupling_constructor = AffineCouplingTransform
mask = torch.ones(features)
mask[::2] = -1
def create_resnet(in_features, out_features):
return nets.ResidualNet(
in_features,
out_features,
hidden_features=hidden_features,
num_blocks=num_blocks_per_layer,
activation=activation,
dropout_probability=dropout_probability,
use_batch_norm=batch_norm_within_layers,
)
layers = []
for _ in range(num_layers):
transform = coupling_constructor(
mask=mask, transform_net_create_fn=create_resnet
)
layers.append(transform)
mask *= -1
if batch_norm_between_layers:
layers.append(BatchNorm(features=features))
if random_permutation:
layers.append(RandomPermutation(features=features))
super().__init__(
transform=CompositeTransform(layers),
distribution=StandardNormal([features]),
)
class BasicFlow(Flow):
def __init__(self, features):
transform = CompositeTransform(
[
MaskedAffineAutoregressiveTransform(
features=features, hidden_features=2 * features
),
RandomPermutation(features=features),
]
)
distribution = StandardNormal(shape=[features])
super().__init__(
transform=transform,
distribution=distribution,
)
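As a quick sanity check, the flows above behave like any other `glasflow.nflows` `Flow`: one can draw samples and evaluate their log-probability. This is a minimal sketch assuming `torch` and `glasflow` are installed; the class name is the one defined above.

```
import torch

flow = BasicFlow(features=2)

with torch.no_grad():
    samples = flow.sample(5)           # tensor of shape (5, 2)
    log_prob = flow.log_prob(samples)  # log-density of each sample, shape (5,)
print(samples.shape, log_prob.shape)
```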
This diff is collapsed.
This diff is collapsed.
from collections import namedtuple
LOGLKEY = "logl"
LOGLLATEXKEY = r"$\log\mathcal{L}$"
LOGPKEY = "logp"
LOGPLATEXKEY = r"$\log\pi$"
ConvergenceInputs = namedtuple(
"ConvergenceInputs",
[
"autocorr_c",
"burn_in_nact",
"thin_by_nact",
"fixed_discard",
"target_nsamples",
"stop_after_convergence",
"L1steps",
"L2steps",
"min_tau",
"fixed_tau",
"tau_window",
],
)
ParallelTemperingInputs = namedtuple(
"ParallelTemperingInputs",
[
"ntemps",
"nensemble",
"Tmax",
"Tmax_from_SNR",
"initial_betas",
"adapt",
"adapt_t0",
"adapt_nu",
"pt_ensemble",
],
)
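These namedtuples are simply typed containers for the sampler settings; constructing one looks like the following (a minimal sketch with illustrative values only):

```
ci = ConvergenceInputs(
    autocorr_c=5,
    burn_in_nact=10,
    thin_by_nact=1,
    fixed_discard=0,
    target_nsamples=1000,
    stop_after_convergence=False,
    L1steps=100,
    L2steps=3,
    min_tau=1,
    fixed_tau=None,
    tau_window=None,
)
print(ci.target_nsamples)
```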
from __future__ import absolute_import
from . import grid, likelihood, prior, result, sampler, series, utils
from . import grid, likelihood, prior, result, sampler, series, utils, fisher
import numpy as np
import pandas as pd
import scipy.linalg
from scipy.optimize import minimize
class FisherMatrixPosteriorEstimator(object):
def __init__(self, likelihood, priors, parameters=None, fd_eps=1e-6, n_prior_samples=100):
""" A class to estimate posteriors using the Fisher Matrix approach
Parameters
----------
likelihood: bilby.core.likelihood.Likelihood
A bilby likelihood object
priors: bilby.core.prior.PriorDict
A bilby prior object
parameters: list
Names of parameters to sample in
fd_eps: float
A parameter to control the size of perturbation used when finite
differencing the likelihood
n_prior_samples: int
The number of prior samples to draw and use to attempt estimation
of the maximum likelihood sample.
"""
self.likelihood = likelihood
if parameters is None:
self.parameter_names = priors.non_fixed_keys
else:
self.parameter_names = parameters
self.fd_eps = fd_eps
self.n_prior_samples = n_prior_samples
self.N = len(self.parameter_names)
self.prior_samples = [
priors.sample_subset(self.parameter_names) for _ in range(n_prior_samples)
]
self.prior_bounds = [(priors[key].minimum, priors[key].maximum) for key in self.parameter_names]
self.prior_width_dict = {}
for key in self.parameter_names:
width = priors[key].width
if np.isnan(width):
raise ValueError(f"Prior width is ill-formed for {key}")
self.prior_width_dict[key] = width
def log_likelihood(self, sample):
self.likelihood.parameters.update(sample)
return self.likelihood.log_likelihood()
def calculate_iFIM(self, sample):
FIM = self.calculate_FIM(sample)
iFIM = scipy.linalg.inv(FIM)
# Ensure iFIM is positive definite
min_eig = np.min(np.real(np.linalg.eigvals(iFIM)))
if min_eig < 0:
iFIM -= 10 * min_eig * np.eye(*iFIM.shape)
return iFIM
def sample_array(self, sample, n=1):
from .utils.random import rng
if sample == "maxL":
sample = self.get_maximum_likelihood_sample()
self.mean = np.array(list(sample.values()))
self.iFIM = self.calculate_iFIM(sample)
return rng.multivariate_normal(self.mean, self.iFIM, n)
def sample_dataframe(self, sample, n=1):
samples = self.sample_array(sample, n)
return pd.DataFrame(samples, columns=self.parameter_names)
def calculate_FIM(self, sample):
FIM = np.zeros((self.N, self.N))
for ii, ii_key in enumerate(self.parameter_names):
for jj, jj_key in enumerate(self.parameter_names):
FIM[ii, jj] = -self.get_second_order_derivative(sample, ii_key, jj_key)
return FIM
def get_second_order_derivative(self, sample, ii, jj):
if ii == jj:
return self.get_finite_difference_xx(sample, ii)
else:
return self.get_finite_difference_xy(sample, ii, jj)
def get_finite_difference_xx(self, sample, ii):
# Sample grid
p = self.shift_sample_x(sample, ii, 1)
m = self.shift_sample_x(sample, ii, -1)
dx = .5 * (p[ii] - m[ii])
loglp = self.log_likelihood(p)
logl = self.log_likelihood(sample)
loglm = self.log_likelihood(m)
return (loglp - 2 * logl + loglm) / dx ** 2
def get_finite_difference_xy(self, sample, ii, jj):
# Sample grid
pp = self.shift_sample_xy(sample, ii, 1, jj, 1)
pm = self.shift_sample_xy(sample, ii, 1, jj, -1)
mp = self.shift_sample_xy(sample, ii, -1, jj, 1)
mm = self.shift_sample_xy(sample, ii, -1, jj, -1)
dx = .5 * (pp[ii] - mm[ii])
dy = .5 * (pp[jj] - mm[jj])
loglpp = self.log_likelihood(pp)
loglpm = self.log_likelihood(pm)
loglmp = self.log_likelihood(mp)
loglmm = self.log_likelihood(mm)
return (loglpp - loglpm - loglmp + loglmm) / (4 * dx * dy)
def shift_sample_x(self, sample, x_key, x_coef):
vx = sample[x_key]
dvx = self.fd_eps * self.prior_width_dict[x_key]
shift_sample = sample.copy()
shift_sample[x_key] = vx + x_coef * dvx
return shift_sample
def shift_sample_xy(self, sample, x_key, x_coef, y_key, y_coef):
vx = sample[x_key]
vy = sample[y_key]
dvx = self.fd_eps * self.prior_width_dict[x_key]
dvy = self.fd_eps * self.prior_width_dict[y_key]
shift_sample = sample.copy()
shift_sample[x_key] = vx + x_coef * dvx
shift_sample[y_key] = vy + y_coef * dvy
return shift_sample
def get_maximum_likelihood_sample(self, initial_sample=None):
""" A method to attempt optimization of the maximum likelihood
This uses a simple scipy optimization approach, starting from a number
of draws from the prior to avoid problems with local optimization.
Note: this approach works well when the number of dimensions is small and
the posterior is narrow relative to the prior. If the number of dimensions
is large, or the posterior is wide relative to the prior, the method may
fail to find the global maximum.
"""
minlogL = np.inf
for i in range(self.n_prior_samples):
initial_sample = self.prior_samples[i]
x0 = list(initial_sample.values())
def neg_log_like(x, self, T=1):
sample = {key: val for key, val in zip(self.parameter_names, x)}
return - 1 / T * self.log_likelihood(sample)
out = minimize(
neg_log_like,
x0,
args=(self, 1),
bounds=self.prior_bounds,
method="L-BFGS-B",
)
if out.fun < minlogL:
minlogL = out.fun
minout = out
return {key: val for key, val in zip(self.parameter_names, minout.x)}
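To illustrate how this estimator is meant to be used, here is a minimal sketch with bilby's standard `GaussianLikelihood` and a `PriorDict` (the linear model, data, and parameter values are made up for the example; the class is assumed to be importable via `bilby.core.fisher`, as suggested by the `bilby/core/__init__.py` change above).

```
import numpy as np

import bilby


def model(x, m, c):
    return m * x + c


x = np.linspace(0, 1, 100)
rng = np.random.default_rng(0)
y = model(x, m=2.0, c=1.0) + rng.normal(0, 0.1, len(x))

likelihood = bilby.core.likelihood.GaussianLikelihood(x, y, model, sigma=0.1)
priors = bilby.core.prior.PriorDict(dict(
    m=bilby.core.prior.Uniform(0, 5, "m"),
    c=bilby.core.prior.Uniform(-2, 2, "c"),
))

fisher = bilby.core.fisher.FisherMatrixPosteriorEstimator(likelihood, priors)
# Draw approximate posterior samples about the estimated maximum-likelihood point
posterior = fisher.sample_dataframe("maxL", n=1000)
print(posterior.describe())
```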
This diff is collapsed.