
Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (137)
Showing with 3600 additions and 207 deletions
New spellcheck dictionary entries (these appear to be additions to the .dictionary.txt ignore list used by the codespell hook further below): hist, livetime, iff, amin, amax
@@ -15,6 +15,26 @@ stages:
   - docs
   - deploy
 
+# ------------------- Initial stage -------------------------------------------
+
+# Check author list is up to date
+authors:
+  stage: initial
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
+  script:
+    - python test/check_author_list.py
+
+# Test containers scripts are up to date
+containers:
+  stage: initial
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
+  script:
+    - cd containers
+    - python write_dockerfiles.py #HACK
+    # Fail if differences exist. If this fails, you may need to run
+    # write_dockerfiles.py and commit the changes.
+    - git diff --exit-code
+
 .test-python: &test-python
   stage: initial
   image: python
@@ -38,16 +58,68 @@ stages:
       ${script} --help;
     done
 
-# test basic setup on python3
+# Test basic setup on python 3.7
 basic-3.7:
   <<: *test-python
   image: python:3.7
 
-# test example on python 3.7
+# Test basic setup on python 3.8
+basic-3.8:
+  <<: *test-python
+  image: python:3.8
+
+# Test basic setup on python 3.9
+basic-3.9:
+  <<: *test-python
+  image: python:3.9
+
+precommits-py3.7:
+  stage: initial
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
+  script:
+    - source activate python37
+    - mkdir -p .pip37
+    - pip install --upgrade pip
+    - pip --cache-dir=.pip37 install --upgrade bilby
+    - pip --cache-dir=.pip37 install .
+    - pip --cache-dir=.pip37 install pre-commit
+    # Run precommits (flake8, spellcheck, isort, no merge conflicts, etc)
+    - pre-commit run --all-files --verbose --show-diff-on-failure
+
+precommits-py3.8:
+  stage: initial
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python38
+  script:
+    - source activate python38
+    - mkdir -p .pip38
+    - pip install --upgrade pip
+    - pip --cache-dir=.pip38 install --upgrade bilby
+    - pip --cache-dir=.pip38 install .
+    - pip --cache-dir=.pip38 install pre-commit
+    # Run precommits (flake8, spellcheck, isort, no merge conflicts, etc)
+    - pre-commit run --all-files --verbose --show-diff-on-failure
+
+precommits-py3.9:
+  stage: initial
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python39
+  script:
+    - source activate python39
+    - mkdir -p .pip38
+    - pip install --upgrade pip
+    - pip --cache-dir=.pip39 install --upgrade bilby
+    - pip --cache-dir=.pip39 install .
+    - pip --cache-dir=.pip39 install pre-commit
+    # Run precommits (flake8, spellcheck, isort, no merge conflicts, etc)
+    - pre-commit run --all-files --verbose --show-diff-on-failure
+
+# ------------------- Test stage -------------------------------------------
+
+# test example on python 3.7 and build coverage
 python-3.7:
   stage: test
   needs: ["basic-3.7", "precommits-py3.7"]
-  image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
   script:
     - python -m pip install .
@@ -64,114 +136,107 @@ python-3.7:
       - coverage_badge.svg
       - htmlcov/
 
-docs:
-  stage: docs
-  needs: ["basic-3.7"]
-  image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
-  script:
-    # Make the documentation
-    - apt-get -yqq install pandoc
-    - python -m pip install .
-    - cd docs
-    - pip install ipykernel ipython jupyter
-    - cp ../examples/tutorials/*.ipynb ./
-    - rm basic_ptmcmc_tutorial.ipynb
-    - rm compare_samplers.ipynb
-    - rm visualising_the_results.ipynb
-    - jupyter nbconvert --to notebook --execute *.ipynb --inplace
-    - make clean
-    - make html
-  artifacts:
-    paths:
-      - docs/_build/html/
-
 # test example on python 3.8
 python-3.8:
   stage: test
-  needs: ["basic-3.7", "precommits-py3.7"]
-  image: quay.io/bilbydev/v2-dockerfile-test-suite-python38
+  needs: ["basic-3.8", "precommits-py3.8"]
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python38
   script:
     - python -m pip install .
     - pytest
 
-# test example on python 3.6
-python-3.6:
+# test example on python 3.9
+python-3.9:
   stage: test
-  needs: ["basic-3.7", "precommits-py3.7"]
-  image: quay.io/bilbydev/v2-dockerfile-test-suite-python36
+  needs: ["basic-3.9", "precommits-py3.9"]
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python39
   script:
     - python -m pip install .
     - pytest
 
 # test samplers on python 3.7
 python-3.7-samplers:
   stage: test
   needs: ["basic-3.7", "precommits-py3.7"]
-  image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
   script:
     - python -m pip install .
     - pytest test/integration/sampler_run_test.py --durations 10
 
-# test samplers on python 3.6
-python-3.6-samplers:
+# test samplers on python 3.8
+python-3.8-samplers:
   stage: test
-  needs: ["basic-3.7", "precommits-py3.7"]
-  image: quay.io/bilbydev/v2-dockerfile-test-suite-python36
+  needs: ["basic-3.8", "precommits-py3.8"]
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python38
   script:
     - python -m pip install .
-    - pytest test/integration/sampler_run_test.py
+    - pytest test/integration/sampler_run_test.py --durations 10
 
-# Test containers are up to date
-containers:
-  stage: initial
-  image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
-  script:
-    - cd containers
-    - python write_dockerfiles.py
-    # Fail if differences exist. If this fails, you may need to run
-    # write_dockerfiles.py and commit the changes.
-    - git diff --exit-code
+# test samplers on python 3.9
+python-3.9-samplers:
+  stage: test
+  needs: ["basic-3.9", "precommits-py3.9"]
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python39
+  script:
+    - python -m pip install .
+    - pytest test/integration/sampler_run_test.py --durations 10
 
-# Tests run at a fixed schedule rather than on push
-scheduled-python-3.7:
+integration-tests-python-3.7:
   stage: test
-  image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
+  needs: ["basic-3.7", "precommits-py3.7"]
   only:
     - schedules
   script:
     - python -m pip install .
     # Run tests which are only done on schedule
     - pytest test/integration/example_test.py
 
-plotting:
+plotting-python-3.7:
   stage: test
-  image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
+  needs: ["basic-3.7", "precommits-py3.7"]
   only:
     - schedules
   script:
     - python -m pip install .
    - python -m pip install ligo.skymap
     - pytest test/gw/plot_test.py
 
-authors:
-  stage: initial
-  image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
-  script:
-    - python test/check_author_list.py
+# ------------------- Docs stage -------------------------------------------
+
+docs:
+  stage: docs
+  needs: ["python-3.7"]
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
+  script:
+    # Make the documentation
+    - apt-get -yqq install pandoc
+    - python -m pip install .
+    - cd docs
+    - pip install ipykernel ipython jupyter
+    - cp ../examples/tutorials/*.ipynb ./
+    - rm basic_ptmcmc_tutorial.ipynb
+    - rm compare_samplers.ipynb
+    - rm visualising_the_results.ipynb
+    - jupyter nbconvert --to notebook --execute *.ipynb --inplace
+    - make clean
+    - make html
+  artifacts:
+    paths:
+      - docs/_build/html/
+
+# ------------------- Deploy stage -------------------------------------------
 
-pages:
+deploy-docs:
   stage: deploy
   needs: ["docs", "python-3.7"]
-  dependencies:
-    - docs
-    - python-3.7
   script:
     - mkdir public/
     - mv htmlcov/ public/
@@ -184,9 +249,49 @@ pages:
   only:
     - master
 
-deploy_release:
-  stage: deploy
-  image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
+# Build the containers
+build-python37-container:
+  stage: deploy
+  image: docker:19.03.12
+  needs: ["containers"]
+  only:
+    - schedules
+  script:
+    - cd containers
+    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
+    - docker build --tag v3-bilby-python37 - < v3-dockerfile-test-suite-python37
+    - docker image tag v3-bilby-python37 containers.ligo.org/lscsoft/bilby/v2-bilby-python37:latest
+    - docker image push containers.ligo.org/lscsoft/bilby/v2-bilby-python37:latest
+
+build-python38-container:
+  stage: deploy
+  image: docker:19.03.12
+  needs: ["containers"]
+  only:
+    - schedules
+  script:
+    - cd containers
+    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
+    - docker build --tag v3-bilby-python38 - < v3-dockerfile-test-suite-python38
+    - docker image tag v3-bilby-python38 containers.ligo.org/lscsoft/bilby/v2-bilby-python38:latest
+    - docker image push containers.ligo.org/lscsoft/bilby/v2-bilby-python38:latest
+
+build-python39-container:
+  stage: deploy
+  image: docker:19.03.12
+  needs: ["containers"]
+  only:
+    - schedules
+  script:
+    - cd containers
+    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
+    - docker build --tag v3-bilby-python39 - < v3-dockerfile-test-suite-python39
+    - docker image tag v3-bilby-python39 containers.ligo.org/lscsoft/bilby/v2-bilby-python39:latest
+    - docker image push containers.ligo.org/lscsoft/bilby/v2-bilby-python39:latest
+
+pypi-release:
+  stage: deploy
+  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
   variables:
     TWINE_USERNAME: $PYPI_USERNAME
     TWINE_PASSWORD: $PYPI_PASSWORD
@@ -197,18 +302,3 @@ deploy_release:
     - twine upload dist/*
   only:
     - tags
-
-precommits-py3.7:
-  stage: initial
-  image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
-  script:
-    - source activate python37
-    - mkdir -p .pip37
-    - pip install --upgrade pip
-    - pip --cache-dir=.pip37 install --upgrade bilby
-    - pip --cache-dir=.pip37 install .
-    - pip --cache-dir=.pip37 install pre-commit
-    # Run precommits (flake8, spellcheck, isort, no merge conflicts, etc)
-    - pre-commit run --all-files --verbose --show-diff-on-failure
@@ -4,19 +4,26 @@ repos:
     hooks:
       - id: check-merge-conflict # prevent committing files with merge conflicts
       - id: flake8 # checks for flake8 errors
-#- repo: https://github.com/codespell-project/codespell
-#  rev: v1.16.0
-#  hooks:
-#    - id: codespell # Spellchecker
-#      args: [-L, nd, --skip, "*.ipynb,*.html", --ignore-words=.dictionary.txt]
-#      exclude: ^examples/tutorials/
-#- repo: https://github.com/asottile/seed-isort-config
-#  rev: v1.3.0
-#  hooks:
-#    - id: seed-isort-config
-#      args: [--application-directories, 'bilby/']
-#- repo: https://github.com/pre-commit/mirrors-isort
-#  rev: v4.3.21
-#  hooks:
-#    - id: isort # sort imports alphabetically and separates import into sections
-#      args: [-w=88, -m=3, -tc, -sp=setup.cfg ]
+  - repo: https://github.com/psf/black
+    rev: 20.8b1
+    hooks:
+      - id: black
+        language_version: python3
+        files: ^bilby/bilby_mcmc/
+  - repo: https://github.com/codespell-project/codespell
+    rev: v1.16.0
+    hooks:
+      - id: codespell
+        args: [--ignore-words=.dictionary.txt]
+  - repo: https://github.com/asottile/seed-isort-config
+    rev: v1.3.0
+    hooks:
+      - id: seed-isort-config
+        args: [--application-directories, 'bilby/']
+        files: ^bilby/bilby_mcmc/
+  - repo: https://github.com/pre-commit/mirrors-isort
+    rev: v4.3.21
+    hooks:
+      - id: isort # sort imports alphabetically and separates import into sections
+        args: [-w=88, -m=3, -tc, -sp=setup.cfg ]
+        files: ^bilby/bilby_mcmc/
@@ -14,7 +14,7 @@ Bruce Edelman
 Carl-Johan Haster
 Cecilio Garcia-Quiros
 Charlie Hoy
-Christopher Berry
+Christopher Philip Luke Berry
 Christos Karathanasis
 Colm Talbot
 Daniel Williams
@@ -29,11 +29,14 @@ Ignacio Magaña Hernandez
 Isobel Marguarethe Romero-Shaw
 Jade Powell
 James A Clark
+Jeremy G Baier
 John Veitch
+Joshua Brandt
 Katerina Chatziioannou
 Kaylee de Soto
 Khun Sang Phukon
 Kshipraa Athar
+Leslie Wade
 Liting Xiao
 Maite Mateu-Lucena
 Marc Arene
@@ -49,10 +52,12 @@ Moritz Huebner
 Nicola De Lillo
 Nikhil Sarin
 Nirban Bose
+Olivia Wilk
 Paul Easter
 Paul Lasky
 Philip Relton
 Rhys Green
+Rico Lo
 Roberto Cotesta
 Rory Smith
 S. H. Oh
@@ -69,3 +74,4 @@ Sylvia Biscoveanu
 Tathagata Ghosh
 Virginia d'Emilio
 Vivien Raymond
+Ka-Lok Lo
# All notable changes will be documented in this file
## [1.1.4] 2021-10-08
Version 1.1.4 release of bilby
### Added
- Version of dynesty pinned to less than v1.1 to anticipate breaking changes (!1020)
- Pool to computation of SNR (!1013)
- Ability to load results produced with custom priors (!1010)
- The nestcheck test (!1005)
- Bilby-mcmc guide to docs (!1001)
- Codespell pre-commit (!996)
- MBGravitationalWaveTransient (!972)
- Zeus MCMC sampler support (!962)
- Option to use print rather than tqdm (!937)
### Changes
- Updates citation guide (!1030)
- Minor bug fixes (!1029, !1025, !1022, !1016, !1018, !1014, !1007, !1004)
- Typo fix in eart light crossing (!1003)
- Fix zero spin conversion (!1002)
## [1.1.3] 2021-07-02
Version 1.1.3 release of bilby
### Added
- Added `Categorical` prior (!982)(!990)
- Added a built-in mcmc sampler (`bilby_mcmc`) (!905)(!985)
- Added run statistics to the `dynesty` meta data (!969)
- Added `cdf` method to `PriorDict` classes (!943)
### Changes
- Removed the autoburnin causing `kombine` to fail the CI tests (!988)
- Sped up the spline interpolation in ROQ (!971)
- Replaced bessel interpolant to scipy function (!976)
- Improved checkpoint stats plot (!977)
- Fixed a typo in the sampler documentation (!986)
- Fixed issue that causes ConditionalDeltaFunction posterior samples not to be saved correctly (!973)
- Solved an issue where injected SNRs were logged incorrectly (!980)
- Made Python 3.6+ a specific requirement (!978)
- Fixed the calibration and time marginalized likelihood (!978)
- Removed a possible error in the distance marginalization (!960)
- Fixed an issue where `check_draw` did not catch `np.nan` values (!965)
- Removed a superfluous line in the docs configuration file (!963)
- Added a warning about class side effects to the `GravtiationalWaveTransient` likelihood classes (!964)
- Allow `ptemcee` initialization with array (!955)
- Removed `Prior.test_valid_for_rescaling` (!956)
- Replaced deprecated numpy aliases builtins (!970)
- Fixed a bug in the algorithm to determine time resolution of ROQ (!967)
- Restructured utils module into several submodules. API remains backwards compatible (!873)
- Changed number of default walks in `dynesty` from `10*self.ndim` to `100` (!961)
 ## [1.1.2] 2021-05-05
 Version 1.1.2 release of bilby
@@ -69,7 +119,7 @@ Version 1.1.0 release of bilby
 - Fixed `ultranest` from failing
 - Fixed issues with plotting failing in tests (!904)
 - Changed the CI to run on auto-built images (!899)
-- Resolved a `matplotlib` error occuring at `dynesty` checkpoint plots (!902)
+- Resolved a `matplotlib` error occurring at `dynesty` checkpoint plots (!902)
 - Fixed the multidimensional Gaussian example (!901)
 - Now allow any lal dictionary option and added a numerical relativity file (!896)
 - Fixed the likelihood count in `dynesty` (!853)
@@ -141,7 +191,7 @@ Version 1.0.1 release of bilby
 - Fixed a minor issue with conditional priors that could cause unexpected behaviour in edge cases (!838)
 - Fixed `__repr__` method in the `FromFile` prior (!836)
 - Fixed an issue that caused problems for some users when plotting with a latex backend (!816)
-- Fixed bug that occured when min/max of interpolated priors was changed (!815)
+- Fixed bug that occurred when min/max of interpolated priors was changed (!815)
 - Fixed time domain waveform epoch (!736)
 - Fixed time keeping in multinest (!830)
 - Now checks if marginalised priors were defined before marginalising (!829)
@@ -179,7 +229,7 @@ for details
 - Updated the default PSD to O4 (!757)
 - Make multinest allow long file names, optional and work with MPI (!764 !785)
 - Add min/max to aligned spin prior (!787)
-- Reduce redudant code (!703)
+- Reduce redundant code (!703)
 - Added testing for python 3.8 (!762)
 - Improvements to the waveform plot (!769)
@@ -280,7 +330,7 @@ parameters.
 ## Changes
 - Speed up the prior evaluations by implementing directly with checks to scipy !627
-- Soft initalisation option for the Sampler class !620
+- Soft initialisation option for the Sampler class !620
 - Improvements to JSON reading and writing for functions !621
 - Fixed bug in prior reading !618 !617
 - Fixes to the examples !619 !614 !626 !616
@@ -341,7 +391,7 @@ parameters.
 - Removed the sqrt(2) normalisation from the scalar longitudinal mode
 - Improve PSD filename reading (no longer required "/" to read local files)
 - Fix bug in emcee chains
-- Added a try/except cluase for building the lookup table
+- Added a try/except clause for building the lookup table
 
 ## [0.5.4] 2019-07-30
@@ -382,7 +432,7 @@ shown (!564) to provide better convergence for the long-duration high-spin tests
 ### Changed
 - Updated and fixed bugs in examples
-- Resolve sampling time persistence for runs which are interupted
+- Resolve sampling time persistence for runs which are interrupted
 - Improvements to the PP plot
 - Speed up of the distance calculation
 - Fixed a bug in the inteference of bilby command line arguments with user specified command lines
@@ -678,7 +728,7 @@ re-instantiate the Prior in most cases
 - Changed to using `setuptools` for installation.
 - Clean up of real data handling: all data is now windowed with a 0.4s roll off (unless set otherwise) and low-pass filtered.
 - Add explicit method to create a power spectral density from time-domain data
-- Clean up of `PowerSpectralDensity()` - addds `set_from` methods to handle various ways to define the PSD.
+- Clean up of `PowerSpectralDensity()` - adds `set_from` methods to handle various ways to define the PSD.
 - Clean up of `detectors.py`: adds an `InterferometerStrainData` to handle strain data and `InterferometerSet` to handle multiple interferometers. All data setting should primarily be done through the `Interferometer.set_strain_data..` methods.
 - Fix the comments and units of `nfft` and `infft` and general improvement to documentation of data.
 - Fixed a bug in create_time_series
......
@@ -275,7 +275,7 @@ help orient users and make it easier to contribute. The layout is intended to
 define the logic of the code and new merge requests should aim to fit within
 this logic (unless there is a good argument to change it). For example, code
 which adds a new sampler should not effect the gravitational-wave specific
-parts of the code. Note that this document is not programatically generated and
+parts of the code. Note that this document is not programmatically generated and
 so may get out of date with time. If you notice something wrong, please open an
 issue.
......
@@ -35,6 +35,12 @@ If you use :code:`bilby` in a scientific publication, please cite
 * `Bilby: A user-friendly Bayesian inference library for gravitational-wave
   astronomy
   <https://ui.adsabs.harvard.edu/#abs/2018arXiv181102042A/abstract>`__
+* `Bayesian inference for compact binary coalescences with BILBY: validation and application to the first LIGO-Virgo gravitational-wave transient catalogue <https://ui.adsabs.harvard.edu/abs/2020MNRAS.499.3295R/abstract>`__
+
+The first of these papers introduces the software, while the second introduces advances in the sampling approaches and validation of the software.
+If you use the :code:`bilby_mcmc` sampler, please additionally cite
+
+* `BILBY-MCMC: an MCMC sampler for gravitational-wave inference <https://ui.adsabs.harvard.edu/abs/2021MNRAS.507.2037A/abstract>`__
 
 Additionally, :code:`bilby` builds on a number of open-source packages. If you
 make use of this functionality in your publications, we recommend you cite them
......
from .sampler import Bilby_MCMC
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from ..core.sampler.base_sampler import SamplerError
from ..core.utils import logger
from .utils import LOGLKEY, LOGLLATEXKEY, LOGPKEY, LOGPLATEXKEY
class Chain(object):
def __init__(
self,
initial_sample,
burn_in_nact=1,
thin_by_nact=1,
fixed_discard=0,
autocorr_c=5,
min_tau=1,
fixed_tau=None,
tau_window=None,
block_length=100000,
):
"""Object to store a single mcmc chain
Parameters
----------
initial_sample: bilby.bilby_mcmc.chain.Sample
The starting point of the chain
burn_in_nact, thin_by_nact : int (1, 1)
The number of autocorrelation times (tau) to discard for burn-in
and the multiplicative factor to thin by (thin_by_nact < 1). I.e
burn_in_nact=10 and thin_by_nact=1 will discard 10*tau samples from
the start of the chain, then thin the final chain by a factor
of 1*tau (resulting in independent samples).
fixed_discard: int (0)
A fixed minimum number of samples to discard (can be used to
override the burn_in_nact if it is too small).
autocorr_c: float (5)
The step size of the window search used by emcee.autocorr when
estimating the autocorrelation time.
min_tau: int (1)
A minimum value for the autocorrelation time.
fixed_tau: int (None)
A fixed value for the autocorrelation (overrides the automated
autocorrelation time estimation). Used in testing.
tau_window: int (None)
Only calculate the autocorrelation time in a trailing window. If
None (default) this method is not used.
block_length: int
The incremental size to extend the array by when it runs out of
space.
"""
self.autocorr_c = autocorr_c
self.min_tau = min_tau
self.burn_in_nact = burn_in_nact
self.thin_by_nact = thin_by_nact
self.block_length = block_length
self.fixed_discard = int(fixed_discard)
self.fixed_tau = fixed_tau
self.tau_window = tau_window
self.ndim = initial_sample.ndim
self.current_sample = initial_sample
self.keys = self.current_sample.keys
self.parameter_keys = self.current_sample.parameter_keys
# Initialize chain
self._chain_array = self._get_zero_chain_array()
self._chain_array_length = block_length
self.position = -1
self.max_log_likelihood = -np.inf
self.max_tau_dict = {}
self.converged = False
self.cached_tau_count = 0
self._minimum_index_proposal = 0
self._minimum_index_adapt = 0
self._last_minimum_index = (0, 0, "I")
self.last_full_tau_dict = {key: np.inf for key in self.parameter_keys}
# Append the initial sample
self.append(self.current_sample)
def _get_zero_chain_array(self):
return np.zeros((self.block_length, self.ndim + 2), dtype=np.float64)
def _extend_chain_array(self):
self._chain_array = np.concatenate(
(self._chain_array, self._get_zero_chain_array()), axis=0
)
self._chain_array_length = len(self._chain_array)
@property
def current_sample(self):
return self._current_sample.copy()
@current_sample.setter
def current_sample(self, current_sample):
self._current_sample = current_sample
def append(self, sample):
self.position += 1
# Extend the array if needed
if self.position >= self._chain_array_length:
self._extend_chain_array()
# Store the current sample and append to the array
self.current_sample = sample
self._chain_array[self.position] = sample.list
# Update the maximum log_likelihood
if sample[LOGLKEY] > self.max_log_likelihood:
self.max_log_likelihood = sample[LOGLKEY]
def __getitem__(self, index):
if index < 0:
index = index + self.position + 1
if index <= self.position:
values = self._chain_array[index]
return Sample({k: v for k, v in zip(self.keys, values)})
else:
raise SamplerError(f"Requested index {index} out of bounds")
def __setitem__(self, index, sample):
if index < 0:
index = index + self.position + 1
self._chain_array[index] = sample.list
def key_to_idx(self, key):
return self.keys.index(key)
def get_1d_array(self, key):
return self._chain_array[: 1 + self.position, self.key_to_idx(key)]
@property
def _random_idx(self):
mindex = self._last_minimum_index[1]
# Check if mindex exceeds current position by 10 ACT: if so use a random sample
# otherwise we draw only from the chain past the minimum_index
if np.isinf(self.tau_last) or self.position - mindex < 10 * self.tau_last:
mindex = 0
return np.random.randint(mindex, self.position + 1)
@property
def random_sample(self):
return self[self._random_idx]
@property
def fixed_discard(self):
return self._fixed_discard
@fixed_discard.setter
def fixed_discard(self, fixed_discard):
self._fixed_discard = int(fixed_discard)
@property
def minimum_index(self):
"""This calculated a minimum index from which to discard samples
A number of methods are provided for the calculation. A subset are
switched off (by `if False` statements) for future development
"""
position = self.position
# Return cached minimum index
last_minimum_index = self._last_minimum_index
if position == last_minimum_index[0]:
return int(last_minimum_index[1])
# If fixed discard is not yet reached, just return that
if position < self.fixed_discard:
self.minimum_index_method = "FD"
return self.fixed_discard
# Initialize list of minimum index methods with the fixed discard (FD)
minimum_index_list = [self.fixed_discard]
minimum_index_method_list = ["FD"]
# Calculate minimum index from tau
if self.tau_last < np.inf:
tau = self.tau_last
elif len(self.max_tau_dict) == 0:
# Bootstrap calculating tau when minimum index has not yet been calculated
tau = self._tau_for_full_chain
else:
tau = np.inf
if tau < np.inf:
minimum_index_list.append(self.burn_in_nact * tau)
minimum_index_method_list.append(f"{self.burn_in_nact}tau")
# Calculate points when log-posterior is within z std of the mean
if True:
zfactor = 1
N = 100
delta_lnP = zfactor * self.ndim / 2
logl = self.get_1d_array(LOGLKEY)
log_prior = self.get_1d_array(LOGPKEY)
log_posterior = logl + log_prior
max_posterior = np.max(log_posterior)
ave = pd.Series(log_posterior).rolling(window=N).mean().iloc[N - 1 :]
delta = max_posterior - ave
passes = ave[delta < delta_lnP]
if len(passes) > 0:
minimum_index_list.append(passes.index[0] + 1)
minimum_index_method_list.append(f"z{zfactor}")
# Add last minimum_index_method
if False:
minimum_index_list.append(last_minimum_index[1])
minimum_index_method_list.append(last_minimum_index[2])
# Minimum index set by proposals
minimum_index_list.append(self.minimum_index_proposal)
minimum_index_method_list.append("PR")
# Minimum index set by temperature adaptation
minimum_index_list.append(self.minimum_index_adapt)
minimum_index_method_list.append("AD")
# Calculate the maximum minimum index and associated method (reporting)
minimum_index = int(np.max(minimum_index_list))
minimum_index_method = minimum_index_method_list[np.argmax(minimum_index_list)]
# Cache the method
self._last_minimum_index = (position, minimum_index, minimum_index_method)
self.minimum_index_method = minimum_index_method
return minimum_index
@property
def minimum_index_proposal(self):
return self._minimum_index_proposal
@minimum_index_proposal.setter
def minimum_index_proposal(self, minimum_index_proposal):
if minimum_index_proposal > self._minimum_index_proposal:
self._minimum_index_proposal = minimum_index_proposal
@property
def minimum_index_adapt(self):
return self._minimum_index_adapt
@minimum_index_adapt.setter
def minimum_index_adapt(self, minimum_index_adapt):
if minimum_index_adapt > self._minimum_index_adapt:
self._minimum_index_adapt = minimum_index_adapt
@property
def tau(self):
""" The maximum ACT over all parameters """
if self.position in self.max_tau_dict:
# If we have the ACT at the current position, return it
return self.max_tau_dict[self.position]
elif (
self.tau_last < np.inf
and self.cached_tau_count < 50
and self.nsamples_last > 50
):
# If we have a recent ACT return it
self.cached_tau_count += 1
return self.tau_last
else:
# Calculate the ACT
return self.tau_nocache
@property
def tau_nocache(self):
""" Calculate tau forcing a recalculation (no cached tau) """
tau = max(self.tau_dict.values())
self.max_tau_dict[self.position] = tau
self.cached_tau_count = 0
return tau
@property
def tau_last(self):
""" Return the last-calculated tau if it exists, else inf """
if len(self.max_tau_dict) > 0:
return list(self.max_tau_dict.values())[-1]
else:
return np.inf
@property
def _tau_for_full_chain(self):
""" The maximum ACT over all parameters """
return max(self._tau_dict_for_full_chain.values())
@property
def _tau_dict_for_full_chain(self):
return self._calculate_tau_dict(minimum_index=0)
@property
def tau_dict(self):
""" Calculate a dictionary of tau (ACT) for every parameter """
return self._calculate_tau_dict(self.minimum_index)
def _calculate_tau_dict(self, minimum_index):
""" Calculate a dictionary of tau (ACT) for every parameter """
logger.debug(f"Calculating tau_dict {self}")
# If there are too few samples to calculate tau
if (self.position - minimum_index) < 2 * self.autocorr_c:
return {key: np.inf for key in self.parameter_keys}
# Choose minimum index for the ACT calculation
last_tau = self.tau_last
if self.tau_window is not None and last_tau < np.inf:
minimum_index_for_act = max(
minimum_index, int(self.position - self.tau_window * last_tau)
)
else:
minimum_index_for_act = minimum_index
# Calculate a dictionary of tau's for each parameter
taus = {}
for key in self.parameter_keys:
if self.fixed_tau is None:
x = self.get_1d_array(key)[minimum_index_for_act:]
tau = calculate_tau(x, self.autocorr_c)
taux = round(tau, 1)
else:
taux = self.fixed_tau
taus[key] = max(taux, self.min_tau)
# Cache the last tau dictionary for future use
self.last_full_tau_dict = taus
return taus
@property
def thin(self):
if np.isfinite(self.tau):
return np.max([1, int(self.thin_by_nact * self.tau)])
else:
return 1
@property
def nsamples(self):
nuseable_steps = self.position - self.minimum_index
return int(nuseable_steps / (self.thin_by_nact * self.tau))
@property
def nsamples_last(self):
nuseable_steps = self.position - self.minimum_index
return int(nuseable_steps / (self.thin_by_nact * self.tau_last))
@property
def samples(self):
samples = self._chain_array[self.minimum_index : self.position : self.thin]
return pd.DataFrame(samples, columns=self.keys)
def plot(self, outdir=".", label="label", priors=None, all_samples=None):
import matplotlib.pyplot as plt
fig, axes = plt.subplots(
nrows=self.ndim + 3, ncols=2, figsize=(8, 9 + 3 * (self.ndim))
)
scatter_kwargs = dict(
lw=0,
marker="o",
)
K = 1000
nburn = self.minimum_index
plot_setups = zip(
[0, nburn, nburn],
[nburn, self.position, self.position],
[1, 1, self.thin], # Thin-by factor
["tab:red", "tab:grey", "tab:blue"], # Color
[0.5, 0.05, 0.5], # Alpha
[1, 1, 1], # Marker size
)
position_indexes = np.arange(self.position + 1)
# Plot the traceplots
for (start, stop, thin, color, alpha, ms) in plot_setups:
for ax, key in zip(axes[:, 0], self.keys):
xx = position_indexes[start:stop:thin] / K
yy = self.get_1d_array(key)[start:stop:thin]
# Downsample plots to max_pts: avoid memory issues
max_pts = 10000
while len(xx) > max_pts:
xx = xx[::2]
yy = yy[::2]
ax.plot(
xx,
yy,
color=color,
alpha=alpha,
ms=ms,
**scatter_kwargs,
)
ax.set_ylabel(self._get_plot_label_by_key(key, priors))
if key not in [LOGLKEY, LOGPKEY]:
msg = r"$\tau=$" + f"{self.last_full_tau_dict[key]:0.1f}"
ax.set_title(msg)
# Plot the histograms
for ax, key in zip(axes[:, 1], self.keys):
if all_samples is not None:
yy_all = all_samples[key]
ax.hist(yy_all, bins=50, alpha=0.6, density=True, color="k")
yy = self.get_1d_array(key)[nburn : self.position : self.thin]
ax.hist(yy, bins=50, alpha=0.8, density=True)
ax.set_xlabel(self._get_plot_label_by_key(key, priors))
# Add x-axes labels to the traceplots
axes[-1, 0].set_xlabel(r"Iteration $[\times 10^{3}]$")
# Plot the calculated ACT
ax = axes[-1, 0]
tausit = np.array(list(self.max_tau_dict.keys()) + [self.position]) / K
taus = list(self.max_tau_dict.values()) + [self.tau_last]
ax.plot(tausit, taus, color="C3")
ax.set(ylabel=r"Maximum $\tau$")
axes[-1, 1].set_axis_off()
filename = "{}/{}_checkpoint_trace.png".format(outdir, label)
msg = [
r"Maximum $\tau$" + f"={self.tau:0.1f} ",
r"$n_{\rm samples}=$" + f"{self.nsamples} ",
]
if self.thin_by_nact != 1:
msg += [
r"$n_{\rm samples}^{\rm eff}=$"
+ f"{int(self.nsamples * self.thin_by_nact)} "
]
fig.suptitle(
"| ".join(msg),
y=1,
)
fig.tight_layout()
fig.savefig(filename, dpi=200)
plt.close(fig)
@staticmethod
def _get_plot_label_by_key(key, priors=None):
if priors is not None and key in priors:
return priors[key].latex_label
elif key == LOGLKEY:
return LOGLLATEXKEY
elif key == LOGPKEY:
return LOGPLATEXKEY
else:
return key
class Sample(object):
def __init__(self, sample_dict):
"""A single sample
Parameters
----------
sample_dict: dict
A dictionary of the sample
"""
self.sample_dict = sample_dict
self.keys = list(sample_dict.keys())
self.parameter_keys = [k for k in self.keys if k not in [LOGPKEY, LOGLKEY]]
self.ndim = len(self.parameter_keys)
def __getitem__(self, key):
return self.sample_dict[key]
def __setitem__(self, key, value):
self.sample_dict[key] = value
if key not in self.keys:
self.keys = list(self.sample_dict.keys())
@property
def list(self):
return list(self.sample_dict.values())
def __repr__(self):
return str(self.sample_dict)
@property
def parameter_only_dict(self):
return {key: self.sample_dict[key] for key in self.parameter_keys}
@property
def dict(self):
return {key: self.sample_dict[key] for key in self.keys}
def as_dict(self, keys=None):
sdict = self.dict
if keys is None:
return sdict
else:
return {key: sdict[key] for key in keys}
def __eq__(self, other_sample):
return self.list == other_sample.list
def copy(self):
return Sample(self.sample_dict.copy())
def calculate_tau(x, autocorr_c=5):
import emcee
if LooseVersion(emcee.__version__) < LooseVersion("3"):
raise SamplerError("bilby-mcmc requires emcee > 3.0 for autocorr analysis")
if np.all(np.diff(x) == 0):
return np.inf
try:
# Hard code tol=1: we perform this check internally
tau = emcee.autocorr.integrated_time(x, c=autocorr_c, tol=1)[0]
if np.isnan(tau):
tau = np.inf
return tau
except emcee.autocorr.AutocorrError:
return np.inf
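For orientation, a minimal usage sketch of the Chain and Sample classes above. The import path and the toy update loop are assumptions for illustration, not part of the diff (the real sampler fills the chain via its proposal/accept step).

import numpy as np
from bilby.bilby_mcmc.chain import Chain, Sample  # assumed import path

start = Sample({"x": 0.0, "y": 0.0, "logl": -10.0, "logp": 0.0})
chain = Chain(initial_sample=start, burn_in_nact=10, thin_by_nact=1)

rng = np.random.default_rng(42)
for _ in range(5000):
    # Random draws stand in for proposed samples, just to exercise the bookkeeping.
    chain.append(Sample({
        "x": rng.normal(), "y": rng.normal(),
        "logl": -10.0 + rng.normal(), "logp": 0.0,
    }))

print(chain.tau)        # maximum autocorrelation time over the parameters
print(chain.nsamples)   # independent samples after burn-in and thinning
samples_df = chain.samples  # pandas DataFrame of the burnt-in, thinned chain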
import torch
from nflows.distributions.normal import StandardNormal
from nflows.flows.base import Flow
from nflows.nn import nets as nets
from nflows.transforms import (
CompositeTransform,
MaskedAffineAutoregressiveTransform,
RandomPermutation,
)
from nflows.transforms.coupling import (
AdditiveCouplingTransform,
AffineCouplingTransform,
)
from nflows.transforms.normalization import BatchNorm
from torch.nn import functional as F
# Turn off parallelism
torch.set_num_threads(1)
torch.set_num_interop_threads(1)
class NVPFlow(Flow):
"""A simplified version of Real NVP for 1-dim inputs.
This implementation uses 1-dim checkerboard masking but doesn't use
multi-scaling.
Reference:
> L. Dinh et al., Density estimation using Real NVP, ICLR 2017.
This class has been modified from the example found at:
https://github.com/bayesiains/nflows/blob/master/nflows/flows/realnvp.py
"""
def __init__(
self,
features,
hidden_features,
num_layers,
num_blocks_per_layer,
use_volume_preserving=False,
activation=F.relu,
dropout_probability=0.0,
batch_norm_within_layers=False,
batch_norm_between_layers=False,
random_permutation=True,
):
if use_volume_preserving:
coupling_constructor = AdditiveCouplingTransform
else:
coupling_constructor = AffineCouplingTransform
mask = torch.ones(features)
mask[::2] = -1
def create_resnet(in_features, out_features):
return nets.ResidualNet(
in_features,
out_features,
hidden_features=hidden_features,
num_blocks=num_blocks_per_layer,
activation=activation,
dropout_probability=dropout_probability,
use_batch_norm=batch_norm_within_layers,
)
layers = []
for _ in range(num_layers):
transform = coupling_constructor(
mask=mask, transform_net_create_fn=create_resnet
)
layers.append(transform)
mask *= -1
if batch_norm_between_layers:
layers.append(BatchNorm(features=features))
if random_permutation:
layers.append(RandomPermutation(features=features))
super().__init__(
transform=CompositeTransform(layers),
distribution=StandardNormal([features]),
)
class BasicFlow(Flow):
def __init__(self, features):
transform = CompositeTransform(
[
MaskedAffineAutoregressiveTransform(
features=features, hidden_features=2 * features
),
RandomPermutation(features=features),
]
)
distribution = StandardNormal(shape=[features])
super().__init__(
transform=transform,
distribution=distribution,
)
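A short, illustrative sketch of how the NVPFlow defined above can be fitted to a set of samples and then used to propose new points. The training loop, data, and settings are assumptions for illustration, not part of the diff.

import torch

# Build the flow for 2-dimensional samples (illustrative settings).
flow = NVPFlow(features=2, hidden_features=16, num_layers=4, num_blocks_per_layer=2)
optimizer = torch.optim.Adam(flow.parameters(), lr=1e-3)

data = torch.randn(1000, 2)  # stand-in for chain samples
for _ in range(100):
    optimizer.zero_grad()
    loss = -flow.log_prob(inputs=data).mean()  # maximise the likelihood of the data
    loss.backward()
    optimizer.step()

with torch.no_grad():
    proposals = flow.sample(10)       # new points drawn from the trained flow
    log_q = flow.log_prob(proposals)  # densities needed for an acceptance ratio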
(Two further file diffs are collapsed in this view and not shown.)
from collections import namedtuple
LOGLKEY = "logl"
LOGLLATEXKEY = r"$\log\mathcal{L}$"
LOGPKEY = "logp"
LOGPLATEXKEY = r"$\log\pi$"
ConvergenceInputs = namedtuple(
"ConvergenceInputs",
[
"autocorr_c",
"burn_in_nact",
"thin_by_nact",
"fixed_discard",
"target_nsamples",
"stop_after_convergence",
"L1steps",
"L2steps",
"min_tau",
"fixed_tau",
"tau_window",
],
)
ParallelTemperingInputs = namedtuple(
"ParallelTemperingInputs",
[
"ntemps",
"nensemble",
"Tmax",
"Tmax_from_SNR",
"initial_betas",
"adapt",
"adapt_t0",
"adapt_nu",
"pt_ensemble",
],
)
@@ -191,8 +191,10 @@ class Grid(object):
         places = self.sample_points[name]
 
         if len(places) > 1:
+            dx = np.diff(places)
             out = np.apply_along_axis(
-                logtrapzexp, axis, log_array, places[1] - places[0])
+                logtrapzexp, axis, log_array, dx
+            )
         else:
             # no marginalisation required, just remove the singleton dimension
             z = log_array.shape
......
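The change above passes the full array of grid spacings, np.diff(places), so marginalisation also works on non-uniform grids. A small self-contained numpy/scipy sketch of log-space trapezoidal integration with non-uniform spacing (not bilby's logtrapzexp itself) illustrates the idea:

import numpy as np
from scipy.special import logsumexp

places = np.array([0.0, 0.1, 0.3, 0.7, 1.5])  # non-uniform grid points
log_f = -0.5 * places ** 2                    # log of the integrand on the grid

dx = np.diff(places)
# Trapezium rule on exp(log_f), evaluated in log space for numerical stability:
# each panel contributes dx_i * (f_i + f_{i+1}) / 2.
log_integral = logsumexp(np.logaddexp(log_f[:-1], log_f[1:]) + np.log(dx / 2))

print(np.exp(log_integral), np.trapz(np.exp(log_f), places))  # the two should agree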
@@ -40,7 +40,6 @@ class DeltaFunction(Prior):
         =======
         float: Rescaled probability, equivalent to peak
         """
-        self.test_valid_for_rescaling(val)
         return self.peak * val ** 0
 
     def prob(self, val):
@@ -105,7 +104,6 @@ class PowerLaw(Prior):
         =======
         Union[float, array_like]: Rescaled probability
         """
-        self.test_valid_for_rescaling(val)
         if self.alpha == -1:
             return self.minimum * np.exp(val * np.log(self.maximum / self.minimum))
         else:
@@ -206,7 +204,6 @@ class Uniform(Prior):
         =======
         Union[float, array_like]: Rescaled probability
         """
-        self.test_valid_for_rescaling(val)
         return self.minimum + val * (self.maximum - self.minimum)
 
     def prob(self, val):
@@ -273,7 +270,7 @@ class SymmetricLogUniform(Prior):
     def __init__(self, minimum, maximum, name=None, latex_label=None,
                  unit=None, boundary=None):
-        """Symmetric Log-Uniform distribtions with bounds
+        """Symmetric Log-Uniform distributions with bounds
 
         This is identical to a Log-Uniform distribution, but mirrored about
         the zero-axis and subsequently normalized. As such, the distribution
@@ -314,7 +311,6 @@ class SymmetricLogUniform(Prior):
         =======
         Union[float, array_like]: Rescaled probability
         """
-        self.test_valid_for_rescaling(val)
         if isinstance(val, (float, int)):
             if val < 0.5:
                 return -self.maximum * np.exp(-2 * val * np.log(self.maximum / self.minimum))
@@ -401,7 +397,6 @@ class Cosine(Prior):
         This maps to the inverse CDF. This has been analytically solved for this case.
         """
-        self.test_valid_for_rescaling(val)
         norm = 1 / (np.sin(self.maximum) - np.sin(self.minimum))
         return np.arcsin(val / norm + np.sin(self.minimum))
@@ -456,7 +451,6 @@ class Sine(Prior):
         This maps to the inverse CDF. This has been analytically solved for this case.
         """
-        self.test_valid_for_rescaling(val)
         norm = 1 / (np.cos(self.minimum) - np.cos(self.maximum))
         return np.arccos(np.cos(self.minimum) - val / norm)
@@ -515,7 +509,6 @@ class Gaussian(Prior):
         This maps to the inverse CDF. This has been analytically solved for this case.
         """
-        self.test_valid_for_rescaling(val)
         return self.mu + erfinv(2 * val - 1) * 2 ** 0.5 * self.sigma
 
     def prob(self, val):
@@ -602,7 +595,6 @@ class TruncatedGaussian(Prior):
         This maps to the inverse CDF. This has been analytically solved for this case.
         """
-        self.test_valid_for_rescaling(val)
         return erfinv(2 * val * self.normalisation + erf(
             (self.minimum - self.mu) / 2 ** 0.5 / self.sigma)) * 2 ** 0.5 * self.sigma + self.mu
@@ -695,7 +687,6 @@ class LogNormal(Prior):
         This maps to the inverse CDF. This has been analytically solved for this case.
         """
-        self.test_valid_for_rescaling(val)
         return np.exp(self.mu + np.sqrt(2 * self.sigma ** 2) * erfinv(2 * val - 1))
 
     def prob(self, val):
@@ -790,7 +781,6 @@ class Exponential(Prior):
         This maps to the inverse CDF. This has been analytically solved for this case.
         """
-        self.test_valid_for_rescaling(val)
         return -self.mu * log1p(-val)
 
     def prob(self, val):
@@ -887,7 +877,6 @@ class StudentT(Prior):
         This maps to the inverse CDF. This has been analytically solved for this case.
         """
-        self.test_valid_for_rescaling(val)
         if isinstance(val, (float, int)):
             if val == 0:
                 rescaled = -np.inf
@@ -977,7 +966,6 @@ class Beta(Prior):
         This maps to the inverse CDF. This has been analytically solved for this case.
         """
-        self.test_valid_for_rescaling(val)
         return btdtri(self.alpha, self.beta, val) * (self.maximum - self.minimum) + self.minimum
 
     def prob(self, val):
@@ -1013,7 +1001,7 @@ class Beta(Prior):
                 return _ln_prob
             return -np.inf
         else:
-            _ln_prob_sub = -np.inf * np.ones(val.size)
+            _ln_prob_sub = np.full_like(val, -np.inf)
             idx = np.isfinite(_ln_prob) & (val >= self.minimum) & (val <= self.maximum)
             _ln_prob_sub[idx] = _ln_prob[idx]
             return _ln_prob_sub
@@ -1070,7 +1058,6 @@ class Logistic(Prior):
         This maps to the inverse CDF. This has been analytically solved for this case.
         """
-        self.test_valid_for_rescaling(val)
         if isinstance(val, (float, int)):
             if val == 0:
                 rescaled = -np.inf
@@ -1151,7 +1138,6 @@ class Cauchy(Prior):
         This maps to the inverse CDF. This has been analytically solved for this case.
         """
-        self.test_valid_for_rescaling(val)
         rescaled = self.alpha + self.beta * np.tan(np.pi * (val - 0.5))
         if isinstance(val, (float, int)):
             if val == 1:
@@ -1233,7 +1219,6 @@ class Gamma(Prior):
         This maps to the inverse CDF. This has been analytically solved for this case.
         """
-        self.test_valid_for_rescaling(val)
         return gammaincinv(self.k, val) * self.theta
 
     def prob(self, val):
@@ -1385,8 +1370,6 @@ class FermiDirac(Prior):
         .. [1] M. Pitkin, M. Isi, J. Veitch & G. Woan, `arXiv:1705.08978v1
            <https:arxiv.org/abs/1705.08978v1>`_, 2017.
         """
-        self.test_valid_for_rescaling(val)
-
         inv = (-np.exp(-1. * self.r) + (1. + np.exp(self.r)) ** -val +
                np.exp(-1. * self.r) * (1. + np.exp(self.r)) ** -val)
@@ -1440,3 +1423,97 @@ class FermiDirac(Prior):
         idx = val >= self.minimum
         lnp[idx] = norm - np.logaddexp((val[idx] / self.sigma) - self.r, 0.)
         return lnp
class Categorical(Prior):
def __init__(self, ncategories, name=None, latex_label=None,
unit=None, boundary="periodic"):
""" An equal-weighted Categorical prior
Parameters:
-----------
ncategories: int
The number of available categories. The prior mass support is then
integers [0, ncategories - 1].
name: str
See superclass
latex_label: str
See superclass
unit: str
See superclass
"""
minimum = 0
# Small delta added to help with MCMC walking
maximum = ncategories - 1 + 1e-15
super(Categorical, self).__init__(
name=name, latex_label=latex_label, minimum=minimum,
maximum=maximum, unit=unit, boundary=boundary)
self.ncategories = ncategories
self.categories = np.arange(self.minimum, self.maximum)
self.p = 1 / self.ncategories
self.lnp = -np.log(self.ncategories)
def rescale(self, val):
"""
'Rescale' a sample from the unit line element to the categorical prior.
This maps to the inverse CDF. This has been analytically solved for this case.
Parameters
==========
val: Union[float, int, array_like]
Uniform probability
Returns
=======
Union[float, array_like]: Rescaled probability
"""
return np.floor(val * (1 + self.maximum))
def prob(self, val):
"""Return the prior probability of val.
Parameters
==========
val: Union[float, int, array_like]
Returns
=======
float: Prior probability of val
"""
if isinstance(val, (float, int)):
if val in self.categories:
return self.p
else:
return 0
else:
val = np.atleast_1d(val)
probs = np.zeros_like(val, dtype=np.float64)
idxs = np.isin(val, self.categories)
probs[idxs] = self.p
return probs
def ln_prob(self, val):
"""Return the logarithmic prior probability of val
Parameters
==========
val: Union[float, int, array_like]
Returns
=======
float:
"""
if isinstance(val, (float, int)):
if val in self.categories:
return self.lnp
else:
return -np.inf
else:
val = np.atleast_1d(val)
probs = -np.inf * np.ones_like(val, dtype=np.float64)
idxs = np.isin(val, self.categories)
probs[idxs] = self.lnp
return probs
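A brief usage sketch of the new Categorical prior; the import path is an assumption:

import numpy as np
from bilby.core.prior import Categorical  # assumed import path

label = Categorical(ncategories=3, name="model_index")

u = np.random.uniform(0, 1, size=10)
draws = label.rescale(u)                   # maps the unit interval onto {0, 1, 2}
print(label.prob(1))                       # 1/3 for an allowed category
print(label.ln_prob(np.array([0, 1, 5])))  # -log(3) for valid entries, -inf otherwise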
@@ -202,23 +202,6 @@ class Prior(object):
         """
         return (val >= self.minimum) & (val <= self.maximum)
 
-    @staticmethod
-    def test_valid_for_rescaling(val):
-        """Test if 0 < val < 1
-
-        Parameters
-        ==========
-        val: Union[float, int, array_like]
-
-        Raises
-        =======
-        ValueError: If val is not between 0 and 1
-        """
-        valarray = np.atleast_1d(val)
-        tests = (valarray < 0) + (valarray > 1)
-        if np.any(tests):
-            raise ValueError("Number to be rescaled should be in [0, 1]")
-
     def __repr__(self):
         """Overrides the special method __repr__.
@@ -430,7 +413,7 @@ class Prior(object):
         Parameters
         ==========
         val: str
-            The string version of the agument
+            The string version of the argument
 
         Returns
         =======
......
...@@ -168,8 +168,7 @@ class PriorDict(dict): ...@@ -168,8 +168,7 @@ class PriorDict(dict):
for key in ["__module__", "__name__", "__prior_dict__"]: for key in ["__module__", "__name__", "__prior_dict__"]:
if key in prior_dict: if key in prior_dict:
del prior_dict[key] del prior_dict[key]
obj = class_(dict()) obj = class_(prior_dict)
obj.from_dictionary(prior_dict)
return obj return obj
@classmethod @classmethod
...@@ -424,27 +423,38 @@ class PriorDict(dict): ...@@ -424,27 +423,38 @@ class PriorDict(dict):
} }
return all_samples return all_samples
def normalize_constraint_factor(self, keys): def normalize_constraint_factor(self, keys, min_accept=10000, sampling_chunk=50000, nrepeats=10):
if keys in self._cached_normalizations.keys(): if keys in self._cached_normalizations.keys():
return self._cached_normalizations[keys] return self._cached_normalizations[keys]
else: else:
min_accept = 1000 factor_estimates = [
sampling_chunk = 5000 self._estimate_normalization(keys, min_accept, sampling_chunk)
for _ in range(nrepeats)
]
factor = np.mean(factor_estimates)
if np.std(factor_estimates) > 0:
decimals = int(-np.floor(np.log10(3 * np.std(factor_estimates))))
factor_rounded = np.round(factor, decimals)
else:
factor_rounded = factor
self._cached_normalizations[keys] = factor_rounded
return factor_rounded
def _estimate_normalization(self, keys, min_accept, sampling_chunk):
samples = self.sample_subset(keys=keys, size=sampling_chunk)
keep = np.atleast_1d(self.evaluate_constraints(samples))
if len(keep) == 1:
self._cached_normalizations[keys] = 1
return 1
all_samples = {key: np.array([]) for key in keys}
while np.count_nonzero(keep) < min_accept:
samples = self.sample_subset(keys=keys, size=sampling_chunk) samples = self.sample_subset(keys=keys, size=sampling_chunk)
keep = np.atleast_1d(self.evaluate_constraints(samples)) for key in samples:
if len(keep) == 1: all_samples[key] = np.hstack(
self._cached_normalizations[keys] = 1 [all_samples[key], samples[key].flatten()])
return 1 keep = np.array(self.evaluate_constraints(all_samples), dtype=bool)
all_samples = {key: np.array([]) for key in keys} factor = len(keep) / np.count_nonzero(keep)
while np.count_nonzero(keep) < min_accept: return factor
samples = self.sample_subset(keys=keys, size=sampling_chunk)
for key in samples:
all_samples[key] = np.hstack(
[all_samples[key], samples[key].flatten()])
keep = np.array(self.evaluate_constraints(all_samples), dtype=bool)
factor = len(keep) / np.count_nonzero(keep)
self._cached_normalizations[keys] = factor
return factor
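The repeated estimates produced above are averaged and then rounded to the precision supported by their scatter. A standalone sketch of that rounding logic, using made-up factor estimates:
import numpy as np
factor_estimates = [2.012, 1.987, 2.004, 1.995, 2.021]  # hypothetical estimates
factor = np.mean(factor_estimates)
std = np.std(factor_estimates)
if std > 0:
    # keep only the decimals that are significant at roughly the 3-sigma level
    decimals = int(-np.floor(np.log10(3 * std)))
    factor_rounded = np.round(factor, decimals)
else:
    factor_rounded = factor
print(factor, factor_rounded)  # 2.0038 2.0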
def prob(self, sample, **kwargs): def prob(self, sample, **kwargs):
""" """
...@@ -469,11 +479,11 @@ class PriorDict(dict): ...@@ -469,11 +479,11 @@ class PriorDict(dict):
def check_prob(self, sample, prob): def check_prob(self, sample, prob):
ratio = self.normalize_constraint_factor(tuple(sample.keys())) ratio = self.normalize_constraint_factor(tuple(sample.keys()))
if np.all(prob == 0.): if np.all(prob == 0.):
return prob return prob * ratio
else: else:
if isinstance(prob, float): if isinstance(prob, float):
if self.evaluate_constraints(sample): if self.evaluate_constraints(sample):
return prob return prob * ratio
else: else:
return 0. return 0.
else: else:
...@@ -509,7 +519,7 @@ class PriorDict(dict): ...@@ -509,7 +519,7 @@ class PriorDict(dict):
else: else:
if isinstance(ln_prob, float): if isinstance(ln_prob, float):
if self.evaluate_constraints(sample): if self.evaluate_constraints(sample):
return ln_prob return ln_prob + np.log(ratio)
else: else:
return -np.inf return -np.inf
else: else:
...@@ -518,6 +528,21 @@ class PriorDict(dict): ...@@ -518,6 +528,21 @@ class PriorDict(dict):
constrained_ln_prob[keep] = ln_prob[keep] + np.log(ratio) constrained_ln_prob[keep] = ln_prob[keep] + np.log(ratio)
return constrained_ln_prob return constrained_ln_prob
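To see the effect of the constraint ratio on prob and ln_prob, a small sketch with a hypothetical constraint x + y < 1; the accepted region is half of the unit square, so the estimated factor is close to 2 and the printed values are approximate (the first call also triggers the Monte Carlo normalization, which takes a moment):
import numpy as np
from bilby.core.prior import PriorDict, Uniform, Constraint
def convert_x_y(parameters):
    # derived quantity checked by the Constraint prior
    parameters["x_plus_y"] = parameters["x"] + parameters["y"]
    return parameters
priors = PriorDict(
    dict(x=Uniform(0, 1), y=Uniform(0, 1), x_plus_y=Constraint(0, 1)),
    conversion_function=convert_x_y,
)
print(priors.ln_prob(dict(x=0.2, y=0.3)))  # approximately log(2) = 0.69
print(priors.ln_prob(dict(x=0.8, y=0.9)))  # -inf, the constraint is violated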
def cdf(self, sample):
"""Evaluate the cumulative distribution function at the provided points
Parameters
==========
sample: dict, pandas.DataFrame
Dictionary of the samples for which to calculate the CDF
Returns
=======
dict, pandas.DataFrame: Dictionary containing the CDF values
"""
return sample.__class__({key: self[key].cdf(sample) for key, sample in sample.items()})
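A usage sketch of the new cdf method with illustrative numbers; the returned container mirrors whatever is passed in:
import numpy as np
from bilby.core.prior import PriorDict, Uniform
priors = PriorDict(dict(a=Uniform(0, 1), b=Uniform(0, 10)))
sample = dict(a=np.array([0.25, 0.5]), b=np.array([5.0, 7.5]))
# dict in -> dict out; a pandas.DataFrame would come back as a DataFrame
print(priors.cdf(sample))  # {'a': array([0.25, 0.5]), 'b': array([0.5, 0.75])}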
def rescale(self, keys, theta): def rescale(self, keys, theta):
"""Rescale samples from unit cube to prior """Rescale samples from unit cube to prior
...@@ -681,9 +706,7 @@ class ConditionalPriorDict(PriorDict): ...@@ -681,9 +706,7 @@ class ConditionalPriorDict(PriorDict):
float: Joint probability of all individual sample probabilities float: Joint probability of all individual sample probabilities
""" """
self._check_resolved() self._prepare_evaluation(*zip(*sample.items()))
for key, value in sample.items():
self[key].least_recently_sampled = value
res = [self[key].prob(sample[key], **self.get_required_variables(key)) for key in sample] res = [self[key].prob(sample[key], **self.get_required_variables(key)) for key in sample]
prob = np.product(res, **kwargs) prob = np.product(res, **kwargs)
return self.check_prob(sample, prob) return self.check_prob(sample, prob)
...@@ -703,13 +726,16 @@ class ConditionalPriorDict(PriorDict): ...@@ -703,13 +726,16 @@ class ConditionalPriorDict(PriorDict):
float: Joint log probability of all the individual sample probabilities float: Joint log probability of all the individual sample probabilities
""" """
self._check_resolved() self._prepare_evaluation(*zip(*sample.items()))
for key, value in sample.items():
self[key].least_recently_sampled = value
res = [self[key].ln_prob(sample[key], **self.get_required_variables(key)) for key in sample] res = [self[key].ln_prob(sample[key], **self.get_required_variables(key)) for key in sample]
ln_prob = np.sum(res, axis=axis) ln_prob = np.sum(res, axis=axis)
return self.check_ln_prob(sample, ln_prob) return self.check_ln_prob(sample, ln_prob)
def cdf(self, sample):
self._prepare_evaluation(*zip(*sample.items()))
res = {key: self[key].cdf(sample[key], **self.get_required_variables(key)) for key in sample}
return sample.__class__(res)
def rescale(self, keys, theta): def rescale(self, keys, theta):
"""Rescale samples from unit cube to prior """Rescale samples from unit cube to prior
...@@ -724,12 +750,14 @@ class ConditionalPriorDict(PriorDict): ...@@ -724,12 +750,14 @@ class ConditionalPriorDict(PriorDict):
======= =======
list: List of floats containing the rescaled sample list: List of floats containing the rescaled sample
""" """
keys = list(keys)
theta = list(theta)
self._check_resolved() self._check_resolved()
self._update_rescale_keys(keys) self._update_rescale_keys(keys)
result = dict() result = dict()
for key, index in zip(self.sorted_keys_without_fixed_parameters, self._rescale_indexes): for key, index in zip(self.sorted_keys_without_fixed_parameters, self._rescale_indexes):
required_variables = {k: result[k] for k in getattr(self[key], 'required_variables', [])} result[key] = self[key].rescale(theta[index], **self.get_required_variables(key))
result[key] = self[key].rescale(theta[index], **required_variables) self[key].least_recently_sampled = result[key]
return [result[key] for key in keys] return [result[key] for key in keys]
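A sketch of rescaling through a ConditionalPriorDict after this change; the condition function is hypothetical, and any iterable of keys and unit-cube points is accepted thanks to the list() coercion:
from bilby.core.prior import ConditionalPriorDict, ConditionalUniform, Uniform
def condition_b(reference_params, a):
    # b is uniform on [0, a]: its upper bound tracks the rescaled value of a
    return dict(minimum=reference_params["minimum"], maximum=a)
priors = ConditionalPriorDict(dict(
    a=Uniform(minimum=0, maximum=1),
    b=ConditionalUniform(condition_func=condition_b, minimum=0, maximum=1),
))
print(priors.rescale(keys=("a", "b"), theta=(0.5, 0.5)))  # [0.5, 0.25]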
def _update_rescale_keys(self, keys): def _update_rescale_keys(self, keys):
...@@ -737,6 +765,11 @@ class ConditionalPriorDict(PriorDict): ...@@ -737,6 +765,11 @@ class ConditionalPriorDict(PriorDict):
self._rescale_indexes = [keys.index(element) for element in self.sorted_keys_without_fixed_parameters] self._rescale_indexes = [keys.index(element) for element in self.sorted_keys_without_fixed_parameters]
self._least_recently_rescaled_keys = keys self._least_recently_rescaled_keys = keys
def _prepare_evaluation(self, keys, theta):
self._check_resolved()
for key, value in zip(keys, theta):
self[key].least_recently_sampled = value
def _check_resolved(self): def _check_resolved(self):
if not self._resolved: if not self._resolved:
raise IllegalConditionsException("The current set of priors contains unresolvable conditions.") raise IllegalConditionsException("The current set of priors contains unresolvable conditions.")
......
...@@ -86,7 +86,6 @@ class Interped(Prior): ...@@ -86,7 +86,6 @@ class Interped(Prior):
This maps to the inverse CDF. This is done using interpolation. This maps to the inverse CDF. This is done using interpolation.
""" """
self.test_valid_for_rescaling(val)
rescaled = self.inverse_cumulative_distribution(val) rescaled = self.inverse_cumulative_distribution(val)
if rescaled.shape == (): if rescaled.shape == ():
rescaled = float(rescaled) rescaled = float(rescaled)
......
...@@ -11,7 +11,7 @@ class BaseJointPriorDist(object): ...@@ -11,7 +11,7 @@ class BaseJointPriorDist(object):
def __init__(self, names, bounds=None): def __init__(self, names, bounds=None):
""" """
A class defining JointPriorDist that will be overwritten with child A class defining JointPriorDist that will be overwritten with child
classes defining the joint prior distribtuions between given parameters, classes defining the joint prior distributions between given parameters,
Parameters Parameters
...@@ -172,7 +172,7 @@ class BaseJointPriorDist(object): ...@@ -172,7 +172,7 @@ class BaseJointPriorDist(object):
raise ValueError("Array is the wrong shape") raise ValueError("Array is the wrong shape")
# check sample(s) is within bounds # check sample(s) is within bounds
outbounds = np.ones(samp.shape[0], dtype=np.bool) outbounds = np.ones(samp.shape[0], dtype=bool)
for s, bound in zip(samp.T, self.bounds.values()): for s, bound in zip(samp.T, self.bounds.values()):
outbounds = (s < bound[0]) | (s > bound[1]) outbounds = (s < bound[0]) | (s > bound[1])
if np.any(outbounds): if np.any(outbounds):
...@@ -210,7 +210,7 @@ class BaseJointPriorDist(object): ...@@ -210,7 +210,7 @@ class BaseJointPriorDist(object):
samp: vector samp: vector
sample to evaluate the ln_prob at sample to evaluate the ln_prob at
lnprob: vector lnprob: vector
of -inf pased in with the same shape as the number of samples of -inf passed in with the same shape as the number of samples
outbounds: array_like outbounds: array_like
boolean array showing which samples in lnprob vector are out of the given bounds boolean array showing which samples in lnprob vector are out of the given bounds
...@@ -231,7 +231,7 @@ class BaseJointPriorDist(object): ...@@ -231,7 +231,7 @@ class BaseJointPriorDist(object):
Parameters Parameters
========== ==========
size: int size: int
number of samples to generate, defualts to 1 number of samples to generate, defaults to 1
""" """
if size is None: if size is None:
...@@ -250,7 +250,7 @@ class BaseJointPriorDist(object): ...@@ -250,7 +250,7 @@ class BaseJointPriorDist(object):
Parameters Parameters
========== ==========
size: int size: int
number of samples to generate, defualts to 1 number of samples to generate, defaults to 1
""" """
samps = np.zeros((size, len(self))) samps = np.zeros((size, len(self)))
""" """
...@@ -299,7 +299,7 @@ class BaseJointPriorDist(object): ...@@ -299,7 +299,7 @@ class BaseJointPriorDist(object):
Parameters Parameters
========== ==========
samp: numpy array samp: numpy array
this is a vector sample drawn from a uniform distribtuion to be rescaled to the distribution this is a vector sample drawn from a uniform distribution to be rescaled to the distribution
""" """
""" """
Here is where the subclass will overwrite the rescale method Here is where the subclass will overwrite the rescale method
...@@ -630,7 +630,7 @@ class MultivariateGaussianDist(BaseJointPriorDist): ...@@ -630,7 +630,7 @@ class MultivariateGaussianDist(BaseJointPriorDist):
elif isinstance(self.__dict__[key], (np.ndarray, list)): elif isinstance(self.__dict__[key], (np.ndarray, list)):
thisarr = np.asarray(self.__dict__[key]) thisarr = np.asarray(self.__dict__[key])
otherarr = np.asarray(other.__dict__[key]) otherarr = np.asarray(other.__dict__[key])
if thisarr.dtype == np.float and otherarr.dtype == np.float: if thisarr.dtype == float and otherarr.dtype == float:
fin1 = np.isfinite(np.asarray(self.__dict__[key])) fin1 = np.isfinite(np.asarray(self.__dict__[key]))
fin2 = np.isfinite(np.asarray(other.__dict__[key])) fin2 = np.isfinite(np.asarray(other.__dict__[key]))
if not np.array_equal(fin1, fin2): if not np.array_equal(fin1, fin2):
...@@ -707,10 +707,9 @@ class JointPrior(Prior): ...@@ -707,10 +707,9 @@ class JointPrior(Prior):
Returns Returns
======= =======
float: float:
A sample from the prior paramter. A sample from the prior parameter.
""" """
self.test_valid_for_rescaling(val)
self.dist.rescale_parameters[self.name] = val self.dist.rescale_parameters[self.name] = val
if self.dist.filled_rescale(): if self.dist.filled_rescale():
...@@ -734,7 +733,7 @@ class JointPrior(Prior): ...@@ -734,7 +733,7 @@ class JointPrior(Prior):
Returns Returns
======= =======
float: float:
A sample from the prior paramter. A sample from the prior parameter.
""" """
if self.name in self.dist.sampled_parameters: if self.name in self.dist.sampled_parameters:
......
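For context on the joint-prior classes whose docstrings are tidied above, a usage sketch of a single-mode multivariate Gaussian prior; the per-mode nesting of mus and covs follows the pattern used in the bilby documentation and the numbers are illustrative:
from bilby.core.prior import MultivariateGaussianDist, MultivariateGaussian, PriorDict
# one shared distribution object backs both one-dimensional priors
mvg = MultivariateGaussianDist(
    names=["m", "c"], mus=[[2.0, 0.5]], covs=[[[0.10, 0.02], [0.02, 0.20]]])
priors = PriorDict(dict(
    m=MultivariateGaussian(dist=mvg, name="m"),
    c=MultivariateGaussian(dist=mvg, name="c"),
))
samples = priors.sample(1000)  # correlated draws for "m" and "c"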
import datetime
import inspect import inspect
import json import json
import os import os
...@@ -22,7 +23,10 @@ from .utils import ( ...@@ -22,7 +23,10 @@ from .utils import (
recursively_load_dict_contents_from_group, recursively_load_dict_contents_from_group,
recursively_decode_bilby_json, recursively_decode_bilby_json,
) )
from .prior import Prior, PriorDict, DeltaFunction from .prior import Prior, PriorDict, DeltaFunction, ConditionalDeltaFunction
EXTENSIONS = ["json", "hdf5", "h5", "pickle", "pkl"]
def result_file_name(outdir, label, extension='json', gzip=False): def result_file_name(outdir, label, extension='json', gzip=False):
...@@ -359,7 +363,7 @@ class Result(object): ...@@ -359,7 +363,7 @@ class Result(object):
The number of times the likelihood function is called The number of times the likelihood function is called
log_prior_evaluations: array_like log_prior_evaluations: array_like
The evaluations of the prior for each sample point The evaluations of the prior for each sample point
sampling_time: float sampling_time: (datetime.timedelta, float)
The time taken to complete the sampling The time taken to complete the sampling
nburn: int nburn: int
The number of burn-in steps discarded for MCMC samplers The number of burn-in steps discarded for MCMC samplers
...@@ -409,6 +413,8 @@ class Result(object): ...@@ -409,6 +413,8 @@ class Result(object):
self.log_likelihood_evaluations = log_likelihood_evaluations self.log_likelihood_evaluations = log_likelihood_evaluations
self.log_prior_evaluations = log_prior_evaluations self.log_prior_evaluations = log_prior_evaluations
self.num_likelihood_evaluations = num_likelihood_evaluations self.num_likelihood_evaluations = num_likelihood_evaluations
if isinstance(sampling_time, float):
sampling_time = datetime.timedelta(seconds=sampling_time)
self.sampling_time = sampling_time self.sampling_time = sampling_time
self.version = version self.version = version
self.max_autocorrelation_time = max_autocorrelation_time self.max_autocorrelation_time = max_autocorrelation_time
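With the coercion above, Result accepts either representation of sampling_time; a tiny standalone sketch of the behaviour:
import datetime
sampling_time = 754.2  # seconds, as a plain float
if isinstance(sampling_time, float):
    sampling_time = datetime.timedelta(seconds=sampling_time)
print(sampling_time)                  # 0:12:34.200000
print(sampling_time.total_seconds())  # 754.2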
...@@ -801,7 +807,7 @@ class Result(object): ...@@ -801,7 +807,7 @@ class Result(object):
def save_posterior_samples(self, filename=None, outdir=None, label=None): def save_posterior_samples(self, filename=None, outdir=None, label=None):
""" Saves posterior samples to a file """ Saves posterior samples to a file
Generates a .dat file containing the posterior samples and auxillary Generates a .dat file containing the posterior samples and auxiliary
data saved in the posterior. Note, strings in the posterior are data saved in the posterior. Note, strings in the posterior are
removed while complex numbers will be given as absolute values with removed while complex numbers will be given as absolute values with
abs appended to the column name abs appended to the column name
...@@ -1399,7 +1405,8 @@ class Result(object): ...@@ -1399,7 +1405,8 @@ class Result(object):
if priors is None: if priors is None:
return posterior return posterior
for key in priors: for key in priors:
if isinstance(priors[key], DeltaFunction): if isinstance(priors[key], DeltaFunction) and \
not isinstance(priors[key], ConditionalDeltaFunction):
posterior[key] = priors[key].peak posterior[key] = priors[key].peak
elif isinstance(priors[key], float): elif isinstance(priors[key], float):
posterior[key] = priors[key] posterior[key] = priors[key]
...@@ -1422,20 +1429,19 @@ class Result(object): ...@@ -1422,20 +1429,19 @@ class Result(object):
Function which adds in extra parameters to the data frame, Function which adds in extra parameters to the data frame,
should take the data_frame, likelihood and prior as arguments. should take the data_frame, likelihood and prior as arguments.
""" """
try:
data_frame = self.posterior data_frame = pd.DataFrame(
except ValueError: self.samples, columns=self.search_parameter_keys)
data_frame = pd.DataFrame( data_frame = self._add_prior_fixed_values_to_posterior(
self.samples, columns=self.search_parameter_keys) data_frame, priors)
data_frame = self._add_prior_fixed_values_to_posterior( data_frame['log_likelihood'] = getattr(
data_frame, priors) self, 'log_likelihood_evaluations', np.nan)
data_frame['log_likelihood'] = getattr( if self.log_prior_evaluations is None and priors is not None:
self, 'log_likelihood_evaluations', np.nan) data_frame['log_prior'] = priors.ln_prob(
if self.log_prior_evaluations is None and priors is not None: dict(data_frame[self.search_parameter_keys]), axis=0)
data_frame['log_prior'] = priors.ln_prob( else:
dict(data_frame[self.search_parameter_keys]), axis=0) data_frame['log_prior'] = self.log_prior_evaluations
else:
data_frame['log_prior'] = self.log_prior_evaluations
if conversion_function is not None: if conversion_function is not None:
if "npool" in inspect.getargspec(conversion_function).args: if "npool" in inspect.getargspec(conversion_function).args:
data_frame = conversion_function(data_frame, likelihood, priors, npool=npool) data_frame = conversion_function(data_frame, likelihood, priors, npool=npool)
...@@ -1723,7 +1729,7 @@ class ResultList(list): ...@@ -1723,7 +1729,7 @@ class ResultList(list):
def __init__(self, results=None): def __init__(self, results=None):
""" A class to store a list of :class:`bilby.core.result.Result` objects """ A class to store a list of :class:`bilby.core.result.Result` objects
from equivalent runs on the same data. This provides methods for from equivalent runs on the same data. This provides methods for
outputing combined results. outputting combined results.
Parameters Parameters
========== ==========
...@@ -1775,7 +1781,7 @@ class ResultList(list): ...@@ -1775,7 +1781,7 @@ class ResultList(list):
# check which kind of sampler was used: MCMC or Nested Sampling # check which kind of sampler was used: MCMC or Nested Sampling
if result._nested_samples is not None: if result._nested_samples is not None:
posteriors, result = self._combine_nested_sampled_runs(result) posteriors, result = self._combine_nested_sampled_runs(result)
elif result.sampler in ["bilbymcmc"]: elif result.sampler in ["bilby_mcmc", "bilbymcmc"]:
posteriors, result = self._combine_mcmc_sampled_runs(result) posteriors, result = self._combine_mcmc_sampled_runs(result)
else: else:
posteriors = [res.posterior for res in self] posteriors = [res.posterior for res in self]
...@@ -1819,7 +1825,7 @@ class ResultList(list): ...@@ -1819,7 +1825,7 @@ class ResultList(list):
result.log_evidence = logsumexp(log_evidences, b=1. / len(self)) result.log_evidence = logsumexp(log_evidences, b=1. / len(self))
result.log_bayes_factor = result.log_evidence - result.log_noise_evidence result.log_bayes_factor = result.log_evidence - result.log_noise_evidence
# Propogate uncertainty in combined evidence # Propagate uncertainty in combined evidence
log_errs = [res.log_evidence_err for res in self if np.isfinite(res.log_evidence_err)] log_errs = [res.log_evidence_err for res in self if np.isfinite(res.log_evidence_err)]
if len(log_errs) > 0: if len(log_errs) > 0:
result.log_evidence_err = 0.5 * logsumexp(2 * np.array(log_errs), b=1. / len(self)) result.log_evidence_err = 0.5 * logsumexp(2 * np.array(log_errs), b=1. / len(self))
...@@ -1865,7 +1871,7 @@ class ResultList(list): ...@@ -1865,7 +1871,7 @@ class ResultList(list):
result.log_evidence = logsumexp(log_evidences, b=1. / len(self)) result.log_evidence = logsumexp(log_evidences, b=1. / len(self))
result.log_bayes_factor = result.log_evidence - result.log_noise_evidence result.log_bayes_factor = result.log_evidence - result.log_noise_evidence
# Propogate uncertainty in combined evidence # Propagate uncertainty in combined evidence
log_errs = [res.log_evidence_err for res in self if np.isfinite(res.log_evidence_err)] log_errs = [res.log_evidence_err for res in self if np.isfinite(res.log_evidence_err)]
if len(log_errs) > 0: if len(log_errs) > 0:
result.log_evidence_err = 0.5 * logsumexp(2 * np.array(log_errs), b=1. / len(self)) result.log_evidence_err = 0.5 * logsumexp(2 * np.array(log_errs), b=1. / len(self))
...@@ -2135,7 +2141,7 @@ class ResultError(Exception): ...@@ -2135,7 +2141,7 @@ class ResultError(Exception):
class ResultListError(ResultError): class ResultListError(ResultError):
""" For Errors occuring during combining results. """ """ For Errors occurring during combining results. """
class FileMovedError(ResultError): class FileMovedError(ResultError):
......