Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (137)
Showing with 3600 additions and 207 deletions
hist
livetime
iff
amin
amax
......@@ -15,6 +15,26 @@ stages:
- docs
- deploy
# ------------------- Initial stage -------------------------------------------
# Check author list is up to date
authors:
stage: initial
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
script:
- python test/check_author_list.py
# Test containers scripts are up to date
containers:
stage: initial
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
script:
- cd containers
- python write_dockerfiles.py
# Fail if differences exist. If this fails, you may need to run
# write_dockerfiles.py and commit the changes.
- git diff --exit-code
.test-python: &test-python
stage: initial
image: python
......@@ -38,16 +58,68 @@ stages:
${script} --help;
done
# test basic setup on python3
# Test basic setup on python 3.7
basic-3.7:
<<: *test-python
image: python:3.7
# test example on python 3.7
# Test basic setup on python 3.8
basic-3.8:
<<: *test-python
image: python:3.8
# Test basic setup on python 3.9
basic-3.9:
<<: *test-python
image: python:3.9
precommits-py3.7:
stage: initial
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
script:
- source activate python37
- mkdir -p .pip37
- pip install --upgrade pip
- pip --cache-dir=.pip37 install --upgrade bilby
- pip --cache-dir=.pip37 install .
- pip --cache-dir=.pip37 install pre-commit
# Run precommits (flake8, spellcheck, isort, no merge conflicts, etc)
- pre-commit run --all-files --verbose --show-diff-on-failure
precommits-py3.8:
stage: initial
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python38
script:
- source activate python38
- mkdir -p .pip38
- pip install --upgrade pip
- pip --cache-dir=.pip38 install --upgrade bilby
- pip --cache-dir=.pip38 install .
- pip --cache-dir=.pip38 install pre-commit
# Run precommits (flake8, spellcheck, isort, no merge conflicts, etc)
- pre-commit run --all-files --verbose --show-diff-on-failure
precommits-py3.9:
stage: initial
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python39
script:
- source activate python39
- mkdir -p .pip39
- pip install --upgrade pip
- pip --cache-dir=.pip39 install --upgrade bilby
- pip --cache-dir=.pip39 install .
- pip --cache-dir=.pip39 install pre-commit
# Run precommits (flake8, spellcheck, isort, no merge conflicts, etc)
- pre-commit run --all-files --verbose --show-diff-on-failure
# ------------------- Test stage -------------------------------------------
# test example on python 3.7 and build coverage
python-3.7:
stage: test
needs: ["basic-3.7", "precommits-py3.7"]
image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
script:
- python -m pip install .
......@@ -64,114 +136,107 @@ python-3.7:
- coverage_badge.svg
- htmlcov/
docs:
stage: docs
needs: ["basic-3.7"]
image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
script:
# Make the documentation
- apt-get -yqq install pandoc
- python -m pip install .
- cd docs
- pip install ipykernel ipython jupyter
- cp ../examples/tutorials/*.ipynb ./
- rm basic_ptmcmc_tutorial.ipynb
- rm compare_samplers.ipynb
- rm visualising_the_results.ipynb
- jupyter nbconvert --to notebook --execute *.ipynb --inplace
- make clean
- make html
artifacts:
paths:
- docs/_build/html/
# test example on python 3.8
python-3.8:
stage: test
needs: ["basic-3.7", "precommits-py3.7"]
image: quay.io/bilbydev/v2-dockerfile-test-suite-python38
needs: ["basic-3.8", "precommits-py3.8"]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python38
script:
- python -m pip install .
- pytest
# test example on python 3.6
python-3.6:
# test example on python 3.9
python-3.9:
stage: test
needs: ["basic-3.7", "precommits-py3.7"]
image: quay.io/bilbydev/v2-dockerfile-test-suite-python36
needs: ["basic-3.9", "precommits-py3.9"]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python39
script:
- python -m pip install .
- pytest
# test samplers on python 3.7
python-3.7-samplers:
stage: test
needs: ["basic-3.7", "precommits-py3.7"]
image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
script:
- python -m pip install .
- pytest test/integration/sampler_run_test.py --durations 10
# test samplers on python 3.6
python-3.6-samplers:
# test samplers on python 3.8
python-3.8-samplers:
stage: test
needs: ["basic-3.7", "precommits-py3.7"]
image: quay.io/bilbydev/v2-dockerfile-test-suite-python36
needs: ["basic-3.8", "precommits-py3.8"]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python38
script:
- python -m pip install .
- pytest test/integration/sampler_run_test.py
- pytest test/integration/sampler_run_test.py --durations 10
# Test containers are up to date
containers:
stage: initial
image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
# test samplers on python 3.9
python-3.9-samplers:
stage: test
needs: ["basic-3.9", "precommits-py3.9"]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python39
script:
- cd containers
- python write_dockerfiles.py
# Fail if differences exist. If this fails, you may need to run
# write_dockerfiles.py and commit the changes.
- git diff --exit-code
- python -m pip install .
- pytest test/integration/sampler_run_test.py --durations 10
# Tests run at a fixed schedule rather than on push
scheduled-python-3.7:
integration-tests-python-3.7:
stage: test
image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
needs: ["basic-3.7", "precommits-py3.7"]
only:
- schedules
script:
- python -m pip install .
# Run tests which are only done on schedule
- pytest test/integration/example_test.py
plotting:
plotting-python-3.7:
stage: test
image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
needs: ["basic-3.7", "precommits-py3.7"]
only:
- schedules
script:
- python -m pip install .
- python -m pip install ligo.skymap
- pytest test/gw/plot_test.py
authors:
stage: initial
image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
# ------------------- Docs stage -------------------------------------------
docs:
stage: docs
needs: ["python-3.7"]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
script:
- python test/check_author_list.py
# Make the documentation
- apt-get -yqq install pandoc
- python -m pip install .
- cd docs
- pip install ipykernel ipython jupyter
- cp ../examples/tutorials/*.ipynb ./
- rm basic_ptmcmc_tutorial.ipynb
- rm compare_samplers.ipynb
- rm visualising_the_results.ipynb
- jupyter nbconvert --to notebook --execute *.ipynb --inplace
- make clean
- make html
artifacts:
paths:
- docs/_build/html/
# ------------------- Deploy stage -------------------------------------------
pages:
deploy-docs:
stage: deploy
needs: ["docs", "python-3.7"]
dependencies:
- docs
- python-3.7
script:
- mkdir public/
- mv htmlcov/ public/
......@@ -184,9 +249,49 @@ pages:
only:
- master
deploy_release:
# Build the containers
build-python37-container:
stage: deploy
image: docker:19.03.12
needs: ["containers"]
only:
- schedules
script:
- cd containers
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
- docker build --tag v3-bilby-python37 - < v3-dockerfile-test-suite-python37
- docker image tag v3-bilby-python37 containers.ligo.org/lscsoft/bilby/v2-bilby-python37:latest
- docker image push containers.ligo.org/lscsoft/bilby/v2-bilby-python37:latest
build-python38-container:
stage: deploy
image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
image: docker:19.03.12
needs: ["containers"]
only:
- schedules
script:
- cd containers
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
- docker build --tag v3-bilby-python38 - < v3-dockerfile-test-suite-python38
- docker image tag v3-bilby-python38 containers.ligo.org/lscsoft/bilby/v2-bilby-python38:latest
- docker image push containers.ligo.org/lscsoft/bilby/v2-bilby-python38:latest
build-python39-container:
stage: deploy
image: docker:19.03.12
needs: ["containers"]
only:
- schedules
script:
- cd containers
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
- docker build --tag v3-bilby-python39 - < v3-dockerfile-test-suite-python39
- docker image tag v3-bilby-python39 containers.ligo.org/lscsoft/bilby/v2-bilby-python39:latest
- docker image push containers.ligo.org/lscsoft/bilby/v2-bilby-python39:latest
pypi-release:
stage: deploy
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
variables:
TWINE_USERNAME: $PYPI_USERNAME
TWINE_PASSWORD: $PYPI_PASSWORD
......@@ -197,18 +302,3 @@ deploy_release:
- twine upload dist/*
only:
- tags
precommits-py3.7:
stage: initial
image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
script:
- source activate python37
- mkdir -p .pip37
- pip install --upgrade pip
- pip --cache-dir=.pip37 install --upgrade bilby
- pip --cache-dir=.pip37 install .
- pip --cache-dir=.pip37 install pre-commit
# Run precommits (flake8, spellcheck, isort, no merge conflicts, etc)
- pre-commit run --all-files --verbose --show-diff-on-failure
......@@ -4,19 +4,26 @@ repos:
hooks:
- id: check-merge-conflict # prevent committing files with merge conflicts
- id: flake8 # checks for flake8 errors
#- repo: https://github.com/codespell-project/codespell
# rev: v1.16.0
# hooks:
# - id: codespell # Spellchecker
# args: [-L, nd, --skip, "*.ipynb,*.html", --ignore-words=.dictionary.txt]
# exclude: ^examples/tutorials/
#- repo: https://github.com/asottile/seed-isort-config
# rev: v1.3.0
# hooks:
# - id: seed-isort-config
# args: [--application-directories, 'bilby/']
#- repo: https://github.com/pre-commit/mirrors-isort
# rev: v4.3.21
# hooks:
# - id: isort # sort imports alphabetically and separates import into sections
# args: [-w=88, -m=3, -tc, -sp=setup.cfg ]
- repo: https://github.com/psf/black
rev: 20.8b1
hooks:
- id: black
language_version: python3
files: ^bilby/bilby_mcmc/
- repo: https://github.com/codespell-project/codespell
rev: v1.16.0
hooks:
- id: codespell
args: [--ignore-words=.dictionary.txt]
- repo: https://github.com/asottile/seed-isort-config
rev: v1.3.0
hooks:
- id: seed-isort-config
args: [--application-directories, 'bilby/']
files: ^bilby/bilby_mcmc/
- repo: https://github.com/pre-commit/mirrors-isort
rev: v4.3.21
hooks:
- id: isort # sort imports alphabetically and separates import into sections
args: [-w=88, -m=3, -tc, -sp=setup.cfg ]
files: ^bilby/bilby_mcmc/
......@@ -14,7 +14,7 @@ Bruce Edelman
Carl-Johan Haster
Cecilio Garcia-Quiros
Charlie Hoy
Christopher Berry
Christopher Philip Luke Berry
Christos Karathanasis
Colm Talbot
Daniel Williams
......@@ -29,11 +29,14 @@ Ignacio Magaña Hernandez
Isobel Marguarethe Romero-Shaw
Jade Powell
James A Clark
Jeremy G Baier
John Veitch
Joshua Brandt
Katerina Chatziioannou
Kaylee de Soto
Khun Sang Phukon
Kshipraa Athar
Leslie Wade
Liting Xiao
Maite Mateu-Lucena
Marc Arene
......@@ -49,10 +52,12 @@ Moritz Huebner
Nicola De Lillo
Nikhil Sarin
Nirban Bose
Olivia Wilk
Paul Easter
Paul Lasky
Philip Relton
Rhys Green
Rico Lo
Roberto Cotesta
Rory Smith
S. H. Oh
......@@ -69,3 +74,4 @@ Sylvia Biscoveanu
Tathagata Ghosh
Virginia d'Emilio
Vivien Raymond
Ka-Lok Lo
# All notable changes will be documented in this file
## [1.1.4] 2021-10-08
Version 1.1.4 release of bilby
### Added
- Version of dynesty pinned to less than v1.1 to anticipate breaking changes (!1020)
- Pool to computation of SNR (!1013)
- Ability to load results produced with custom priors (!1010)
- The nestcheck test (!1005)
- Bilby-mcmc guide to docs (!1001)
- Codespell pre-commit (!996)
- MBGravitationalWaveTransient (!972)
- Zeus MCMC sampler support (!962)
- Option to use print rather than tqdm (!937)
### Changes
- Updates citation guide (!1030)
- Minor bug fixes (!1029, !1025, !1022, !1016, !1018, !1014, !1007, !1004)
- Typo fix in earth light crossing (!1003)
- Fix zero spin conversion (!1002)
## [1.1.3] 2021-07-02
Version 1.1.3 release of bilby
### Added
- Added `Categorical` prior (!982)(!990)
- Added a built-in mcmc sampler (`bilby_mcmc`) (!905)(!985)
- Added run statistics to the `dynesty` meta data (!969)
- Added `cdf` method to `PriorDict` classes (!943)
### Changes
- Removed the autoburnin causing `kombine` to fail the CI tests (!988)
- Sped up the spline interpolation in ROQ (!971)
- Replaced bessel interpolant with scipy function (!976)
- Improved checkpoint stats plot (!977)
- Fixed a typo in the sampler documentation (!986)
- Fixed issue that causes ConditionalDeltaFunction posterior samples not to be saved correctly (!973)
- Solved an issue where injected SNRs were logged incorrectly (!980)
- Made Python 3.6+ a specific requirement (!978)
- Fixed the calibration and time marginalized likelihood (!978)
- Removed a possible error in the distance marginalization (!960)
- Fixed an issue where `check_draw` did not catch `np.nan` values (!965)
- Removed a superfluous line in the docs configuration file (!963)
- Added a warning about class side effects to the `GravitationalWaveTransient` likelihood classes (!964)
- Allow `ptemcee` initialization with array (!955)
- Removed `Prior.test_valid_for_rescaling` (!956)
- Replaced deprecated numpy aliases builtins (!970)
- Fixed a bug in the algorithm to determine time resolution of ROQ (!967)
- Restructured utils module into several submodules. API remains backwards compatible (!873)
- Changed number of default walks in `dynesty` from `10*self.ndim` to `100` (!961)
## [1.1.2] 2021-05-05
Version 1.1.2 release of bilby
......@@ -69,7 +119,7 @@ Version 1.1.0 release of bilby
- Fixed `ultranest` from failing
- Fixed issues with plotting failing in tests (!904)
- Changed the CI to run on auto-built images (!899)
- Resolved a `matplotlib` error occuring at `dynesty` checkpoint plots (!902)
- Resolved a `matplotlib` error occurring at `dynesty` checkpoint plots (!902)
- Fixed the multidimensional Gaussian example (!901)
- Now allow any lal dictionary option and added a numerical relativity file (!896)
- Fixed the likelihood count in `dynesty` (!853)
......@@ -141,7 +191,7 @@ Version 1.0.1 release of bilby
- Fixed a minor issue with conditional priors that could cause unexpected behaviour in edge cases (!838)
- Fixed `__repr__` method in the `FromFile` prior (!836)
- Fixed an issue that caused problems for some users when plotting with a latex backend (!816)
- Fixed bug that occured when min/max of interpolated priors was changed (!815)
- Fixed bug that occurred when min/max of interpolated priors was changed (!815)
- Fixed time domain waveform epoch (!736)
- Fixed time keeping in multinest (!830)
- Now checks if marginalised priors were defined before marginalising (!829)
......@@ -179,7 +229,7 @@ for details
- Updated the default PSD to O4 (!757)
- Make multinest allow long file names, optional and work with MPI (!764 !785)
- Add min/max to aligned spin prior (!787)
- Reduce redudant code (!703)
- Reduce redundant code (!703)
- Added testing for python 3.8 (!762)
- Improvements to the waveform plot (!769)
......@@ -280,7 +330,7 @@ parameters.
## Changes
- Speed up the prior evaluations by implementing directly with checks to scipy !627
- Soft initalisation option for the Sampler class !620
- Soft initialisation option for the Sampler class !620
- Improvements to JSON reading and writing for functions !621
- Fixed bug in prior reading !618 !617
- Fixes to the examples !619 !614 !626 !616
......@@ -341,7 +391,7 @@ parameters.
- Removed the sqrt(2) normalisation from the scalar longitudinal mode
- Improve PSD filename reading (no longer required "/" to read local files)
- Fix bug in emcee chains
- Added a try/except cluase for building the lookup table
- Added a try/except clause for building the lookup table
## [0.5.4] 2019-07-30
......@@ -382,7 +432,7 @@ shown (!564) to provide better convergence for the long-duration high-spin tests
### Changed
- Updated and fixed bugs in examples
- Resolve sampling time persistence for runs which are interupted
- Resolve sampling time persistence for runs which are interrupted
- Improvements to the PP plot
- Speed up of the distance calculation
- Fixed a bug in the inteference of bilby command line arguments with user specified command lines
......@@ -678,7 +728,7 @@ re-instantiate the Prior in most cases
- Changed to using `setuptools` for installation.
- Clean up of real data handling: all data is now windowed with a 0.4s roll off (unless set otherwise) and low-pass filtered.
- Add explicit method to create a power spectral density from time-domain data
- Clean up of `PowerSpectralDensity()` - addds `set_from` methods to handle various ways to define the PSD.
- Clean up of `PowerSpectralDensity()` - adds `set_from` methods to handle various ways to define the PSD.
- Clean up of `detectors.py`: adds an `InterferometerStrainData` to handle strain data and `InterferometerSet` to handle multiple interferometers. All data setting should primarily be done through the `Interferometer.set_strain_data..` methods.
- Fix the comments and units of `nfft` and `infft` and general improvement to documentation of data.
- Fixed a bug in create_time_series
......@@ -275,7 +275,7 @@ help orient users and make it easier to contribute. The layout is intended to
define the logic of the code and new merge requests should aim to fit within
this logic (unless there is a good argument to change it). For example, code
which adds a new sampler should not effect the gravitational-wave specific
parts of the code. Note that this document is not programatically generated and
parts of the code. Note that this document is not programmatically generated and
so may get out of date with time. If you notice something wrong, please open an
issue.
......@@ -35,6 +35,12 @@ If you use :code:`bilby` in a scientific publication, please cite
* `Bilby: A user-friendly Bayesian inference library for gravitational-wave
astronomy
<https://ui.adsabs.harvard.edu/#abs/2018arXiv181102042A/abstract>`__
* `Bayesian inference for compact binary coalescences with BILBY: validation and application to the first LIGO-Virgo gravitational-wave transient catalogue <https://ui.adsabs.harvard.edu/abs/2020MNRAS.499.3295R/abstract>`__
The first of these papers introduces the software, while the second introduces advances in the sampling approaches and validation of the software.
If you use the :code:`bilby_mcmc` sampler, please additionally cite
* `BILBY-MCMC: an MCMC sampler for gravitational-wave inference <https://ui.adsabs.harvard.edu/abs/2021MNRAS.507.2037A/abstract>`__
Additionally, :code:`bilby` builds on a number of open-source packages. If you
make use of this functionality in your publications, we recommend you cite them
from .sampler import Bilby_MCMC
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from ..core.sampler.base_sampler import SamplerError
from ..core.utils import logger
from .utils import LOGLKEY, LOGLLATEXKEY, LOGPKEY, LOGPLATEXKEY
class Chain(object):
def __init__(
self,
initial_sample,
burn_in_nact=1,
thin_by_nact=1,
fixed_discard=0,
autocorr_c=5,
min_tau=1,
fixed_tau=None,
tau_window=None,
block_length=100000,
):
"""Object to store a single mcmc chain
Parameters
----------
initial_sample: bilby.bilby_mcmc.chain.Sample
The starting point of the chain
burn_in_nact, thin_by_nact : int (1, 1)
The number of autocorrelation times (tau) to discard for burn-in
and the multiplicative factor to thin by (thin_by_nact < 1). I.e
burn_in_nact=10 and thin_by_nact=1 will discard 10*tau samples from
the start of the chain, then thin the final chain by a factor
of 1*tau (resulting in independent samples).
fixed_discard: int (0)
A fixed minimum number of samples to discard (can be used to
override the burn_in_nact if it is too small).
autocorr_c: float (5)
The step size of the window search used by emcee.autocorr when
estimating the autocorrelation time.
min_tau: int (1)
A minimum value for the autocorrelation time.
fixed_tau: int (None)
A fixed value for the autocorrelation (overrides the automated
autocorrelation time estimation). Used in testing.
tau_window: int (None)
Only calculate the autocorrelation time in a trailing window. If
None (default) this method is not used.
block_length: int
The incremental size to extend the array by when it runs out of
space.
"""
self.autocorr_c = autocorr_c
self.min_tau = min_tau
self.burn_in_nact = burn_in_nact
self.thin_by_nact = thin_by_nact
self.block_length = block_length
self.fixed_discard = int(fixed_discard)
self.fixed_tau = fixed_tau
self.tau_window = tau_window
self.ndim = initial_sample.ndim
self.current_sample = initial_sample
self.keys = self.current_sample.keys
self.parameter_keys = self.current_sample.parameter_keys
# Initialize chain
self._chain_array = self._get_zero_chain_array()
self._chain_array_length = block_length
self.position = -1
self.max_log_likelihood = -np.inf
self.max_tau_dict = {}
self.converged = False
self.cached_tau_count = 0
self._minimum_index_proposal = 0
self._minimum_index_adapt = 0
self._last_minimum_index = (0, 0, "I")
self.last_full_tau_dict = {key: np.inf for key in self.parameter_keys}
# Append the initial sample
self.append(self.current_sample)
def _get_zero_chain_array(self):
return np.zeros((self.block_length, self.ndim + 2), dtype=np.float64)
def _extend_chain_array(self):
self._chain_array = np.concatenate(
(self._chain_array, self._get_zero_chain_array()), axis=0
)
self._chain_array_length = len(self._chain_array)
@property
def current_sample(self):
return self._current_sample.copy()
@current_sample.setter
def current_sample(self, current_sample):
self._current_sample = current_sample
def append(self, sample):
self.position += 1
# Extend the array if needed
if self.position >= self._chain_array_length:
self._extend_chain_array()
# Store the current sample and append to the array
self.current_sample = sample
self._chain_array[self.position] = sample.list
# Update the maximum log_likelihood
if sample[LOGLKEY] > self.max_log_likelihood:
self.max_log_likelihood = sample[LOGLKEY]
def __getitem__(self, index):
if index < 0:
index = index + self.position + 1
if index <= self.position:
values = self._chain_array[index]
return Sample({k: v for k, v in zip(self.keys, values)})
else:
raise SamplerError(f"Requested index {index} out of bounds")
def __setitem__(self, index, sample):
if index < 0:
index = index + self.position + 1
self._chain_array[index] = sample.list
def key_to_idx(self, key):
return self.keys.index(key)
def get_1d_array(self, key):
return self._chain_array[: 1 + self.position, self.key_to_idx(key)]
@property
def _random_idx(self):
mindex = self._last_minimum_index[1]
# Check if mindex exceeds current position by 10 ACT: if so use a random sample
# otherwise we draw only from the chain past the minimum_index
if np.isinf(self.tau_last) or self.position - mindex < 10 * self.tau_last:
mindex = 0
return np.random.randint(mindex, self.position + 1)
@property
def random_sample(self):
return self[self._random_idx]
@property
def fixed_discard(self):
return self._fixed_discard
@fixed_discard.setter
def fixed_discard(self, fixed_discard):
self._fixed_discard = int(fixed_discard)
@property
def minimum_index(self):
"""This calculated a minimum index from which to discard samples
A number of methods are provided for the calculation. A subset are
switched off (by `if False` statements) for future development
"""
position = self.position
# Return cached minimum index
last_minimum_index = self._last_minimum_index
if position == last_minimum_index[0]:
return int(last_minimum_index[1])
# If fixed discard is not yet reached, just return that
if position < self.fixed_discard:
self.minimum_index_method = "FD"
return self.fixed_discard
# Initialize list of minimum index methods with the fixed discard (FD)
minimum_index_list = [self.fixed_discard]
minimum_index_method_list = ["FD"]
# Calculate minimum index from tau
if self.tau_last < np.inf:
tau = self.tau_last
elif len(self.max_tau_dict) == 0:
# Bootstrap calculating tau when minimum index has not yet been calculated
tau = self._tau_for_full_chain
else:
tau = np.inf
if tau < np.inf:
minimum_index_list.append(self.burn_in_nact * tau)
minimum_index_method_list.append(f"{self.burn_in_nact}tau")
# Calculate points when log-posterior is within z std of the mean
if True:
zfactor = 1
N = 100
delta_lnP = zfactor * self.ndim / 2
logl = self.get_1d_array(LOGLKEY)
log_prior = self.get_1d_array(LOGPKEY)
log_posterior = logl + log_prior
max_posterior = np.max(log_posterior)
ave = pd.Series(log_posterior).rolling(window=N).mean().iloc[N - 1 :]
delta = max_posterior - ave
passes = ave[delta < delta_lnP]
if len(passes) > 0:
minimum_index_list.append(passes.index[0] + 1)
minimum_index_method_list.append(f"z{zfactor}")
# Add last minimum_index_method
if False:
minimum_index_list.append(last_minimum_index[1])
minimum_index_method_list.append(last_minimum_index[2])
# Minimum index set by proposals
minimum_index_list.append(self.minimum_index_proposal)
minimum_index_method_list.append("PR")
# Minimum index set by temperature adaptation
minimum_index_list.append(self.minimum_index_adapt)
minimum_index_method_list.append("AD")
# Calculate the maximum minimum index and associated method (reporting)
minimum_index = int(np.max(minimum_index_list))
minimum_index_method = minimum_index_method_list[np.argmax(minimum_index_list)]
# Cache the method
self._last_minimum_index = (position, minimum_index, minimum_index_method)
self.minimum_index_method = minimum_index_method
return minimum_index
@property
def minimum_index_proposal(self):
return self._minimum_index_proposal
@minimum_index_proposal.setter
def minimum_index_proposal(self, minimum_index_proposal):
if minimum_index_proposal > self._minimum_index_proposal:
self._minimum_index_proposal = minimum_index_proposal
@property
def minimum_index_adapt(self):
return self._minimum_index_adapt
@minimum_index_adapt.setter
def minimum_index_adapt(self, minimum_index_adapt):
if minimum_index_adapt > self._minimum_index_adapt:
self._minimum_index_adapt = minimum_index_adapt
@property
def tau(self):
""" The maximum ACT over all parameters """
if self.position in self.max_tau_dict:
# If we have the ACT at the current position, return it
return self.max_tau_dict[self.position]
elif (
self.tau_last < np.inf
and self.cached_tau_count < 50
and self.nsamples_last > 50
):
# If we have a recent ACT return it
self.cached_tau_count += 1
return self.tau_last
else:
# Calculate the ACT
return self.tau_nocache
@property
def tau_nocache(self):
""" Calculate tau forcing a recalculation (no cached tau) """
tau = max(self.tau_dict.values())
self.max_tau_dict[self.position] = tau
self.cached_tau_count = 0
return tau
@property
def tau_last(self):
""" Return the last-calculated tau if it exists, else inf """
if len(self.max_tau_dict) > 0:
return list(self.max_tau_dict.values())[-1]
else:
return np.inf
@property
def _tau_for_full_chain(self):
""" The maximum ACT over all parameters """
return max(self._tau_dict_for_full_chain.values())
@property
def _tau_dict_for_full_chain(self):
return self._calculate_tau_dict(minimum_index=0)
@property
def tau_dict(self):
""" Calculate a dictionary of tau (ACT) for every parameter """
return self._calculate_tau_dict(self.minimum_index)
def _calculate_tau_dict(self, minimum_index):
""" Calculate a dictionary of tau (ACT) for every parameter """
logger.debug(f"Calculating tau_dict {self}")
# If there are too few samples to calculate tau
if (self.position - minimum_index) < 2 * self.autocorr_c:
return {key: np.inf for key in self.parameter_keys}
# Choose minimum index for the ACT calculation
last_tau = self.tau_last
if self.tau_window is not None and last_tau < np.inf:
minimum_index_for_act = max(
minimum_index, int(self.position - self.tau_window * last_tau)
)
else:
minimum_index_for_act = minimum_index
# Calculate a dictionary of tau's for each parameter
taus = {}
for key in self.parameter_keys:
if self.fixed_tau is None:
x = self.get_1d_array(key)[minimum_index_for_act:]
tau = calculate_tau(x, self.autocorr_c)
taux = round(tau, 1)
else:
taux = self.fixed_tau
taus[key] = max(taux, self.min_tau)
# Cache the last tau dictionary for future use
self.last_full_tau_dict = taus
return taus
@property
def thin(self):
if np.isfinite(self.tau):
return np.max([1, int(self.thin_by_nact * self.tau)])
else:
return 1
@property
def nsamples(self):
nuseable_steps = self.position - self.minimum_index
return int(nuseable_steps / (self.thin_by_nact * self.tau))
@property
def nsamples_last(self):
nuseable_steps = self.position - self.minimum_index
return int(nuseable_steps / (self.thin_by_nact * self.tau_last))
@property
def samples(self):
samples = self._chain_array[self.minimum_index : self.position : self.thin]
return pd.DataFrame(samples, columns=self.keys)
def plot(self, outdir=".", label="label", priors=None, all_samples=None):
import matplotlib.pyplot as plt
fig, axes = plt.subplots(
nrows=self.ndim + 3, ncols=2, figsize=(8, 9 + 3 * (self.ndim))
)
scatter_kwargs = dict(
lw=0,
marker="o",
)
K = 1000
nburn = self.minimum_index
plot_setups = zip(
[0, nburn, nburn],
[nburn, self.position, self.position],
[1, 1, self.thin], # Thin-by factor
["tab:red", "tab:grey", "tab:blue"], # Color
[0.5, 0.05, 0.5], # Alpha
[1, 1, 1], # Marker size
)
position_indexes = np.arange(self.position + 1)
# Plot the traceplots
for (start, stop, thin, color, alpha, ms) in plot_setups:
for ax, key in zip(axes[:, 0], self.keys):
xx = position_indexes[start:stop:thin] / K
yy = self.get_1d_array(key)[start:stop:thin]
# Downsample plots to max_pts: avoid memory issues
max_pts = 10000
while len(xx) > max_pts:
xx = xx[::2]
yy = yy[::2]
ax.plot(
xx,
yy,
color=color,
alpha=alpha,
ms=ms,
**scatter_kwargs,
)
ax.set_ylabel(self._get_plot_label_by_key(key, priors))
if key not in [LOGLKEY, LOGPKEY]:
msg = r"$\tau=$" + f"{self.last_full_tau_dict[key]:0.1f}"
ax.set_title(msg)
# Plot the histograms
for ax, key in zip(axes[:, 1], self.keys):
if all_samples is not None:
yy_all = all_samples[key]
ax.hist(yy_all, bins=50, alpha=0.6, density=True, color="k")
yy = self.get_1d_array(key)[nburn : self.position : self.thin]
ax.hist(yy, bins=50, alpha=0.8, density=True)
ax.set_xlabel(self._get_plot_label_by_key(key, priors))
# Add x-axes labels to the traceplots
axes[-1, 0].set_xlabel(r"Iteration $[\times 10^{3}]$")
# Plot the calculated ACT
ax = axes[-1, 0]
tausit = np.array(list(self.max_tau_dict.keys()) + [self.position]) / K
taus = list(self.max_tau_dict.values()) + [self.tau_last]
ax.plot(tausit, taus, color="C3")
ax.set(ylabel=r"Maximum $\tau$")
axes[-1, 1].set_axis_off()
filename = "{}/{}_checkpoint_trace.png".format(outdir, label)
msg = [
r"Maximum $\tau$" + f"={self.tau:0.1f} ",
r"$n_{\rm samples}=$" + f"{self.nsamples} ",
]
if self.thin_by_nact != 1:
msg += [
r"$n_{\rm samples}^{\rm eff}=$"
+ f"{int(self.nsamples * self.thin_by_nact)} "
]
fig.suptitle(
"| ".join(msg),
y=1,
)
fig.tight_layout()
fig.savefig(filename, dpi=200)
plt.close(fig)
@staticmethod
def _get_plot_label_by_key(key, priors=None):
if priors is not None and key in priors:
return priors[key].latex_label
elif key == LOGLKEY:
return LOGLLATEXKEY
elif key == LOGPKEY:
return LOGPLATEXKEY
else:
return key
class Sample(object):
def __init__(self, sample_dict):
"""A single sample
Parameters
----------
sample_dict: dict
A dictionary of the sample
"""
self.sample_dict = sample_dict
self.keys = list(sample_dict.keys())
self.parameter_keys = [k for k in self.keys if k not in [LOGPKEY, LOGLKEY]]
self.ndim = len(self.parameter_keys)
def __getitem__(self, key):
return self.sample_dict[key]
def __setitem__(self, key, value):
self.sample_dict[key] = value
if key not in self.keys:
self.keys = list(self.sample_dict.keys())
@property
def list(self):
return list(self.sample_dict.values())
def __repr__(self):
return str(self.sample_dict)
@property
def parameter_only_dict(self):
return {key: self.sample_dict[key] for key in self.parameter_keys}
@property
def dict(self):
return {key: self.sample_dict[key] for key in self.keys}
def as_dict(self, keys=None):
sdict = self.dict
if keys is None:
return sdict
else:
return {key: sdict[key] for key in keys}
def __eq__(self, other_sample):
return self.list == other_sample.list
def copy(self):
return Sample(self.sample_dict.copy())
def calculate_tau(x, autocorr_c=5):
import emcee
if LooseVersion(emcee.__version__) < LooseVersion("3"):
raise SamplerError("bilby-mcmc requires emcee > 3.0 for autocorr analysis")
if np.all(np.diff(x) == 0):
return np.inf
try:
# Hard code tol=1: we perform this check internally
tau = emcee.autocorr.integrated_time(x, c=autocorr_c, tol=1)[0]
if np.isnan(tau):
tau = np.inf
return tau
except emcee.autocorr.AutocorrError:
return np.inf
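
For orientation (not part of this merge request), a minimal sketch of how the Chain, Sample and calculate_tau objects above fit together. It assumes the module is importable as bilby.bilby_mcmc.chain and that emcee (>= 3) and pandas are installed; the toy AR(1) chain is purely illustrative.

import numpy as np
from bilby.bilby_mcmc.chain import Chain, Sample, calculate_tau

rng = np.random.default_rng(42)
x = 0.0
chain = Chain(initial_sample=Sample({"x": x, "logl": 0.0, "logp": 0.0}))
for _ in range(5000):
    x = 0.9 * x + rng.normal()  # correlated toy chain (AR(1) process)
    chain.append(Sample({"x": x, "logl": -0.5 * x ** 2, "logp": 0.0}))

print(chain.tau)        # maximum autocorrelation time over the parameters
print(chain.nsamples)   # independent samples left after burn-in and thinning
print(calculate_tau(chain.get_1d_array("x")))  # the underlying emcee estimate
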
import torch
from nflows.distributions.normal import StandardNormal
from nflows.flows.base import Flow
from nflows.nn import nets as nets
from nflows.transforms import (
CompositeTransform,
MaskedAffineAutoregressiveTransform,
RandomPermutation,
)
from nflows.transforms.coupling import (
AdditiveCouplingTransform,
AffineCouplingTransform,
)
from nflows.transforms.normalization import BatchNorm
from torch.nn import functional as F
# Turn off parallelism
torch.set_num_threads(1)
torch.set_num_interop_threads(1)
class NVPFlow(Flow):
"""A simplified version of Real NVP for 1-dim inputs.
This implementation uses 1-dim checkerboard masking but doesn't use
multi-scaling.
Reference:
> L. Dinh et al., Density estimation using Real NVP, ICLR 2017.
This class has been modified from the example found at:
https://github.com/bayesiains/nflows/blob/master/nflows/flows/realnvp.py
"""
def __init__(
self,
features,
hidden_features,
num_layers,
num_blocks_per_layer,
use_volume_preserving=False,
activation=F.relu,
dropout_probability=0.0,
batch_norm_within_layers=False,
batch_norm_between_layers=False,
random_permutation=True,
):
if use_volume_preserving:
coupling_constructor = AdditiveCouplingTransform
else:
coupling_constructor = AffineCouplingTransform
mask = torch.ones(features)
mask[::2] = -1
def create_resnet(in_features, out_features):
return nets.ResidualNet(
in_features,
out_features,
hidden_features=hidden_features,
num_blocks=num_blocks_per_layer,
activation=activation,
dropout_probability=dropout_probability,
use_batch_norm=batch_norm_within_layers,
)
layers = []
for _ in range(num_layers):
transform = coupling_constructor(
mask=mask, transform_net_create_fn=create_resnet
)
layers.append(transform)
mask *= -1
if batch_norm_between_layers:
layers.append(BatchNorm(features=features))
if random_permutation:
layers.append(RandomPermutation(features=features))
super().__init__(
transform=CompositeTransform(layers),
distribution=StandardNormal([features]),
)
class BasicFlow(Flow):
def __init__(self, features):
transform = CompositeTransform(
[
MaskedAffineAutoregressiveTransform(
features=features, hidden_features=2 * features
),
RandomPermutation(features=features),
]
)
distribution = StandardNormal(shape=[features])
super().__init__(
transform=transform,
distribution=distribution,
)
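
As a quick illustration (assuming torch and nflows are installed and the module is importable as bilby.bilby_mcmc.flows), the flows defined above can be instantiated and sampled directly, even before any training:

import torch
from bilby.bilby_mcmc.flows import BasicFlow, NVPFlow

flow = NVPFlow(features=2, hidden_features=8, num_layers=2, num_blocks_per_layer=2)
samples = flow.sample(5)                      # draw 5 samples from the (untrained) flow
log_prob = flow.log_prob(torch.randn(5, 2))   # evaluate the flow's log-density
print(samples.shape, log_prob.shape)          # torch.Size([5, 2]) torch.Size([5])

basic = BasicFlow(features=2)                 # the lighter-weight alternative
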
from collections import namedtuple
LOGLKEY = "logl"
LOGLLATEXKEY = r"$\log\mathcal{L}$"
LOGPKEY = "logp"
LOGPLATEXKEY = r"$\log\pi$"
ConvergenceInputs = namedtuple(
"ConvergenceInputs",
[
"autocorr_c",
"burn_in_nact",
"thin_by_nact",
"fixed_discard",
"target_nsamples",
"stop_after_convergence",
"L1steps",
"L2steps",
"min_tau",
"fixed_tau",
"tau_window",
],
)
ParallelTemperingInputs = namedtuple(
"ParallelTemperingInputs",
[
"ntemps",
"nensemble",
"Tmax",
"Tmax_from_SNR",
"initial_betas",
"adapt",
"adapt_t0",
"adapt_nu",
"pt_ensemble",
],
)
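
For illustration only (assuming the module is importable as bilby.bilby_mcmc.utils), the convergence settings travel through the sampler as one of these tuples; the values below are arbitrary placeholders, not recommended defaults:

from bilby.bilby_mcmc.utils import ConvergenceInputs

ci = ConvergenceInputs(
    autocorr_c=5, burn_in_nact=10, thin_by_nact=1, fixed_discard=0,
    target_nsamples=1000, stop_after_convergence=False,
    L1steps=100, L2steps=3, min_tau=1, fixed_tau=None, tau_window=None,
)
print(ci.target_nsamples)  # fields are accessed by name like any namedtuple
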
......@@ -191,8 +191,10 @@ class Grid(object):
places = self.sample_points[name]
if len(places) > 1:
dx = np.diff(places)
out = np.apply_along_axis(
logtrapzexp, axis, log_array, places[1] - places[0])
logtrapzexp, axis, log_array, dx
)
else:
# no marginalisation required, just remove the singleton dimension
z = log_array.shape
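
The change above passes the full array of bin widths (dx = np.diff(places)) to logtrapzexp rather than a single scalar spacing, so marginalisation also works on non-uniform grids. A self-contained sketch of that log-space trapezoid rule (an illustration, not the bilby implementation):

import numpy as np
from scipy.special import logsumexp

def log_trapz_exp(log_f, dx):
    # log of the trapezoid-rule integral of exp(log_f) with bin widths dx
    log_bins = np.logaddexp(log_f[..., :-1], log_f[..., 1:]) + np.log(dx / 2)
    return logsumexp(log_bins, axis=-1)

places = np.array([0.0, 0.1, 0.3, 0.7, 1.5])   # non-uniform grid
log_f = -0.5 * places ** 2
print(log_trapz_exp(log_f, np.diff(places)))   # uses a different width per bin
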
......@@ -40,7 +40,6 @@ class DeltaFunction(Prior):
=======
float: Rescaled probability, equivalent to peak
"""
self.test_valid_for_rescaling(val)
return self.peak * val ** 0
def prob(self, val):
......@@ -105,7 +104,6 @@ class PowerLaw(Prior):
=======
Union[float, array_like]: Rescaled probability
"""
self.test_valid_for_rescaling(val)
if self.alpha == -1:
return self.minimum * np.exp(val * np.log(self.maximum / self.minimum))
else:
......@@ -206,7 +204,6 @@ class Uniform(Prior):
=======
Union[float, array_like]: Rescaled probability
"""
self.test_valid_for_rescaling(val)
return self.minimum + val * (self.maximum - self.minimum)
def prob(self, val):
......@@ -273,7 +270,7 @@ class SymmetricLogUniform(Prior):
def __init__(self, minimum, maximum, name=None, latex_label=None,
unit=None, boundary=None):
"""Symmetric Log-Uniform distribtions with bounds
"""Symmetric Log-Uniform distributions with bounds
This is identical to a Log-Uniform distribution, but mirrored about
the zero-axis and subsequently normalized. As such, the distribution
......@@ -314,7 +311,6 @@ class SymmetricLogUniform(Prior):
=======
Union[float, array_like]: Rescaled probability
"""
self.test_valid_for_rescaling(val)
if isinstance(val, (float, int)):
if val < 0.5:
return -self.maximum * np.exp(-2 * val * np.log(self.maximum / self.minimum))
......@@ -401,7 +397,6 @@ class Cosine(Prior):
This maps to the inverse CDF. This has been analytically solved for this case.
"""
self.test_valid_for_rescaling(val)
norm = 1 / (np.sin(self.maximum) - np.sin(self.minimum))
return np.arcsin(val / norm + np.sin(self.minimum))
......@@ -456,7 +451,6 @@ class Sine(Prior):
This maps to the inverse CDF. This has been analytically solved for this case.
"""
self.test_valid_for_rescaling(val)
norm = 1 / (np.cos(self.minimum) - np.cos(self.maximum))
return np.arccos(np.cos(self.minimum) - val / norm)
......@@ -515,7 +509,6 @@ class Gaussian(Prior):
This maps to the inverse CDF. This has been analytically solved for this case.
"""
self.test_valid_for_rescaling(val)
return self.mu + erfinv(2 * val - 1) * 2 ** 0.5 * self.sigma
def prob(self, val):
......@@ -602,7 +595,6 @@ class TruncatedGaussian(Prior):
This maps to the inverse CDF. This has been analytically solved for this case.
"""
self.test_valid_for_rescaling(val)
return erfinv(2 * val * self.normalisation + erf(
(self.minimum - self.mu) / 2 ** 0.5 / self.sigma)) * 2 ** 0.5 * self.sigma + self.mu
......@@ -695,7 +687,6 @@ class LogNormal(Prior):
This maps to the inverse CDF. This has been analytically solved for this case.
"""
self.test_valid_for_rescaling(val)
return np.exp(self.mu + np.sqrt(2 * self.sigma ** 2) * erfinv(2 * val - 1))
def prob(self, val):
......@@ -790,7 +781,6 @@ class Exponential(Prior):
This maps to the inverse CDF. This has been analytically solved for this case.
"""
self.test_valid_for_rescaling(val)
return -self.mu * log1p(-val)
def prob(self, val):
......@@ -887,7 +877,6 @@ class StudentT(Prior):
This maps to the inverse CDF. This has been analytically solved for this case.
"""
self.test_valid_for_rescaling(val)
if isinstance(val, (float, int)):
if val == 0:
rescaled = -np.inf
......@@ -977,7 +966,6 @@ class Beta(Prior):
This maps to the inverse CDF. This has been analytically solved for this case.
"""
self.test_valid_for_rescaling(val)
return btdtri(self.alpha, self.beta, val) * (self.maximum - self.minimum) + self.minimum
def prob(self, val):
......@@ -1013,7 +1001,7 @@ class Beta(Prior):
return _ln_prob
return -np.inf
else:
_ln_prob_sub = -np.inf * np.ones(val.size)
_ln_prob_sub = np.full_like(val, -np.inf)
idx = np.isfinite(_ln_prob) & (val >= self.minimum) & (val <= self.maximum)
_ln_prob_sub[idx] = _ln_prob[idx]
return _ln_prob_sub
......@@ -1070,7 +1058,6 @@ class Logistic(Prior):
This maps to the inverse CDF. This has been analytically solved for this case.
"""
self.test_valid_for_rescaling(val)
if isinstance(val, (float, int)):
if val == 0:
rescaled = -np.inf
......@@ -1151,7 +1138,6 @@ class Cauchy(Prior):
This maps to the inverse CDF. This has been analytically solved for this case.
"""
self.test_valid_for_rescaling(val)
rescaled = self.alpha + self.beta * np.tan(np.pi * (val - 0.5))
if isinstance(val, (float, int)):
if val == 1:
......@@ -1233,7 +1219,6 @@ class Gamma(Prior):
This maps to the inverse CDF. This has been analytically solved for this case.
"""
self.test_valid_for_rescaling(val)
return gammaincinv(self.k, val) * self.theta
def prob(self, val):
......@@ -1385,8 +1370,6 @@ class FermiDirac(Prior):
.. [1] M. Pitkin, M. Isi, J. Veitch & G. Woan, `arXiv:1705.08978v1
<https:arxiv.org/abs/1705.08978v1>`_, 2017.
"""
self.test_valid_for_rescaling(val)
inv = (-np.exp(-1. * self.r) + (1. + np.exp(self.r)) ** -val +
np.exp(-1. * self.r) * (1. + np.exp(self.r)) ** -val)
......@@ -1440,3 +1423,97 @@ class FermiDirac(Prior):
idx = val >= self.minimum
lnp[idx] = norm - np.logaddexp((val[idx] / self.sigma) - self.r, 0.)
return lnp
class Categorical(Prior):
def __init__(self, ncategories, name=None, latex_label=None,
unit=None, boundary="periodic"):
""" An equal-weighted Categorical prior
Parameters:
-----------
ncategories: int
The number of available categories. The prior mass support is then
integers [0, ncategories - 1].
name: str
See superclass
latex_label: str
See superclass
unit: str
See superclass
"""
minimum = 0
# Small delta added to help with MCMC walking
maximum = ncategories - 1 + 1e-15
super(Categorical, self).__init__(
name=name, latex_label=latex_label, minimum=minimum,
maximum=maximum, unit=unit, boundary=boundary)
self.ncategories = ncategories
self.categories = np.arange(self.minimum, self.maximum)
self.p = 1 / self.ncategories
self.lnp = -np.log(self.ncategories)
def rescale(self, val):
"""
'Rescale' a sample from the unit line element to the categorical prior.
This maps to the inverse CDF. This has been analytically solved for this case.
Parameters
==========
val: Union[float, int, array_like]
Uniform probability
Returns
=======
Union[float, array_like]: Rescaled probability
"""
return np.floor(val * (1 + self.maximum))
def prob(self, val):
"""Return the prior probability of val.
Parameters
==========
val: Union[float, int, array_like]
Returns
=======
float: Prior probability of val
"""
if isinstance(val, (float, int)):
if val in self.categories:
return self.p
else:
return 0
else:
val = np.atleast_1d(val)
probs = np.zeros_like(val, dtype=np.float64)
idxs = np.isin(val, self.categories)
probs[idxs] = self.p
return probs
def ln_prob(self, val):
"""Return the logarithmic prior probability of val
Parameters
==========
val: Union[float, int, array_like]
Returns
=======
float:
"""
if isinstance(val, (float, int)):
if val in self.categories:
return self.lnp
else:
return -np.inf
else:
val = np.atleast_1d(val)
probs = -np.inf * np.ones_like(val, dtype=np.float64)
idxs = np.isin(val, self.categories)
probs[idxs] = self.lnp
return probs
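
A short usage sketch of the new Categorical prior (assuming it is exposed as bilby.core.prior.Categorical); the printed draws are random and shown only as an example:

import numpy as np
from bilby.core.prior import Categorical

prior = Categorical(3)                    # support on the integers {0, 1, 2}
unit = np.random.uniform(0, 1, 5)
print(prior.rescale(unit))                # maps unit-cube draws onto categories, e.g. [2. 0. 1. 1. 0.]
print(prior.prob(1), prior.ln_prob(5))    # 1/3 and -inf (5 is outside the support)
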
......@@ -202,23 +202,6 @@ class Prior(object):
"""
return (val >= self.minimum) & (val <= self.maximum)
@staticmethod
def test_valid_for_rescaling(val):
"""Test if 0 < val < 1
Parameters
==========
val: Union[float, int, array_like]
Raises
=======
ValueError: If val is not between 0 and 1
"""
valarray = np.atleast_1d(val)
tests = (valarray < 0) + (valarray > 1)
if np.any(tests):
raise ValueError("Number to be rescaled should be in [0, 1]")
def __repr__(self):
"""Overrides the special method __repr__.
......@@ -430,7 +413,7 @@ class Prior(object):
Parameters
==========
val: str
The string version of the agument
The string version of the argument
Returns
=======
......@@ -168,8 +168,7 @@ class PriorDict(dict):
for key in ["__module__", "__name__", "__prior_dict__"]:
if key in prior_dict:
del prior_dict[key]
obj = class_(dict())
obj.from_dictionary(prior_dict)
obj = class_(prior_dict)
return obj
@classmethod
......@@ -424,27 +423,38 @@ class PriorDict(dict):
}
return all_samples
def normalize_constraint_factor(self, keys):
def normalize_constraint_factor(self, keys, min_accept=10000, sampling_chunk=50000, nrepeats=10):
if keys in self._cached_normalizations.keys():
return self._cached_normalizations[keys]
else:
min_accept = 1000
sampling_chunk = 5000
factor_estimates = [
self._estimate_normalization(keys, min_accept, sampling_chunk)
for _ in range(nrepeats)
]
factor = np.mean(factor_estimates)
if np.std(factor_estimates) > 0:
decimals = int(-np.floor(np.log10(3 * np.std(factor_estimates))))
factor_rounded = np.round(factor, decimals)
else:
factor_rounded = factor
self._cached_normalizations[keys] = factor_rounded
return factor_rounded
def _estimate_normalization(self, keys, min_accept, sampling_chunk):
samples = self.sample_subset(keys=keys, size=sampling_chunk)
keep = np.atleast_1d(self.evaluate_constraints(samples))
if len(keep) == 1:
self._cached_normalizations[keys] = 1
return 1
all_samples = {key: np.array([]) for key in keys}
while np.count_nonzero(keep) < min_accept:
samples = self.sample_subset(keys=keys, size=sampling_chunk)
keep = np.atleast_1d(self.evaluate_constraints(samples))
if len(keep) == 1:
self._cached_normalizations[keys] = 1
return 1
all_samples = {key: np.array([]) for key in keys}
while np.count_nonzero(keep) < min_accept:
samples = self.sample_subset(keys=keys, size=sampling_chunk)
for key in samples:
all_samples[key] = np.hstack(
[all_samples[key], samples[key].flatten()])
keep = np.array(self.evaluate_constraints(all_samples), dtype=bool)
factor = len(keep) / np.count_nonzero(keep)
self._cached_normalizations[keys] = factor
return factor
for key in samples:
all_samples[key] = np.hstack(
[all_samples[key], samples[key].flatten()])
keep = np.array(self.evaluate_constraints(all_samples), dtype=bool)
factor = len(keep) / np.count_nonzero(keep)
return factor
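
For context, a minimal sketch of what this normalisation estimates, using hypothetical parameter names and a simple ordering constraint (so the true factor is 2); averaging over nrepeats estimates, as introduced above, just reduces the scatter of this number:

from bilby.core.prior import PriorDict, Uniform, Constraint

def convert(parameters):
    parameters["a_minus_b"] = parameters["a"] - parameters["b"]
    return parameters

priors = PriorDict(conversion_function=convert)
priors["a"] = Uniform(0, 1)
priors["b"] = Uniform(0, 1)
priors["a_minus_b"] = Constraint(minimum=0, maximum=1)
print(priors.normalize_constraint_factor(("a", "b")))  # close to 2
print(priors.prob(dict(a=0.3, b=0.1)))                 # now multiplied by that factor
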
def prob(self, sample, **kwargs):
"""
......@@ -469,11 +479,11 @@ class PriorDict(dict):
def check_prob(self, sample, prob):
ratio = self.normalize_constraint_factor(tuple(sample.keys()))
if np.all(prob == 0.):
return prob
return prob * ratio
else:
if isinstance(prob, float):
if self.evaluate_constraints(sample):
return prob
return prob * ratio
else:
return 0.
else:
......@@ -509,7 +519,7 @@ class PriorDict(dict):
else:
if isinstance(ln_prob, float):
if self.evaluate_constraints(sample):
return ln_prob
return ln_prob + np.log(ratio)
else:
return -np.inf
else:
......@@ -518,6 +528,21 @@ class PriorDict(dict):
constrained_ln_prob[keep] = ln_prob[keep] + np.log(ratio)
return constrained_ln_prob
def cdf(self, sample):
"""Evaluate the cumulative distribution function at the provided points
Parameters
----------
sample: dict, pandas.DataFrame
Dictionary of the samples of which to calculate the CDF
Returns
-------
dict, pandas.DataFrame: Dictionary containing the CDF values
"""
return sample.__class__({key: self[key].cdf(sample) for key, sample in sample.items()})
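
A one-line usage sketch of the new cdf method, assuming simple Uniform priors; the result has the same container type as the input:

from bilby.core.prior import PriorDict, Uniform

priors = PriorDict(dict(x=Uniform(0, 2), y=Uniform(-1, 1)))
print(priors.cdf(dict(x=1.0, y=0.0)))   # {'x': 0.5, 'y': 0.5}
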
def rescale(self, keys, theta):
"""Rescale samples from unit cube to prior
......@@ -681,9 +706,7 @@ class ConditionalPriorDict(PriorDict):
float: Joint probability of all individual sample probabilities
"""
self._check_resolved()
for key, value in sample.items():
self[key].least_recently_sampled = value
self._prepare_evaluation(*zip(*sample.items()))
res = [self[key].prob(sample[key], **self.get_required_variables(key)) for key in sample]
prob = np.product(res, **kwargs)
return self.check_prob(sample, prob)
......@@ -703,13 +726,16 @@ class ConditionalPriorDict(PriorDict):
float: Joint log probability of all the individual sample probabilities
"""
self._check_resolved()
for key, value in sample.items():
self[key].least_recently_sampled = value
self._prepare_evaluation(*zip(*sample.items()))
res = [self[key].ln_prob(sample[key], **self.get_required_variables(key)) for key in sample]
ln_prob = np.sum(res, axis=axis)
return self.check_ln_prob(sample, ln_prob)
def cdf(self, sample):
self._prepare_evaluation(*zip(*sample.items()))
res = {key: self[key].cdf(sample[key], **self.get_required_variables(key)) for key in sample}
return sample.__class__(res)
def rescale(self, keys, theta):
"""Rescale samples from unit cube to prior
......@@ -724,12 +750,14 @@ class ConditionalPriorDict(PriorDict):
=======
list: List of floats containing the rescaled sample
"""
keys = list(keys)
theta = list(theta)
self._check_resolved()
self._update_rescale_keys(keys)
result = dict()
for key, index in zip(self.sorted_keys_without_fixed_parameters, self._rescale_indexes):
required_variables = {k: result[k] for k in getattr(self[key], 'required_variables', [])}
result[key] = self[key].rescale(theta[index], **required_variables)
result[key] = self[key].rescale(theta[index], **self.get_required_variables(key))
self[key].least_recently_sampled = result[key]
return [result[key] for key in keys]
def _update_rescale_keys(self, keys):
......@@ -737,6 +765,11 @@ class ConditionalPriorDict(PriorDict):
self._rescale_indexes = [keys.index(element) for element in self.sorted_keys_without_fixed_parameters]
self._least_recently_rescaled_keys = keys
def _prepare_evaluation(self, keys, theta):
self._check_resolved()
for key, value in zip(keys, theta):
self[key].least_recently_sampled = value
def _check_resolved(self):
if not self._resolved:
raise IllegalConditionsException("The current set of priors contains unresolveable conditions.")
......@@ -86,7 +86,6 @@ class Interped(Prior):
This maps to the inverse CDF. This is done using interpolation.
"""
self.test_valid_for_rescaling(val)
rescaled = self.inverse_cumulative_distribution(val)
if rescaled.shape == ():
rescaled = float(rescaled)
......@@ -11,7 +11,7 @@ class BaseJointPriorDist(object):
def __init__(self, names, bounds=None):
"""
A class defining JointPriorDist that will be overwritten with child
classes defining the joint prior distribtuions between given parameters,
classes defining the joint prior distributions between given parameters,
Parameters
......@@ -172,7 +172,7 @@ class BaseJointPriorDist(object):
raise ValueError("Array is the wrong shape")
# check sample(s) is within bounds
outbounds = np.ones(samp.shape[0], dtype=np.bool)
outbounds = np.ones(samp.shape[0], dtype=bool)
for s, bound in zip(samp.T, self.bounds.values()):
outbounds = (s < bound[0]) | (s > bound[1])
if np.any(outbounds):
......@@ -210,7 +210,7 @@ class BaseJointPriorDist(object):
samp: vector
sample to evaluate the ln_prob at
lnprob: vector
of -inf pased in with the same shape as the number of samples
of -inf passed in with the same shape as the number of samples
outbounds: array_like
boolean array showing which samples in lnprob vector are out of the given bounds
......@@ -231,7 +231,7 @@ class BaseJointPriorDist(object):
Parameters
==========
size: int
number of samples to generate, defualts to 1
number of samples to generate, defaults to 1
"""
if size is None:
......@@ -250,7 +250,7 @@ class BaseJointPriorDist(object):
Parameters
==========
size: int
number of samples to generate, defualts to 1
number of samples to generate, defaults to 1
"""
samps = np.zeros((size, len(self)))
"""
......@@ -299,7 +299,7 @@ class BaseJointPriorDist(object):
Parameters
==========
samp: numpy array
this is a vector sample drawn from a uniform distribtuion to be rescaled to the distribution
this is a vector sample drawn from a uniform distribution to be rescaled to the distribution
"""
"""
Here is where the subclass where overwrite rescale method
......@@ -630,7 +630,7 @@ class MultivariateGaussianDist(BaseJointPriorDist):
elif isinstance(self.__dict__[key], (np.ndarray, list)):
thisarr = np.asarray(self.__dict__[key])
otherarr = np.asarray(other.__dict__[key])
if thisarr.dtype == np.float and otherarr.dtype == np.float:
if thisarr.dtype == float and otherarr.dtype == float:
fin1 = np.isfinite(np.asarray(self.__dict__[key]))
fin2 = np.isfinite(np.asarray(other.__dict__[key]))
if not np.array_equal(fin1, fin2):
......@@ -707,10 +707,9 @@ class JointPrior(Prior):
Returns
=======
float:
A sample from the prior paramter.
A sample from the prior parameter.
"""
self.test_valid_for_rescaling(val)
self.dist.rescale_parameters[self.name] = val
if self.dist.filled_rescale():
......@@ -734,7 +733,7 @@ class JointPrior(Prior):
Returns
=======
float:
A sample from the prior paramter.
A sample from the prior parameter.
"""
if self.name in self.dist.sampled_parameters:
import datetime
import inspect
import json
import os
......@@ -22,7 +23,10 @@ from .utils import (
recursively_load_dict_contents_from_group,
recursively_decode_bilby_json,
)
from .prior import Prior, PriorDict, DeltaFunction
from .prior import Prior, PriorDict, DeltaFunction, ConditionalDeltaFunction
EXTENSIONS = ["json", "hdf5", "h5", "pickle", "pkl"]
def result_file_name(outdir, label, extension='json', gzip=False):
......@@ -359,7 +363,7 @@ class Result(object):
The number of times the likelihood function is called
log_prior_evaluations: array_like
The evaluations of the prior for each sample point
sampling_time: float
sampling_time: (datetime.timedelta, float)
The time taken to complete the sampling
nburn: int
The number of burn-in steps discarded for MCMC samplers
......@@ -409,6 +413,8 @@ class Result(object):
self.log_likelihood_evaluations = log_likelihood_evaluations
self.log_prior_evaluations = log_prior_evaluations
self.num_likelihood_evaluations = num_likelihood_evaluations
if isinstance(sampling_time, float):
sampling_time = datetime.timedelta(seconds=sampling_time)
self.sampling_time = sampling_time
self.version = version
self.max_autocorrelation_time = max_autocorrelation_time
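
A tiny illustration of the conversion above: a float number of seconds is promoted on construction, so a float and an explicit timedelta end up stored identically.

import datetime

# What Result now stores when constructed with sampling_time=3600.0
print(datetime.timedelta(seconds=3600.0) == datetime.timedelta(hours=1))  # True
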
......@@ -801,7 +807,7 @@ class Result(object):
def save_posterior_samples(self, filename=None, outdir=None, label=None):
""" Saves posterior samples to a file
Generates a .dat file containing the posterior samples and auxillary
Generates a .dat file containing the posterior samples and auxiliary
data saved in the posterior. Note, strings in the posterior are
removed while complex numbers will be given as absolute values with
abs appended to the column name
......@@ -1399,7 +1405,8 @@ class Result(object):
if priors is None:
return posterior
for key in priors:
if isinstance(priors[key], DeltaFunction):
if isinstance(priors[key], DeltaFunction) and \
not isinstance(priors[key], ConditionalDeltaFunction):
posterior[key] = priors[key].peak
elif isinstance(priors[key], float):
posterior[key] = priors[key]
......@@ -1422,20 +1429,19 @@ class Result(object):
Function which adds in extra parameters to the data frame,
should take the data_frame, likelihood and prior as arguments.
"""
try:
data_frame = self.posterior
except ValueError:
data_frame = pd.DataFrame(
self.samples, columns=self.search_parameter_keys)
data_frame = self._add_prior_fixed_values_to_posterior(
data_frame, priors)
data_frame['log_likelihood'] = getattr(
self, 'log_likelihood_evaluations', np.nan)
if self.log_prior_evaluations is None and priors is not None:
data_frame['log_prior'] = priors.ln_prob(
dict(data_frame[self.search_parameter_keys]), axis=0)
else:
data_frame['log_prior'] = self.log_prior_evaluations
data_frame = pd.DataFrame(
self.samples, columns=self.search_parameter_keys)
data_frame = self._add_prior_fixed_values_to_posterior(
data_frame, priors)
data_frame['log_likelihood'] = getattr(
self, 'log_likelihood_evaluations', np.nan)
if self.log_prior_evaluations is None and priors is not None:
data_frame['log_prior'] = priors.ln_prob(
dict(data_frame[self.search_parameter_keys]), axis=0)
else:
data_frame['log_prior'] = self.log_prior_evaluations
if conversion_function is not None:
if "npool" in inspect.getargspec(conversion_function).args:
data_frame = conversion_function(data_frame, likelihood, priors, npool=npool)
......@@ -1723,7 +1729,7 @@ class ResultList(list):
def __init__(self, results=None):
""" A class to store a list of :class:`bilby.core.result.Result` objects
from equivalent runs on the same data. This provides methods for
outputing combined results.
outputting combined results.
Parameters
==========
......@@ -1775,7 +1781,7 @@ class ResultList(list):
# check which kind of sampler was used: MCMC or Nested Sampling
if result._nested_samples is not None:
posteriors, result = self._combine_nested_sampled_runs(result)
elif result.sampler in ["bilbymcmc"]:
elif result.sampler in ["bilby_mcmc", "bilbymcmc"]:
posteriors, result = self._combine_mcmc_sampled_runs(result)
else:
posteriors = [res.posterior for res in self]
......@@ -1819,7 +1825,7 @@ class ResultList(list):
result.log_evidence = logsumexp(log_evidences, b=1. / len(self))
result.log_bayes_factor = result.log_evidence - result.log_noise_evidence
# Propogate uncertainty in combined evidence
# Propagate uncertainty in combined evidence
log_errs = [res.log_evidence_err for res in self if np.isfinite(res.log_evidence_err)]
if len(log_errs) > 0:
result.log_evidence_err = 0.5 * logsumexp(2 * np.array(log_errs), b=1. / len(self))
......@@ -1865,7 +1871,7 @@ class ResultList(list):
result.log_evidence = logsumexp(log_evidences, b=1. / len(self))
result.log_bayes_factor = result.log_evidence - result.log_noise_evidence
# Propogate uncertainty in combined evidence
# Propagate uncertainty in combined evidence
log_errs = [res.log_evidence_err for res in self if np.isfinite(res.log_evidence_err)]
if len(log_errs) > 0:
result.log_evidence_err = 0.5 * logsumexp(2 * np.array(log_errs), b=1. / len(self))
......@@ -2135,7 +2141,7 @@ class ResultError(Exception):
class ResultListError(ResultError):
""" For Errors occuring during combining results. """
""" For Errors occurring during combining results. """
class FileMovedError(ResultError):