
Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (130)
Showing 1366 additions and 788 deletions
......@@ -20,14 +20,14 @@ stages:
# Check author list is up to date
authors:
stage: initial
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python39
script:
- python test/check_author_list.py
# Test containers scripts are up to date
containers:
stage: initial
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python39
script:
- cd containers
- python write_dockerfiles.py #HACK
......@@ -38,10 +38,6 @@ containers:
.test-python: &test-python
stage: initial
image: python
before_script:
# this is required because pytables doesn't use a wheel on py37
- apt-get -yqq update
- apt-get -yqq install libhdf5-dev
script:
- python -m pip install .
- python -m pip list installed
......@@ -63,143 +59,128 @@ containers:
${script} --help;
done
# Test basic setup on python 3.7
basic-3.7:
<<: *test-python
image: python:3.7
# Test basic setup on python 3.8
basic-3.8:
<<: *test-python
image: python:3.8
# Test basic setup on python 3.9
basic-3.9:
<<: *test-python
image: python:3.9
basic-3.10:
<<: *test-python
image: python:3.10
precommits-py3.7:
.precommits: &precommits
stage: initial
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
script:
- source activate python37
- mkdir -p .pip37
- source activate $PYVERSION
- mkdir -p $CACHE_DIR
- pip install --upgrade pip
- pip --cache-dir=.pip37 install --upgrade bilby
- pip --cache-dir=.pip37 install .
- pip --cache-dir=.pip37 install pre-commit
- pip --cache-dir=$CACHE_DIR install --upgrade bilby
- pip --cache-dir=$CACHE_DIR install .
- pip --cache-dir=$CACHE_DIR install pre-commit
# Run precommits (flake8, spellcheck, isort, no merge conflicts, etc)
- pre-commit run --all-files --verbose --show-diff-on-failure
precommits-py3.8:
stage: initial
<<: *precommits
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python38
script:
- source activate python38
- mkdir -p .pip38
- pip install --upgrade pip
- pip --cache-dir=.pip38 install --upgrade bilby
- pip --cache-dir=.pip38 install .
- pip --cache-dir=.pip38 install pre-commit
# Run precommits (flake8, spellcheck, isort, no merge conflicts, etc)
- pre-commit run --all-files --verbose --show-diff-on-failure
variables:
CACHE_DIR: ".pip38"
PYVERSION: "python38"
precommits-py3.9:
<<: *precommits
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python39
variables:
CACHE_DIR: ".pip39"
PYVERSION: "python39"
# FIXME: when image builds for 3.10 change this back.
#precommits-py3.10:
# <<: *precommits
# image: containers.ligo.org/lscsoft/bilby/v2-bilby-python310
# variables:
# CACHE_DIR: ".pip310"
# PYVERSION: "python310"
install:
stage: initial
parallel:
matrix:
- EXTRA: [gw, mcmc, all]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python39
script:
- source activate python39
- mkdir -p .pip38
- pip install --upgrade pip
- pip --cache-dir=.pip39 install --upgrade bilby
- pip --cache-dir=.pip39 install .
- pip --cache-dir=.pip39 install pre-commit
# Run precommits (flake8, spellcheck, isort, no merge conflicts, etc)
- pre-commit run --all-files --verbose --show-diff-on-failure
- pip install .[$EXTRA]
# ------------------- Test stage -------------------------------------------
# test example on python 3.7 and build coverage
python-3.7:
.unit-tests: &unit-test
stage: test
needs: ["basic-3.7", "precommits-py3.7"]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
script:
- python -m pip install .
- python -m pip list installed
# Run pyflakes
- flake8 .
# Run tests and collect coverage data
- pytest --cov=bilby --durations 10
- coverage html
- coverage-badge -o coverage_badge.svg -f
artifacts:
paths:
- coverage_badge.svg
- htmlcov/
# test example on python 3.8
python-3.8:
stage: test
<<: *unit-test
needs: ["basic-3.8", "precommits-py3.8"]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python38
script:
- python -m pip install .
- python -m pip list installed
- pytest
# test example on python 3.9
python-3.9:
stage: test
<<: *unit-test
needs: ["basic-3.9", "precommits-py3.9"]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python39
script:
- python -m pip install .
- python -m pip list installed
- pytest
after_script:
- coverage html
- coverage xml
coverage: '/(?i)total.*? (100(?:\.0+)?\%|[1-9]?\d(?:\.\d+)?\%)$/'
artifacts:
reports:
coverage_report:
coverage_format: cobertura
path: coverage.xml
paths:
- htmlcov/
expire_in: 30 days
# add back when 3.10 image is available
#python-3.10:
# <<: *unit-test
# needs: ["basic-3.10", "precommits-py3.10"]
# image: containers.ligo.org/lscsoft/bilby/v2-bilby-python310
# test samplers on python 3.7
python-3.7-samplers:
.test-sampler: &test-sampler
stage: test
needs: ["basic-3.7", "precommits-py3.7"]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
script:
- python -m pip install .
- python -m pip install schwimmbad
- python -m pip list installed
- pytest test/integration/sampler_run_test.py --durations 10
- pytest test/integration/sampler_run_test.py --durations 10 -v
# test samplers on python 3.8
python-3.8-samplers:
stage: test
<<: *test-sampler
needs: ["basic-3.8", "precommits-py3.8"]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python38
script:
- python -m pip install .
- python -m pip list installed
- pytest test/integration/sampler_run_test.py --durations 10
# test samplers on python 3.9
python-3.9-samplers:
stage: test
<<: *test-sampler
needs: ["basic-3.9", "precommits-py3.9"]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python39
script:
- python -m pip install .
- python -m pip list installed
- pytest test/integration/sampler_run_test.py --durations 10
# add back when 3.10 image is available
#python-3.10-samplers:
# <<: *test-sampler
# needs: ["basic-3.10", "precommits-py3.10"]
# image: containers.ligo.org/lscsoft/bilby/v2-bilby-python310
integration-tests-python-3.7:
integration-tests-python-3.9:
stage: test
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
needs: ["basic-3.7", "precommits-py3.7"]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python39
needs: ["basic-3.9", "precommits-py3.9"]
only:
- schedules
script:
......@@ -208,49 +189,47 @@ integration-tests-python-3.7:
# Run tests which are only done on schedule
- pytest test/integration/example_test.py
plotting-python-3.7:
.plotting: &plotting
stage: test
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
needs: ["basic-3.7", "precommits-py3.7"]
only:
- schedules
script:
- python -m pip install .
- python -m pip install "ligo.skymap>=0.5.3"
- python -m pip list installed
- pytest test/gw/plot_test.py
plotting-python-3.8:
<<: *plotting
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python38
needs: ["basic-3.8", "precommits-py3.8"]
plotting-python-3.9:
stage: test
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
<<: *plotting
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python39
needs: ["basic-3.9", "precommits-py3.9"]
only:
- schedules
script:
- python -m pip install .
- python -m pip install "ligo.skymap>=0.5.3"
- python -m pip list installed
- pytest test/gw/plot_test.py
# add back when 3.10 image is available
#plotting-python-3.10:
# <<: *plotting
# image: containers.ligo.org/lscsoft/bilby/v2-bilby-python310
# needs: ["basic-3.10", "precommits-py3.10"]
# ------------------- Docs stage -------------------------------------------
docs:
stage: docs
needs: ["python-3.7"]
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python39
before_script:
- conda install -c conda-forge pandoc -y
- python -m pip install --upgrade ipykernel ipython jupyter nbconvert
- python -m ipykernel install
script:
# Make the documentation
- apt-get -yqq install pandoc
- python -m pip install .
- cd docs
- pip install ipykernel ipython jupyter
- cp ../examples/tutorials/*.ipynb ./
- rm basic_ptmcmc_tutorial.ipynb
- rm compare_samplers.ipynb
- rm visualising_the_results.ipynb
- jupyter nbconvert --to notebook --execute *.ipynb --inplace
- cd examples/tutorials
- jupyter nbconvert --to notebook --execute *.ipynb --output-dir ../../docs
- cd ../../docs
- make clean
- make html
......@@ -260,13 +239,12 @@ docs:
# ------------------- Deploy stage -------------------------------------------
deploy-docs:
pages:
stage: deploy
needs: ["docs", "python-3.7"]
needs: ["docs", "python-3.9"]
script:
- mkdir public/
- mv htmlcov/ public/
- mv coverage_badge.svg public/
- mv docs/_build/html/* public/
artifacts:
paths:
......@@ -275,49 +253,38 @@ deploy-docs:
only:
- master
# Build the containers
build-python37-container:
.build-container: &build-container
stage: deploy
image: docker:19.03.12
image: docker:20.10.12
needs: ["containers"]
only:
- schedules
script:
- cd containers
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
- docker build --tag v3-bilby-python37 - < v3-dockerfile-test-suite-python37
- docker image tag v3-bilby-python37 containers.ligo.org/lscsoft/bilby/v2-bilby-python37:latest
- docker image push containers.ligo.org/lscsoft/bilby/v2-bilby-python37:latest
- docker build --tag v3-bilby-$PYVERSION - < v3-dockerfile-test-suite-$PYVERSION
- docker image tag v3-bilby-$PYVERSION containers.ligo.org/lscsoft/bilby/v2-bilby-$PYVERSION:latest
- docker image push containers.ligo.org/lscsoft/bilby/v2-bilby-$PYVERSION:latest
build-python38-container:
stage: deploy
image: docker:19.03.12
needs: ["containers"]
only:
- schedules
script:
- cd containers
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
- docker build --tag v3-bilby-python38 - < v3-dockerfile-test-suite-python38
- docker image tag v3-bilby-python38 containers.ligo.org/lscsoft/bilby/v2-bilby-python38:latest
- docker image push containers.ligo.org/lscsoft/bilby/v2-bilby-python38:latest
<<: *build-container
variables:
PYVERSION: "python38"
build-python39-container:
stage: deploy
image: docker:19.03.12
needs: ["containers"]
only:
- schedules
script:
- cd containers
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
- docker build --tag v3-bilby-python39 - < v3-dockerfile-test-suite-python39
- docker image tag v3-bilby-python39 containers.ligo.org/lscsoft/bilby/v2-bilby-python39:latest
- docker image push containers.ligo.org/lscsoft/bilby/v2-bilby-python39:latest
<<: *build-container
variables:
PYVERSION: "python39"
# add back when 3.10 image is available
#build-python310-container:
# <<: *build-container
# variables:
# PYVERSION: "python310"
pypi-release:
stage: deploy
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python37
image: containers.ligo.org/lscsoft/bilby/v2-bilby-python39
variables:
TWINE_USERNAME: $PYPI_USERNAME
TWINE_PASSWORD: $PYPI_PASSWORD
......
......@@ -5,13 +5,13 @@ repos:
- id: check-merge-conflict # prevent committing files with merge conflicts
- id: flake8 # checks for flake8 errors
- repo: https://github.com/psf/black
rev: 20.8b1
rev: 22.3.0
hooks:
- id: black
language_version: python3
files: ^bilby/bilby_mcmc/
files: '^(bilby/bilby_mcmc/|bilby/core/sampler/|examples/)'
- repo: https://github.com/codespell-project/codespell
rev: v1.16.0
rev: v2.1.0
hooks:
- id: codespell
args: [--ignore-words=.dictionary.txt]
......@@ -20,4 +20,23 @@ repos:
hooks:
- id: isort # sort imports alphabetically and separates import into sections
args: [-w=88, -m=3, -tc, -sp=setup.cfg ]
files: ^bilby/bilby_mcmc/
files: '^(bilby/bilby_mcmc/|bilby/core/sampler/|examples/)'
- repo: https://github.com/datarootsio/databooks
rev: 0.1.14
hooks:
- id: databooks
name: databooks
description:
"Remove notebook metadata using `databooks`."
entry: databooks meta
language: python
minimum_pre_commit_version: 2.9.2
types: [ jupyter ]
args: [-w]
- repo: local
hooks:
- id: jupyter-nb-clear-output
name: jupyter-nb-clear-output
files: \.ipynb$
language: system
entry: jupyter nbconvert --ClearOutputPreprocessor.enabled=True --inplace
......@@ -33,6 +33,7 @@ James A Clark
Jeremy G Baier
John Veitch
Joshua Brandt
Josh Willis
Katerina Chatziioannou
Kaylee de Soto
Khun Sang Phukon
......@@ -77,3 +78,4 @@ Tathagata Ghosh
Virginia d'Emilio
Vivien Raymond
Ka-Lok Lo
Isaac Legred
\ No newline at end of file
# All notable changes will be documented in this file
## [1.2.0] 2022-08-15
Version 1.2.0 release of Bilby
This is the first release that drops support for `Python<3.8`.
This release involves major refactoring, especially of the sampler implementations.
Additionally, there are a range of improvements to how information is passed
with multiprocessing.
### Added
- Time marginalized ROQ likelihood (!1040)
- Multiple and multi-banded ROQ likelihood (!1093)
- Gaussian process likelihoods (!1086)
- `CBCWaveformGenerator` added with CBC specific defaults (!1080)
### Changes
- Fixes and improvements to multi-processing (!1084, !1043, !1096)
- Major refactoring of sampler implementations (!1043)
- Fixes for reading/writing priors (!1103, !1127, !1128)
- Fixes/updates to example scripts (!1050, !1031, !1076, !1081, !1074)
- Fixes to calibration correction in GW likelihoods (!1114, !1120, !1119)
### Deprecated/removed
- Require `Python>=3.8`
- Require `astropy>=5`
- `bilby.core.utils.conversion.gps_time_to_gmst`
- `bilby.core.utils.spherical_to_cartesian`
- `bilby.core.utils.progress`
- Deepdish IO for `Result`, `Interferometer`, and `InterferometerList`
## [1.1.5] 2022-01-14
Version 1.1.5 release of bilby
Version 1.1.5 release of Bilby
### Added
- Option to enforce that a GW signal fits into the segment duration (!1041)
......
include README.rst
include LICENSE.md
include requirements.txt
include gw_requirements.txt
include mcmc_requirements.txt
include optional_requirements.txt
include sampler_requirements.txt
recursive-include test *.py *.prior
......@@ -73,7 +73,7 @@ as requested in their associated documentation.
.. |pipeline status| image:: https://git.ligo.org/lscsoft/bilby/badges/master/pipeline.svg
:target: https://git.ligo.org/lscsoft/bilby/commits/master
.. |coverage report| image:: https://lscsoft.docs.ligo.org/bilby/coverage_badge.svg
.. |coverage report| image:: https://git.ligo.org/lscsoft/bilby/badges/master/coverage.svg
:target: https://lscsoft.docs.ligo.org/bilby/htmlcov/
.. |pypi| image:: https://badge.fury.io/py/bilby.svg
:target: https://pypi.org/project/bilby/
......
......@@ -253,7 +253,7 @@ class Chain(object):
@property
def tau(self):
""" The maximum ACT over all parameters """
"""The maximum ACT over all parameters"""
if self.position in self.max_tau_dict:
# If we have the ACT at the current position, return it
......@@ -272,7 +272,7 @@ class Chain(object):
@property
def tau_nocache(self):
""" Calculate tau forcing a recalculation (no cached tau) """
"""Calculate tau forcing a recalculation (no cached tau)"""
tau = max(self.tau_dict.values())
self.max_tau_dict[self.position] = tau
self.cached_tau_count = 0
......@@ -280,7 +280,7 @@ class Chain(object):
@property
def tau_last(self):
""" Return the last-calculated tau if it exists, else inf """
"""Return the last-calculated tau if it exists, else inf"""
if len(self.max_tau_dict) > 0:
return list(self.max_tau_dict.values())[-1]
else:
......@@ -288,7 +288,7 @@ class Chain(object):
@property
def _tau_for_full_chain(self):
""" The maximum ACT over all parameters """
"""The maximum ACT over all parameters"""
return max(self._tau_dict_for_full_chain.values())
@property
......@@ -297,11 +297,11 @@ class Chain(object):
@property
def tau_dict(self):
""" Calculate a dictionary of tau (ACT) for every parameter """
"""Calculate a dictionary of tau (ACT) for every parameter"""
return self._calculate_tau_dict(self.minimum_index)
def _calculate_tau_dict(self, minimum_index):
""" Calculate a dictionary of tau (ACT) for every parameter """
"""Calculate a dictionary of tau (ACT) for every parameter"""
logger.debug(f"Calculating tau_dict {self}")
# If there are too few samples to calculate tau
......
......@@ -930,7 +930,7 @@ def _stretch_move(sample, complement, scale, ndim, parameters):
class EnsembleProposal(BaseProposal):
""" Base EnsembleProposal class for ensemble-based swap proposals """
"""Base EnsembleProposal class for ensemble-based swap proposals"""
def __init__(self, priors, weight=1):
super(EnsembleProposal, self).__init__(priors, weight)
......@@ -983,9 +983,12 @@ def get_proposal_cycle(string, priors, L1steps=1, warn=True):
first_fit=1000, nsamples_for_density=10000, fit_multiplier=2
)
all_but_cal = [key for key in priors if "recalib" not in key]
plist = [
AdaptiveGaussianProposal(priors, weight=small_weight),
DifferentialEvolutionProposal(priors, weight=small_weight),
AdaptiveGaussianProposal(priors, weight=small_weight, subset=all_but_cal),
DifferentialEvolutionProposal(
priors, weight=small_weight, subset=all_but_cal
),
]
if GMMProposal.check_dependencies(warn=warn) is False:
......
import datetime
import os
import signal
import time
from collections import Counter
......@@ -8,7 +7,13 @@ import numpy as np
import pandas as pd
from ..core.result import rejection_sample
from ..core.sampler.base_sampler import MCMCSampler, ResumeError, SamplerError
from ..core.sampler.base_sampler import (
MCMCSampler,
ResumeError,
SamplerError,
_sampling_convenience_dump,
signal_wrapper,
)
from ..core.utils import check_directory_exists_and_if_not_mkdir, logger, safe_file_dump
from . import proposals
from .chain import Chain, Sample
......@@ -108,6 +113,8 @@ class Bilby_MCMC(MCMCSampler):
evidence_method: str, [stepping_stone, thermodynamic]
The evidence calculation method to use. Defaults to stepping_stone, but
the results of all available methods are stored in the ln_z_dict.
verbose: bool
Whether to print diagnostic output during the run.
"""
......@@ -129,7 +136,6 @@ class Bilby_MCMC(MCMCSampler):
autocorr_c=5,
L1steps=100,
L2steps=3,
npool=1,
printdt=60,
min_tau=1,
proposal_cycle="default",
......@@ -152,6 +158,7 @@ class Bilby_MCMC(MCMCSampler):
diagnostic=False,
resume=True,
exit_code=130,
verbose=True,
**kwargs,
):
......@@ -169,7 +176,6 @@ class Bilby_MCMC(MCMCSampler):
self.check_point_plot = check_point_plot
self.diagnostic = diagnostic
self.kwargs["target_nsamples"] = self.kwargs["nsamples"]
self.npool = self.kwargs["npool"]
self.L1steps = self.kwargs["L1steps"]
self.L2steps = self.kwargs["L2steps"]
self.pt_inputs = ParallelTemperingInputs(
......@@ -189,17 +195,7 @@ class Bilby_MCMC(MCMCSampler):
self.resume_file = "{}/{}_resume.pickle".format(self.outdir, self.label)
self.verify_configuration()
try:
signal.signal(signal.SIGTERM, self.write_current_state_and_exit)
signal.signal(signal.SIGINT, self.write_current_state_and_exit)
signal.signal(signal.SIGALRM, self.write_current_state_and_exit)
except AttributeError:
logger.debug(
"Setting signal attributes unavailable on this system. "
"This is likely the case if you are running on a Windows machine"
" and is no further concern."
)
self.verbose = verbose
def verify_configuration(self):
if self.convergence_inputs.burn_in_nact / self.kwargs["target_nsamples"] > 0.1:
......@@ -219,6 +215,7 @@ class Bilby_MCMC(MCMCSampler):
def target_nsamples(self):
return self.kwargs["target_nsamples"]
@signal_wrapper
def run_sampler(self):
self._setup_pool()
self.setup_chain_set()
......@@ -373,31 +370,12 @@ class Bilby_MCMC(MCMCSampler):
f"setup:\n{self.get_setup_string()}"
)
def write_current_state_and_exit(self, signum=None, frame=None):
"""
Make sure that if a pool of jobs is running only the parent tries to
checkpoint and exit. Only the parent has a 'pool' attribute.
"""
if self.npool == 1 or getattr(self, "pool", None) is not None:
if signum == 14:
logger.info(
"Run interrupted by alarm signal {}: checkpoint and exit on {}".format(
signum, self.exit_code
)
)
else:
logger.info(
"Run interrupted by signal {}: checkpoint and exit on {}".format(
signum, self.exit_code
)
)
self.write_current_state()
self._close_pool()
os._exit(self.exit_code)
def write_current_state(self):
import dill
if not hasattr(self, "ptsampler"):
logger.debug("Attempted checkpoint before initialization")
return
logger.debug("Check point")
check_directory_exists_and_if_not_mkdir(self.outdir)
......@@ -486,7 +464,8 @@ class Bilby_MCMC(MCMCSampler):
rse = 100 * count / nsamples
msg += f"|rse={rse:0.2f}%"
print(msg, flush=True)
if self.verbose:
print(msg, flush=True)
def print_per_proposal(self):
logger.info("Zero-temperature proposals:")
......@@ -529,39 +508,6 @@ class Bilby_MCMC(MCMCSampler):
all_samples=ptsampler.samples,
)
def _setup_pool(self):
if self.npool > 1:
logger.info(f"Setting up multiproccesing pool with {self.npool} processes")
import multiprocessing
self.pool = multiprocessing.Pool(
processes=self.npool,
initializer=_initialize_global_variables,
initargs=(
self.likelihood,
self.priors,
self._search_parameter_keys,
self.use_ratio,
),
)
else:
self.pool = None
_initialize_global_variables(
likelihood=self.likelihood,
priors=self.priors,
search_parameter_keys=self._search_parameter_keys,
use_ratio=self.use_ratio,
)
def _close_pool(self):
if getattr(self, "pool", None) is not None:
logger.info("Starting to close worker pool.")
self.pool.close()
self.pool.join()
self.pool = None
logger.info("Finished closing worker pool.")
class BilbyPTMCMCSampler(object):
def __init__(
......@@ -574,7 +520,6 @@ class BilbyPTMCMCSampler(object):
use_ratio,
evidence_method,
):
self.set_pt_inputs(pt_inputs)
self.use_ratio = use_ratio
self.setup_sampler_dictionary(convergence_inputs, proposal_cycle)
......@@ -592,7 +537,7 @@ class BilbyPTMCMCSampler(object):
self._nsamples_dict = {}
self.ensemble_proposal_cycle = proposals.get_default_ensemble_proposal_cycle(
_priors
_sampling_convenience_dump.priors
)
self.sampling_time = 0
self.ln_z_dict = dict()
......@@ -607,9 +552,9 @@ class BilbyPTMCMCSampler(object):
elif pt_inputs.Tmax is not None:
betas = np.logspace(0, -np.log10(pt_inputs.Tmax), pt_inputs.ntemps)
elif pt_inputs.Tmax_from_SNR is not None:
ndim = len(_priors.non_fixed_keys)
ndim = len(_sampling_convenience_dump.priors.non_fixed_keys)
target_hot_likelihood = ndim / 2
Tmax = pt_inputs.Tmax_from_SNR ** 2 / (2 * target_hot_likelihood)
Tmax = pt_inputs.Tmax_from_SNR**2 / (2 * target_hot_likelihood)
betas = np.logspace(0, -np.log10(Tmax), pt_inputs.ntemps)
else:
raise SamplerError("Unable to set temperature ladder from inputs")
......@@ -653,7 +598,7 @@ class BilbyPTMCMCSampler(object):
@property
def sampler_list(self):
""" A list of all individual samplers """
"""A list of all individual samplers"""
return [s for item in self.sampler_dictionary.values() for s in item]
@sampler_list.setter
......@@ -1130,17 +1075,22 @@ class BilbyMCMCSampler(object):
Eindex=0,
use_ratio=False,
):
from ..core.sampler.base_sampler import _sampling_convenience_dump
self._sampling_helper = _sampling_convenience_dump
self.beta = beta
self.Tindex = Tindex
self.Eindex = Eindex
self.use_ratio = use_ratio
self.parameters = _priors.non_fixed_keys
self.parameters = _sampling_convenience_dump.priors.non_fixed_keys
self.ndim = len(self.parameters)
full_sample_dict = _priors.sample()
full_sample_dict = _sampling_convenience_dump.priors.sample()
initial_sample = {
k: v for k, v in full_sample_dict.items() if k in _priors.non_fixed_keys
k: v
for k, v in full_sample_dict.items()
if k in _sampling_convenience_dump.priors.non_fixed_keys
}
initial_sample = Sample(initial_sample)
initial_sample[LOGLKEY] = self.log_likelihood(initial_sample)
......@@ -1163,7 +1113,10 @@ class BilbyMCMCSampler(object):
warn = False
self.proposal_cycle = proposals.get_proposal_cycle(
proposal_cycle, _priors, L1steps=self.chain.L1steps, warn=warn
proposal_cycle,
_sampling_convenience_dump.priors,
L1steps=self.chain.L1steps,
warn=warn,
)
elif isinstance(proposal_cycle, proposals.ProposalCycle):
self.proposal_cycle = proposal_cycle
......@@ -1180,17 +1133,17 @@ class BilbyMCMCSampler(object):
self.stop_after_convergence = convergence_inputs.stop_after_convergence
def log_likelihood(self, sample):
_likelihood.parameters.update(sample.sample_dict)
_sampling_convenience_dump.likelihood.parameters.update(sample.sample_dict)
if self.use_ratio:
logl = _likelihood.log_likelihood_ratio()
logl = _sampling_convenience_dump.likelihood.log_likelihood_ratio()
else:
logl = _likelihood.log_likelihood()
logl = _sampling_convenience_dump.likelihood.log_likelihood()
return logl
def log_prior(self, sample):
return _priors.ln_prob(sample.parameter_only_dict)
return _sampling_convenience_dump.priors.ln_prob(sample.parameter_only_dict)
def accept_proposal(self, prop, proposal):
self.chain.append(prop)
......@@ -1288,8 +1241,10 @@ class BilbyMCMCSampler(object):
zerotemp_logl = hot_samples[LOGLKEY]
# Revert to true likelihood if needed
if _use_ratio:
zerotemp_logl += _likelihood.noise_log_likelihood()
if _sampling_convenience_dump.use_ratio:
zerotemp_logl += (
_sampling_convenience_dump.likelihood.noise_log_likelihood()
)
# Calculate normalised weights
log_weights = (1 - beta) * zerotemp_logl
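# Note: a chain at inverse temperature beta samples L**beta * prior, so
# weighting each sample by L**(1 - beta) recovers the beta = 1 posterior;
# in log space that is the (1 - beta) * logL term above.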
......@@ -1317,29 +1272,3 @@ class BilbyMCMCSampler(object):
def call_step(sampler):
sampler = sampler.step()
return sampler
_likelihood = None
_priors = None
_search_parameter_keys = None
_use_ratio = False
def _initialize_global_variables(
likelihood,
priors,
search_parameter_keys,
use_ratio,
):
"""
Store a global copy of the likelihood, priors, and search keys for
multiprocessing.
"""
global _likelihood
global _priors
global _search_parameter_keys
global _use_ratio
_likelihood = likelihood
_priors = priors
_search_parameter_keys = search_parameter_keys
_use_ratio = use_ratio
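These removed globals are superseded by `_sampling_convenience_dump`, imported from `base_sampler` earlier in this diff. A minimal sketch of the replacement pattern, with attribute names inferred from the usages shown here (not bilby's exact implementation):

class _SamplingContainer:
    """Namespace shared with multiprocessing workers at pool initialization."""
    def __init__(self):
        self.likelihood = None
        self.priors = None
        self.search_parameter_keys = None
        self.use_ratio = False

_sampling_convenience_dump = _SamplingContainer()

def _initialize(likelihood, priors, search_parameter_keys, use_ratio):
    # Intended to run once per worker via multiprocessing.Pool(initializer=...)
    _sampling_convenience_dump.likelihood = likelihood
    _sampling_convenience_dump.priors = priors
    _sampling_convenience_dump.search_parameter_keys = search_parameter_keys
    _sampling_convenience_dump.use_ratio = use_ratio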
import json
import os
from collections import OrderedDict
import numpy as np
......@@ -352,7 +351,7 @@ class Grid(object):
'label', 'outdir', 'parameter_names', 'n_dims', 'priors',
'sample_points', 'ln_likelihood', 'ln_evidence',
'ln_noise_evidence']
dictionary = OrderedDict()
dictionary = dict()
for attr in save_attrs:
try:
dictionary[attr] = getattr(self, attr)
......@@ -437,7 +436,7 @@ class Grid(object):
grid: bilby.core.grid.Grid
Raises
=======
======
ValueError: If no filename is given and either outdir or label is None
If no bilby.core.grid.Grid is found in the path
......
......@@ -4,7 +4,7 @@ import numpy as np
from scipy.special import gammaln, xlogy
from scipy.stats import multivariate_normal
from .utils import infer_parameters_from_function
from .utils import infer_parameters_from_function, infer_args_from_function_except_n_args
class Likelihood(object):
......@@ -108,13 +108,14 @@ class Analytical1DLikelihood(Likelihood):
value is given).
"""
def __init__(self, x, y, func):
def __init__(self, x, y, func, **kwargs):
parameters = infer_parameters_from_function(func)
super(Analytical1DLikelihood, self).__init__(dict.fromkeys(parameters))
super(Analytical1DLikelihood, self).__init__(dict())
self.x = x
self.y = y
self._func = func
self._function_keys = list(self.parameters.keys())
self._function_keys = [key for key in parameters if key not in kwargs]
self.kwargs = kwargs
def __repr__(self):
return self.__class__.__name__ + '(x={}, y={}, func={})'.format(self.x, self.y, self.func.__name__)
......@@ -164,11 +165,11 @@ class Analytical1DLikelihood(Likelihood):
@property
def residual(self):
""" Residual of the function against the data. """
return self.y - self.func(self.x, **self.model_parameters)
return self.y - self.func(self.x, **self.model_parameters, **self.kwargs)
class GaussianLikelihood(Analytical1DLikelihood):
def __init__(self, x, y, func, sigma=None):
def __init__(self, x, y, func, sigma=None, **kwargs):
"""
A general Gaussian likelihood for known or unknown noise - the model
parameters are inferred from the arguments of function
......@@ -190,7 +191,7 @@ class GaussianLikelihood(Analytical1DLikelihood):
to that for `x` and `y`.
"""
super(GaussianLikelihood, self).__init__(x=x, y=y, func=func)
super(GaussianLikelihood, self).__init__(x=x, y=y, func=func, **kwargs)
self.sigma = sigma
# Check if sigma was provided, if not it is a parameter
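With the `**kwargs` forwarding added above, fixed keyword arguments are passed through to `func` without becoming sampled parameters. A hedged usage sketch (the model and numbers are illustrative, not from the source):

import numpy as np
from bilby.core.likelihood import GaussianLikelihood

def model(x, m, c, scale=1.0):
    # straight line with a fixed overall scale factor
    return scale * (m * x + c)

x = np.linspace(0, 1, 101)
y = model(x, m=2.0, c=0.5) + np.random.normal(0, 0.1, len(x))

# scale=1.0 is consumed as a fixed kwarg; only m and c remain free
likelihood = GaussianLikelihood(x, y, model, sigma=0.1, scale=1.0)
likelihood.parameters.update({"m": 2.0, "c": 0.5})
print(likelihood.log_likelihood())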
......@@ -229,7 +230,7 @@ class GaussianLikelihood(Analytical1DLikelihood):
class PoissonLikelihood(Analytical1DLikelihood):
def __init__(self, x, y, func):
def __init__(self, x, y, func, **kwargs):
"""
A general Poisson likelihood for a rate - the model parameters are
inferred from the arguments of function, which provides a rate.
......@@ -251,10 +252,10 @@ class PoissonLikelihood(Analytical1DLikelihood):
fixed value is given).
"""
super(PoissonLikelihood, self).__init__(x=x, y=y, func=func)
super(PoissonLikelihood, self).__init__(x=x, y=y, func=func, **kwargs)
def log_likelihood(self):
rate = self.func(self.x, **self.model_parameters)
rate = self.func(self.x, **self.model_parameters, **self.kwargs)
if not isinstance(rate, np.ndarray):
raise ValueError(
"Poisson rate function returns wrong value type! "
......@@ -286,7 +287,7 @@ class PoissonLikelihood(Analytical1DLikelihood):
class ExponentialLikelihood(Analytical1DLikelihood):
def __init__(self, x, y, func):
def __init__(self, x, y, func, **kwargs):
"""
An exponential likelihood function.
......@@ -302,10 +303,10 @@ class ExponentialLikelihood(Analytical1DLikelihood):
value is given). The model should return the expected mean of
the exponential distribution for each data point.
"""
super(ExponentialLikelihood, self).__init__(x=x, y=y, func=func)
super(ExponentialLikelihood, self).__init__(x=x, y=y, func=func, **kwargs)
def log_likelihood(self):
mu = self.func(self.x, **self.model_parameters)
mu = self.func(self.x, **self.model_parameters, **self.kwargs)
if np.any(mu < 0.):
return -np.inf
return -np.sum(np.log(mu) + (self.y / mu))
......@@ -328,7 +329,7 @@ class ExponentialLikelihood(Analytical1DLikelihood):
class StudentTLikelihood(Analytical1DLikelihood):
def __init__(self, x, y, func, nu=None, sigma=1.):
def __init__(self, x, y, func, nu=None, sigma=1, **kwargs):
"""
A general Student's t-likelihood for known or unknown number of degrees
of freedom, and known or unknown scale (which tends toward the standard
......@@ -357,7 +358,7 @@ class StudentTLikelihood(Analytical1DLikelihood):
Set the scale of the distribution. If not given then this defaults
to 1, which specifies a standard (central) Student's t-distribution
"""
super(StudentTLikelihood, self).__init__(x=x, y=y, func=func)
super(StudentTLikelihood, self).__init__(x=x, y=y, func=func, **kwargs)
self.nu = nu
self.sigma = sigma
......@@ -406,7 +407,7 @@ class Multinomial(Likelihood):
Likelihood for system with N discrete possibilities.
"""
def __init__(self, data, n_dimensions, label="parameter_"):
def __init__(self, data, n_dimensions, base="parameter_"):
"""
Parameters
......@@ -415,19 +416,21 @@ class Multinomial(Likelihood):
The number of objects in each class
n_dimensions: int
The number of classes
base: str
The base of the parameter labels
"""
self.data = np.array(data)
self._total = np.sum(self.data)
super(Multinomial, self).__init__(dict())
self.n = n_dimensions
self.label = label
self.base = base
self._nll = None
def log_likelihood(self):
"""
Since n - 1 parameters are sampled, the last parameter is 1 - the rest
"""
probs = [self.parameters[self.label + str(ii)]
probs = [self.parameters[self.base + str(ii)]
for ii in range(self.n - 1)]
probs.append(1 - sum(probs))
return self._multinomial_ln_pdf(probs=probs)
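A brief usage sketch of the renamed `base` argument (counts and values are illustrative):

from bilby.core.likelihood import Multinomial

# Three categories: two free probabilities; the third is one minus their sum
likelihood = Multinomial(data=[10, 20, 70], n_dimensions=3, base="p_")
likelihood.parameters.update({"p_0": 0.1, "p_1": 0.2})  # implies p_2 = 0.7
print(likelihood.log_likelihood())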
......@@ -567,5 +570,166 @@ class JointLikelihood(Likelihood):
return sum([likelihood.noise_log_likelihood() for likelihood in self.likelihoods])
def function_to_celerite_mean_model(func):
from celerite.modeling import Model as CeleriteModel
return _function_to_gp_model(func, CeleriteModel)
def function_to_george_mean_model(func):
from george.modeling import Model as GeorgeModel
return _function_to_gp_model(func, GeorgeModel)
def _function_to_gp_model(func, cls):
class MeanModel(cls):
parameter_names = tuple(infer_args_from_function_except_n_args(func=func, n=1))
def get_value(self, t):
params = {name: getattr(self, name) for name in self.parameter_names}
return func(t, **params)
def compute_gradient(self, *args, **kwargs):
pass
return MeanModel
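A hedged sketch of how this factory is used (assumes `celerite` is installed; the model function is illustrative):

def line(t, m, c):
    # the first argument (t) is excluded from parameter_names by n=1 above
    return m * t + c

LineMeanModel = function_to_celerite_mean_model(line)
mean_model = LineMeanModel(m=0.5, c=1.0)  # celerite-style named parameters
print(mean_model.get_value(2.0))          # 0.5 * 2.0 + 1.0 = 2.0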
class _GPLikelihood(Likelihood):
def __init__(self, kernel, mean_model, t, y, yerr=1e-6, gp_class=None):
"""
Basic Gaussian Process likelihood interface for `celerite` and `george`.
For `celerite` documentation see: https://celerite.readthedocs.io/en/stable/
For `george` documentation see: https://george.readthedocs.io/en/latest/
Parameters
==========
kernel: Union[celerite.term.Term, george.kernels.Kernel]
`celerite` or `george` kernel. See the respective package documentation about the usage.
mean_model: Union[celerite.modeling.Model, george.modeling.Model]
Mean model
t: array_like
The `times` or `x` values of the data set.
y: array_like
The `y` values of the data set.
yerr: float, int, array_like, optional
The error values on the y-values. If a single value is given, it is assumed that the value
applies for all y-values. Default is 1e-6, effectively assuming that no y-errors are present.
gp_class: type, None, optional
GPClass to use. This is determined by the child class used to instantiate the GP. Should usually
not be given by the user and is mostly used for testing
"""
self.kernel = kernel
self.mean_model = mean_model
self.t = np.array(t)
self.y = np.array(y)
self.yerr = np.array(yerr)
self.GPClass = gp_class
self.gp = self.GPClass(kernel=self.kernel, mean=self.mean_model, fit_mean=True, fit_white_noise=True)
self.gp.compute(self.t, yerr=self.yerr)
super().__init__(parameters=self.gp.get_parameter_dict())
def set_parameters(self, parameters):
"""
Safely set a set of parameters to the internal instances of the `gp` and `mean_model`, as well as the
`parameters` dict.
Parameters
==========
parameters: dict, pandas.DataFrame
The set of parameters we would like to set.
"""
for name, value in parameters.items():
try:
self.gp.set_parameter(name=name, value=value)
except ValueError:
pass
self.parameters[name] = value
class CeleriteLikelihood(_GPLikelihood):
def __init__(self, kernel, mean_model, t, y, yerr=1e-6):
"""
Basic Gaussian Process likelihood interface for `celerite` and `george`.
For `celerite` documentation see: https://celerite.readthedocs.io/en/stable/
For `george` documentation see: https://george.readthedocs.io/en/latest/
Parameters
==========
kernel: celerite.term.Term
`celerite` or `george` kernel. See the respective package documentation about the usage.
mean_model: celerite.modeling.Model
Mean model
t: array_like
The `times` or `x` values of the data set.
y: array_like
The `y` values of the data set.
yerr: float, int, array_like, optional
The error values on the y-values. If a single value is given, it is assumed that the value
applies for all y-values. Default is 1e-6, effectively assuming that no y-errors are present.
"""
import celerite
super().__init__(kernel=kernel, mean_model=mean_model, t=t, y=y, yerr=yerr, gp_class=celerite.GP)
def log_likelihood(self):
"""
Calculate the log-likelihood for the Gaussian process given the current parameters.
Returns
=======
float: The log-likelihood value.
"""
self.gp.set_parameter_vector(vector=np.array(list(self.parameters.values())))
try:
return self.gp.log_likelihood(self.y)
except Exception:
return -np.inf
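A minimal end-to-end sketch, assuming `celerite` is installed (kernel choice and data are illustrative, not from the source):

import celerite
import numpy as np

t = np.sort(np.random.uniform(0, 10, 50))
y = np.sin(t) + np.random.normal(0, 0.1, len(t))

kernel = celerite.terms.RealTerm(log_a=0.0, log_c=0.0)
MeanModel = function_to_celerite_mean_model(lambda t, amplitude: amplitude * np.sin(t))
likelihood = CeleriteLikelihood(
    kernel=kernel, mean_model=MeanModel(amplitude=1.0), t=t, y=y, yerr=0.1
)
print(likelihood.log_likelihood())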
class GeorgeLikelihood(_GPLikelihood):
def __init__(self, kernel, mean_model, t, y, yerr=1e-6):
"""
Basic Gaussian Process likelihood interface for `celerite` and `george`.
For `celerite` documentation see: https://celerite.readthedocs.io/en/stable/
For `george` documentation see: https://george.readthedocs.io/en/latest/
Parameters
==========
kernel: george.kernels.Kernel
`celerite` or `george` kernel. See the respective package documentation about the usage.
mean_model: george.modeling.Model
Mean model
t: array_like
The `times` or `x` values of the data set.
y: array_like
The `y` values of the data set.
yerr: float, int, array_like, optional
The error values on the y-values. If a single value is given, it is assumed that the value
applies for all y-values. Default is 1e-6, effectively assuming that no y-errors are present.
"""
import george
super().__init__(kernel=kernel, mean_model=mean_model, t=t, y=y, yerr=yerr, gp_class=george.GP)
def log_likelihood(self):
"""
Calculate the log-likelihood for the Gaussian process given the current parameters.
Returns
=======
float: The log-likelihood value.
"""
for name, value in self.parameters.items():
try:
self.gp.set_parameter(name=name, value=value)
except ValueError:
raise ValueError(f"Parameter {name} not a valid parameter for the GP.")
try:
return self.gp.log_likelihood(self.y)
except Exception:
return -np.inf
class MarginalizedLikelihoodReconstructionError(Exception):
pass
......@@ -1430,8 +1430,8 @@ class Categorical(Prior):
unit=None, boundary="periodic"):
""" An equal-weighted Categorical prior
Parameters:
-----------
Parameters
==========
ncategories: int
The number of available categories. The prior mass support is then
integers [0, ncategories - 1].
......
......@@ -214,10 +214,13 @@ class Prior(object):
"""
prior_name = self.__class__.__name__
prior_module = self.__class__.__module__
instantiation_dict = self.get_instantiation_dict()
args = ', '.join(['{}={}'.format(key, repr(instantiation_dict[key]))
for key in instantiation_dict])
return "{}({})".format(prior_name, args)
args = ', '.join([f'{key}={repr(instantiation_dict[key])}' for key in instantiation_dict])
if "bilby.core.prior" in prior_module:
return f"{prior_name}({args})"
else:
return f"{prior_module}.{prior_name}({args})"
@property
def _repr_dict(self):
......@@ -333,7 +336,7 @@ class Prior(object):
@classmethod
def from_repr(cls, string):
"""Generate the prior from it's __repr__"""
"""Generate the prior from its __repr__"""
return cls._from_repr(string)
@classmethod
......@@ -429,9 +432,9 @@ class Prior(object):
val = None
elif re.sub(r'\'.*\'', '', val) in ['r', 'u']:
val = val[2:-1]
elif "'" in val:
elif val.startswith("'") and val.endswith("'"):
val = val.strip("'")
elif '(' in val:
elif '(' in val and not val.startswith(("[", "{")):
other_cls = val.split('(')[0]
vals = '('.join(val.split('(')[1:])[:-1]
if "." in other_cls:
......
......@@ -371,7 +371,7 @@ class DirichletElement(ConditionalBeta):
self._required_variables = [
label + str(ii) for ii in range(order)
]
self.__class__.__name__ = 'Dirichlet'
self.__class__.__name__ = 'DirichletElement'
def dirichlet_condition(self, reference_parms, **kwargs):
remaining = 1 - sum(
......
import re
import numpy as np
import scipy.stats
from scipy.special import erfinv
......@@ -7,7 +9,6 @@ from ..utils import logger, infer_args_from_method, get_dict_with_properties
class BaseJointPriorDist(object):
def __init__(self, names, bounds=None):
"""
A class defining JointPriorDist that will be overwritten with child
......@@ -24,7 +25,7 @@ class BaseJointPriorDist(object):
A list of bounds on each parameter. The defaults are for bounds at
+/- infinity.
"""
self.distname = 'joint_dist'
self.distname = "joint_dist"
if not isinstance(names, list):
self.names = [names]
else:
......@@ -41,8 +42,9 @@ class BaseJointPriorDist(object):
for bound in bounds:
if isinstance(bounds, (list, tuple, np.ndarray)):
if len(bound) != 2:
raise ValueError("Bounds must contain an upper and "
"lower value.")
raise ValueError(
"Bounds must contain an upper and lower value."
)
else:
if bound[1] <= bound[0]:
raise ValueError("Bounds are not properly set")
......@@ -76,8 +78,7 @@ class BaseJointPriorDist(object):
Check if all requested parameters have been filled.
"""
return not np.any([val is None for val in
self.requested_parameters.values()])
return not np.any([val is None for val in self.requested_parameters.values()])
def reset_request(self):
"""
......@@ -92,8 +93,7 @@ class BaseJointPriorDist(object):
Check if all the rescaled parameters have been filled.
"""
return not np.any([val is None for val in
self.rescale_parameters.values()])
return not np.any([val is None for val in self.rescale_parameters.values()])
def reset_rescale(self):
"""
......@@ -131,8 +131,12 @@ class BaseJointPriorDist(object):
"""
dist_name = self.__class__.__name__
instantiation_dict = self.get_instantiation_dict()
args = ', '.join(['{}={}'.format(key, repr(instantiation_dict[key]))
for key in instantiation_dict])
args = ", ".join(
[
"{}={}".format(key, repr(instantiation_dict[key]))
for key in instantiation_dict
]
)
return "{}({})".format(dist_name, args)
def prob(self, samp):
......@@ -308,9 +312,17 @@ class BaseJointPriorDist(object):
class MultivariateGaussianDist(BaseJointPriorDist):
def __init__(self, names, nmodes=1, mus=None, sigmas=None, corrcoefs=None,
covs=None, weights=None, bounds=None):
def __init__(
self,
names,
nmodes=1,
mus=None,
sigmas=None,
corrcoefs=None,
covs=None,
weights=None,
bounds=None,
):
"""
A class defining a multi-variate Gaussian, allowing multiple modes for
a Gaussian mixture model.
......@@ -359,11 +371,13 @@ class MultivariateGaussianDist(BaseJointPriorDist):
for name in self.names:
bound = self.bounds[name]
if bound[0] != -np.inf or bound[1] != np.inf:
logger.warning("If using bounded ranges on the multivariate "
"Gaussian this will lead to biased posteriors "
"for nested sampling routines that require "
"a prior transform.")
self.distname = 'mvg'
logger.warning(
"If using bounded ranges on the multivariate "
"Gaussian this will lead to biased posteriors "
"for nested sampling routines that require "
"a prior transform."
)
self.distname = "mvg"
self.mus = []
self.covs = []
self.corrcoefs = []
......@@ -385,8 +399,7 @@ class MultivariateGaussianDist(BaseJointPriorDist):
if len(np.shape(sigmas)) == 1:
sigmas = [sigmas]
elif len(np.shape(sigmas)) == 0:
raise ValueError("Must supply a list of standard "
"deviations")
raise ValueError("Must supply a list of standard deviations")
if covs is not None:
if isinstance(covs, np.ndarray):
covs = [covs]
......@@ -404,10 +417,11 @@ class MultivariateGaussianDist(BaseJointPriorDist):
if len(np.shape(corrcoefs)) == 2:
corrcoefs = [np.array(corrcoefs)]
elif len(np.shape(corrcoefs)) != 3:
raise TypeError("List of correlation coefficients the wrong shape")
raise TypeError(
"List of correlation coefficients the wrong shape"
)
elif not isinstance(corrcoefs, list):
raise TypeError("Must pass a list of correlation "
"coefficients")
raise TypeError("Must pass a list of correlation coefficients")
if weights is not None:
if isinstance(weights, (int, float)):
weights = [weights]
......@@ -429,12 +443,11 @@ class MultivariateGaussianDist(BaseJointPriorDist):
sigma = sigmas[i] if sigmas is not None else None
corrcoef = corrcoefs[i] if corrcoefs is not None else None
cov = covs[i] if covs is not None else None
weight = weights[i] if weights is not None else 1.
weight = weights[i] if weights is not None else 1.0
self.add_mode(mu, sigma, corrcoef, cov, weight)
def add_mode(self, mus=None, sigmas=None, corrcoef=None, cov=None,
weight=1.):
def add_mode(self, mus=None, sigmas=None, corrcoef=None, cov=None, weight=1.0):
"""
Add a new mode.
"""
......@@ -455,8 +468,10 @@ class MultivariateGaussianDist(BaseJointPriorDist):
if len(self.covs[-1].shape) != 2:
raise ValueError("Covariance matrix must be a 2d array")
if (self.covs[-1].shape[0] != self.covs[-1].shape[1] or
self.covs[-1].shape[0] != self.num_vars):
if (
self.covs[-1].shape[0] != self.covs[-1].shape[1]
or self.covs[-1].shape[0] != self.num_vars
):
raise ValueError("Covariance shape is inconsistent")
# check matrix is symmetric
......@@ -473,23 +488,25 @@ class MultivariateGaussianDist(BaseJointPriorDist):
self.corrcoefs.append(np.asarray(corrcoef))
if len(self.corrcoefs[-1].shape) != 2:
raise ValueError("Correlation coefficient matrix must be a 2d "
"array.")
if (self.corrcoefs[-1].shape[0] != self.corrcoefs[-1].shape[1] or
self.corrcoefs[-1].shape[0] != self.num_vars):
raise ValueError("Correlation coefficient matrix shape is "
"inconsistent")
raise ValueError(
"Correlation coefficient matrix must be a 2d array."
)
if (
self.corrcoefs[-1].shape[0] != self.corrcoefs[-1].shape[1]
or self.corrcoefs[-1].shape[0] != self.num_vars
):
raise ValueError(
"Correlation coefficient matrix shape is inconsistent"
)
# check matrix is symmetric
if not np.allclose(self.corrcoefs[-1], self.corrcoefs[-1].T):
raise ValueError("Correlation coefficient matrix is not "
"symmetric")
raise ValueError("Correlation coefficient matrix is not symmetric")
# check diagonal is all ones
if not np.all(np.diag(self.corrcoefs[-1]) == 1.):
raise ValueError("Correlation coefficient matrix is not"
"correct")
if not np.all(np.diag(self.corrcoefs[-1]) == 1.0):
raise ValueError("Correlation coefficient matrix is not correct")
try:
self.sigmas.append(list(sigmas)) # standard deviations
......@@ -497,8 +514,10 @@ class MultivariateGaussianDist(BaseJointPriorDist):
raise TypeError("'sigmas' must be a list")
if len(self.sigmas[-1]) != self.num_vars:
raise ValueError("Number of standard deviations must be the "
"same as the number of parameters.")
raise ValueError(
"Number of standard deviations must be the "
"same as the number of parameters."
)
# convert correlation coefficients to covariance matrix
D = self.sigmas[-1] * np.identity(self.corrcoefs[-1].shape[0])
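For reference, the conversion being set up here is the standard one, cov = D @ C @ D with D = diag(sigmas). An illustrative check (numbers are examples):

import numpy as np

sigmas = np.array([1.0, 2.0])
C = np.array([[1.0, 0.5], [0.5, 1.0]])  # correlation matrix
D = sigmas * np.identity(2)             # diag(sigmas), as in the line above
cov = D @ C @ D                         # [[1.0, 1.0], [1.0, 4.0]]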
......@@ -515,18 +534,20 @@ class MultivariateGaussianDist(BaseJointPriorDist):
self.eigvalues.append(evals)
self.eigvectors.append(evecs)
except Exception as e:
raise RuntimeError("Problem getting eigenvalues and vectors: "
"{}".format(e))
raise RuntimeError(
"Problem getting eigenvalues and vectors: {}".format(e)
)
# check eigenvalues are positive
if np.any(self.eigvalues[-1] <= 0.):
raise ValueError("Correlation coefficient matrix is not positive "
"definite")
if np.any(self.eigvalues[-1] <= 0.0):
raise ValueError(
"Correlation coefficient matrix is not positive definite"
)
self.sqeigvalues.append(np.sqrt(self.eigvalues[-1]))
# set the weights
if weight is None:
self.weights.append(1.)
self.weights.append(1.0)
else:
self.weights.append(weight)
......@@ -537,12 +558,13 @@ class MultivariateGaussianDist(BaseJointPriorDist):
self.nmodes += 1
# add multivariate Gaussian
self.mvn.append(scipy.stats.multivariate_normal(mean=self.mus[-1],
cov=self.covs[-1]))
self.mvn.append(
scipy.stats.multivariate_normal(mean=self.mus[-1], cov=self.covs[-1])
)
def _rescale(self, samp, **kwargs):
try:
mode = kwargs['mode']
mode = kwargs["mode"]
except KeyError:
mode = None
......@@ -552,17 +574,17 @@ class MultivariateGaussianDist(BaseJointPriorDist):
else:
mode = np.argwhere(self.cumweights - np.random.rand() > 0)[0][0]
samp = erfinv(2. * samp - 1) * 2. ** 0.5
samp = erfinv(2.0 * samp - 1) * 2.0 ** 0.5
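# Note: erfinv(2u - 1) * sqrt(2) is the inverse CDF of the standard normal,
# mapping unit-cube draws u to independent standard normals before the
# eigenvector rotation below.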
# rotate and scale to the multivariate normal shape
samp = self.mus[mode] + self.sigmas[mode] * np.einsum('ij,kj->ik',
samp * self.sqeigvalues[mode],
self.eigvectors[mode])
samp = self.mus[mode] + self.sigmas[mode] * np.einsum(
"ij,kj->ik", samp * self.sqeigvalues[mode], self.eigvectors[mode]
)
return samp
def _sample(self, size, **kwargs):
try:
mode = kwargs['mode']
mode = kwargs["mode"]
except KeyError:
mode = None
......@@ -620,12 +642,15 @@ class MultivariateGaussianDist(BaseJointPriorDist):
if sorted(self.__dict__.keys()) != sorted(other.__dict__.keys()):
return False
for key in self.__dict__:
if key == 'mvn':
if key == "mvn":
if len(self.__dict__[key]) != len(other.__dict__[key]):
return False
for thismvn, othermvn in zip(self.__dict__[key], other.__dict__[key]):
if (not isinstance(thismvn, scipy.stats._multivariate.multivariate_normal_frozen) or
not isinstance(othermvn, scipy.stats._multivariate.multivariate_normal_frozen)):
if not isinstance(
thismvn, scipy.stats._multivariate.multivariate_normal_frozen
) or not isinstance(
othermvn, scipy.stats._multivariate.multivariate_normal_frozen
):
return False
elif isinstance(self.__dict__[key], (np.ndarray, list)):
thisarr = np.asarray(self.__dict__[key])
......@@ -645,13 +670,44 @@ class MultivariateGaussianDist(BaseJointPriorDist):
return False
return True
@classmethod
def from_repr(cls, string):
"""Generate the distribution from its __repr__"""
return cls._from_repr(string)
@classmethod
def _from_repr(cls, string):
subclass_args = infer_args_from_method(cls.__init__)
string = string.replace(" ", "")
kwargs = cls._split_repr(string)
for key in kwargs:
val = kwargs[key]
if key not in subclass_args:
raise AttributeError(
"Unknown argument {} for class {}".format(key, cls.__name__)
)
else:
kwargs[key.strip()] = Prior._parse_argument_string(val)
return cls(**kwargs)
@classmethod
def _split_repr(cls, string):
string = string.replace(",", ", ")
# see https://stackoverflow.com/a/72146415/1862861
args = re.findall(r"(\w+)=(\[.*?]|{.*?}|\S+)(?=\s*,\s*\w+=|\Z)", string)
kwargs = dict()
for key, arg in args:
kwargs[key.strip()] = arg
return kwargs
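An illustrative run of the regex above (the input string is an example):

import re

string = "names=['a', 'b'], nmodes=1"
print(re.findall(r"(\w+)=(\[.*?]|{.*?}|\S+)(?=\s*,\s*\w+=|\Z)", string))
# [('names', "['a', 'b']"), ('nmodes', '1')]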
class MultivariateNormalDist(MultivariateGaussianDist):
""" A synonym for the :class:`~bilby.core.prior.MultivariateGaussianDist` distribution."""
"""A synonym for the :class:`~bilby.core.prior.MultivariateGaussianDist` distribution."""
class JointPrior(Prior):
def __init__(self, dist, name=None, latex_label=None, unit=None):
"""This defines the single parameter Prior object for parameters that belong to a JointPriorDist
......@@ -667,14 +723,23 @@ class JointPrior(Prior):
See superclass
"""
if BaseJointPriorDist not in dist.__class__.__bases__:
raise TypeError("Must supply a JointPriorDist object instance to be shared by all joint params")
raise TypeError(
"Must supply a JointPriorDist object instance to be shared by all joint params"
)
if name not in dist.names:
raise ValueError("'{}' is not a parameter in the JointPriorDist".format(name))
raise ValueError(
"'{}' is not a parameter in the JointPriorDist".format(name)
)
self.dist = dist
super(JointPrior, self).__init__(name=name, latex_label=latex_label, unit=unit, minimum=dist.bounds[name][0],
maximum=dist.bounds[name][1])
super(JointPrior, self).__init__(
name=name,
latex_label=latex_label,
unit=unit,
minimum=dist.bounds[name][0],
maximum=dist.bounds[name][1],
)
@property
def minimum(self):
......@@ -737,9 +802,11 @@ class JointPrior(Prior):
"""
if self.name in self.dist.sampled_parameters:
logger.warning("You have already drawn a sample from parameter "
"'{}'. The same sample will be "
"returned".format(self.name))
logger.warning(
"You have already drawn a sample from parameter "
"'{}'. The same sample will be "
"returned".format(self.name)
)
if len(self.dist.current_sample) == 0:
# generate a sample
......@@ -779,16 +846,22 @@ class JointPrior(Prior):
# check for the same number of values for each parameter
for i in range(len(self.dist) - 1):
if (isinstance(values[i], (list, np.ndarray)) or
isinstance(values[i + 1], (list, np.ndarray))):
if (isinstance(values[i], (list, np.ndarray)) and
isinstance(values[i + 1], (list, np.ndarray))):
if isinstance(values[i], (list, np.ndarray)) or isinstance(
values[i + 1], (list, np.ndarray)
):
if isinstance(values[i], (list, np.ndarray)) and isinstance(
values[i + 1], (list, np.ndarray)
):
if len(values[i]) != len(values[i + 1]):
raise ValueError("Each parameter must have the same "
"number of requested values.")
raise ValueError(
"Each parameter must have the same "
"number of requested values."
)
else:
raise ValueError("Each parameter must have the same "
"number of requested values.")
raise ValueError(
"Each parameter must have the same "
"number of requested values."
)
lnp = self.dist.ln_prob(np.asarray(values).T)
......@@ -798,16 +871,16 @@ class JointPrior(Prior):
else:
# if not all parameters have been requested yet, just return 0
if isinstance(val, (float, int)):
return 0.
return 0.0
else:
try:
# check value has a length
len(val)
except Exception as e:
raise TypeError('Invalid type for ln_prob: {}'.format(e))
raise TypeError("Invalid type for ln_prob: {}".format(e))
if len(val) == 1:
return 0.
return 0.0
else:
return np.zeros_like(val)
......@@ -831,14 +904,18 @@ class JointPrior(Prior):
class MultivariateGaussian(JointPrior):
def __init__(self, dist, name=None, latex_label=None, unit=None):
if not isinstance(dist, MultivariateGaussianDist):
raise JointPriorDistError("dist object must be instance of MultivariateGaussianDist")
super(MultivariateGaussian, self).__init__(dist=dist, name=name, latex_label=latex_label, unit=unit)
raise JointPriorDistError(
"dist object must be instance of MultivariateGaussianDist"
)
super(MultivariateGaussian, self).__init__(
dist=dist, name=name, latex_label=latex_label, unit=unit
)
class MultivariateNormal(MultivariateGaussian):
""" A synonym for the :class:`bilby.core.prior.MultivariateGaussian`
prior distribution."""
"""A synonym for the :class:`bilby.core.prior.MultivariateGaussian`
prior distribution."""
class JointPriorDistError(PriorException):
""" Class for Error handling of JointPriorDists for JointPriors """
"""Class for Error handling of JointPriorDists for JointPriors"""
......@@ -2,7 +2,7 @@ import datetime
import inspect
import json
import os
from collections import OrderedDict, namedtuple
from collections import namedtuple
from copy import copy
from importlib import import_module
from itertools import product
......@@ -363,7 +363,7 @@ class Result(object):
The number of times the likelihood function is called
log_prior_evaluations: array_like
The evaluations of the prior for each sample point
sampling_time: (datetime.timedelta, float)
sampling_time: datetime.timedelta, float
The time taken to complete the sampling
nburn: int
The number of burn-in steps discarded for MCMC samplers
......@@ -381,7 +381,7 @@ class Result(object):
this information is generated when the result object is initialized
Notes
=========
=====
All sampling output parameters, e.g. the samples themselves are
typically not given at initialisation, but set at a later stage.
......@@ -422,65 +422,6 @@ class Result(object):
self.prior_values = None
self._kde = None
@classmethod
def _from_hdf5_old(cls, filename=None, outdir=None, label=None):
""" Read in a saved .h5 data file in the old format.
Parameters
==========
filename: str
If given, try to load from this filename
outdir, label: str
If given, use the default naming convention for saved results file
Returns
=======
result: bilby.core.result.Result
Raises
=======
ValueError: If no filename is given and either outdir or label is None
If no bilby.core.result.Result is found in the path
"""
import deepdish
filename = _determine_file_name(filename, outdir, label, 'hdf5', False)
if os.path.isfile(filename):
dictionary = deepdish.io.load(filename)
# Some versions of deepdish/pytables return the dictionary as
# a dictionary with a key 'data'
if len(dictionary) == 1 and 'data' in dictionary:
dictionary = dictionary['data']
if "priors" in dictionary:
# parse priors from JSON string (allowing for backwards
# compatibility)
if not isinstance(dictionary["priors"], PriorDict):
try:
priordict = PriorDict()
for key, value in dictionary["priors"].items():
if key not in ["__module__", "__name__", "__prior_dict__"]:
try:
priordict[key] = decode_bilby_json(value)
except AttributeError:
continue
dictionary["priors"] = priordict
except Exception as e:
raise IOError(
"Unable to parse priors from '{}':\n{}".format(
filename, e,
)
)
try:
if isinstance(dictionary.get('posterior', None), dict):
dictionary['posterior'] = pd.DataFrame(dictionary['posterior'])
return cls(**dictionary)
except TypeError as e:
raise IOError("Unable to load dictionary, error={}".format(e))
else:
raise IOError("No result '{}' found".format(filename))
_load_doctstring = """ Read in a saved .{format} data file
Parameters
@@ -495,7 +436,7 @@ class Result(object):
result: bilby.core.result.Result
Raises
=======
======
ValueError: If no filename is given and either outdir or label is None
If no bilby.core.result.Result is found in the path
@@ -516,8 +457,6 @@ class Result(object):
filename = _determine_file_name(filename, outdir, label, 'hdf5', False)
with h5py.File(filename, "r") as ff:
data = recursively_load_dict_contents_from_group(ff, '/')
if list(data.keys()) == ["data"]:
return cls._from_hdf5_old(filename=filename)
data["posterior"] = pd.DataFrame(data["posterior"])
data["priors"] = PriorDict._get_from_json_dict(
json.loads(data["priors"], object_hook=decode_bilby_json)
@@ -724,7 +663,7 @@ class Result(object):
'num_likelihood_evaluations', 'samples', 'nested_samples',
'walkers', 'nburn', 'parameter_labels', 'parameter_labels_with_unit',
'version']
dictionary = OrderedDict()
dictionary = dict()
for attr in save_attrs:
try:
dictionary[attr] = getattr(self, attr)
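# The switch from OrderedDict to a plain dict above is safe because, from
# Python 3.7 onwards, the built-in dict preserves insertion order:
attrs = dict()
attrs["label"] = "run1"
attrs["outdir"] = "outdir"
assert list(attrs) == ["label", "outdir"]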
@@ -1158,7 +1097,7 @@ class Result(object):
to override the outdir set by the absolute path of the result object.
Notes
-----
=====
The generation of the corner plot themselves is done by the corner
python module, see https://corner.readthedocs.io for more
information.
@@ -1397,7 +1336,7 @@ class Result(object):
ax.set_ylabel(ylabel)
handles, labels = plt.gca().get_legend_handles_labels()
by_label = OrderedDict(zip(labels, handles))
by_label = dict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys())
ax.legend(numpoints=3)
fig.tight_layout()
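# The dict(zip(...)) idiom above deduplicates legend entries: repeated labels
# collapse to one key each, keeping the last handle seen for that label.
labels = ["posterior", "injection", "posterior"]
handles = ["h0", "h1", "h2"]
by_label = dict(zip(labels, handles))
assert by_label == {"posterior": "h2", "injection": "h1"}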
@@ -1496,8 +1435,11 @@ class Result(object):
if keys is None:
keys = self.search_parameter_keys
if self.injection_parameters is None:
raise(TypeError, "Result object has no 'injection_parameters'. "
"Cannot compute credible levels.")
raise TypeError(
"Result object has no 'injection_parameters'. "
"Cannot compute credible levels."
)
credible_levels = {key: self.get_injection_credible_level(key, weights=weights)
for key in keys
if isinstance(self.injection_parameters.get(key, None), float)}
@@ -1523,8 +1465,11 @@ class Result(object):
float: credible level
"""
if self.injection_parameters is None:
raise(TypeError, "Result object has no 'injection_parameters'. "
"Cannot copmute credible levels.")
raise TypeError(
"Result object has no 'injection_parameters'. "
"Cannot compute credible levels."
)
if weights is None:
weights = np.ones(len(self.posterior))
@@ -1910,7 +1855,7 @@ class ResultList(list):
raise ResultListError("Inconsistent parameters between results")
def check_consistent_data(self):
if not np.all([res.log_noise_evidence == self[0].log_noise_evidence for res in self])\
if not np.allclose([res.log_noise_evidence for res in self], self[0].log_noise_evidence, atol=1e-8, rtol=0.0)\
and not np.all([np.isnan(res.log_noise_evidence) for res in self]):
raise ResultListError("Inconsistent data between results")
@@ -2038,6 +1983,8 @@ def make_pp_plot(results, filename=None, save=True, confidence_interval=[0.68, 0
The font size for the legend
keys: list
A list of keys to use, if None defaults to search_parameter_keys
title: bool
Whether to add the number of results and total p-value as a plot title
confidence_interval_alpha: float, list, optional
The transparency for the background confidence interval
weight_list: list, optional
@@ -2059,11 +2006,12 @@ def make_pp_plot(results, filename=None, save=True, confidence_interval=[0.68, 0
if weight_list is None:
weight_list = [None] * len(results)
credible_levels = pd.DataFrame()
credible_levels = list()
for i, result in enumerate(results):
credible_levels = credible_levels.append(
result.get_all_injection_credible_levels(keys, weights=weight_list[i]),
ignore_index=True)
credible_levels.append(
result.get_all_injection_credible_levels(keys, weights=weight_list[i])
)
credible_levels = pd.DataFrame(credible_levels)
if lines is None:
colors = ["C{}".format(i) for i in range(8)]
import datetime
import inspect
import sys
import datetime
from collections import OrderedDict
import bilby
from ..utils import command_line_args, logger, loaded_modules_dict
from ..prior import PriorDict, DeltaFunction
from bilby.bilby_mcmc import Bilby_MCMC
from ..prior import DeltaFunction, PriorDict
from ..utils import command_line_args, loaded_modules_dict, logger
from . import proposal
from .base_sampler import Sampler, SamplingMarginalisedParameterError
from .cpnest import Cpnest
from .dnest4 import DNest4
from .dynamic_dynesty import DynamicDynesty
from .dynesty import Dynesty
from .emcee import Emcee
from .fake_sampler import FakeSampler
from .kombine import Kombine
from .nessai import Nessai
from .nestle import Nestle
@@ -20,11 +24,7 @@ from .ptmcmc import PTMCMCSampler
from .pymc3 import Pymc3
from .pymultinest import Pymultinest
from .ultranest import Ultranest
from .fake_sampler import FakeSampler
from .dnest4 import DNest4
from .zeus import Zeus
from bilby.bilby_mcmc import Bilby_MCMC
from . import proposal
IMPLEMENTED_SAMPLERS = {
"bilby_mcmc": Bilby_MCMC,
@@ -50,7 +50,7 @@ if command_line_args.sampler_help:
sampler = command_line_args.sampler_help
if sampler in IMPLEMENTED_SAMPLERS:
sampler_class = IMPLEMENTED_SAMPLERS[sampler]
print('Help for sampler "{}":'.format(sampler))
print(f'Help for sampler "{sampler}":')
print(sampler_class.__doc__)
else:
if sampler == "None":
@@ -59,8 +59,8 @@ if command_line_args.sampler_help:
"the name of the sampler"
)
else:
print("Requested sampler {} not implemented".format(sampler))
print("Available samplers = {}".format(IMPLEMENTED_SAMPLERS))
print(f"Requested sampler {sampler} not implemented")
print(f"Available samplers = {IMPLEMENTED_SAMPLERS}")
sys.exit()
@@ -82,7 +82,7 @@ def run_sampler(
gzip=False,
result_class=None,
npool=1,
**kwargs
**kwargs,
):
"""
The primary interface to easy parameter estimation
@@ -145,9 +145,7 @@ def run_sampler(
An object containing the results
"""
logger.info(
"Running for label '{}', output will be saved to '{}'".format(label, outdir)
)
logger.info(f"Running for label '{label}', output will be saved to '{outdir}'")
if clean:
command_line_args.clean = clean
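# A hedged end-to-end sketch of the interface documented above, using a toy
# one-parameter likelihood (the sampler settings are illustrative):
import bilby


class ToyLikelihood(bilby.Likelihood):
    def __init__(self):
        super().__init__(parameters={"x": None})

    def log_likelihood(self):
        return -0.5 * self.parameters["x"] ** 2


result = bilby.run_sampler(
    likelihood=ToyLikelihood(),
    priors=dict(x=bilby.core.prior.Uniform(-5, 5, "x")),
    sampler="dynesty",
    nlive=250,
    outdir="outdir",
    label="toy",
)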
@@ -161,12 +159,12 @@ def run_sampler(
_check_marginalized_parameters_not_sampled(likelihood, priors)
if type(priors) in [dict, OrderedDict]:
if type(priors) == dict:
priors = PriorDict(priors)
elif isinstance(priors, PriorDict):
pass
else:
raise ValueError("Input priors not understood")
raise ValueError("Input priors not understood should be dict or PriorDict")
priors.fill_priors(likelihood, default_priors_file=default_priors_file)
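# Note on the type check above: type(priors) == dict is deliberately exact,
# because PriorDict itself subclasses dict and must not be re-wrapped.
from bilby.core.prior import PriorDict, Uniform

priors = PriorDict(dict(x=Uniform(0, 1, "x")))
assert isinstance(priors, dict) and type(priors) is not dict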
@@ -175,7 +173,7 @@ def run_sampler(
meta_data = dict()
likelihood.label = label
likelihood.outdir = outdir
meta_data['likelihood'] = likelihood.meta_data
meta_data["likelihood"] = likelihood.meta_data
meta_data["loaded_modules"] = loaded_modules_dict()
if command_line_args.bilby_zero_likelihood_mode:
@@ -199,11 +197,11 @@ def run_sampler(
plot=plot,
result_class=result_class,
npool=npool,
**kwargs
**kwargs,
)
else:
print(IMPLEMENTED_SAMPLERS)
raise ValueError("Sampler {} not yet implemented".format(sampler))
raise ValueError(f"Sampler {sampler} not yet implemented")
elif inspect.isclass(sampler):
sampler = sampler(
likelihood,
@@ -215,12 +213,12 @@ def run_sampler(
injection_parameters=injection_parameters,
meta_data=meta_data,
npool=npool,
**kwargs
**kwargs,
)
else:
raise ValueError(
"Provided sampler should be a Sampler object or name of a known "
"sampler: {}.".format(", ".join(IMPLEMENTED_SAMPLERS.keys()))
f"sampler: {', '.join(IMPLEMENTED_SAMPLERS.keys())}."
)
if sampler.cached_result:
@@ -241,23 +239,22 @@ def run_sampler(
elif isinstance(result.sampling_time, (float, int)):
result.sampling_time = datetime.timedelta(seconds=result.sampling_time)
logger.info('Sampling time: {}'.format(result.sampling_time))
logger.info(f"Sampling time: {result.sampling_time}")
# Convert sampling time into seconds
result.sampling_time = result.sampling_time.total_seconds()
if sampler.use_ratio:
result.log_noise_evidence = likelihood.noise_log_likelihood()
result.log_bayes_factor = result.log_evidence
result.log_evidence = \
result.log_bayes_factor + result.log_noise_evidence
result.log_evidence = result.log_bayes_factor + result.log_noise_evidence
else:
result.log_noise_evidence = likelihood.noise_log_likelihood()
result.log_bayes_factor = \
result.log_evidence - result.log_noise_evidence
result.log_bayes_factor = result.log_evidence - result.log_noise_evidence
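# The bookkeeping above is just log Z = log BF + log Z_noise: with
# use_ratio=True the sampler returns the Bayes factor directly, otherwise it
# returns the evidence. Illustrative numbers:
log_noise_evidence = -100.0
log_evidence = -95.0
log_bayes_factor = log_evidence - log_noise_evidence
assert log_bayes_factor == 5.0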
if None not in [result.injection_parameters, conversion_function]:
result.injection_parameters = conversion_function(
result.injection_parameters)
result.injection_parameters
)
# Initial save of the result in case of failure in samples_to_posterior
if save:
@@ -268,9 +265,12 @@ def run_sampler(
# Check if the posterior has already been created
if getattr(result, "_posterior", None) is None:
result.samples_to_posterior(likelihood=likelihood, priors=result.priors,
conversion_function=conversion_function,
npool=npool)
result.samples_to_posterior(
likelihood=likelihood,
priors=result.priors,
conversion_function=conversion_function,
npool=npool,
)
if save:
# The overwrite here replaces the initial save with the completed result
@@ -278,7 +278,7 @@ def run_sampler(
if plot:
result.plot_corner()
logger.info("Summary of results:\n{}".format(result))
logger.info(f"Summary of results:\n{result}")
return result
@@ -287,7 +287,5 @@ def _check_marginalized_parameters_not_sampled(likelihood, priors):
if key in priors:
if not isinstance(priors[key], (float, DeltaFunction)):
raise SamplingMarginalisedParameterError(
"Likelihood is {} marginalized but you are trying to sample in {}. ".format(
key, key
)
f"Likelihood is {key} marginalized but you are trying to sample in {key}. "
)
import array
import copy
import sys
import numpy as np
from numpy.lib.recfunctions import structured_to_unstructured
from pandas import DataFrame
from .base_sampler import NestedSampler
from .proposal import Sample, JumpProposalCycle
from ..utils import logger, check_directory_exists_and_if_not_mkdir
from ..utils import check_directory_exists_and_if_not_mkdir, logger
from .base_sampler import NestedSampler, signal_wrapper
from .proposal import JumpProposalCycle, Sample
class Cpnest(NestedSampler):
""" bilby wrapper of cpnest (https://github.com/johnveitch/cpnest)
"""bilby wrapper of cpnest (https://github.com/johnveitch/cpnest)
All positional and keyword arguments (i.e., the args and kwargs) passed to
`run_sampler` will be propagated to `cpnest.CPNest`, see documentation
@@ -39,30 +39,44 @@ class Cpnest(NestedSampler):
{self.outdir}/cpnest_{self.label}/
"""
default_kwargs = dict(verbose=3, nthreads=1, nlive=500, maxmcmc=1000,
seed=None, poolsize=100, nhamiltonian=0, resume=True,
output=None, proposals=None, n_periodic_checkpoint=8000)
default_kwargs = dict(
verbose=3,
nthreads=1,
nlive=500,
maxmcmc=1000,
seed=None,
poolsize=100,
nhamiltonian=0,
resume=True,
output=None,
proposals=None,
n_periodic_checkpoint=8000,
)
hard_exit = True
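# User-supplied kwargs take precedence over the defaults above; the merge
# behaves like a plain dict update (illustrative):
defaults = dict(verbose=3, nlive=500, maxmcmc=1000)
user = dict(nlive=2000, seed=1)
merged = {**defaults, **user}
assert merged["nlive"] == 2000 and merged["verbose"] == 3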
def _translate_kwargs(self, kwargs):
if 'nlive' not in kwargs:
if "nlive" not in kwargs:
for equiv in self.npoints_equiv_kwargs:
if equiv in kwargs:
kwargs['nlive'] = kwargs.pop(equiv)
if 'nthreads' not in kwargs:
kwargs["nlive"] = kwargs.pop(equiv)
if "nthreads" not in kwargs:
for equiv in self.npool_equiv_kwargs:
if equiv in kwargs:
kwargs['nthreads'] = kwargs.pop(equiv)
kwargs["nthreads"] = kwargs.pop(equiv)
if 'seed' not in kwargs:
logger.warning('No seed provided, cpnest will use 1234.')
if "seed" not in kwargs:
logger.warning("No seed provided, cpnest will use 1234.")
@signal_wrapper
def run_sampler(self):
from cpnest import model as cpmodel, CPNest
from cpnest.parameter import LivePoint
from cpnest import CPNest
from cpnest import model as cpmodel
from cpnest.nest2pos import compute_weights
from cpnest.parameter import LivePoint
class Model(cpmodel.Model):
""" A wrapper class to pass our log_likelihood into cpnest """
"""A wrapper class to pass our log_likelihood into cpnest"""
def __init__(self, names, priors):
self.names = names
@@ -82,14 +96,16 @@ class Cpnest(NestedSampler):
def _update_bounds(self):
self.bounds = [
[self.priors[key].minimum, self.priors[key].maximum]
for key in self.names]
for key in self.names
]
def new_point(self):
"""Draw a point from the prior"""
prior_samples = self.priors.sample()
self._update_bounds()
point = LivePoint(
self.names, array.array('d', [prior_samples[name] for name in self.names])
self.names,
array.array("d", [prior_samples[name] for name in self.names]),
)
return point
@@ -105,18 +121,14 @@ class Cpnest(NestedSampler):
kwarg = remove_kwargs.pop(0)
else:
raise TypeError("Unable to initialise cpnest sampler")
logger.info(
"CPNest init. failed with error {}, please update"
.format(e))
logger.info(
"Attempting to rerun with kwarg {} removed".format(kwarg))
logger.info(f"CPNest init. failed with error {e}, please update")
logger.info(f"Attempting to rerun with kwarg {kwarg} removed")
self.kwargs.pop(kwarg)
try:
out.run()
except SystemExit as e:
import sys
logger.info("Caught exit code {}, exiting with signal {}".format(e.args[0], self.exit_code))
sys.exit(self.exit_code)
except SystemExit:
out.checkpoint()
self.write_current_state_and_exit()
if self.plot:
out.plot()
@@ -125,42 +137,58 @@ class Cpnest(NestedSampler):
self.result.samples = structured_to_unstructured(
out.posterior_samples[self.search_parameter_keys]
)
self.result.log_likelihood_evaluations = out.posterior_samples['logL']
self.result.nested_samples = DataFrame(out.get_nested_samples(filename=''))
self.result.nested_samples.rename(columns=dict(logL='log_likelihood'), inplace=True)
_, log_weights = compute_weights(np.array(self.result.nested_samples.log_likelihood),
np.array(out.NS.state.nlive))
self.result.nested_samples['weights'] = np.exp(log_weights)
self.result.log_likelihood_evaluations = out.posterior_samples["logL"]
self.result.nested_samples = DataFrame(out.get_nested_samples(filename=""))
self.result.nested_samples.rename(
columns=dict(logL="log_likelihood"), inplace=True
)
_, log_weights = compute_weights(
np.array(self.result.nested_samples.log_likelihood),
np.array(out.NS.state.nlive),
)
self.result.nested_samples["weights"] = np.exp(log_weights)
self.result.log_evidence = out.NS.state.logZ
self.result.log_evidence_err = np.sqrt(out.NS.state.info / out.NS.state.nlive)
self.result.information_gain = out.NS.state.info
return self.result
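# The error bar assigned above is the standard nested-sampling estimate
# sigma(ln Z) ~ sqrt(H / nlive), with H the information gain, e.g.:
import numpy as np

H, nlive = 20.0, 500
assert np.isclose(np.sqrt(H / nlive), 0.2)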
def write_current_state_and_exit(self, signum=None, frame=None):
"""
Overrides the base class to make sure that :code:`CPNest` terminates
properly as :code:`CPNest` handles all the multiprocessing internally.
"""
self._log_interruption(signum=signum)
sys.exit(self.exit_code)
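# A standalone sketch of the interruption pattern used above (generic names,
# not bilby's internals): trap the signal, checkpoint, then exit with a fixed
# code so batch schedulers can detect and requeue the job.
import signal
import sys


def _interrupt_handler(signum, frame, exit_code=130):
    # a real implementation would checkpoint the sampler state here
    sys.exit(exit_code)


signal.signal(signal.SIGTERM, _interrupt_handler)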
def _verify_kwargs_against_default_kwargs(self):
"""
Set the directory where the output will be written
and check resume and checkpoint status.
"""
if not self.kwargs['output']:
self.kwargs['output'] = \
'{}/cpnest_{}/'.format(self.outdir, self.label)
if self.kwargs['output'].endswith('/') is False:
self.kwargs['output'] = '{}/'.format(self.kwargs['output'])
check_directory_exists_and_if_not_mkdir(self.kwargs['output'])
if self.kwargs['n_periodic_checkpoint'] and not self.kwargs['resume']:
self.kwargs['n_periodic_checkpoint'] = None
if not self.kwargs["output"]:
self.kwargs["output"] = f"{self.outdir}/cpnest_{self.label}/"
if self.kwargs["output"].endswith("/") is False:
self.kwargs["output"] = f"{self.kwargs['output']}/"
check_directory_exists_and_if_not_mkdir(self.kwargs["output"])
if self.kwargs["n_periodic_checkpoint"] and not self.kwargs["resume"]:
self.kwargs["n_periodic_checkpoint"] = None
NestedSampler._verify_kwargs_against_default_kwargs(self)
def _resolve_proposal_functions(self):
from cpnest.proposal import ProposalCycle
if 'proposals' in self.kwargs:
if self.kwargs['proposals'] is None:
if "proposals" in self.kwargs:
if self.kwargs["proposals"] is None:
return
if type(self.kwargs['proposals']) == JumpProposalCycle:
self.kwargs['proposals'] = dict(mhs=self.kwargs['proposals'], hmc=self.kwargs['proposals'])
for key, proposal in self.kwargs['proposals'].items():
if type(self.kwargs["proposals"]) == JumpProposalCycle:
self.kwargs["proposals"] = dict(
mhs=self.kwargs["proposals"], hmc=self.kwargs["proposals"]
)
for key, proposal in self.kwargs["proposals"].items():
if isinstance(proposal, JumpProposalCycle):
self.kwargs['proposals'][key] = cpnest_proposal_cycle_factory(proposal)
self.kwargs["proposals"][key] = cpnest_proposal_cycle_factory(
proposal
)
elif isinstance(proposal, ProposalCycle):
pass
else:
@@ -171,7 +199,6 @@ def cpnest_proposal_factory(jump_proposal):
import cpnest.proposal
class CPNestEnsembleProposal(cpnest.proposal.EnsembleProposal):
def __init__(self, jp):
self.jump_proposal = jp
self.ensemble = None
@@ -181,8 +208,8 @@ def cpnest_proposal_factory(jump_proposal):
def get_sample(self, cpnest_sample, **kwargs):
sample = Sample.from_cpnest_live_point(cpnest_sample)
self.ensemble = kwargs.get('coordinates', self.ensemble)
sample = self.jump_proposal(sample=sample, sampler_name='cpnest', **kwargs)
self.ensemble = kwargs.get("coordinates", self.ensemble)
sample = self.jump_proposal(sample=sample, sampler_name="cpnest", **kwargs)
self.log_J = self.jump_proposal.log_j
return self._update_cpnest_sample(cpnest_sample, sample)
@@ -203,11 +230,11 @@ def cpnest_proposal_cycle_factory(jump_proposals):
def __init__(self):
self.jump_proposals = copy.deepcopy(jump_proposals)
for i, prop in enumerate(self.jump_proposals.proposal_functions):
self.jump_proposals.proposal_functions[i] = cpnest_proposal_factory(prop)
self.jump_proposals.proposal_functions[i] = cpnest_proposal_factory(
prop
)
self.jump_proposals.update_cycle()
super(CPNestProposalCycle, self).__init__(proposals=self.jump_proposals.proposal_functions,
weights=self.jump_proposals.weights,
cyclelength=self.jump_proposals.cycle_length)
super(CPNestProposalCycle, self).__init__(
proposals=self.jump_proposals.proposal_functions,
weights=self.jump_proposals.weights,
cyclelength=self.jump_proposals.cycle_length,
)
def get_sample(self, old, **kwargs):
return self.jump_proposals(sample=old, coordinates=self.ensemble, **kwargs)
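# A minimal standalone sketch of what a proposal "cycle" is, independent of
# the cpnest adaptor above: a weighted, repeating schedule of proposal
# callables applied to the current sample.
import numpy as np

rng = np.random.default_rng(0)
proposals = [lambda x: x + rng.normal(scale=0.1), lambda x: -x]
weights = np.array([0.8, 0.2])
schedule = rng.choice(len(proposals), size=8, p=weights / weights.sum())
x = 1.0
for idx in schedule:
    x = proposals[idx](x)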