Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • john-veitch/bilby
  • duncanmmacleod/bilby
  • colm.talbot/bilby
  • lscsoft/bilby
  • matthew-pitkin/bilby
  • salvatore-vitale/tupak
  • charlie.hoy/bilby
  • bfarr/bilby
  • virginia.demilio/bilby
  • vivien/bilby
  • eric-howell/bilby
  • sebastian-khan/bilby
  • rhys.green/bilby
  • moritz.huebner/bilby
  • joseph.mills/bilby
  • scott.coughlin/bilby
  • matthew.carney/bilby
  • hyungwon.lee/bilby
  • monica.rizzo/bilby
  • christopher-berry/bilby
  • lindsay.demarchi/bilby
  • kaushik.rao/bilby
  • charles.kimball/bilby
  • andrew.matas/bilby
  • juan.calderonbustillo/bilby
  • patrick-meyers/bilby
  • hannah.middleton/bilby
  • eve.chase/bilby
  • grant.meadors/bilby
  • khun.phukon/bilby
  • sumeet.kulkarni/bilby
  • daniel.reardon/bilby
  • cjhaster/bilby
  • sylvia.biscoveanu/bilby
  • james-clark/bilby
  • meg.millhouse/bilby
  • joshua.willis/bilby
  • nikhil.sarin/bilby
  • paul.easter/bilby
  • youngmin/bilby
  • daniel-williams/bilby
  • shanika.galaudage/bilby
  • bruce.edelman/bilby
  • avi.vajpeyi/bilby
  • isobel.romero-shaw/bilby
  • andrew.kim/bilby
  • dominika.zieba/bilby
  • jonathan.davies/bilby
  • marc.arene/bilby
  • srishti.tiwari/bilby-tidal-heating-eccentric
  • aditya.vijaykumar/bilby
  • michael.williams/bilby
  • cecilio.garcia-quiros/bilby
  • rory-smith/bilby
  • maite.mateu-lucena/bilby
  • wushichao/bilby
  • kaylee.desoto/bilby
  • brandon.piotrzkowski/bilby
  • rossella.gamba/bilby
  • hunter.gabbard/bilby
  • deep.chatterjee/bilby
  • tathagata.ghosh/bilby
  • arunava.mukherjee/bilby
  • philip.relton/bilby
  • reed.essick/bilby
  • pawan.gupta/bilby
  • francisco.hernandez/bilby
  • rhiannon.udall/bilby
  • leo.tsukada/bilby
  • will-farr/bilby
  • vijay.varma/bilby
  • jeremy.baier/bilby
  • joshua.brandt/bilby
  • ethan.payne/bilby
  • ka-lok.lo/bilby
  • antoni.ramos-buades/bilby
  • oliviastephany.wilk/bilby
  • jack.heinzel/bilby
  • samson.leong/bilby-psi4
  • viviana.caceres/bilby
  • nadia.qutob/bilby
  • michael-coughlin/bilby
  • hemantakumar.phurailatpam/bilby
  • boris.goncharov/bilby
  • sama.al-shammari/bilby
  • siqi.zhong/bilby
  • jocelyn-read/bilby
  • marc.penuliar/bilby
  • stephanie.letourneau/bilby
  • alexandresebastien.goettel/bilby
  • alec.gunny/bilby
  • serguei.ossokine/bilby
  • pratyusava.baral/bilby
  • sophie.hourihane/bilby
  • eunsub/bilby
  • james.hart/bilby
  • pratyusava.baral/bilby-tg
  • zhaozc/bilby
  • pratyusava.baral/bilby_SoG
  • tomasz.baka/bilby
  • nicogerardo.bers/bilby
  • soumen.roy/bilby
  • isaac.mcmahon/healpix-redundancy
  • asamakai.baker/bilby-frequency-dependent-antenna-pattern-functions
  • anna.puecher/bilby
  • pratyusava.baral/bilby-x-g
  • thibeau.wouters/bilby
  • christian.adamcewicz/bilby
  • raffi.enficiaud/bilby
109 results
Show changes
Commits on Source (73)
Showing
with 824 additions and 492 deletions
......@@ -10,11 +10,13 @@
# before the next stage begins
stages:
- initial
- test
- docs
- deploy
.test-python: &test-python
stage: test
stage: initial
image: python
before_script:
# this is required because pytables doesn't use a wheel on py37
......@@ -43,7 +45,8 @@ basic-3.7:
# test example on python 3.7
python-3.7:
stage: test
image: quay.io/gregory_ashton/bilby_v2-dockerfile-test-suite-python37-frozen
needs: ["basic-3.7", "precommits-py3.7"]
image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
script:
- python -m pip install .
......@@ -55,21 +58,38 @@ python-3.7:
- coverage html
- coverage-badge -o coverage_badge.svg -f
artifacts:
paths:
- coverage_badge.svg
- htmlcov/
docs:
stage: docs
needs: ["basic-3.7"]
image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
script:
# Make the documentation
- apt-get -yqq install pandoc
- python -m pip install .
- cd docs
- pip install ipykernel ipython jupyter
- cp ../examples/tutorials/*.ipynb ./
- rm basic_ptmcmc_tutorial.ipynb
- rm compare_samplers.ipynb
- rm visualising_the_results.ipynb
- jupyter nbconvert --to notebook --execute *.ipynb --inplace
- make clean
- make html
artifacts:
paths:
- htmlcov/
- coverage_badge.svg
- docs/_build/html/
# test example on python 3.8
python-3.8:
stage: test
image: quay.io/gregory_ashton/bilby_v2-dockerfile-test-suite-python38-frozen
needs: ["basic-3.7", "precommits-py3.7"]
image: quay.io/bilbydev/v2-dockerfile-test-suite-python38
script:
- python -m pip install .
......@@ -78,7 +98,8 @@ python-3.8:
# test example on python 3.6
python-3.6:
stage: test
image: quay.io/gregory_ashton/bilby_v2-dockerfile-test-suite-python36-frozen
needs: ["basic-3.7", "precommits-py3.7"]
image: quay.io/bilbydev/v2-dockerfile-test-suite-python36
script:
- python -m pip install .
......@@ -87,7 +108,8 @@ python-3.6:
# test samplers on python 3.7
python-3.7-samplers:
stage: test
image: quay.io/gregory_ashton/bilby_v2-dockerfile-test-suite-python37-frozen
needs: ["basic-3.7", "precommits-py3.7"]
image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
script:
- python -m pip install .
......@@ -97,7 +119,8 @@ python-3.7-samplers:
# test samplers on python 3.6
python-3.6-samplers:
stage: test
image: quay.io/gregory_ashton/bilby_v2-dockerfile-test-suite-python36-frozen
needs: ["basic-3.7", "precommits-py3.7"]
image: quay.io/bilbydev/v2-dockerfile-test-suite-python36
script:
- python -m pip install .
......@@ -105,8 +128,8 @@ python-3.6-samplers:
# Test containers are up to date
containers:
stage: test
image: quay.io/gregory_ashton/bilby_v2-dockerfile-test-suite-python37-frozen
stage: initial
image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
script:
- cd containers
- python write_dockerfiles.py
......@@ -117,7 +140,7 @@ containers:
# Tests run at a fixed schedule rather than on push
scheduled-python-3.7:
stage: test
image: quay.io/gregory_ashton/bilby_v2-dockerfile-test-suite-python37-frozen
image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
only:
- schedules
script:
......@@ -129,7 +152,7 @@ scheduled-python-3.7:
plotting:
stage: test
image: quay.io/gregory_ashton/bilby_v2-dockerfile-test-suite-python37-frozen
image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
only:
- schedules
script:
......@@ -139,14 +162,16 @@ plotting:
- pytest test/gw/plot_test.py
authors:
stage: test
image: quay.io/gregory_ashton/bilby_v2-dockerfile-test-suite-python37-frozen
stage: initial
image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
script:
- python test/check_author_list.py
pages:
stage: deploy
needs: ["docs", "python-3.7"]
dependencies:
- docs
- python-3.7
script:
- mkdir public/
......@@ -162,7 +187,7 @@ pages:
deploy_release:
stage: deploy
image: quay.io/gregory_ashton/bilby_v2-dockerfile-test-suite-python37-frozen
image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
variables:
TWINE_USERNAME: $PYPI_USERNAME
TWINE_PASSWORD: $PYPI_PASSWORD
......@@ -176,8 +201,8 @@ deploy_release:
precommits-py3.7:
stage: test
image: quay.io/gregory_ashton/bilby_v2-dockerfile-test-suite-python37-frozen
stage: initial
image: quay.io/bilbydev/v2-dockerfile-test-suite-python37
script:
- source activate python37
- mkdir -p .pip37
......
# All notable changes will be documented in this file
## [1.0.3] 2020-11-23
## [1.1.0] 2021-03-15
Version 1.1.0 release of bilby
### Added
- Calibration marginalisation using a discrete set of realisations (!856)
- Nessai sampler (!921, !926)
- Capability to sample in aligned spin and spin magnitude (!868)
- Information gain now stored in the result (!907)
- Added option to save result/interferometers as pickle (!925)
- Added functionality to notch data (!898)
- Added LIGO India Aundha (A1) coordinates (!886)
### Changes
- Fixed periodic keys not working when constrained priors are present in pymultinest (!927)
- Some changes to reweighting likelihoods (!851)
- `CBCPriorDict` is now a `ConditionalPriorDict` (!868)
- Fixed hyper PE example (!910)
- Pinned numpy and pandas version number (!916)
- Fixed an issue with GPS times in `cpnest`
- `deepdish` is no longer a requirement since it lost its support (!925)
- Removed annoying warning message due to use of `newcommand` in latex (!924)
- Interpolation should be slightly faster now because we now access interpolation libraries more directly (!917, !923)
- Documentation now builds properly (!915)
- Fixed a bug caused by `loaded_modules_dict` (!920)
- `_ref_dist` is an attribute now which speeds up distance marginalised runs slightly (!913)
- Cache normalisation for `PriorDict` objects without `Constraint` priors (!914)
- Removed some deprecated `__future__` imports (!911)
- Fixed the behaviour of `plot_waveform_posterior` to use representative samples (!894)
- Uses `tqdm.auto` in some samplers now for better progress bars (!895)
- Fixed the correction of the epoch in time domain waveforms when using a segment duration that is not a power of two (!909)
- Fixed `ultranest` failing
- Fixed issues with plotting failing in tests (!904)
- Changed the CI to run on auto-built images (!899)
- Resolved a `matplotlib` error occurring at `dynesty` checkpoint plots (!902)
- Fixed the multidimensional Gaussian example (!901)
- Now allow any lal dictionary option and added a numerical relativity file (!896)
- Fixed the likelihood count in `dynesty` (!853)
- Changed the ordering of keyword arguments for the `Sine` and `Cosine` constructors (!892)
## [1.0.4] 2020-11-23
Version 1.0.4 release of bilby
### Added
......
......@@ -16,7 +16,6 @@ https://lscsoft.docs.ligo.org/bilby/installation.html.
"""
from __future__ import absolute_import
import sys
from . import core, gw, hyper
......
from __future__ import absolute_import
from . import grid, likelihood, prior, result, sampler, series, utils
from __future__ import division
import numpy as np
import os
import json
......@@ -16,7 +14,7 @@ def grid_file_name(outdir, label, gzip=False):
""" Returns the standard filename used for a grid file
Parameters
----------
==========
outdir: str
Name of the output directory
label: str
......@@ -25,7 +23,7 @@ def grid_file_name(outdir, label, gzip=False):
Set to True to append `.gz` to the extension for saving in gzipped format
Returns
-------
=======
str: File name of the output file
"""
if gzip:
......@@ -41,7 +39,7 @@ class Grid(object):
"""
Parameters
----------
==========
likelihood: bilby.likelihood.Likelihood
priors: bilby.prior.PriorDict
grid_size: int, list, dict
......@@ -114,7 +112,7 @@ class Grid(object):
Marginalize over a list of parameters.
Parameters
----------
==========
log_array: array_like
A :class:`numpy.ndarray` of log likelihood/posterior values.
parameters: list, str
......@@ -125,7 +123,7 @@ class Grid(object):
the set of parameter to *not* marginalize over.
Returns
-------
=======
out_array: array_like
An array containing the marginalized log likelihood/posterior.
"""
......@@ -164,7 +162,7 @@ class Grid(object):
Marginalize the log likelihood/posterior over a single given parameter.
Parameters
----------
==========
log_array: array_like
A :class:`numpy.ndarray` of log likelihood/posterior values.
name: str
......@@ -173,7 +171,7 @@ class Grid(object):
A list of parameter names that have not been marginalized over.
Returns
-------
=======
out: array_like
An array containing the marginalized log likelihood/posterior.
"""
......@@ -220,14 +218,14 @@ class Grid(object):
ln likelihood will be fully marginalized over.
Parameters
----------
==========
parameters: str, list, optional
Name of, or list of names of, the parameter(s) to marginalize over.
not_parameters: str, optional
Name of, or list of names of, the parameter(s) to not marginalize over.
Returns
-------
=======
array-like:
The marginalized ln likelihood.
"""
......@@ -241,14 +239,14 @@ class Grid(object):
ln posterior will be fully marginalized over.
Parameters
----------
==========
parameters: str, list, optional
Name of, or list of names of, the parameter(s) to marginalize over.
not_parameters: str, optional
Name of, or list of names of, the parameter(s) to not marginalize over.
Returns
-------
=======
array-like:
The marginalized ln posterior.
"""
......@@ -262,14 +260,14 @@ class Grid(object):
likelihood will be fully marginalized over.
Parameters
----------
==========
parameters: str, list, optional
Name of, or list of names of, the parameter(s) to marginalize over.
not_parameters: str, optional
Name of, or list of names of, the parameter(s) to not marginalize over.
Returns
-------
=======
array-like:
The marginalized likelihood.
"""
......@@ -285,14 +283,14 @@ class Grid(object):
posterior will be fully marginalized over.
Parameters
----------
==========
parameters: str, list, optional
Name of, or list of names of, the parameter(s) to marginalize over.
not_parameters: str, optional
Name of, or list of names of, the parameter(s) to not marginalize over.
Returns
-------
=======
array-like:
The marginalized posterior.
"""
......@@ -377,7 +375,7 @@ class Grid(object):
Writes the Grid to a file.
Parameters
----------
==========
filename: str, optional
Filename to write to (overwrites the default)
overwrite: bool, optional
......@@ -420,7 +418,7 @@ class Grid(object):
""" Read in a saved .json grid file
Parameters
----------
==========
filename: str
If given, try to load from this filename
outdir, label: str
......@@ -431,11 +429,11 @@ class Grid(object):
extension)
Returns
-------
=======
grid: bilby.core.grid.Grid
Raises
-------
=======
ValueError: If no filename is given and either outdir or label is None
If no bilby.core.grid.Grid is found in the path
......
from __future__ import division, print_function
import copy
import numpy as np
......@@ -14,7 +13,7 @@ class Likelihood(object):
"""Empty likelihood class to be subclassed by other likelihoods
Parameters
----------
==========
parameters: dict
A dictionary of the parameter names and associated values
"""
......@@ -29,7 +28,7 @@ class Likelihood(object):
"""
Returns
-------
=======
float
"""
return np.nan
......@@ -38,7 +37,7 @@ class Likelihood(object):
"""
Returns
-------
=======
float
"""
return np.nan
......@@ -47,7 +46,7 @@ class Likelihood(object):
"""Difference between log likelihood and noise log likelihood
Returns
-------
=======
float
"""
return self.log_likelihood() - self.noise_log_likelihood()
......@@ -72,7 +71,7 @@ class ZeroLikelihood(Likelihood):
""" A special test-only class which already returns zero likelihood
Parameters
----------
==========
likelihood: bilby.core.likelihood.Likelihood
A likelihood object to mimic
......@@ -99,7 +98,7 @@ class Analytical1DLikelihood(Likelihood):
parameters are inferred from the arguments of function
Parameters
----------
==========
x, y: array_like
The data to analyse
func:
......@@ -175,7 +174,7 @@ class GaussianLikelihood(Analytical1DLikelihood):
parameters are inferred from the arguments of function
Parameters
----------
==========
x, y: array_like
The data to analyse
func:
......@@ -236,7 +235,7 @@ class PoissonLikelihood(Analytical1DLikelihood):
inferred from the arguments of function, which provides a rate.
Parameters
----------
==========
x: array_like
A dependent variable at which the Poisson rates will be calculated
......@@ -292,7 +291,7 @@ class ExponentialLikelihood(Analytical1DLikelihood):
An exponential likelihood function.
Parameters
----------
==========
x, y: array_like
The data to analyse
......@@ -339,7 +338,7 @@ class StudentTLikelihood(Analytical1DLikelihood):
https://en.wikipedia.org/wiki/Student%27s_t-distribution#Generalized_Student's_t-distribution
Parameters
----------
==========
x, y: array_like
The data to analyse
func:
......@@ -411,7 +410,7 @@ class Multinomial(Likelihood):
"""
Parameters
----------
==========
data: array-like
The number of objects in each class
n_dimensions: int
......@@ -455,7 +454,7 @@ class AnalyticalMultidimensionalCovariantGaussian(Likelihood):
with known analytic solution.
Parameters
----------
==========
mean: array_like
Array with the mean values of distribution
cov: array_like
......@@ -485,7 +484,7 @@ class AnalyticalMultidimensionalBimodalCovariantGaussian(Likelihood):
with known analytic solution.
Parameters
----------
==========
mean_1: array_like
Array with the mean value of the first mode
mean_2: array_like
......@@ -524,7 +523,7 @@ class JointLikelihood(Likelihood):
set consistently
Parameters
----------
==========
*likelihoods: bilby.core.likelihood.Likelihood
likelihoods to be combined parsed as arguments
"""
......
......@@ -13,7 +13,7 @@ class DeltaFunction(Prior):
"""Dirac delta function prior, this always returns peak.
Parameters
----------
==========
peak: float
Peak value of the delta function
name: str
......@@ -33,11 +33,11 @@ class DeltaFunction(Prior):
"""Rescale everything to the peak with the correct shape.
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
float: Rescaled probability, equivalent to peak
"""
self.test_valid_for_rescaling(val)
......@@ -47,11 +47,11 @@ class DeltaFunction(Prior):
"""Return the prior probability of val
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
Union[float, array_like]: np.inf if val = peak, 0 otherwise
"""
......@@ -69,7 +69,7 @@ class PowerLaw(Prior):
"""Power law with bounds and alpha, spectral index
Parameters
----------
==========
alpha: float
Power law exponent parameter
minimum: float
......@@ -97,12 +97,12 @@ class PowerLaw(Prior):
This maps to the inverse CDF. This has been analytically solved for this case.
Parameters
----------
==========
val: Union[float, int, array_like]
Uniform probability
Returns
-------
=======
Union[float, array_like]: Rescaled probability
"""
self.test_valid_for_rescaling(val)
......@@ -116,11 +116,11 @@ class PowerLaw(Prior):
"""Return the prior probability of val
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
float: Prior probability of val
"""
if self.alpha == -1:
......@@ -134,11 +134,11 @@ class PowerLaw(Prior):
"""Return the logarithmic prior probability of val
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
float:
"""
......@@ -170,7 +170,7 @@ class Uniform(Prior):
"""Uniform prior with bounds
Parameters
----------
==========
minimum: float
See superclass
maximum: float
......@@ -195,12 +195,12 @@ class Uniform(Prior):
This maps to the inverse CDF. This has been analytically solved for this case.
Parameters
----------
==========
val: Union[float, int, array_like]
Uniform probability
Returns
-------
=======
Union[float, array_like]: Rescaled probability
"""
self.test_valid_for_rescaling(val)
......@@ -210,11 +210,11 @@ class Uniform(Prior):
"""Return the prior probability of val
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
float: Prior probability of val
"""
return ((val >= self.minimum) & (val <= self.maximum)) / (self.maximum - self.minimum)
......@@ -223,11 +223,11 @@ class Uniform(Prior):
"""Return the log prior probability of val
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
float: log probability of val
"""
return xlogy(1, (val >= self.minimum) & (val <= self.maximum)) - xlogy(1, self.maximum - self.minimum)
......@@ -246,7 +246,7 @@ class LogUniform(PowerLaw):
"""Log-Uniform prior with bounds
Parameters
----------
==========
minimum: float
See superclass
maximum: float
......@@ -278,7 +278,7 @@ class SymmetricLogUniform(Prior):
maximum].
Parameters
----------
==========
minimum: float
See superclass
maximum: float
......@@ -303,12 +303,12 @@ class SymmetricLogUniform(Prior):
This maps to the inverse CDF. This has been analytically solved for this case.
Parameters
----------
==========
val: Union[float, int, array_like]
Uniform probability
Returns
-------
=======
Union[float, array_like]: Rescaled probability
"""
self.test_valid_for_rescaling(val)
......@@ -330,11 +330,11 @@ class SymmetricLogUniform(Prior):
"""Return the prior probability of val
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
float: Prior probability of val
"""
val = np.abs(val)
......@@ -345,11 +345,11 @@ class SymmetricLogUniform(Prior):
"""Return the logarithmic prior probability of val
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
float:
"""
......@@ -370,12 +370,12 @@ class SymmetricLogUniform(Prior):
class Cosine(Prior):
def __init__(self, name=None, latex_label=None, unit=None,
minimum=-np.pi / 2, maximum=np.pi / 2, boundary=None):
def __init__(self, minimum=-np.pi / 2, maximum=np.pi / 2, name=None,
latex_label=None, unit=None, boundary=None):
"""Cosine prior with bounds
Parameters
----------
==========
minimum: float
See superclass
maximum: float
......@@ -389,8 +389,8 @@ class Cosine(Prior):
boundary: str
See superclass
"""
super(Cosine, self).__init__(name=name, latex_label=latex_label, unit=unit,
minimum=minimum, maximum=maximum, boundary=boundary)
super(Cosine, self).__init__(minimum=minimum, maximum=maximum, name=name,
latex_label=latex_label, unit=unit, boundary=boundary)
def rescale(self, val):
"""
......@@ -406,11 +406,11 @@ class Cosine(Prior):
"""Return the prior probability of val. Defined over [-pi/2, pi/2].
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
float: Prior probability of val
"""
return np.cos(val) / 2 * self.is_in_prior_range(val)
......@@ -425,12 +425,12 @@ class Cosine(Prior):
class Sine(Prior):
def __init__(self, name=None, latex_label=None, unit=None, minimum=0,
maximum=np.pi, boundary=None):
def __init__(self, minimum=0, maximum=np.pi, name=None,
latex_label=None, unit=None, boundary=None):
"""Sine prior with bounds
Parameters
----------
==========
minimum: float
See superclass
maximum: float
......@@ -444,8 +444,8 @@ class Sine(Prior):
boundary: str
See superclass
"""
super(Sine, self).__init__(name=name, latex_label=latex_label, unit=unit,
minimum=minimum, maximum=maximum, boundary=boundary)
super(Sine, self).__init__(minimum=minimum, maximum=maximum, name=name,
latex_label=latex_label, unit=unit, boundary=boundary)
def rescale(self, val):
"""
......@@ -461,11 +461,11 @@ class Sine(Prior):
"""Return the prior probability of val. Defined over [0, pi].
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
Union[float, array_like]: Prior probability of val
"""
return np.sin(val) / 2 * self.is_in_prior_range(val)
......@@ -484,7 +484,7 @@ class Gaussian(Prior):
"""Gaussian prior with mean mu and width sigma
Parameters
----------
==========
mu: float
Mean of the Gaussian prior
sigma:
......@@ -507,7 +507,7 @@ class Gaussian(Prior):
'Rescale' a sample from the unit line element to the appropriate Gaussian prior.
Parameters
----------
==========
val: Union[float, int, array_like]
This maps to the inverse CDF. This has been analytically solved for this case.
......@@ -519,11 +519,11 @@ class Gaussian(Prior):
"""Return the prior probability of val.
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
Union[float, array_like]: Prior probability of val
"""
return np.exp(-(self.mu - val) ** 2 / (2 * self.sigma ** 2)) / (2 * np.pi) ** 0.5 / self.sigma
......@@ -532,11 +532,11 @@ class Gaussian(Prior):
"""Return the Log prior probability of val.
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
Union[float, array_like]: Prior probability of val
"""
......@@ -559,7 +559,7 @@ class TruncatedGaussian(Prior):
https://en.wikipedia.org/wiki/Truncated_normal_distribution
Parameters
----------
==========
mu: float
Mean of the Gaussian prior
sigma:
......@@ -587,7 +587,7 @@ class TruncatedGaussian(Prior):
""" Calculates the proper normalisation of the truncated Gaussian
Returns
-------
=======
float: Proper normalisation of the truncated Gaussian
"""
return (erf((self.maximum - self.mu) / 2 ** 0.5 / self.sigma) - erf(
......@@ -607,11 +607,11 @@ class TruncatedGaussian(Prior):
"""Return the prior probability of val.
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
float: Prior probability of val
"""
return np.exp(-(self.mu - val) ** 2 / (2 * self.sigma ** 2)) / (2 * np.pi) ** 0.5 \
......@@ -635,7 +635,7 @@ class HalfGaussian(TruncatedGaussian):
"""A Gaussian with its mode at zero, and truncated to only be positive.
Parameters
----------
==========
sigma: float
See superclass
name: str
......@@ -663,7 +663,7 @@ class LogNormal(Prior):
https://en.wikipedia.org/wiki/Log-normal_distribution
Parameters
----------
==========
mu: float
Mean of the Gaussian prior
sigma:
......@@ -699,11 +699,11 @@ class LogNormal(Prior):
"""Returns the prior probability of val.
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
Union[float, array_like]: Prior probability of val
"""
if isinstance(val, (float, int)):
......@@ -723,11 +723,11 @@ class LogNormal(Prior):
"""Returns the log prior probability of val.
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
Union[float, array_like]: Prior probability of val
"""
if isinstance(val, (float, int)):
......@@ -765,7 +765,7 @@ class Exponential(Prior):
"""Exponential prior with mean mu
Parameters
----------
==========
mu: float
Mean of the Exponential prior
name: str
......@@ -794,11 +794,11 @@ class Exponential(Prior):
"""Return the prior probability of val.
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
Union[float, array_like]: Prior probability of val
"""
if isinstance(val, (float, int)):
......@@ -815,11 +815,11 @@ class Exponential(Prior):
"""Returns the log prior probability of val.
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
Union[float, array_like]: Prior probability of val
"""
if isinstance(val, (float, int)):
......@@ -853,7 +853,7 @@ class StudentT(Prior):
https://en.wikipedia.org/wiki/Student%27s_t-distribution#Generalized_Student's_t-distribution
Parameters
----------
==========
df: float
Number of degrees of freedom for distribution
mu: float
......@@ -902,11 +902,11 @@ class StudentT(Prior):
"""Return the prior probability of val.
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
Union[float, array_like]: Prior probability of val
"""
return np.exp(self.ln_prob(val))
......@@ -915,11 +915,11 @@ class StudentT(Prior):
"""Returns the log prior probability of val.
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
Union[float, array_like]: Prior probability of val
"""
return gammaln(0.5 * (self.df + 1)) - gammaln(0.5 * self.df)\
......@@ -941,7 +941,7 @@ class Beta(Prior):
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.beta.html
Parameters
----------
==========
alpha: float
first shape parameter
beta: float
......@@ -981,11 +981,11 @@ class Beta(Prior):
"""Return the prior probability of val.
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
Union[float, array_like]: Prior probability of val
"""
return np.exp(self.ln_prob(val))
......@@ -994,11 +994,11 @@ class Beta(Prior):
"""Returns the log prior probability of val.
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
Union[float, array_like]: Prior probability of val
"""
_ln_prob = xlogy(self.alpha - 1, val - self.minimum) + xlogy(self.beta - 1, self.maximum - val)\
......@@ -1039,7 +1039,7 @@ class Logistic(Prior):
https://en.wikipedia.org/wiki/Logistic_distribution
Parameters
----------
==========
mu: float
Mean of the distribution
scale: float
......@@ -1086,11 +1086,11 @@ class Logistic(Prior):
"""Return the prior probability of val.
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
Union[float, array_like]: Prior probability of val
"""
return np.exp(self.ln_prob(val))
......@@ -1099,11 +1099,11 @@ class Logistic(Prior):
"""Returns the log prior probability of val.
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
Union[float, array_like]: Prior probability of val
"""
return -(val - self.mu) / self.scale -\
......@@ -1120,7 +1120,7 @@ class Cauchy(Prior):
https://en.wikipedia.org/wiki/Cauchy_distribution
Parameters
----------
==========
alpha: float
Location parameter
beta: float
......@@ -1164,11 +1164,11 @@ class Cauchy(Prior):
"""Return the prior probability of val.
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
Union[float, array_like]: Prior probability of val
"""
return 1. / self.beta / np.pi / (1. + ((val - self.alpha) / self.beta) ** 2)
......@@ -1177,11 +1177,11 @@ class Cauchy(Prior):
"""Return the log prior probability of val.
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
Union[float, array_like]: Log prior probability of val
"""
return - np.log(self.beta * np.pi) - np.log(1. + ((val - self.alpha) / self.beta) ** 2)
......@@ -1201,7 +1201,7 @@ class Gamma(Prior):
https://en.wikipedia.org/wiki/Gamma_distribution
Parameters
----------
==========
k: float
The shape parameter
theta: float
......@@ -1237,11 +1237,11 @@ class Gamma(Prior):
"""Return the prior probability of val.
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
Union[float, array_like]: Prior probability of val
"""
return np.exp(self.ln_prob(val))
......@@ -1250,11 +1250,11 @@ class Gamma(Prior):
"""Returns the log prior probability of val.
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
Union[float, array_like]: Prior probability of val
"""
if isinstance(val, (float, int)):
......@@ -1288,7 +1288,7 @@ class ChiSquared(Gamma):
https://en.wikipedia.org/wiki/Chi-squared_distribution
Parameters
----------
==========
nu: int
Number of degrees of freedom
name: str
......@@ -1324,7 +1324,7 @@ class FermiDirac(Prior):
is defined by Equation 22 of [1]_.
Parameters
----------
==========
sigma: float (required)
The range over which the attenuation of the distribution happens
mu: float
......@@ -1341,7 +1341,7 @@ class FermiDirac(Prior):
See superclass
References
----------
==========
.. [1] M. Pitkin, M. Isi, J. Veitch & G. Woan, `arXiv:1705.08978v1
<https://arxiv.org/abs/1705.08978v1>`_, 2017.
......@@ -1370,14 +1370,14 @@ class FermiDirac(Prior):
'Rescale' a sample from the unit line element to the appropriate Fermi-Dirac prior.
Parameters
----------
==========
val: Union[float, int, array_like]
This maps to the inverse CDF. This has been analytically solved for this case,
see Equation 24 of [1]_.
References
----------
==========
.. [1] M. Pitkin, M. Isi, J. Veitch & G. Woan, `arXiv:1705.08978v1
<https://arxiv.org/abs/1705.08978v1>`_, 2017.
......@@ -1404,11 +1404,11 @@ class FermiDirac(Prior):
"""Return the prior probability of val.
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
float: Prior probability of val
"""
return np.exp(self.ln_prob(val))
......@@ -1417,11 +1417,11 @@ class FermiDirac(Prior):
"""Return the log prior probability of val.
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
Union[float, array_like]: Log prior probability of val
"""
......
......@@ -20,7 +20,7 @@ class Prior(object):
""" Implements a Prior object
Parameters
----------
==========
name: str, optional
Name associated with prior.
latex_label: str, optional
......@@ -57,27 +57,58 @@ class Prior(object):
"""Overrides the __call__ special method. Calls the sample method.
Returns
-------
=======
float: The return value of the sample method.
"""
return self.sample()
def __eq__(self, other):
"""
Test equality of two prior objects.
Returns true iff:
- The class of the two priors are the same
- Both priors have the same keys in the __dict__ attribute
- The instantiation arguments match
We don't check that all entries the the __dict__ attribute
are equal as some attributes are variable for conditional
priors.
Parameters
==========
other: Prior
The prior to compare with
Returns
=======
bool
Whether the priors are equivalent
Notes
=====
A special case is made for :code `scipy.stats.beta`: instances.
It may be possible to remove this as we now only check instantiation
arguments.
"""
if self.__class__ != other.__class__:
return False
if sorted(self.__dict__.keys()) != sorted(other.__dict__.keys()):
return False
for key in self.__dict__:
this_dict = self.get_instantiation_dict()
other_dict = other.get_instantiation_dict()
for key in this_dict:
if key == "least_recently_sampled":
# ignore sample drawn from prior in comparison
continue
if type(self.__dict__[key]) is np.ndarray:
if not np.array_equal(self.__dict__[key], other.__dict__[key]):
if isinstance(this_dict[key], np.ndarray):
if not np.array_equal(this_dict[key], other_dict[key]):
return False
elif isinstance(self.__dict__[key], type(scipy.stats.beta(1., 1.))):
elif isinstance(this_dict[key], type(scipy.stats.beta(1., 1.))):
continue
else:
if not self.__dict__[key] == other.__dict__[key]:
if not this_dict[key] == other_dict[key]:
return False
return True
......@@ -85,12 +116,12 @@ class Prior(object):
"""Draw a sample from the prior
Parameters
----------
==========
size: int or tuple of ints, optional
See numpy.random.uniform docs
Returns
-------
=======
float: A random number between 0 and 1, rescaled to match the distribution of this Prior
"""
......@@ -104,12 +135,12 @@ class Prior(object):
This should be overwritten by each subclass.
Parameters
----------
==========
val: Union[float, int, array_like]
A random number between 0 and 1
Returns
-------
=======
None
"""
......@@ -119,11 +150,11 @@ class Prior(object):
"""Return the prior probability of val, this should be overwritten
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
np.nan
"""
......@@ -146,11 +177,11 @@ class Prior(object):
"""Return the prior ln probability of val, this should be overwritten
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
np.nan
"""
......@@ -160,11 +191,11 @@ class Prior(object):
"""Returns True if val is in the prior boundaries, zero otherwise
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
np.nan
"""
......@@ -175,11 +206,11 @@ class Prior(object):
"""Test if 0 < val < 1
Parameters
----------
==========
val: Union[float, int, array_like]
Raises
-------
=======
ValueError: If val is not between 0 and 1
"""
valarray = np.atleast_1d(val)
......@@ -194,7 +225,7 @@ class Prior(object):
Works correctly for all child classes
Returns
-------
=======
str: A string representation of this instance
"""
......@@ -224,7 +255,7 @@ class Prior(object):
Returns
-------
=======
bool: Whether it's fixed or not!
"""
......@@ -237,7 +268,7 @@ class Prior(object):
Draws from a set of default labels if no label is given
Returns
-------
=======
str: A latex representation for this prior
"""
......@@ -392,17 +423,17 @@ class Prior(object):
Parameters
----------
==========
val: str
The string version of the agument
Returns
-------
=======
val: object
The parsed version of the argument.
Raises
------
======
TypeError:
If val cannot be parsed as described above.
"""
......
......@@ -15,7 +15,7 @@ def conditional_prior_factory(prior_class):
"""
Parameters
----------
==========
condition_func: func
Functional form of the condition for this prior. The first function argument
has to be a dictionary for the `reference_params` (see below). The following
......@@ -27,10 +27,14 @@ def conditional_prior_factory(prior_class):
`p(x|y)` with the boundaries linearly depending on y, then this
could have the following form:
```
def condition_func(reference_params, y):
return dict(minimum=reference_params['minimum'] + y, maximum=reference_params['maximum'] + y)
```
.. code-block:: python
def condition_func(reference_params, y):
return dict(
minimum=reference_params['minimum'] + y,
maximum=reference_params['maximum'] + y
)
name: str, optional
See superclass
latex_label: str, optional
......@@ -61,14 +65,14 @@ def conditional_prior_factory(prior_class):
"""Draw a sample from the prior
Parameters
----------
==========
size: int or tuple of ints, optional
See superclass
required_variables:
Any required variables that this prior depends on
Returns
-------
=======
float: See superclass
"""
......@@ -80,7 +84,7 @@ def conditional_prior_factory(prior_class):
'Rescale' a sample from the unit line element to the prior.
Parameters
----------
==========
val: Union[float, int, array_like]
See superclass
required_variables:
......@@ -95,7 +99,7 @@ def conditional_prior_factory(prior_class):
"""Return the prior probability of val.
Parameters
----------
==========
val: Union[float, int, array_like]
See superclass
required_variables:
......@@ -103,7 +107,7 @@ def conditional_prior_factory(prior_class):
Returns
-------
=======
float: Prior probability of val
"""
self.update_conditions(**required_variables)
......@@ -113,7 +117,7 @@ def conditional_prior_factory(prior_class):
"""Return the natural log prior probability of val.
Parameters
----------
==========
val: Union[float, int, array_like]
See superclass
required_variables:
......@@ -121,7 +125,7 @@ def conditional_prior_factory(prior_class):
Returns
-------
=======
float: Natural log prior probability of val
"""
self.update_conditions(**required_variables)
......@@ -131,7 +135,7 @@ def conditional_prior_factory(prior_class):
"""Return the cdf of val.
Parameters
----------
==========
val: Union[float, int, array_like]
See superclass
required_variables:
......@@ -139,7 +143,7 @@ def conditional_prior_factory(prior_class):
Returns
-------
=======
float: CDF of val
"""
self.update_conditions(**required_variables)
......@@ -154,7 +158,7 @@ def conditional_prior_factory(prior_class):
If no variables are given, the most recently used conditional parameters are kept
Parameters
----------
==========
required_variables:
Any required variables that this prior depends on. If none are given,
self.reference_params will be used.
......@@ -217,7 +221,7 @@ def conditional_prior_factory(prior_class):
Works correctly for all child classes
Returns
-------
=======
str: A string representation of this instance
"""
......@@ -234,49 +238,116 @@ def conditional_prior_factory(prior_class):
return ConditionalPrior
ConditionalBasePrior = conditional_prior_factory(Prior) # Only for testing purposes
ConditionalUniform = conditional_prior_factory(Uniform)
ConditionalDeltaFunction = conditional_prior_factory(DeltaFunction)
ConditionalPowerLaw = conditional_prior_factory(PowerLaw)
ConditionalGaussian = conditional_prior_factory(Gaussian)
ConditionalLogUniform = conditional_prior_factory(LogUniform)
ConditionalSymmetricLogUniform = conditional_prior_factory(SymmetricLogUniform)
ConditionalCosine = conditional_prior_factory(Cosine)
ConditionalSine = conditional_prior_factory(Sine)
ConditionalTruncatedGaussian = conditional_prior_factory(TruncatedGaussian)
ConditionalHalfGaussian = conditional_prior_factory(HalfGaussian)
ConditionalLogNormal = conditional_prior_factory(LogNormal)
ConditionalExponential = conditional_prior_factory(Exponential)
ConditionalStudentT = conditional_prior_factory(StudentT)
ConditionalBeta = conditional_prior_factory(Beta)
ConditionalLogistic = conditional_prior_factory(Logistic)
ConditionalCauchy = conditional_prior_factory(Cauchy)
ConditionalGamma = conditional_prior_factory(Gamma)
ConditionalChiSquared = conditional_prior_factory(ChiSquared)
ConditionalFermiDirac = conditional_prior_factory(FermiDirac)
ConditionalInterped = conditional_prior_factory(Interped)
class ConditionalBasePrior(conditional_prior_factory(Prior)):
pass
class ConditionalUniform(conditional_prior_factory(Uniform)):
pass
class ConditionalDeltaFunction(conditional_prior_factory(DeltaFunction)):
pass
class ConditionalPowerLaw(conditional_prior_factory(PowerLaw)):
pass
class ConditionalGaussian(conditional_prior_factory(Gaussian)):
pass
class ConditionalLogUniform(conditional_prior_factory(LogUniform)):
pass
class ConditionalSymmetricLogUniform(conditional_prior_factory(SymmetricLogUniform)):
pass
class ConditionalCosine(conditional_prior_factory(Cosine)):
pass
class ConditionalSine(conditional_prior_factory(Sine)):
pass
class ConditionalTruncatedGaussian(conditional_prior_factory(TruncatedGaussian)):
pass
class ConditionalHalfGaussian(conditional_prior_factory(HalfGaussian)):
pass
class ConditionalLogNormal(conditional_prior_factory(LogNormal)):
pass
class ConditionalExponential(conditional_prior_factory(Exponential)):
pass
class ConditionalStudentT(conditional_prior_factory(StudentT)):
pass
class ConditionalBeta(conditional_prior_factory(Beta)):
pass
class ConditionalLogistic(conditional_prior_factory(Logistic)):
pass
class ConditionalCauchy(conditional_prior_factory(Cauchy)):
pass
class ConditionalGamma(conditional_prior_factory(Gamma)):
pass
class ConditionalChiSquared(conditional_prior_factory(ChiSquared)):
pass
class ConditionalFermiDirac(conditional_prior_factory(FermiDirac)):
pass
class ConditionalInterped(conditional_prior_factory(Interped)):
pass
class DirichletElement(ConditionalBeta):
"""
r"""
Single element in a dirichlet distribution
The probability scales as
$p(x_order) \propto (x_max - x_order)^(n_dimensions - order - 2)$
.. math::
p(x_n) \propto (x_\max - x_n)^{(N - n - 2)}
for x_order < x_max, where x_max is the sum of x_i for i < order
for :math:`x_n < x_\max`, where :math:`x_\max` is the sum of :math:`x_i`
for :math:`i < n`
Examples
--------
========
n_dimensions = 1:
p(x_0) \propto 1 ; 0 < x_0 < 1
.. math::
p(x_0) \propto 1 ; 0 < x_0 < 1
n_dimensions = 2:
p(x_0) \propto (1 - x_0) ; 0 < x_0 < 1
p(x_1) \propto 1 ; 0 < x_1 < 1
.. math::
p(x_0) &\propto (1 - x_0) ; 0 < x_0 < 1
p(x_1) &\propto 1 ; 0 < x_1 < 1
Parameters
----------
==========
order: int
Order of this element of the dirichlet distribution.
n_dimensions: int
......@@ -284,9 +355,11 @@ class DirichletElement(ConditionalBeta):
label: str
Label for the dirichlet distribution.
This should be the same for all elements.
"""
def __init__(self, order, n_dimensions, label):
""" """
super(DirichletElement, self).__init__(
minimum=0, maximum=1, alpha=1, beta=n_dimensions - order - 1,
name=label + str(order),
......
......@@ -3,7 +3,6 @@ from io import open as ioopen
import json
import os
from future.utils import iteritems
from matplotlib.cbook import flatten
import numpy as np
......@@ -19,7 +18,7 @@ class PriorDict(dict):
""" A dictionary of priors
Parameters
----------
==========
dictionary: Union[dict, str, None]
If given, a dictionary to generate the prior set.
filename: Union[str, None]
......@@ -61,12 +60,12 @@ class PriorDict(dict):
Placeholder parameter conversion function.
Parameters
----------
==========
sample: dict
Dictionary to convert
Returns
-------
=======
sample: dict
Same as input
"""
......@@ -76,7 +75,7 @@ class PriorDict(dict):
""" Write the prior distribution to file.
Parameters
----------
==========
outdir: str
output directory name
label: str
......@@ -124,18 +123,20 @@ class PriorDict(dict):
""" Reads in a prior from a file specification
Parameters
----------
==========
filename: str
Name of the file to be read in
Notes
-----
=====
Lines beginning with '#' or empty lines will be ignored.
Priors can be loaded from:
bilby.core.prior as, e.g., foo = Uniform(minimum=0, maximum=1)
floats, e.g., foo = 1
bilby.gw.prior as, e.g., foo = bilby.gw.prior.AlignedSpin()
other external modules, e.g., foo = my.module.CustomPrior(...)
- bilby.core.prior as, e.g., :code:`foo = Uniform(minimum=0, maximum=1)`
- floats, e.g., :code:`foo = 1`
- bilby.gw.prior as, e.g., :code:`foo = bilby.gw.prior.AlignedSpin()`
- other external modules, e.g., :code:`foo = my.module.CustomPrior(...)`
"""
comments = ['#', '\n']
......@@ -154,7 +155,7 @@ class PriorDict(dict):
@classmethod
def _get_from_json_dict(cls, prior_dict):
try:
cls == getattr(
cls = getattr(
import_module(prior_dict["__module__"]),
prior_dict["__name__"])
except ImportError:
......@@ -175,7 +176,7 @@ class PriorDict(dict):
""" Reads in a prior from a json file
Parameters
----------
==========
filename: str
Name of the file to be read in
"""
......@@ -185,7 +186,7 @@ class PriorDict(dict):
def from_dictionary(self, dictionary):
eval_dict = dict(inf=np.inf)
for key, val in iteritems(dictionary):
for key, val in dictionary.items():
if isinstance(val, Prior):
continue
elif isinstance(val, (int, float)):
......@@ -230,9 +231,19 @@ class PriorDict(dict):
"= {}. Error message {}".format(key, val, e)
)
elif isinstance(val, dict):
logger.warning(
'Cannot convert {} into a prior object. '
'Leaving as dictionary.'.format(key))
try:
_class = getattr(
import_module(val.get("__module__", "none")),
val.get("__name__", "none"))
dictionary[key] = _class(**val.get("kwargs", dict()))
except ImportError:
logger.debug("Cannot import prior module {}.{}".format(
val.get("__module__", "none"), val.get("__name__", "none")
))
logger.warning(
'Cannot convert {} into a prior object. '
'Leaving as dictionary.'.format(key))
continue
else:
raise TypeError(
"Unable to parse prior, bad entry: {} "
......@@ -265,7 +276,7 @@ class PriorDict(dict):
this will set-up default priors for those as well.
Parameters
----------
==========
likelihood: bilby.likelihood.GravitationalWaveTransient instance
Used to infer the set of parameters to fill the prior with
default_priors_file: str, optional
......@@ -273,7 +284,7 @@ class PriorDict(dict):
Returns
-------
=======
prior: dict
The filled prior dictionary
......@@ -302,12 +313,12 @@ class PriorDict(dict):
"""Draw samples from the prior set
Parameters
----------
==========
size: int or tuple of ints, optional
See numpy.random.uniform docs
Returns
-------
=======
dict: Dictionary of the samples
"""
return self.sample_subset_constrained(keys=list(self.keys()), size=size)
......@@ -316,14 +327,14 @@ class PriorDict(dict):
""" Return an array of samples
Parameters
----------
==========
keys: list
A list of keys to sample in
size: int
The number of samples to draw
Returns
-------
=======
array: array_like
An array of shape (len(key), size) of the samples (ordered by keys)
"""
......@@ -336,14 +347,14 @@ class PriorDict(dict):
"""Draw samples from the prior set for parameters which are not a DeltaFunction
Parameters
----------
==========
keys: list
List of prior keys to draw samples from
size: int or tuple of ints, optional
See numpy.random.uniform docs
Returns
-------
=======
dict: Dictionary of the drawn samples
"""
self.convert_floats_to_delta_functions()
......@@ -391,6 +402,7 @@ class PriorDict(dict):
samples = self.sample_subset(keys=keys, size=sampling_chunk)
keep = np.atleast_1d(self.evaluate_constraints(samples))
if len(keep) == 1:
self._cached_normalizations[keys] = 1
return 1
all_samples = {key: np.array([]) for key in keys}
while np.count_nonzero(keep) < min_accept:
......@@ -407,14 +419,14 @@ class PriorDict(dict):
"""
Parameters
----------
==========
sample: dict
Dictionary of the samples of which we want to have the probability of
kwargs:
The keyword arguments are passed directly to `np.product`
Returns
-------
=======
float: Joint probability of all individual sample probabilities
"""
......@@ -440,14 +452,14 @@ class PriorDict(dict):
"""
Parameters
----------
==========
sample: dict
Dictionary of the samples of which to calculate the log probability
axis: None or int
Axis along which the summation is performed
Returns
-------
=======
float or ndarray:
Joint log probability of all the individual sample probabilities
......@@ -474,14 +486,14 @@ class PriorDict(dict):
"""Rescale samples from unit cube to prior
Parameters
----------
==========
keys: list
List of prior keys to be rescaled
theta: list
List of randomly drawn values on a unit cube associated with the prior keys
Returns
-------
=======
list: List of floats containing the rescaled sample
"""
return list(flatten([self[key].rescale(sample) for key, sample in zip(keys, theta)]))
......@@ -494,8 +506,8 @@ class PriorDict(dict):
"""
Test whether there are redundant keys in self.
Return
------
Returns
=======
bool: Whether there are redundancies or not
"""
redundant = False
......@@ -536,7 +548,7 @@ class ConditionalPriorDict(PriorDict):
"""
Parameters
----------
==========
dictionary: dict
See parent class
filename: str
......@@ -615,12 +627,12 @@ class ConditionalPriorDict(PriorDict):
""" Returns the required variables to sample a given conditional key.
Parameters
----------
==========
key : str
Name of the key that we want to know the required variables for
Returns
----------
==========
dict: key/value pairs of the required variables
"""
return {k: self[k].least_recently_sampled for k in getattr(self[key], 'required_variables', [])}
......@@ -629,14 +641,14 @@ class ConditionalPriorDict(PriorDict):
"""
Parameters
----------
==========
sample: dict
Dictionary of the samples of which we want to have the probability of
kwargs:
The keyword arguments are passed directly to `np.product`
Returns
-------
=======
float: Joint probability of all individual sample probabilities
"""
......@@ -650,14 +662,14 @@ class ConditionalPriorDict(PriorDict):
"""
Parameters
----------
==========
sample: dict
Dictionary of the samples of which we want to have the log probability of
axis: Union[None, int]
Axis along which the summation is performed
Returns
-------
=======
float: Joint log probability of all the individual sample probabilities
"""
......@@ -671,14 +683,14 @@ class ConditionalPriorDict(PriorDict):
"""Rescale samples from unit cube to prior
Parameters
----------
==========
keys: list
List of prior keys to be rescaled
theta: list
List of randomly drawn values on a unit cube associated with the prior keys
Returns
-------
=======
list: List of floats containing the rescaled sample
"""
self._check_resolved()
......@@ -774,14 +786,14 @@ def create_default_prior(name, default_priors_file=None):
"""Make a default prior for a parameter with a known name.
Parameters
----------
==========
name: str
Parameter name
default_priors_file: str, optional
If given, a file containing the default priors.
Return
------
Returns
======
prior: Prior
Default prior distribution for that parameter, if unknown None is
returned.
......
......@@ -13,7 +13,7 @@ class Interped(Prior):
"""Creates an interpolated prior function from arrays of xx and yy=p(xx)
Parameters
----------
==========
xx: array_like
x values for the to be interpolated prior function
yy: array_like
......@@ -32,7 +32,7 @@ class Interped(Prior):
See superclass
Attributes
----------
==========
probability_density: scipy.interpolate.interp1d
Interpolated prior probability distribution
cumulative_distribution: scipy.interpolate.interp1d
......@@ -69,11 +69,11 @@ class Interped(Prior):
"""Return the prior probability of val.
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
Union[float, array_like]: Prior probability of val
"""
return self.probability_density(val)
......@@ -102,7 +102,7 @@ class Interped(Prior):
Yields an error if value is set below instantiated x-array minimum.
Returns
-------
=======
float: Minimum of the prior distribution
"""
......@@ -125,7 +125,7 @@ class Interped(Prior):
Yields an error if value is set above instantiated x-array maximum.
Returns
-------
=======
float: Maximum of the prior distribution
"""
......@@ -146,7 +146,7 @@ class Interped(Prior):
Updates the prior distribution if it is changed
Returns
-------
=======
array_like: p(xx) values
"""
......@@ -182,7 +182,7 @@ class FromFile(Interped):
"""Creates an interpolated prior function from arrays of xx and yy=p(xx) extracted from a file
Parameters
----------
==========
file_name: str
Name of the file containing the xx and yy arrays
minimum: float
......
......@@ -15,7 +15,7 @@ class BaseJointPriorDist(object):
Parameters
----------
==========
names: list (required)
A list of the parameter names in the JointPriorDist. The
listed parameters must have the same order that they appear in
......@@ -125,7 +125,7 @@ class BaseJointPriorDist(object):
Works correctly for all child classes
Returns
-------
=======
str: A string representation of this instance
"""
......@@ -149,14 +149,14 @@ class BaseJointPriorDist(object):
probability will not be properly normalised.
Parameters
----------
==========
value: array_like
A 1d vector of the sample, or 2d array of sample values with shape
NxM, where N is the number of samples and M is the number of
parameters.
Returns
-------
=======
samp: array_like
returns the input value as a sample array
outbounds: array_like
......@@ -185,7 +185,7 @@ class BaseJointPriorDist(object):
probability will not be properly normalised.
Parameters
----------
==========
value: array_like
A 1d vector of the sample, or 2d array of sample values with shape
NxM, where N is the number of samples and M is the number of
......@@ -206,7 +206,7 @@ class BaseJointPriorDist(object):
probability will not be properly normalised. **this method needs overwritten by child class**
Parameters
----------
==========
samp: vector
sample to evaluate the ln_prob at
lnprob: vector
......@@ -215,7 +215,7 @@ class BaseJointPriorDist(object):
boolean array showing which samples in lnprob vector are out of the given bounds
Returns
-------
=======
lnprob: vector
array of lnprob values for each sample given
"""
......@@ -229,7 +229,7 @@ class BaseJointPriorDist(object):
Draw, and set, a sample from the Dist, accompanying method _sample needs to overwritten
Parameters
----------
==========
size: int
number of samples to generate, defualts to 1
"""
......@@ -248,7 +248,7 @@ class BaseJointPriorDist(object):
Draw, and set, a sample from the joint dist (**needs to be ovewritten by child class**)
Parameters
----------
==========
size: int
number of samples to generate, defualts to 1
"""
......@@ -265,7 +265,7 @@ class BaseJointPriorDist(object):
overwrite accompanying method _rescale().
Parameters
----------
==========
value: array
A 1d vector sample (one for each parameter) drawn from a uniform
distribution between 0 and 1, or a 2d NxM array of samples where
......@@ -275,7 +275,7 @@ class BaseJointPriorDist(object):
args are called in the JointPrior rescale methods for each parameter
Returns
-------
=======
array:
An vector sample drawn from the multivariate Gaussian
distribution.
......@@ -297,7 +297,7 @@ class BaseJointPriorDist(object):
rescale a sample from a unit hypercybe to the joint dist (**needs to be ovewritten by child class**)
Parameters
----------
==========
samp: numpy array
this is a vector sample drawn from a uniform distribtuion to be rescaled to the distribution
"""
......@@ -322,7 +322,7 @@ class MultivariateGaussianDist(BaseJointPriorDist):
MultiNest.
Parameters
----------
==========
names: list
A list of the parameter names in the multivariate Gaussian. The
listed parameters must have the same order that they appear in
......@@ -656,7 +656,7 @@ class JointPrior(Prior):
"""This defines the single parameter Prior object for parameters that belong to a JointPriorDist
Parameters
----------
==========
dist: ChildClass of BaseJointPriorDist
The shared JointPriorDistribution that this parameter belongs to
name: str
......@@ -699,13 +699,13 @@ class JointPrior(Prior):
Scale a unit hypercube sample to the prior.
Parameters
----------
==========
val: array_like
value drawn from unit hypercube to be rescaled onto the prior
kwargs: dict
all kwargs passed to the dist.rescale method
Returns
-------
=======
float:
A sample from the prior paramter.
"""
......@@ -726,13 +726,13 @@ class JointPrior(Prior):
Draw a sample from the prior.
Parameters
----------
==========
size: int, float (defaults to 1)
number of samples to draw
kwargs: dict
kwargs passed to the dist.sample method
Returns
-------
=======
float:
A sample from the prior paramter.
"""
......@@ -764,11 +764,11 @@ class JointPrior(Prior):
distribution.
Parameters
----------
==========
val: array_like
value to evaluate the prior log-prob at
Returns
-------
=======
float:
the logp value for the prior at given sample
"""
......@@ -816,12 +816,12 @@ class JointPrior(Prior):
"""Return the prior probability of val
Parameters
----------
==========
val: array_like
value to evaluate the prior prob at
Returns
-------
=======
float:
the p value for the prior at given sample
"""
......
......@@ -16,7 +16,7 @@ class SlabSpikePrior(Prior):
`SymmetricLogUniform` and `FermiDirac` are currently not supported.
Parameters
----------
==========
slab: Prior
Any instance of a bilby prior class. All general prior attributes
from the slab are copied into the SlabSpikePrior.
......@@ -76,12 +76,12 @@ class SlabSpikePrior(Prior):
'Rescale' a sample from the unit line element to the prior.
Parameters
----------
==========
val: Union[float, int, array_like]
A random number between 0 and 1
Returns
-------
=======
array_like: Associated prior value with input value.
"""
val = np.atleast_1d(val)
......@@ -104,12 +104,12 @@ class SlabSpikePrior(Prior):
on the pure slab part of the prior.
Parameters
----------
==========
val: Union[float, int, array_like]
A random number between 0 and self.slab_fraction
Returns
-------
=======
array_like: Associated prior value with input value.
"""
return self.slab.rescale(val / self.slab_fraction)
......@@ -119,11 +119,11 @@ class SlabSpikePrior(Prior):
Returns np.inf for the spike location
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
array_like: Prior probability of val
"""
res = self.slab.prob(val) * self.slab_fraction
......@@ -136,11 +136,11 @@ class SlabSpikePrior(Prior):
Returns np.inf for the spike location
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
array_like: Prior probability of val
"""
res = self.slab.ln_prob(val) + np.log(self.slab_fraction)
......@@ -154,11 +154,11 @@ class SlabSpikePrior(Prior):
at the spike location.
Parameters
----------
==========
val: Union[float, int, array_like]
Returns
-------
=======
array_like: CDF value of val
"""
......
This diff is collapsed.
......@@ -13,6 +13,7 @@ from .dynamic_dynesty import DynamicDynesty
from .dynesty import Dynesty
from .emcee import Emcee
from .kombine import Kombine
from .nessai import Nessai
from .nestle import Nestle
from .polychord import PyPolyChord
from .ptemcee import Ptemcee
......@@ -26,10 +27,10 @@ from . import proposal
IMPLEMENTED_SAMPLERS = {
'cpnest': Cpnest, 'dnest4': DNest4, 'dynamic_dynesty': DynamicDynesty,
'dynesty': Dynesty, 'emcee': Emcee, 'kombine': Kombine, 'nestle': Nestle,
'ptemcee': Ptemcee, 'ptmcmcsampler': PTMCMCSampler, 'pymc3': Pymc3,
'pymultinest': Pymultinest, 'pypolychord': PyPolyChord, 'ultranest': Ultranest,
'fake_sampler': FakeSampler}
'dynesty': Dynesty, 'emcee': Emcee,'kombine': Kombine, 'nessai': Nessai,
'nestle': Nestle, 'ptemcee': Ptemcee, 'ptmcmcsampler': PTMCMCSampler,
'pymc3': Pymc3, 'pymultinest': Pymultinest, 'pypolychord': PyPolyChord,
'ultranest': Ultranest, 'fake_sampler': FakeSampler}
if command_line_args.sampler_help:
sampler = command_line_args.sampler_help
......@@ -57,7 +58,7 @@ def run_sampler(likelihood, priors=None, label='label', outdir='outdir',
The primary interface to easy parameter estimation
Parameters
----------
==========
likelihood: `bilby.Likelihood`
A `Likelihood` instance
priors: `bilby.PriorDict`
......@@ -108,7 +109,7 @@ def run_sampler(likelihood, priors=None, label='label', outdir='outdir',
All kwargs are passed directly to the samplers `run` function
Returns
-------
=======
result: bilby.core.result.Result
An object containing the results
"""
......
from __future__ import absolute_import
import datetime
import distutils.dir_util
import numpy as np
......@@ -16,7 +15,7 @@ class Sampler(object):
""" A sampler object to aid in setting up an inference run
Parameters
----------
==========
likelihood: likelihood.Likelihood
A object with a log_l method
priors: bilby.core.prior.PriorDict, dict
......@@ -51,7 +50,7 @@ class Sampler(object):
Additional keyword arguments
Attributes
----------
==========
likelihood: likelihood.Likelihood
A object with a log_l method
priors: bilby.core.prior.PriorDict
......@@ -79,7 +78,7 @@ class Sampler(object):
Dictionary of keyword arguments that can be used in the external sampler
Raises
------
======
TypeError:
If external_sampler is neither a string nor an instance of this class
If not all likelihood.parameters have been defined
......@@ -227,7 +226,7 @@ class Sampler(object):
def _initialise_result(self, result_class):
"""
Returns
-------
=======
bilby.core.result.Result: An initial template for the result
"""
......@@ -251,29 +250,13 @@ class Sampler(object):
return result
def _check_if_priors_can_be_sampled(self):
"""Check if all priors can be sampled properly.
Raises
------
AttributeError
prior can't be sampled.
"""
for key in self.priors:
if isinstance(self.priors[key], Constraint):
continue
try:
self.priors[key].sample()
except AttributeError as e:
logger.warning('Cannot sample from {}, {}'.format(key, e))
def _verify_parameters(self):
""" Evaluate a set of parameters drawn from the prior
Tests if the likelihood evaluation passes
Raises
------
======
TypeError
Likelihood can't be evaluated.
......@@ -297,7 +280,7 @@ class Sampler(object):
""" Times the likelihood evaluation and print an info message
Parameters
----------
==========
n_evaluations: int
The number of evaluations to estimate the evaluation time from
......@@ -323,7 +306,13 @@ class Sampler(object):
Checks if use_ratio is set. Prints a warning if use_ratio is set but
not properly implemented.
"""
self._check_if_priors_can_be_sampled()
try:
self.priors.sample_subset(self.search_parameter_keys)
except (KeyError, AttributeError):
logger.error("Cannot sample from priors with keys: {}.".format(
self.search_parameter_keys
))
raise
if self.use_ratio is False:
logger.debug("use_ratio set to False")
return
......@@ -343,12 +332,12 @@ class Sampler(object):
""" Prior transform method that is passed into the external sampler.
Parameters
----------
==========
theta: list
List of sampled values on a unit interval
Returns
-------
=======
list: Properly rescaled sampled values
"""
return self.priors.rescale(self._search_parameter_keys, theta)
......@@ -357,12 +346,12 @@ class Sampler(object):
"""
Parameters
----------
==========
theta: list
List of sampled values on a unit interval
Returns
-------
=======
float: Joint ln prior probability of theta
"""
......@@ -374,12 +363,12 @@ class Sampler(object):
"""
Parameters
----------
==========
theta: list
List of values for the likelihood parameters
Returns
-------
=======
float: Log-likelihood or log-likelihood-ratio given the current
likelihood.parameter values
......@@ -401,7 +390,7 @@ class Sampler(object):
""" Get a random draw from the prior distribution
Returns
-------
=======
draw: array_like
An ndim-length array of values drawn from the prior. Parameters
with delta-function (or fixed) priors are not returned
......@@ -420,12 +409,12 @@ class Sampler(object):
finite prior and likelihood (relevant for constrained priors).
Parameters
----------
==========
npoints: int
The number of values to return
Returns
-------
=======
unit_cube, parameters, likelihood: tuple of array_like
unit_cube (nlive, ndim) is an array of the prior samples from the
unit cube, parameters (nlive, ndim) is the unit_cube array
......@@ -454,12 +443,12 @@ class Sampler(object):
Also catches the output of `numpy.nan_to_num`.
Parameters
----------
==========
theta: array_like
Parameter values at which to evaluate likelihood
Returns
-------
=======
bool, cube (nlive,
True if the likelihood and prior are finite, false otherwise
......@@ -483,7 +472,7 @@ class Sampler(object):
"""
TODO: Implement this method
Raises
-------
=======
ValueError: in any case
"""
raise ValueError("Method not yet implemented")
......@@ -557,7 +546,7 @@ class NestedSampler(Sampler):
loglikelihoods
Parameters
----------
==========
sorted_samples, unsorted_samples: array-like
Sorted and unsorted values of the samples. These should be of the
same shape and contain the same sample values, but in different
......@@ -566,7 +555,7 @@ class NestedSampler(Sampler):
The loglikelihoods corresponding to the unsorted_samples
Returns
-------
=======
sorted_loglikelihoods: array-like
The loglikelihoods reordered to match that of the sorted_samples
......@@ -590,12 +579,12 @@ class NestedSampler(Sampler):
the prior constraint here.
Parameters
----------
==========
theta: array_like
Parameter values at which to evaluate likelihood
Returns
-------
=======
float: log_likelihood
"""
if self.priors.evaluate_constraints({
......@@ -646,7 +635,7 @@ class MCMCSampler(Sampler):
""" Uses the `emcee.autocorr` module to estimate the autocorrelation
Parameters
----------
==========
samples: array_like
A chain of samples.
c: float
......
from __future__ import absolute_import
import array
import copy
......@@ -19,8 +18,8 @@ class Cpnest(NestedSampler):
for that class for further help. Under Other Parameters, we list commonly
used kwargs and the bilby defaults.
Other Parameters
----------------
Parameters
==========
nlive: int
The number of live points, note this can also equivalently be given as
one of [npoints, nlives, n_live_points]
......@@ -89,8 +88,8 @@ class Cpnest(NestedSampler):
prior_samples = self.priors.sample()
self._update_bounds()
point = LivePoint(
self.names, array.array(
'f', [prior_samples[name] for name in self.names]))
self.names, array.array('d', [prior_samples[name] for name in self.names])
)
return point
self._resolve_proposal_functions()
......@@ -132,6 +131,7 @@ class Cpnest(NestedSampler):
self.result.nested_samples['weights'] = np.exp(log_weights)
self.result.log_evidence = out.NS.state.logZ
self.result.log_evidence_err = np.sqrt(out.NS.state.info / out.NS.state.nlive)
self.result.information_gain = out.NS.state.info
return self.result
def _verify_kwargs_against_default_kwargs(self):
......
......@@ -69,11 +69,11 @@ class DNest4(NestedSampler):
Bilby wrapper of DNest4
Parameters
----------
==========
TBD
Other Parameters
----------------
------==========
num_particles: int
The number of points to use in the Nested Sampling active population.
max_num_levels: int
......
from __future__ import absolute_import
import os
import dill as pickle
......@@ -22,7 +21,7 @@ class DynamicDynesty(Dynesty):
we list commonly all kwargs and the bilby defaults.
Parameters
----------
==========
likelihood: likelihood.Likelihood
A object with a log_l method
priors: bilby.core.prior.PriorDict, dict
......@@ -42,7 +41,7 @@ class DynamicDynesty(Dynesty):
only advisable for testing environments
Other Parameters
----------------
------==========
bound: {'none', 'single', 'multi', 'balls', 'cubes'}, ('multi')
Method used to select new points
sample: {'unif', 'rwalk', 'slice', 'rslice', 'hslice'}, ('rwalk')
......
......@@ -6,7 +6,7 @@ import pickle
import signal
import time
import tqdm
from tqdm.auto import tqdm
import matplotlib.pyplot as plt
import numpy as np
from pandas import DataFrame
......@@ -79,7 +79,7 @@ class Dynesty(NestedSampler):
we list commonly all kwargs and the bilby defaults.
Parameters
----------
==========
likelihood: likelihood.Likelihood
A object with a log_l method
priors: bilby.core.prior.PriorDict, dict
......@@ -99,7 +99,7 @@ class Dynesty(NestedSampler):
only advisable for testing environments
Other Parameters
----------------
------==========
npoints: int, (1000)
The number of live points, note this can also equivalently be given as
one of [nlive, nlives, n_live_points]
......@@ -224,7 +224,7 @@ class Dynesty(NestedSampler):
self.kwargs['update_interval'] = int(0.6 * self.kwargs['nlive'])
if self.kwargs['print_func'] is None:
self.kwargs['print_func'] = self._print_func
self.pbar = tqdm.tqdm(file=sys.stdout)
self.pbar = tqdm(file=sys.stdout)
Sampler._verify_kwargs_against_default_kwargs(self)
def _print_func(self, results, niter, ncall=None, dlogz=None, *args, **kwargs):
......@@ -401,6 +401,7 @@ class Dynesty(NestedSampler):
sorted_samples=self.result.samples)
self.result.log_evidence = out.logz[-1]
self.result.log_evidence_err = out.logzerr[-1]
self.result.information_gain = out.information[-1]
def _run_nested_wrapper(self, kwargs):
""" Wrapper function to run_nested
......@@ -409,7 +410,7 @@ class Dynesty(NestedSampler):
dynesty accepting different arguments.
Parameters
----------
==========
kwargs: dict
The dictionary of kwargs to pass to run_nested
......@@ -474,7 +475,7 @@ class Dynesty(NestedSampler):
The previous run time is set to self.
Parameters
----------
==========
continuing: bool
Whether the run is continuing or terminating, if True, the loaded
state is mostly written back to disk.
......@@ -612,7 +613,7 @@ class Dynesty(NestedSampler):
fig = dyplot.traceplot(self.sampler.results, labels=labels)[0]
fig.tight_layout()
fig.savefig(filename)
except (RuntimeError, np.linalg.linalg.LinAlgError, ValueError) as e:
except (RuntimeError, np.linalg.linalg.LinAlgError, ValueError, OverflowError, Exception) as e:
logger.warning(e)
logger.warning('Failed to create dynesty state plot at checkpoint')
finally:
......@@ -679,17 +680,27 @@ class Dynesty(NestedSampler):
cube we map this back to [0, 1].
Parameters
----------
==========
theta: list
List of sampled values on a unit interval
Returns
-------
=======
list: Properly rescaled sampled values
"""
return self.priors.rescale(self._search_parameter_keys, theta)
def calc_likelihood_count(self):
if self.likelihood_benchmark:
if hasattr(self, 'sampler'):
self.result.num_likelihood_evaluations = \
getattr(self.sampler, 'ncall', 0)
else:
self.result.num_likelihood_evaluations = 0
else:
return None
def sample_rwalk_bilby(args):
""" Modified bilby-implemented version of dynesty.sampling.sample_rwalk """
......@@ -809,13 +820,13 @@ def sample_rwalk_bilby(args):
def estimate_nmcmc(accept_ratio, old_act, maxmcmc, safety=5, tau=None):
""" Estimate autocorrelation length of chain using acceptance fraction
Using ACL = (2/acc) - 1 multiplied by a safety margin. Code adapated from
CPNest:
- https://github.com/johnveitch/cpnest/blob/master/cpnest/sampler.py
- http://github.com/farr/Ensemble.jl
Using ACL = (2/acc) - 1 multiplied by a safety margin. Code adapated from CPNest:
- https://github.com/johnveitch/cpnest/blob/master/cpnest/sampler.py
- http://github.com/farr/Ensemble.jl
Parameters
----------
==========
accept_ratio: float [0, 1]
Ratio of the number of accepted points to the total number of points
old_act: int
......