Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (8)
Showing changes with 181 additions and 116 deletions
```diff
@@ -59,9 +59,9 @@ containers:
       ${script} --help;
     done

-basic-3.8:
-  <<: *test-python
-  image: python:3.8
+# basic-3.8:
+#   <<: *test-python
+#   image: python:3.8

 basic-3.9:
   <<: *test-python
@@ -78,9 +78,9 @@ basic-3.10:
     - python -m pip list installed
     - python test/test_samplers_import.py

-import-samplers-3.8:
-  <<: *test-samplers-import
-  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python38
+# import-samplers-3.8:
+#   <<: *test-samplers-import
+#   image: containers.ligo.org/lscsoft/bilby/v2-bilby-python38

 import-samplers-3.9:
   <<: *test-samplers-import
@@ -102,12 +102,12 @@ import-samplers-3.10:
     # Run precommits (flake8, spellcheck, isort, no merge conflicts, etc)
     - pre-commit run --all-files --verbose --show-diff-on-failure

-precommits-py3.8:
-  <<: *precommits
-  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python38
-  variables:
-    CACHE_DIR: ".pip38"
-    PYVERSION: "python38"
+# precommits-py3.8:
+#   <<: *precommits
+#   image: containers.ligo.org/lscsoft/bilby/v2-bilby-python38
+#   variables:
+#     CACHE_DIR: ".pip38"
+#     PYVERSION: "python38"

 precommits-py3.9:
   <<: *precommits
@@ -142,10 +142,10 @@ install:
     - pytest --cov=bilby --durations 10

-python-3.8:
-  <<: *unit-test
-  needs: ["basic-3.8", "precommits-py3.8"]
-  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python38
+# python-3.8:
+#   <<: *unit-test
+#   needs: ["basic-3.8", "precommits-py3.8"]
+#   image: containers.ligo.org/lscsoft/bilby/v2-bilby-python38

 python-3.9:
   <<: *unit-test
@@ -176,10 +176,10 @@ python-3.10:
     - python -m pip list installed
     - pytest test/integration/sampler_run_test.py --durations 10 -v

-python-3.8-samplers:
-  <<: *test-sampler
-  needs: ["basic-3.8", "precommits-py3.8"]
-  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python38
+# python-3.8-samplers:
+#   <<: *test-sampler
+#   needs: ["basic-3.8", "precommits-py3.8"]
+#   image: containers.ligo.org/lscsoft/bilby/v2-bilby-python38

 python-3.9-samplers:
   <<: *test-sampler
@@ -213,10 +213,10 @@ integration-tests-python-3.9:
     - pytest test/gw/plot_test.py

-plotting-python-3.8:
-  <<: *plotting
-  image: containers.ligo.org/lscsoft/bilby/v2-bilby-python38
-  needs: ["basic-3.8", "precommits-py3.8"]
+# plotting-python-3.8:
+#   <<: *plotting
+#   image: containers.ligo.org/lscsoft/bilby/v2-bilby-python38
+#   needs: ["basic-3.8", "precommits-py3.8"]

 plotting-python-3.9:
   <<: *plotting
@@ -285,10 +285,10 @@ pages:
     - docker image tag v3-bilby-$PYVERSION containers.ligo.org/lscsoft/bilby/v2-bilby-$PYVERSION:latest
    - docker image push containers.ligo.org/lscsoft/bilby/v2-bilby-$PYVERSION:latest

-build-python38-container:
-  <<: *build-container
-  variables:
-    PYVERSION: "python38"
+# build-python38-container:
+#   <<: *build-container
+#   variables:
+#     PYVERSION: "python38"

 build-python39-container:
   <<: *build-container
```
......
```diff
 # All notable changes will be documented in this file

+## [2.1.2] 2023-07-17
+Version 2.1.2 release of Bilby
+
+This is a bugfix release.
+Note that one of the changes will have a significant impact on scripts that rely on
+a seed for random data generation.
+Where users have previously used `np.random.seed`, they should now call
+`bilby.core.utils.random.seed`.
+
+### Changes
+- Fix issues related to random number generation with multiprocessing (!1273)
+- Enable cosmological priors to be written/read in our plain text format (!1258)
+- Allow posterior reweighting to be performed when changing the likelihood and the prior (!1260)
+
+## [2.1.1] 2023-04-28
+Version 2.1.1 release of Bilby
+
+Bugfix release
+
+### Changes
+- Fix the matched-filter SNR phase for the multiband likelihood (!1253)
+- Bugfix for Fisher matrix proposals in `bilby_mcmc` (!1251)
+- Make the changes to the spline calibration backward compatible; 2.0.2 resume files can't be read with 2.1.0 (!1250)
+
 ## [2.1.0] 2023-04-12
 Version 2.1.0 release of Bilby
```
......
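The seeding note above amounts to a one-line migration in user scripts. A minimal sketch, using the `bilby.core.utils.random` API introduced in this merge (the seed value is arbitrary):

```python
import bilby

# Before: seeding NumPy's global state no longer controls bilby's internal draws
# np.random.seed(88170235)

# After: seed the shared Generator that bilby now draws from everywhere
bilby.core.utils.random.seed(88170235)

x = bilby.core.utils.random.rng.uniform(0, 1)  # reproducible across runs
```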
```diff
@@ -136,12 +136,14 @@ class Chain(object):

     @property
     def _random_idx(self):
+        from ..core.utils.random import rng
+
         mindex = self._last_minimum_index[1]
         # Check if mindex exceeds current position by 10 ACT: if so use a random sample
         # otherwise we draw only from the chain past the minimum_index
         if np.isinf(self.tau_last) or self.position - mindex < 10 * self.tau_last:
             mindex = 0
-        return np.random.randint(mindex, self.position + 1)
+        return rng.integers(mindex, self.position + 1)

     @property
     def random_sample(self):
```
......
```diff
@@ -9,7 +9,7 @@ from scipy.stats import gaussian_kde
 from ..core.fisher import FisherMatrixPosteriorEstimator
 from ..core.prior import PriorDict
 from ..core.sampler.base_sampler import SamplerError
-from ..core.utils import logger, reflect
+from ..core.utils import logger, random, reflect
 from ..gw.source import PARAMETER_SETS
@@ -19,7 +19,7 @@ class ProposalCycle(object):
         self.weights = [prop.weight for prop in self.proposal_list]
         self.normalized_weights = [w / sum(self.weights) for w in self.weights]
         self.weighted_proposal_list = [
-            np.random.choice(self.proposal_list, p=self.normalized_weights)
+            random.rng.choice(self.proposal_list, p=self.normalized_weights)
             for _ in range(10 * int(1 / min(self.normalized_weights)))
         ]
         self.nproposals = len(self.weighted_proposal_list)
@@ -219,7 +219,7 @@ class FixedGaussianProposal(BaseProposal):
         sample = chain.current_sample
         for key in self.parameters:
             sigma = self.prior_width_dict[key] * self.sigmas[key]
-            sample[key] += sigma * np.random.randn()
+            sample[key] += sigma * random.rng.normal(0, 1)
         log_factor = 0
         return sample, log_factor
@@ -256,15 +256,15 @@ class AdaptiveGaussianProposal(BaseProposal):
     def propose(self, chain):
         sample = chain.current_sample
         self.update_scale(chain)
-        if np.random.random() < 1e-3:
+        if random.rng.uniform(0, 1) < 1e-3:
             factor = 1e1
-        elif np.random.random() < 1e-4:
+        elif random.rng.uniform(0, 1) < 1e-4:
             factor = 1e2
         else:
             factor = 1
         for key in self.parameters:
             sigma = factor * self.scale * self.prior_width_dict[key] * self.sigmas[key]
-            sample[key] += sigma * np.random.randn()
+            sample[key] += sigma * random.rng.normal(0, 1)
         log_factor = 0
         return sample, log_factor
@@ -306,13 +306,13 @@ class DifferentialEvolutionProposal(BaseProposal):
         theta = chain.current_sample
         theta1 = chain.random_sample
         theta2 = chain.random_sample
-        if np.random.rand() > self.mode_hopping_frac:
+        if random.rng.uniform(0, 1) > self.mode_hopping_frac:
             gamma = 1
         else:
             # Base jump size
-            gamma = np.random.normal(0, 2.38 / np.sqrt(2 * self.ndim))
+            gamma = random.rng.normal(0, 2.38 / np.sqrt(2 * self.ndim))
             # Scale uniformly in log between 0.1 and 10 times
-            gamma *= np.exp(np.log(0.1) + np.log(100.0) * np.random.rand())
+            gamma *= np.exp(np.log(0.1) + np.log(100.0) * random.rng.uniform(0, 1))
         for key in self.parameters:
             theta[key] += gamma * (theta2[key] - theta1[key])
@@ -347,7 +347,7 @@ class UniformProposal(BaseProposal):
         for key in self.parameters:
             width = self.prior_width_dict[key]
             if np.isinf(width) is False:
-                sample[key] = np.random.uniform(
+                sample[key] = random.rng.uniform(
                     self.prior_minimum_dict[key], self.prior_maximum_dict[key]
                 )
             else:
@@ -778,14 +778,14 @@ class FixedJumpProposal(BaseProposal):
     def propose(self, chain):
         sample = chain.current_sample
         for key, jump in self.jumps.items():
-            sign = np.random.randint(2) * 2 - 1
+            sign = random.rng.integers(2) * 2 - 1
             sample[key] += sign * jump + self.epsilon * self.prior_width_dict[key]
         log_factor = 0
         return sample, log_factor

     @property
     def epsilon(self):
-        return self.scale * np.random.normal()
+        return self.scale * random.rng.normal()


 class FisherMatrixProposal(AdaptiveGaussianProposal):
@@ -805,7 +805,7 @@ class FisherMatrixProposal(AdaptiveGaussianProposal):
         weight=1,
         update_interval=100,
         scale_init=1e0,
-        fd_eps=1e-6,
+        fd_eps=1e-4,
         adapt=False,
     ):
         super(FisherMatrixProposal, self).__init__(
@@ -827,14 +827,14 @@ class FisherMatrixProposal(AdaptiveGaussianProposal):
             )
             try:
                 self.iFIM = fmp.calculate_iFIM(sample.dict)
-            except (RuntimeError, ValueError) as e:
+            except (RuntimeError, ValueError, np.linalg.LinAlgError) as e:
                 logger.warning(f"FisherMatrixProposal failed with {e}")
                 if hasattr(self, "iFIM") is False:
                     # No past iFIM exists, return sample
                     return sample, 0
             self.steps_since_update = 0
-        jump = self.scale * np.random.multivariate_normal(
+        jump = self.scale * random.rng.multivariate_normal(
             self.mean, self.iFIM, check_valid="ignore"
         )
@@ -897,11 +897,11 @@ class CorrelatedPolarisationPhaseJump(BaseGravitationalWaveTransientProposal):
         alpha = sample["psi"] + phase
         beta = sample["psi"] - phase

-        draw = np.random.random()
+        draw = random.rng.random()
         if draw < 0.5:
-            alpha = 3.0 * np.pi * np.random.random()
+            alpha = 3.0 * np.pi * random.rng.random()
         else:
-            beta = 3.0 * np.pi * np.random.random() - 2 * np.pi
+            beta = 3.0 * np.pi * random.rng.random() - 2 * np.pi

         # Update
         sample["psi"] = (alpha + beta) * 0.5
@@ -936,7 +936,7 @@ class PhaseReversalProposal(BaseGravitationalWaveTransientProposal):
     @property
     def epsilon(self):
         if self.fuzz:
-            return np.random.normal(0, self.fuzz_sigma)
+            return random.rng.normal(0, self.fuzz_sigma)
         else:
             return 0
@@ -1001,7 +1001,7 @@ class StretchProposal(BaseProposal):
 def _stretch_move(sample, complement, scale, ndim, parameters):
     # Draw z
-    u = np.random.rand()
+    u = random.rng.uniform(0, 1)
     z = (u * (scale - 1) + 1) ** 2 / scale
     log_factor = (ndim - 1) * np.log(z)
@@ -1045,7 +1045,7 @@ class EnsembleStretch(EnsembleProposal):
     def propose(self, chain, chain_complement):
         sample = chain.current_sample
         completement = chain_complement[
-            np.random.randint(len(chain_complement))
+            random.rng.integers(len(chain_complement))
         ].current_sample
         return _stretch_move(
             sample, completement, self.scale, self.ndim, self.parameters
```
......
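The proposal edits above all apply one consistent legacy-to-`Generator` translation. A standalone reference for the NumPy equivalences used throughout this merge (plain NumPy, nothing bilby-specific):

```python
import numpy as np

rng = np.random.default_rng(42)

u = rng.uniform(0, 1)  # replaces np.random.rand() and np.random.random()
g = rng.normal(0, 1)   # replaces np.random.randn()
i = rng.integers(10)   # replaces np.random.randint(10); high end exclusive in both
c = rng.choice([1, 2, 3], p=[0.2, 0.3, 0.5])  # replaces np.random.choice(...)
```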
```diff
@@ -16,7 +16,12 @@ from ..core.sampler.base_sampler import (
     _sampling_convenience_dump,
     signal_wrapper,
 )
-from ..core.utils import check_directory_exists_and_if_not_mkdir, logger, safe_file_dump
+from ..core.utils import (
+    check_directory_exists_and_if_not_mkdir,
+    logger,
+    random,
+    safe_file_dump,
+)
 from . import proposals
 from .chain import Chain, Sample
 from .utils import LOGLKEY, LOGPKEY, ConvergenceInputs, ParallelTemperingInputs
@@ -818,7 +823,7 @@ class BilbyPTMCMCSampler(object):
             with np.errstate(over="ignore"):
                 alpha_swap = np.exp(dbeta * (logli - loglj))

-            if np.random.uniform(0, 1) <= alpha_swap:
+            if random.rng.uniform(0, 1) <= alpha_swap:
                 sampleri.chain[-1] = vj
                 samplerj.chain[-1] = vi
                 self.sampler_dictionary[Tindex][Eindex] = sampleri
@@ -852,7 +857,7 @@ class BilbyPTMCMCSampler(object):
                 - curr[LOGPKEY]
             )

-            if np.random.uniform(0, 1) <= alpha:
+            if random.rng.uniform(0, 1) <= alpha:
                 sampler.accept_proposal(prop, proposal)
             else:
                 sampler.reject_proposal(curr, proposal)
@@ -1021,7 +1026,7 @@ class BilbyPTMCMCSampler(object):
         ln_z_realisations = []
         try:
             for _ in range(repeats):
-                idxs = [np.random.randint(i, i + ll) for i in range(steps - ll)]
+                idxs = [random.rng.integers(i, i + ll) for i in range(steps - ll)]
                 ln_z_realisations.append(
                     self._calculate_stepping_stone(betas, ln_likes[idxs, :])[0]
                 )
@@ -1256,7 +1261,7 @@ class BilbyMCMCSampler(object):
                 - curr[LOGPKEY]
             )

-            if np.random.uniform(0, 1) <= alpha:
+            if random.rng.uniform(0, 1) <= alpha:
                 internal_accepted += 1
                 proposal.accepted += 1
                 curr = prop
```
......
```diff
 import numpy as np
-import scipy.linalg
 import pandas as pd
+import scipy.linalg
 from scipy.optimize import minimize
@@ -61,12 +60,14 @@ class FisherMatrixPosteriorEstimator(object):
         return iFIM

     def sample_array(self, sample, n=1):
+        from .utils.random import rng
+
         if sample == "maxL":
             sample = self.get_maximum_likelihood_sample()

         self.mean = np.array(list(sample.values()))
         self.iFIM = self.calculate_iFIM(sample)
-        return np.random.multivariate_normal(self.mean, self.iFIM, n)
+        return rng.multivariate_normal(self.mean, self.iFIM, n)

     def sample_dataframe(self, sample, n=1):
         samples = self.sample_array(sample, n)
```
......
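For context on `sample_array` above: `iFIM` is the inverse Fisher information matrix, used as the covariance of a Gaussian (Laplace-style) approximation to the posterior, so draws follow

$$\theta \sim \mathcal{N}\big(\hat{\theta},\, F^{-1}\big), \qquad F_{ij} = -\left.\frac{\partial^2 \ln \mathcal{L}}{\partial \theta_i \, \partial \theta_j}\right|_{\hat{\theta}},$$

with $\hat{\theta}$ the maximum-likelihood sample when `sample == "maxL"`.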
```diff
@@ -7,8 +7,13 @@ import numpy as np
 import scipy.stats
 from scipy.interpolate import interp1d

-from bilby.core.utils import infer_args_from_method, BilbyJsonEncoder, decode_bilby_json, logger, \
-    get_dict_with_properties
+from ..utils import (
+    infer_args_from_method,
+    BilbyJsonEncoder,
+    decode_bilby_json,
+    logger,
+    get_dict_with_properties,
+)


 class Prior(object):
@@ -124,7 +129,9 @@ class Prior(object):
             float: A random number between 0 and 1, rescaled to match the distribution of this Prior

         """
-        self.least_recently_sampled = self.rescale(np.random.uniform(0, 1, size))
+        from ..utils.random import rng
+
+        self.least_recently_sampled = self.rescale(rng.uniform(0, 1, size))
         return self.least_recently_sampled

     def rescale(self, val):
@@ -222,18 +229,6 @@ class Prior(object):
         else:
             return f"{prior_module}.{prior_name}({args})"

-    @property
-    def _repr_dict(self):
-        """
-        Get a dictionary containing the arguments needed to reproduce this object.
-        """
-        property_names = {p for p in dir(self.__class__) if isinstance(getattr(self.__class__, p), property)}
-        subclass_args = infer_args_from_method(self.__init__)
-        dict_with_properties = self.__dict__.copy()
-        for key in property_names.intersection(subclass_args):
-            dict_with_properties[key] = getattr(self, key)
-        return {key: dict_with_properties[key] for key in subclass_args}
-
     @property
     def is_fixed(self):
         """
```
......
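The `sample` hunk above makes the sampling contract explicit: a draw is `rescale` (the inverse CDF) applied to unit-uniform variates from the shared generator. A sketch of what that implies, assuming this merge is installed:

```python
import bilby

prior = bilby.core.prior.Uniform(minimum=10, maximum=20, name="x")

bilby.core.utils.random.seed(1234)
draws = prior.sample(5)

# Replaying the same unit-uniform variates through rescale reproduces the draws
bilby.core.utils.random.seed(1234)
units = bilby.core.utils.random.rng.uniform(0, 1, 5)
assert all(draws == prior.rescale(units))
```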
```diff
-import numpy as np
-
 from .base import Prior, PriorException
 from .interpolated import Interped
 from .analytical import DeltaFunction, PowerLaw, Uniform, LogUniform, \
@@ -76,7 +74,9 @@ def conditional_prior_factory(prior_class):
                 float: See superclass

             """
-            self.least_recently_sampled = self.rescale(np.random.uniform(0, 1, size), **required_variables)
+            from ..utils.random import rng
+
+            self.least_recently_sampled = self.rescale(rng.uniform(0, 1, size), **required_variables)
             return self.least_recently_sampled

         def rescale(self, val, **required_variables):
```
......
```diff
@@ -6,6 +6,7 @@ from scipy.special import erfinv

 from .base import Prior, PriorException
 from ..utils import logger, infer_args_from_method, get_dict_with_properties
+from ..utils import random


 class BaseJointPriorDist(object):
@@ -583,7 +584,7 @@ class MultivariateGaussianDist(BaseJointPriorDist):
             if self.nmodes == 1:
                 mode = 0
             else:
-                mode = np.argwhere(self.cumweights - np.random.rand() > 0)[0][0]
+                mode = np.argwhere(self.cumweights - random.rng.uniform(0, 1) > 0)[0][0]

         samp = erfinv(2.0 * samp - 1) * 2.0 ** 0.5
@@ -604,12 +605,12 @@ class MultivariateGaussianDist(BaseJointPriorDist):
                 mode = 0
             else:
                 if size == 1:
-                    mode = np.argwhere(self.cumweights - np.random.rand() > 0)[0][0]
+                    mode = np.argwhere(self.cumweights - random.rng.uniform(0, 1) > 0)[0][0]
                 else:
                     # pick modes
                     mode = [
                         np.argwhere(self.cumweights - r > 0)[0][0]
-                        for r in np.random.rand(size)
+                        for r in random.rng.uniform(0, 1, size)
                     ]

         samps = np.zeros((size, len(self)))
@@ -617,7 +618,7 @@ class MultivariateGaussianDist(BaseJointPriorDist):
             inbound = False
             while not inbound:
                 # sample the multivariate Gaussian keys
-                vals = np.random.uniform(0, 1, len(self))
+                vals = random.rng.uniform(0, 1, len(self))

                 if isinstance(mode, list):
                     samp = np.atleast_1d(self.rescale(vals, mode=mode[i]))
```
......
```diff
@@ -24,6 +24,7 @@ from .utils import (
     recursively_load_dict_contents_from_group,
     recursively_decode_bilby_json,
     safe_file_dump,
+    random,
 )
 from .prior import Prior, PriorDict, DeltaFunction, ConditionalDeltaFunction
@@ -219,10 +220,15 @@ def get_weights_for_reweighting(
                            desc='Computing priors',
                            total=n),
                        start=starting_index):
+        # prior calculation needs to not have prior or likelihood keys
+        ln_prior = sample.pop("log_prior", np.nan)
+        if "log_likelihood" in sample:
+            del sample["log_likelihood"]
+
         if old_prior is not None:
             old_log_prior_array[ii] = old_prior.ln_prob(sample)
         else:
-            old_log_prior_array[ii] = sample["log_prior"]
+            old_log_prior_array[ii] = ln_prior

         if new_prior is not None:
             new_log_prior_array[ii] = new_prior.ln_prob(sample)
@@ -259,7 +265,7 @@ def rejection_sample(posterior, weights):
         The posterior resampled using rejection sampling
     """
-    keep = weights > np.random.uniform(0, max(weights), weights.shape)
+    keep = weights > random.rng.uniform(0, max(weights), weights.shape)
     return posterior[keep]
@@ -1870,7 +1876,7 @@ class ResultList(list):
         result_weights = np.exp(log_evidences - np.max(log_evidences))
         posteriors = list()
         for res, frac in zip(self, result_weights):
-            selected_samples = (np.random.uniform(size=len(res.posterior)) < frac)
+            selected_samples = (random.rng.uniform(size=len(res.posterior)) < frac)
             posteriors.append(res.posterior[selected_samples])

         # remove original nested_samples
```
......
```diff
@@ -18,6 +18,7 @@ from ..utils import (
     command_line_args,
     logger,
 )
+from ..utils.random import seed as set_seed


 @attr.s
@@ -305,6 +306,7 @@ class Sampler(object):
         for equiv in self.sampling_seed_equiv_kwargs:
             if equiv in kwargs:
                 kwargs[self.sampling_seed_key] = kwargs.pop(equiv)
+                set_seed(kwargs[self.sampling_seed_key])
         return kwargs

     @property
@@ -568,12 +570,14 @@ class Sampler(object):
         likelihood evaluations.
         """
+        from ..utils.random import rng
+
         logger.info("Generating initial points from the prior")
         unit_cube = []
         parameters = []
         likelihood = []
         while len(unit_cube) < npoints:
-            unit = np.random.rand(self.ndim)
+            unit = rng.uniform(0, 1, self.ndim)
             theta = self.prior_transform(unit)
             if self.check_draw(theta, warning=False):
                 unit_cube.append(unit)
```
......
```diff
@@ -42,9 +42,11 @@ class _DNest4Model(object):

     def perturb(self, coords):
         """The perturb function to perform Monte Carlo trial moves."""
-        idx = np.random.randint(self._n_dim)
-        coords[idx] += self._widths[idx] * (np.random.uniform(size=1) - 0.5)
+        from ..utils.random import rng
+
+        idx = rng.integers(self._n_dim)
+        coords[idx] += self._widths[idx] * (rng.uniform(size=1) - 0.5)

         cw = self._widths[idx]
         cc = self._centers[idx]
```
......
```diff
@@ -568,6 +568,8 @@ class Dynesty(NestedSampler):
         import dynesty
         from scipy.special import logsumexp

+        from ..utils.random import rng
+
         logwts = out["logwt"]
         weights = np.exp(logwts - out["logz"][-1])
         nested_samples = DataFrame(out.samples, columns=self.search_parameter_keys)
@@ -575,7 +577,7 @@ class Dynesty(NestedSampler):
         nested_samples["log_likelihood"] = out.logl
         self.result.nested_samples = nested_samples
         if self.rejection_sample_posterior:
-            keep = weights > np.random.uniform(0, max(weights), len(weights))
+            keep = weights > rng.uniform(0, max(weights), len(weights))
             self.result.samples = out.samples[keep]
             self.result.log_likelihood_evaluations = out.logl[keep]
             logger.info(
```
......
```diff
-import random
 from inspect import isclass

 import numpy as np

 from ..prior import Uniform
+from ..utils import random


 class Sample(dict):
@@ -135,7 +135,7 @@ class JumpProposalCycle(object):
         return len(self.proposal_functions)

     def update_cycle(self):
-        self._cycle = np.random.choice(
+        self._cycle = random.rng.choice(
             self.proposal_functions,
             size=self.cycle_length,
             p=self.weights,
@@ -195,14 +195,14 @@ class NormJump(JumpProposal):

     def __call__(self, sample, **kwargs):
         for key in sample.keys():
-            sample[key] = np.random.normal(sample[key], self.step_size)
+            sample[key] = random.rng.normal(sample[key], self.step_size)
         return super(NormJump, self).__call__(sample)


 class EnsembleWalk(JumpProposal):
     def __init__(
         self,
-        random_number_generator=random.random,
+        random_number_generator=random.rng.uniform,
         n_points=3,
         priors=None,
         **random_number_generator_args
@@ -227,7 +227,7 @@ class EnsembleWalk(JumpProposal):
         self.random_number_generator_args = random_number_generator_args

     def __call__(self, sample, **kwargs):
-        subset = random.sample(kwargs["coordinates"], self.n_points)
+        subset = random.rng.choice(kwargs["coordinates"], self.n_points, replace=False)
         for i in range(len(subset)):
             subset[i] = Sample.from_external_type(
                 subset[i], kwargs.get("sampler_name", None)
@@ -258,11 +258,11 @@ class EnsembleStretch(JumpProposal):
         self.scale = scale

     def __call__(self, sample, **kwargs):
-        second_sample = random.choice(kwargs["coordinates"])
+        second_sample = random.rng.choice(kwargs["coordinates"])
         second_sample = Sample.from_external_type(
             second_sample, kwargs.get("sampler_name", None)
         )
-        step = random.uniform(-1, 1) * np.log(self.scale)
+        step = random.rng.uniform(-1, 1) * np.log(self.scale)
         sample = second_sample + (sample - second_sample) * np.exp(step)
         self.log_j = len(sample) * step
         return super(EnsembleStretch, self).__call__(sample)
@@ -286,8 +286,8 @@ class DifferentialEvolution(JumpProposal):
         self.mu = mu

     def __call__(self, sample, **kwargs):
-        a, b = random.sample(kwargs["coordinates"], 2)
-        sample = sample + (b - a) * random.gauss(self.mu, self.sigma)
+        a, b = random.rng.choice(kwargs["coordinates"], 2, replace=False)
+        sample = sample + (b - a) * random.rng.normal(self.mu, self.sigma)
         return super(DifferentialEvolution, self).__call__(sample)
@@ -334,8 +334,8 @@ class EnsembleEigenVector(JumpProposal):

     def __call__(self, sample, **kwargs):
         self.update_eigenvectors(kwargs["coordinates"])
-        i = random.randrange(len(sample))
-        jump_size = np.sqrt(np.fabs(self.eigen_values[i])) * random.gauss(0, 1)
+        i = random.rng.integers(len(sample))
+        jump_size = np.sqrt(np.fabs(self.eigen_values[i])) * random.rng.normal(0, 1)
         for j, key in enumerate(sample.keys()):
             sample[key] += jump_size * self.eigen_vectors[j, i]
         return super(EnsembleEigenVector, self).__call__(sample)
```
......
```diff
@@ -323,6 +323,8 @@ class Ptemcee(MCMCSampler):

         from scipy.optimize import minimize

+        from ..utils.random import rng
+
         # Set up the minimize list: keys not in this list will have initial
         # positions drawn from the prior
         if minimize_list is None:
@@ -375,7 +377,7 @@ class Ptemcee(MCMCSampler):
             pos0_max = np.max(success[:, i])
             logger.info(f"Initialize {key} walkers from {pos0_min}->{pos0_max}")
             j = self.search_parameter_keys.index(key)
-            pos0[:, :, j] = np.random.uniform(
+            pos0[:, :, j] = rng.uniform(
                 pos0_min,
                 pos0_max,
                 size=(self.kwargs["ntemps"], self.kwargs["nwalkers"]),
```
......
...@@ -262,11 +262,13 @@ class Ultranest(_TemporaryFileSamplerMixin, NestedSampler): ...@@ -262,11 +262,13 @@ class Ultranest(_TemporaryFileSamplerMixin, NestedSampler):
def _generate_result(self, out): def _generate_result(self, out):
# extract results # extract results
from ..utils.random import rng
data = np.array(out["weighted_samples"]["points"]) data = np.array(out["weighted_samples"]["points"])
weights = np.array(out["weighted_samples"]["weights"]) weights = np.array(out["weighted_samples"]["weights"])
scaledweights = weights / weights.max() scaledweights = weights / weights.max()
mask = np.random.rand(len(scaledweights)) < scaledweights mask = rng.uniform(0, 1, len(scaledweights)) < scaledweights
nested_samples = DataFrame(data, columns=self.search_parameter_keys) nested_samples = DataFrame(data, columns=self.search_parameter_keys)
nested_samples["weights"] = weights nested_samples["weights"] = weights
......
```diff
+from . import random
 from .calculus import *
 from .cmd import *
 from .colors import *
```
......
```diff
+from numpy.random import default_rng, SeedSequence
+
+
+def __getattr__(name):
+    if name == "rng":
+        return Generator.rng
+
+
+class Generator:
+    rng = default_rng()
+
+
+def seed(seed):
+    Generator.rng = default_rng(seed)
+
+
+def generate_seeds(nseeds):
+    return SeedSequence(Generator.rng.integers(0, 2**63 - 1, size=4)).spawn(nseeds)
```
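The new module above keeps one process-wide `Generator` behind a module-level `__getattr__`, so `random.rng` always resolves to the current generator even after reseeding; that is also why call sites elsewhere in this merge import `rng` inside functions rather than at module import time. A minimal usage sketch:

```python
from bilby.core.utils import random

random.seed(150914)           # swap in a freshly seeded Generator
u = random.rng.uniform(0, 1)  # every bilby internal shares this stream

# Independent child seeds, e.g. one per worker process
child_seeds = random.generate_seeds(4)
```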
```diff
 import numpy as np

 _TOL = 14
@@ -165,29 +164,23 @@ def create_white_noise(sampling_frequency, duration):
         array_like: white noise
         array_like: frequency array
     """
+    from .random import rng
+
     number_of_samples = duration * sampling_frequency
     number_of_samples = int(np.round(number_of_samples))

-    delta_freq = 1. / duration
-
     frequencies = create_frequency_series(sampling_frequency, duration)

-    norm1 = 0.5 * (1. / delta_freq)**0.5
-    re1 = np.random.normal(0, norm1, len(frequencies))
-    im1 = np.random.normal(0, norm1, len(frequencies))
-    htilde1 = re1 + 1j * im1
+    norm1 = 0.5 * duration**0.5
+    re1, im1 = rng.normal(0, norm1, (2, len(frequencies)))
+    white_noise = re1 + 1j * im1

-    # convolve data with instrument transfer function
-    otilde1 = htilde1 * 1.
     # set DC and Nyquist = 0
-    otilde1[0] = 0
+    white_noise[0] = 0
     # no Nyquist frequency when N=odd
     if np.mod(number_of_samples, 2) == 0:
-        otilde1[-1] = 0
+        white_noise[-1] = 0

-    # normalise for positive frequencies and units of strain/rHz
-    white_noise = otilde1
     # python: transpose for use with infft
     white_noise = np.transpose(white_noise)
     frequencies = np.transpose(frequencies)
```
......
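The normalisation rewrite in the hunk above is purely algebraic: with frequency resolution $\Delta f = 1/T$ for duration $T$, the old and new expressions agree,

$$\mathrm{norm1} = \frac{1}{2}\sqrt{\frac{1}{\Delta f}} = \frac{1}{2}\sqrt{T},$$

so only the random-number source changes, not the noise amplitude.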
```diff
@@ -1619,8 +1619,9 @@ def binary_love_lambda_symmetric_to_lambda_1_lambda_2_automatic_marginalisation(
         lambda_2: float
             Tidal parameter of less massive neutron star.
     """
+    from ..core.utils.random import rng

-    binary_love_uniform = np.random.uniform(0, 1, len(lambda_symmetric))
+    binary_love_uniform = rng.uniform(0, 1, len(lambda_symmetric))

     lambda_1, lambda_2 = binary_love_lambda_symmetric_to_lambda_1_lambda_2_manual_marginalisation(
         binary_love_uniform, lambda_symmetric, mass_ratio)
@@ -2422,6 +2423,8 @@ def generate_posterior_samples_from_marginalized_likelihood(
         sample: DataFrame
             Returns the posterior with new samples.
     """
+    from ..core.utils.random import generate_seeds
+
     marginalized_parameters = getattr(likelihood, "_marginalized_parameters", list())
     if len(marginalized_parameters) == 0:
         return samples
@@ -2485,7 +2488,8 @@ def generate_posterior_samples_from_marginalized_likelihood(
         _sampling_convenience_dump.likelihood = likelihood
         pool = None

-    fill_args = [(ii, row) for ii, row in samples.iterrows()]
+    seeds = generate_seeds(len(samples))
+    fill_args = [(ii, row, seed) for (ii, row), seed in zip(samples.iterrows(), seeds)]
     ii = 0
     pbar = tqdm(total=len(samples), file=sys.stdout)
     while ii < len(samples):
@@ -2544,8 +2548,11 @@ def generate_sky_frame_parameters(samples, likelihood):

 def fill_sample(args):
     from ..core.sampler.base_sampler import _sampling_convenience_dump
+    from ..core.utils.random import seed
+
+    ii, sample, rseed = args
+    seed(rseed)
     likelihood = _sampling_convenience_dump.likelihood
-    ii, sample = args
     marginalized_parameters = getattr(likelihood, "_marginalized_parameters", list())
     sample = dict(sample).copy()
     likelihood.parameters.update(dict(sample).copy())
```
......
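The `generate_seeds`/`seed` pairing in the last hunks is NumPy's standard `SeedSequence.spawn` pattern for reproducible draws under multiprocessing: each task carries its own child seed, so results do not depend on which worker runs which task. A minimal standalone sketch (names here are illustrative, not bilby's):

```python
from multiprocessing import Pool

from numpy.random import SeedSequence, default_rng


def work(args):
    task_id, child_seed = args
    rng = default_rng(child_seed)  # independent stream per task
    return task_id, rng.normal()


if __name__ == "__main__":
    children = SeedSequence(2023).spawn(8)  # one child seed per task
    with Pool(4) as pool:
        results = pool.map(work, list(enumerate(children)))
```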