Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (14)
@@ -95,20 +95,20 @@ class PriorDict(dict):
check_directory_exists_and_if_not_mkdir(outdir)
prior_file = os.path.join(outdir, "{}.prior".format(label))
logger.debug("Writing priors to {}".format(prior_file))
mvgs = []
joint_dists = []
with open(prior_file, "w") as outfile:
for key in self.keys():
if isinstance(self[key], MultivariateGaussian):
mvgname = '_'.join(self[key].mvg.names) + '_mvg'
if mvgname not in mvgs:
mvgs.append(mvgname)
if JointPrior in self[key].__class__.__mro__:
distname = '_'.join(self[key].dist.names) + '_{}'.format(self[key].dist.distname)
if distname not in joint_dists:
joint_dists.append(distname)
outfile.write(
"{} = {}\n".format(mvgname, self[key].mvg))
mvgstr = repr(self[key].mvg)
"{} = {}\n".format(distname, self[key].dist))
diststr = repr(self[key].dist)
priorstr = repr(self[key])
outfile.write(
"{} = {}\n".format(key, priorstr.replace(mvgstr,
mvgname)))
"{} = {}\n".format(key, priorstr.replace(diststr,
distname)))
else:
outfile.write(
"{} = {}\n".format(key, self[key]))
@@ -641,22 +641,16 @@ class ConditionalPriorDict(PriorDict):
self._check_resolved()
self._update_rescale_keys(keys)
result = dict()
for key, index in zip(self._rescale_keys, self._rescale_indexes):
for key, index in zip(self.sorted_keys_without_fixed_parameters, self._rescale_indexes):
required_variables = {k: result[k] for k in getattr(self[key], 'required_variables', [])}
result[key] = self[key].rescale(theta[index], **required_variables)
return [result[key] for key in keys]
def _update_rescale_keys(self, keys):
if not keys == self._least_recently_rescaled_keys:
self._set_rescale_keys_and_indexes(keys)
self._rescale_indexes = [keys.index(element) for element in self.sorted_keys_without_fixed_parameters]
self._least_recently_rescaled_keys = keys
def _set_rescale_keys_and_indexes(self, keys):
unconditional_keys, unconditional_idxs, _ = np.intersect1d(keys, self.unconditional_keys, return_indices=True)
conditional_keys, conditional_idxs, _ = np.intersect1d(keys, self.conditional_keys, return_indices=True)
self._rescale_keys = np.append(unconditional_keys, conditional_keys)
self._rescale_indexes = np.append(unconditional_idxs, conditional_idxs)
def _check_resolved(self):
if not self._resolved:
raise IllegalConditionsException("The current set of priors contains unresolveable conditions.")
@@ -673,6 +667,10 @@ class ConditionalPriorDict(PriorDict):
def sorted_keys(self):
return self.unconditional_keys + self.conditional_keys
@property
def sorted_keys_without_fixed_parameters(self):
return [key for key in self.sorted_keys if not isinstance(self[key], (DeltaFunction, Constraint))]
def __setitem__(self, key, value):
super(ConditionalPriorDict, self).__setitem__(key, value)
self._resolve_conditions()
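As a quick illustration of the rescale path touched above (a sketch only; the condition function and priors are invented, mirroring the combined-conditions test added later in this diff), the keys passed to rescale may come in any order and are matched internally against sorted_keys_without_fixed_parameters:

import bilby

def a_condition_func(reference_params, b):
    # 'a' is conditioned on 'b'; here the condition leaves the bounds unchanged
    return dict(minimum=reference_params['minimum'],
                maximum=reference_params['maximum'])

priors = bilby.core.prior.ConditionalPriorDict()
priors['a'] = bilby.core.prior.ConditionalUniform(
    condition_func=a_condition_func, minimum=0, maximum=1)
priors['b'] = bilby.core.prior.LogUniform(minimum=1, maximum=10)

# unit-hypercube values are rescaled in dependency order, whichever order
# the keys are supplied in
print(priors.rescale(['a', 'b'], [0.5, 0.5]))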
@@ -2736,61 +2734,31 @@ class FermiDirac(Prior):
return lnp
class MultivariateGaussianDist(object):
class BaseJointPriorDist(object):
def __init__(self, names, nmodes=1, mus=None, sigmas=None, corrcoefs=None,
covs=None, weights=None, bounds=None):
def __init__(self, names, bounds=None):
"""
A class defining a multi-variate Gaussian, allowing multiple modes for
a Gaussian mixture model.
A class defining a JointPriorDist that will be overwritten by child
classes defining the joint prior distributions between the given parameters.
Note: if using a multivariate Gaussian prior, with bounds, this can
lead to biases in the marginal likelihood estimate and posterior
estimate for nested samplers routines that rely on sampling from a unit
hypercube and having a prior transform, e.g., nestle, dynesty and
MultiNest.
Parameters
----------
names: list
A list of the parameter names in the multivariate Gaussian. The
names: list (required)
A list of the parameter names in the JointPriorDist. The
listed parameters must have the same order that they appear in
the lists of means, standard deviations, and the correlation
coefficient, or covariance, matrices.
nmodes: int
The number of modes for the mixture model. This defaults to 1,
which will be checked against the shape of the other inputs.
mus: array_like
A list of lists of means of each mode in a multivariate Gaussian
mixture model. A single list can be given for a single mode. If
this is None then means at zero will be assumed.
sigmas: array_like
A list of lists of the standard deviations of each mode of the
multivariate Gaussian. If supplying a correlation coefficient
matrix rather than a covariance matrix these values must be given.
If this is None unit variances will be assumed.
corrcoefs: array
A list of square matrices containing the correlation coefficients
of the parameters for each mode. If this is None it will be assumed
that the parameters are uncorrelated.
covs: array
A list of square matrices containing the covariance matrix of the
multivariate Gaussian.
weights: list
A list of weights (relative probabilities) for each mode of the
multivariate Gaussian. This will default to equal weights for each
mode.
bounds: list
the lists of statistical parameters that may be passed in the child class.
bounds: list (optional)
A list of bounds on each parameter. The defaults are for bounds at
+/- infinity.
"""
self.distname = 'joint_dist'
if not isinstance(names, list):
self.names = [names]
else:
self.names = names
self.num_vars = len(self.names) # the number of parameters
self.num_vars = len(self.names)
# set the bounds for each parameter
if isinstance(bounds, list):
@@ -2815,83 +2783,12 @@ class MultivariateGaussianDist(object):
"a prior transform.")
else:
bounds = [(-np.inf, np.inf) for _ in self.names]
# set bounds as dictionary
self.bounds = {name: val for name, val in zip(self.names, bounds)}
self.mus = []
self.covs = []
self.corrcoefs = []
self.sigmas = []
self.weights = []
self.eigvalues = []
self.eigvectors = []
self.sqeigvalues = [] # square root of the eigenvalues
self.mvn = [] # list of multivariate normal distributions
self._current_sample = {} # initialise empty sample
self._uncorrelated = None
self._current_lnprob = None
# put values in lists if required
if nmodes == 1:
if mus is not None:
if len(np.shape(mus)) == 1:
mus = [mus]
elif len(np.shape(mus)) == 0:
raise ValueError("Must supply a list of means")
if sigmas is not None:
if len(np.shape(sigmas)) == 1:
sigmas = [sigmas]
elif len(np.shape(sigmas)) == 0:
raise ValueError("Must supply a list of standard "
"deviations")
if covs is not None:
if isinstance(covs, np.ndarray):
covs = [covs]
elif isinstance(covs, list):
if len(np.shape(covs)) == 2:
covs = [np.array(covs)]
elif len(np.shape(covs)) != 3:
raise TypeError("List of covariances the wrong shape")
else:
raise TypeError("Must pass a list of covariances")
if corrcoefs is not None:
if isinstance(corrcoefs, np.ndarray):
corrcoefs = [corrcoefs]
elif isinstance(corrcoefs, list):
if len(np.shape(corrcoefs)) == 2:
corrcoefs = [np.array(corrcoefs)]
elif len(np.shape(corrcoefs)) != 3:
raise TypeError("List of correlation coefficients the wrong shape")
elif not isinstance(corrcoefs, list):
raise TypeError("Must pass a list of correlation "
"coefficients")
if weights is not None:
if isinstance(weights, (int, float)):
weights = [weights]
elif isinstance(weights, list):
if len(weights) != 1:
raise ValueError("Wrong number of weights given")
for val in [mus, sigmas, covs, corrcoefs, weights]:
if val is not None and not isinstance(val, list):
raise TypeError("Value must be a list")
else:
if val is not None and len(val) != nmodes:
raise ValueError("Wrong number of modes given")
# add the modes
self.nmodes = 0
for i in range(nmodes):
mu = mus[i] if mus is not None else None
sigma = sigmas[i] if sigmas is not None else None
corrcoef = corrcoefs[i] if corrcoefs is not None else None
cov = covs[i] if covs is not None else None
weight = weights[i] if weights is not None else 1.
self.add_mode(mu, sigma, corrcoef, cov, weight)
# a dictionary of the parameters as requested by the prior
self.requested_parameters = dict()
self.reset_request()
@@ -2925,7 +2822,7 @@ class MultivariateGaussianDist(object):
def filled_rescale(self):
"""
Check is all the rescaled parameters have been filled.
Check if all the rescaled parameters have been filled.
"""
return not np.any([val is None for val in
@@ -2939,117 +2836,170 @@ class MultivariateGaussianDist(object):
for name in self.names:
self.rescale_parameters[name] = None
def add_mode(self, mus=None, sigmas=None, corrcoef=None, cov=None,
weight=1.):
"""
Add a new mode.
"""
def get_instantiation_dict(self):
subclass_args = infer_args_from_method(self.__init__)
property_names = [p for p in dir(self.__class__)
if isinstance(getattr(self.__class__, p), property)]
dict_with_properties = self.__dict__.copy()
for key in property_names:
dict_with_properties[key] = getattr(self, key)
instantiation_dict = dict()
for key in subclass_args:
if isinstance(dict_with_properties[key], list):
value = np.asarray(dict_with_properties[key]).tolist()
else:
value = dict_with_properties[key]
instantiation_dict[key] = value
return instantiation_dict
# add means
if mus is not None:
try:
self.mus.append(list(mus)) # means
except TypeError:
raise TypeError("'mus' must be a list")
else:
self.mus.append(np.zeros(self.num_vars))
def __len__(self):
return len(self.names)
# add the covariances if supplied
if cov is not None:
self.covs.append(np.asarray(cov))
def __repr__(self):
"""Overrides the special method __repr__.
if len(self.covs[-1].shape) != 2:
raise ValueError("Covariance matrix must be a 2d array")
Returns a representation of this instance that resembles how it is instantiated.
Works correctly for all child classes
if (self.covs[-1].shape[0] != self.covs[-1].shape[1] or
self.covs[-1].shape[0] != self.num_vars):
raise ValueError("Covariance shape is inconsistent")
Returns
-------
str: A string representation of this instance
# check matrix is symmetric
if not np.allclose(self.covs[-1], self.covs[-1].T):
raise ValueError("Covariance matrix is not symmetric")
"""
dist_name = self.__class__.__name__
instantiation_dict = self.get_instantiation_dict()
args = ', '.join(['{}={}'.format(key, repr(instantiation_dict[key]))
for key in instantiation_dict])
return "{}({})".format(dist_name, args)
self.sigmas.append(np.sqrt(np.diag(self.covs[-1]))) # standard deviations
def prob(self, samp):
"""
Get the probability of a sample. For bounded priors the
probability will not be properly normalised.
"""
# convert covariance into a correlation coefficient matrix
D = self.sigmas[-1] * np.identity(self.covs[-1].shape[0])
Dinv = np.linalg.inv(D)
self.corrcoefs.append(np.dot(np.dot(Dinv, self.covs[-1]), Dinv))
elif corrcoef is not None and sigmas is not None:
self.corrcoefs.append(np.asarray(corrcoef))
return np.exp(self.ln_prob(samp))
if len(self.corrcoefs[-1].shape) != 2:
raise ValueError("Correlation coefficient matrix must be a 2d "
"array.")
def _check_samp(self, value):
"""
Check a sample: convert it to a 2d array of shape (N, num_vars) and flag
any samples that fall outside the given bounds.
if (self.corrcoefs[-1].shape[0] != self.corrcoefs[-1].shape[1] or
self.corrcoefs[-1].shape[0] != self.num_vars):
raise ValueError("Correlation coefficient matrix shape is "
"inconsistent")
Parameters
----------
value: array_like
A 1d vector of the sample, or 2d array of sample values with shape
NxM, where N is the number of samples and M is the number of
parameters.
# check matrix is symmetric
if not np.allclose(self.corrcoefs[-1], self.corrcoefs[-1].T):
raise ValueError("Correlation coefficient matrix is not "
"symmetric")
Returns
-------
samp: array_like
returns the input value as a sample array
outbounds: array_like
Boolean Array that selects samples in samp that are out of given bounds
"""
samp = np.array(value)
if len(samp.shape) == 1:
samp = samp.reshape(1, self.num_vars)
# check diagonal is all ones
if not np.all(np.diag(self.corrcoefs[-1]) == 1.):
raise ValueError("Correlation coefficient matrix is not"
"correct")
if len(samp.shape) != 2:
raise ValueError("Array is the wrong shape")
elif samp.shape[1] != self.num_vars:
raise ValueError("Array is the wrong shape")
try:
self.sigmas.append(list(sigmas)) # standard deviations
except TypeError:
raise TypeError("'sigmas' must be a list")
# check sample(s) is within bounds
outbounds = np.ones(samp.shape[0], dtype=np.bool)
for s, bound in zip(samp.T, self.bounds.values()):
outbounds = (s < bound[0]) | (s > bound[1])
if np.any(outbounds):
break
return samp, outbounds
if len(self.sigmas[-1]) != self.num_vars:
raise ValueError("Number of standard deviations must be the "
"same as the number of parameters.")
def ln_prob(self, value):
"""
Get the log-probability of a sample. For bounded priors the
probability will not be properly normalised.
# convert correlation coefficients to covariance matrix
D = self.sigmas[-1] * np.identity(self.corrcoefs[-1].shape[0])
self.covs.append(np.dot(D, np.dot(self.corrcoefs[-1], D)))
else:
# set unit variance uncorrelated covariance
self.corrcoefs.append(np.eye(self.num_vars))
self.covs.append(np.eye(self.num_vars))
self.sigmas.append(np.ones(self.num_vars))
Parameters
----------
value: array_like
A 1d vector of the sample, or 2d array of sample values with shape
NxM, where N is the number of samples and M is the number of
parameters.
"""
# get eigen values and vectors
try:
evals, evecs = np.linalg.eig(self.corrcoefs[-1])
self.eigvalues.append(evals)
self.eigvectors.append(evecs)
except Exception as e:
raise RuntimeError("Problem getting eigenvalues and vectors: "
"{}".format(e))
samp, outbounds = self._check_samp(value)
lnprob = -np.inf * np.ones(samp.shape[0])
lnprob = self._ln_prob(samp, lnprob, outbounds)
if samp.shape[0] == 1:
return lnprob[0]
else:
return lnprob
# check eigenvalues are positive
if np.any(self.eigvalues[-1] <= 0.):
raise ValueError("Correlation coefficient matrix is not positive "
"definite")
self.sqeigvalues.append(np.sqrt(self.eigvalues[-1]))
def _ln_prob(self, samp, lnprob, outbounds):
"""
Get the log-probability of a sample. For bounded priors the
probability will not be properly normalised. **this method needs to be overwritten by the child class**
# set the weights
if weight is None:
self.weights.append(1.)
else:
self.weights.append(weight)
Parameters
----------
samp: vector
sample to evaluate the ln_prob at
lnprob: vector
vector of -inf values passed in, with one entry per sample
outbounds: array_like
boolean array showing which samples in the lnprob vector are out of the given bounds
# set the cumulative relative weights
self.cumweights = np.cumsum(self.weights) / np.sum(self.weights)
Returns
-------
lnprob: vector
array of lnprob values for each sample given
"""
"""
Here is where the subclass overwrites the ln_prob method
"""
return lnprob
# add the mode
self.nmodes += 1
def sample(self, size=1, **kwargs):
"""
Draw, and set, a sample from the Dist; the accompanying method _sample needs to be overwritten
# add multivariate Gaussian
self.mvn.append(scipy.stats.multivariate_normal(mean=self.mus[-1],
cov=self.covs[-1]))
Parameters
----------
size: int
number of samples to generate, defaults to 1
"""
def rescale(self, value, mode=None):
if size is None:
size = 1
samps = self._sample(size=size, **kwargs)
for i, name in enumerate(self.names):
if size == 1:
self.current_sample[name] = samps[:, i].flatten()[0]
else:
self.current_sample[name] = samps[:, i].flatten()
def _sample(self, size, **kwargs):
"""
Rescale from a unit hypercube to multivariate Gaussian. Note that no
bounds are applied in the rescale function.
Draw, and set, a sample from the joint dist (**needs to be overwritten by the child class**)
Parameters
----------
size: int
number of samples to generate, defaults to 1
"""
samps = np.zeros((size, len(self)))
"""
Here is where the subclass overwrites the sampling method
"""
return samps
def rescale(self, value, **kwargs):
"""
Rescale from a unit hypercube to JointPriorDist. Note that no
bounds are applied in the rescale function. (Child classes need to
overwrite the accompanying method _rescale().)
Parameters
----------
@@ -3057,9 +3007,9 @@ class MultivariateGaussianDist(object):
A 1d vector sample (one for each parameter) drawn from a uniform
distribution between 0 and 1, or a 2d NxM array of samples where
N is the number of samples and M is the number of parameters.
mode: int
Specify which mode to sample from. If not set then a mode is
chosen randomly based on its weight.
kwargs: dict
All keyword args that need to be passed to the _rescale method; these keyword
args are supplied via the JointPrior rescale methods for each parameter
Returns
-------
@@ -3067,14 +3017,6 @@ class MultivariateGaussianDist(object):
A vector sample drawn from the multivariate Gaussian
distribution.
"""
# pick a mode (with a probability given by their weights)
if mode is None:
if self.nmodes == 1:
mode = 0
else:
mode = np.argwhere(self.cumweights - np.random.rand() > 0)[0][0]
samp = np.asarray(value)
if len(samp.shape) == 1:
samp = samp.reshape(1, self.num_vars)
@@ -3084,31 +3026,285 @@ class MultivariateGaussianDist(object):
elif samp.shape[1] != self.num_vars:
raise ValueError("Array is the wrong shape")
# draw points from unit variance, uncorrelated Gaussian
samp = erfinv(2. * samp - 1) * 2. ** 0.5
samp = self._rescale(samp, **kwargs)
return np.squeeze(samp)
# rotate and scale to the multivariate normal shape
samp = self.mus[mode] + self.sigmas[mode] * np.einsum('ij,kj->ik',
samp * self.sqeigvalues[mode],
self.eigvectors[mode])
def _rescale(self, samp, **kwargs):
"""
Rescale a sample from the unit hypercube to the joint dist (**needs to be overwritten by the child class**)
return np.squeeze(samp)
Parameters
----------
samp: numpy array
this is a vector sample drawn from a uniform distribution to be rescaled to the distribution
"""
"""
Here is where the subclass overwrites the rescale method
"""
return samp
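To make the intended extension point concrete, an illustrative-only sketch of a child class follows; the UniformBoxDist name and its behaviour are hypothetical and not part of this merge, but the pattern (overwrite _rescale, _sample and _ln_prob) is the one the MultivariateGaussianDist below follows:

import numpy as np
from bilby.core.prior import BaseJointPriorDist  # assumes the class is importable as defined above

class UniformBoxDist(BaseJointPriorDist):
    """A hypothetical joint distribution: independent uniforms on a box."""

    def __init__(self, names, bounds):
        super(UniformBoxDist, self).__init__(names=names, bounds=bounds)
        self.distname = 'uniform_box'

    def _rescale(self, samp, **kwargs):
        # map unit-hypercube samples onto the box defined by the bounds
        lows = np.array([b[0] for b in self.bounds.values()])
        highs = np.array([b[1] for b in self.bounds.values()])
        return lows + samp * (highs - lows)

    def _sample(self, size, **kwargs):
        return self._rescale(np.random.uniform(0, 1, (size, len(self))))

    def _ln_prob(self, samp, lnprob, outbounds):
        # constant density inside the box, -inf outside
        lows = np.array([b[0] for b in self.bounds.values()])
        highs = np.array([b[1] for b in self.bounds.values()])
        lnprob = -np.sum(np.log(highs - lows)) * np.ones(samp.shape[0])
        lnprob[outbounds] = -np.inf
        return lnprob

Parameters of such a distribution would then be wrapped in the JointPrior class introduced further down.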
def sample(self, size=1, mode=None):
class MultivariateGaussianDist(BaseJointPriorDist):
def __init__(self, names, nmodes=1, mus=None, sigmas=None, corrcoefs=None,
covs=None, weights=None, bounds=None):
"""
Draw, and set, a sample from the multivariate Gaussian.
A class defining a multi-variate Gaussian, allowing multiple modes for
a Gaussian mixture model.
Note: if using a multivariate Gaussian prior, with bounds, this can
lead to biases in the marginal likelihood estimate and posterior
estimate for nested samplers routines that rely on sampling from a unit
hypercube and having a prior transform, e.g., nestle, dynesty and
MultiNest.
Parameters
----------
mode: int
Specify which mode to sample from. If not set then a mode is
chosen randomly based on its weight.
names: list
A list of the parameter names in the multivariate Gaussian. The
listed parameters must have the same order that they appear in
the lists of means, standard deviations, and the correlation
coefficient, or covariance, matrices.
nmodes: int
The number of modes for the mixture model. This defaults to 1,
which will be checked against the shape of the other inputs.
mus: array_like
A list of lists of means of each mode in a multivariate Gaussian
mixture model. A single list can be given for a single mode. If
this is None then means at zero will be assumed.
sigmas: array_like
A list of lists of the standard deviations of each mode of the
multivariate Gaussian. If supplying a correlation coefficient
matrix rather than a covariance matrix these values must be given.
If this is None unit variances will be assumed.
corrcoefs: array
A list of square matrices containing the correlation coefficients
of the parameters for each mode. If this is None it will be assumed
that the parameters are uncorrelated.
covs: array
A list of square matrices containing the covariance matrix of the
multivariate Gaussian.
weights: list
A list of weights (relative probabilities) for each mode of the
multivariate Gaussian. This will default to equal weights for each
mode.
bounds: list
A list of bounds on each parameter. The defaults are for bounds at
+/- infinity.
"""
super(MultivariateGaussianDist, self).__init__(names=names, bounds=bounds)
self.distname = 'mvg'
self.mus = []
self.covs = []
self.corrcoefs = []
self.sigmas = []
self.weights = []
self.eigvalues = []
self.eigvectors = []
self.sqeigvalues = [] # square root of the eigenvalues
self.mvn = [] # list of multivariate normal distributions
if size is None:
size = 1
self._current_sample = {} # initialise empty sample
self._uncorrelated = None
self._current_lnprob = None
# put values in lists if required
if nmodes == 1:
if mus is not None:
if len(np.shape(mus)) == 1:
mus = [mus]
elif len(np.shape(mus)) == 0:
raise ValueError("Must supply a list of means")
if sigmas is not None:
if len(np.shape(sigmas)) == 1:
sigmas = [sigmas]
elif len(np.shape(sigmas)) == 0:
raise ValueError("Must supply a list of standard "
"deviations")
if covs is not None:
if isinstance(covs, np.ndarray):
covs = [covs]
elif isinstance(covs, list):
if len(np.shape(covs)) == 2:
covs = [np.array(covs)]
elif len(np.shape(covs)) != 3:
raise TypeError("List of covariances the wrong shape")
else:
raise TypeError("Must pass a list of covariances")
if corrcoefs is not None:
if isinstance(corrcoefs, np.ndarray):
corrcoefs = [corrcoefs]
elif isinstance(corrcoefs, list):
if len(np.shape(corrcoefs)) == 2:
corrcoefs = [np.array(corrcoefs)]
elif len(np.shape(corrcoefs)) != 3:
raise TypeError("List of correlation coefficients the wrong shape")
elif not isinstance(corrcoefs, list):
raise TypeError("Must pass a list of correlation "
"coefficients")
if weights is not None:
if isinstance(weights, (int, float)):
weights = [weights]
elif isinstance(weights, list):
if len(weights) != 1:
raise ValueError("Wrong number of weights given")
for val in [mus, sigmas, covs, corrcoefs, weights]:
if val is not None and not isinstance(val, list):
raise TypeError("Value must be a list")
else:
if val is not None and len(val) != nmodes:
raise ValueError("Wrong number of modes given")
# add the modes
self.nmodes = 0
for i in range(nmodes):
mu = mus[i] if mus is not None else None
sigma = sigmas[i] if sigmas is not None else None
corrcoef = corrcoefs[i] if corrcoefs is not None else None
cov = covs[i] if covs is not None else None
weight = weights[i] if weights is not None else 1.
self.add_mode(mu, sigma, corrcoef, cov, weight)
def add_mode(self, mus=None, sigmas=None, corrcoef=None, cov=None,
weight=1.):
"""
Add a new mode.
"""
# add means
if mus is not None:
try:
self.mus.append(list(mus)) # means
except TypeError:
raise TypeError("'mus' must be a list")
else:
self.mus.append(np.zeros(self.num_vars))
# add the covariances if supplied
if cov is not None:
self.covs.append(np.asarray(cov))
if len(self.covs[-1].shape) != 2:
raise ValueError("Covariance matrix must be a 2d array")
if (self.covs[-1].shape[0] != self.covs[-1].shape[1] or
self.covs[-1].shape[0] != self.num_vars):
raise ValueError("Covariance shape is inconsistent")
# check matrix is symmetric
if not np.allclose(self.covs[-1], self.covs[-1].T):
raise ValueError("Covariance matrix is not symmetric")
self.sigmas.append(np.sqrt(np.diag(self.covs[-1]))) # standard deviations
# convert covariance into a correlation coefficient matrix
D = self.sigmas[-1] * np.identity(self.covs[-1].shape[0])
Dinv = np.linalg.inv(D)
self.corrcoefs.append(np.dot(np.dot(Dinv, self.covs[-1]), Dinv))
elif corrcoef is not None and sigmas is not None:
self.corrcoefs.append(np.asarray(corrcoef))
if len(self.corrcoefs[-1].shape) != 2:
raise ValueError("Correlation coefficient matrix must be a 2d "
"array.")
if (self.corrcoefs[-1].shape[0] != self.corrcoefs[-1].shape[1] or
self.corrcoefs[-1].shape[0] != self.num_vars):
raise ValueError("Correlation coefficient matrix shape is "
"inconsistent")
# check matrix is symmetric
if not np.allclose(self.corrcoefs[-1], self.corrcoefs[-1].T):
raise ValueError("Correlation coefficient matrix is not "
"symmetric")
# check diagonal is all ones
if not np.all(np.diag(self.corrcoefs[-1]) == 1.):
raise ValueError("Correlation coefficient matrix is not"
"correct")
try:
self.sigmas.append(list(sigmas)) # standard deviations
except TypeError:
raise TypeError("'sigmas' must be a list")
if len(self.sigmas[-1]) != self.num_vars:
raise ValueError("Number of standard deviations must be the "
"same as the number of parameters.")
# samples drawn from unit variance uncorrelated multivariate Gaussian
# convert correlation coefficients to covariance matrix
D = self.sigmas[-1] * np.identity(self.corrcoefs[-1].shape[0])
self.covs.append(np.dot(D, np.dot(self.corrcoefs[-1], D)))
else:
# set unit variance uncorrelated covariance
self.corrcoefs.append(np.eye(self.num_vars))
self.covs.append(np.eye(self.num_vars))
self.sigmas.append(np.ones(self.num_vars))
# get eigen values and vectors
try:
evals, evecs = np.linalg.eig(self.corrcoefs[-1])
self.eigvalues.append(evals)
self.eigvectors.append(evecs)
except Exception as e:
raise RuntimeError("Problem getting eigenvalues and vectors: "
"{}".format(e))
# check eigenvalues are positive
if np.any(self.eigvalues[-1] <= 0.):
raise ValueError("Correlation coefficient matrix is not positive "
"definite")
self.sqeigvalues.append(np.sqrt(self.eigvalues[-1]))
# set the weights
if weight is None:
self.weights.append(1.)
else:
self.weights.append(weight)
# set the cumulative relative weights
self.cumweights = np.cumsum(self.weights) / np.sum(self.weights)
# add the mode
self.nmodes += 1
# add multivariate Gaussian
self.mvn.append(scipy.stats.multivariate_normal(mean=self.mus[-1],
cov=self.covs[-1]))
def _rescale(self, samp, **kwargs):
try:
mode = kwargs['mode']
except KeyError:
mode = None
if mode is None:
if self.nmodes == 1:
mode = 0
else:
mode = np.argwhere(self.cumweights - np.random.rand() > 0)[0][0]
samp = erfinv(2. * samp - 1) * 2. ** 0.5
# rotate and scale to the multivariate normal shape
samp = self.mus[mode] + self.sigmas[mode] * np.einsum('ij,kj->ik',
samp * self.sqeigvalues[mode],
self.eigvectors[mode])
return samp
def _sample(self, size, **kwargs):
try:
mode = kwargs['mode']
except KeyError:
mode = None
if mode is None:
if self.nmodes == 1:
mode = 0
else:
mode = np.argwhere(self.cumweights - np.random.rand() > 0)[0][0]
samps = np.zeros((size, len(self)))
for i in range(size):
inbound = False
@@ -3129,42 +3325,9 @@ class MultivariateGaussianDist(object):
if not outbound:
inbound = True
for i, name in enumerate(self.names):
if size == 1:
self.current_sample[name] = samps[:, i].flatten()[0]
else:
self.current_sample[name] = samps[:, i].flatten()
def ln_prob(self, value):
"""
Get the log-probability of a sample. For bounded priors the
probability will not be properly normalised.
Parameters
----------
value: array_like
A 1d vector of the sample, or 2d array of sample values with shape
NxM, where N is the number of samples and M is the number of
parameters.
"""
samp = np.asarray(value)
if len(samp.shape) == 1:
samp = samp.reshape(1, self.num_vars)
return samps
if len(samp.shape) != 2:
raise ValueError("Array is the wrong shape")
elif samp.shape[1] != self.num_vars:
raise ValueError("Array is the wrong shape")
# check sample(s) is within bounds
outbounds = np.ones(samp.shape[0], dtype=np.bool)
for s, bound in zip(samp.T, self.bounds.values()):
outbounds = (s < bound[0]) | (s > bound[1])
if np.any(outbounds):
break
lnprob = -np.inf * np.ones(samp.shape[0])
def _ln_prob(self, samp, lnprob, outbounds):
for j in range(samp.shape[0]):
# loop over the modes and sum the probabilities
for i in range(self.nmodes):
@@ -3172,55 +3335,7 @@ class MultivariateGaussianDist(object):
# set out-of-bounds values to -inf
lnprob[outbounds] = -np.inf
if samp.shape[0] == 1:
return lnprob[0]
else:
return lnprob
def prob(self, samp):
"""
Get the probability of a sample. For bounded priors the
probability will not be properly normalised.
"""
return np.exp(self.ln_prob(samp))
def get_instantiation_dict(self):
subclass_args = infer_args_from_method(self.__init__)
property_names = [p for p in dir(self.__class__)
if isinstance(getattr(self.__class__, p), property)]
dict_with_properties = self.__dict__.copy()
for key in property_names:
dict_with_properties[key] = getattr(self, key)
instantiation_dict = dict()
for key in subclass_args:
if isinstance(dict_with_properties[key], list):
value = np.asarray(dict_with_properties[key]).tolist()
else:
value = dict_with_properties[key]
instantiation_dict[key] = value
return instantiation_dict
def __len__(self):
return len(self.names)
def __repr__(self):
"""Overrides the special method __repr__.
Returns a representation of this instance that resembles how it is instantiated.
Works correctly for all child classes
Returns
-------
str: A string representation of this instance
"""
dist_name = self.__class__.__name__
instantiation_dict = self.get_instantiation_dict()
args = ', '.join(['{}={}'.format(key, repr(instantiation_dict[key]))
for key in instantiation_dict])
return "{}({})".format(dist_name, args)
return lnprob
def __eq__(self, other):
if self.__class__ != other.__class__:
@@ -3258,132 +3373,136 @@ class MultivariateNormalDist(MultivariateGaussianDist):
""" A synonym for the :class:`~bilby.core.prior.MultivariateGaussianDist` distribution."""
class MultivariateGaussian(Prior):
class JointPrior(Prior):
def __init__(self, mvg, name=None, latex_label=None, unit=None):
"""
A prior class for a multivariate Gaussian (mixture model) prior.
def __init__(self, dist, name=None, latex_label=None, unit=None):
"""This defines the single parameter Prior object for parameters that belong to a JointPriorDist
Parameters
----------
mvg: MultivariateGaussianDist
A :class:`bilby.core.prior.MultivariateGaussianDist` object defining
the multivariate Gaussian distribution. This object is not copied,
as it needs to be shared across multiple priors, and as such its
contents will be altered by the prior.
dist: ChildClass of BaseJointPriorDist
The shared JointPriorDistribution that this parameter belongs to
name: str
See superclass
Name of this parameter. Must be contained in dist.names
latex_label: str
See superclass
unit: str
See superclass
"""
if BaseJointPriorDist not in dist.__class__.__bases__:
raise TypeError("Must supply a JointPriorDist object instance to be shared by all joint params")
if not isinstance(mvg, MultivariateGaussianDist):
raise TypeError("Must supply a multivariate Gaussian object")
if name not in dist.names:
raise ValueError("'{}' is not a parameter in the JointPriorDist")
self.dist = dist
super(JointPrior, self).__init__(name=name, latex_label=latex_label, unit=unit, minimum=dist.bounds[name][0],
maximum=dist.bounds[name][1])
@property
def minimum(self):
return self._minimum
@minimum.setter
def minimum(self, minimum):
self._minimum = minimum
self.dist.bounds[self.name] = (minimum, self.dist.bounds[self.name][1])
# check name is in the MultivariateGaussianDist class
if name not in mvg.names:
raise ValueError("'{}' is not a parameter in the multivariate "
"Gaussian")
self.mvg = mvg
@property
def maximum(self):
return self._maximum
super(MultivariateGaussian, self).__init__(name=name, latex_label=latex_label, unit=unit,
minimum=mvg.bounds[name][0],
maximum=mvg.bounds[name][1])
@maximum.setter
def maximum(self, maximum):
self._maximum = maximum
self.dist.bounds[self.name] = (self.dist.bounds[self.name][0], maximum)
def rescale(self, val, mode=None):
def rescale(self, val, **kwargs):
"""
Scale a unit hypercube sample to the prior.
Parameters
----------
mode: int
Specify which mode to sample from. If not set then a mode is
chosen randomly based on its weight.
val: array_like
value drawn from unit hypercube to be rescaled onto the prior
kwargs: dict
all kwargs passed to the dist.rescale method
Returns
-------
float:
A sample from the prior parameter.
"""
Prior.test_valid_for_rescaling(val)
# add parameter value to multivariate Gaussian
self.mvg.rescale_parameters[self.name] = val
self.test_valid_for_rescaling(val)
self.dist.rescale_parameters[self.name] = val
if self.mvg.filled_rescale():
values = np.array(list(self.mvg.rescale_parameters.values())).T
samples = self.mvg.rescale(values, mode=mode)
self.mvg.reset_rescale()
if self.dist.filled_rescale():
values = np.array(list(self.dist.rescale_parameters.values())).T
samples = self.dist.rescale(values, **kwargs)
self.dist.reset_rescale()
return samples
else:
return [] # return empty list
def sample(self, size=1, mode=None):
def sample(self, size=1, **kwargs):
"""
Draw a sample from the prior.
Parameters
----------
mode: int
Specify which mode to sample from. If not set then a mode is
chosen randomly based on its weight.
size: int, float (defaults to 1)
number of samples to draw
kwargs: dict
kwargs passed to the dist.sample method
Returns
-------
float:
A sample from the prior parameter.
"""
if self.name in self.mvg.sampled_parameters:
if self.name in self.dist.sampled_parameters:
logger.warning("You have already drawn a sample from parameter "
"'{}'. The same sample will be "
"returned".format(self.name))
if len(self.mvg.current_sample) == 0:
if len(self.dist.current_sample) == 0:
# generate a sample
self.mvg.sample(size=size, mode=mode)
self.dist.sample(size=size, **kwargs)
sample = self.mvg.current_sample[self.name]
sample = self.dist.current_sample[self.name]
if self.name not in self.mvg.sampled_parameters:
self.mvg.sampled_parameters.append(self.name)
if self.name not in self.dist.sampled_parameters:
self.dist.sampled_parameters.append(self.name)
if len(self.mvg.sampled_parameters) == len(self.mvg):
if len(self.dist.sampled_parameters) == len(self.dist):
# reset samples
self.mvg.reset_sampled()
self.dist.reset_sampled()
self.least_recently_sampled = sample
return sample
def prob(self, val):
"""Return the prior probability of val
def ln_prob(self, val):
"""
Return the natural logarithm of the prior probability. Note that this
will not be correctly normalised if there are bounds on the
distribution.
Parameters
----------
val: float
val: array_like
value to evaluate the prior log-prob at
Returns
-------
float:
"""
return np.exp(self.ln_prob(val))
def ln_prob(self, val):
"""
Return the natural logarithm of the prior probability. Note that this
will not be correctly normalised if there are bounds on the
distribution.
the log-probability value for the prior at the given sample
"""
self.dist.requested_parameters[self.name] = val
# add parameter value to multivariate Gaussian
self.mvg.requested_parameters[self.name] = val
if self.mvg.filled_request():
if self.dist.filled_request():
# all required parameters have been set
values = list(self.mvg.requested_parameters.values())
values = list(self.dist.requested_parameters.values())
# check for the same number of values for each parameter
for i in range(len(self.mvg) - 1):
for i in range(len(self.dist) - 1):
if (isinstance(values[i], (list, np.ndarray)) or
isinstance(values[i + 1], (list, np.ndarray))):
if (isinstance(values[i], (list, np.ndarray)) and
@@ -3395,11 +3514,10 @@ class MultivariateGaussian(Prior):
raise ValueError("Each parameter must have the same "
"number of requested values.")
lnp = self.mvg.ln_prob(np.asarray(values).T)
lnp = self.dist.ln_prob(np.asarray(values).T)
# reset the requested parameters
self.mvg.reset_request()
self.dist.reset_request()
return lnp
else:
# if not all parameters have been requested yet, just return 0
@@ -3417,32 +3535,33 @@ class MultivariateGaussian(Prior):
else:
return np.zeros_like(val)
@property
def minimum(self):
return self._minimum
def prob(self, val):
"""Return the prior probability of val
@minimum.setter
def minimum(self, minimum):
self._minimum = minimum
Parameters
----------
val: array_like
value to evaluate the prior prob at
# update the bounds in the MultivariateGaussianDist
self.mvg.bounds[self.name] = (minimum, self.mvg.bounds[self.name][1])
Returns
-------
float:
the probability value for the prior at the given sample
"""
@property
def maximum(self):
return self._maximum
return np.exp(self.ln_prob(val))
@maximum.setter
def maximum(self, maximum):
self._maximum = maximum
# update the bounds in the MultivariateGaussianDist
self.mvg.bounds[self.name] = (self.mvg.bounds[self.name][0], maximum)
class MultivariateGaussian(JointPrior):
def __init__(self, dist, name=None, latex_label=None, unit=None):
if not isinstance(dist, MultivariateGaussianDist):
raise JointPriorDistError("dist object must be instance of MultivariateGaussianDist")
super(MultivariateGaussian, self).__init__(dist=dist, name=name, latex_label=latex_label, unit=unit)
class MultivariateNormal(MultivariateGaussian):
"""A synonym for the :class:`bilby.core.prior.MultivariateGaussian`
prior distribution."""
""" A synonym for the :class:`bilby.core.prior.MultivariateGaussian`
prior distribution."""
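To show how the shared-distribution bookkeeping above plays out per parameter, a hedged sketch using the new dist= keyword (matching the updated tests below; the numbers are illustrative):

import bilby

mvg = bilby.core.prior.MultivariateGaussianDist(
    names=['testa', 'testb'], mus=[[1., 1.]], covs=[[[2., 0.5], [0.5, 2.]]])

priors = dict(
    testa=bilby.core.prior.MultivariateGaussian(dist=mvg, name='testa', unit='unit'),
    testb=bilby.core.prior.MultivariateGaussian(dist=mvg, name='testb', unit='unit'))

# ln_prob only evaluates once every parameter of the shared dist has been
# requested; until then it returns zero for the parameters seen so far
print(priors['testa'].ln_prob(0.5))  # 0 -- still waiting for 'testb'
print(priors['testb'].ln_prob(0.5))  # joint log-probability of (0.5, 0.5)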
def conditional_prior_factory(prior_class):
@@ -3492,6 +3611,7 @@ def conditional_prior_factory(prior_class):
self.condition_func = condition_func
self._reference_params = reference_params
self.__class__.__name__ = 'Conditional{}'.format(prior_class.__name__)
self.__class__.__qualname__ = 'Conditional{}'.format(prior_class.__qualname__)
def sample(self, size=None, **required_variables):
"""Draw a sample from the prior
@@ -3683,3 +3803,7 @@ class ConditionalPriorDictException(PriorDictException):
class IllegalConditionsException(ConditionalPriorDictException):
""" Exception class to handle prior dicts that contain unresolvable conditions. """
class JointPriorDistError(PriorException):
""" Class for Error handling of JointPriorDists for JointPriors """
@@ -670,7 +670,7 @@ class Interferometer(object):
line, self.strain_data.sampling_frequency))
if len(zpks) > 0:
zpk = gwpy.signal.filter_design.concatenate_zpks(*zpks)
strain = timeseries.filter(zpk, filtfilt=True)
strain = timeseries.filter(zpk, filtfilt=False)
else:
strain = timeseries
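For context on the one-line change above, a standalone sketch (not part of the diff; the data and line frequencies are invented) of building the concatenated zpk filter and applying it once, forward only, instead of forward-backward:

import numpy as np
from gwpy.timeseries import TimeSeries
from gwpy.signal import filter_design

sample_rate = 4096
timeseries = TimeSeries(np.random.normal(size=16 * sample_rate), sample_rate=sample_rate)

# notch out two illustrative power-line frequencies and stack the filters
zpks = [filter_design.notch(line, sample_rate) for line in (60.0, 120.0)]
zpk = filter_design.concatenate_zpks(*zpks)

# filtfilt=False applies the combined filter a single time (forward only)
strain = timeseries.filter(zpk, filtfilt=False)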
@@ -152,7 +152,8 @@ class WaveformGenerator(object):
transformed_model_data_points, parameters):
if parameters is not None:
self.parameters = parameters
if self.parameters == self._cache['parameters'] and self._cache['model'] == model:
if self.parameters == self._cache['parameters'] and self._cache['model'] == model and \
self._cache['transformed_model'] == transformed_model:
return self._cache['waveform']
if model is not None:
model_strain = self._strain_from_model(model_data_points, model)
@@ -164,6 +165,7 @@ class WaveformGenerator(object):
self._cache['waveform'] = model_strain
self._cache['parameters'] = self.parameters.copy()
self._cache['model'] = model
self._cache['transformed_model'] = transformed_model
return model_strain
def _strain_from_model(self, model_data_points, model):
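A runnable sketch of the behaviour the extra cache key guards against, in the spirit of the new tests added below (the source models and parameters here are invented stand-ins): swapping the source model between otherwise identical calls must not return the stale cached strain.

import numpy as np
import bilby

def gaussian_fd_model(frequency_array, amplitude, sigma):
    ht = amplitude * np.exp(-frequency_array ** 2 / (2 * sigma ** 2))
    return dict(plus=ht, cross=ht)

def flat_fd_model(frequency_array, amplitude, sigma):
    ht = amplitude * np.ones_like(frequency_array)
    return dict(plus=ht, cross=ht)

wg = bilby.gw.waveform_generator.WaveformGenerator(
    duration=1, sampling_frequency=256,
    frequency_domain_source_model=gaussian_fd_model)

params = dict(amplitude=1e-21, sigma=10.0)
first = wg.frequency_domain_strain(parameters=params)

# swapping the model with identical parameters should invalidate the cache
wg.frequency_domain_source_model = flat_fd_model
second = wg.frequency_domain_strain(parameters=params)
assert not np.array_equal(first['plus'], second['plus'])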
@@ -5,7 +5,6 @@ from mock import Mock
import mock
import numpy as np
import os
from collections import OrderedDict
import scipy.stats as ss
@@ -197,10 +196,10 @@ class TestPriorClasses(unittest.TestCase):
bilby.core.prior.Gamma(name='test', unit='unit', k=1, theta=1),
bilby.core.prior.ChiSquared(name='test', unit='unit', nu=2),
bilby.gw.prior.AlignedSpin(name='test', unit='unit'),
bilby.core.prior.MultivariateGaussian(mvg=mvg, name='testa', unit='unit'),
bilby.core.prior.MultivariateGaussian(mvg=mvg, name='testb', unit='unit'),
bilby.core.prior.MultivariateNormal(mvg=mvn, name='testa', unit='unit'),
bilby.core.prior.MultivariateNormal(mvg=mvn, name='testb', unit='unit'),
bilby.core.prior.MultivariateGaussian(dist=mvg, name='testa', unit='unit'),
bilby.core.prior.MultivariateGaussian(dist=mvg, name='testb', unit='unit'),
bilby.core.prior.MultivariateNormal(dist=mvn, name='testa', unit='unit'),
bilby.core.prior.MultivariateNormal(dist=mvn, name='testb', unit='unit'),
bilby.core.prior.ConditionalDeltaFunction(condition_func=condition_func, name='test', unit='unit', peak=1),
bilby.core.prior.ConditionalGaussian(condition_func=condition_func, name='test', unit='unit', mu=0, sigma=1),
bilby.core.prior.ConditionalPowerLaw(condition_func=condition_func, name='test', unit='unit', alpha=0, minimum=0, maximum=1),
@@ -230,9 +229,9 @@ class TestPriorClasses(unittest.TestCase):
def test_minimum_rescaling(self):
"""Test the the rescaling works as expected."""
for prior in self.priors:
if isinstance(prior, bilby.core.prior.MultivariateGaussian):
if bilby.core.prior.JointPrior in prior.__class__.__mro__:
minimum_sample = prior.rescale(0)
if prior.mvg.filled_rescale():
if prior.dist.filled_rescale():
self.assertAlmostEqual(minimum_sample[0], prior.minimum)
self.assertAlmostEqual(minimum_sample[1], prior.minimum)
else:
@@ -242,9 +241,9 @@ class TestPriorClasses(unittest.TestCase):
def test_maximum_rescaling(self):
"""Test the the rescaling works as expected."""
for prior in self.priors:
if isinstance(prior, bilby.core.prior.MultivariateGaussian):
if bilby.core.prior.JointPrior in prior.__class__.__mro__:
maximum_sample = prior.rescale(0)
if prior.mvg.filled_rescale():
if prior.dist.filled_rescale():
self.assertAlmostEqual(maximum_sample[0], prior.maximum)
self.assertAlmostEqual(maximum_sample[1], prior.maximum)
else:
@@ -255,8 +254,8 @@ class TestPriorClasses(unittest.TestCase):
"""Test the the rescaling works as expected."""
for prior in self.priors:
many_samples = prior.rescale(np.random.uniform(0, 1, 1000))
if isinstance(prior, bilby.core.prior.MultivariateGaussian):
if not prior.mvg.filled_rescale():
if bilby.core.prior.JointPrior in prior.__class__.__mro__:
if not prior.dist.filled_rescale():
continue
self.assertTrue(all((many_samples >= prior.minimum) & (many_samples <= prior.maximum)))
@@ -304,7 +303,7 @@ class TestPriorClasses(unittest.TestCase):
def test_prob_and_ln_prob(self):
for prior in self.priors:
sample = prior.sample()
if not isinstance(prior, bilby.core.prior.MultivariateGaussian):
if not bilby.core.prior.JointPrior in prior.__class__.__mro__:
# due to the way that the Multivariate Gaussian prior must sequentially call
# the prob and ln_prob functions, it must be ignored in this test.
self.assertAlmostEqual(np.log(prior.prob(sample)), prior.ln_prob(sample), 12)
@@ -312,7 +311,7 @@ class TestPriorClasses(unittest.TestCase):
def test_many_prob_and_many_ln_prob(self):
for prior in self.priors:
samples = prior.sample(10)
if not isinstance(prior, bilby.core.prior.MultivariateGaussian):
if not bilby.core.prior.JointPrior in prior.__class__.__mro__:
ln_probs = prior.ln_prob(samples)
probs = prior.prob(samples)
for sample, logp, p in zip(samples, ln_probs, probs):
@@ -323,9 +322,8 @@ class TestPriorClasses(unittest.TestCase):
domain = np.linspace(0, 1, 100)
threshold = 1e-9
for prior in self.priors:
if isinstance(prior, (
bilby.core.prior.DeltaFunction,
bilby.core.prior.MultivariateGaussian)):
if isinstance(prior, bilby.core.prior.DeltaFunction) or \
bilby.core.prior.JointPrior in prior.__class__.__mro__:
continue
rescaled = prior.rescale(domain)
max_difference = max(np.abs(domain - prior.cdf(rescaled)))
@@ -490,7 +488,7 @@ class TestPriorClasses(unittest.TestCase):
continue
if isinstance(prior, bilby.core.prior.Cauchy):
continue
if isinstance(prior, bilby.core.prior.MultivariateGaussian):
if bilby.core.prior.JointPrior in prior.__class__.__mro__:
continue
elif isinstance(prior, bilby.core.prior.Gaussian):
domain = np.linspace(-1e2, 1e2, 1000)
@@ -1182,6 +1180,28 @@ class TestConditionalPriorDict(unittest.TestCase):
with self.assertRaises(bilby.core.prior.IllegalConditionsException):
self.conditional_priors.rescale(keys=list(self.test_sample.keys()), theta=list(self.test_sample.values()))
def test_combined_conditions(self):
def d_condition_func(reference_params, a, b, c):
return dict(minimum=reference_params['minimum'], maximum=reference_params['maximum'])
def a_condition_func(reference_params, b, c):
return dict(minimum=reference_params['minimum'], maximum=reference_params['maximum'])
priors = bilby.core.prior.ConditionalPriorDict()
priors['a'] = bilby.core.prior.ConditionalUniform(condition_func=a_condition_func,
minimum=0, maximum=1)
priors['b'] = bilby.core.prior.LogUniform(minimum=1, maximum=10)
priors['d'] = bilby.core.prior.ConditionalUniform(condition_func=d_condition_func,
minimum=0.0, maximum=1.0)
priors['c'] = bilby.core.prior.LogUniform(minimum=1, maximum=10)
sample = priors.sample()
res = priors.rescale(['a', 'b', 'd', 'c'], [0.5, 0.5, 0.5, 0.5])
print(res)
class TestJsonIO(unittest.TestCase):
@@ -1226,10 +1246,10 @@ class TestJsonIO(unittest.TestCase):
aa=bilby.core.prior.Gamma(name='test', unit='unit', k=1, theta=1),
ab=bilby.core.prior.ChiSquared(name='test', unit='unit', nu=2),
ac=bilby.gw.prior.AlignedSpin(name='test', unit='unit'),
ad=bilby.core.prior.MultivariateGaussian(mvg=mvg, name='testa', unit='unit'),
ae=bilby.core.prior.MultivariateGaussian(mvg=mvg, name='testb', unit='unit'),
af=bilby.core.prior.MultivariateNormal(mvg=mvn, name='testa', unit='unit'),
ag=bilby.core.prior.MultivariateNormal(mvg=mvn, name='testb', unit='unit')
ad=bilby.core.prior.MultivariateGaussian(dist=mvg, name='testa', unit='unit'),
ae=bilby.core.prior.MultivariateGaussian(dist=mvg, name='testb', unit='unit'),
af=bilby.core.prior.MultivariateNormal(dist=mvn, name='testa', unit='unit'),
ag=bilby.core.prior.MultivariateNormal(dist=mvn, name='testb', unit='unit')
))
def test_read_write_to_json(self):
@@ -16,6 +16,10 @@ def dummy_func_dict_return_value(frequency_array, amplitude, mu, sigma, ra, dec,
return ht
def dummy_func_array_return_value_2(array, amplitude, mu, sigma, ra, dec, geocent_time, psi):
return dict(plus=np.array(array), cross=np.array(array))
class TestWaveformGeneratorInstantiationWithoutOptionalParameters(unittest.TestCase):
def setUp(self):
@@ -302,6 +306,25 @@ class TestFrequencyDomainStrainMethod(unittest.TestCase):
parameters=self.simulation_parameters)
self.assertNotEqual(original_waveform, new_waveform)
def test_frequency_domain_caching_changing_model(self):
original_waveform = self.waveform_generator.frequency_domain_strain(
parameters=self.simulation_parameters)
self.waveform_generator.frequency_domain_source_model = dummy_func_array_return_value_2
new_waveform = self.waveform_generator.frequency_domain_strain(
parameters=self.simulation_parameters)
self.assertFalse(np.array_equal(original_waveform['plus'], new_waveform['plus']))
def test_time_domain_caching_changing_model(self):
self.waveform_generator = \
bilby.gw.waveform_generator.WaveformGenerator(duration=1, sampling_frequency=4096,
time_domain_source_model=dummy_func_dict_return_value)
original_waveform = self.waveform_generator.frequency_domain_strain(
parameters=self.simulation_parameters)
self.waveform_generator.time_domain_source_model = dummy_func_array_return_value_2
new_waveform = self.waveform_generator.frequency_domain_strain(
parameters=self.simulation_parameters)
self.assertFalse(np.array_equal(original_waveform['plus'], new_waveform['plus']))
class TestTimeDomainStrainMethod(unittest.TestCase):