Commit b2bb6c11 authored by Moritz

Moved dynesty prior transform handling into the dynesty module

parent 331cb245
Pipeline #57651 passed in 6 minutes and 42 seconds
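Context for the diff below: dynesty explores the unit hypercube, and a prior transform maps each unit-cube coordinate to a physical parameter through the prior's inverse CDF (the rescale methods touched below). This commit turns Prior.test_valid_for_rescaling into a pure validity check that no longer returns the value, and adds the mod-1 wrapping to Dynesty.prior_transform so proposals stay inside the unit interval before reaching the priors. A minimal sketch of that overall flow, using illustrative helper names (uniform_rescale, gaussian_rescale, prior_transform) that are not part of the codebase:

# Minimal sketch (not the project's implementation) of how a dynesty-style
# prior transform maps unit-cube samples to physical parameters through each
# prior's inverse CDF. The helper names here are illustrative only.
import numpy as np
from scipy.special import erfinv

def uniform_rescale(val, minimum=0.0, maximum=10.0):
    # inverse CDF of a uniform prior, cf. Uniform.rescale below
    return minimum + val * (maximum - minimum)

def gaussian_rescale(val, mu=0.0, sigma=1.0):
    # inverse CDF of a Gaussian prior, cf. Gaussian.rescale below
    return mu + erfinv(2 * val - 1) * 2 ** 0.5 * sigma

def prior_transform(theta, rescale_fns):
    # Wrapping with % 1 keeps every coordinate inside [0, 1]; after this
    # commit that wrapping is done by the Dynesty sampler (see the last hunk).
    theta = [t % 1 for t in theta]
    return [fn(t) for fn, t in zip(rescale_fns, theta)]

print(prior_transform([0.5, 0.975], [uniform_rescale, gaussian_rescale]))
# -> [5.0, 1.959...]: the uniform median and the ~97.5th Gaussian percentile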
@@ -543,11 +543,10 @@ class Prior(object):
         -------
         ValueError: If val is not between 0 and 1
         """
-        valarray = np.atleast_1d(val)
-        tests = (valarray < 0) + (valarray > 1)
+        val = np.atleast_1d(val)
+        tests = (val < 0) + (val > 1)
         if np.any(tests):
             raise ValueError("Number to be rescaled should be in [0, 1]")
-        return val
 
     def __repr__(self):
         """Overrides the special method __repr__.
@@ -701,7 +700,7 @@ class DeltaFunction(Prior):
         -------
         float: Rescaled probability, equivalent to peak
         """
-        val = self.test_valid_for_rescaling(val)
+        Prior.test_valid_for_rescaling(val)
         return self.peak * val ** 0
 
     def prob(self, val):
@@ -763,7 +762,7 @@ class PowerLaw(Prior):
         -------
         float: Rescaled probability
         """
-        val = self.test_valid_for_rescaling(val)
+        Prior.test_valid_for_rescaling(val)
         if self.alpha == -1:
             return self.minimum * np.exp(val * np.log(self.maximum / self.minimum))
         else:
@@ -836,7 +835,7 @@ class Uniform(Prior):
                        periodic_boundary=periodic_boundary)
 
     def rescale(self, val):
-        val = self.test_valid_for_rescaling(val)
+        Prior.test_valid_for_rescaling(val)
         return self.minimum + val * (self.maximum - self.minimum)
 
     def prob(self, val):
@@ -940,7 +939,7 @@ class SymmetricLogUniform(Prior):
         -------
         float: Rescaled probability
         """
-        val = self.test_valid_for_rescaling(val)
+        Prior.test_valid_for_rescaling(val)
         if val < 0.5:
             return -self.maximum * np.exp(-2 * val * np.log(self.maximum / self.minimum))
         elif val > 0.5:
@@ -1008,7 +1007,7 @@ class Cosine(Prior):
         This maps to the inverse CDF. This has been analytically solved for this case.
         """
-        val = self.test_valid_for_rescaling(val)
+        Prior.test_valid_for_rescaling(val)
         norm = 1 / (np.sin(self.maximum) - np.sin(self.minimum))
         return np.arcsin(val / norm + np.sin(self.minimum))
@@ -1056,7 +1055,7 @@ class Sine(Prior):
         This maps to the inverse CDF. This has been analytically solved for this case.
         """
-        val = self.test_valid_for_rescaling(val)
+        Prior.test_valid_for_rescaling(val)
         norm = 1 / (np.cos(self.minimum) - np.cos(self.maximum))
         return np.arccos(np.cos(self.minimum) - val / norm)
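A quick numerical check (with bounds minimum=0 and maximum=pi assumed purely for illustration) that the Sine rescale above really is the analytic inverse CDF of p(theta) proportional to sin(theta):

# Illustrative check that the Sine rescale inverts the CDF of
# p(theta) proportional to sin(theta); the bounds 0 and pi are assumed here.
import numpy as np

minimum, maximum = 0.0, np.pi
norm = 1 / (np.cos(minimum) - np.cos(maximum))

def rescale(val):
    return np.arccos(np.cos(minimum) - val / norm)

def cdf(theta):
    return (np.cos(minimum) - np.cos(theta)) * norm

for u in [0.1, 0.5, 0.9]:
    print(u, cdf(rescale(u)))   # cdf(rescale(u)) recovers u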
@@ -1104,7 +1103,7 @@ class Gaussian(Prior):
         This maps to the inverse CDF. This has been analytically solved for this case.
         """
-        val = self.test_valid_for_rescaling(val)
+        Prior.test_valid_for_rescaling(val)
         return self.mu + erfinv(2 * val - 1) * 2 ** 0.5 * self.sigma
 
     def prob(self, val):
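Similarly, the Gaussian rescale is the normal inverse CDF written via erfinv; an illustrative check against scipy.stats.norm.ppf, with mu and sigma chosen arbitrarily for the example:

# Illustrative check that mu + erfinv(2*val - 1) * sqrt(2) * sigma equals the
# normal inverse CDF; mu and sigma are arbitrary example values.
from scipy.special import erfinv
from scipy.stats import norm

mu, sigma = 1.0, 2.0
for u in [0.025, 0.5, 0.975]:
    analytic = mu + erfinv(2 * u - 1) * 2 ** 0.5 * sigma
    print(analytic, norm.ppf(u, loc=mu, scale=sigma))  # the two columns agree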
@@ -1197,7 +1196,7 @@ class TruncatedGaussian(Prior):
         This maps to the inverse CDF. This has been analytically solved for this case.
         """
-        val = self.test_valid_for_rescaling(val)
+        Prior.test_valid_for_rescaling(val)
         return erfinv(2 * val * self.normalisation + erf(
             (self.minimum - self.mu) / 2 ** 0.5 / self.sigma)) * 2 ** 0.5 * self.sigma + self.mu
@@ -1326,7 +1325,7 @@ class LogNormal(Prior):
         This maps to the inverse CDF. This has been analytically solved for this case.
         """
-        val = self.test_valid_for_rescaling(val)
+        Prior.test_valid_for_rescaling(val)
         return scipy.stats.lognorm.ppf(val, self.sigma, scale=np.exp(self.mu))
 
     def prob(self, val):
@@ -1399,7 +1398,7 @@ class Exponential(Prior):
         This maps to the inverse CDF. This has been analytically solved for this case.
         """
-        val = self.test_valid_for_rescaling(val)
+        Prior.test_valid_for_rescaling(val)
         return scipy.stats.expon.ppf(val, scale=self.mu)
 
     def prob(self, val):
@@ -1460,7 +1459,7 @@ class StudentT(Prior):
         This maps to the inverse CDF. This has been analytically solved for this case.
         """
-        val = self.test_valid_for_rescaling(val)
+        Prior.test_valid_for_rescaling(val)
         # use scipy distribution percentage point function (ppf)
         return scipy.stats.t.ppf(val, self.df, loc=self.mu, scale=self.scale)
@@ -1528,7 +1527,7 @@ class Beta(Prior):
         This maps to the inverse CDF. This has been analytically solved for this case.
         """
-        val = self.test_valid_for_rescaling(val)
+        Prior.test_valid_for_rescaling(val)
         # use scipy distribution percentage point function (ppf)
         return self._dist.ppf(val)
@@ -1647,7 +1646,7 @@ class Logistic(Prior):
         This maps to the inverse CDF. This has been analytically solved for this case.
         """
-        val = self.test_valid_for_rescaling(val)
+        Prior.test_valid_for_rescaling(val)
         # use scipy distribution percentage point function (ppf)
         return scipy.stats.logistic.ppf(val, loc=self.mu, scale=self.scale)
@@ -1704,7 +1703,7 @@ class Cauchy(Prior):
         This maps to the inverse CDF. This has been analytically solved for this case.
         """
-        val = self.test_valid_for_rescaling(val)
+        Prior.test_valid_for_rescaling(val)
         # use scipy distribution percentage point function (ppf)
         return scipy.stats.cauchy.ppf(val, loc=self.alpha, scale=self.beta)
@@ -1787,7 +1786,7 @@ class Gamma(Prior):
         This maps to the inverse CDF. This has been analytically solved for this case.
         """
-        val = self.test_valid_for_rescaling(val)
+        Prior.test_valid_for_rescaling(val)
         # use scipy distribution percentage point function (ppf)
         return scipy.stats.gamma.ppf(val, self.k, loc=0., scale=self.theta)
@@ -1917,7 +1916,7 @@ class Interped(Prior):
         This maps to the inverse CDF. This is done using interpolation.
         """
-        val = self.test_valid_for_rescaling(val)
+        Prior.test_valid_for_rescaling(val)
         rescaled = self.inverse_cumulative_distribution(val)
         if rescaled.shape == ():
             rescaled = float(rescaled)
@@ -2081,7 +2080,7 @@ class FermiDirac(Prior):
         .. [1] M. Pitkin, M. Isi, J. Veitch & G. Woan, `arXiv:1705.08978v1
            <https://arxiv.org/abs/1705.08978v1>`_, 2017.
         """
-        val = self.test_valid_for_rescaling(val)
+        Prior.test_valid_for_rescaling(val)
         inv = (-np.exp(-1. * self.r) + (1. + np.exp(self.r))**-val +
                np.exp(-1. * self.r) * (1. + np.exp(self.r))**-val)
@@ -408,3 +408,7 @@ class Dynesty(NestedSampler):
             self.result.log_evidence = np.nan
             self.result.log_evidence_err = np.nan
         return self.result
+
+    def prior_transform(self, theta):
+        theta = [t % 1 for t in theta]
+        return super(Dynesty, self).prior_transform(theta)
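The added Dynesty.prior_transform wraps each coordinate back into [0, 1) (for example 1.05 -> 0.05 and -0.02 -> 0.98) before delegating to the parent class. The parent implementation is not part of this diff; the sketch below assumes, purely for illustration, that it applies each prior's rescale to the corresponding coordinate:

# Sketch of how the wrapping interacts with the parent prior_transform. The
# parent implementation is not shown in this diff; the version below is an
# assumption made for illustration only.
class NestedSampler(object):
    def __init__(self, priors, search_parameter_keys):
        self.priors = priors
        self.search_parameter_keys = search_parameter_keys

    def prior_transform(self, theta):
        # assumed behaviour: apply each prior's rescale to its coordinate
        return [self.priors[key].rescale(t)
                for key, t in zip(self.search_parameter_keys, theta)]

class Dynesty(NestedSampler):
    def prior_transform(self, theta):
        # wrap proposals such as 1.05 or -0.02 back into [0, 1) first
        theta = [t % 1 for t in theta]
        return super(Dynesty, self).prior_transform(theta)

class _UnitUniform(object):
    def rescale(self, val):
        return val  # identity rescale, for this demo only

sampler = Dynesty({"x": _UnitUniform()}, ["x"])
print(sampler.prior_transform([1.25]))   # -> [0.25]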