Speed up core.prior classes

Merged Liting Xiao requested to merge (removed):speed-up-prior into master
All threads resolved!
Files changed: 2 (+136 −45)
@@ -12,7 +12,8 @@ import numpy as np
 import scipy.stats
 from scipy.integrate import cumtrapz
 from scipy.interpolate import interp1d
-from scipy.special import erf, erfinv
+from scipy.special import erf, erfinv, xlogy, log1p,\
+    gamma, gammaln, gammainc, gammaincinv, stdtr, stdtrit
 from matplotlib.cbook import flatten

 # Keep import bilby statement, it is necessary for some eval() statements
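
Note (reviewer aside, not part of the diff): the speed-up comes from bypassing the `scipy.stats` distribution machinery, which re-validates arguments and handles shapes on every call; the `scipy.special` kernels compute the same quantities directly. A rough, machine-dependent illustration:

```python
# Benchmark sketch: scipy.stats overhead vs. a direct scipy.special kernel
# for the same lognormal quantile. Timings vary by machine; the gap for
# scalar inputs is the motivation for this MR.
import timeit

setup = "import numpy as np; import scipy.stats; from scipy.special import erfinv"
t_stats = timeit.timeit(
    "scipy.stats.lognorm.ppf(0.3, 1.0, scale=np.exp(0.0))", setup=setup, number=1000)
t_special = timeit.timeit(
    "np.exp(0.0 + np.sqrt(2.0) * 1.0 * erfinv(2 * 0.3 - 1))", setup=setup, number=1000)
print(t_stats / t_special)  # often an order of magnitude or more for scalar calls
```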
@@ -1155,10 +1156,7 @@ class Uniform(Prior):
         -------
         float: log probability of val
         """
-        with np.errstate(divide='ignore'):
-            _ln_prob = np.log((val >= self.minimum) & (val <= self.maximum), dtype=np.float64)\
-                - np.log(self.maximum - self.minimum)
-        return _ln_prob
+        return xlogy(1, (val >= self.minimum) & (val <= self.maximum)) - xlogy(1, self.maximum - self.minimum)

     def cdf(self, val):
         _cdf = (val - self.minimum) / (self.maximum - self.minimum)
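
Note (not part of the diff): `xlogy(x, y)` computes `x * log(y)` with the convention `xlogy(0, y) = 0`, so `xlogy(1, indicator)` evaluates to `0` inside the support and `-inf` outside without tripping NumPy's divide-by-zero warning, which is why the `np.errstate` guard can be dropped. A quick check:

```python
import numpy as np
from scipy.special import xlogy

in_support = np.array([-1.0, 0.5, 2.0]) >= 0.0  # toy support indicator
print(xlogy(1, in_support))                     # [-inf  0.  0.], no RuntimeWarning
```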
@@ -1600,7 +1598,7 @@ class LogNormal(Prior):
         This maps to the inverse CDF. This has been analytically solved for this case.
         """
         self.test_valid_for_rescaling(val)
-        return scipy.stats.lognorm.ppf(val, self.sigma, scale=np.exp(self.mu))
+        return np.exp(self.mu + np.sqrt(2 * self.sigma ** 2) * erfinv(2 * val - 1))

     def prob(self, val):
         """Returns the prior probability of val.
@@ -1613,8 +1611,18 @@ class LogNormal(Prior):
         -------
         Union[float, array_like]: Prior probability of val
         """
-        return scipy.stats.lognorm.pdf(val, self.sigma, scale=np.exp(self.mu))
+        if isinstance(val, (float, int)):
+            if val <= self.minimum:
+                _prob = 0.
+            else:
+                _prob = np.exp(-(np.log(val) - self.mu) ** 2 / self.sigma ** 2 / 2)\
+                    / np.sqrt(2 * np.pi) / val / self.sigma
+        else:
+            _prob = np.zeros(len(val))
+            idx = (val > self.minimum)
+            _prob[idx] = np.exp(-(np.log(val[idx]) - self.mu) ** 2 / self.sigma ** 2 / 2)\
+                / np.sqrt(2 * np.pi) / val[idx] / self.sigma
+        return _prob

     def ln_prob(self, val):
         """Returns the log prior probability of val.
@@ -1627,11 +1635,30 @@ class LogNormal(Prior):
         -------
         Union[float, array_like]: Prior probability of val
         """
-        return scipy.stats.lognorm.logpdf(val, self.sigma, scale=np.exp(self.mu))
+        if isinstance(val, (float, int)):
+            if val <= self.minimum:
+                _ln_prob = -np.inf
+            else:
+                _ln_prob = -(np.log(val) - self.mu) ** 2 / self.sigma ** 2 / 2\
+                    - np.log(np.sqrt(2 * np.pi) * val * self.sigma)
+        else:
+            _ln_prob = -np.inf * np.ones(len(val))
+            idx = (val > self.minimum)
+            _ln_prob[idx] = -(np.log(val[idx]) - self.mu) ** 2\
+                / self.sigma ** 2 / 2 - np.log(np.sqrt(2 * np.pi) * val[idx] * self.sigma)
+        return _ln_prob

     def cdf(self, val):
-        return scipy.stats.lognorm.cdf(val, self.sigma, scale=np.exp(self.mu))
+        if isinstance(val, (float, int)):
+            if val <= self.minimum:
+                _cdf = 0.
+            else:
+                _cdf = 0.5 + erf((np.log(val) - self.mu) / self.sigma / np.sqrt(2)) / 2
+        else:
+            _cdf = np.zeros(len(val))
+            _cdf[val > self.minimum] = 0.5 + erf((
+                np.log(val[val > self.minimum]) - self.mu) / self.sigma / np.sqrt(2)) / 2
+        return _cdf


 class LogGaussian(LogNormal):
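
A sanity check one could run against the removed implementation (a sketch, assuming this branch is installed and the usual `LogNormal(mu, sigma)` constructor):

```python
# Spot-check: analytic LogNormal pdf/ln_prob/cdf vs. the scipy.stats calls
# they replace.
import numpy as np
import scipy.stats
from bilby.core.prior import LogNormal

prior = LogNormal(mu=0.5, sigma=1.2)
x = np.array([1e-3, 0.5, 1.0, 7.0])
ref = scipy.stats.lognorm(1.2, scale=np.exp(0.5))
assert np.allclose(prior.prob(x), ref.pdf(x))
assert np.allclose(prior.ln_prob(x), ref.logpdf(x))
assert np.allclose(prior.cdf(x), ref.cdf(x))
```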
@@ -1666,7 +1693,7 @@ class Exponential(Prior):
         This maps to the inverse CDF. This has been analytically solved for this case.
         """
         self.test_valid_for_rescaling(val)
-        return scipy.stats.expon.ppf(val, scale=self.mu)
+        return -self.mu * log1p(-val)

     def prob(self, val):
         """Return the prior probability of val.
@@ -1679,8 +1706,15 @@ class Exponential(Prior):
         -------
         Union[float, array_like]: Prior probability of val
         """
-        return scipy.stats.expon.pdf(val, scale=self.mu)
+        if isinstance(val, (float, int)):
+            if val < self.minimum:
+                _prob = 0.
+            else:
+                _prob = np.exp(-val / self.mu) / self.mu
+        else:
+            _prob = np.zeros(len(val))
+            _prob[val >= self.minimum] = np.exp(-val[val >= self.minimum] / self.mu) / self.mu
+        return _prob

     def ln_prob(self, val):
         """Returns the log prior probability of val.
@@ -1693,11 +1727,26 @@ class Exponential(Prior):
         -------
         Union[float, array_like]: Prior probability of val
         """
-        return scipy.stats.expon.logpdf(val, scale=self.mu)
+        if isinstance(val, (float, int)):
+            if val < self.minimum:
+                _ln_prob = -np.inf
+            else:
+                _ln_prob = -val / self.mu - np.log(self.mu)
+        else:
+            _ln_prob = -np.inf * np.ones(len(val))
+            _ln_prob[val >= self.minimum] = -val[val >= self.minimum] / self.mu - np.log(self.mu)
+        return _ln_prob

     def cdf(self, val):
-        return scipy.stats.expon.cdf(val, scale=self.mu)
+        if isinstance(val, (float, int)):
+            if val < self.minimum:
+                _cdf = 0.
+            else:
+                _cdf = 1. - np.exp(-val / self.mu)
+        else:
+            _cdf = np.zeros(len(val))
+            _cdf[val >= self.minimum] = 1. - np.exp(-val[val >= self.minimum] / self.mu)
+        return _cdf


 class StudentT(Prior):
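
Note (not part of the diff): the scalar/array split exists because the masked assignments (`_prob[val >= self.minimum] = ...`) require array input, while floats take the plain branch. A spot-check against the removed `scipy.stats.expon` calls (a sketch, assuming this branch is installed):

```python
import numpy as np
import scipy.stats
from bilby.core.prior import Exponential

prior = Exponential(mu=2.0)
x = np.array([-1.0, 0.0, 0.5, 3.0])  # includes a point below the support
assert np.allclose(prior.prob(x), scipy.stats.expon.pdf(x, scale=2.0))
assert np.allclose(prior.cdf(x), scipy.stats.expon.cdf(x, scale=2.0))
```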
@@ -1741,9 +1790,18 @@ class StudentT(Prior):
         This maps to the inverse CDF. This has been analytically solved for this case.
         """
         self.test_valid_for_rescaling(val)
-        # use scipy distribution percentage point function (ppf)
-        return scipy.stats.t.ppf(val, self.df, loc=self.mu, scale=self.scale)
+        if isinstance(val, (float, int)):
+            if val == 0:
+                rescaled = -np.inf
+            elif val == 1:
+                rescaled = np.inf
+            else:
+                rescaled = stdtrit(self.df, val) * self.scale + self.mu
+        else:
+            rescaled = stdtrit(self.df, val) * self.scale + self.mu
+            rescaled[val == 0] = -np.inf
+            rescaled[val == 1] = np.inf
+        return rescaled

     def prob(self, val):
         """Return the prior probability of val.
@@ -1756,7 +1814,8 @@ class StudentT(Prior):
         -------
         Union[float, array_like]: Prior probability of val
         """
-        return scipy.stats.t.pdf(val, self.df, loc=self.mu, scale=self.scale)
+        return gamma(0.5 * (self.df + 1)) / gamma(0.5 * self.df) / np.sqrt(np.pi * self.df)\
+            / self.scale * (1 + ((val - self.mu) / self.scale) ** 2 / self.df) ** (-0.5 * (self.df + 1))

     def ln_prob(self, val):
         """Returns the log prior probability of val.
@@ -1769,11 +1828,12 @@ class StudentT(Prior):
         -------
         Union[float, array_like]: Prior probability of val
         """
-        return scipy.stats.t.logpdf(val, self.df, loc=self.mu, scale=self.scale)
+        return gammaln(0.5 * (self.df + 1)) - gammaln(0.5 * self.df)\
+            - np.log(np.sqrt(np.pi * self.df) * self.scale) - (self.df + 1) / 2 *\
+            np.log(1 + ((val - self.mu) / self.scale) ** 2 / self.df)

     def cdf(self, val):
-        return scipy.stats.t.cdf(val, self.df, loc=self.mu, scale=self.scale)
+        return stdtr(self.df, (val - self.mu) / self.scale)


 class Beta(Prior):
@@ -1955,9 +2015,19 @@ class Logistic(Prior):
         This maps to the inverse CDF. This has been analytically solved for this case.
         """
         self.test_valid_for_rescaling(val)
-        # use scipy distribution percentage point function (ppf)
-        return scipy.stats.logistic.ppf(val, loc=self.mu, scale=self.scale)
+        if isinstance(val, (float, int)):
+            if val == 0:
+                rescaled = -np.inf
+            elif val == 1:
+                rescaled = np.inf
+            else:
+                rescaled = self.mu + self.scale * np.log(val / (1. - val))
+        else:
+            rescaled = np.inf * np.ones(len(val))
+            rescaled[val == 0] = -np.inf
+            rescaled[(val > 0) & (val < 1)] = self.mu + self.scale\
+                * np.log(val[(val > 0) & (val < 1)] / (1. - val[(val > 0) & (val < 1)]))
+        return rescaled

     def prob(self, val):
         """Return the prior probability of val.
@@ -1970,7 +2040,7 @@ class Logistic(Prior):
         -------
         Union[float, array_like]: Prior probability of val
         """
-        return scipy.stats.logistic.pdf(val, loc=self.mu, scale=self.scale)
+        return np.exp(self.ln_prob(val))

     def ln_prob(self, val):
         """Returns the log prior probability of val.
@@ -1983,11 +2053,11 @@ class Logistic(Prior):
         -------
         Union[float, array_like]: Prior probability of val
         """
-        return scipy.stats.logistic.logpdf(val, loc=self.mu, scale=self.scale)
+        return -(val - self.mu) / self.scale -\
+            2. * np.log(1. + np.exp(-(val - self.mu) / self.scale)) - np.log(self.scale)

     def cdf(self, val):
-        return scipy.stats.logistic.cdf(val, loc=self.mu, scale=self.scale)
+        return 1. / (1. + np.exp(-(val - self.mu) / self.scale))


 class Cauchy(Prior):
@@ -2026,9 +2096,16 @@ class Cauchy(Prior):
         This maps to the inverse CDF. This has been analytically solved for this case.
         """
         self.test_valid_for_rescaling(val)
-        # use scipy distribution percentage point function (ppf)
-        return scipy.stats.cauchy.ppf(val, loc=self.alpha, scale=self.beta)
+        rescaled = self.alpha + self.beta * np.tan(np.pi * (val - 0.5))
+        if isinstance(val, (float, int)):
+            if val == 1:
+                rescaled = np.inf
+            elif val == 0:
+                rescaled = -np.inf
+        else:
+            rescaled[val == 1] = np.inf
+            rescaled[val == 0] = -np.inf
+        return rescaled

     def prob(self, val):
         """Return the prior probability of val.
@@ -2041,7 +2118,7 @@ class Cauchy(Prior):
         -------
         Union[float, array_like]: Prior probability of val
         """
-        return scipy.stats.cauchy.pdf(val, loc=self.alpha, scale=self.beta)
+        return 1. / self.beta / np.pi / (1. + ((val - self.alpha) / self.beta) ** 2)

     def ln_prob(self, val):
         """Return the log prior probability of val.
@@ -2054,10 +2131,10 @@ class Cauchy(Prior):
         -------
         Union[float, array_like]: Log prior probability of val
         """
-        return scipy.stats.cauchy.logpdf(val, loc=self.alpha, scale=self.beta)
+        return - np.log(self.beta * np.pi) - np.log(1. + ((val - self.alpha) / self.beta) ** 2)

     def cdf(self, val):
-        return scipy.stats.cauchy.cdf(val, loc=self.alpha, scale=self.beta)
+        return 0.5 + np.arctan((val - self.alpha) / self.beta) / np.pi


 class Lorentzian(Cauchy):
@@ -2101,9 +2178,7 @@ class Gamma(Prior):
         This maps to the inverse CDF. This has been analytically solved for this case.
         """
         self.test_valid_for_rescaling(val)
-        # use scipy distribution percentage point function (ppf)
-        return scipy.stats.gamma.ppf(val, self.k, loc=0., scale=self.theta)
+        return gammaincinv(self.k, val) * self.theta

     def prob(self, val):
         """Return the prior probability of val.
@@ -2116,8 +2191,7 @@ class Gamma(Prior):
         -------
         Union[float, array_like]: Prior probability of val
         """
-        return scipy.stats.gamma.pdf(val, self.k, loc=0., scale=self.theta)
+        return np.exp(self.ln_prob(val))

     def ln_prob(self, val):
         """Returns the log prior probability of val.
@@ -2130,11 +2204,28 @@ class Gamma(Prior):
         -------
         Union[float, array_like]: Prior probability of val
         """
-        return scipy.stats.gamma.logpdf(val, self.k, loc=0., scale=self.theta)
+        if isinstance(val, (float, int)):
+            if val < self.minimum:
+                _ln_prob = -np.inf
+            else:
+                _ln_prob = xlogy(self.k - 1, val) - val / self.theta - xlogy(self.k, self.theta) - gammaln(self.k)
+        else:
+            _ln_prob = -np.inf * np.ones(len(val))
+            idx = (val >= self.minimum)
+            _ln_prob[idx] = xlogy(self.k - 1, val[idx]) - val[idx] / self.theta\
+                - xlogy(self.k, self.theta) - gammaln(self.k)
+        return _ln_prob

     def cdf(self, val):
-        return scipy.stats.gamma.cdf(val, self.k, loc=0., scale=self.theta)
+        if isinstance(val, (float, int)):
+            if val < self.minimum:
+                _cdf = 0.
+            else:
+                _cdf = gammainc(self.k, val / self.theta)
+        else:
+            _cdf = np.zeros(len(val))
+            _cdf[val >= self.minimum] = gammainc(self.k, val[val >= self.minimum] / self.theta)
+        return _cdf


 class ChiSquared(Gamma):
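
Note (not part of the diff): `xlogy(self.k - 1, val)` returns 0 rather than nan at `val = 0` when `k = 1` (the convention $0 \cdot \log 0 = 0$), so the exponential special case of the Gamma prior stays finite at the support boundary:

```python
from scipy.special import xlogy

print(xlogy(0, 0.0))    # 0.0, not nan: the k = 1 case stays finite at val = 0
print(xlogy(1.5, 0.0))  # -inf: for k > 1 the log-density still goes to -inf at 0
```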