Commit 84675b8e authored by Moritz Huebner

Merge branch 'celerite_likelihood' into 'master'

Celerite/george likelihood

See merge request !1086
parents 84016656 86bc1b94
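
For reference, a minimal usage sketch of the new interface (the data, kernel, and numbers here are purely illustrative; see the example scripts below for complete usage):

import numpy as np
import celerite.terms
import bilby

# Illustrative toy data: a noisy straight line.
t = np.linspace(0, 10, 100)
y = 0.5 * t + 2 + np.random.normal(scale=0.1, size=len(t))

def line(x, a, b):
    return a * x + b

# Wrap a plain python function into a celerite mean model class.
LineMeanModel = bilby.core.likelihood.function_to_celerite_mean_model(line)
likelihood = bilby.core.likelihood.CeleriteLikelihood(
    kernel=celerite.terms.SHOTerm(log_S0=0, log_Q=0, log_omega0=0),
    mean_model=LineMeanModel(a=0, b=0),
    t=t,
    y=y,
    yerr=0.1,
)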
@@ -4,7 +4,7 @@ import numpy as np
from scipy.special import gammaln, xlogy
from scipy.stats import multivariate_normal
from .utils import infer_parameters_from_function, infer_args_from_function_except_n_args

class Likelihood(object):
@@ -570,5 +570,166 @@ class JointLikelihood(Likelihood):
        return sum([likelihood.noise_log_likelihood() for likelihood in self.likelihoods])
def function_to_celerite_mean_model(func):
    from celerite.modeling import Model as CeleriteModel
    return _function_to_gp_model(func, CeleriteModel)


def function_to_george_mean_model(func):
    from george.modeling import Model as GeorgeModel
    return _function_to_gp_model(func, GeorgeModel)


def _function_to_gp_model(func, cls):
    class MeanModel(cls):
        parameter_names = tuple(infer_args_from_function_except_n_args(func=func, n=1))

        def get_value(self, t):
            params = {name: getattr(self, name) for name in self.parameter_names}
            return func(t, **params)

        def compute_gradient(self, *args, **kwargs):
            pass

    return MeanModel
class _GPLikelihood(Likelihood):

    def __init__(self, kernel, mean_model, t, y, yerr=1e-6, gp_class=None):
        """
        Basic Gaussian Process likelihood interface for `celerite` and `george`.

        For `celerite` documentation see: https://celerite.readthedocs.io/en/stable/
        For `george` documentation see: https://george.readthedocs.io/en/latest/

        Parameters
        ==========
        kernel: Union[celerite.terms.Term, george.kernels.Kernel]
            `celerite` or `george` kernel. See the respective package documentation for usage.
        mean_model: Union[celerite.modeling.Model, george.modeling.Model]
            Mean model
        t: array_like
            The `times` or `x` values of the data set.
        y: array_like
            The `y` values of the data set.
        yerr: float, int, array_like, optional
            The error values on the y-values. If a single value is given, it is assumed that the value
            applies for all y-values. Default is 1e-6, effectively assuming that no y-errors are present.
        gp_class: type, None, optional
            The GP class to use. This is set by the child class used to instantiate the GP. It should
            usually not be provided by the user and is mostly used for testing.
        """
        self.kernel = kernel
        self.mean_model = mean_model
        self.t = np.array(t)
        self.y = np.array(y)
        self.yerr = np.array(yerr)
        self.GPClass = gp_class
        self.gp = self.GPClass(kernel=self.kernel, mean=self.mean_model, fit_mean=True, fit_white_noise=True)
        self.gp.compute(self.t, yerr=self.yerr)
        super().__init__(parameters=self.gp.get_parameter_dict())
    def set_parameters(self, parameters):
        """
        Safely set parameters on the internal `gp` and `mean_model` instances, as well as in the
        `parameters` dict.

        Parameters
        ==========
        parameters: dict, pandas.DataFrame
            The set of parameters we would like to set.
        """
        for name, value in parameters.items():
            try:
                self.gp.set_parameter(name=name, value=value)
            except ValueError:
                pass
            self.parameters[name] = value
class CeleriteLikelihood(_GPLikelihood):

    def __init__(self, kernel, mean_model, t, y, yerr=1e-6):
        """
        Gaussian Process likelihood interface for `celerite`.

        For `celerite` documentation see: https://celerite.readthedocs.io/en/stable/

        Parameters
        ==========
        kernel: celerite.terms.Term
            `celerite` kernel. See the `celerite` documentation for usage.
        mean_model: celerite.modeling.Model
            Mean model
        t: array_like
            The `times` or `x` values of the data set.
        y: array_like
            The `y` values of the data set.
        yerr: float, int, array_like, optional
            The error values on the y-values. If a single value is given, it is assumed that the value
            applies for all y-values. Default is 1e-6, effectively assuming that no y-errors are present.
        """
        import celerite
        super().__init__(kernel=kernel, mean_model=mean_model, t=t, y=y, yerr=yerr, gp_class=celerite.GP)
    def log_likelihood(self):
        """
        Calculate the log-likelihood for the Gaussian process given the current parameters.

        Returns
        =======
        float: The log-likelihood value.
        """
        self.gp.set_parameter_vector(vector=np.array(list(self.parameters.values())))
        try:
            return self.gp.log_likelihood(self.y)
        except Exception:
            return -np.inf
class GeorgeLikelihood(_GPLikelihood):

    def __init__(self, kernel, mean_model, t, y, yerr=1e-6):
        """
        Gaussian Process likelihood interface for `george`.

        For `george` documentation see: https://george.readthedocs.io/en/latest/

        Parameters
        ==========
        kernel: george.kernels.Kernel
            `george` kernel. See the `george` documentation for usage.
        mean_model: george.modeling.Model
            Mean model
        t: array_like
            The `times` or `x` values of the data set.
        y: array_like
            The `y` values of the data set.
        yerr: float, int, array_like, optional
            The error values on the y-values. If a single value is given, it is assumed that the value
            applies for all y-values. Default is 1e-6, effectively assuming that no y-errors are present.
        """
        import george
        super().__init__(kernel=kernel, mean_model=mean_model, t=t, y=y, yerr=yerr, gp_class=george.GP)
    def log_likelihood(self):
        """
        Calculate the log-likelihood for the Gaussian process given the current parameters.

        Returns
        =======
        float: The log-likelihood value.
        """
        for name, value in self.parameters.items():
            try:
                self.gp.set_parameter(name=name, value=value)
            except ValueError:
                raise ValueError(f"Parameter {name} not a valid parameter for the GP.")
        try:
            return self.gp.log_likelihood(self.y)
        except Exception:
            return -np.inf
class MarginalizedLikelihoodReconstructionError(Exception):
    pass
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
import celerite.terms
import bilby
from bilby.core.prior import Uniform
# In this example we show how we can use the `celerite` package within `bilby`.
# We begin by synthesizing some data and then use a simple Gaussian Process
# model to fit and interpolate the data.
# The advantage of `celerite` is its high speed which allows the application of
# Gaussian Processes to large one-dimensional data sets.
# `bilby` implements a likelihood interface to `celerite` and `george` and
# adds some useful utility functions.
# For specific use of `celerite`, see also the documentation at
# https://celerite.readthedocs.io/en/stable/, and the paper on the arxiv
# https://arxiv.org/abs/1703.09710.
# For the use of Gaussian Processes in general, Rasmussen and Williams (2006)
# provides an in-depth introduction. The book is available for free at
# http://www.gaussianprocess.org/.
label = "gaussian_process_celerite_example"
outdir = "outdir"
Path(outdir).mkdir(exist_ok=True)
# Here, we set up the parameters for the data creation.
# The data will be a Gaussian Bell curve times a sine function.
# We also add a linear trend and some Gaussian white noise.
period = 6
amplitude = 10
width = 20
jitter = 1
offset = 10
slope = 0.1
def linear_function(x, a, b):
    return a * x + b
# For the data creation, we leave a gap in the middle of the time series
# to see how the Gaussian Process model can interpolate the data.
# We fix the seed to ensure reproducibility.
np.random.seed(42)
times = np.linspace(0, 40, 100)
times = np.append(times, np.linspace(60, 100, 100))
dt = times[1] - times[0]
duration = times[-1] - times[0]
ys = (
    amplitude
    * np.sin(2 * np.pi * times / period)
    * np.exp(-((times - 50) ** 2) / 2 / width**2)
    + np.random.normal(scale=jitter, size=len(times))
    + linear_function(x=times, a=slope, b=offset)
)
plt.errorbar(times, ys, yerr=jitter, fmt=".k")
plt.xlabel("times")
plt.ylabel("y")
plt.savefig(f"{outdir}/{label}_data.pdf")
plt.show()
# We use a Gaussian Process kernel to model the Bell curve and sine aspects of
# the data. This specific kernel is a
# 'stochastically-driven harmonic oscillator'.
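# A stochastically-driven harmonic oscillator is described by an amplitude
# `S0`, a quality factor `Q`, and a characteristic frequency `omega0`, all of
# which `celerite` takes in log space (see the `celerite` paper for the
# corresponding power spectral density).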
kernel = celerite.terms.SHOTerm(log_S0=0, log_Q=0, log_omega0=0)
# If we wish to also infer the Gaussian white noise, we can add a `JitterTerm`.
# This is useful if the `yerr` values of the data are not known.
jitter_term = True
if jitter_term:
    kernel += celerite.terms.JitterTerm(log_sigma=0)
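# The `JitterTerm` adds a white noise component with standard deviation
# exp(log_sigma) to the kernel.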
# `bilby.core.likelihood.function_to_celerite_mean_model` takes a python
# function and transforms it into the correct class for `celerite` to use as
# a mean function. The first argument of the python function has to be the
# 'time' or 'x' variable, followed by the parameters. The model needs to be
# initialized with some values, though these do not matter for inference.
LinearMeanModel = bilby.core.likelihood.function_to_celerite_mean_model(linear_function)
mean_model = LinearMeanModel(a=0, b=0)
# Set up the likelihood. We set `yerr=1e-6` so that the amount of white
# noise is inferred via the `JitterTerm` instead. Much smaller values of
# `yerr` can cause numerical failures. If you know the `yerr` values for
# your problem, you can pass them in as an array.
likelihood = bilby.core.likelihood.CeleriteLikelihood(
    kernel=kernel, mean_model=mean_model, t=times, y=ys, yerr=1e-6
)
# Print the parameter names. This is useful if we have trouble figuring out
# how `celerite` applies its naming scheme.
print(likelihood.gp.parameter_names)
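# The printed names are exactly the keys the priors below must use, e.g.
# 'kernel:terms[0]:log_S0' for the first term of the summed kernel and
# 'mean:a' for the mean model.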
# Set up the priors. We know the parameter names from the print statement
# above.
priors = bilby.core.prior.PriorDict()
priors["kernel:terms[0]:log_S0"] = Uniform(
minimum=-10, maximum=30, name="log_S0", latex_label=r"$\ln S_0$"
)
priors["kernel:terms[0]:log_Q"] = Uniform(
minimum=-10, maximum=30, name="log_Q", latex_label=r"$\ln Q$"
)
priors["kernel:terms[0]:log_omega0"] = Uniform(
minimum=-5, maximum=5, name="log_omega0", latex_label=r"$\ln \omega_0$"
)
priors["kernel:terms[1]:log_sigma"] = Uniform(
minimum=-5, maximum=5, name="log sigma", latex_label=r"$\ln \sigma$"
)
priors["mean:a"] = Uniform(minimum=-100, maximum=100, name="a", latex_label=r"$a$")
priors["mean:b"] = Uniform(minimum=-100, maximum=100, name="b", latex_label=r"$b$")
# Run the sampling process as usual with `bilby`. The settings are such that
# the run should finish within a few minutes.
result = bilby.run_sampler(
    likelihood=likelihood,
    priors=priors,
    outdir=outdir,
    label=label,
    sampler="dynesty",
    sample="rslice",
    nlive=500,
    resume=True,
)
result.plot_corner()
# Let's also plot the parameters with known values separately. This way we
# can see if the inference process yielded sensible results.
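# Since omega_0 = 2 * pi / period, the true value of log_omega0 is
# -log(period / (2 * pi)).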
truths = {
    "mean:a": slope,
    "mean:b": offset,
    "kernel:terms[0]:log_omega0": -np.log(period / 2 / np.pi),
    "kernel:terms[1]:log_sigma": np.log(jitter),
}
result.label = f"{label}_truths"
result.plot_corner(truths=truths)
# Now let's plot the data with some possible realizations of the mean model,
# and the interpolated prediction from the Gaussian Process using the
# maximum likelihood parameters.
start_time = times[0]
end_time = times[-1]
# First, we extract the inferred Gaussian white noise jitter.
jitter = np.exp(result.posterior.iloc[-1]["kernel:terms[1]:log_sigma"])
# Next, we re-compute the Gaussian process with these y errors.
# `likelihood.gp` is an instance of the `celerite` Gaussian Process class.
# See the `celerite` documentation for a detailed explanation. We can then
# draw predicted means and variances from the `celerite` Gaussian process.
likelihood.gp.compute(times, jitter)
x = np.linspace(start_time, end_time, 5000)
pred_mean, pred_var = likelihood.gp.predict(ys, x, return_var=True)
pred_std = np.sqrt(pred_var)
# Plot the data and prediction.
color = "#ff7f0e"
plt.errorbar(times, ys, yerr=jitter, fmt=".k", capsize=0, label="Data")
plt.plot(x, pred_mean, color=color, label="Prediction")
plt.fill_between(
    x,
    pred_mean + pred_std,
    pred_mean - pred_std,
    color=color,
    alpha=0.3,
    edgecolor="none",
)
# Plot the mean model for the maximum likelihood parameters.
if isinstance(likelihood.mean_model, (float, int)):
    trend = np.ones(len(x)) * likelihood.mean_model
else:
    trend = likelihood.mean_model.get_value(x)
plt.plot(x, trend, color="green", label="Mean")
# Plot the mean model for ten other posterior samples.
samples = [
    result.posterior.iloc[np.random.randint(len(result.posterior))] for _ in range(10)
]
for sample in samples:
    likelihood.set_parameters(sample)
    if not isinstance(likelihood.mean_model, (float, int)):
        trend = likelihood.mean_model.get_value(x)
    plt.plot(x, trend, color="green", alpha=0.3)
plt.xlabel("times")
plt.ylabel("y")
plt.legend(ncol=2)
plt.tight_layout()
plt.savefig(f"{outdir}/{label}_max_like_fit.pdf")
plt.show()
plt.clf()
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
import george
import bilby
from bilby.core.prior import Uniform
# In this example we show how we can use the `george` package within
# `bilby`. We begin by synthesizing some data and then use a simple Gaussian
# Process model to fit and interpolate the data. `bilby` implements a
# likelihood interface to `celerite` and `george` and adds some useful
# utility functions.
# For specific use of `george`, see also the documentation at
# https://george.readthedocs.io/en/latest/, and the paper on the arxiv
# https://arxiv.org/abs/1403.6015.
# For the use of Gaussian Processes in general, Rasmussen and Williams (2006)
# provides an in-depth introduction. The book is available for free at
# http://www.gaussianprocess.org/.
label = "gaussian_process_george_example"
outdir = "outdir"
Path(outdir).mkdir(exist_ok=True)
# Here, we set up the parameters for the data creation. The data will be a
# Gaussian Bell curve times a sine function. We also add a linear trend and
# some Gaussian white noise.
period = 6
amplitude = 10
width = 20
jitter = 1
offset = 10
slope = 0.1
def linear_function(x, a, b):
    return a * x + b
# For the data creation, we leave a gap in the middle of the time series to
# see how the Gaussian Process model can interpolate the data. We fix the
# seed to ensure reproducibility.
np.random.seed(42)
times = np.linspace(0, 40, 100)
times = np.append(times, np.linspace(60, 100, 100))
dt = times[1] - times[0]
duration = times[-1] - times[0]
ys = (
    amplitude
    * np.sin(2 * np.pi * times / period)
    * np.exp(-((times - 50) ** 2) / 2 / width**2)
    + np.random.normal(scale=jitter, size=len(times))
    + linear_function(x=times, a=slope, b=offset)
)
plt.errorbar(times, ys, yerr=jitter, fmt=".k")
plt.xlabel("times")
plt.ylabel("y")
plt.savefig(f"{outdir}/{label}_data.pdf")
plt.show()
# We use a Gaussian Process kernel to model the Bell curve and sine aspects
# of the data. This specific kernel is a Matern 3/2 kernel, which will
# actually not be a good model! Try using the other available kernels in
# `george` to see if you can achieve a better interpolation!
kernel = 2.0 * george.kernels.Matern32Kernel(metric=5.0)
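# Multiplying by the constant 2.0 turns the kernel into a product kernel, so
# the combined kernel exposes the constant prefactor as `k1` and the Matern
# 3/2 part as `k2`; this is where the parameter names in the priors below
# come from.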
# `bilby.core.likelihood.function_to_george_mean_model` takes a python
# function and transforms it into the correct class for `george` to use as
# a mean function. The first argument of the python function has to be the
# 'time' or 'x' variable, followed by the parameters. The model needs to be
# initialized with some values, though these do not matter for inference.
LinearMeanModel = bilby.core.likelihood.function_to_george_mean_model(linear_function)
mean_model = LinearMeanModel(a=0, b=0)
# Set up the likelihood. We set `yerr=1e-6` so that the amount of white
# noise is inferred during the inference process. Much smaller values of
# `yerr` can cause numerical failures. If you know the `yerr` values for
# your problem, you can pass them in as an array.
likelihood = bilby.core.likelihood.GeorgeLikelihood(
    kernel=kernel, mean_model=mean_model, t=times, y=ys, yerr=1e-6
)
# Print the parameter names and values. This is useful if we have trouble
# figuring out how `george` applies its naming scheme.
print(likelihood.gp.parameter_names)
print(likelihood.gp.parameter_vector)
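# The printed names are exactly the keys the priors below must use, e.g.
# 'kernel:k1:log_constant' for the constant prefactor, 'white_noise:value'
# for the white noise, and 'mean:a' for the mean model.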
# Set up the priors. We know the parameter names from the print statements
# above.
priors = bilby.core.prior.PriorDict()
priors["kernel:k1:log_constant"] = Uniform(
    minimum=-10, maximum=30, name="log_A", latex_label=r"$\ln A$"
)
priors["kernel:k2:metric:log_M_0_0"] = Uniform(
    minimum=-10, maximum=30, name="log_M_0_0", latex_label=r"$\ln M_{00}$"
)
priors["white_noise:value"] = Uniform(
    minimum=0, maximum=10, name="white_noise", latex_label=r"$\sigma$"
)
priors["mean:a"] = Uniform(minimum=-100, maximum=100, name="a", latex_label=r"$a$")
priors["mean:b"] = Uniform(minimum=-100, maximum=100, name="b", latex_label=r"$b$")
# Run the sampling process as usual with `bilby`. The settings are such that
# the run should finish within a few minutes.
result = bilby.run_sampler(
    likelihood=likelihood,
    priors=priors,
    outdir=outdir,
    label=label,
    sampler="dynesty",
    sample="rslice",
    nlive=300,
    resume=True,
)
result.plot_corner()
# Now let's plot the data with some possible realizations of the mean model,
# and the interpolated prediction from the Gaussian Process using the
# maximum likelihood parameters.
start_time = times[0]
end_time = times[-1]
# First, we extract the inferred Gaussian white noise jitter.
jitter = result.posterior.iloc[-1]["white_noise:value"]
# Next, we re-compute the Gaussian process with these y errors.
# `likelihood.gp` is an instance of the `george` Gaussian Process class.
# See the `george` documentation for a detailed explanation. We can then
# draw predicted means and variances from the `george` Gaussian process.
likelihood.gp.compute(times, jitter)
x = np.linspace(start_time, end_time, 5000)
pred_mean, pred_var = likelihood.gp.predict(ys, x, return_var=True)
pred_std = np.sqrt(pred_var)
# Plot the data and prediction.
color = "#ff7f0e"
plt.errorbar(times, ys, yerr=jitter, fmt=".k", capsize=0, label="Data")
plt.plot(x, pred_mean, color=color, label="Prediction")
plt.fill_between(
    x,
    pred_mean + pred_std,
    pred_mean - pred_std,
    color=color,
    alpha=0.3,
    edgecolor="none",
)
# Plot the mean model for the maximum likelihood parameters.
if isinstance(likelihood.mean_model, (float, int)):
    trend = np.ones(len(x)) * likelihood.mean_model
else:
    trend = likelihood.mean_model.get_value(x)
plt.plot(x, trend, color="green", label="Mean")
# Plot the mean model for ten other posterior samples.
samples = [
    result.posterior.iloc[np.random.randint(len(result.posterior))] for _ in range(10)
]
for sample in samples:
    likelihood.set_parameters(sample)
    if not isinstance(likelihood.mean_model, (float, int)):
        trend = likelihood.mean_model.get_value(x)
    plt.plot(x, trend, color="green", alpha=0.3)
plt.xlabel("times")
plt.ylabel("y")
plt.legend(ncol=2)
plt.tight_layout()
plt.savefig(f"{outdir}/{label}_max_like_fit.pdf")
plt.show()
plt.clf()
import unittest
from unittest import mock
import numpy as np
import bilby.core.likelihood
from bilby.core.likelihood import (
    Likelihood,
    GaussianLikelihood,
@@ -693,5 +696,158 @@ class TestJointLikelihood(unittest.TestCase):
        # self.assertDictEqual(self.joint_likelihood.parameters, joint_likelihood.parameters)
class TestGPLikelihood(unittest.TestCase):

    def setUp(self) -> None:
        self.t = [1, 2, 3]
        self.y = [4, 5, 6]
        self.yerr = [0.4, 0.5, 0.6]
        self.kernel = mock.MagicMock()
        self.mean_model = mock.MagicMock()
        self.gp_mock = mock.MagicMock()
        self.gp_mock.compute = mock.MagicMock()
        self.parameter_dict = dict(a=1, b=2)
        self.gp_mock.get_parameter_dict = mock.MagicMock(return_value=dict(self.parameter_dict))
        self.gp_class = mock.MagicMock(return_value=self.gp_mock)
        self.celerite_likelihood = bilby.core.likelihood._GPLikelihood(
            kernel=self.kernel, mean_model=self.mean_model, t=self.t, y=self.y, yerr=self.yerr,
            gp_class=self.gp_class)

    def tearDown(self) -> None:
        del self.t
        del self.y
        del self.yerr
        del self.kernel
        del self.mean_model
        del self.parameter_dict
        del self.gp_class
        del self.gp_mock
        del self.celerite_likelihood

    def test_t(self):
        self.assertIsInstance(self.celerite_likelihood.t, np.ndarray)
        self.assertTrue(np.array_equal(self.t, self.celerite_likelihood.t))

    def test_y(self):
        self.assertIsInstance(self.celerite_likelihood.y, np.ndarray)
        self.assertTrue(np.array_equal(self.y, self.celerite_likelihood.y))

    def test_yerr(self):
        self.assertIsInstance(self.celerite_likelihood.yerr, np.ndarray)
        self.assertTrue(np.array_equal(self.yerr, self.celerite_likelihood.yerr))

    def test_gp_class(self):
        self.assertEqual(self.gp_class, self.celerite_likelihood.GPClass)

    def test_gp_instantiation(self):
        self.celerite_likelihood.GPClass.assert_called_once_with(
            kernel=self.kernel, mean=self.mean_model, fit_mean=True, fit_white_noise=True)

    def test_gp_mock(self):
        self.celerite_likelihood.gp.compute.assert_called_once_with(
            self.celerite_likelihood.t, yerr=self.celerite_likelihood.yerr)

    def test_parameters(self):
        self.assertDictEqual(self.parameter_dict, self.celerite_likelihood.parameters)

    def test_set_parameters_no_exceptions(self):
        self.celerite_likelihood.gp.set_parameter = mock.MagicMock()
        self.celerite_likelihood.mean_model.set_parameter = mock.MagicMock()
        expected_a = 5
        self.celerite_likelihood.set_parameters(dict(a=expected_a))
        self.celerite_likelihood.gp.set_parameter.assert_called_once_with(name="a", value=5)
        self.assertEqual(expected_a, self.celerite_likelihood.parameters["a"])
class TestFunctionMeanModel(unittest.TestCase):

    def test_function_to_celerite_mean_model(self):
        def func(x, a, b, c):
            return a * x ** 2 + b * x + c

        mean_model = bilby.core.likelihood.function_to_celerite_mean_model(func=func)
        self.assertListEqual(["a", "b", "c"], list(mean_model.parameter_names))

    def test_function_to_george_mean_model(self):
        def func(x, a, b, c):
            return a * x ** 2 + b * x + c

        mean_model = bilby.core.likelihood.function_to_george_mean_model(func=func)
        self.assertListEqual(["a", "b", "c"], list(mean_model.parameter_names))
class TestCeleriteLikelihoodEvaluation(unittest.TestCase):

    def setUp(self) -> None:
        import celerite

        def func(x, a):
            return a * x

        self.t = [0, 1, 2]
        self.y = [0, 1, 2]
        self.yerr = [0.4, 0.5, 0.6]
        self.parameters = {"kernel:log_S0": 0, "kernel:log_Q": 0, "kernel:log_omega0": 0, "mean:a": 1}
        self.kernel = celerite.terms.SHOTerm(log_S0=0, log_Q=0, log_omega0=0)
        self.MeanModel = bilby.core.likelihood.function_to_celerite_mean_model(func=func)
        self.mean_model = self.MeanModel(a=1)
        self.celerite_likelihood = bilby.core.likelihood.CeleriteLikelihood(
            kernel=self.kernel, mean_model=self.mean_model, t=self.t, y=self.y, yerr=self.yerr)
        self.celerite_likelihood.parameters = self.parameters

    def tearDown(self) -> None:
        del self.t
        del self.y
        del self.yerr
        del self.parameters
        del self.kernel
        del self.MeanModel
        del self.mean_model
        del self.celerite_likelihood

    def test_log_l_evaluation(self):
        log_l = self.celerite_likelihood.log_likelihood()
        expected = -2.0390312696885102
        self.assertEqual(expected, log_l)

    def test_set_parameters(self):
        combined_params = {"kernel:log_S0": 2, "kernel:log_Q": 2, "kernel:log_omega0": 2, "mean:a": 2}
        self.celerite_likelihood.set_parameters(combined_params)
        self.assertDictEqual(combined_params, self.celerite_likelihood.parameters)
class TestGeorgeLikelihoodEvaluation(unittest.TestCase):

    def setUp(self) -> None:
        import george

        def func(x, a):
            return a * x

        self.t = [0, 1, 2]
        self.y = [0, 1, 2]
        self.yerr = [0.4, 0.5, 0.6]
        self.parameters = {}
        self.kernel = 2.0 * george.kernels.Matern32Kernel(metric=5.0)
        self.MeanModel = bilby.core.likelihood.function_to_george_mean_model(func=func)
        self.mean_model = self.MeanModel(a=1)
        self.george_likelihood = bilby.core.likelihood.GeorgeLikelihood(
            kernel=self.kernel, mean_model=self.mean_model, t=self.t, y=self.y, yerr=self.yerr)

    def tearDown(self) -> None:
        pass

    def test_likelihood_value(self):
        log_l = self.george_likelihood.log_likelihood()
        expected = -3.2212751203208403
        self.assertEqual(expected, log_l)

    def test_set_parameters(self):
        combined_params = {"kernel:k1:log_constant": 2, "kernel:k2:metric:log_M_0_0": 2, "mean:a": 2}
        self.george_likelihood.set_parameters(combined_params)
        self.assertDictEqual(combined_params, self.george_likelihood.parameters)
if __name__ == "__main__":
    unittest.main()