Commit 3dda0972 authored by Gregory Ashton

Review core examples

- Add a README
- Move non-dynesty examples into a separate sub-directory to avoid
  repetition
- Standardize variable names
- Remove the multidimensional Gaussian: the 15d example is better and
  more complete
- Remove the start_mcmc_chains_near.. example: it demonstrates a
  rarely used feature of a rarely used sampler; better to put this in
  the docs if needed
parent 63c7aaca
1 merge request: !1031 Review core examples
Pipeline #292528 passed
Showing 87 additions and 170 deletions
# Core examples
This directory contains examples related to the `bilby.core` modules of
`bilby`. The examples are intended as starting points that demonstrate
different pieces of functionality for you to extend; they are not exhaustive.
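
Most of the examples follow the same broad pattern: define a model and a
likelihood, set priors, then call `bilby.run_sampler`. A minimal sketch of that
pattern is shown below (the model, data, and sampler settings are illustrative
and not taken from any single example):

```python
import bilby
import numpy as np


# Illustrative model: a straight line
def model(x, m, c):
    return m * x + c


# Simulated data with known noise level (sigma=1)
x = np.linspace(0, 1, 100)
data = model(x, m=1, c=0) + np.random.normal(0, 1, len(x))

# Likelihood comparing the data to the model
likelihood = bilby.core.likelihood.GaussianLikelihood(x, data, model, sigma=1)

# A prior for each model parameter
priors = dict(
    m=bilby.core.prior.Uniform(0, 5, "m"),
    c=bilby.core.prior.Uniform(-2, 2, "c"),
)

# Run a sampler; results and plots are written to `outdir`
result = bilby.run_sampler(
    likelihood=likelihood, priors=priors, sampler="dynesty", nlive=250,
    outdir="outdir", label="example")
result.plot_corner()
```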
#!/usr/bin/env python
"""
An example of how to use bilby to perform parameter estimation for
non-gravitational wave data. In this case, fitting a linear function to
data with background Gaussian noise
"""
import bilby
import numpy as np
import matplotlib.pyplot as plt
# A few simple setup steps
label = 'linear_regression'
outdir = 'outdir'
bilby.utils.check_directory_exists_and_if_not_mkdir(outdir)
# First, we define our "signal model", in this case a simple linear function
def model(time, m, c):
    return time * m + c
# Now we define the injection parameters with which we make simulated data
injection_parameters = dict(m=0.5, c=0.2)
# For this example, we'll use standard Gaussian noise
# These lines of code generate the fake data. Note the ** just unpacks the
# contents of the injection_parameters when calling the model function.
sampling_frequency = 10
time_duration = 10
time = np.arange(0, time_duration, 1 / sampling_frequency)
N = len(time)
sigma = np.random.normal(1, 0.01, N)
data = model(time, **injection_parameters) + np.random.normal(0, sigma, N)
# We quickly plot the data to check it looks sensible
fig, ax = plt.subplots()
ax.plot(time, data, 'o', label='data')
ax.plot(time, model(time, **injection_parameters), '--r', label='signal')
ax.set_xlabel('time')
ax.set_ylabel('y')
ax.legend()
fig.savefig('{}/{}_data.png'.format(outdir, label))
# Now let's instantiate a version of our GaussianLikelihood, giving it
# the time, data and signal model
likelihood = bilby.likelihood.GaussianLikelihood(time, data, model, sigma)
# From hereon, the syntax is exactly equivalent to other bilby examples
# We make a prior
priors = dict()
priors['m'] = bilby.core.prior.Uniform(0, 5, 'm')
priors['c'] = bilby.core.prior.Uniform(-2, 2, 'c')
# And run sampler
result = bilby.run_sampler(
    likelihood=likelihood, priors=priors, sampler='dynesty', nlive=250,
    injection_parameters=injection_parameters, outdir=outdir,
    label=label)
# Finally plot a corner plot: all outputs are stored in outdir
result.plot_corner()
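
As a possible follow-up to this example, the saved result can be loaded back in
later for further analysis. A short sketch, assuming the default JSON result
file written to `outdir` by the run above and bilby's `read_in_result` helper:

```python
import bilby

# Load the result saved by the run above (assumes the default JSON output)
result = bilby.core.result.read_in_result(
    outdir="outdir", label="linear_regression")

# The posterior samples are stored as a pandas DataFrame
print(result.posterior[["m", "c"]].describe())
print("log evidence: {} +/- {}".format(
    result.log_evidence, result.log_evidence_err))
```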
@@ -39,6 +39,6 @@ priors['z'] = bilby.core.prior.ConditionalUniform(condition_func=condition_func_
 likelihood = ZeroLikelihood(parameters=dict(x=0, y=0, z=0))

 # Sample the prior distribution
-res = bilby.run_sampler(likelihood=likelihood, priors=priors, sampler='dynesty', npoints=5000, walks=100,
+res = bilby.run_sampler(likelihood=likelihood, priors=priors, sampler='dynesty', nlive=5000,
                        label='conditional_prior', outdir='outdir', resume=False, clean=True)
 res.plot_corner()
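
For context on the hunk above: `ConditionalUniform` priors are driven by a
condition function that updates the prior bounds based on other parameters. The
hunk only shows a truncated reference to the example's own `condition_func_...`,
so the sketch below is an assumed illustration of the general pattern, not that
function:

```python
import bilby


def condition_func_y(reference_params, x):
    # Illustrative only: force y <= x by shrinking the upper bound of y's prior
    return dict(minimum=reference_params["minimum"], maximum=x)


priors = bilby.core.prior.ConditionalPriorDict()
priors["x"] = bilby.core.prior.Uniform(minimum=0, maximum=1, name="x")
priors["y"] = bilby.core.prior.ConditionalUniform(
    condition_func=condition_func_y, minimum=0, maximum=1, name="y")

# Samples drawn from the dictionary respect the condition y <= x
samples = priors.sample(1000)
```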
@@ -20,7 +20,7 @@ data = [injection_parameters[label + str(ii)] * 1000 for ii in range(n_dim)]
 likelihood = Multinomial(data=data, n_dimensions=n_dim, label=label)

 result = run_sampler(
-    likelihood=likelihood, priors=priors, nlive=100, walks=10,
+    likelihood=likelihood, priors=priors, nlive=100,
     label="multinomial", injection_parameters=injection_parameters
 )
@@ -48,6 +48,6 @@ priors = dict(mu=bilby.core.prior.Uniform(0, 5, 'mu'),
 # And run sampler
 result = bilby.run_sampler(
-    likelihood=likelihood, priors=priors, sampler='dynesty', npoints=500,
-    walks=10, outdir=outdir, label=label)
+    likelihood=likelihood, priors=priors, sampler='dynesty', nlive=1000,
+    outdir=outdir, label=label)
 result.plot_corner()
@@ -9,8 +9,10 @@ from bilby.core.prior import Uniform
 from bilby.core.sampler import run_sampler
 from bilby.core.result import make_pp_plot
 from bilby.hyper.likelihood import HyperparameterLikelihood
+from bilby.core.utils import check_directory_exists_and_if_not_mkdir

 outdir = 'outdir'
+check_directory_exists_and_if_not_mkdir(outdir)

 # Define a model to fit to each data set
@@ -55,7 +55,9 @@ priors['c'] = bilby.core.prior.Uniform(-2, 2, 'c')
 # And run sampler
 result = bilby.run_sampler(
-    likelihood=likelihood, priors=priors, sampler='dynesty', nlive=500,
-    sample='unif', injection_parameters=injection_parameters, outdir=outdir,
+    likelihood=likelihood, priors=priors, sampler='dynesty', nlive=250,
+    injection_parameters=injection_parameters, outdir=outdir,
     label=label)

 # Finally plot a corner plot: all outputs are stored in outdir
 result.plot_corner()
@@ -57,7 +57,9 @@ priors['sigma'] = bilby.core.prior.Uniform(0, 10, 'sigma')
 # And run sampler
 result = bilby.run_sampler(
-    likelihood=likelihood, priors=priors, sampler='dynesty', npoints=500,
-    sample='unif', injection_parameters=injection_parameters, outdir=outdir,
+    likelihood=likelihood, priors=priors, sampler='dynesty', npoints=250,
+    injection_parameters=injection_parameters, outdir=outdir,
     label=label)

 # Finally plot a corner plot: all outputs are stored in outdir
 result.plot_corner()
#!/usr/bin/env python
"""
Testing the recovery of a multi-dimensional
Gaussian with zero mean and unit variance
"""
import bilby
import numpy as np
# A few simple setup steps
label = "multidim_gaussian"
outdir = "outdir"
# Generating simulated data: generating n-dim Gaussian
dim = 5
mean = np.zeros(dim)
cov = np.ones((dim, dim))
data = np.random.multivariate_normal(mean, cov, 100)
class MultidimGaussianLikelihood(bilby.Likelihood):
    """
    A multivariate Gaussian likelihood
    with known analytic solution.

    Parameters
    ----------
    data: array_like
        The data to analyse
    dim: int
        The number of dimensions
    """

    def __init__(self, data, dim):
        super().__init__()
        self.dim = dim
        self.data = np.array(data)
        self.N = len(data)
        self.parameters = {}

    def log_likelihood(self):
        mu = np.array(
            [self.parameters["mu_{0}".format(i)] for i in range(self.dim)]
        )
        sigma = np.array(
            [self.parameters["sigma_{0}".format(i)] for i in range(self.dim)]
        )
        p = np.array([(self.data[n, :] - mu) / sigma for n in range(self.N)])
        return np.sum(
            -0.5 * (np.sum(p ** 2) + self.N * np.log(2 * np.pi * sigma ** 2))
        )


likelihood = MultidimGaussianLikelihood(data, dim)
priors = bilby.core.prior.PriorDict()
priors.update(
    {
        "mu_{0}".format(i): bilby.core.prior.Uniform(-5, 5, "mu")
        for i in range(dim)
    }
)
priors.update(
    {
        "sigma_{0}".format(i): bilby.core.prior.LogUniform(0.2, 5, "sigma")
        for i in range(dim)
    }
)

# And run sampler
# The plot arg produces trace_plots useful for diagnostics
result = bilby.run_sampler(
    likelihood=likelihood,
    priors=priors,
    sampler="dynesty",
    npoints=500,
    walks=10,
    outdir=outdir,
    label=label,
    plot=True,
)
result.plot_corner()
@@ -69,13 +69,13 @@ class Polynomial(bilby.Likelihood):
         n: int
             The degree of the polynomial to fit.
         """
+        self.keys = ['c{}'.format(k) for k in range(n)]
+        super().__init__(parameters={k: None for k in self.keys})
         self.x = x
         self.y = y
         self.sigma = sigma
         self.N = len(x)
         self.n = n
-        self.keys = ['c{}'.format(k) for k in range(n)]
-        self.parameters = {k: None for k in self.keys}

     def polynomial(self, x, parameters):
         coeffs = [parameters[k] for k in self.keys]
@@ -95,8 +95,8 @@ def fit(n):
         priors[k] = bilby.core.prior.Uniform(0, 10, k)

     result = bilby.run_sampler(
-        likelihood=likelihood, priors=priors, npoints=100, outdir=outdir,
-        label=label)
+        likelihood=likelihood, priors=priors, nlive=1000, outdir=outdir,
+        label=label, sampler="nestle")

     return (result.log_evidence, result.log_evidence_err,
             np.log(result.occam_factor(priors)))
@@ -88,6 +88,6 @@ priors['n_init'] = LogUniform(
 # And run sampler
 result = bilby.run_sampler(
     likelihood=likelihood, priors=priors, sampler='dynesty',
-    nlive=1000, sample='unif', injection_parameters=injection_parameters,
+    nlive=1000, injection_parameters=injection_parameters,
     outdir=outdir, label=label)
 result.plot_corner()
#!/usr/bin/env python
"""
An example of using emcee, but starting the walkers off close to the peak (or
any other arbitrary point). This is based off the
linear_regression_with_unknown_noise.py example.
"""
import bilby
import numpy as np
import pandas as pd
# A few simple setup steps
label = 'starting_mcmc_chains_near_to_the_peak'
outdir = 'outdir'
# First, we define our "signal model", in this case a simple linear function
def model(time, m, c):
    return time * m + c
# Now we define the injection parameters with which we make simulated data
injection_parameters = dict(m=0.5, c=0.2)
# For this example, we'll inject standard Gaussian noise
sigma = 1
# These lines of code generate the fake data
sampling_frequency = 10
time_duration = 10
time = np.arange(0, time_duration, 1 / sampling_frequency)
N = len(time)
data = model(time, **injection_parameters) + np.random.normal(0, sigma, N)
# Now let's instantiate the built-in GaussianLikelihood, giving it
# the time, data and signal model. Note that, because we do not give it the
# parameter, sigma is unknown and marginalised over during the sampling
likelihood = bilby.core.likelihood.GaussianLikelihood(time, data, model)
# Here we define the prior distribution used while sampling
priors = bilby.core.prior.PriorDict()
priors['m'] = bilby.core.prior.Uniform(0, 5, 'm')
priors['c'] = bilby.core.prior.Uniform(-2, 2, 'c')
priors['sigma'] = bilby.core.prior.Uniform(0, 10, 'sigma')
# Set values to determine how to sample with emcee
nwalkers = 100
nsteps = 1000
sampler = 'emcee'
# Run the sampler from the default pos0 (which is samples drawn from the prior)
result = bilby.run_sampler(
    likelihood=likelihood, priors=priors, sampler=sampler, nsteps=nsteps,
    nwalkers=nwalkers, outdir=outdir, label=label + 'default_pos0')
result.plot_walkers()
# Here we define a distribution from which to start the walkers off.
start_pos = bilby.core.prior.PriorDict()
start_pos['m'] = bilby.core.prior.Normal(injection_parameters['m'], 0.1)
start_pos['c'] = bilby.core.prior.Normal(injection_parameters['c'], 0.1)
start_pos['sigma'] = bilby.core.prior.Normal(sigma, 0.1)
# This line generates the initial starting-position data frame by sampling
# nwalkers times from the start_pos distribution. Note, you can
# generate this any way you like, but it must be a DataFrame with a length
# equal to the number of walkers
pos0 = pd.DataFrame(start_pos.sample(nwalkers))
# Run the sampler with our created pos0
result = bilby.run_sampler(
    likelihood=likelihood, priors=priors, sampler=sampler, nsteps=nsteps,
    nwalkers=nwalkers, outdir=outdir, label=label + 'user_pos0', pos0=pos0)
result.plot_walkers()
# After running this script, in the outdir, you'll find two png images showing
# the results of the runs with and without the custom initialisation.