Commit f5ff4c20 authored by Gregory Ashton

Merge branch 'Resolve-266' into 'master'

Resolves #266 and improves saved log likelihoods and priors

Closes #266

See merge request lscsoft/bilby!324
parents 930280f6 787c0e35
@@ -210,20 +210,24 @@ class PriorDict(OrderedDict):
         """
         return np.product([self[key].prob(sample[key]) for key in sample], **kwargs)

-    def ln_prob(self, sample):
+    def ln_prob(self, sample, axis=None):
         """

         Parameters
         ----------
         sample: dict
-            Dictionary of the samples of which we want to have the log probability of
+            Dictionary of the samples of which to calculate the log probability
+        axis: None or int
+            Axis along which the summation is performed

         Returns
         -------
-        float: Joint log probability of all the individual sample probabilities
+        float or ndarray:
+            Joint log probability of all the individual sample probabilities

         """
-        return np.sum([self[key].ln_prob(sample[key]) for key in sample])
+        return np.sum([self[key].ln_prob(sample[key]) for key in sample],
+                      axis=axis)

     def rescale(self, keys, theta):
         """Rescale samples from unit cube to prior
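For context, a minimal sketch of what the new axis argument enables: evaluating PriorDict.ln_prob on arrays of samples in a single vectorised call. The Uniform priors and parameter names below are illustrative, not part of the change.

import numpy as np
from bilby.core.prior import PriorDict, Uniform

priors = PriorDict(dict(a=Uniform(0, 1), b=Uniform(0, 2)))

# Scalar samples: behaves as before and returns a float
print(priors.ln_prob(dict(a=0.5, b=1.0)))

# Array samples: the list comprehension yields one array per key, so
# summing over axis=0 (the keys) returns the joint log prior per sample
samples = dict(a=np.random.uniform(0, 1, 100),
               b=np.random.uniform(0, 2, 100))
print(priors.ln_prob(samples, axis=0).shape)  # (100,)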
@@ -74,9 +74,10 @@ class Result(object):
                  nested_samples=None, log_evidence=np.nan,
                  log_evidence_err=np.nan, log_noise_evidence=np.nan,
                  log_bayes_factor=np.nan, log_likelihood_evaluations=None,
-                 sampling_time=None, nburn=None, walkers=None,
-                 max_autocorrelation_time=None, parameter_labels=None,
-                 parameter_labels_with_unit=None, version=None):
+                 log_prior_evaluations=None, sampling_time=None, nburn=None,
+                 walkers=None, max_autocorrelation_time=None,
+                 parameter_labels=None, parameter_labels_with_unit=None,
+                 version=None):
         """ A class to store the results of the sampling run

         Parameters
@@ -102,6 +103,8 @@ class Result(object):
             Natural log evidences
         log_likelihood_evaluations: array_like
             The evaluations of the likelihood for each sample point
+        log_prior_evaluations: array_like
+            The evaluations of the prior for each sample point
         sampling_time: float
             The time taken to complete the sampling
         nburn: int
@@ -143,6 +146,7 @@ class Result(object):
         self.log_noise_evidence = log_noise_evidence
         self.log_bayes_factor = log_bayes_factor
         self.log_likelihood_evaluations = log_likelihood_evaluations
+        self.log_prior_evaluations = log_prior_evaluations
         self.sampling_time = sampling_time
         self.version = version
         self.max_autocorrelation_time = max_autocorrelation_time
@@ -269,8 +273,8 @@ class Result(object):
             'log_noise_evidence', 'log_bayes_factor', 'priors', 'posterior',
             'injection_parameters', 'meta_data', 'search_parameter_keys',
             'fixed_parameter_keys', 'sampling_time', 'sampler_kwargs',
-            'log_likelihood_evaluations', 'samples', 'nested_samples',
-            'walkers', 'nburn', 'parameter_labels',
+            'log_likelihood_evaluations', 'log_prior_evaluations', 'samples',
+            'nested_samples', 'walkers', 'nburn', 'parameter_labels',
             'parameter_labels_with_unit', 'version']
         dictionary = OrderedDict()
         for attr in save_attrs:
@@ -855,6 +859,11 @@ class Result(object):
                 data_frame[key] = priors[key]
         data_frame['log_likelihood'] = getattr(
             self, 'log_likelihood_evaluations', np.nan)
+        if self.log_prior_evaluations is None:
+            data_frame['log_prior'] = self.priors.ln_prob(
+                data_frame[self.search_parameter_keys], axis=0)
+        else:
+            data_frame['log_prior'] = self.log_prior_evaluations
         if conversion_function is not None:
             data_frame = conversion_function(data_frame, likelihood, priors)
         self.posterior = data_frame
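The fallback branch above is what the vectorised ln_prob is for: when a sampler did not record per-sample prior values, the log_prior column is recomputed from the posterior DataFrame itself. A hedged sketch of that mechanism, with illustrative parameter names:

import numpy as np
import pandas as pd
from bilby.core.prior import PriorDict, Uniform

priors = PriorDict(dict(a=Uniform(0, 1), b=Uniform(0, 2)))
data_frame = pd.DataFrame(dict(a=np.random.uniform(0, 1, 10),
                               b=np.random.uniform(0, 2, 10)))

# Iterating a DataFrame yields column names and indexing yields Series,
# so it quacks like the dict of arrays that PriorDict.ln_prob expects
data_frame['log_prior'] = priors.ln_prob(data_frame[['a', 'b']], axis=0)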
@@ -84,6 +84,8 @@ class Cpnest(NestedSampler):
             out.plot()

         self.result.posterior = DataFrame(out.posterior_samples)
+        self.result.posterior.rename(columns=dict(
+            logL='log_likelihood', logPrior='log_prior'), inplace=True)
         self.result.log_evidence = out.NS.state.logZ
         self.result.log_evidence_err = np.nan
         return self.result
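The rename above only standardises cpnest's column names to bilby's. A self-contained pandas illustration, assuming cpnest labels its posterior columns logL and logPrior as the diff indicates:

import pandas as pd

posterior = pd.DataFrame(dict(logL=[-1.2, -0.8], logPrior=[0.1, 0.3]))
posterior.rename(columns=dict(
    logL='log_likelihood', logPrior='log_prior'), inplace=True)
print(list(posterior.columns))  # ['log_likelihood', 'log_prior']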
@@ -121,6 +121,10 @@ class Emcee(MCMCSampler):
                 "The run has finished, but the chain is not burned in: "
                 "`nburn < nsteps`. Try increasing the number of steps.")
         self.result.samples = sampler.chain[:, self.nburn:, :].reshape((-1, self.ndim))
+        blobs_flat = np.array(sampler.blobs)[self.nburn:, :, :].reshape((-1, 2))
+        log_likelihoods, log_priors = blobs_flat.T
+        self.result.log_likelihood_evaluations = log_likelihoods
+        self.result.log_prior_evaluations = log_priors
         self.result.walkers = sampler.chain
         self.result.log_evidence = np.nan
         self.result.log_evidence_err = np.nan
@@ -146,8 +150,9 @@ class Emcee(MCMCSampler):
                 for _ in range(self.nwalkers)]

     def lnpostfn(self, theta):
-        p = self.log_prior(theta)
-        if np.isinf(p):
-            return -np.inf
+        log_prior = self.log_prior(theta)
+        if np.isinf(log_prior):
+            return -np.inf, [np.nan, np.nan]
         else:
-            return self.log_likelihood(theta) + p
+            log_likelihood = self.log_likelihood(theta)
+            return log_likelihood + log_prior, [log_likelihood, log_prior]
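The two emcee changes above work together through emcee's "blobs" feature: when the log-posterior function returns (lnpost, blob), the sampler stores blob for every walker at every step, which is what makes the reshape((-1, 2)) possible. A sketch assuming emcee's 2.x EnsembleSampler API, with an illustrative one-parameter posterior:

import emcee
import numpy as np

def lnpostfn(theta):
    # Illustrative unit-interval prior and Gaussian likelihood
    log_prior = 0.0 if 0 < theta[0] < 1 else -np.inf
    if np.isinf(log_prior):
        return -np.inf, [np.nan, np.nan]
    log_likelihood = -0.5 * theta[0] ** 2
    return log_likelihood + log_prior, [log_likelihood, log_prior]

nwalkers, ndim, nsteps = 10, 1, 50
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnpostfn)
sampler.run_mcmc(np.random.uniform(0, 1, (nwalkers, ndim)), nsteps)

# One blob per walker per step, matching the reshape in the diff
print(np.array(sampler.blobs).shape)  # (nsteps, nwalkers, 2)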
@@ -65,9 +65,13 @@ class Ptemcee(Emcee):
                      for _ in range(self.nwalkers)]
                     for _ in range(self.kwargs['ntemps'])]

-        for _ in tqdm(
+        log_likelihood_evaluations = []
+        log_prior_evaluations = []
+        for pos, logpost, loglike in tqdm(
                 sampler.sample(self.pos0, **self.sampler_function_kwargs),
                 total=self.nsteps):
+            log_likelihood_evaluations.append(loglike)
+            log_prior_evaluations.append(logpost - loglike)
             pass

         self.calculate_autocorrelation(sampler.chain.reshape((-1, self.ndim)))
@@ -80,6 +84,10 @@ class Ptemcee(Emcee):
                 "`nburn < nsteps`. Try increasing the number of steps.")
         self.result.samples = sampler.chain[0, :, self.nburn:, :].reshape(
             (-1, self.ndim))
+        self.result.log_likelihood_evaluations = np.array(
+            log_likelihood_evaluations)[self.nburn:, 0, :].reshape((-1))
+        self.result.log_prior_evaluations = np.array(
+            log_prior_evaluations)[self.nburn:, 0, :].reshape((-1))
         self.result.betas = sampler.betas
         self.result.log_evidence, self.result.log_evidence_err =\
             sampler.log_evidence_estimate(
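Why logpost - loglike recovers the prior in the ptemcee loop above: the sampler yields per-temperature, per-walker arrays at each step, and the log posterior is the sum of log likelihood and log prior. A shape-only sketch of the bookkeeping, assuming ptemcee yields (ntemps, nwalkers) arrays per step:

import numpy as np

ntemps, nwalkers, nsteps, nburn = 4, 8, 100, 20

# Stand-ins for the arrays yielded by sampler.sample at each step
loglike = np.random.randn(nsteps, ntemps, nwalkers)
logprior = np.random.randn(nsteps, ntemps, nwalkers)
logpost = loglike + logprior

log_prior_evaluations = logpost - loglike  # recovers logprior exactly

# Keep the zero-temperature (beta = 1) chain, drop burn-in, and flatten
# over steps and walkers, as in the diff
flat = log_prior_evaluations[nburn:, 0, :].reshape((-1))
print(flat.shape)  # ((nsteps - nburn) * nwalkers,) = (640,)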