Commit 787c0e35 authored by Gregory Ashton's avatar Gregory Ashton

Resolves #266 and improves saved log likelihoods and priors

parent 930280f6
@@ -210,20 +210,24 @@ class PriorDict(OrderedDict):
         """
         return np.product([self[key].prob(sample[key]) for key in sample], **kwargs)

-    def ln_prob(self, sample):
+    def ln_prob(self, sample, axis=None):
         """

         Parameters
         ----------
         sample: dict
-            Dictionary of the samples of which we want to have the log probability of
+            Dictionary of the samples of which to calculate the log probability
+        axis: None or int
+            Axis along which the summation is performed

         Returns
         -------
-        float: Joint log probability of all the individual sample probabilities
+        float or ndarray:
+            Joint log probability of all the individual sample probabilities

         """
-        return np.sum([self[key].ln_prob(sample[key]) for key in sample])
+        return np.sum([self[key].ln_prob(sample[key]) for key in sample],
+                      axis=axis)

     def rescale(self, keys, theta):
         """Rescale samples from unit cube to prior
...
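With the new axis argument, PriorDict.ln_prob can evaluate a whole array of samples at once instead of a single point. A minimal sketch of the vectorized call, assuming bilby's Uniform prior; the parameter names a and b are purely illustrative:

import numpy as np
from bilby.core.prior import PriorDict, Uniform

# Toy prior over two parameters (names are illustrative)
priors = PriorDict(dict(a=Uniform(0, 1), b=Uniform(0, 2)))

# Scalar samples: the per-parameter log probabilities sum to a float
print(priors.ln_prob(dict(a=0.5, b=1.0)))

# Array samples: the list comprehension yields one array per parameter,
# so axis=0 sums over parameters, returning one joint log prob per sample
samples = dict(a=np.random.uniform(0, 1, 100),
               b=np.random.uniform(0, 2, 100))
print(priors.ln_prob(samples, axis=0).shape)  # (100,)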
@@ -74,9 +74,10 @@ class Result(object):
                  nested_samples=None, log_evidence=np.nan,
                  log_evidence_err=np.nan, log_noise_evidence=np.nan,
                  log_bayes_factor=np.nan, log_likelihood_evaluations=None,
-                 sampling_time=None, nburn=None, walkers=None,
-                 max_autocorrelation_time=None, parameter_labels=None,
-                 parameter_labels_with_unit=None, version=None):
+                 log_prior_evaluations=None, sampling_time=None, nburn=None,
+                 walkers=None, max_autocorrelation_time=None,
+                 parameter_labels=None, parameter_labels_with_unit=None,
+                 version=None):
         """ A class to store the results of the sampling run

         Parameters
@@ -102,6 +103,8 @@ class Result(object):
             Natural log evidences
         log_likelihood_evaluations: array_like
             The evaluations of the likelihood for each sample point
+        log_prior_evaluations: array_like
+            The evaluations of the prior for each sample point
         sampling_time: float
             The time taken to complete the sampling
         nburn: int
@@ -143,6 +146,7 @@ class Result(object):
         self.log_noise_evidence = log_noise_evidence
         self.log_bayes_factor = log_bayes_factor
         self.log_likelihood_evaluations = log_likelihood_evaluations
+        self.log_prior_evaluations = log_prior_evaluations
         self.sampling_time = sampling_time
         self.version = version
         self.max_autocorrelation_time = max_autocorrelation_time
@@ -269,8 +273,8 @@ class Result(object):
             'log_noise_evidence', 'log_bayes_factor', 'priors', 'posterior',
             'injection_parameters', 'meta_data', 'search_parameter_keys',
             'fixed_parameter_keys', 'sampling_time', 'sampler_kwargs',
-            'log_likelihood_evaluations', 'samples', 'nested_samples',
-            'walkers', 'nburn', 'parameter_labels',
+            'log_likelihood_evaluations', 'log_prior_evaluations', 'samples',
+            'nested_samples', 'walkers', 'nburn', 'parameter_labels',
             'parameter_labels_with_unit', 'version']
         dictionary = OrderedDict()
         for attr in save_attrs:
@@ -855,6 +859,11 @@ class Result(object):
             data_frame[key] = priors[key]
         data_frame['log_likelihood'] = getattr(
            self, 'log_likelihood_evaluations', np.nan)
+        if self.log_prior_evaluations is None:
+            data_frame['log_prior'] = self.priors.ln_prob(
+                data_frame[self.search_parameter_keys], axis=0)
+        else:
+            data_frame['log_prior'] = self.log_prior_evaluations
         if conversion_function is not None:
             data_frame = conversion_function(data_frame, likelihood, priors)
         self.posterior = data_frame
...
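The final result.py hunk falls back to recomputing the log prior when the sampler did not record it per sample. The fallback works because iterating a pandas DataFrame yields its column names, so PriorDict.ln_prob treats the sliced frame like a dict of sample arrays. A minimal sketch of that behaviour, again with illustrative parameter names:

import numpy as np
import pandas as pd
from bilby.core.prior import PriorDict, Uniform

priors = PriorDict(dict(a=Uniform(0, 1), b=Uniform(0, 2)))
data_frame = pd.DataFrame(dict(a=np.random.uniform(0, 1, 5),
                               b=np.random.uniform(0, 2, 5)))

# Iterating a DataFrame yields column names, so ln_prob sees it as a
# dict of sample arrays; axis=0 sums over parameters, one value per row
data_frame['log_prior'] = priors.ln_prob(data_frame[['a', 'b']], axis=0)
print(data_frame['log_prior'].values)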
@@ -84,6 +84,8 @@ class Cpnest(NestedSampler):
             out.plot()
         self.result.posterior = DataFrame(out.posterior_samples)
+        self.result.posterior.rename(columns=dict(
+            logL='log_likelihood', logPrior='log_prior'), inplace=True)
         self.result.log_evidence = out.NS.state.logZ
         self.result.log_evidence_err = np.nan
         return self.result
...
@@ -121,6 +121,10 @@ class Emcee(MCMCSampler):
                 "The run has finished, but the chain is not burned in: "
                 "`nburn < nsteps`. Try increasing the number of steps.")
         self.result.samples = sampler.chain[:, self.nburn:, :].reshape((-1, self.ndim))
+        blobs_flat = np.array(sampler.blobs)[self.nburn:, :, :].reshape((-1, 2))
+        log_likelihoods, log_priors = blobs_flat.T
+        self.result.log_likelihood_evaluations = log_likelihoods
+        self.result.log_prior_evaluations = log_priors
         self.result.walkers = sampler.chain
         self.result.log_evidence = np.nan
         self.result.log_evidence_err = np.nan
@@ -146,8 +150,9 @@ class Emcee(MCMCSampler):
                 for _ in range(self.nwalkers)]

     def lnpostfn(self, theta):
-        p = self.log_prior(theta)
-        if np.isinf(p):
-            return -np.inf
+        log_prior = self.log_prior(theta)
+        if np.isinf(log_prior):
+            return -np.inf, [np.nan, np.nan]
         else:
-            return self.log_likelihood(theta) + p
+            log_likelihood = self.log_likelihood(theta)
+            return log_likelihood + log_prior, [log_likelihood, log_prior]
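The extra return value from lnpostfn uses emcee's "blobs" mechanism: when the log-probability function returns a tuple, emcee stores the second element for every walker at every step, so sampler.blobs comes back shaped (nsteps, nwalkers, ...). A self-contained sketch against the emcee 2.x API the diff targets, with a toy Gaussian likelihood and flat prior:

import emcee
import numpy as np

def lnpostfn(theta):
    # Toy problem: unit Gaussian likelihood, flat prior on [-10, 10]
    if np.any(np.abs(theta) > 10):
        return -np.inf, [np.nan, np.nan]
    log_prior = -np.log(20.0)
    log_likelihood = -0.5 * np.sum(theta ** 2)
    # The second element of the tuple becomes the per-sample "blob"
    return log_likelihood + log_prior, [log_likelihood, log_prior]

nwalkers, ndim, nburn = 8, 1, 10
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnpostfn)
sampler.run_mcmc(np.random.uniform(-1, 1, (nwalkers, ndim)), 50)

# As in the diff: blobs arrive as (nsteps, nwalkers, 2); drop the burn-in
# and flatten steps and walkers into one axis before splitting the columns
blobs_flat = np.array(sampler.blobs)[nburn:, :, :].reshape((-1, 2))
log_likelihoods, log_priors = blobs_flat.T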
@@ -65,9 +65,13 @@ class Ptemcee(Emcee):
                   for _ in range(self.nwalkers)]
                  for _ in range(self.kwargs['ntemps'])]

-        for _ in tqdm(
+        log_likelihood_evaluations = []
+        log_prior_evaluations = []
+        for pos, logpost, loglike in tqdm(
                 sampler.sample(self.pos0, **self.sampler_function_kwargs),
                 total=self.nsteps):
+            log_likelihood_evaluations.append(loglike)
+            log_prior_evaluations.append(logpost - loglike)
             pass

         self.calculate_autocorrelation(sampler.chain.reshape((-1, self.ndim)))
@@ -80,6 +84,10 @@ class Ptemcee(Emcee):
             "`nburn < nsteps`. Try increasing the number of steps.")
         self.result.samples = sampler.chain[0, :, self.nburn:, :].reshape(
             (-1, self.ndim))
+        self.result.log_likelihood_evaluations = np.array(
+            log_likelihood_evaluations)[self.nburn:, 0, :].reshape((-1))
+        self.result.log_prior_evaluations = np.array(
+            log_prior_evaluations)[self.nburn:, 0, :].reshape((-1))
         self.result.betas = sampler.betas
         self.result.log_evidence, self.result.log_evidence_err =\
             sampler.log_evidence_estimate(
...
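Recording logpost - loglike as the log prior relies on the parallel-tempering convention that the tempered log posterior is beta * logL + log_prior, so the subtraction recovers the prior exactly only where beta = 1; that is the zero-temperature chain at index 0, which is the only one the hunk above keeps. A small numeric sketch of that identity, with purely illustrative values:

import numpy as np

# Tempered log posterior: logpost_beta = beta * log_likelihood + log_prior
betas = np.array([1.0, 0.5, 0.25])   # illustrative temperature ladder
log_likelihood = -3.2                # illustrative values
log_prior = -1.1
logpost = betas * log_likelihood + log_prior

# logpost - loglike equals log_prior only at beta = 1, i.e. the
# zero-temperature chain (index 0) that the result keeps
print(logpost - log_likelihood)      # [-1.1  0.5  1.3]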