Skip to content
Snippets Groups Projects
Commit 0caafc17 authored by Moritz Huebner's avatar Moritz Huebner
Browse files

Merge branch 'add_information_gain_result' into 'master'

Add information gain result

See merge request lscsoft/bilby!907
parents 819a0635 c20bd29f
No related branches found
No related tags found
1 merge request!907Add information gain result
Pipeline #178640 passed with warnings
......@@ -238,8 +238,9 @@ class Result(object):
sampler_kwargs=None, injection_parameters=None,
meta_data=None, posterior=None, samples=None,
nested_samples=None, log_evidence=np.nan,
log_evidence_err=np.nan, log_noise_evidence=np.nan,
log_bayes_factor=np.nan, log_likelihood_evaluations=None,
log_evidence_err=np.nan, information_gain=np.nan,
log_noise_evidence=np.nan, log_bayes_factor=np.nan,
log_likelihood_evaluations=None,
log_prior_evaluations=None, sampling_time=None, nburn=None,
num_likelihood_evaluations=None, walkers=None,
max_autocorrelation_time=None, use_ratio=None,
......@@ -269,6 +270,8 @@ class Result(object):
An array of the output posterior samples and the unweighted samples
log_evidence, log_evidence_err, log_noise_evidence, log_bayes_factor: float
Natural log evidences
information_gain: float
The Kullback-Leibler divergence
log_likelihood_evaluations: array_like
The evaluations of the likelihood for each sample point
num_likelihood_evaluations: int
......@@ -321,6 +324,7 @@ class Result(object):
self.use_ratio = use_ratio
self.log_evidence = log_evidence
self.log_evidence_err = log_evidence_err
self.information_gain = information_gain
self.log_noise_evidence = log_noise_evidence
self.log_bayes_factor = log_bayes_factor
self.log_likelihood_evaluations = log_likelihood_evaluations
......@@ -573,7 +577,7 @@ class Result(object):
'log_noise_evidence', 'log_bayes_factor', 'priors', 'posterior',
'injection_parameters', 'meta_data', 'search_parameter_keys',
'fixed_parameter_keys', 'constraint_parameter_keys',
'sampling_time', 'sampler_kwargs', 'use_ratio',
'sampling_time', 'sampler_kwargs', 'use_ratio', 'information_gain',
'log_likelihood_evaluations', 'log_prior_evaluations',
'num_likelihood_evaluations', 'samples', 'nested_samples',
'walkers', 'nburn', 'parameter_labels', 'parameter_labels_with_unit',
......
......@@ -132,6 +132,7 @@ class Cpnest(NestedSampler):
self.result.nested_samples['weights'] = np.exp(log_weights)
self.result.log_evidence = out.NS.state.logZ
self.result.log_evidence_err = np.sqrt(out.NS.state.info / out.NS.state.nlive)
self.result.information_gain = out.NS.state.info
return self.result
def _verify_kwargs_against_default_kwargs(self):
......
......@@ -401,6 +401,7 @@ class Dynesty(NestedSampler):
sorted_samples=self.result.samples)
self.result.log_evidence = out.logz[-1]
self.result.log_evidence_err = out.logzerr[-1]
self.result.information_gain = out.information[-1]
def _run_nested_wrapper(self, kwargs):
""" Wrapper function to run_nested
......
......@@ -74,6 +74,7 @@ class Nestle(NestedSampler):
sorted_samples=self.result.samples)
self.result.log_evidence = out.logz
self.result.log_evidence_err = out.logzerr
self.result.information_gain = out.h
self.calc_likelihood_count()
return self.result
......
......@@ -366,6 +366,8 @@ class Ultranest(NestedSampler):
self.result.nested_samples = nested_samples
self.result.log_evidence = out["logz"]
self.result.log_evidence_err = out["logzerr"]
if self.kwargs["num_live_points"] is not None:
self.result.information_gain = np.power(out["logzerr"], 2) * self.kwargs["num_live_points"]
self.result.outputfiles_basename = self.outputfiles_basename
self.result.sampling_time = datetime.timedelta(seconds=self.total_sampling_time)
0% — Loading, or the content failed to render.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment