Commit 709e2b58 authored by Rhys Green's avatar Rhys Green Committed by Moritz Huebner

replacing self.count with result.num counter, this makes the code cleaner as...

replacing self.count with result.num counter, this makes the code cleaner as only a few extra line sof codee needed
parent b383e97b
......@@ -101,7 +101,8 @@ class Result(object):
log_evidence_err=np.nan, log_noise_evidence=np.nan,
log_bayes_factor=np.nan, log_likelihood_evaluations=None,
log_prior_evaluations=None, sampling_time=None, nburn=None,
walkers=None, max_autocorrelation_time=None, use_ratio=None,
num_likelihood_evaluations=None, walkers=None,
max_autocorrelation_time=None, use_ratio=None,
parameter_labels=None, parameter_labels_with_unit=None,
gzip=False, version=None):
""" A class to store the results of the sampling run
......@@ -130,6 +131,8 @@ class Result(object):
Natural log evidences
log_likelihood_evaluations: array_like
The evaluations of the likelihood for each sample point
num_likelihood_evaluations: int
The number of times the likelihood function is called
log_prior_evaluations: array_like
The evaluations of the prior for each sample point
sampling_time: float
......@@ -182,6 +185,7 @@ class Result(object):
self.log_bayes_factor = log_bayes_factor
self.log_likelihood_evaluations = log_likelihood_evaluations
self.log_prior_evaluations = log_prior_evaluations
self.num_likelihood_evaluations = num_likelihood_evaluations
self.sampling_time = sampling_time
self.version = version
self.max_autocorrelation_time = max_autocorrelation_time
......@@ -329,6 +333,18 @@ class Result(object):
def samples(self, samples):
self._samples = samples
@property
def num_likelihood_evaluations(self):
    """Number of times the likelihood function was evaluated.

    Raises
    ------
    ValueError
        If no evaluation count was stored on this result.
    """
    # Guard clause: fail loudly when the sampler never recorded a count.
    if self._num_likelihood_evaluations is None:
        raise ValueError("Result object has no stored likelihood evaluations")
    return self._num_likelihood_evaluations

@num_likelihood_evaluations.setter
def num_likelihood_evaluations(self, num_likelihood_evaluations):
    # Plain pass-through storage; None means "not recorded".
    self._num_likelihood_evaluations = num_likelihood_evaluations
@property
def nested_samples(self):
"""" An array of unweighted samples """
......
......@@ -4,7 +4,7 @@ import numpy as np
from pandas import DataFrame
from ..utils import logger, command_line_args
from ..utils import logger, command_line_args, Counter
from ..prior import Prior, PriorDict, DeltaFunction, Constraint
from ..result import Result, read_in_result
......@@ -86,6 +86,7 @@ class Sampler(object):
self, likelihood, priors, outdir='outdir', label='label',
use_ratio=False, plot=False, skip_import_verification=False,
injection_parameters=None, meta_data=None, result_class=None,
likelihood_benchmark=False,
**kwargs):
self.likelihood = likelihood
if isinstance(priors, PriorDict):
......@@ -101,6 +102,7 @@ class Sampler(object):
self._verify_external_sampler()
self.external_sampler_function = None
self.plot = plot
self.likelihood_benchmark = likelihood_benchmark
self._search_parameter_keys = list()
self._fixed_parameter_keys = list()
......@@ -116,6 +118,9 @@ class Sampler(object):
self._log_summary_for_sampler()
self.result = self._initialise_result(result_class)
self.likelihood_count = None
if self.likelihood_benchmark:
self.likelihood_count = Counter()
@property
def search_parameter_keys(self):
......@@ -364,6 +369,11 @@ class Sampler(object):
likelihood.parameter values
"""
if self.likelihood_benchmark:
try:
self.likelihood_count.increment()
except AttributeError:
pass
params = {
key: t for key, t in zip(self._search_parameter_keys, theta)}
self.likelihood.parameters.update(params)
......@@ -506,6 +516,12 @@ class Sampler(object):
logger.info("Using sampler {} with kwargs {}".format(
self.__class__.__name__, kwargs_print))
def calc_likelihood_count(self):
    """Store the likelihood-evaluation count on the result.

    If likelihood benchmarking is enabled, copy the current value of the
    shared counter to ``self.result.num_likelihood_evaluations``.
    Otherwise this is a no-op (returns ``None`` either way, so the
    redundant ``else: return None`` branch has been dropped).
    """
    if self.likelihood_benchmark:
        self.result.num_likelihood_evaluations = self.likelihood_count.value
class NestedSampler(Sampler):
npoints_equiv_kwargs = ['nlive', 'nlives', 'n_live_points', 'npoints', 'npoint', 'Nlive']
......
......@@ -100,6 +100,7 @@ class Cpnest(NestedSampler):
if self.plot:
out.plot()
self.calc_likelihood_count()
self.result.posterior = DataFrame(out.posterior_samples)
self.result.nested_samples = DataFrame(out.get_nested_samples(filename=''))
self.result.nested_samples.rename(columns=dict(logL='log_likelihood'), inplace=True)
......
......@@ -242,6 +242,7 @@ class Dynesty(NestedSampler):
self.result.log_likelihood_evaluations = self.reorder_loglikelihoods(
unsorted_loglikelihoods=out.logl, unsorted_samples=out.samples,
sorted_samples=self.result.samples)
self.calc_likelihood_count()
self.result.log_evidence = out.logz[-1]
self.result.log_evidence_err = out.logzerr[-1]
......@@ -442,7 +443,6 @@ class Dynesty(NestedSampler):
sampler_kwargs['maxiter'] = 2
self.sampler.run_nested(**sampler_kwargs)
self.result.samples = pd.DataFrame(
self.priors.sample(100))[self.search_parameter_keys].values
self.result.log_evidence = np.nan
......
......@@ -361,6 +361,7 @@ class Emcee(MCMCSampler):
log_ps = log_priors
self.calculate_autocorrelation(chain)
self.print_nburn_logging_info()
self.calc_likelihood_count()
self.result.nburn = self.nburn
n_samples = self.nwalkers * self.nburn
if self.result.nburn > self.nsteps:
......
......@@ -70,6 +70,7 @@ class Nestle(NestedSampler):
sorted_samples=self.result.samples)
self.result.log_evidence = out.logz
self.result.log_evidence_err = out.logzerr
self.calc_likelihood_count()
return self.result
def _run_test(self):
......@@ -92,4 +93,5 @@ class Nestle(NestedSampler):
self.result.samples = np.random.uniform(0, 1, (100, self.ndim))
self.result.log_evidence = np.nan
self.result.log_evidence_err = np.nan
self.calc_likelihood_count()
return self.result
......@@ -46,7 +46,6 @@ class PyPolyChord(NestedSampler):
pc_kwargs.pop('use_polychord_defaults')
settings = PolyChordSettings(nDims=self.ndim, nDerived=self.ndim, **pc_kwargs)
self._verify_kwargs_against_default_kwargs()
out = pypolychord.run_polychord(loglikelihood=self.log_likelihood, nDims=self.ndim,
nDerived=self.ndim, settings=settings, prior=self.prior_transform)
self.result.log_evidence = out.logZ
......@@ -54,6 +53,7 @@ class PyPolyChord(NestedSampler):
log_likelihoods, physical_parameters = self._read_sample_file()
self.result.log_likelihood_evaluations = log_likelihoods
self.result.samples = physical_parameters
self.calc_likelihood_count()
return self.result
def _setup_dynamic_defaults(self):
......
......@@ -138,7 +138,7 @@ class Ptemcee(Emcee):
raise SamplerError(
"The run has finished, but the chain is not burned in: "
"`nburn < nsteps`. Try increasing the number of steps.")
self.calc_likelihood_count()
self.result.samples = self.sampler.chain[0, :, self.nburn:, :].reshape(
(-1, self.ndim))
self.result.walkers = self.sampler.chain[0, :, :, :]
......
......@@ -148,6 +148,7 @@ class PTMCMCSampler(MCMCSampler):
sampler.sample(p0=self.p0, **self.sampler_function_kwargs)
samples, meta, loglike = self.__read_in_data()
self.calc_likelihood_count()
self.result.nburn = self.sampler_function_kwargs['burn']
self.result.samples = samples[self.result.nburn:]
self.meta_data['sampler_meta'] = meta
......@@ -161,7 +162,10 @@ class PTMCMCSampler(MCMCSampler):
def __read_in_data(self):
""" Read the data stored by PTMCMC to disk """
temp_outDir = self.sampler_init_kwargs['outDir']
data = np.loadtxt('{}/chain_1.txt'.format(temp_outDir))
try:
data = np.loadtxt('{}chain_1.txt'.format(temp_outDir))
except OSError:
data = np.loadtxt('{}chain_1.0.txt'.format(temp_outDir))
jumpfiles = glob.glob('{}/*jump.txt'.format(temp_outDir))
jumps = map(np.loadtxt, jumpfiles)
samples = data[:, :-4]
......@@ -171,8 +175,8 @@ class PTMCMCSampler(MCMCSampler):
for ct, j in enumerate(jumps):
label = jumpfiles[ct].split('/')[-1].split('_jump.txt')[0]
jump_accept[label] = j
PT_swap = {'swap_accept': data[-1]}
tot_accept = {'tot_accept': data[-2]}
PT_swap = {'swap_accept': data[:, -1]}
tot_accept = {'tot_accept': data[:, -2]}
log_post = {'log_post': data[:, -4]}
meta = {}
meta['tot_accept'] = tot_accept
......
......@@ -573,11 +573,11 @@ class Pymc3(MCMCSampler):
index = self.multivariate_normal_sets[key]['index']
self.result.samples[:, count] = trace[priorset][:, index]
count += 1
self.result.sampler_output = np.nan
self.calculate_autocorrelation(self.result.samples)
self.result.log_evidence = np.nan
self.result.log_evidence_err = np.nan
self.calc_likelihood_count()
return self.result
def set_prior(self):
......
......@@ -107,5 +107,6 @@ class Pymultinest(NestedSampler):
self.result.samples = post_equal_weights_data[:, :-1]
self.result.log_evidence = out['logZ']
self.result.log_evidence_err = out['logZerr']
self.calc_likelihood_count()
self.result.outputfiles_basename = self.kwargs['outputfiles_basename']
return self.result
......@@ -8,6 +8,7 @@ import traceback
import inspect
import subprocess
import json
import multiprocessing
import numpy as np
from scipy.interpolate import interp2d
......@@ -827,6 +828,28 @@ def run_commandline(cl, log_level=20, raise_error=True, return_output=True):
process.communicate()
class Counter(object):
    """A process-safe call counter.

    Counts how many times :meth:`increment` is called, sharing the tally
    across processes via a raw shared-memory integer guarded by a
    :class:`multiprocessing.Lock`.

    Parameters
    ----------
    initval : int, optional
        The number to start counting from (default 0).
    """

    def __init__(self, initval=0):
        # 'i' = C signed int; RawValue has no internal lock, so we pair
        # it with an explicit Lock for atomic increments.
        self.val = multiprocessing.RawValue('i', initval)
        self.lock = multiprocessing.Lock()

    def increment(self):
        """Atomically increase the counter by one."""
        with self.lock:
            self.val.value += 1

    @property
    def value(self):
        """int: The current count."""
        return self.val.value
class UnsortedInterp2d(interp2d):
def __call__(self, x, y, dx=0, dy=0, assume_sorted=False):
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment