Skip to content
GitLab
Explore
Sign in
Primary navigation
Search or go to…
Project
bilby
Manage
Activity
Members
Labels
Plan
Issues
Issue boards
Milestones
Iterations
Requirements
Code
Merge requests
Repository
Branches
Commits
Tags
Repository graph
Compare revisions
Snippets
Locked files
Build
Pipelines
Jobs
Pipeline schedules
Test cases
Artifacts
Deploy
Releases
Container Registry
Model registry
Operate
Environments
Monitor
Incidents
Service Desk
Analyze
Value stream analytics
Contributor analytics
CI/CD analytics
Repository analytics
Code review analytics
Issue analytics
Insights
Model experiments
Help
Help
Support
GitLab documentation
Compare GitLab plans
Community forum
Contribute to GitLab
Provide feedback
Keyboard shortcuts
?
Snippets
Groups
Projects
Show more breadcrumbs
lscsoft
bilby
Commits
52baf97c
Commit
52baf97c
authored
4 years ago
by
Gregory Ashton
Browse files
Options
Downloads
Patches
Plain Diff
Update from log likelihood to log posterior
parent
3b03a766
No related branches found
No related tags found
1 merge request
!842
Add a mean-log-likelihood method to improve the ACT estimation
Changes
1
Hide whitespace changes
Inline
Side-by-side
Showing
1 changed file
bilby/core/sampler/ptemcee.py
+48
-39
48 additions, 39 deletions
bilby/core/sampler/ptemcee.py
with
48 additions
and
39 deletions
bilby/core/sampler/ptemcee.py
+
48
−
39
View file @
52baf97c
...
...
@@ -26,7 +26,7 @@ ConvergenceInputs = namedtuple(
"
autocorr_tol
"
,
"
autocorr_tau
"
,
"
gradient_tau
"
,
"
gradient_mean_log
l
"
,
"
gradient_mean_log
_posterior
"
,
"
Q_tol
"
,
"
safety
"
,
"
burn_in_nact
"
,
...
...
@@ -78,7 +78,7 @@ class Ptemcee(MCMCSampler):
gradient_tau: float, (0.1)
The maximum (smoothed) local gradient of the ACT estimate to allow.
This ensures the ACT estimate is stable before finishing sampling.
gradient_mean_log
l
: float, (0.1)
gradient_mean_log
_posterior
: float, (0.1)
The maximum (smoothed) local gradient of the log likelihood to allow.
This ensures the ACT estimate is stable before finishing sampling.
Q_tol: float (1.01)
...
...
@@ -159,7 +159,7 @@ class Ptemcee(MCMCSampler):
safety
=
1
,
autocorr_tau
=
1
,
gradient_tau
=
0.1
,
gradient_mean_log
l
=
0.1
,
gradient_mean_log
_posterior
=
0.1
,
Q_tol
=
1.02
,
min_tau
=
1
,
check_point_deltaT
=
600
,
...
...
@@ -213,7 +213,7 @@ class Ptemcee(MCMCSampler):
mean_logl_frac
=
mean_logl_frac
,
thin_by_nact
=
thin_by_nact
,
gradient_tau
=
gradient_tau
,
gradient_mean_log
l
=
gradient_mean_log
l
,
gradient_mean_log
_posterior
=
gradient_mean_log
_posterior
,
Q_tol
=
Q_tol
,
nsamples
=
nsamples
,
ignore_keys_for_tau
=
ignore_keys_for_tau
,
...
...
@@ -382,6 +382,7 @@ class Ptemcee(MCMCSampler):
self
.
iteration
=
data
[
"
iteration
"
]
self
.
chain_array
=
data
[
"
chain_array
"
]
self
.
log_likelihood_array
=
data
[
"
log_likelihood_array
"
]
self
.
log_posterior_array
=
data
[
"
log_posterior_array
"
]
self
.
pos0
=
data
[
"
pos0
"
]
self
.
beta_list
=
data
[
"
beta_list
"
]
self
.
sampler
.
_betas
=
np
.
array
(
self
.
beta_list
[
-
1
])
...
...
@@ -424,7 +425,8 @@ class Ptemcee(MCMCSampler):
# Initialize storing results
self
.
iteration
=
0
self
.
chain_array
=
self
.
get_zero_chain_array
()
self
.
log_likelihood_array
=
self
.
get_zero_log_likelihood_array
()
self
.
log_likelihood_array
=
self
.
get_zero_array
()
self
.
log_posterior_array
=
self
.
get_zero_array
()
self
.
beta_list
=
[]
self
.
tau_list
=
[]
self
.
tau_list_n
=
[]
...
...
@@ -437,7 +439,7 @@ class Ptemcee(MCMCSampler):
def
get_zero_chain_array
(
self
):
return
np
.
zeros
((
self
.
nwalkers
,
self
.
max_steps
,
self
.
ndim
))
def
get_zero_
log_likelihood_
array
(
self
):
def
get_zero_array
(
self
):
return
np
.
zeros
((
self
.
ntemps
,
self
.
nwalkers
,
self
.
max_steps
))
def
get_pos0
(
self
):
...
...
@@ -483,14 +485,18 @@ class Ptemcee(MCMCSampler):
self
.
chain_array
=
np
.
concatenate
((
self
.
chain_array
,
self
.
get_zero_chain_array
()),
axis
=
1
)
self
.
log_likelihood_array
=
np
.
concatenate
((
self
.
log_likelihood_array
,
self
.
get_zero_log_likelihood_array
()),
self
.
log_likelihood_array
,
self
.
get_zero_array
()),
axis
=
2
)
self
.
log_posterior_array
=
np
.
concatenate
((
self
.
log_posterior_array
,
self
.
get_zero_array
()),
axis
=
2
)
self
.
pos0
=
pos0
self
.
chain_array
[:,
self
.
iteration
,
:]
=
pos0
[
0
,
:,
:]
self
.
log_likelihood_array
[:,
:,
self
.
iteration
]
=
log_likelihood
self
.
mean_log_likelihood
=
np
.
mean
(
self
.
log_likelihood_array
[:,
:,
:
self
.
iteration
],
axis
=
1
)
self
.
log_posterior_array
[:,
:,
self
.
iteration
]
=
log_posterior
self
.
mean_log_posterior
=
np
.
mean
(
self
.
log_posterior_array
[:,
:,
:
self
.
iteration
],
axis
=
1
)
# Calculate time per iteration
self
.
time_per_check
.
append
((
datetime
.
datetime
.
now
()
-
t0
).
total_seconds
())
...
...
@@ -499,15 +505,16 @@ class Ptemcee(MCMCSampler):
self
.
iteration
+=
1
# Calculate minimum iteration step to discard
m
ean_logl_min_it
=
get_mean_logl_min_it
(
self
.
mean_log_
likelihood
,
m
inimum_iteration
=
get_minimum_stable_itertion
(
self
.
mean_log_
posterior
,
frac
=
self
.
convergence_inputs
.
mean_logl_frac
)
logger
.
debug
(
"
M
ean logl min it = {}
"
.
format
(
mean_logl_min_it
))
logger
.
debug
(
"
M
inimum iteration = {}
"
.
format
(
minimum_iteration
))
# Calculate the maximum discard number
discard_max
=
np
.
max
(
[
self
.
convergence_inputs
.
burn_in_fixed_discard
,
mean_logl_min_it
]
[
self
.
convergence_inputs
.
burn_in_fixed_discard
,
minimum_iteration
]
)
if
self
.
iteration
>
discard_max
+
self
.
nwalkers
:
...
...
@@ -536,7 +543,7 @@ class Ptemcee(MCMCSampler):
self
.
tau_list
,
self
.
tau_list_n
,
self
.
Q_list
,
self
.
mean_log_
likelihood
,
self
.
mean_log_
posterior
,
)
if
stop
:
...
...
@@ -650,8 +657,8 @@ class Ptemcee(MCMCSampler):
logger
.
info
(
"
tau plot failed with exception {}
"
.
format
(
e
))
try
:
plot_mean_log_
likelihood
(
self
.
mean_log_
likelihood
,
plot_mean_log_
posterior
(
self
.
mean_log_
posterior
,
self
.
outdir
,
self
.
label
,
)
...
...
@@ -659,13 +666,13 @@ class Ptemcee(MCMCSampler):
logger
.
info
(
"
mean_logl plot failed with exception {}
"
.
format
(
e
))
def
get_m
ean_logl_min_it
(
mean_log_likelihood
,
frac
):
nsteps
=
mean_
log_likelihood
.
shape
[
1
]
if
nsteps
<
10
:
def
get_m
inimum_stable_itertion
(
mean_array
,
frac
,
nsteps_min
=
10
):
nsteps
=
mean_
array
.
shape
[
1
]
if
nsteps
<
nsteps_min
:
return
0
min_it
=
0
for
x
in
mean_
log_likelihood
:
for
x
in
mean_
array
:
maxl
=
np
.
max
(
x
)
fracdiff
=
(
maxl
-
x
)
/
np
.
abs
(
maxl
)
idxs
=
fracdiff
<
frac
...
...
@@ -685,7 +692,7 @@ def check_iteration(
tau_list
,
tau_list_n
,
Q_list
,
mean_log_
likelihood
,
mean_log_
posterior
,
):
"""
Per-iteration logic to calculate the convergence check
...
...
@@ -776,28 +783,28 @@ def check_iteration(
)
tau_usable
=
False
check_mean_log_
likelihood
=
mean_log_
likelihood
[:,
-
nsteps_to_check
:]
gradient_mean_log
l
=
get_max_gradient
(
check_mean_log_
likelihood
,
axis
=
1
,
window_length
=
GRAD_WINDOW_LENGTH
,
check_mean_log_
posterior
=
mean_log_
posterior
[:,
-
nsteps_to_check
:]
gradient_mean_log
_posterior
=
get_max_gradient
(
check_mean_log_
posterior
,
axis
=
1
,
window_length
=
GRAD_WINDOW_LENGTH
,
smooth
=
True
)
if
gradient_mean_log
l
<
ci
.
gradient_mean_log
l
:
if
gradient_mean_log
_posterior
<
ci
.
gradient_mean_log
_posterior
:
logger
.
debug
(
"
tau usable as {} < gradient_mean_log
l
={}
"
.
format
(
gradient_mean_log
l
,
ci
.
gradient_mean_log
l
)
"
tau usable as {} < gradient_mean_log
_posterior
={}
"
.
format
(
gradient_mean_log
_posterior
,
ci
.
gradient_mean_log
_posterior
)
)
tau_usable
=
True
else
:
logger
.
debug
(
"
tau not usable as {} > gradient_mean_log
l
={}
"
.
format
(
gradient_mean_log
l
,
ci
.
gradient_mean_log
l
)
"
tau not usable as {} > gradient_mean_log
_posterior
={}
"
.
format
(
gradient_mean_log
_posterior
,
ci
.
gradient_mean_log
_posterior
)
)
tau_usable
=
False
else
:
logger
.
debug
(
"
ACT is nan
"
)
gradient_tau
=
np
.
nan
gradient_mean_log
l
=
np
.
nan
gradient_mean_log
_posterior
=
np
.
nan
tau_usable
=
False
if
nsteps
<
tau_int
*
ci
.
autocorr_tol
:
...
...
@@ -816,7 +823,7 @@ def check_iteration(
samples_per_check
,
tau_int
,
gradient_tau
,
gradient_mean_log
l
,
gradient_mean_log
_posterior
,
tau_usable
,
convergence_inputs
,
Q
...
...
@@ -856,7 +863,7 @@ def print_progress(
samples_per_check
,
tau_int
,
gradient_tau
,
gradient_mean_log
l
,
gradient_mean_log
_posterior
,
tau_usable
,
convergence_inputs
,
Q
,
...
...
@@ -880,7 +887,9 @@ def print_progress(
sampling_time
=
datetime
.
timedelta
(
seconds
=
np
.
sum
(
time_per_check
))
tau_str
=
"
{}(+{:0.2f},+{:0.2f})
"
.
format
(
tau_int
,
gradient_tau
,
gradient_mean_logl
)
tau_str
=
"
{}(+{:0.2f},+{:0.2f})
"
.
format
(
tau_int
,
gradient_tau
,
gradient_mean_log_posterior
)
if
tau_usable
:
tau_str
=
"
={}
"
.
format
(
tau_str
)
...
...
@@ -1050,21 +1059,21 @@ def plot_tau(
plt
.
close
(
fig
)
def
plot_mean_log_
likelihood
(
mean_log_likelihood
,
outdir
,
label
):
def
plot_mean_log_
posterior
(
mean_log_posterior
,
outdir
,
label
):
ntemps
,
nsteps
=
mean_log_
likelihood
.
shape
ymax
=
np
.
max
(
mean_log_
likelihood
)
ymin
=
np
.
min
(
mean_log_
likelihood
[:,
-
100
:])
ntemps
,
nsteps
=
mean_log_
posterior
.
shape
ymax
=
np
.
max
(
mean_log_
posterior
)
ymin
=
np
.
min
(
mean_log_
posterior
[:,
-
100
:])
ymax
+=
0.1
*
(
ymax
-
ymin
)
ymin
-=
0.1
*
(
ymax
-
ymin
)
fig
,
ax
=
plt
.
subplots
()
idxs
=
np
.
arange
(
nsteps
)
ax
.
plot
(
idxs
,
mean_log_
likelihood
.
T
)
ax
.
plot
(
idxs
,
mean_log_
posterior
.
T
)
ax
.
set
(
xlabel
=
"
Iteration
"
,
ylabel
=
r
"
$\langle\log\mathcal{L}\rangle$
"
,
ylim
=
(
ymin
,
ymax
))
fig
.
tight_layout
()
fig
.
savefig
(
"
{}/{}_checkpoint_meanlog
like
.png
"
.
format
(
outdir
,
label
))
fig
.
savefig
(
"
{}/{}_checkpoint_meanlog
posterior
.png
"
.
format
(
outdir
,
label
))
plt
.
close
(
fig
)
...
...
This diff is collapsed.
Click to expand it.
Preview
0%
Loading
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Save comment
Cancel
Please
register
or
sign in
to comment