lscsoft / bilby

Commit 0094a607 authored Sep 11, 2018 by Gregory Ashton
Fix the flake8 failures

parent 913c16c7
Pipeline #30220 passed with stage in 5 minutes and 41 seconds
Changes: 17    Pipelines: 1
Showing 17 changed files with 225 additions and 175 deletions
.gitlab-ci.yml                          +4   -3
cli_tupak/plot_multiple_posteriors.py   +1   -1
setup.cfg                               +2   -1
setup.py                                +2   -2
tupak/core/__init__.py                  +1   -1
tupak/core/likelihood.py                +6   -5
tupak/core/prior.py                     +18  -18
tupak/core/result.py                    +17  -11
tupak/core/sampler.py                   +94  -57
tupak/core/utils.py                     +7   -7
tupak/gw/calibration.py                 +0   -1
tupak/gw/conversion.py                  +10  -8
tupak/gw/detector.py                    +7   -7
tupak/gw/likelihood.py                  +17  -16
tupak/gw/source.py                      +28  -28
tupak/gw/utils.py                       +9   -7
tupak/hyper/likelihood.py               +2   -2
.gitlab-ci.yml

@@ -41,6 +41,10 @@ python-3:
    - pip install flake8
  script:
    - python setup.py install
    # Run pyflakes
    - flake8 .
    # Run tests and collect coverage data
    - coverage --version
    - coverage erase
...
@@ -67,9 +71,6 @@ python-3:
    - make clean
    - make html
    # Run pyflakes
    - flake8 .
  artifacts:
    paths:
      - htmlcov/
...
cli_tupak/plot_multiple_posteriors.py

@@ -4,7 +4,7 @@ import argparse

def setup_command_line_args():
    parser = argparse.ArgumentParser(
        description="Plot corner plots from results files")
    parser.add_argument("-r", "--results", nargs='+',
                        help="List of results files to use.")
    parser.add_argument("-f", "--filename", default=None,
                        help="Output file name.")
...
setup.cfg

[flake8]
-exclude = .git,docs,build,dist
+exclude = .git,docs,build,dist,test,examples,*__init__.py
max-line-length = 160
ignore = E129
setup.py

@@ -22,8 +22,8 @@ def write_version_file(version):
    try:
        git_log = subprocess.check_output(
            ['git', 'log', '-1', '--pretty=%h %ai']).decode('utf-8')
        git_diff = (subprocess.check_output(['git', 'diff', '.']) +
                    subprocess.check_output(['git', 'diff', '--cached', '.'])).decode('utf-8')
        if git_diff == '':
            git_status = '(CLEAN) ' + git_log
...
tupak/core/__init__.py

@@ -3,4 +3,4 @@ import tupak.core.likelihood
import tupak.core.prior
import tupak.core.result
import tupak.core.sampler
-import tupak.core.utils
\ No newline at end of file
+import tupak.core.utils
tupak/core/likelihood.py

@@ -160,8 +160,8 @@ class GaussianLikelihood(Analytical1DLikelihood):
        return self.parameters.get('sigma', self.sigma)

    def __summed_log_likelihood(self, sigma):
        return -0.5 * (np.sum((self.residual / sigma) ** 2) +
                       self.n * np.log(2 * np.pi * sigma ** 2))


class PoissonLikelihood(Analytical1DLikelihood):
...
@@ -314,8 +314,10 @@ class StudentTLikelihood(Analytical1DLikelihood):
        return self.parameters.get('nu', self.nu)

    def __summed_log_likelihood(self, nu):
        return (self.n * (gammaln((nu + 1.0) / 2.0) +
                          .5 * np.log(self.lam / (nu * np.pi)) -
                          gammaln(nu / 2.0)) -
                (nu + 1.0) / 2.0 * np.sum(np.log1p(
                    self.lam * self.residual ** 2 / nu)))


class JointLikelihood(Likelihood):
...
@@ -372,4 +374,3 @@ class JointLikelihood(Likelihood):
    def noise_log_likelihood(self):
        """ This is just the sum of the noise likelihoods of all parts of the joint likelihood"""
        return sum([likelihood.noise_log_likelihood()
                    for likelihood in self.likelihoods])
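For orientation, the Student-t expression above is the usual summed log-density with precision lam = 1 / sigma**2. A small stand-alone cross-check against scipy (not part of the commit; the names below are illustrative) might look like:

import numpy as np
from scipy.stats import t as student_t
from scipy.special import gammaln

def summed_log_likelihood(residual, nu, lam):
    # Same algebra as the diff above: n terms of the Student-t log-pdf.
    n = len(residual)
    return (n * (gammaln((nu + 1.0) / 2.0) +
                 0.5 * np.log(lam / (nu * np.pi)) -
                 gammaln(nu / 2.0)) -
            (nu + 1.0) / 2.0 * np.sum(np.log1p(lam * residual ** 2 / nu)))

rng = np.random.default_rng(0)
residual = rng.normal(size=100)
nu, lam = 5.0, 4.0                     # lam = 1 / sigma**2, so sigma = 0.5
direct = student_t.logpdf(residual, df=nu, scale=lam ** -0.5).sum()
print(np.isclose(summed_log_likelihood(residual, nu, lam), direct))   # True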
tupak/core/prior.py

@@ -10,7 +10,7 @@ from collections import OrderedDict
from tupak.core.utils import logger
from tupak.core import utils
-import tupak
+import tupak  # noqa


class PriorSet(OrderedDict):
...
@@ -28,8 +28,8 @@ class PriorSet(OrderedDict):
        if isinstance(dictionary, dict):
            self.update(dictionary)
        elif type(dictionary) is str:
            logger.debug('Argument "dictionary" is a string.' +
                         ' Assuming it is intended as a file name.')
            self.read_in_file(dictionary)
        elif type(filename) is str:
            self.read_in_file(filename)
...
@@ -580,8 +580,9 @@ class PowerLaw(Prior):
        if self.alpha == -1:
            return np.nan_to_num(1 / val / np.log(self.maximum / self.minimum)) * in_prior
        else:
            return np.nan_to_num(val ** self.alpha * (1 + self.alpha) /
                                 (self.maximum ** (1 + self.alpha) -
                                  self.minimum ** (1 + self.alpha))) * in_prior

    def ln_prob(self, val):
        """Return the logarithmic prior probability of val
...
@@ -600,11 +601,10 @@ class PowerLaw(Prior):
        if self.alpha == -1:
            normalising = 1. / np.log(self.maximum / self.minimum)
        else:
            normalising = (1 + self.alpha) / (self.maximum ** (1 + self.alpha) -
                                              self.minimum ** (1 + self.alpha))

        return (self.alpha * np.log(val) + np.log(normalising)) + np.log(1. * in_prior)

    def __repr__(self):
        """Call to helper method in the super class."""
...
@@ -645,7 +645,7 @@ class Uniform(Prior):
            float: Prior probability of val
        """
        return scipy.stats.uniform.pdf(val, loc=self.minimum,
                                       scale=self.maximum - self.minimum)

    def ln_prob(self, val):
        """Return the log prior probability of val
...
@@ -659,7 +659,7 @@ class Uniform(Prior):
            float: log probability of val
        """
        return scipy.stats.uniform.logpdf(val, loc=self.minimum,
                                          scale=self.maximum - self.minimum)


class LogUniform(PowerLaw):
...
@@ -821,7 +821,7 @@ class Gaussian(Prior):
class Normal(Gaussian):

    def __init__(self, mu, sigma, name=None, latex_label=None):
        """A synonym for the Gaussian distribution.
...
@@ -899,7 +899,7 @@ class TruncatedGaussian(Prior):
        """
        in_prior = (val >= self.minimum) & (val <= self.maximum)
        return np.exp(-(self.mu - val) ** 2 / (2 * self.sigma ** 2)) / (
            2 * np.pi) ** 0.5 / self.sigma / self.normalisation * in_prior

    def __repr__(self):
        """Call to helper method in the super class."""
...
@@ -907,7 +907,7 @@ class TruncatedGaussian(Prior):
class TruncatedNormal(TruncatedGaussian):

    def __init__(self, mu, sigma, minimum, maximum, name=None, latex_label=None):
        """A synonym for the TruncatedGaussian distribution.
...
@@ -943,7 +943,7 @@ class HalfGaussian(TruncatedGaussian):
        See superclass
        """
        TruncatedGaussian.__init__(self, 0., sigma, minimum=0., maximum=np.inf,
                                   name=name, latex_label=latex_label)

    def __repr__(self):
        """Call to helper method in the super class."""
        return Prior._subclass_repr_helper(self, subclass_args=['sigma'])
...
@@ -1109,7 +1109,7 @@ class StudentT(Prior):
        See superclass
        """
        Prior.__init__(self, name, latex_label)

        if df <= 0. or scale <= 0.:
            raise ValueError("For the StudentT prior the number of degrees of freedom and scale must be positive")
...
@@ -1215,7 +1215,7 @@ class Beta(Prior):
            return spdf

        if isinstance(val, np.ndarray):
            pdf = -np.inf * np.ones(len(val))
            pdf[np.isfinite(spdf)] = spdf[np.isfinite(spdf)]
            return spdf
        else:
...
@@ -1437,7 +1437,7 @@ class ChiSquared(Gamma):
        if nu <= 0 or not isinstance(nu, int):
            raise ValueError("For the ChiSquared prior the number of degrees of freedom must be a positive integer")

        Gamma.__init__(self, name=name, k=nu / 2., theta=2.,
                       latex_label=latex_label)


class Interped(Prior):
...
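The two PowerLaw branches above implement p(x) proportional to x**alpha on [minimum, maximum], with alpha == -1 handled separately because the usual normalisation diverges there. A quick numerical sanity check (illustrative only, written against numpy/scipy rather than the tupak classes) is:

import numpy as np
from scipy.integrate import quad

def powerlaw_pdf(x, alpha, minimum, maximum):
    # Same normalisation as the diff: (1 + alpha) / (max^(1+alpha) - min^(1+alpha)),
    # falling back to 1 / (x * ln(max / min)) when alpha == -1.
    if alpha == -1:
        return 1 / (x * np.log(maximum / minimum))
    return x ** alpha * (1 + alpha) / (maximum ** (1 + alpha) - minimum ** (1 + alpha))

for alpha in (-2.0, -1.0, 0.5):
    total, _ = quad(powerlaw_pdf, 1.0, 10.0, args=(alpha, 1.0, 10.0))
    print(alpha, round(total, 6))   # each integral should come out as 1.0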
tupak/core/result.py

@@ -372,18 +372,24 @@ class Result(dict):
    def construct_cbc_derived_parameters(self):
        """ Construct widely used derived parameters of CBCs """
        self.posterior['mass_chirp'] = (
            (self.posterior.mass_1 * self.posterior.mass_2) ** 0.6 /
            (self.posterior.mass_1 + self.posterior.mass_2) ** 0.2)
        self.posterior['q'] = self.posterior.mass_2 / self.posterior.mass_1
        self.posterior['eta'] = (
            (self.posterior.mass_1 * self.posterior.mass_2) /
            (self.posterior.mass_1 + self.posterior.mass_2) ** 2)
        self.posterior['chi_eff'] = (
            (self.posterior.a_1 * np.cos(self.posterior.tilt_1) +
             self.posterior.q * self.posterior.a_2 *
             np.cos(self.posterior.tilt_2)) / (1 + self.posterior.q))
        self.posterior['chi_p'] = np.maximum(
            self.posterior.a_1 * np.sin(self.posterior.tilt_1),
            (4 * self.posterior.q + 3) / (3 * self.posterior.q + 4) *
            self.posterior.q * self.posterior.a_2 *
            np.sin(self.posterior.tilt_2))

    def check_attribute_match_to_other_object(self, name, other_object):
        """ Check attribute name exists in other_object and is the same
...
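For readers outside the gravitational-wave community, the derived quantities added above are the standard compact-binary parameters, written here with the same symbols the code uses (a_1, a_2 are dimensionless spin magnitudes, tilt_1, tilt_2 the spin tilt angles):

\mathcal{M}_{\rm chirp} = \frac{(m_1 m_2)^{3/5}}{(m_1 + m_2)^{1/5}}, \qquad
q = \frac{m_2}{m_1}, \qquad
\eta = \frac{m_1 m_2}{(m_1 + m_2)^2}

\chi_{\rm eff} = \frac{a_1 \cos(\mathrm{tilt}_1) + q\, a_2 \cos(\mathrm{tilt}_2)}{1 + q}, \qquad
\chi_p = \max\!\left(a_1 \sin(\mathrm{tilt}_1),\ \frac{4q + 3}{3q + 4}\, q\, a_2 \sin(\mathrm{tilt}_2)\right)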
tupak/core/sampler.py

@@ -535,8 +535,8 @@ class Dynesty(Sampler):
        # If n_check_point is not already set, set it checkpoint every 10 mins
        if 'n_check_point' not in self.__kwargs:
            n_check_point_raw = (self.__kwargs['check_point_delta_t'] /
                                 self._log_likelihood_eval_time)
            n_check_point_rnd = int(float("{:1.0g}".format(n_check_point_raw)))
            self.__kwargs['n_check_point'] = n_check_point_rnd
...
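The "{:1.0g}" formatting above rounds the raw estimate to one significant figure before converting back to an integer, so the checkpoint interval lands on a round number of likelihood evaluations. A stand-alone illustration:

# Round to one significant figure via the general-format specifier.
raw = 2843.7                               # e.g. check_point_delta_t / eval_time
rounded = int(float("{:1.0g}".format(raw)))
print(rounded)                             # 3000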
@@ -1114,36 +1114,61 @@ class Pymc3(Sampler):
        prior_map = {}
        self.prior_map = prior_map

        # predefined PyMC3 distributions
        prior_map['Gaussian'] = {
            'pymc3': 'Normal',
            'argmap': {'mu': 'mu', 'sigma': 'sd'}}
        prior_map['TruncatedGaussian'] = {
            'pymc3': 'TruncatedNormal',
            'argmap': {'mu': 'mu', 'sigma': 'sd',
                       'minimum': 'lower', 'maximum': 'upper'}}
        prior_map['HalfGaussian'] = {
            'pymc3': 'HalfNormal',
            'argmap': {'sigma': 'sd'}}
        prior_map['Uniform'] = {
            'pymc3': 'Uniform',
            'argmap': {'minimum': 'lower', 'maximum': 'upper'}}
        prior_map['LogNormal'] = {
            'pymc3': 'Lognormal',
            'argmap': {'mu': 'mu', 'sigma': 'sd'}}
        prior_map['Exponential'] = {
            'pymc3': 'Exponential',
            'argmap': {'mu': 'lam'},
            'argtransform': {'mu': lambda mu: 1. / mu}}
        prior_map['StudentT'] = {
            'pymc3': 'StudentT',
            'argmap': {'df': 'nu', 'mu': 'mu', 'scale': 'sd'}}
        prior_map['Beta'] = {
            'pymc3': 'Beta',
            'argmap': {'alpha': 'alpha', 'beta': 'beta'}}
        prior_map['Logistic'] = {
            'pymc3': 'Logistic',
            'argmap': {'mu': 'mu', 'scale': 's'}}
        prior_map['Cauchy'] = {
            'pymc3': 'Cauchy',
            'argmap': {'alpha': 'alpha', 'beta': 'beta'}}
        prior_map['Gamma'] = {
            'pymc3': 'Gamma',
            'argmap': {'k': 'alpha', 'theta': 'beta'},
            'argtransform': {'theta': lambda theta: 1. / theta}}
        prior_map['ChiSquared'] = {
            'pymc3': 'ChiSquared',
            'argmap': {'nu': 'nu'}}
        prior_map['Interped'] = {
            'pymc3': 'Interpolated',
            'argmap': {'xx': 'x_points', 'yy': 'pdf_points'}}
        prior_map['Normal'] = prior_map['Gaussian']
        prior_map['TruncatedNormal'] = prior_map['TruncatedGaussian']
        prior_map['HalfNormal'] = prior_map['HalfGaussian']
...
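Each entry names the PyMC3 distribution to instantiate, how the tupak prior arguments map onto its keywords, and an optional argument transform. A stripped-down illustration of how such a table gets consumed (not the project's code; purely a sketch) is:

import pymc3

# one entry from the table above, plus tupak-style keyword arguments
entry = {'pymc3': 'Uniform', 'argmap': {'minimum': 'lower', 'maximum': 'upper'}}
prior_kwargs = {'minimum': 0.0, 'maximum': 1.0}

with pymc3.Model():
    dist_class = getattr(pymc3, entry['pymc3'])                        # pymc3.Uniform
    kwargs = {entry['argmap'][k]: v for k, v in prior_kwargs.items()}  # {'lower': 0.0, 'upper': 1.0}
    x = dist_class('x', **kwargs)                                      # the mapped random variable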
@@ -1153,10 +1178,10 @@ class Pymc3(Sampler):
        # internally defined mappings for tupak priors
        prior_map['DeltaFunction'] = {'internal': self._deltafunction_prior}
        prior_map['Sine'] = {'internal': self._sine_prior}
        prior_map['Cosine'] = {'internal': self._cosine_prior}
        prior_map['PowerLaw'] = {'internal': self._powerlaw_prior}
        prior_map['LogUniform'] = {'internal': self._powerlaw_prior}

    def _deltafunction_prior(self, key, **kwargs):
        """
...
@@ -1175,7 +1200,7 @@ class Pymc3(Sampler):
        """
        Map the tupak Sine prior to a PyMC3 style function
        """
        from tupak.core.prior import Sine

        # check prior is a Sine
...
@@ -1197,7 +1222,9 @@ class Pymc3(Sampler):
                    self.lower = lower = tt.as_tensor_variable(floatX(lower))
                    self.upper = upper = tt.as_tensor_variable(floatX(upper))
                    self.norm = (tt.cos(lower) - tt.cos(upper))
                    self.mean = (
                        (tt.sin(upper) + lower * tt.cos(lower) -
                         tt.sin(lower) - upper * tt.cos(upper)) / self.norm)

                    transform = pymc3.distributions.transforms.interval(lower, upper)
...
@@ -1206,7 +1233,9 @@ class Pymc3(Sampler):
                def logp(self, value):
                    upper = self.upper
                    lower = self.lower
                    return pymc3.distributions.dist_math.bound(
                        tt.log(tt.sin(value) / self.norm),
                        lower <= value, value <= upper)

            return Pymc3Sine(key, lower=self.priors[key].minimum,
                             upper=self.priors[key].maximum)
        else:
...
@@ -1216,7 +1245,7 @@ class Pymc3(Sampler):
        """
        Map the tupak Cosine prior to a PyMC3 style function
        """
        from tupak.core.prior import Cosine

        # check prior is a Cosine
...
@@ -1231,14 +1260,16 @@ class Pymc3(Sampler):
                raise ImportError("You must have Theano installed to use PyMC3")

            class Pymc3Cosine(pymc3.Continuous):

                def __init__(self, lower=-np.pi / 2., upper=np.pi / 2.):
                    if lower >= upper:
                        raise ValueError("Lower bound is above upper bound!")

                    self.lower = lower = tt.as_tensor_variable(floatX(lower))
                    self.upper = upper = tt.as_tensor_variable(floatX(upper))
                    self.norm = (tt.sin(upper) - tt.sin(lower))
                    self.mean = (
                        (upper * tt.sin(upper) + tt.cos(upper) -
                         lower * tt.sin(lower) - tt.cos(lower)) / self.norm)

                    transform = pymc3.distributions.transforms.interval(lower, upper)
...
@@ -1247,7 +1278,9 @@ class Pymc3(Sampler):
                def logp(self, value):
                    upper = self.upper
                    lower = self.lower
                    return pymc3.distributions.dist_math.bound(
                        tt.log(tt.cos(value) / self.norm),
                        lower <= value, value <= upper)

            return Pymc3Cosine(key, lower=self.priors[key].minimum,
                               upper=self.priors[key].maximum)
        else:
...
@@ -1257,7 +1290,7 @@ class Pymc3(Sampler):
        """
        Map the tupak PowerLaw prior to a PyMC3 style function
        """
        from tupak.core.prior import PowerLaw

        # check prior is a PowerLaw
...
@@ -1289,11 +1322,11 @@ class Pymc3(Sampler):
                    self.alpha = alpha = tt.as_tensor_variable(floatX(alpha))

                    if falpha == -1:
                        self.norm = 1. / (tt.log(self.upper / self.lower))
                    else:
                        beta = (1. + self.alpha)
                        self.norm = 1. / (beta * (tt.pow(self.upper, beta) -
                                                  tt.pow(self.lower, beta)))

                    transform = pymc3.distributions.transforms.interval(lower, upper)
...
@@ -1304,7 +1337,9 @@ class Pymc3(Sampler):
                    lower = self.lower
                    alpha = self.alpha
                    return pymc3.distributions.dist_math.bound(
                        alpha * tt.log(value) + tt.log(self.norm),
                        lower <= value, value <= upper)

            return Pymc3PowerLaw(key, lower=self.priors[key].minimum,
                                 upper=self.priors[key].maximum,
                                 alpha=self.priors[key].alpha)
        else:
...
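For reference, the custom Theano distributions above sample, on [lower, upper], the densities

p_{\rm Sine}(\theta) = \frac{\sin\theta}{\cos(\mathrm{lower}) - \cos(\mathrm{upper})}, \qquad
p_{\rm Cosine}(\theta) = \frac{\cos\theta}{\sin(\mathrm{upper}) - \sin(\mathrm{lower})}

while Pymc3PowerLaw samples p(x) \propto x^{\alpha}, with the \alpha = -1 case reducing to the log-uniform density 1 / (x \ln(\mathrm{upper}/\mathrm{lower})), matching the PowerLaw prior earlier in this diff (normalisation constants do not affect the MCMC sampling itself).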
@@ -1350,13 +1385,13 @@ class Pymc3(Sampler):
            trace = pymc3.sample(self.draws, step=sm, **self.kwargs)

            nparams = len([key for key in self.priors.keys()
                           if self.priors[key].__class__.__name__ != 'DeltaFunction'])
            nsamples = len(trace) * self.chains

            self.result.samples = np.zeros((nsamples, nparams))
            count = 0
            for key in self.priors.keys():
                if self.priors[key].__class__.__name__ != 'DeltaFunction':
                    # ignore DeltaFunction variables
                    self.result.samples[:, count] = trace[key]
                    count += 1

            self.result.sampler_output = np.nan
...
@@ -1387,7 +1422,7 @@ class Pymc3(Sampler):
                    self.pymc3_priors[key] = self.priors[key].ln_prob(sampler=self)
                except RuntimeError:
                    raise RuntimeError(("Problem setting PyMC3 prior for ",
                                        "'{}'".format(key)))
            else:
                # use Prior distribution name
                distname = self.priors[key].__class__.__name__
...
@@ -1412,9 +1447,11 @@ class Pymc3(Sampler):
                        if targ in self.prior_map[distname]['argtransform']:
                            tfunc = self.prior_map[distname]['argtransform'][targ]
                        else:
                            def tfunc(x):
                                return x
                    else:
                        def tfunc(x):
                            return x

                    priorkwargs[parg] = tfunc(getattr(self.priors[key], targ))
                else:
...
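The lambda-to-def change above is the fix flake8 asks for under rule E731 (do not assign a lambda expression, use a def); both forms behave identically here, for example:

# flagged by flake8 (E731): assigning a lambda to a name
tfunc = lambda x: x          # noqa: E731

# the equivalent def, which is what the commit switches to
def tfunc_def(x):
    return x

assert tfunc(3) == tfunc_def(3) == 3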
@@ -1447,7 +1484,7 @@ class Pymc3(Sampler):
                    not hasattr(self.likelihood, 'function') or
                    not hasattr(self.likelihood, 'function_keys')):
                raise ValueError(
                    "Gaussian Likelihood does not have all the correct attributes!")

            if 'sigma' in self.pymc3_priors:
                # if sigma is suppled use that value
                if self.likelihood.sigma is None:
...
@@ -1471,7 +1508,7 @@ class Pymc3(Sampler):
                    not hasattr(self.likelihood, 'function') or
                    not hasattr(self.likelihood, 'function_keys')):
                raise ValueError(
                    "Poisson Likelihood does not have all the correct attributes!")

            for key in self.pymc3_priors:
                if key not in self.likelihood.function_keys:
                    raise ValueError(
                        "Prior key '{}' is not a function key!".format(key))
...
@@ -1497,7 +1534,7 @@ class Pymc3(Sampler):
                model = self.likelihood.function(self.likelihood.x,
                                                 **self.pymc3_priors)

                # set the distribution
                pymc3.Exponential('likelihood', lam=1. / model,
                                  observed=self.likelihood.y)
            elif self.likelihood.__class__.__name__ == 'StudentTLikelihood':
                # check required attributes exist
                if (not hasattr(self.likelihood, 'x') or
...
tupak/core/utils.py

@@ -104,7 +104,7 @@ def create_time_series(sampling_frequency, duration, starting_time=0.):
        float: An equidistant time series given the parameters
    """
    return np.arange(starting_time, starting_time + duration,
                     1. / sampling_frequency)


def ra_dec_to_theta_phi(ra, dec, gmst):
...
@@ -175,8 +175,8 @@ def create_frequency_series(sampling_frequency, duration):
    number_of_samples = int(np.round(number_of_samples))

    # prepare for FFT
    number_of_frequencies = (number_of_samples - 1) // 2
    delta_freq = 1. / duration

    frequencies = delta_freq * np.linspace(1, number_of_frequencies, number_of_frequencies)
...
@@ -207,14 +207,14 @@ def create_white_noise(sampling_frequency, duration):
    number_of_samples = duration * sampling_frequency
    number_of_samples = int(np.round(number_of_samples))

    delta_freq = 1. / duration

    frequencies = create_frequency_series(sampling_frequency, duration)

    norm1 = 0.5 * (1. / delta_freq) ** 0.5
    re1 = np.random.normal(0, norm1, len(frequencies))
    im1 = np.random.normal(0, norm1, len(frequencies))
    htilde1 = re1 + 1j * im1

    # convolve data with instrument transfer function
    otilde1 = htilde1 * 1.
...
@@ -260,7 +260,7 @@ def nfft(time_domain_strain, sampling_frequency):
        time_domain_strain = np.append(time_domain_strain, 0)
    LL = len(time_domain_strain)
    # frequency range
    frequency_array = sampling_frequency / 2 * np.linspace(0, 1, int(LL / 2 + 1))

    # calculate FFT
    # rfft computes the fft for real inputs
...
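In nfft above, the frequency array is built by hand; for an even number of samples it coincides with numpy's rfft frequency grid, which is an easy way to convince yourself the layout is right (illustrative check only, with made-up values):

import numpy as np

sampling_frequency = 2048.0
LL = 4096                                   # even number of samples
by_hand = sampling_frequency / 2 * np.linspace(0, 1, int(LL / 2 + 1))
from_numpy = np.fft.rfftfreq(LL, d=1.0 / sampling_frequency)
print(np.allclose(by_hand, from_numpy))     # True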
tupak/gw/calibration.py

@@ -107,4 +107,3 @@ class CubicSpline(Recalibrate):
        calibration_factor = (1 + delta_amplitude) * (2 + 1j * delta_phase) / (2 - 1j * delta_phase)

        return calibration_factor
tupak/gw/conversion.py

@@ -128,8 +128,10 @@ def convert_to_lal_binary_black_hole_parameters(parameters, search_keys, remove=
            converted_parameters['mass_ratio'] = \
                mass_1_and_chirp_mass_to_mass_ratio(parameters['mass_1'], parameters['chirp_mass'])
            temp = (parameters['chirp_mass'] / parameters['mass_1']) ** 5
            parameters['mass_ratio'] = (
                (2 * temp / 3 / ((51 * temp ** 2 - 12 * temp ** 3) ** 0.5 + 9 * temp)) ** (1 / 3) +
                (((51 * temp ** 2 - 12 * temp ** 3) ** 0.5 + 9 * temp) / 9 / 2 ** 0.5) ** (1 / 3))
...
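For context, temp = (chirp_mass / mass_1) ** 5 equals q**3 / (1 + q) for the mass ratio q = mass_2 / mass_1, so the mass ratio can equivalently be recovered by solving the cubic q**3 - temp * q - temp = 0 numerically. A purely illustrative cross-check of that relation (not the project's implementation) is:

import numpy as np

mass_1, mass_2 = 36.0, 29.0
q = mass_2 / mass_1
chirp_mass = (mass_1 * mass_2) ** 0.6 / (mass_1 + mass_2) ** 0.2
temp = (chirp_mass / mass_1) ** 5

# q is the single real root of q**3 - temp*q - temp = 0 in the physical range
roots = np.roots([1.0, 0.0, -temp, -temp])
real_root = roots[np.argmin(np.abs(roots.imag))].real
print(np.isclose(real_root, q))   # True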