
Commit 0094a607 authored by Gregory Ashton

Fix the flake8 failures

parent 913c16c7
Pipeline #30220 passed in 5 minutes and 41 seconds
@@ -41,6 +41,10 @@ python-3:
     - pip install flake8
   script:
     - python setup.py install
+    # Run pyflakes
+    - flake8 .
+    # Run tests and collect coverage data
+    - coverage --version
     - coverage erase
@@ -67,9 +71,6 @@ python-3:
     - make clean
     - make html
-    # Run pyflakes
-    - flake8 .
   artifacts:
     paths:
       - htmlcov/
...
@@ -4,7 +4,7 @@ import argparse
 def setup_command_line_args():
     parser = argparse.ArgumentParser(
         description="Plot corner plots from results files")
-    parser.add_argument("-r", "--results", nargs='+',
+    parser.add_argument("-r", "--results", nargs='+',
                         help="List of results files to use.")
     parser.add_argument("-f", "--filename", default=None,
                         help="Output file name.")
...
 [flake8]
-exclude = .git,docs,build,dist
+exclude = .git,docs,build,dist,test,examples,*__init__.py
 max-line-length = 160
 ignore = E129
@@ -22,8 +22,8 @@ def write_version_file(version):
     try:
         git_log = subprocess.check_output(
             ['git', 'log', '-1', '--pretty=%h %ai']).decode('utf-8')
-        git_diff = (subprocess.check_output(['git', 'diff', '.'])
-                    + subprocess.check_output(
+        git_diff = (subprocess.check_output(['git', 'diff', '.']) +
+                    subprocess.check_output(
                         ['git', 'diff', '--cached', '.'])).decode('utf-8')
         if git_diff == '':
             git_status = '(CLEAN) ' + git_log
...
@@ -3,4 +3,4 @@ import tupak.core.likelihood
 import tupak.core.prior
 import tupak.core.result
 import tupak.core.sampler
-import tupak.core.utils
\ No newline at end of file
+import tupak.core.utils
@@ -160,8 +160,8 @@ class GaussianLikelihood(Analytical1DLikelihood):
         return self.parameters.get('sigma', self.sigma)
     def __summed_log_likelihood(self, sigma):
-        return -0.5 * (np.sum((self.residual / sigma) ** 2)
-                       + self.n * np.log(2 * np.pi * sigma ** 2))
+        return -0.5 * (np.sum((self.residual / sigma) ** 2) +
+                       self.n * np.log(2 * np.pi * sigma ** 2))
 class PoissonLikelihood(Analytical1DLikelihood):
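For reference, the expression being re-wrapped here is the standard Gaussian log-likelihood of the residuals $r_i$ with noise amplitude $\sigma$ (a transcription of the code above, not part of the commit):

$$\ln L(\sigma) = -\frac{1}{2}\left[\sum_{i=1}^{n}\left(\frac{r_i}{\sigma}\right)^{2} + n\ln\left(2\pi\sigma^{2}\right)\right].$$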
@@ -314,8 +314,10 @@ class StudentTLikelihood(Analytical1DLikelihood):
         return self.parameters.get('nu', self.nu)
     def __summed_log_likelihood(self, nu):
-        return self.n * (gammaln((nu + 1.0) / 2.0) + .5 * np.log(self.lam / (nu * np.pi)) - gammaln(nu / 2.0)) \
-            - (nu + 1.0) / 2.0 * np.sum(np.log1p(self.lam * self.residual ** 2 / nu))
+        return (
+            self.n * (gammaln((nu + 1.0) / 2.0) + .5 * np.log(self.lam / (nu * np.pi)) -
+                      gammaln(nu / 2.0)) -
+            (nu + 1.0) / 2.0 * np.sum(np.log1p(self.lam * self.residual ** 2 / nu)))
 class JointLikelihood(Likelihood):
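Similarly, the Student-t expression above is, for $\nu$ degrees of freedom and precision $\lambda$:

$$\ln L(\nu) = n\left[\ln\Gamma\!\left(\frac{\nu+1}{2}\right) - \ln\Gamma\!\left(\frac{\nu}{2}\right) + \frac{1}{2}\ln\frac{\lambda}{\nu\pi}\right] - \frac{\nu+1}{2}\sum_{i=1}^{n}\ln\!\left(1+\frac{\lambda r_i^{2}}{\nu}\right).$$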
@@ -372,4 +374,3 @@ class JointLikelihood(Likelihood):
     def noise_log_likelihood(self):
         """ This is just the sum of the noise likelihoods of all parts of the joint likelihood"""
         return sum([likelihood.noise_log_likelihood() for likelihood in self.likelihoods])
@@ -10,7 +10,7 @@ from collections import OrderedDict
 from tupak.core.utils import logger
 from tupak.core import utils
-import tupak
+import tupak  # noqa
 class PriorSet(OrderedDict):
@@ -28,8 +28,8 @@ class PriorSet(OrderedDict):
         if isinstance(dictionary, dict):
             self.update(dictionary)
         elif type(dictionary) is str:
-            logger.debug('Argument "dictionary" is a string.'
-                         + ' Assuming it is intended as a file name.')
+            logger.debug('Argument "dictionary" is a string.' +
+                         ' Assuming it is intended as a file name.')
             self.read_in_file(dictionary)
         elif type(filename) is str:
             self.read_in_file(filename)
@@ -580,8 +580,9 @@ class PowerLaw(Prior):
         if self.alpha == -1:
             return np.nan_to_num(1 / val / np.log(self.maximum / self.minimum)) * in_prior
         else:
-            return np.nan_to_num(val ** self.alpha * (1 + self.alpha) / (self.maximum ** (1 + self.alpha)
-                                 - self.minimum ** (1 + self.alpha))) * in_prior
+            return np.nan_to_num(val ** self.alpha * (1 + self.alpha) /
+                                 (self.maximum ** (1 + self.alpha) -
+                                  self.minimum ** (1 + self.alpha))) * in_prior
     def ln_prob(self, val):
         """Return the logarithmic prior probability of val
@@ -600,11 +601,10 @@ class PowerLaw(Prior):
         if self.alpha == -1:
             normalising = 1. / np.log(self.maximum / self.minimum)
         else:
-            normalising = (1 + self.alpha) / (self.maximum ** (1 + self.alpha)
-                                              - self.minimum ** (
-                                                  1 + self.alpha))
+            normalising = (1 + self.alpha) / (self.maximum ** (1 + self.alpha) -
+                                              self.minimum ** (1 + self.alpha))
-        return (self.alpha * np.log(val) + np.log(normalising)) + np.log(1.*in_prior)
+        return (self.alpha * np.log(val) + np.log(normalising)) + np.log(1. * in_prior)
     def __repr__(self):
         """Call to helper method in the super class."""
@@ -645,7 +645,7 @@ class Uniform(Prior):
             float: Prior probability of val
         """
         return scipy.stats.uniform.pdf(val, loc=self.minimum,
-                                       scale=self.maximum-self.minimum)
+                                       scale=self.maximum - self.minimum)
     def ln_prob(self, val):
         """Return the log prior probability of val
@@ -659,7 +659,7 @@ class Uniform(Prior):
             float: log probability of val
         """
         return scipy.stats.uniform.logpdf(val, loc=self.minimum,
-                                          scale=self.maximum-self.minimum)
+                                          scale=self.maximum - self.minimum)
 class LogUniform(PowerLaw):
@@ -821,7 +821,7 @@ class Gaussian(Prior):
 class Normal(Gaussian):
     def __init__(self, mu, sigma, name=None, latex_label=None):
         """A synonym for the Gaussian distribution.
@@ -899,7 +899,7 @@ class TruncatedGaussian(Prior):
         """
         in_prior = (val >= self.minimum) & (val <= self.maximum)
         return np.exp(-(self.mu - val) ** 2 / (2 * self.sigma ** 2)) / (
-            2 * np.pi) ** 0.5 / self.sigma / self.normalisation * in_prior
+                2 * np.pi) ** 0.5 / self.sigma / self.normalisation * in_prior
     def __repr__(self):
         """Call to helper method in the super class."""
@@ -907,7 +907,7 @@ class TruncatedGaussian(Prior):
 class TruncatedNormal(TruncatedGaussian):
     def __init__(self, mu, sigma, minimum, maximum, name=None, latex_label=None):
         """A synonym for the TruncatedGaussian distribution.
@@ -943,7 +943,7 @@ class HalfGaussian(TruncatedGaussian):
         See superclass
         """
         TruncatedGaussian.__init__(self, 0., sigma, minimum=0., maximum=np.inf, name=name, latex_label=latex_label)
     def __repr__(self):
         """Call to helper method in the super class."""
         return Prior._subclass_repr_helper(self, subclass_args=['sigma'])
@@ -1109,7 +1109,7 @@ class StudentT(Prior):
         See superclass
         """
         Prior.__init__(self, name, latex_label)
         if df <= 0. or scale <= 0.:
             raise ValueError("For the StudentT prior the number of degrees of freedom and scale must be positive")
@@ -1215,7 +1215,7 @@ class Beta(Prior):
             return spdf
         if isinstance(val, np.ndarray):
-            pdf = -np.inf*np.ones(len(val))
+            pdf = -np.inf * np.ones(len(val))
             pdf[np.isfinite(spdf)] = spdf[np.isfinite]
             return spdf
         else:
@@ -1437,7 +1437,7 @@ class ChiSquared(Gamma):
         if nu <= 0 or not isinstance(nu, int):
             raise ValueError("For the ChiSquared prior the number of degrees of freedom must be a positive integer")
-        Gamma.__init__(self, name=name, k=nu/2., theta=2., latex_label=latex_label)
+        Gamma.__init__(self, name=name, k=nu / 2., theta=2., latex_label=latex_label)
 class Interped(Prior):
...
@@ -372,18 +372,24 @@ class Result(dict):
     def construct_cbc_derived_parameters(self):
         """ Construct widely used derived parameters of CBCs """
-        self.posterior['mass_chirp'] = (self.posterior.mass_1 * self.posterior.mass_2) ** 0.6 / (
-            self.posterior.mass_1 + self.posterior.mass_2) ** 0.2
+        self.posterior['mass_chirp'] = (
+            (self.posterior.mass_1 * self.posterior.mass_2) ** 0.6 / (
+                self.posterior.mass_1 + self.posterior.mass_2) ** 0.2)
         self.posterior['q'] = self.posterior.mass_2 / self.posterior.mass_1
-        self.posterior['eta'] = (self.posterior.mass_1 * self.posterior.mass_2) / (
-            self.posterior.mass_1 + self.posterior.mass_2) ** 2
-        self.posterior['chi_eff'] = (self.posterior.a_1 * np.cos(self.posterior.tilt_1)
-                                     + self.posterior.q * self.posterior.a_2 * np.cos(self.posterior.tilt_2)) / (
-                                         1 + self.posterior.q)
-        self.posterior['chi_p'] = np.maximum(self.posterior.a_1 * np.sin(self.posterior.tilt_1),
-                                             (4 * self.posterior.q + 3) / (3 * self.posterior.q + 4) * self.posterior.q
-                                             * self.posterior.a_2 * np.sin(self.posterior.tilt_2))
+        self.posterior['eta'] = (
+            (self.posterior.mass_1 * self.posterior.mass_2) / (
+                self.posterior.mass_1 + self.posterior.mass_2) ** 2)
+        self.posterior['chi_eff'] = (
+            self.posterior.a_1 * np.cos(self.posterior.tilt_1) +
+            self.posterior.q * self.posterior.a_2 *
+            np.cos(self.posterior.tilt_2)) / (1 + self.posterior.q)
+        self.posterior['chi_p'] = np.maximum(
+            self.posterior.a_1 * np.sin(self.posterior.tilt_1),
+            (4 * self.posterior.q + 3) / (3 * self.posterior.q + 4) *
+            self.posterior.q * self.posterior.a_2 *
+            np.sin(self.posterior.tilt_2))
     def check_attribute_match_to_other_object(self, name, other_object):
         """ Check attribute name exists in other_object and is the same
...
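For reference, the derived quantities constructed above are the standard compact-binary definitions, with $m_1, m_2$ the component masses, $a_i$ the dimensionless spin magnitudes, and $t_i$ the spin tilt angles:

$$\mathcal{M} = \frac{(m_1 m_2)^{3/5}}{(m_1 + m_2)^{1/5}}, \qquad q = \frac{m_2}{m_1}, \qquad \eta = \frac{m_1 m_2}{(m_1 + m_2)^{2}},$$

$$\chi_{\mathrm{eff}} = \frac{a_1\cos t_1 + q\,a_2\cos t_2}{1+q}, \qquad \chi_p = \max\!\left(a_1\sin t_1,\; \frac{4q+3}{3q+4}\,q\,a_2\sin t_2\right).$$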
(The diff for one file is collapsed and not shown.)
@@ -104,7 +104,7 @@ def create_time_series(sampling_frequency, duration, starting_time=0.):
         float: An equidistant time series given the parameters
     """
-    return np.arange(starting_time, starting_time+duration, 1./sampling_frequency)
+    return np.arange(starting_time, starting_time + duration, 1. / sampling_frequency)
 def ra_dec_to_theta_phi(ra, dec, gmst):
@@ -175,8 +175,8 @@ def create_frequency_series(sampling_frequency, duration):
     number_of_samples = int(np.round(number_of_samples))
     # prepare for FFT
-    number_of_frequencies = (number_of_samples-1)//2
-    delta_freq = 1./duration
+    number_of_frequencies = (number_of_samples - 1) // 2
+    delta_freq = 1. / duration
     frequencies = delta_freq * np.linspace(1, number_of_frequencies, number_of_frequencies)
@@ -207,14 +207,14 @@ def create_white_noise(sampling_frequency, duration):
     number_of_samples = duration * sampling_frequency
     number_of_samples = int(np.round(number_of_samples))
-    delta_freq = 1./duration
+    delta_freq = 1. / duration
     frequencies = create_frequency_series(sampling_frequency, duration)
-    norm1 = 0.5*(1./delta_freq)**0.5
+    norm1 = 0.5 * (1. / delta_freq)**0.5
     re1 = np.random.normal(0, norm1, len(frequencies))
     im1 = np.random.normal(0, norm1, len(frequencies))
-    htilde1 = re1 + 1j*im1
+    htilde1 = re1 + 1j * im1
     # convolve data with instrument transfer function
     otilde1 = htilde1 * 1.
@@ -260,7 +260,7 @@ def nfft(time_domain_strain, sampling_frequency):
         time_domain_strain = np.append(time_domain_strain, 0)
     LL = len(time_domain_strain)
     # frequency range
-    frequency_array = sampling_frequency / 2 * np.linspace(0, 1, int(LL/2+1))
+    frequency_array = sampling_frequency / 2 * np.linspace(0, 1, int(LL / 2 + 1))
     # calculate FFT
     # rfft computes the fft for real inputs
...
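For reference, since a real FFT is used, the one-sided frequency grid built here runs from zero to the Nyquist frequency $f_s/2$ in $L/2 + 1$ steps:

$$f_k = \frac{f_s}{2}\,\frac{k}{L/2}, \qquad k = 0, 1, \ldots, L/2, \qquad \Delta f = \frac{f_s}{L}.$$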
@@ -107,4 +107,3 @@ class CubicSpline(Recalibrate):
         calibration_factor = (1 + delta_amplitude) * (2 + 1j * delta_phase) / (2 - 1j * delta_phase)
         return calibration_factor
@@ -128,8 +128,10 @@ def convert_to_lal_binary_black_hole_parameters(parameters, search_keys, remove=
         converted_parameters['mass_ratio'] = \
             mass_1_and_chirp_mass_to_mass_ratio(parameters['mass_1'], parameters['chirp_mass'])
         temp = (parameters['chirp_mass'] / parameters['mass_1']) ** 5
-        parameters['mass_ratio'] = (2 * temp / 3 / ((51 * temp ** 2 - 12 * temp ** 3) ** 0.5 + 9 * temp)) ** (
-            1 / 3) + (((51 * temp ** 2 - 12 * temp ** 3) ** 0.5 + 9 * temp) / 9 / 2 ** 0.5) ** (1 / 3)
+        parameters['mass_ratio'] = (
+            (2 * temp / 3 / (
+                (51 * temp ** 2 - 12 * temp ** 3) ** 0.5 + 9 * temp)) ** (1 / 3) +
+            (((51 * temp ** 2 - 12 * temp ** 3) ** 0.5 + 9 * temp) / 9 / 2 ** 0.5) ** (1 / 3))
         if remove:
             added_keys.append('chirp_mass')
     elif 'symmetric_mass_ratio' in converted_parameters.keys() and 'symmetric_mass_ratio' not in added_keys:
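For context, with $T \equiv (\mathcal{M}/m_1)^5$ (the `temp` variable above), the chirp-mass definition gives

$$\mathcal{M} = m_1\,\frac{q^{3/5}}{(1+q)^{1/5}} \;\Longrightarrow\; q^{3} - T\,q - T = 0,$$

and the two cube-root terms are intended as a closed-form (Cardano-style) root of this depressed cubic in the mass ratio $q$.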
@@ -462,12 +464,12 @@ def generate_component_spins(sample):
     output_sample['iota'], output_sample['spin_1x'], output_sample['spin_1y'], output_sample['spin_1z'], \
         output_sample['spin_2x'], output_sample['spin_2y'], output_sample['spin_2z'] = \
         lalsim.SimInspiralTransformPrecessingNewInitialConditions(
-            output_sample['iota'], output_sample['phi_jl'],
-            output_sample['tilt_1'], output_sample['tilt_2'],
-            output_sample['phi_12'], output_sample['a_1'], output_sample['a_2'],
-            output_sample['mass_1'] * tupak.core.utils.solar_mass,
-            output_sample['mass_2'] * tupak.core.utils.solar_mass,
-            output_sample['reference_frequency'], output_sample['phase'])
+        output_sample['iota'], output_sample['phi_jl'],
+        output_sample['tilt_1'], output_sample['tilt_2'],
+        output_sample['phi_12'], output_sample['a_1'], output_sample['a_2'],
+        output_sample['mass_1'] * tupak.core.utils.solar_mass,
+        output_sample['mass_2'] * tupak.core.utils.solar_mass,
+        output_sample['reference_frequency'], output_sample['phase'])
     output_sample['phi_1'] = np.arctan(output_sample['spin_1y'] / output_sample['spin_1x'])
     output_sample['phi_2'] = np.arctan(output_sample['spin_2y'] / output_sample['spin_2x'])
...
@@ -1247,7 +1247,7 @@ class Interferometer(object):
         if not self.strain_data.time_within_data(parameters['geocent_time']):
             logger.warning(
                 'Injecting signal outside segment, start_time={}, merger time={}.'
-                    .format(self.strain_data.start_time, parameters['geocent_time']))
+                .format(self.strain_data.start_time, parameters['geocent_time']))
         signal_ifo = self.get_detector_response(injection_polarizations, parameters)
         if np.shape(self.frequency_domain_strain).__eq__(np.shape(signal_ifo)):
@@ -1305,9 +1305,9 @@ class Interferometer(object):
         e_h = np.array([np.cos(self.__latitude) * np.cos(self.__longitude),
                         np.cos(self.__latitude) * np.sin(self.__longitude), np.sin(self.__latitude)])
-        return np.cos(arm_tilt) * np.cos(arm_azimuth) * e_long + \
-            np.cos(arm_tilt) * np.sin(arm_azimuth) * e_lat + \
-            np.sin(arm_tilt) * e_h
+        return (np.cos(arm_tilt) * np.cos(arm_azimuth) * e_long +
+                np.cos(arm_tilt) * np.sin(arm_azimuth) * e_lat +
+                np.sin(arm_tilt) * e_h)
     @property
     def amplitude_spectral_density_array(self):
@@ -1331,8 +1331,8 @@ class Interferometer(object):
             array_like: An array representation of the PSD
         """
-        return self.power_spectral_density.power_spectral_density_interpolated(self.frequency_array) \
-            * self.strain_data.window_factor
+        return (self.power_spectral_density.power_spectral_density_interpolated(self.frequency_array) *
+                self.strain_data.window_factor)
     @property
     def frequency_array(self):
@@ -1786,7 +1786,7 @@ def get_empty_interferometer(name):
     try:
         interferometer = load_interferometer(filename)
         return interferometer
-    except FileNotFoundError:
+    except OSError:
         logger.warning('Interferometer {} not implemented'.format(name))
...
@@ -110,8 +110,8 @@ class GravitationalWaveTransient(likelihood.Likelihood):
                     'Prior not provided for {}, using the BBH default.'.format(key))
                 if key == 'geocent_time':
                     self.prior[key] = Uniform(
-                        self.interferometers.start_time,
-                        self.interferometers.start_time + self.interferometers.duration)
+                    self.interferometers.start_time,
+                    self.interferometers.start_time + self.interferometers.duration)
                 else:
                     self.prior[key] = BBHPriorSet()[key]
@@ -172,9 +172,9 @@ class GravitationalWaveTransient(likelihood.Likelihood):
             if self.time_marginalization:
                 matched_filter_snr_squared_tc_array +=\
                     4 / self.waveform_generator.duration * np.fft.fft(
-                        signal_ifo.conjugate()[0:-1]
-                        * interferometer.frequency_domain_strain[0:-1]
-                        / interferometer.power_spectral_density_array[0:-1])
+                        signal_ifo.conjugate()[0:-1] *
+                        interferometer.frequency_domain_strain[0:-1] /
+                        interferometer.power_spectral_density_array[0:-1])
         if self.time_marginalization:
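For reference, the array accumulated here is the matched-filter inner product evaluated on a grid of coalescence times via the FFT (up to the discrete-transform sign convention),

$$\langle h \mid d\rangle(t_c) = \frac{4}{T}\sum_{k}\frac{\tilde h_k^{*}\,\tilde d_k}{S_k}\,e^{-2\pi i k t_c/T},$$

with $T$ the segment duration and $S_k$ the one-sided noise power spectral density.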
@@ -190,11 +190,12 @@ class GravitationalWaveTransient(likelihood.Likelihood):
                     rho_mf_ref_tc_array.real, rho_opt_ref)
                 log_l = logsumexp(dist_marged_log_l_tc_array) + self.tc_log_norm
             elif self.phase_marginalization:
-                log_l = logsumexp(self._bessel_function_interped(abs(matched_filter_snr_squared_tc_array)))\
-                    - optimal_snr_squared / 2 + self.tc_log_norm
+                log_l = (
+                    logsumexp(self._bessel_function_interped(abs(matched_filter_snr_squared_tc_array))) -
+                    optimal_snr_squared / 2 + self.tc_log_norm)
             else:
-                log_l = logsumexp(matched_filter_snr_squared_tc_array.real) + self.tc_log_norm\
-                    - optimal_snr_squared / 2
+                log_l = (logsumexp(matched_filter_snr_squared_tc_array.real) +
+                         self.tc_log_norm - optimal_snr_squared / 2)
         elif self.distance_marginalization:
             rho_mf_ref, rho_opt_ref = self._setup_rho(matched_filter_snr_squared, optimal_snr_squared)
@@ -212,9 +213,9 @@ class GravitationalWaveTransient(likelihood.Likelihood):
         return log_l.real
     def _setup_rho(self, matched_filter_snr_squared, optimal_snr_squared):
-        rho_opt_ref = optimal_snr_squared.real * \
-            self.waveform_generator.parameters['luminosity_distance'] ** 2 \
-            / self._ref_dist ** 2.
+        rho_opt_ref = (optimal_snr_squared.real *
+                       self.waveform_generator.parameters['luminosity_distance'] ** 2 /
+                       self._ref_dist ** 2.)
         rho_mf_ref = matched_filter_snr_squared * \
             self.waveform_generator.parameters['luminosity_distance'] / self._ref_dist
         return rho_mf_ref, rho_opt_ref
@@ -273,8 +274,8 @@ class GravitationalWaveTransient(likelihood.Likelihood):
     def _setup_phase_marginalization(self):
         self._bessel_function_interped = interp1d(
-            np.logspace(-5, 10, int(1e6)), np.log([i0e(snr) for snr in np.logspace(-5, 10, int(1e6))])
-            + np.logspace(-5, 10, int(1e6)), bounds_error=False, fill_value=(0, np.nan))
+            np.logspace(-5, 10, int(1e6)), np.log([i0e(snr) for snr in np.logspace(-5, 10, int(1e6))]) +
+            np.logspace(-5, 10, int(1e6)), bounds_error=False, fill_value=(0, np.nan))
     def _setup_time_marginalization(self):
         delta_tc = 2 / self.waveform_generator.sampling_frequency
@@ -360,8 +361,8 @@ class BasicGravitationalWaveTransient(likelihood.Likelihood):
         log_l = - 2. / self.waveform_generator.duration * np.vdot(
             interferometer.frequency_domain_strain - signal_ifo,
-            (interferometer.frequency_domain_strain - signal_ifo)
-            / interferometer.power_spectral_density_array)
+            (interferometer.frequency_domain_strain - signal_ifo) /
+            interferometer.power_spectral_density_array)
         return log_l.real
...
@@ -111,9 +111,10 @@ def lal_binary_black_hole(
     return {'plus': h_plus, 'cross': h_cross}
 def lal_eccentric_binary_black_hole_no_spins(
-    frequency_array, mass_1, mass_2, eccentricity, luminosity_distance, iota, phase, ra, dec,
-    geocent_time, psi, **kwargs):
+        frequency_array, mass_1, mass_2, eccentricity, luminosity_distance, iota, phase, ra, dec,
+        geocent_time, psi, **kwargs):
     """ Eccentric binary black hole waveform model using lalsimulation (EccentricFD)
     Parameters
@@ -147,7 +148,7 @@ def lal_eccentric_binary_black_hole_no_spins(
     -------
     dict: A dictionary with the plus and cross polarisation strain modes
     """
     waveform_kwargs = dict(waveform_approximant='EccentricFD', reference_frequency=10.0,
                            minimum_frequency=10.0)
     waveform_kwargs.update(kwargs)
@@ -161,14 +162,14 @@ def lal_eccentric_binary_black_hole_no_spins(
     luminosity_distance = luminosity_distance * 1e6 * utils.parsec
     mass_1 = mass_1 * utils.solar_mass
     mass_2 = mass_2 * utils.solar_mass
     spin_1x = 0.0
     spin_1y = 0.0
-    spin_1z = 0.0
+    spin_1z = 0.0
     spin_2x = 0.0
     spin_2y = 0.0
     spin_2z = 0.0
     longitude_ascending_nodes = 0.0
     mean_per_ano = 0.0
@@ -193,21 +194,20 @@ def lal_eccentric_binary_black_hole_no_spins(
 def sinegaussian(frequency_array, hrss, Q, frequency, ra, dec, geocent_time, psi):
-    tau = Q / (np.sqrt(2.0)*np.pi*frequency)
-    temp = Q / (4.0*np.sqrt(np.pi)*frequency)
-    t = geocent_time
+    tau = Q / (np.sqrt(2.0) * np.pi * frequency)
+    temp = Q / (4.0 * np.sqrt(np.pi) * frequency)
     fm = frequency_array - frequency
     fp = frequency_array + frequency
-    h_plus = ((hrss / np.sqrt(temp * (1+np.exp(-Q**2))))
-              * ((np.sqrt(np.pi)*tau)/2.0)
-              * (np.exp(-fm**2 * np.pi**2 * tau**2)
-                 + np.exp(-fp**2 * np.pi**2 * tau**2)))
+    h_plus = ((hrss / np.sqrt(temp * (1 + np.exp(-Q**2)))) *
+              ((np.sqrt(np.pi) * tau) / 2.0) *
+              (np.exp(-fm**2 * np.pi**2 * tau**2) +
+               np.exp(-fp**2 * np.pi**2 * tau**2)))
-    h_cross = (-1j*(hrss / np.sqrt(temp * (1-np.exp(-Q**2))))
-               * ((np.sqrt(np.pi)*tau)/2.0)
-               * (np.exp(-fm**2 * np.pi**2 * tau**2)
-                  - np.exp(-fp**2 * np.pi**2 * tau**2)))
+    h_cross = (-1j * (hrss / np.sqrt(temp * (1 - np.exp(-Q**2)))) *
+               ((np.sqrt(np.pi) * tau) / 2.0) *
+               (np.exp(-fm**2 * np.pi**2 * tau**2) -
+                np.exp(-fp**2 * np.pi**2 * tau**2)))
     return{'plus': h_plus, 'cross': h_cross}
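For reference, these expressions are the frequency-domain sine-Gaussian, with central frequency $f_0$ (the `frequency` argument), $\tau = Q/(\sqrt{2}\,\pi f_0)$, and $T = Q/(4\sqrt{\pi}\,f_0)$ (the `temp` variable):

$$\tilde h_{+}(f) = \frac{h_{\mathrm{rss}}}{\sqrt{T\,(1+e^{-Q^{2}})}}\;\frac{\sqrt{\pi}\,\tau}{2}\left[e^{-\pi^{2}\tau^{2}(f-f_0)^{2}} + e^{-\pi^{2}\tau^{2}(f+f_0)^{2}}\right],$$

$$\tilde h_{\times}(f) = \frac{-i\,h_{\mathrm{rss}}}{\sqrt{T\,(1-e^{-Q^{2}})}}\;\frac{\sqrt{\pi}\,\tau}{2}\left[e^{-\pi^{2}\tau^{2}(f-f_0)^{2}} - e^{-\pi^{2}\tau^{2}(f+f_0)^{2}}\right].$$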
@@ -223,8 +223,8 @@ def supernova(
     # waveform in file at 10kpc
     scaling = 1e-3 * (10.0 / luminosity_distance)
-    h_plus = scaling * (realhplus + 1.0j*imaghplus)
-    h_cross = scaling * (realhcross + 1.0j*imaghcross)
+    h_plus = scaling * (realhplus + 1.0j * imaghplus)
+    h_cross = scaling * (realhcross + 1.0j * imaghcross)
     return {'plus': h_plus, 'cross': h_cross}
@@ -236,18 +236,18 @@ def supernova_pca_model(
     realPCs = kwargs['realPCs']
     imagPCs = kwargs['imagPCs']
-    pc1 = realPCs[:, 0] + 1.0j*imagPCs[:, 0]
-    pc2 = realPCs[:, 1] + 1.0j*imagPCs[:, 1]
-    pc3 = realPCs[:, 2] + 1.0j*imagPCs[:, 2]
-    pc4 = realPCs[:, 3] + 1.0j*imagPCs[:, 3]
-    pc5 = realPCs[:, 4] + 1.0j*imagPCs[:, 5]
+    pc1 = realPCs[:, 0] + 1.0j * imagPCs[:, 0]
+    pc2 = realPCs[:, 1] + 1.0j * imagPCs[:, 1]
+    pc3 = realPCs[:, 2] + 1.0j * imagPCs[:, 2]
+    pc4 = realPCs[:, 3] + 1.0j * imagPCs[:, 3]
+    pc5 = realPCs[:, 4] + 1.0j * imagPCs[:, 5]
     # file at 10kpc
     scaling = 1e-23 * (10.0 / luminosity_distance)
-    h_plus = scaling * (pc_coeff1*pc1 + pc_coeff2*pc2 + pc_coeff3*pc3
-                        + pc_coeff4*pc4 + pc_coeff5*pc5)
-    h_cross = scaling * (pc_coeff1*pc1 + pc_coeff2*pc2 + pc_coeff3*pc3
-                         + pc_coeff4*pc4 + pc_coeff5*pc5)
+    h_plus = scaling * (pc_coeff1 * pc1 + pc_coeff2 * pc2 + pc_coeff3 * pc3 +
+                        pc_coeff4 * pc4 + pc_coeff5 * pc5)
+    h_cross = scaling * (pc_coeff1 * pc1 + pc_coeff2 * pc2 + pc_coeff3 * pc3 +
+                         pc_coeff4 * pc4 + pc_coeff5 * pc5)
     return {'plus': h_plus, 'cross': h_cross}
@@ -3,11 +3,9 @@ import os
 import numpy as np
-from ..core.utils import (gps_time_to_gmst, ra_dec_to_theta_phi, speed_of_light,
-                          nfft, logger)
+from ..core.utils import (gps_time_to_gmst, ra_dec_to_theta_phi, speed_of_light, logger)
 try:
     from gwpy.signal import filter_design
     from gwpy.timeseries import TimeSeries
 except ImportError:
     logger.warning("You do not have gwpy installed currently. You will "
@@ -158,8 +156,8 @@ def get_vertex_position_geocentric(latitude, longitude, elevation):
     """
     semi_major_axis = 6378137  # for ellipsoid model of Earth, in m
     semi_minor_axis = 6356752.314  # in m
-    radius = semi_major_axis**2 * (semi_major_axis**2 * np.cos(latitude)**2
-                                   + semi_minor_axis**2 * np.sin(latitude)**2)**(-0.5)
+    radius = semi_major_axis**2 * (semi_major_axis**2 * np.cos(latitude)**2 +
+                                   semi_minor_axis**2 * np.sin(latitude)**2)**(-0.5)
     x_comp = (radius + elevation) * np.cos(latitude) * np.cos(longitude)
     y_comp = (radius + elevation) * np.cos(latitude) * np.sin(longitude)
     z_comp = ((semi_minor_axis / semi_major_axis)**2 * radius + elevation) * np.sin(latitude)
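For reference, `radius` here is the prime-vertical radius of curvature of the ellipsoid,

$$N(\phi) = \frac{a^{2}}{\sqrt{a^{2}\cos^{2}\phi + b^{2}\sin^{2}\phi}} = \frac{a}{\sqrt{1 - e^{2}\sin^{2}\phi}},$$

with $a$, $b$ the semi-major and semi-minor axes and $\phi$ the latitude; the `x_comp`, `y_comp`, `z_comp` lines are the standard geodetic-to-geocentric (ECEF) conversion.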
@@ -282,8 +280,12 @@ def get_event_time(event):
     event_time: float
         Merger time
     """
-    event_times = {'150914': 1126259462.422, '151012': 1128678900.4443, '151226': 1135136350.65,
-                   '170104': 1167559936.5991, '170608': 1180922494.4902, '170814': 1186741861.5268,
+    event_times = {'150914': 1126259462.422,
+                   '151012': 1128678900.4443,
+                   '151226': 1135136350.65,
+                   '170104': 1167559936.5991,
+                   '170608': 1180922494.4902,
+                   '170814': 1186741861.5268,
                    '170817': 1187008882.4457}
     if 'GW' or 'LVT' in event:
         event = event[-6:]
...
@@ -43,8 +43,8 @@ class HyperparameterLikelihood(Likelihood):
     def log_likelihood(self):
         self.hyper_prior.parameters.update(self.parameters)
-        log_l = np.sum(np.log(np.sum(self.hyper_prior.prob(self.data)
-                       / self.sampling_prior.prob(self.data), axis=-1))) + self.log_factor
+        log_l = np.sum(np.log(np.sum(self.hyper_prior.prob(self.data) /
+                                     self.sampling_prior.prob(self.data), axis=-1))) + self.log_factor
        return np.nan_to_num(log_l)
     def resample_posteriors(self, max_samples=None):
...
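For reference, this is the usual importance-sampled hierarchical likelihood over events $i$ and posterior samples $j$,

$$\ln L(\Lambda) = \sum_{i}\ln\sum_{j}\frac{p(\theta_{ij}\mid\Lambda)}{\pi(\theta_{ij})} + \mathrm{const},$$

where $p(\theta\mid\Lambda)$ is `hyper_prior`, $\pi(\theta)$ is `sampling_prior`, and the constant corresponds to `log_factor`.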