From 9a412584be789a47718ffe93639edf00eb80c641 Mon Sep 17 00:00:00 2001 From: Leo Singer Date: Thu, 7 Feb 2019 12:47:28 -0500 Subject: [PATCH] Remove old unmaintained versions of BAYESTAR-related files These modules have moved to ligo.skymap. The copies in LALInference are outdated and unmaintained. Delete them to remove the risk of people accidentally using obsolete code and getting unreliable results. --- lalinference/.gitignore | 3 - lalinference/configure.ac | 1 - .../debian/python-lalinference.install | 1 - lalinference/lalinference.spec.in | 1 - lalinference/python/Makefile.am | 1 - .../python/lalinference/bayestar/Makefile.am | 23 - .../python/lalinference/bayestar/_sky_map.c | 458 -------- .../python/lalinference/bayestar/command.py | 527 ---------- .../python/lalinference/bayestar/decorator.py | 82 -- .../python/lalinference/bayestar/filter.py | 720 ------------- .../python/lalinference/bayestar/ligolw.py | 133 --- .../lalinference/bayestar/postprocess.py | 977 ------------------ .../python/lalinference/bayestar/sky_map.py | 364 ------- .../python/lalinference/bayestar/timing.py | 215 ---- .../python/lalinference/io/Makefile.am | 2 - .../python/lalinference/io/events/Makefile.am | 20 - .../python/lalinference/io/events/__init__.py | 35 - .../python/lalinference/io/events/base.py | 127 --- .../io/events/detector_disabled.py | 79 -- .../python/lalinference/io/events/gracedb.py | 55 - .../python/lalinference/io/events/hdf.py | 237 ----- .../python/lalinference/io/events/ligolw.py | 287 ----- .../python/lalinference/io/events/magic.py | 86 -- .../python/lalinference/io/events/sqlite.py | 53 - .../python/lalinference_average_skymaps.py | 84 -- 25 files changed, 4571 deletions(-) delete mode 100644 lalinference/python/lalinference/bayestar/_sky_map.c delete mode 100644 lalinference/python/lalinference/bayestar/command.py delete mode 100644 lalinference/python/lalinference/bayestar/decorator.py delete mode 100644 lalinference/python/lalinference/bayestar/filter.py delete mode 100755 lalinference/python/lalinference/bayestar/ligolw.py delete mode 100644 lalinference/python/lalinference/bayestar/postprocess.py delete mode 100644 lalinference/python/lalinference/bayestar/timing.py delete mode 100644 lalinference/python/lalinference/io/events/Makefile.am delete mode 100644 lalinference/python/lalinference/io/events/__init__.py delete mode 100644 lalinference/python/lalinference/io/events/base.py delete mode 100644 lalinference/python/lalinference/io/events/detector_disabled.py delete mode 100644 lalinference/python/lalinference/io/events/gracedb.py delete mode 100644 lalinference/python/lalinference/io/events/hdf.py delete mode 100644 lalinference/python/lalinference/io/events/ligolw.py delete mode 100644 lalinference/python/lalinference/io/events/magic.py delete mode 100644 lalinference/python/lalinference/io/events/sqlite.py delete mode 100644 lalinference/python/lalinference_average_skymaps.py diff --git a/lalinference/.gitignore b/lalinference/.gitignore index 0ce090abb2..3f1a88ec75 100644 --- a/lalinference/.gitignore +++ b/lalinference/.gitignore @@ -57,11 +57,8 @@ python/lalinference/_bayespputils.so python/lalinference/_distance.so python/lalinference/_lalinference* python/lalinference/_moc.so -python/lalinference/bayestar/_sky_map.so -python/lalinference/bayestar/sky_map.so python/lalinference/git_version.py python/lalinference/lalinference.py -python/lalinference_average_skymaps python/lalinference_burst_pp_pipe python/lalinference_coherence_test 
python/lalinference_compute_roq_weights diff --git a/lalinference/configure.ac b/lalinference/configure.ac index 2dede00c2b..e32f774341 100644 --- a/lalinference/configure.ac +++ b/lalinference/configure.ac @@ -20,7 +20,6 @@ AC_CONFIG_FILES([ \ python/lalinference/bayestar/Makefile \ python/lalinference/imrtgr/Makefile \ python/lalinference/io/Makefile \ - python/lalinference/io/events/Makefile \ python/lalinference/plot/Makefile \ python/lalinference/popprior/Makefile \ python/lalinference/rapid_pe/Makefile \ diff --git a/lalinference/debian/python-lalinference.install b/lalinference/debian/python-lalinference.install index 544d813998..82ebba54a8 100644 --- a/lalinference/debian/python-lalinference.install +++ b/lalinference/debian/python-lalinference.install @@ -1,6 +1,5 @@ usr/bin/cbcBayes* usr/bin/imrtgr_* -usr/bin/lalinference_average_skymaps usr/bin/lalinference_burst_pp_pipe usr/bin/lalinference_coherence_test usr/bin/lalinference_compute_roq_weights diff --git a/lalinference/lalinference.spec.in b/lalinference/lalinference.spec.in index 42925ea2c4..a12042ec1c 100644 --- a/lalinference/lalinference.spec.in +++ b/lalinference/lalinference.spec.in @@ -182,7 +182,6 @@ rm -Rf ${RPM_BUILD_DIR}/%{name}-%{version}%{?nightly:-%{nightly}} %license COPYING %{_bindir}/cbcBayes* %{_bindir}/imrtgr_* -%{_bindir}/lalinference_average_skymaps %{_bindir}/lalinference_burst_pp_pipe %{_bindir}/lalinference_coherence_test %{_bindir}/lalinference_compute_roq_weights diff --git a/lalinference/python/Makefile.am b/lalinference/python/Makefile.am index 07f780d5e9..2ceecb4e91 100644 --- a/lalinference/python/Makefile.am +++ b/lalinference/python/Makefile.am @@ -15,7 +15,6 @@ pybin_scripts = \ rapidpe_compute_intrinsic_grid \ rapidpe_calculate_overlap \ imrtgr_imr_consistency_test \ - lalinference_average_skymaps \ lalinference_burst_pp_pipe \ lalinference_coherence_test \ lalinference_compute_roq_weights \ diff --git a/lalinference/python/lalinference/bayestar/Makefile.am b/lalinference/python/lalinference/bayestar/Makefile.am index 4442fdcab4..a763729f96 100644 --- a/lalinference/python/lalinference/bayestar/Makefile.am +++ b/lalinference/python/lalinference/bayestar/Makefile.am @@ -9,31 +9,8 @@ pymoduledir = $(pkgpythondir)/bayestar pymodule_PYTHON = \ __init__.py \ - command.py \ - decorator.py \ deprecation.py \ - filter.py \ - ligolw.py \ sky_map.py \ - postprocess.py \ - timing.py \ $(END_OF_LIST) -if HAVE_CHEALPIX -if SWIG_BUILD_PYTHON -pymodule_LTLIBRARIES = _sky_map.la - -_sky_map_la_CPPFLAGS = $(AM_CPPFLAGS) $(SWIG_PYTHON_CPPFLAGS) -I$(top_srcdir)/src -_sky_map_la_CFLAGS = $(AM_CFLAGS) $(SWIG_PYTHON_CFLAGS) -Wno-error -_sky_map_la_LDFLAGS = $(top_builddir)/src/liblalinference.la -shared -module -avoid-version -endif -endif - -all-local: _sky_map.so - -_sky_map.so: - rm -f $@ && $(LN_S) .libs/$@ - -CLEANFILES = _sky_map.so - endif diff --git a/lalinference/python/lalinference/bayestar/_sky_map.c b/lalinference/python/lalinference/bayestar/_sky_map.c deleted file mode 100644 index c4c967a3a3..0000000000 --- a/lalinference/python/lalinference/bayestar/_sky_map.c +++ /dev/null @@ -1,458 +0,0 @@ -/* - * Copyright (C) 2013-2017 Leo Singer - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with with program; see the file COPYING. If not, write to the - * Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, - * MA 02111-1307 USA - */ - -#include "config.h" -#include -/* Ignore warnings in Numpy API itself */ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wcast-qual" -#include -#include -#pragma GCC diagnostic pop -#include -#include -#include -#include -#include -#include "six.h" - - -static void capsule_free(PyObject *self) -{ - free(PyCapsule_GetPointer(self, NULL)); -} - - -#define INPUT_LIST_OF_ARRAYS(NAME, NPYTYPE, DEPTH, CHECK) \ -{ \ - const Py_ssize_t n = PySequence_Length(NAME##_obj); \ - if (n < 0) \ - goto fail; \ - else if (n != nifos) { \ - PyErr_SetString(PyExc_ValueError, #NAME \ - " appears to be the wrong length for the number of detectors"); \ - goto fail; \ - } \ - for (unsigned int iifo = 0; iifo < nifos; iifo ++) \ - { \ - PyObject *obj = PySequence_ITEM(NAME##_obj, iifo); \ - if (!obj) goto fail; \ - PyArrayObject *npy = (PyArrayObject *) \ - PyArray_ContiguousFromAny(obj, NPYTYPE, DEPTH, DEPTH); \ - Py_XDECREF(obj); \ - if (!npy) goto fail; \ - NAME##_npy[iifo] = npy; \ - { CHECK } \ - NAME[iifo] = PyArray_DATA(npy); \ - } \ -} - -#define FREE_INPUT_LIST_OF_ARRAYS(NAME) \ -{ \ - for (unsigned int iifo = 0; iifo < nifos; iifo ++) \ - Py_XDECREF(NAME##_npy[iifo]); \ -} - -#define INPUT_VECTOR_NIFOS(CTYPE, NAME, NPYTYPE) \ - NAME##_npy = (PyArrayObject *) \ - PyArray_ContiguousFromAny(NAME##_obj, NPYTYPE, 1, 1); \ - if (!NAME##_npy) goto fail; \ - if (PyArray_DIM(NAME##_npy, 0) != nifos) \ - { \ - PyErr_SetString(PyExc_ValueError, #NAME \ - " appears to be the wrong length for the number of detectors"); \ - goto fail; \ - } \ - const CTYPE *NAME = PyArray_DATA(NAME##_npy); - -#define INPUT_VECTOR_DOUBLE_NIFOS(NAME) \ - INPUT_VECTOR_NIFOS(double, NAME, NPY_DOUBLE) - - -static PyArray_Descr *sky_map_descr; - - -static PyArray_Descr *sky_map_create_descr(void) -{ - PyArray_Descr *dtype = NULL; - PyObject *dtype_dict = Py_BuildValue("{s(ssss)s(cccc)s(IIII)}", - "names", "UNIQ", "PROBDENSITY", "DISTMEAN", "DISTSTD", - "formats", NPY_ULONGLONGLTR, NPY_DOUBLELTR, NPY_DOUBLELTR, NPY_DOUBLELTR, - "offsets", - (unsigned int) offsetof(bayestar_pixel, uniq), - (unsigned int) offsetof(bayestar_pixel, value[0]), - (unsigned int) offsetof(bayestar_pixel, value[1]), - (unsigned int) offsetof(bayestar_pixel, value[2])); - - if (dtype_dict) - { - PyArray_DescrConverter(dtype_dict, &dtype); - Py_DECREF(dtype_dict); - } - - return dtype; -} - - -static PyObject *sky_map_toa_phoa_snr( - PyObject *NPY_UNUSED(module), PyObject *args, PyObject *kwargs) -{ - /* Input arguments */ - double min_distance; - double max_distance; - int prior_distance_power; - int cosmology; - double gmst; - unsigned int nifos; - unsigned long nsamples = 0; - double sample_rate; - PyObject *epochs_obj; - PyObject *snrs_obj; - PyObject *responses_obj; - PyObject *locations_obj; - PyObject *horizons_obj; - - /* Names of arguments */ - static const char *keywords[] = {"min_distance", "max_distance", - "prior_distance_power", "cosmology", "gmst", "sample_rate", "epochs", - "snrs", "responses", "locations", "horizons", NULL}; - - /* Parse 
arguments */ - /* FIXME: PyArg_ParseTupleAndKeywords should expect keywords to be const */ - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wincompatible-pointer-types" - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "ddiiddOOOOO", - keywords, &min_distance, &max_distance, &prior_distance_power, - &cosmology, &gmst, &sample_rate, &epochs_obj, &snrs_obj, - &responses_obj, &locations_obj, &horizons_obj)) return NULL; - #pragma GCC diagnostic pop - - /* Determine number of detectors */ - { - Py_ssize_t n = PySequence_Length(epochs_obj); - if (n < 0) return NULL; - nifos = n; - } - - /* Return value */ - PyObject *out = NULL; - double log_bci, log_bsn; - - /* Numpy array objects */ - PyArrayObject *epochs_npy = NULL, *snrs_npy[nifos], *responses_npy[nifos], - *locations_npy[nifos], *horizons_npy = NULL; - memset(snrs_npy, 0, sizeof(snrs_npy)); - memset(responses_npy, 0, sizeof(responses_npy)); - memset(locations_npy, 0, sizeof(locations_npy)); - - /* Arrays of pointers for inputs with multiple dimensions */ - const float complex *snrs[nifos]; - const float (*responses[nifos])[3]; - const double *locations[nifos]; - - /* Gather C-aligned arrays from Numpy types */ - INPUT_VECTOR_DOUBLE_NIFOS(epochs) - INPUT_LIST_OF_ARRAYS(snrs, NPY_CFLOAT, 1, - npy_intp dim = PyArray_DIM(npy, 0); - if (iifo == 0) - nsamples = dim; - else if ((unsigned long)dim != nsamples) - { - PyErr_SetString(PyExc_ValueError, - "expected elements of snrs to be vectors of the same length"); - goto fail; - } - ) - INPUT_LIST_OF_ARRAYS(responses, NPY_FLOAT, 2, - if (PyArray_DIM(npy, 0) != 3 || PyArray_DIM(npy, 1) != 3) - { - PyErr_SetString(PyExc_ValueError, - "expected elements of responses to be 3x3 arrays"); - goto fail; - } - ) - INPUT_LIST_OF_ARRAYS(locations, NPY_DOUBLE, 1, - if (PyArray_DIM(npy, 0) != 3) - { - PyErr_SetString(PyExc_ValueError, - "expected elements of locations to be vectors of length 3"); - goto fail; - } - ) - INPUT_VECTOR_DOUBLE_NIFOS(horizons) - - /* Call function */ - gsl_error_handler_t *old_handler = gsl_set_error_handler_off(); - size_t len; - bayestar_pixel *pixels; - Py_BEGIN_ALLOW_THREADS - pixels = bayestar_sky_map_toa_phoa_snr(&len, &log_bci, &log_bsn, - min_distance, max_distance, prior_distance_power, cosmology, gmst, - nifos, nsamples, sample_rate, epochs, snrs, responses, locations, - horizons); - Py_END_ALLOW_THREADS - gsl_set_error_handler(old_handler); - - PyErr_CheckSignals(); - - if (!pixels) - goto fail; - - /* Prepare output object */ - PyObject *capsule = PyCapsule_New(pixels, NULL, capsule_free); - if (!capsule) - goto fail; - - npy_intp dims[] = {len}; - Py_INCREF(sky_map_descr); - out = PyArray_NewFromDescr(&PyArray_Type, - sky_map_descr, 1, dims, NULL, pixels, NPY_ARRAY_DEFAULT, NULL); - if (!out) - { - Py_DECREF(capsule); - goto fail; - } - - if (PyArray_SetBaseObject((PyArrayObject *) out, capsule)) - { - Py_DECREF(out); - out = NULL; - goto fail; - } - -fail: /* Cleanup */ - Py_XDECREF(epochs_npy); - FREE_INPUT_LIST_OF_ARRAYS(snrs) - FREE_INPUT_LIST_OF_ARRAYS(responses) - FREE_INPUT_LIST_OF_ARRAYS(locations) - Py_XDECREF(horizons_npy); - if (out) { - out = Py_BuildValue("Ndd", out, log_bci, log_bsn); - } - return out; -}; - - -static PyObject *log_likelihood_toa_phoa_snr( - PyObject *NPY_UNUSED(module), PyObject *args, PyObject *kwargs) -{ - /* Input arguments */ - double ra; - double sin_dec; - double distance; - double u; - double twopsi; - double t; - double gmst; - unsigned int nifos; - unsigned long nsamples = 0; - double sample_rate; - PyObject 
*epochs_obj; - PyObject *snrs_obj; - PyObject *responses_obj; - PyObject *locations_obj; - PyObject *horizons_obj; - - /* Names of arguments */ - static const char *keywords[] = {"params", "gmst", "sample_rate", "epochs", - "snrs", "responses", "locations", "horizons", NULL}; - - /* Parse arguments */ - /* FIXME: PyArg_ParseTupleAndKeywords should expect keywords to be const */ - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wincompatible-pointer-types" - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "(dddddd)ddOOOOO", - keywords, &ra, &sin_dec, &distance, &u, &twopsi, &t, &gmst, - &sample_rate, &epochs_obj, &snrs_obj, &responses_obj, &locations_obj, - &horizons_obj)) return NULL; - #pragma GCC diagnostic pop - - /* Determine number of detectors */ - { - Py_ssize_t n = PySequence_Length(epochs_obj); - if (n < 0) return NULL; - nifos = n; - } - - /* Return value */ - PyObject *out = NULL; - - /* Numpy array objects */ - PyArrayObject *epochs_npy = NULL, *snrs_npy[nifos], *responses_npy[nifos], - *locations_npy[nifos], *horizons_npy = NULL; - memset(snrs_npy, 0, sizeof(snrs_npy)); - memset(responses_npy, 0, sizeof(responses_npy)); - memset(locations_npy, 0, sizeof(locations_npy)); - - /* Arrays of pointers for inputs with multiple dimensions */ - const float complex *snrs[nifos]; - const float (*responses[nifos])[3]; - const double *locations[nifos]; - - /* Gather C-aligned arrays from Numpy types */ - INPUT_VECTOR_DOUBLE_NIFOS(epochs) - INPUT_LIST_OF_ARRAYS(snrs, NPY_CFLOAT, 1, - npy_intp dim = PyArray_DIM(npy, 0); - if (iifo == 0) - nsamples = dim; - else if ((unsigned long)dim != nsamples) - { - PyErr_SetString(PyExc_ValueError, - "expected elements of snrs to be vectors of the same length"); - goto fail; - } - ) - INPUT_LIST_OF_ARRAYS(responses, NPY_FLOAT, 2, - if (PyArray_DIM(npy, 0) != 3 || PyArray_DIM(npy, 1) != 3) - { - PyErr_SetString(PyExc_ValueError, - "expected elements of responses to be 3x3 arrays"); - goto fail; - } - ) - INPUT_LIST_OF_ARRAYS(locations, NPY_DOUBLE, 1, - if (PyArray_DIM(npy, 0) != 3) - { - PyErr_SetString(PyExc_ValueError, - "expected elements of locations to be vectors of length 3"); - goto fail; - } - ) - INPUT_VECTOR_DOUBLE_NIFOS(horizons) - - /* Call function */ - gsl_error_handler_t *old_handler = gsl_set_error_handler_off(); - const double ret = bayestar_log_likelihood_toa_phoa_snr(ra, sin_dec, - distance, u, twopsi, t, gmst, nifos, nsamples, sample_rate, epochs, - snrs, responses, locations, horizons); - gsl_set_error_handler(old_handler); - - /* Prepare output object */ - out = PyFloat_FromDouble(ret); - -fail: /* Cleanup */ - Py_XDECREF(epochs_npy); - FREE_INPUT_LIST_OF_ARRAYS(snrs) - FREE_INPUT_LIST_OF_ARRAYS(responses) - FREE_INPUT_LIST_OF_ARRAYS(locations) - Py_XDECREF(horizons_npy); - return out; -}; - - -static void signal_amplitude_model_loop( - char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data)) -{ - const npy_intp n = dimensions[0]; - - for (npy_intp i = 0; i < n; i ++) - { - /* FIXME: args must be void ** to avoid alignment warnings */ - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wcast-align" - *(double complex *) &args[4][i * steps[4]] = - bayestar_signal_amplitude_model( - *(double complex *) &args[0][i * steps[0]], - *(double complex *) &args[1][i * steps[1]], - *(double *) &args[2][i * steps[2]], - *(double *) &args[3][i * steps[3]]); - #pragma GCC diagnostic pop - } -} - - -static PyObject *test( - PyObject *NPY_UNUSED(module), PyObject *NPY_UNUSED(arg)) -{ - int ret; - 
gsl_error_handler_t *old_handler = gsl_set_error_handler_off(); - Py_BEGIN_ALLOW_THREADS - ret = bayestar_test(); - Py_END_ALLOW_THREADS - gsl_set_error_handler(old_handler); - return PyLong_FromLong(ret); -} - - -static PyMethodDef methods[] = { - {"toa_phoa_snr", (PyCFunction)sky_map_toa_phoa_snr, - METH_VARARGS | METH_KEYWORDS, "fill me in"}, - {"log_likelihood_toa_phoa_snr", (PyCFunction)log_likelihood_toa_phoa_snr, - METH_VARARGS | METH_KEYWORDS, "fill me in"}, - {"test", (PyCFunction)test, - METH_NOARGS, "fill me in"}, - {NULL, NULL, 0, NULL} -}; - - -static PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_sky_map", NULL, -1, methods, - NULL, NULL, NULL, NULL -}; - - -static const PyUFuncGenericFunction - signal_amplitude_model_loops[] = {signal_amplitude_model_loop}; - -static const char signal_amplitude_model_ufunc_types[] = { - NPY_CDOUBLE, NPY_CDOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_CDOUBLE}; - -static void *const no_ufunc_data[] = {NULL}; - - -PyMODINIT_FUNC PyInit__sky_map(void); /* Silence -Wmissing-prototypes */ -PyMODINIT_FUNC PyInit__sky_map(void) -{ - PyObject *module = NULL; - - gsl_set_error_handler_off(); - import_array(); - import_umath(); - - sky_map_descr = sky_map_create_descr(); - if (!sky_map_descr) - goto done; - - module = PyModule_Create(&moduledef); - if (!module) - goto done; - - /* Ignore warnings in Numpy API */ - #pragma GCC diagnostic push - #ifdef __clang__ - #pragma GCC diagnostic ignored "-Wincompatible-pointer-types-discards-qualifiers" - #else - #pragma GCC diagnostic ignored "-Wdiscarded-qualifiers" - #endif - - PyModule_AddObject( - module, "signal_amplitude_model", PyUFunc_FromFuncAndData( - signal_amplitude_model_loops, no_ufunc_data, - signal_amplitude_model_ufunc_types, 1, 4, 1, PyUFunc_None, - "signal_amplitude_model", NULL, 0)); - - #pragma GCC diagnostic pop - -done: - return module; -} - - -SIX_COMPAT_MODULE(_sky_map) diff --git a/lalinference/python/lalinference/bayestar/command.py b/lalinference/python/lalinference/bayestar/command.py deleted file mode 100644 index c78968eec6..0000000000 --- a/lalinference/python/lalinference/bayestar/command.py +++ /dev/null @@ -1,527 +0,0 @@ -# -# Copyright (C) 2013-2017 Leo Singer -# -# This program is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by the -# Free Software Foundation; either version 2 of the License, or (at your -# option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General -# Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -# -""" -Functions that support the command line interface. -""" - -from __future__ import print_function -import argparse -from distutils.dir_util import mkpath -from distutils.errors import DistutilsFileError -import glob -import inspect -import itertools -import logging -import os -import sys -import tempfile -import matplotlib -from matplotlib import cm -from ..plot import cmap -from ..util import sqlite - - -# Set no-op Matplotlib backend to defer importing anything that requires a GUI -# until we have determined that it is necessary based on the command line -# arguments. 
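The comment above describes the deferred-backend pattern used throughout command.py: install Matplotlib's no-op 'Template' backend up front so that importing pyplot can never start a GUI, then switch to a real backend (e.g. 'agg' for file output, as MatplotlibFigureType does further down) once the command line has been parsed. A minimal standalone sketch of the pattern; the output filename is hypothetical:

    import matplotlib
    matplotlib.use('Template')      # no-op backend: importing pyplot stays GUI-free
    from matplotlib import pyplot as plt

    # ... parse command-line arguments to learn the real output target ...

    plt.switch_backend('agg')       # file output requested: non-GUI raster backend
    plt.plot([0, 1], [0, 1])
    plt.savefig('example.png')      # hypothetical output path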
-if 'matplotlib.pyplot' in sys.modules: - from matplotlib import pyplot as plt - plt.switch_backend('Template') -else: - matplotlib.use('Template', warn=False, force=True) - - -# FIXME: Remove this after all Matplotlib monkeypatches are obsolete. -import matplotlib -import distutils.version -mpl_version = distutils.version.LooseVersion(matplotlib.__version__) - - -def get_version(): - from .. import InferenceVCSInfo as vcs_info - return vcs_info.name + ' ' + vcs_info.version - - -class GlobAction(argparse._StoreAction): - """Generate a list of filenames from a list of filenames and globs.""" - - def __call__(self, parser, namespace, values, *args, **kwargs): - values = list( - itertools.chain.from_iterable(glob.iglob(s) for s in values)) - if values: - super(GlobAction, self).__call__( - parser, namespace, values, *args, **kwargs) - nvalues = getattr(namespace, self.dest) - nvalues = 0 if nvalues is None else len(nvalues) - if self.nargs == argparse.OPTIONAL: - if nvalues > 1: - msg = 'expected at most one file' - else: - msg = None - elif self.nargs == argparse.ONE_OR_MORE: - if nvalues < 1: - msg = 'expected at least one file' - else: - msg = None - elif self.nargs == argparse.ZERO_OR_MORE: - msg = None - elif int(self.nargs) != nvalues: - msg = 'expected exactly %s file' % self.nargs - if self.nargs != 1: - msg += 's' - else: - msg = None - if msg is not None: - msg += ', but found ' - msg += '{} file'.format(nvalues) - if nvalues != 1: - msg += 's' - raise argparse.ArgumentError(self, msg) - - -waveform_parser = argparse.ArgumentParser(add_help=False) -group = waveform_parser.add_argument_group( - 'waveform options', 'Options that affect template waveform generation') -# FIXME: The O1 uberbank high-mass template, SEOBNRv2_ROM_DoubleSpin, does -# not support frequencies less than 30 Hz. -group.add_argument( - '--f-low', type=float, metavar='Hz', default=30, - help='Low frequency cutoff [default: %(default)s]') -group.add_argument( - '--f-high-truncate', type=float, default=0.95, - help='Truncate waveform at this fraction of the maximum frequency of the ' - 'PSD [default: %(default)s]') -group.add_argument( - '--waveform', default='o2-uberbank', - help='Template waveform approximant (e.g., TaylorF2threePointFivePN) ' - '[default: O2 uberbank mass-dependent waveform]') -del group - - -prior_parser = argparse.ArgumentParser(add_help=False) -group = prior_parser.add_argument_group( - 'prior options', 'Options that affect the BAYESTAR likelihood') -group.add_argument( - '--min-distance', type=float, metavar='Mpc', - help='Minimum distance of prior in megaparsecs ' - '[default: infer from effective distance]') -group.add_argument( - '--max-distance', type=float, metavar='Mpc', - help='Maximum distance of prior in megaparsecs ' - '[default: infer from effective distance]') -group.add_argument( - '--prior-distance-power', type=int, metavar='-1|2', default=2, - help='Distance prior ' - '[-1 for uniform in log, 2 for uniform in volume, default: %(default)s]') -group.add_argument( - '--cosmology', default=False, action='store_true', - help='Use cosmological comoving volume prior [default: %(default)s]') -group.add_argument( - '--disable-snr-series', dest='enable_snr_series', action='store_false', - help='Disable input of SNR time series (WARNING: UNREVIEWED!) 
' - '[default: enabled]') -del group - - -skymap_parser = argparse.ArgumentParser(add_help=False) -group = skymap_parser.add_argument_group( - 'sky map output options', 'Options that affect sky map output') -group.add_argument( - '--nside', '-n', type=int, default=-1, - help='HEALPix resolution [default: auto]') -group.add_argument( - '--chain-dump', default=False, action='store_true', - help='For MCMC methods, dump the sample chain to disk [default: no]') -del group - - -class MatplotlibFigureType(argparse.FileType): - - def __init__(self): - super(MatplotlibFigureType, self).__init__('wb') - - @staticmethod - def __show(): - from matplotlib import pyplot as plt - return plt.show() - - def __save(self): - from matplotlib import pyplot as plt - _, ext = os.path.splitext(self.string) - ext = ext.lower() - program, _ = os.path.splitext(os.path.basename(sys.argv[0])) - cmdline = ' '.join([program] + sys.argv[1:]) - metadata = {'Title': cmdline} - if ext == '.png': - metadata['Software'] = get_version() - elif ext in {'.pdf', '.ps', '.eps'}: - metadata['Creator'] = get_version() - return plt.savefig(self.string, metadata=metadata) - - def __call__(self, string): - from matplotlib import pyplot as plt - if string == '-': - plt.switch_backend(matplotlib.rcParamsOrig['backend']) - return self.__show - else: - with super(MatplotlibFigureType, self).__call__(string): - pass - plt.switch_backend('agg') - self.string = string - return self.__save - - -class HelpChoicesAction(argparse.Action): - - def __init__(self, - option_strings, - choices=(), - dest=argparse.SUPPRESS, - default=argparse.SUPPRESS): - name = option_strings[0].replace('--help-', '') - super(HelpChoicesAction, self).__init__( - option_strings=option_strings, - dest=dest, - default=default, - nargs=0, - help='show support values for --' + name + ' and exit') - self._name = name - self._choices = choices - - def __call__(self, parser, namespace, values, option_string=None): - print('Supported values for --' + self._name + ':') - for choice in self._choices: - print(choice) - parser.exit() - - -def type_with_sideeffect(type): - def decorator(sideeffect): - def func(value): - ret = type(value) - sideeffect(ret) - return ret - return func - return decorator - - -@type_with_sideeffect(str) -def colormap(value): - from matplotlib import rcParams - rcParams['image.cmap'] = value - - -@type_with_sideeffect(float) -def figwidth(value): - from matplotlib import rcParams - rcParams['figure.figsize'][0] = float(value) - - -@type_with_sideeffect(float) -def figheight(value): - from matplotlib import rcParams - rcParams['figure.figsize'][1] = float(value) - - -@type_with_sideeffect(int) -def dpi(value): - from matplotlib import rcParams - rcParams['figure.dpi'] = rcParams['savefig.dpi'] = float(value) - - -@type_with_sideeffect(int) -def transparent(value): - from matplotlib import rcParams - rcParams['savefig.transparent'] = bool(value) - - -figure_parser = argparse.ArgumentParser(add_help=False) -colormap_choices = sorted(cm.cmap_d.keys()) -group = figure_parser.add_argument_group( - 'figure options', 'Options that affect figure output format') -group.add_argument( - '-o', '--output', metavar='FILE.{pdf,png}', - default='-', type=MatplotlibFigureType(), - help='name of output file [default: plot to screen]') -group.add_argument( - '--colormap', default='cylon', choices=colormap_choices, - type=colormap, metavar='CMAP', - help='name of matplotlib colormap [default: %(default)s]') -group.add_argument( - '--help-colormap', action=HelpChoicesAction, 
choices=colormap_choices) -group.add_argument( - '--figure-width', metavar='INCHES', type=figwidth, default='8', - help='width of figure in inches [default: %(default)s]') -group.add_argument( - '--figure-height', metavar='INCHES', type=figheight, default='6', - help='height of figure in inches [default: %(default)s]') -group.add_argument( - '--dpi', metavar='PIXELS', type=dpi, default=300, - help='resolution of figure in dots per inch [default: %(default)s]') -# FIXME: the savefig.transparent rcparam was added in Matplotlib 1.4, -# but we have to support Matplotlib 1.2 for Scientific Linux 7. -if mpl_version >= '1.4': - group.add_argument( - '--transparent', const='1', default='0', nargs='?', type=transparent, - help='Save image with transparent background [default: false]') -del colormap_choices -del group - - -# Defer loading SWIG bindings until version string is needed. -class VersionAction(argparse._VersionAction): - def __call__(self, parser, namespace, values, option_string=None): - self.version = get_version() - super(VersionAction, self).__call__( - parser, namespace, values, option_string) - - -@type_with_sideeffect(str) -def loglevel_type(value): - try: - value = int(value) - except ValueError: - value = value.upper() - logging.basicConfig(level=value) - - -class LogLevelAction(argparse._StoreAction): - - def __init__( - self, option_strings, dest, nargs=None, const=None, default=None, - type=None, choices=None, required=False, help=None, metavar=None): - # FIXME: this broke because of internal changes in the Python standard - # library logging module between Python 2.7 and 3.6. We should not rely - # on these undocumented module variables in the first place. - try: - logging._levelNames - except AttributeError: - metavar = '|'.join(logging._levelToName.values()) - else: - metavar = '|'.join( - _ for _ in logging._levelNames.keys() if isinstance(_, str)) - type = loglevel_type - super(LogLevelAction, self).__init__( - option_strings, dest, nargs=nargs, const=const, default=default, - type=type, choices=choices, required=required, help=help, - metavar=metavar) - - -class ArgumentParser(argparse.ArgumentParser): - """ - An ArgumentParser subclass with some sensible defaults. - - - Any ``.py`` suffix is stripped from the program name, because the - program is probably being invoked from the stub shell script. - - - The description is taken from the docstring of the file in which the - ArgumentParser is created. - - - If the description is taken from the docstring, then whitespace in - the description is preserved. - - - A ``--version`` option is added that prints the version of LALInference. 
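The figure options above all rely on the type_with_sideeffect pattern: the argparse type= callable converts the string and applies it to Matplotlib's rcParams as a side effect at parse time. This is also why the defaults are given as strings ('8', '6'): argparse passes string defaults through the type converter, so the side effect fires even when the option is omitted. A small self-contained sketch with a hypothetical option:

    import argparse
    from matplotlib import rcParams

    def figwidth(value):
        # Convert *and* apply as a side effect, as type_with_sideeffect does.
        rcParams['figure.figsize'][0] = float(value)
        return float(value)

    parser = argparse.ArgumentParser()
    parser.add_argument('--figure-width', type=figwidth, default='8')
    parser.parse_args(['--figure-width', '10'])
    assert rcParams['figure.figsize'][0] == 10.0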
- """ - def __init__(self, - prog=None, - usage=None, - description=None, - epilog=None, - parents=[], - formatter_class=None, - prefix_chars='-', - fromfile_prefix_chars=None, - argument_default=None, - conflict_handler='error', - add_help=True): - if prog is None: - prog = os.path.basename(sys.argv[0]).replace('.py', '') - if description is None: - parent_frame = inspect.currentframe().f_back - description = parent_frame.f_locals.get('__doc__', None) - if formatter_class is None: - formatter_class = argparse.RawDescriptionHelpFormatter - if formatter_class is None: - formatter_class = argparse.HelpFormatter - super(ArgumentParser, self).__init__( - prog=prog, - usage=usage, - description=description, - epilog=epilog, - parents=parents, - formatter_class=argparse.RawDescriptionHelpFormatter, - prefix_chars=prefix_chars, - fromfile_prefix_chars=fromfile_prefix_chars, - argument_default=argument_default, - conflict_handler=conflict_handler, - add_help=add_help) - self.register('action', 'glob', GlobAction) - self.register('action', 'loglevel', LogLevelAction) - self.register('action', 'version', VersionAction) - self.add_argument('--version', action='version') - self.add_argument('-l', '--loglevel', action='loglevel', default='INFO') - - -class DirType(object): - """Factory for directory arguments.""" - - def __init__(self, create=False): - self._create = create - - def __call__(self, string): - if self._create: - try: - mkpath(string) - except DistutilsFileError as e: - raise argparse.ArgumentTypeError(e.message) - else: - try: - os.listdir(string) - except OSError as e: - raise argparse.ArgumentTypeError(e) - return string - - -class SQLiteType(argparse.FileType): - """Open an SQLite database, or fail if it does not exist. - FIXME: use SQLite URI when we drop support for Python < 3.4. - See: https://docs.python.org/3.4/whatsnew/3.4.html#sqlite3 - - Here is an example of trying to open a file that does not exist for - reading (mode='r'). It should raise an exception: - - >>> import tempfile - >>> filetype = SQLiteType('r') - >>> filename = tempfile.mktemp() - >>> # Note, simply check or a FileNotFound error in Python 3. - >>> filetype(filename) - Traceback (most recent call last): - ... - argparse.ArgumentTypeError: ... - - If the file already exists, then it's fine: - >>> import sqlite3 - >>> filetype = SQLiteType('r') - >>> with tempfile.NamedTemporaryFile() as f: - ... with sqlite3.connect(f.name) as db: - ... _ = db.execute('create table foo (bar char)') - ... filetype(f.name) - - - Here is an example of opening a file for writing (mode='w'), which should - overwrite the file if it exists. Even if the file was not an SQLite - database beforehand, this should work: - - >>> filetype = SQLiteType('w') - >>> with tempfile.NamedTemporaryFile(mode='w') as f: - ... print('This is definitely not an SQLite file.', file=f) - ... f.flush() - ... with filetype(f.name) as db: - ... db.execute('create table foo (bar char)') - - - Here is an example of opening a file for appending (mode='a'), which should - NOT overwrite the file if it exists. If the file was not an SQLite database - beforehand, this should raise an exception. - - >>> import pytest - >>> filetype = SQLiteType('a') - >>> with tempfile.NamedTemporaryFile(mode='w') as f: - ... print('This is definitely not an SQLite file.', file=f) - ... f.flush() - ... with filetype(f.name) as db: - ... db.execute('create table foo (bar char)') - Traceback (most recent call last): - ... - sqlite3.DatabaseError: ... 
- - And if the database did exist beforehand, then opening for appending - (mode='a') should not clobber existing tables. - - >>> filetype = SQLiteType('a') - >>> with tempfile.NamedTemporaryFile() as f: - ... with sqlite3.connect(f.name) as db: - ... _ = db.execute('create table foo (bar char)') - ... with filetype(f.name) as db: - ... db.execute('select count(*) from foo').fetchone() - (0,) - """ - - def __init__(self, mode): - if mode not in 'arw': - raise ValueError('Unknown file mode: {}'.format(mode)) - self.mode = mode - - def __call__(self, string): - try: - return sqlite.open(string, self.mode) - except OSError as e: - raise argparse.ArgumentTypeError(e) - - -def _sanitize_arg_value_for_xmldoc(value): - if hasattr(value, 'read'): - return value.name - elif isinstance(value, tuple): - return tuple(_sanitize_arg_value_for_xmldoc(v) for v in value) - elif isinstance(value, list): - return [_sanitize_arg_value_for_xmldoc(v) for v in value] - else: - return value - - -def register_to_xmldoc(xmldoc, parser, opts, **kwargs): - from glue.ligolw.utils import process - params = {key: _sanitize_arg_value_for_xmldoc(value) - for key, value in opts.__dict__.items()} - return process.register_to_xmldoc(xmldoc, parser.prog, params, **kwargs) - - -start_msg = '\ -Waiting for input on stdin. Type control-D followed by a newline to terminate.' -stop_msg = 'Reached end of file. Exiting.' - - -def iterlines(file, start_message=start_msg, stop_message=stop_msg): - """Iterate over non-emtpy lines in a file.""" - is_tty = os.isatty(file.fileno()) - - if is_tty: - print(start_message, file=sys.stderr) - - while True: - # Read a line. - line = file.readline() - - if not line: - # If we reached EOF, then exit. - break - - # Strip off the trailing newline and any whitespace. - line = line.strip() - - # Emit the line if it is not empty. - if line: - yield line - - if is_tty: - print(stop_message, file=sys.stderr) - - -from lalinference.bayestar.deprecation import warn -warn('ligo.skymap.tool') diff --git a/lalinference/python/lalinference/bayestar/decorator.py b/lalinference/python/lalinference/bayestar/decorator.py deleted file mode 100644 index 4bafdd3318..0000000000 --- a/lalinference/python/lalinference/bayestar/decorator.py +++ /dev/null @@ -1,82 +0,0 @@ -# -# Copyright (C) 2013-2017 Leo Singer -# -# This program is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by the -# Free Software Foundation; either version 2 of the License, or (at your -# option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General -# Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -# -""" -Collection of Python decorators. -""" - -from functools import wraps -from collections import Hashable -from astropy.utils.misc import NumpyRNGContext - -__all__ = ('memoized', 'with_numpy_random_seed', 'as_dict') - - -try: - from functools import lru_cache -except ImportError: - # FIXME: Remove this when we drop support for Python < 3.2. 
- def memoized(func): - """Memoize a function or class by caching its return values for any - given arguments.""" - cache = {} - - @wraps(func) - def memo(*args, **kwargs): - # Create a key out of the arguments. - key = (args, frozenset(kwargs.items())) - - if isinstance(args, Hashable): # The key is immutable. - try: - # Look up the return value for these arguments. - ret = cache[key] - except KeyError: - # Not found; invoke function and store return value. - ret = cache[key] = func(*args, **kwargs) - else: # The key is mutable. We can't cache it. - ret = func(*args, **kwargs) - - # Done! - return ret - - # Return wrapped function. - return memo -else: - memoized = lru_cache(maxsize=None) - - -def with_numpy_random_seed(func, seed=0): - """Decorate a function so that it is called with a pre-defined random seed. - The random seed is restored when the function returns.""" - - @wraps(func) - def wrapped_func(*args, **kwargs): - with NumpyRNGContext(seed): - ret = func(*args, **kwargs) - return ret - - return wrapped_func - - -def as_dict(func): - """Decorate a generator function to turn its output into a dict.""" - - @wraps(func) - def wrapped_func(*args, **kwargs): - return dict(func(*args, **kwargs)) - - return wrapped_func diff --git a/lalinference/python/lalinference/bayestar/filter.py b/lalinference/python/lalinference/bayestar/filter.py deleted file mode 100644 index eb5598c547..0000000000 --- a/lalinference/python/lalinference/bayestar/filter.py +++ /dev/null @@ -1,720 +0,0 @@ -# -*- coding: UTF-8 -*- -# -# Copyright (C) 2013-2015 Leo Singer -# -# This program is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by the -# Free Software Foundation; either version 2 of the License, or (at your -# option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General -# Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -# -""" -Functions related to matched filtering. -""" - -from __future__ import division - -# General imports -import logging -import numpy as np -import math -from scipy import optimize - -# LAL imports -import lal -import lalsimulation - -# My own imports -from .decorator import memoized - -log = logging.getLogger('BAYESTAR') - - -# Useful sample units -unitInverseHertz = lal.Unit('s') -unitInverseSqrtHertz = lal.Unit('s^1/2') - - -# Memoize FFT plans -CreateForwardCOMPLEX16FFTPlan = memoized(lal.CreateForwardCOMPLEX16FFTPlan) -CreateForwardREAL8FFTPlan = memoized(lal.CreateForwardREAL8FFTPlan) -CreateReverseCOMPLEX16FFTPlan = memoized(lal.CreateReverseCOMPLEX16FFTPlan) -CreateReverseREAL8FFTPlan = memoized(lal.CreateReverseREAL8FFTPlan) - - -def ceil_pow_2(n): - """Return the least integer power of 2 that is greater than or equal to n. - - >>> ceil_pow_2(128.0) - 128.0 - >>> ceil_pow_2(0.125) - 0.125 - >>> ceil_pow_2(129.0) - 256.0 - >>> ceil_pow_2(0.126) - 0.25 - >>> ceil_pow_2(1.0) - 1.0 - """ - # frexp splits floats into mantissa and exponent, ldexp does the opposite. - # For positive numbers, mantissa is in [0.5, 1.). 
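The memoized FFT-plan constructors above cache one plan per (length, measure level) pair, so repeated filtering at a common size never re-plans. On Python >= 3.2, where memoized is just functools.lru_cache, the pattern reduces to:

    from functools import lru_cache
    import lal

    CreateForwardCOMPLEX16FFTPlan = lru_cache(maxsize=None)(
        lal.CreateForwardCOMPLEX16FFTPlan)

    plan_a = CreateForwardCOMPLEX16FFTPlan(4096, 0)
    plan_b = CreateForwardCOMPLEX16FFTPlan(4096, 0)
    assert plan_a is plan_b  # constructed once, reused thereafter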
- mantissa, exponent = math.frexp(n) - return math.ldexp( - 1 if mantissa >= 0 else float('nan'), - exponent - 1 if mantissa == 0.5 else exponent - ) - - -def fftfilt(b, x): - """Apply the FIR filter with coefficients b to the signal x, as if the filter's - state is initially all zeros. The output has the same length as x.""" - - # Zero-pad by at least (len(b) - 1). - nfft = int(ceil_pow_2(len(x) + len(b) - 1)) - - # Create FFT plans. - forwardplan = CreateForwardCOMPLEX16FFTPlan(nfft, 0) - reverseplan = CreateReverseCOMPLEX16FFTPlan(nfft, 0) - - # Create temporary workspaces. - workspace1 = lal.CreateCOMPLEX16Vector(nfft) - workspace2 = lal.CreateCOMPLEX16Vector(nfft) - workspace3 = lal.CreateCOMPLEX16Vector(nfft) - - workspace1.data[:len(x)] = x - workspace1.data[len(x):] = 0 - lal.COMPLEX16VectorFFT(workspace2, workspace1, forwardplan) - workspace1.data[:len(b)] = b - workspace1.data[len(b):] = 0 - lal.COMPLEX16VectorFFT(workspace3, workspace1, forwardplan) - workspace2.data *= workspace3.data - lal.COMPLEX16VectorFFT(workspace1, workspace2, reverseplan) - - # Return result with zero-padding stripped. - return workspace1.data[:len(x)] / nfft - - -def abscissa(series): - """Produce the independent variable for a lal TimeSeries or - FrequencySeries.""" - try: - delta = series.deltaT - x0 = float(series.epoch) - except AttributeError: - delta = series.deltaF - x0 = series.f0 - return x0 + delta * np.arange(len(series.data.data)) - - -def colored_noise(epoch, duration, sample_rate, psd): - """Generate a REAL8TimeSeries containing duration seconds of colored - Gaussian noise at the given sample rate, with the start time given by - epoch. psd should be an instance of REAL8FrequencySeries containing a - discretely sample power spectrum with f0=0, deltaF=1/duration, and a length - of ((duration * sample_rate) // 2 + 1) samples. - """ - data_length = duration * sample_rate - plan = CreateReverseREAL8FFTPlan(data_length, 0) - x = lal.CreateREAL8TimeSeries( - None, lal.LIGOTimeGPS(0), 0, 0, lal.DimensionlessUnit, data_length) - xf = lal.CreateCOMPLEX16FrequencySeries( - None, epoch, 0, 1 / duration, - lal.DimensionlessUnit, data_length // 2 + 1) - white_noise = (np.random.randn(len(xf.data.data)) + - np.random.randn(len(xf.data.data)) * 1j) - - # On line 1288 of lal's AverageSpectrum.c, in the code comments for - # XLALWhitenCOMPLEX8FrequencySeries, it says that according to the LAL - # conventions a whitened frequency series should consist of bins whose - # real and imaginary parts each have a variance of 1/2. - white_noise /= np.sqrt(2) - - # The factor of sqrt(2 * psd.deltaF) comes from the value of 'norm' on - # line 1362 of AverageSpectrum.c. - xf.data.data = white_noise * np.sqrt(psd.data.data / (2 * psd.deltaF)) - - # Detrend the data: no DC component. - xf.data.data[0] = 0 - - # Return to time domain. - lal.REAL8FreqTimeFFT(x, xf, plan) - - # Copy over metadata. - x.epoch = epoch - x.sampleUnits = lal.StrainUnit - - # Done. 
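The zero-padding in fftfilt above, to a power of two at least len(x) + len(b) - 1, is what makes the circular FFT convolution agree with linear convolution; a NumPy check of the same identity:

    import numpy as np
    b = np.random.randn(16)      # FIR coefficients
    x = np.random.randn(200)     # input signal
    nfft = 256                   # next power of 2 >= len(x) + len(b) - 1 = 215
    y = np.fft.ifft(np.fft.fft(x, nfft) * np.fft.fft(b, nfft)).real
    assert np.allclose(y[:len(x)], np.convolve(b, x)[:len(x)])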
- return x - - -def add_quadrature_phase(rseries): - rseries_len = len(rseries.data.data) - cseries = lal.CreateCOMPLEX16FrequencySeries( - rseries.name, rseries.epoch, - rseries.f0 - rseries.deltaF * (rseries_len - 1), rseries.deltaF, - rseries.sampleUnits, 2 * (rseries_len - 1)) - cseries.data.data[:rseries_len] = 0 - cseries.data.data[rseries_len:] = 2 * rseries.data.data[1:-1] - return cseries - - -def matched_filter_real_fd(template, psd): - fdfilter = lal.CreateCOMPLEX16FrequencySeries( - template.name, template.epoch, - template.f0, template.deltaF, template.sampleUnits, - len(template.data.data)) - fdfilter.data.data = template.data.data - fdfilter = lal.WhitenCOMPLEX16FrequencySeries(fdfilter, psd) - fdfilter.data.data /= np.sqrt(np.sum(np.abs(fdfilter.data.data)**2)) - fdfilter = lal.WhitenCOMPLEX16FrequencySeries(fdfilter, psd) - return fdfilter - - -def matched_filter_spa(template, psd): - """Create a complex matched filter kernel from a stationary phase approximation - template and a PSD.""" - fdfilter = matched_filter_real_fd(template, psd) - fdfilter2 = add_quadrature_phase(fdfilter) - tdfilter = lal.CreateCOMPLEX16TimeSeries( - None, lal.LIGOTimeGPS(0), 0, 0, - lal.DimensionlessUnit, len(fdfilter2.data.data)) - plan = CreateReverseCOMPLEX16FFTPlan(len(fdfilter2.data.data), 0) - lal.COMPLEX16FreqTimeFFT(tdfilter, fdfilter2, plan) - return tdfilter - - -def matched_filter_real(template, psd): - fdfilter = matched_filter_real_fd(template, psd) - tdfilter = lal.CreateREAL8TimeSeries( - None, lal.LIGOTimeGPS(0), 0, 0, - lal.DimensionlessUnit, 2 * (len(fdfilter.data.data) - 1)) - plan = CreateReverseREAL8FFTPlan(len(tdfilter.data.data), 0) - lal.REAL8FreqTimeFFT(tdfilter, fdfilter, plan) - return tdfilter - - -def exp_i(phi): - return np.cos(phi) + np.sin(phi) * 1j - - -def truncated_ifft(y, nsamples_out=None): - """Truncated inverse FFT. - - See http://www.fftw.org/pruned.html for a discussion of related algorithms. - - Perform inverse FFT to obtain truncated autocorrelation time series. - This makes use of a folded DFT for a speedup of - - log(nsamples)/log(nsamples_out) - - over directly computing the inverse FFT and truncating. Here is how it - works. Say we have a frequency-domain signal X[k], for 0 ≤ k ≤ N - 1. We - want to compute its DFT x[n], for 0 ≤ n ≤ M, where N is divisible by M: - N = cM, for some integer c. The DFT is: - - N - 1 - ______ - \ 2 π i k n - x[n] = \ exp[-----------] Y[k] - / N - /------ - k = 0 - - c - 1 M - 1 - ______ ______ - \ \ 2 π i n (m c + j) - = \ \ exp[------------------] Y[m c + j] - / / c M - /------ /------ - j = 0 m = 0 - - c - 1 M - 1 - ______ ______ - \ 2 π i n j \ 2 π i n m - = \ exp[-----------] \ exp[-----------] Y[m c + j] - / N / M - /------ /------ - j = 0 m = 0 - - So: we split the frequency series into c deinterlaced sub-signals, each of - length M, compute the DFT of each sub-signal, and add them back together - with complex weights. - - Parameters - ---------- - y : `numpy.ndarray` - Complex input vector. - nsamples_out : int, optional - Length of output vector. By default, same as length of input vector. 
- - Returns - ------- - x : `numpy.ndarray` - The first nsamples_out samples of the IFFT of x, zero-padded if - - - First generate the IFFT of a random signal: - >>> nsamples_out = 1024 - >>> y = np.random.randn(nsamples_out) + np.random.randn(nsamples_out) * 1j - >>> plan = CreateReverseCOMPLEX16FFTPlan(nsamples_out, 0) - >>> freq = lal.CreateCOMPLEX16Vector(nsamples_out) - >>> freq.data = y - >>> time = lal.CreateCOMPLEX16Vector(nsamples_out) - >>> _ = lal.COMPLEX16VectorFFT(time, freq, plan) - >>> x = time.data - - Now check that the truncated IFFT agrees: - >>> np.allclose(x, truncated_ifft(y), rtol=1e-15) - True - >>> np.allclose(x, truncated_ifft(y, 1024), rtol=1e-15) - True - >>> np.allclose(x[:128], truncated_ifft(y, 128), rtol=1e-15) - True - >>> np.allclose(x[:1], truncated_ifft(y, 1), rtol=1e-15) - True - >>> np.allclose(x[:32], truncated_ifft(y, 32), rtol=1e-15) - True - >>> np.allclose(x[:63], truncated_ifft(y, 63), rtol=1e-15) - True - >>> np.allclose(x[:25], truncated_ifft(y, 25), rtol=1e-15) - True - >>> truncated_ifft(y, 1025) - Traceback (most recent call last): - ... - ValueError: Input is too short: you gave me an input of length 1024, but you asked for an IFFT of length 1025. - """ - nsamples = len(y) - if nsamples_out is None: - nsamples_out = nsamples - elif nsamples_out > nsamples: - raise ValueError( - 'Input is too short: you gave me an input of length {0}, ' - 'but you asked for an IFFT of length {1}.'.format( - nsamples, nsamples_out)) - elif nsamples & (nsamples - 1): - raise NotImplementedError( - 'I am too lazy to implement for nsamples that is ' - 'not a power of 2.') - - # Find number of FFTs. - # FIXME: only works if nsamples is a power of 2. - # Would be better to find the smallest divisor of nsamples that is - # greater than or equal to nsamples_out. - nsamples_batch = int(ceil_pow_2(nsamples_out)) - c = nsamples // nsamples_batch - - # FIXME: Implement for real-to-complex FFTs as well. - plan = CreateReverseCOMPLEX16FFTPlan(nsamples_batch, 0) - input_workspace = lal.CreateCOMPLEX16Vector(nsamples_batch) - output_workspace = lal.CreateCOMPLEX16Vector(nsamples_batch) - twiddle = exp_i(2 * np.pi * np.arange(nsamples_batch) / nsamples) - - j = c - 1 - input_workspace.data = y[j::c] - lal.COMPLEX16VectorFFT(output_workspace, input_workspace, plan) - x = output_workspace.data.copy() # Make sure this is a deep copy - - for j in range(c-2, -1, -1): - input_workspace.data = y[j::c] - lal.COMPLEX16VectorFFT(output_workspace, input_workspace, plan) - x *= twiddle # FIXME: check stability of this recurrence relation. - x += output_workspace.data - - # Now need to truncate remaining samples. - if nsamples_out < nsamples_batch: - x = x[:nsamples_out] - - return x - - -def get_approximant_and_orders_from_string(s): - """Determine the approximant, amplitude order, and phase order for a string - of the form "TaylorT4threePointFivePN". In this example, the waveform is - "TaylorT4" and the phase order is 7 (twice 3.5). If the input contains the - substring "restricted" or "Restricted", then the amplitude order is taken - to be 0. Otherwise, the amplitude order is the same as the phase order.""" - # SWIG-wrapped functions apparently do not understand Unicode, but - # often the input argument will come from a Unicode XML file. 
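A NumPy-only sketch of the folded IFFT derived in the truncated_ifft docstring above, assuming (as the implementation does) that len(y) is a power of two. np.fft.ifft is normalized by 1/M, so each length-M batch is scaled back up to match LAL's unnormalized reverse FFT:

    import numpy as np

    def truncated_ifft_np(y, nsamples_out):
        # First nsamples_out samples of the unnormalized inverse FFT of y.
        nsamples = len(y)
        nsamples_batch = int(2 ** np.ceil(np.log2(nsamples_out)))
        c = nsamples // nsamples_batch
        twiddle = np.exp(2j * np.pi * np.arange(nsamples_batch) / nsamples)
        x = np.zeros(nsamples_batch, dtype=complex)
        for j in range(c - 1, -1, -1):
            # Horner-style accumulation over the c deinterlaced sub-signals.
            x = x * twiddle + nsamples_batch * np.fft.ifft(y[j::c])
        return x[:nsamples_out]

    y = np.random.randn(1024) + 1j * np.random.randn(1024)
    full = len(y) * np.fft.ifft(y)   # unnormalized IFFT, LAL sign convention
    assert np.allclose(full[:100], truncated_ifft_np(y, 100))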
- s = str(s) - approximant = lalsimulation.GetApproximantFromString(s) - try: - phase_order = lalsimulation.GetOrderFromString(s) - except RuntimeError: - phase_order = -1 - if 'restricted' in s or 'Restricted' in s: - amplitude_order = 0 - else: - amplitude_order = phase_order - return approximant, amplitude_order, phase_order - - -def get_f_lso(mass1, mass2): - """Calculate the GW frequency during the last stable orbit of a compact - binary.""" - return 1 / (6 ** 1.5 * np.pi * (mass1 + mass2) * lal.MTSUN_SI) - - -def sngl_inspiral_psd(waveform, mass1, mass2, f_min=10, f_max=2048, f_ref=0, **kwargs): - # FIXME: uberbank mass criterion. Should find a way to get this from - # pipeline output metadata. - if waveform == 'o1-uberbank': - log.warn('Template is unspecified; using ER8/O1 uberbank criterion') - if mass1 + mass2 < 4: - waveform = 'TaylorF2threePointFivePN' - else: - waveform = 'SEOBNRv2_ROM_DoubleSpin' - elif waveform == 'o2-uberbank': - log.warn('Template is unspecified; using ER10/O2 uberbank criterion') - if mass1 + mass2 < 4: - waveform = 'TaylorF2threePointFivePN' - else: - waveform = 'SEOBNRv4_ROM' - approx, ampo, phaseo = get_approximant_and_orders_from_string(waveform) - log.info('Selected template: %s', waveform) - - # Generate conditioned template. - params = lal.CreateDict() - lalsimulation.SimInspiralWaveformParamsInsertPNPhaseOrder(params, phaseo) - lalsimulation.SimInspiralWaveformParamsInsertPNAmplitudeOrder(params, ampo) - hplus, hcross = lalsimulation.SimInspiralFD( - m1=float(mass1) * lal.MSUN_SI, m2=float(mass2) * lal.MSUN_SI, - S1x=float(kwargs.get('spin1x') or 0), - S1y=float(kwargs.get('spin1y') or 0), - S1z=float(kwargs.get('spin1z') or 0), - S2x=float(kwargs.get('spin2x') or 0), - S2y=float(kwargs.get('spin2y') or 0), - S2z=float(kwargs.get('spin2z') or 0), - distance=1e6*lal.PC_SI, inclination=0, phiRef=0, - longAscNodes=0, eccentricity=0, meanPerAno=0, - deltaF=0, f_min=f_min, f_max=f_max, f_ref=f_ref, - LALparams=params, approximant=approx) - - # Force `plus' and `cross' waveform to be in quadrature. - h = 0.5 * (hplus.data.data + 1j * hcross.data.data) - - # For inspiral-only waveforms, nullify frequencies beyond ISCO. - # FIXME: the waveform generation functions pick the end frequency - # automatically. Shouldn't SimInspiralFD? - inspiral_only_waveforms = ( - lalsimulation.TaylorF2, - lalsimulation.SpinTaylorF2, - lalsimulation.TaylorF2RedSpin, - lalsimulation.TaylorF2RedSpinTidal, - lalsimulation.SpinTaylorT4Fourier) - if approx in inspiral_only_waveforms: - h[abscissa(hplus) >= get_f_lso(mass1, mass2)] = 0 - - # Drop Nyquist frequency. - if len(h) % 2: - h = h[:-1] - - # Create output frequency series. - psd = lal.CreateREAL8FrequencySeries( - 'signal PSD', 0, hplus.f0, hcross.deltaF, hplus.sampleUnits**2, len(h)) - psd.data.data = abs2(h) - - # Done! - return psd - - -def signal_psd_series(H, S): - n = H.data.data.size - f = H.f0 + np.arange(1, n) * H.deltaF - ret = lal.CreateREAL8FrequencySeries( - 'signal PSD / noise PSD', 0, H.f0, H.deltaF, lal.DimensionlessUnit, n) - ret.data.data[0] = 0 - ret.data.data[1:] = H.data.data[1:] / S(f) - return ret - - -def autocorrelation(H, out_duration): - """ - Calculate the complex autocorrelation sequence a(t), for t >= 0, of an - inspiral signal. - - Parameters - ---------- - H : lal.REAL8FrequencySeries - Signal PSD series. - S : callable - Noise power spectral density function. - - Returns - ------- - acor : `numpy.ndarray` - The complex-valued autocorrelation sequence. 
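A quick worked example of get_f_lso above (values approximate): with lal.MTSUN_SI ≈ 4.9255e-6 s, a canonical 1.4 + 1.4 Msun binary gives 1 / (6**1.5 * pi * 2.8 * 4.9255e-6 s) ≈ 1570 Hz:

    import lal
    import numpy as np

    def get_f_lso(mass1, mass2):
        # GW frequency at the last stable orbit; masses in solar masses.
        return 1 / (6 ** 1.5 * np.pi * (mass1 + mass2) * lal.MTSUN_SI)

    print(round(get_f_lso(1.4, 1.4)))  # ~1570 Hz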
- """ - - # Compute duration of template, rounded up to a power of 2. - H_len = H.data.data.size - nsamples = 2 * H_len - sample_rate = nsamples * H.deltaF - - # Compute autopower spectral density. - power = np.empty(nsamples, H.data.data.dtype) - power[:H_len] = H.data.data - power[H_len:] = 0 - - # Determine length of output FFT. - nsamples_out = int(np.ceil(out_duration * sample_rate)) - - acor = truncated_ifft(power, nsamples_out) - acor /= np.abs(acor[0]) - - # If we have done this right, then the zeroth sample represents lag 0 - assert np.argmax(np.abs(acor)) == 0 - assert np.isreal(acor[0]) - - # Done! - return acor, float(sample_rate) - - -def generate_template(mass1, mass2, S, f_low, sample_rate, template_duration, - approximant, amplitude_order, phase_order): - template_length = sample_rate * template_duration - if approximant == lalsimulation.TaylorF2: - zf, _ = lalsimulation.SimInspiralChooseFDWaveform( - 0, 1 / template_duration, - mass1 * lal.MSUN_SI, mass2 * lal.MSUN_SI, - 0, 0, 0, 0, 0, 0, f_low, 0, 0, 1e6 * lal.PC_SI, - 0, 0, 0, None, None, amplitude_order, phase_order, approximant) - lal.ResizeCOMPLEX16FrequencySeries(zf, 0, template_length // 2 + 1) - - # Generate over-whitened template - psd = lal.CreateREAL8FrequencySeries( - None, zf.epoch, zf.f0, zf.deltaF, - lal.DimensionlessUnit, len(zf.data.data)) - psd.data.data = S(abscissa(psd)) - zW = matched_filter_spa(zf, psd) - elif approximant == lalsimulation.TaylorT4: - hplus, hcross = lalsimulation.SimInspiralChooseTDWaveform( - 0, 1 / sample_rate, - mass1 * lal.MSUN_SI, mass2 * lal.MSUN_SI, - 0, 0, 0, 0, 0, 0, - f_low, f_low, - 1e6 * lal.PC_SI, - 0, 0, 0, - None, None, - amplitude_order, - phase_order, - approximant) - - ht = lal.CreateREAL8TimeSeries( - None, lal.LIGOTimeGPS(-template_duration), hplus.f0, hplus.deltaT, - hplus.sampleUnits, template_length) - hf = lal.CreateCOMPLEX16FrequencySeries( - None, lal.LIGOTimeGPS(0), 0, 0, lal.DimensionlessUnit, - template_length // 2 + 1) - plan = CreateForwardREAL8FFTPlan(template_length, 0) - - ht.data.data[:-len(hplus.data.data)] = 0 - ht.data.data[-len(hplus.data.data):] = hplus.data.data - lal.REAL8TimeFreqFFT(hf, ht, plan) - - psd = lal.CreateREAL8FrequencySeries( - None, hf.epoch, hf.f0, hf.deltaF, - lal.DimensionlessUnit, len(hf.data.data)) - psd.data.data = S(abscissa(psd)) - - zWreal = matched_filter_real(hf, psd) - - ht.data.data[:-len(hcross.data.data)] = 0 - ht.data.data[-len(hcross.data.data):] = hcross.data.data - - lal.REAL8TimeFreqFFT(hf, ht, plan) - zWimag = matched_filter_real(hf, psd) - - zW = lal.CreateCOMPLEX16TimeSeries( - None, zWreal.epoch, zWreal.f0, zWreal.deltaT, zWreal.sampleUnits, - len(zWreal.data.data)) - zW.data.data = zWreal.data.data + zWimag.data.data * 1j - else: - raise ValueError("unrecognized approximant") - return (zW.data.data[::-1].conj() * np.sqrt(2) - * template_duration / sample_rate / 2) - - -def abs2(y): - """Return the |z|^2 for a complex number z.""" - return np.square(y.real) + np.square(y.imag) - - -# -# Lanczos interpolation -# - - -def lanczos(t, a): - """The Lanczos kernel.""" - return np.sinc(t) * np.sinc(t / a) - - -def lanczos_interpolant(t, y): - """An interpolant constructed by convolution of the Lanczos kernel with - a set of discrete samples at unit intervals.""" - a = len(y) // 2 - return sum(lanczos(t - i + a, a) * yi for i, yi in enumerate(y)) - - -def lanczos_interpolant_utility_func(t, y): - """Utility function for Lanczos interpolation.""" - return -abs2(lanczos_interpolant(t, y)) - - -def 
interpolate_max_lanczos(imax, y, window_length): - """Find the time and maximum absolute value of a time series by Lanczos - interpolation.""" - yi = y[imax-window_length:imax+window_length+1] - tmax = optimize.fminbound( - lanczos_interpolant_utility_func, -1., 1., (yi,), xtol=1e-5) - tmax = np.asscalar(tmax) - ymax = np.asscalar(lanczos_interpolant(tmax, yi)) - return imax + tmax, ymax - - -# -# Catmull-Rom spline interpolation -# - - -def poly_catmull_rom(y): - return np.poly1d([ - -0.5 * y[0] + 1.5 * y[1] - 1.5 * y[2] + 0.5 * y[3], - y[0] - 2.5 * y[1] + 2 * y[2] - 0.5 * y[3], - -0.5 * y[0] + 0.5 * y[2], - y[1] - ]) - - -def interpolate_max_catmull_rom_even(y): - - # Construct Catmull-Rom interpolating polynomials for - # real and imaginary parts - poly_re = poly_catmull_rom(y.real) - poly_im = poly_catmull_rom(y.imag) - - # Find the roots of d(|y|^2)/dt as approximated - roots = (poly_re * poly_re.deriv() + poly_im * poly_im.deriv()).r - - # Find which of the two matched interior points has a greater magnitude - t_max = 0. - y_max = y[1] - y_max_abs2 = abs2(y_max) - - new_t_max = 1. - new_y_max = y[2] - new_y_max_abs2 = abs2(new_y_max) - - if new_y_max_abs2 > y_max_abs2: - t_max = new_t_max - y_max = new_y_max - y_max_abs2 = new_y_max_abs2 - - # Find any real root in (0, 1) that has a magnitude greater than the - # greatest endpoint - for root in roots: - if np.isreal(root) and 0 < root < 1: - new_t_max = root - new_y_max = poly_re(new_t_max) + poly_im(new_t_max) * 1j - new_y_max_abs2 = abs2(new_y_max) - if new_y_max_abs2 > y_max_abs2: - t_max = new_t_max - y_max = new_y_max - y_max_abs2 = new_y_max_abs2 - - # Done - return t_max, y_max - - -def interpolate_max_catmull_rom(imax, y, window_length): - t_max, y_max = interpolate_max_catmull_rom_even(y[imax - 2:imax + 2]) - y_max_abs2 = abs2(y_max) - t_max = t_max - 1 - - new_t_max, new_y_max = interpolate_max_catmull_rom_even( - y[imax - 1:imax + 3]) - new_y_max_abs2 = abs2(new_y_max) - - if new_y_max_abs2 > y_max_abs2: - t_max = new_t_max - y_max = new_y_max - y_max_abs2 = new_y_max_abs2 - - return imax + t_max, y_max - - -# -# Quadratic fit -# - - -def interpolate_max_quadratic_fit(imax, y, window_length): - """Quadratic fit to absolute value of y. Note that this one does not alter - the value at the maximum.""" - - poly = np.polyfit( - np.arange(-window_length, window_length + 1.), - np.abs(y[imax - window_length:imax + window_length + 1]), - 2) - - # Find which of the two matched interior points has a greater magnitude - t_max = -1. - y_max = y[imax - 1] - y_max_abs2 = abs2(y_max) - - new_t_max = 1. 
- new_y_max = y[imax + 1] - new_y_max_abs2 = abs2(new_y_max) - - if new_y_max_abs2 > y_max_abs2: - t_max = new_t_max - y_max = new_y_max - y_max_abs2 = new_y_max_abs2 - - # Determine if the global extremum of the polynomial is a - # local maximum in (-1, 1) - A, B, C = poly - new_t_max = -0.5 * B / A - new_y_max_abs2 = np.square(np.polyval(poly, new_t_max)) - if -1 < new_t_max < 1 and new_y_max_abs2 > y_max_abs2: - t_max = new_t_max - - return imax + t_max, y[imax] - - -# -# Nearest neighbor interpolation -# - - -def interpolate_max_nearest_neighbor(imax, y, window_length): - """Trivial, nearest-neighbor interpolation""" - return imax, y[imax] - - -# -# Set default interpolation scheme -# - - -__interpolants = { - 'catmull-rom': interpolate_max_catmull_rom, - 'lanczos': interpolate_max_lanczos, - 'nearest-neighbor': interpolate_max_nearest_neighbor, - 'quadratic-fit': interpolate_max_quadratic_fit} - - -def interpolate_max(imax, y, window_length, method='catmull-rom'): - return __interpolants[method](imax, y, window_length) - - -from lalinference.bayestar.deprecation import warn -warn('ligo.skymap.bayestar.filter') diff --git a/lalinference/python/lalinference/bayestar/ligolw.py b/lalinference/python/lalinference/bayestar/ligolw.py deleted file mode 100755 index 2cf8ceb8bc..0000000000 --- a/lalinference/python/lalinference/bayestar/ligolw.py +++ /dev/null @@ -1,133 +0,0 @@ -# -# Copyright (C) 2013-2017 Leo Singer -# -# This program is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by the -# Free Software Foundation; either version 2 of the License, or (at your -# option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General -# Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -# -""" -LIGO-LW convenience functions. -""" - -# LIGO-LW XML imports. -from glue.ligolw.utils import process as ligolw_process -from glue.ligolw import ligolw -from glue.ligolw import array as ligolw_array -from glue.ligolw import param as ligolw_param -from glue.ligolw import table as ligolw_table -from glue.ligolw import lsctables -from lalinspiral.inspinjfind import InspiralSCExactCoincDef - - -def get_template_bank_f_low(xmldoc): - """Determine the low frequency cutoff from a template bank file, - whether the template bank was produced by lalapps_tmpltbank or - lalapps_cbc_sbank. 
bayestar_sim_to_tmpltbank does not have a command
-    line option for the low-frequency cutoff; instead, it is recorded in a
-    row of the search_summvars table."""
-    try:
-        template_bank_f_low, = ligolw_process.get_process_params(
-            xmldoc, 'tmpltbank', '--low-frequency-cutoff')
-    except ValueError:
-        try:
-            template_bank_f_low, = ligolw_process.get_process_params(
-                xmldoc, 'lalapps_cbc_sbank', '--flow')
-        except ValueError:
-            try:
-                search_summvars_table = ligolw_table.get_table(
-                    xmldoc, lsctables.SearchSummVarsTable.tableName)
-                template_bank_f_low, = (
-                    search_summvars.value
-                    for search_summvars in search_summvars_table
-                    if search_summvars.name == 'low-frequency cutoff')
-            except ValueError:
-                raise ValueError("Could not determine low-frequency cutoff")
-    return template_bank_f_low
-
-
-def sim_coinc_and_sngl_inspirals_for_xmldoc(xmldoc):
-    """Retrieve (as a generator) all of the
-    (sim_inspiral, coinc_event, (sngl_inspiral, sngl_inspiral,
-    ..., sngl_inspiral)) tuples from found coincidences in a LIGO-LW XML
-    document."""
-
-    # Look up necessary tables.
-    coinc_table = ligolw_table.get_table(
-        xmldoc, lsctables.CoincTable.tableName)
-    coinc_def_table = ligolw_table.get_table(
-        xmldoc, lsctables.CoincDefTable.tableName)
-    coinc_map_table = ligolw_table.get_table(
-        xmldoc, lsctables.CoincMapTable.tableName)
-
-    # Look up coinc_def ids.
-    sim_coinc_def_id = coinc_def_table.get_coinc_def_id(
-        InspiralSCExactCoincDef.search,
-        InspiralSCExactCoincDef.search_coinc_type,
-        create_new=False)
-
-    def events_for_coinc_event_id(coinc_event_id):
-        for coinc_map in coinc_map_table:
-            if coinc_map.coinc_event_id == coinc_event_id:
-                for row in ligolw_table.get_table(
-                        xmldoc, coinc_map.table_name):
-                    column_name = coinc_map.event_id.column_name
-                    if getattr(row, column_name) == coinc_map.event_id:
-                        yield coinc_map.event_id, row
-
-    # Loop over all coinc_event <-> sim_inspiral coincs.
-    for sim_coinc in coinc_table:
-
-        # If this is not a coinc_event <-> sim_inspiral coinc, skip it.
-        if sim_coinc.coinc_def_id != sim_coinc_def_id:
-            continue
-
-        # Locate the sim_inspiral and coinc events.
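        # (Editor's annotation, not part of the original file: the loop below
        # expects exactly one sim_inspiral row and exactly one coinc_event
        # row per simulation coincidence, and raises RuntimeError otherwise.)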
- sim_inspiral = None - coinc = None - for event_id, event in events_for_coinc_event_id( - sim_coinc.coinc_event_id): - if event_id.table_name == ligolw_table.Table.TableName( - lsctables.SimInspiralTable.tableName): - if sim_inspiral is not None: - raise RuntimeError( - 'Found more than one matching sim_inspiral entry') - sim_inspiral = event - elif event_id.table_name == ligolw_table.Table.TableName( - lsctables.CoincTable.tableName): - if coinc is not None: - raise RuntimeError( - 'Found more than one matching coinc entry') - coinc = event - else: - raise RuntimeError( - "Did not expect coincidence to contain an event of " - "type '{}'".format(event_id.table_name)) - - sngl_inspirals = tuple( - event for event_id, event in events_for_coinc_event_id( - coinc.coinc_event_id)) - - yield sim_inspiral, coinc, sngl_inspirals - - -@lsctables.use_in -class LSCTablesContentHandler(ligolw.LIGOLWContentHandler): - """Content handler for reading LIGO-LW XML files with LSC table schema.""" - - -@ligolw_array.use_in -@ligolw_param.use_in -@lsctables.use_in -class LSCTablesAndSeriesContentHandler(ligolw.LIGOLWContentHandler): - """Content handler for reading LIGO-LW XML files with LSC table schema and - gridded arrays.""" diff --git a/lalinference/python/lalinference/bayestar/postprocess.py b/lalinference/python/lalinference/bayestar/postprocess.py deleted file mode 100644 index 276a062f7e..0000000000 --- a/lalinference/python/lalinference/bayestar/postprocess.py +++ /dev/null @@ -1,977 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (C) 2013-2017 Leo Singer -# -# This program is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by the -# Free Software Foundation; either version 2 of the License, or (at your -# option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General -# Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -# -""" -Postprocessing utilities for HEALPix sky maps -""" -from __future__ import division -from __future__ import print_function -import collections -import pkg_resources - -from astropy import constants -from astropy.coordinates import (CartesianRepresentation, SkyCoord, - UnitSphericalRepresentation) -from astropy import units as u -from astropy.wcs import WCS -import healpy as hp -import numpy as np -from scipy.interpolate import interp1d -import six - -from .. import distance -from .. import moc -from ..healpix_tree import * - -try: - pkg_resources.require('numpy >= 1.10.0') -except pkg_resources.VersionConflict: - """FIXME: in numpy < 1.10.0, the digitize() function only works on 1D - arrays. Remove this workaround once we require numpy >= 1.10.0. 
- - Example: - >>> digitize([3], np.arange(5)) - array([4]) - >>> digitize(3, np.arange(5)) - array(4) - >>> digitize([[3]], np.arange(5)) - array([[4]]) - >>> digitize([], np.arange(5)) - array([], dtype=int64) - >>> digitize([[], []], np.arange(5)) - array([], shape=(2, 0), dtype=int64) - """ - def digitize(x, *args, **kwargs): - x_flat = np.ravel(x) - x_shape = np.shape(x) - if len(x_flat) == 0: - return np.zeros(x_shape, dtype=np.intp) - else: - return np.digitize(x_flat, *args, **kwargs).reshape(x_shape) - - # FIXME: np.cov() got the aweights argument in version 1.10. - # Until we require numpy >= 1.10, this is copied from the Numpy source. - def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, - aweights=None): - from numpy import array, average, dot, sum - import warnings - - # Check inputs - if ddof is not None and ddof != int(ddof): - raise ValueError( - "ddof must be integer") - - # Handles complex arrays too - m = np.asarray(m) - if m.ndim > 2: - raise ValueError("m has more than 2 dimensions") - - if y is None: - dtype = np.result_type(m, np.float64) - else: - y = np.asarray(y) - if y.ndim > 2: - raise ValueError("y has more than 2 dimensions") - dtype = np.result_type(m, y, np.float64) - - X = array(m, ndmin=2, dtype=dtype) - if not rowvar and X.shape[0] != 1: - X = X.T - if X.shape[0] == 0: - return np.array([]).reshape(0, 0) - if y is not None: - y = array(y, copy=False, ndmin=2, dtype=dtype) - if not rowvar and y.shape[0] != 1: - y = y.T - X = np.concatenate((X, y), axis=0) - - if ddof is None: - if bias == 0: - ddof = 1 - else: - ddof = 0 - - # Get the product of frequencies and weights - w = None - if fweights is not None: - fweights = np.asarray(fweights, dtype=float) - if not np.all(fweights == np.around(fweights)): - raise TypeError( - "fweights must be integer") - if fweights.ndim > 1: - raise RuntimeError( - "cannot handle multidimensional fweights") - if fweights.shape[0] != X.shape[1]: - raise RuntimeError( - "incompatible numbers of samples and fweights") - if any(fweights < 0): - raise ValueError( - "fweights cannot be negative") - w = fweights - if aweights is not None: - aweights = np.asarray(aweights, dtype=float) - if aweights.ndim > 1: - raise RuntimeError( - "cannot handle multidimensional aweights") - if aweights.shape[0] != X.shape[1]: - raise RuntimeError( - "incompatible numbers of samples and aweights") - if any(aweights < 0): - raise ValueError( - "aweights cannot be negative") - if w is None: - w = aweights - else: - w *= aweights - - avg, w_sum = average(X, axis=1, weights=w, returned=True) - w_sum = w_sum[0] - - # Determine the normalization - if w is None: - fact = X.shape[1] - ddof - elif ddof == 0: - fact = w_sum - elif aweights is None: - fact = w_sum - ddof - else: - fact = w_sum - ddof*sum(w*aweights)/w_sum - - if fact <= 0: - warnings.warn("Degrees of freedom <= 0 for slice", - RuntimeWarning, stacklevel=2) - fact = 0.0 - - X -= avg[:, None] - if w is None: - X_T = X.T - else: - X_T = (X*w).T - c = dot(X, X_T.conj()) - c *= 1. / np.float64(fact) - return c.squeeze() -else: - cov = np.cov - digitize = np.digitize - - -def flood_fill(nside, ipix, m, nest=False): - """Stack-based flood fill algorithm in HEALPix coordinates. - Based on . - """ - # Initialize stack with starting pixel index. - stack = [ipix] - while stack: - # Pop last pixel off of the stack. - ipix = stack.pop() - # Is this pixel in need of filling? - if m[ipix]: - # Fill in this pixel. - m[ipix] = False - # Find the pixels neighbors. 
- neighbors = hp.get_all_neighbours(nside, ipix, nest=nest) - # All pixels have up to 8 neighbors. If a pixel has less than 8 - # neighbors, then some entries of the array are set to -1. We - # have to skip those. - neighbors = neighbors[neighbors != -1] - # Push neighboring pixels onto the stack. - stack.extend(neighbors) - - -def count_modes(m, nest=False): - """Count the number of modes in a binary HEALPix image by repeatedly - applying the flood-fill algorithm. - - WARNING: The input array is clobbered in the process.""" - npix = len(m) - nside = hp.npix2nside(npix) - for nmodes in range(npix): - nonzeroipix = np.flatnonzero(m) - if len(nonzeroipix): - flood_fill(nside, nonzeroipix[0], m, nest=nest) - else: - break - return nmodes - - -def count_modes_moc(uniq, i): - n = len(uniq) - mask = np.concatenate((np.ones(i + 1, dtype=bool), - np.zeros(n - i - 1, dtype=bool))) - sky_map = np.rec.fromarrays((uniq, mask), names=('UNIQ', 'MASK')) - sky_map = moc.rasterize(sky_map)['MASK'] - return count_modes(sky_map, nest=True) - - -def indicator(n, i): - """Create a binary array of length n that is True for every index that is in - i and False for every other index. Named after the indicator function.""" - m = np.zeros(n, dtype=np.bool) - np.put(m, i, True) - return m - - -def cos_angle_distance(theta0, phi0, theta1, phi1): - """Cosine of angular separation in radians between two points on the - unit sphere.""" - cos_angle_distance = ( - np.cos(phi1 - phi0) * np.sin(theta0) * np.sin(theta1) - + np.cos(theta0) * np.cos(theta1)) - return np.clip(cos_angle_distance, -1, 1) - - -def angle_distance(theta0, phi0, theta1, phi1): - """Angular separation in radians between two points on the unit sphere.""" - return np.arccos(cos_angle_distance(theta0, phi0, theta1, phi1)) - - -# Class to hold return value of find_injection method -FoundInjection = collections.namedtuple( - 'FoundInjection', - 'searched_area searched_prob offset searched_modes contour_areas ' - 'area_probs contour_modes searched_prob_dist contour_dists ' - 'searched_vol searched_prob_vol contour_vols') - - -def find_injection_moc(sky_map, true_ra=None, true_dec=None, true_dist=None, - contours=(), areas=(), modes=False, nest=False): - """ - Given a sky map and the true right ascension and declination (in radians), - find the smallest area in deg^2 that would have to be searched to find the - source, the smallest posterior mass, and the angular offset in degrees from - the true location to the maximum (mode) of the posterior. Optionally, also - compute the areas of and numbers of modes within the smallest contours - containing a given total probability. - """ - - if (true_ra is None) ^ (true_dec is None): - raise ValueError('Both true_ra and true_dec must be provided or None') - - contours = np.asarray(contours) - - distmean = sky_map.meta.get('distmean', np.nan) - - # Sort the pixels by descending posterior probability. - sky_map = np.flipud(np.sort(sky_map, order='PROBDENSITY')) - - # Find the pixel that contains the injection. 
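    # (Editor's annotation, not part of the original file: in the NUNIQ
    # indexing scheme used by multi-order sky maps, a cell of HEALPix order
    # `order` and nested index `ipix` is encoded as
    #     uniq = ipix + 4 * 4**order,
    # so the bit shift below maps each cell to its first descendant at the
    # finest order before locating the injected position.)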
-    order, ipix = moc.uniq2nest(sky_map['UNIQ'])
-    max_order = np.max(order)
-    max_nside = hp.order2nside(max_order)
-    max_ipix = ipix << np.uint64(2 * (max_order - order))
-    ipix = ipix.astype(np.int64)
-    max_ipix = max_ipix.astype(np.int64)
-    if true_ra is not None:
-        true_theta = 0.5 * np.pi - true_dec
-        true_phi = true_ra
-        true_pix = hp.ang2pix(max_nside, true_theta, true_phi, nest=True)
-        i = np.argsort(max_ipix)
-        true_idx = i[digitize(true_pix, max_ipix[i]) - 1]
-
-    # Find the angular offset between the mode and true locations.
-    mode_theta, mode_phi = hp.pix2ang(
-        hp.order2nside(order[0]), ipix[0].astype(np.int64), nest=True)
-    if true_ra is None:
-        offset = np.nan
-    else:
-        offset = np.rad2deg(
-            angle_distance(true_theta, true_phi, mode_theta, mode_phi))
-
-    # Calculate the cumulative area in deg2 and the cumulative probability.
-    dA = moc.uniq2pixarea(sky_map['UNIQ'])
-    dP = sky_map['PROBDENSITY'] * dA
-    prob = np.cumsum(dP)
-    area = np.cumsum(dA) * np.square(180 / np.pi)
-
-    # Construct linear interpolants to map between probability and area.
-    # This allows us to compute more accurate contour areas and probabilities
-    # under the approximation that the pixels have constant probability
-    # density.
-    prob_padded = np.concatenate(([0], prob))
-    area_padded = np.concatenate(([0], area))
-    # FIXME: we should use the assume_sorted=True argument below, but
-    # it was added in Scipy 0.14.0, and our Scientific Linux 7 clusters
-    # only have Scipy 0.12.1.
-    prob_for_area = interp1d(area_padded, prob_padded)
-    area_for_prob = interp1d(prob_padded, area_padded)
-
-    if true_ra is None:
-        searched_area = searched_prob = np.nan
-    else:
-        # Find the smallest area that would have to be searched to find
-        # the true location.
-        searched_area = area[true_idx]
-
-        # Find the smallest posterior mass that would have to be searched
-        # to find the true location.
-        searched_prob = prob[true_idx]
-
-    # Find the contours of the given credible levels.
-    contour_idxs = digitize(contours, prob) - 1
-
-    # For each of the given confidence levels, compute the area of the
-    # smallest region containing that probability.
-    contour_areas = area_for_prob(contours).tolist()
-
-    # For each listed area, find the probability contained within the
-    # smallest credible region of that area.
-    area_probs = prob_for_area(areas).tolist()
-
-    if modes:
-        if true_ra is None:
-            searched_modes = np.nan
-        else:
-            # Count up the number of modes in each of the given contours.
-            searched_modes = count_modes_moc(sky_map['UNIQ'], true_idx)
-        contour_modes = [
-            count_modes_moc(sky_map['UNIQ'], i) for i in contour_idxs]
-    else:
-        searched_modes = np.nan
-        contour_modes = np.nan
-
-    # Distance stats now...
-    if 'DISTMU' in sky_map.dtype.names:
-        probdensity = sky_map['PROBDENSITY']
-        mu = sky_map['DISTMU']
-        sigma = sky_map['DISTSIGMA']
-        norm = sky_map['DISTNORM']
-        args = (dP, mu, sigma, norm)
-        if true_dist is None:
-            searched_prob_dist = np.nan
-        else:
-            searched_prob_dist = distance.marginal_cdf(true_dist, *args)
-        # FIXME: old versions of Numpy can't handle passing zero-length
-        # arrays to generalized ufuncs. Remove this workaround once LIGO
-        # Data Grid clusters provide a more modern version of Numpy.
-        if len(contours) == 0:
-            contour_dists = []
-        else:
-            lo, hi = distance.marginal_ppf(
-                np.row_stack((
-                    0.5 * (1 - contours),
-                    0.5 * (1 + contours)
-                )), *args)
-            contour_dists = (hi - lo).tolist()
-
-        # Set up distance grid.
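        # (Editor's annotation, not part of the original file: for example,
        # with distmean = 100 Mpc and no injection beyond the grid, the lines
        # below give max_r = 600 Mpc and a radial step of d_r = 0.6 Mpc.)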
-        n_r = 1000
-        max_r = 6 * distmean
-        if true_dist is not None and true_dist > max_r:
-            max_r = true_dist
-        d_r = max_r / n_r
-        r = d_r * np.arange(1, n_r)
-
-        # Calculate volume of frustum-shaped voxels with distance centered
-        # on r and extending from (r - d_r) to (r + d_r).
-        dV = (np.square(r) + np.square(d_r) / 12) * d_r * dA.reshape(-1, 1)
-
-        # Calculate probability within each voxel.
-        dP = probdensity.reshape(-1, 1) * dV * np.exp(
-            -0.5 * np.square(
-                (r.reshape(1, -1) - mu.reshape(-1, 1)) / sigma.reshape(-1, 1)
-            )
-        ) * (norm / sigma).reshape(-1, 1) / np.sqrt(2 * np.pi)
-        dP[np.isnan(dP)] = 0  # Suppress invalid values
-
-        # Calculate probability density per unit volume.
-        dP_dV = dP / dV
-        i = np.flipud(np.argsort(dP_dV.ravel()))
-
-        P_flat = np.cumsum(dP.ravel()[i])
-        V_flat = np.cumsum(dV.ravel()[i])
-
-        contour_vols = interp1d(
-            P_flat, V_flat, bounds_error=False)(contours).tolist()
-        P = np.empty_like(P_flat)
-        V = np.empty_like(V_flat)
-        P[i] = P_flat
-        V[i] = V_flat
-        P = P.reshape(dP.shape)
-        V = V.reshape(dV.shape)
-        if true_dist is None:
-            searched_vol = searched_prob_vol = np.nan
-        else:
-            i_radec = true_idx
-            i_dist = digitize(true_dist, r) - 1
-            searched_prob_vol = P[i_radec, i_dist]
-            searched_vol = V[i_radec, i_dist]
-    else:
-        searched_vol = searched_prob_vol = searched_prob_dist = np.nan
-        contour_dists = [np.nan] * len(contours)
-        contour_vols = [np.nan] * len(contours)
-
-    # Done.
-    return FoundInjection(
-        searched_area, searched_prob, offset, searched_modes, contour_areas,
-        area_probs, contour_modes, searched_prob_dist, contour_dists,
-        searched_vol, searched_prob_vol, contour_vols)
-
-
-def _norm(vertices):
-    return np.sqrt(np.sum(np.square(vertices), -1))
-
-
-def _adjacent_triangle_areas(vertices):
-    return 0.5 * _norm(np.cross(
-        np.roll(vertices, -1, axis=0) - vertices,
-        np.roll(vertices, +1, axis=0) - vertices))
-
-
-def _simplify(vertices, min_area):
-    """Visvalingam's algorithm (see http://bost.ocks.org/mike/simplify/)
-    for linear rings on a sphere. This is a naive, slow implementation."""
-    area = _adjacent_triangle_areas(vertices)
-
-    while True:
-        i_min_area = np.argmin(area)
-        if area[i_min_area] > min_area:
-            break
-
-        vertices = np.delete(vertices, i_min_area, axis=0)
-        area = np.delete(area, i_min_area)
-        new_area = _adjacent_triangle_areas(vertices)
-        area = np.maximum(area, new_area)
-
-    return vertices
-
-
-def _vec2radec(vertices, degrees=False):
-    theta, phi = hp.vec2ang(np.asarray(vertices))
-    ret = np.column_stack((phi % (2 * np.pi), 0.5 * np.pi - theta))
-    if degrees:
-        ret = np.rad2deg(ret)
-    return ret
-
-
-def contour(m, levels, nest=False, degrees=False, simplify=True):
-    try:
-        import networkx as nx
-    except ImportError:
-        raise RuntimeError('This function requires the networkx package.')
-
-    # Determine HEALPix resolution.
-    npix = len(m)
-    nside = hp.npix2nside(npix)
-    min_area = 0.4 * hp.nside2pixarea(nside)
-
-    # Compute faces, vertices, and neighbors.
-    # vertices is an N x 3 array of the distinct vertices of the HEALPix
-    # faces. faces is an npix x 4 array mapping HEALPix faces to their
-    # vertices. neighbors is an npix x 4 array mapping faces to their
-    # nearest neighbors.
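    # (Editor's annotation, not part of the original file: HEALPix pixels
    # are quadrilaterals, and hp.get_all_neighbours returns 8 entries per
    # pixel in the order SW, W, NW, N, NE, E, SE, S; the [::2] stride below
    # keeps the four edge-sharing neighbors and drops the corner-touching
    # ones.)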
- faces = np.ascontiguousarray( - np.rollaxis(hp.boundaries(nside, np.arange(npix), nest=nest), 2, 1)) - dtype = faces.dtype - faces = faces.view(np.dtype((np.void, dtype.itemsize * 3))) - vertices, faces = np.unique(faces.ravel(), return_inverse=True) - faces = faces.reshape(-1, 4) - vertices = vertices.view(dtype).reshape(-1, 3) - neighbors = hp.get_all_neighbours(nside, np.arange(npix), nest=nest)[::2].T - - # Loop over the requested contours. - paths = [] - for level in levels: - - # Find credible region - indicator = (m >= level) - - # Construct a graph of the edges of the contour. - graph = nx.Graph() - face_pairs = set() - for ipix1, ipix2 in enumerate(neighbors): - for ipix2 in ipix2: - # Determine if we have already considered this pair of faces. - new_face_pair = frozenset((ipix1, ipix2)) - if new_face_pair in face_pairs: - continue - face_pairs.add(new_face_pair) - - # Determine if this pair of faces are on a boundary of the - # credible level. - if indicator[ipix1] == indicator[ipix2]: - continue - - # Add all common edges of this pair of faces. - i1 = np.concatenate((faces[ipix1], [faces[ipix1][0]])) - i2 = np.concatenate((faces[ipix2], [faces[ipix2][0]])) - edges1 = frozenset(frozenset(_) for _ in zip(i1[:-1], i1[1:])) - edges2 = frozenset(frozenset(_) for _ in zip(i2[:-1], i2[1:])) - for edge in edges1 & edges2: - graph.add_edge(*edge) - graph = nx.freeze(graph) - - # Record a closed path for each cycle in the graph. - cycles = [ - np.take(vertices, cycle, axis=0) - for cycle in nx.cycle_basis(graph)] - - # Simplify paths if requested - if simplify: - cycles = [_simplify(cycle, min_area) for cycle in cycles] - cycles = [cycle for cycle in cycles if len(cycle) > 2] - - # Convert to lists - cycles = [ - _vec2radec(cycle, degrees=degrees).tolist() for cycle in cycles] - - # Add to output paths - paths.append([cycle + [cycle[0]] for cycle in cycles]) - - return paths - - -def find_greedy_credible_levels(p, ranking=None): - """Find the greedy credible levels of a (possibly multi-dimensional) array. - - Parameters - ---------- - - p : np.ndarray - The input array, typically a HEALPix image. - - ranking : np.ndarray, optional - The array to rank in order to determine the greedy order. - The default is `p` itself. - - Returns - ------- - - cls : np.ndarray - An array with the same shape as `p`, with values ranging from `0` - to `p.sum()`, representing the greedy credible level to which each - entry in the array belongs. 
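    Example (editor's illustration, not part of the original file; numpy's
    print formatting may differ between versions):

    >>> p = np.asarray([0.125, 0.5, 0.25, 0.125])
    >>> find_greedy_credible_levels(p)
    array([1.   , 0.5  , 0.75 , 0.875])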
- """ - p = np.asarray(p) - pflat = p.ravel() - if ranking is None: - ranking = pflat - else: - ranking = np.ravel(ranking) - i = np.flipud(np.argsort(ranking)) - cs = np.cumsum(pflat[i]) - cls = np.empty_like(pflat) - cls[i] = cs - return cls.reshape(p.shape) - - -def _get_detector_location(ifo): - if isinstance(ifo, six.string_types): - try: - import lalsimulation - except ImportError: - raise RuntimeError('Looking up detectors by name ' - 'requires the lalsimulation package.') - ifo = lalsimulation.DetectorPrefixToLALDetector(ifo) - try: - ifo = ifo.location - except AttributeError: - pass - return ifo - - -def get_detector_pair_axis(ifo1, ifo2, gmst): - ifo1 = _get_detector_location(ifo1) - ifo2 = _get_detector_location(ifo2) - - n = ifo2 - ifo1 - light_travel_time = np.sqrt(np.sum(np.square(n))) / constants.c.value - (theta,), (phi,) = hp.vec2ang(n) - pole_ra = (gmst + phi) % (2 * np.pi) - pole_dec = 0.5 * np.pi - theta - return pole_ra, pole_dec, light_travel_time - - -def rotate_map_to_axis(m, ra, dec, nest=False, method='direct'): - """Rotate a sky map to place a given line of sight on the +z axis. - - Parameters - ---------- - - m : np.ndarray - The input HEALPix array. - - ra : float - Right ascension of axis in radians. - - To specify the axis in geocentric coordinates, supply ra=(lon + gmst), - where lon is the geocentric longitude and gmst is the Greenwich mean - sidereal time in radians. - - dec : float - Declination of axis in radians. - - To specify the axis in geocentric coordinates, supply dec=lat, - where lat is the geocentric latitude in radians. - - nest : bool, default=False - Indicates whether the input sky map is in nested rather than - ring-indexed HEALPix coordinates (default: ring). - - method : 'direct' or 'fft' - Select whether to use spherical harmonic transformation ('fft') or - direct coordinate transformation ('direct') - - Returns - ------- - - m_rotated : np.ndarray - The rotated HEALPix array. - """ - npix = len(m) - nside = hp.npix2nside(npix) - - theta = 0.5 * np.pi - dec - phi = ra - - if method == 'fft': - if nest: - m = hp.reorder(m, n2r=True) - alm = hp.map2alm(m) - hp.rotate_alm(alm, -phi, -theta, 0.0) - ret = hp.alm2map(alm, nside, verbose=False) - if nest: - ret = hp.reorder(ret, r2n=True) - elif method == 'direct': - R = hp.Rotator( - rot=np.asarray([0, theta, -phi]), - deg=False, inv=False, eulertype='Y') - theta, phi = hp.pix2ang(nside, np.arange(npix), nest=nest) - ipix = hp.ang2pix(nside, *R(theta, phi), nest=nest) - ret = m[ipix] - else: - raise ValueError('Unrecognized method: {0}'.format(method)) - - return ret - - -def polar_profile(m, nest=False): - """Obtain the marginalized polar profile of sky map. - - Parameters - ---------- - - m : np.ndarray - The input HEALPix array. - - nest : bool, default=False - Indicates whether the input sky map is in nested rather than - ring-indexed HEALPix coordinates (default: ring). - - Returns - ------- - - theta : np.ndarray - The polar angles (i.e., the colatitudes) of the isolatitude rings. - - m_int : np.ndarray - The normalized probability density, such that `np.trapz(m_int, theta)` - is approximately `np.sum(m)`. 
- """ - npix = len(m) - nside = hp.npix2nside(npix) - nrings, = hp.pix2ring(nside, np.asarray([npix])) - startpix, ringpix, costheta, sintheta, _ = hp.ringinfo( - nside, np.arange(1, nrings)) - - if nest: - m = hp.reorder(m, n2r=True) - - theta = np.arccos(costheta) - m_int = np.asarray( - [m[i:i+j].sum() * stheta * 0.5 * npix / j - for i, j, stheta in zip(startpix, ringpix, sintheta)]) - - return theta, m_int - - -def smooth_ud_grade(m, nside, nest=False): - """Resample a sky map to a new resolution using bilinear interpolation. - - Parameters - ---------- - - m : np.ndarray - The input HEALPix array. - - nest : bool, default=False - Indicates whether the input sky map is in nested rather than - ring-indexed HEALPix coordinates (default: ring). - - Returns - ------- - - new_m : np.ndarray - The resampled HEALPix array. The sum of `m` is approximately preserved. - """ - npix = hp.nside2npix(nside) - theta, phi = hp.pix2ang(nside, np.arange(npix), nest=nest) - new_m = hp.get_interp_val(m, theta, phi, nest=nest) - return new_m * len(m) / len(new_m) - - -def posterior_mean(prob, nest=False): - npix = len(prob) - nside = hp.npix2nside(npix) - xyz = hp.pix2vec(nside, np.arange(npix), nest=nest) - mean_xyz = np.average(xyz, axis=1, weights=prob) - pos = SkyCoord(*mean_xyz, representation=CartesianRepresentation) - pos.representation = UnitSphericalRepresentation - return pos - - -def posterior_max(prob, nest=False): - npix = len(prob) - nside = hp.npix2nside(npix) - i = np.argmax(prob) - return SkyCoord( - *hp.pix2ang(nside, i, nest=nest, lonlat=True), unit=u.deg) - - -def find_ellipse(prob, cl=90, projection='ARC', nest=False): - """For a HEALPix map, find an ellipse that contains a given probability. - - Parameters - ---------- - prob : np.ndarray - The HEALPix probability map. - cl : float - The desired credible level (default: 90). - projection : str - The WCS projection (default: 'ARC', or zenithal equidistant). - For a list of possible values, see: - http://docs.astropy.org/en/stable/wcs/index.html#supported-projections - nest : bool - HEALPix pixel ordering (default: False, or ring ordering). - - Returns - ------- - ra : float - The ellipse center right ascension in degrees. - dec : float - The ellipse center right ascension in degrees. - pa : float - The position angle of the semimajor axis in degrees. - a : float - The lenth of the semimajor axis in degrees. - b : float - The length o the semiminor axis in degrees. - - Examples - -------- - - I'm not showing the `ra` or `pa` output from the examples below because - the right ascension is arbitary when dec=90° and the position angle is - arbitrary when a=b; their arbitrary values may vary depending on your math - library. Also, I add 0.0 to the outputs because on some platforms you tend - to get values of dec or pa that get rounded to -0.0, which is within - numerical precision but would break the doctests (see - https://stackoverflow.com/questions/11010683). - - Example 1 - ~~~~~~~~~ - - This is an example sky map that is uniform in sin(theta) out to a given - radius in degrees. The 90% credible radius should be 0.9 * radius. (There - will be deviations for small radius due to finite resolution.) - - >>> def make_uniform_in_sin_theta(radius, nside=512): - ... npix = hp.nside2npix(nside) - ... theta, phi = hp.pix2ang(nside, np.arange(npix)) - ... theta_max = np.deg2rad(radius) - ... prob = np.where(theta <= theta_max, 1 / np.sin(theta), 0) - ... return prob / prob.sum() - ... 
- - >>> prob = make_uniform_in_sin_theta(1) - >>> ra, dec, pa, a, b = np.around(find_ellipse(prob), 5) + 0 - >>> print(dec, a, b) - 90.0 0.82241 0.82241 - - >>> prob = make_uniform_in_sin_theta(10) - >>> ra, dec, pa, a, b = np.around(find_ellipse(prob), 5) + 0 - >>> print(dec, a, b) - 90.0 9.05512 9.05512 - - >>> prob = make_uniform_in_sin_theta(120) - >>> ra, dec, pa, a, b = np.around(find_ellipse(prob), 5) + 0 - >>> print(dec, a, b) - 90.0 107.9745 107.9745 - - Example 2 - ~~~~~~~~~ - - These are approximately Gaussian distributions. - - >>> from scipy import stats - >>> def make_gaussian(mean, cov, nside=512): - ... npix = hp.nside2npix(nside) - ... xyz = np.transpose(hp.pix2vec(nside, np.arange(npix))) - ... # FIXME: stats.multivariate_normal was added in scipy 0.14, - ... # but we still need to support an older version on our - ... # Scientific Linux 7 clusters. - ... # - ... # dist = stats.multivariate_normal(mean, cov) - ... # prob = dist.pdf(xyz) - ... if np.ndim(cov) == 0: - ... cov = [cov] * 3 - ... if np.ndim(cov) == 1: - ... cov = np.diag(cov) - ... d = xyz - mean - ... prob = np.exp(-0.5 * (np.linalg.solve(cov, d.T) * d.T).sum(0)) - ... return prob / prob.sum() - ... - - This one is centered at RA=45°, Dec=0° and has a standard deviation of ~1°. - - >>> prob = make_gaussian( - ... [1/np.sqrt(2), 1/np.sqrt(2), 0], - ... np.square(np.deg2rad(1))) - ... - >>> ra, dec, pa, a, b = np.around(find_ellipse(prob), 5) + 0 - >>> print(ra, dec, a, b) - 45.0 0.0 2.14209 2.14209 - - This one is centered at RA=45°, Dec=0°, and is elongated in the north-south - direction. - - >>> prob = make_gaussian( - ... [1/np.sqrt(2), 1/np.sqrt(2), 0], - ... np.diag(np.square(np.deg2rad([1, 1, 10])))) - ... - >>> ra, dec, pa, a, b = np.around(find_ellipse(prob), 5) + 0 - >>> print(ra, dec, pa, a, b) - 45.0 0.0 0.0 13.44746 2.1082 - - This one is centered at RA=0°, Dec=0°, and is elongated in the east-west - direction. - - >>> prob = make_gaussian( - ... [1, 0, 0], - ... np.diag(np.square(np.deg2rad([1, 10, 1])))) - ... - >>> ra, dec, pa, a, b = np.around(find_ellipse(prob), 5) + 0 - >>> print(dec, pa, a, b) - 0.0 90.0 13.4194 2.1038 - - This one is centered at RA=0°, Dec=0°, and is tilted about 10° to the west - of north. - - >>> prob = make_gaussian( - ... [1, 0, 0], - ... [[0.1, 0, 0], - ... [0, 0.1, -0.15], - ... [0, -0.15, 1]]) - ... - >>> ra, dec, pa, a, b = np.around(find_ellipse(prob), 5) + 0 - >>> print(dec, pa, a, b) - 0.0 170.78253 63.82809 34.00824 - - This one is centered at RA=0°, Dec=0°, and is tilted about 10° to the east - of north. - - >>> prob = make_gaussian( - ... [1, 0, 0], - ... [[0.1, 0, 0], - ... [0, 0.1, 0.15], - ... [0, 0.15, 1]]) - ... - >>> ra, dec, pa, a, b = np.around(find_ellipse(prob), 5) + 0 - >>> print(dec, pa, a, b) - 0.0 9.21747 63.82809 34.00824 - - This one is centered at RA=0°, Dec=0°, and is tilted about 80° to the east - of north. - - >>> prob = make_gaussian( - ... [1, 0, 0], - ... [[0.1, 0, 0], - ... [0, 1, 0.15], - ... [0, 0.15, 0.1]]) - ... - >>> ra, dec, pa, a, b = np.around(find_ellipse(prob), 5) + 0 - >>> print(dec, pa, a, b) - 0.0 80.78252 63.82533 34.00677 - - This one is centered at RA=0°, Dec=0°, and is tilted about 80° to the west - of north. - - >>> prob = make_gaussian( - ... [1, 0, 0], - ... [[0.1, 0, 0], - ... [0, 1, -0.15], - ... [0, -0.15, 0.1]]) - ... 
-    >>> ra, dec, pa, a, b = np.around(find_ellipse(prob), 5) + 0
-    >>> print(dec, pa, a, b)
-    0.0 99.21748 63.82533 34.00677
-    """
-    npix = len(prob)
-    nside = hp.npix2nside(npix)
-
-    # Find mean right ascension and declination.
-    xyz0 = (hp.pix2vec(nside, np.arange(npix), nest=nest) * prob).sum(axis=1)
-    (ra,), (dec,) = hp.vec2ang(xyz0, lonlat=True)
-
-    # Construct WCS with the specified projection
-    # and centered on mean direction.
-    w = WCS()
-    w.wcs.crval = [ra, dec]
-    w.wcs.ctype = ['RA---' + projection, 'DEC--' + projection]
-
-    # Transform HEALPix to zenithal equidistant coordinates.
-    xy = w.wcs_world2pix(
-        np.transpose(
-            hp.pix2ang(
-                nside, np.arange(npix), nest=nest, lonlat=True)), 1)
-
-    # Keep only values that were inside the projection.
-    keep = np.logical_and.reduce(np.isfinite(xy), axis=1)
-    xy = xy[keep]
-    prob = prob[keep]
-
-    # Find covariance matrix.
-    c = cov(xy, aweights=prob, rowvar=False)
-
-    # If each point is n-sigma from the center, find n.
-    nsigmas = np.sqrt(np.sum(xy.T * np.linalg.solve(c, xy.T), axis=0))
-
-    # Find the number of sigma that enclose the cl% credible level.
-    i = np.argsort(nsigmas)
-    nsigmas = nsigmas[i]
-    cls = np.cumsum(prob[i])
-    nsigma = np.interp(1e-2 * cl, cls, nsigmas)
-
-    # If the credible level is not within the projection,
-    # then stop here and return all NaNs.
-    if 1e-2 * cl > cls[-1]:
-        return np.nan, np.nan, np.nan, np.nan, np.nan
-
-    # Find the eigendecomposition of the covariance matrix.
-    w, v = np.linalg.eigh(c)
-
-    # Find the semi-minor and semi-major axes.
-    b, a = nsigma * np.sqrt(w)
-
-    # Find the position angle.
-    pa = np.rad2deg(np.arctan2(*v[:, 1]))
-
-    # An ellipse is symmetric under rotations of 180°.
-    # Return the smallest possible positive position angle.
-    pa %= 180
-
-    # Done!
-    return ra, dec, pa, a, b
diff --git a/lalinference/python/lalinference/bayestar/sky_map.py b/lalinference/python/lalinference/bayestar/sky_map.py
index 247b7d422d..73fbc1fb11 100644
--- a/lalinference/python/lalinference/bayestar/sky_map.py
+++ b/lalinference/python/lalinference/bayestar/sky_map.py
@@ -18,371 +18,15 @@
 """
 Convenience function to produce a sky map from LIGO-LW rows.
 """
-from __future__ import division
-
-import inspect
-import itertools
-import logging
-import os
-import sys
 import numpy as np
-import healpy as hp
 from astropy.table import Column, Table
 from astropy import units as u
-from .decorator import with_numpy_random_seed
-from .. import distance
-from . import filter
-from . import timing
 from .. import moc
 from .. import healpix_tree
-from .. import InferenceVCSInfo as vcs_info
-try:
-    from . import _sky_map
-except ImportError:
-    raise ImportError(
-        'Could not import the lalinference.bayestar._sky_map Python C '
-        'extension module. This probably means that LALInference was built '
-        'without HEALPix support. Please install CHEALPix '
-        '(https://sourceforge.net/projects/healpix/files/Healpix_3.30/'
-        'chealpix-3.30.0.tar.gz), rebuild LALInference, and try again.')
-import lal
-import lalsimulation
 
 from lalinference.bayestar.deprecation import warn
 warn('ligo.skymap.bayestar')
 
-log = logging.getLogger('BAYESTAR')
-
-
-def toa_phoa_snr_log_prior(
-        params, min_distance, max_distance, prior_distance_power, max_abs_t):
-    ra, sin_dec, distance, u, twopsi, t = params
-    return (
-        prior_distance_power * np.log(distance)
-        if 0 <= ra < 2*np.pi
-        and -1 <= sin_dec <= 1
-        and min_distance <= distance <= max_distance
-        and -1 <= u <= 1
-        and 0 <= twopsi < 2*np.pi
-        and -max_abs_t <= t <= max_abs_t
-        else -np.inf)
-
-
-@with_numpy_random_seed
-def localize_emcee(
-        logl, loglargs, logp, logpargs, xmin, xmax,
-        nside=-1, chain_dump=None):
-    # Set up sampler
-    import emcee
-    from sky_area.sky_area_clustering import Clustered3DKDEPosterior
-    ntemps = 20
-    nwalkers = 100
-    nburnin = 1000
-    nthin = 10
-    niter = 10000 + nburnin
-    ndim = len(xmin)
-    sampler = emcee.PTSampler(
-        ntemps=ntemps, nwalkers=nwalkers, dim=ndim, logl=logl, logp=logp,
-        loglargs=loglargs, logpargs=logpargs)
-
-    # Draw initial state from multivariate uniform distribution
-    p0 = np.random.uniform(xmin, xmax, (ntemps, nwalkers, ndim))
-
-    # Collect samples. The .copy() is important because PTSampler.sample()
-    # reuses p on every iteration.
-    chain = np.vstack([
-        p[0, :, :].copy() for p, _, _
-        in itertools.islice(
-            sampler.sample(p0, iterations=niter, storechain=False),
-            nburnin, niter, nthin
-        )])
-
-    # Extract polar coordinates. For all likelihoods, the first two
-    # parameters are ra, sin(dec).
-    theta = np.arccos(chain[:, 1])
-    phi = chain[:, 0]
-    dist = chain[:, 2]
-
-    ra = phi
-    dec = 0.5 * np.pi - theta
-    pts = np.column_stack((ra, dec, dist))
-    # Pass a random subset of 1000 points to the KDE, to save time.
-    pts = np.random.permutation(pts)[:1000, :]
-    ckde = Clustered3DKDEPosterior(pts)
-    _, nside, ipix = zip(*ckde._bayestar_adaptive_grid())
-    uniq = (4 * np.square(nside) + ipix).astype(np.uint64)
-
-    pts = np.transpose(hp.pix2vec(nside, ipix, nest=True))
-
-    datasets = [kde.dataset for kde in ckde.kdes]
-    inverse_covariances = [kde.inv_cov for kde in ckde.kdes]
-    weights = ckde.weights
-
-    # Compute marginal probability, conditional mean, and conditional
-    # standard deviation in all directions.
-    probdensity, distmean, diststd = np.transpose(
-        [distance.cartesian_kde_to_moments(
-            pt, datasets, inverse_covariances, weights)
-         for pt in pts])
-
-    # Optionally save posterior sample chain to file.
-    # Read back in with np.load().
-    if chain_dump:
-        # Undo numerical conditioning of distances; convert back to Mpc
-        names = 'ra sin_dec distance cos_inclination twopsi time'.split()[:ndim]
-        np.save(chain_dump, np.rec.fromrecords(chain, names=names))
-
-    # Done!
-    return Table(
-        [uniq, probdensity, distmean, diststd],
-        names='UNIQ PROBDENSITY DISTMEAN DISTSTD'.split())
-
-
-def localize(
-        event, waveform='o2-uberbank', f_low=30.0,
-        min_distance=None, max_distance=None, prior_distance_power=None,
-        cosmology=False, method='toa_phoa_snr', nside=-1, chain_dump=None,
-        enable_snr_series=True, f_high_truncate=0.95):
-    """Convenience function to produce a sky map from LIGO-LW rows. Note that
-    min_distance and max_distance should be in Mpc.
-
-    Returns a 'NESTED' ordering HEALPix image as a Numpy array.
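    A typical invocation, for illustration only (editor's sketch, not part
    of the original file; assumes a coincidence file in a format recognized
    by lalinference.io.events):

    >>> from lalinference.io import events             # doctest: +SKIP
    >>> source = events.open('coinc.xml')              # doctest: +SKIP
    >>> event, = source.values()                       # doctest: +SKIP
    >>> skymap = localize(event)                       # doctest: +SKIP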
- """ - frame = inspect.currentframe() - argstr = inspect.formatargvalues(*inspect.getargvalues(frame)) - start_time = lal.GPSTimeNow() - - singles = event.singles - if not enable_snr_series: - singles = [single for single in singles if single.snr is not None] - - ifos = [single.detector for single in singles] - - # Extract SNRs from table. - snrs = np.ma.asarray([ - np.ma.masked if single.snr is None else single.snr - for single in singles]) - - # Look up physical parameters for detector. - detectors = [lalsimulation.DetectorPrefixToLALDetector(str(ifo)) - for ifo in ifos] - responses = np.asarray([det.response for det in detectors]) - locations = np.asarray([det.location for det in detectors]) / lal.C_SI - - # Power spectra for each detector. - psds = [single.psd for single in singles] - psds = [timing.InterpolatedPSD(filter.abscissa(psd), psd.data.data, - f_high_truncate=f_high_truncate) - for psd in psds] - - log.debug('calculating templates') - H = filter.sngl_inspiral_psd(waveform, f_min=f_low, **event.template_args) - - log.debug('calculating noise PSDs') - HS = [filter.signal_psd_series(H, S) for S in psds] - - # Signal models for each detector. - log.debug('calculating Fisher matrix elements') - signal_models = [timing.SignalModel(_) for _ in HS] - - # Get SNR=1 horizon distances for each detector. - horizons = np.asarray([signal_model.get_horizon_distance() - for signal_model in signal_models]) - - weights = np.ma.asarray([ - 1 / np.square(signal_model.get_crb_toa_uncert(snr)) - for signal_model, snr in zip(signal_models, snrs)]) - - # Center detector array. - locations -= np.sum(locations * weights.reshape(-1, 1), axis=0) / np.sum(weights) - - if cosmology: - log.warn('Enabling cosmological prior. ' - 'This feature is UNREVIEWED.') - - if enable_snr_series: - log.warn('Enabling input of SNR time series. ' - 'This feature is UNREVIEWED.') - snr_series = [single.snr_series for single in singles] - if all(s is None for s in snr_series): - snr_series = None - else: - snr_series = None - - # Maximum barycentered arrival time error: - # |distance from array barycenter to furthest detector| / c + 5 ms. - # For LHO+LLO, this is 15.0 ms. - # For an arbitrary terrestrial detector network, the maximum is 26.3 ms. - max_abs_t = np.max( - np.sqrt(np.sum(np.square(locations), axis=1))) + 0.005 - - if snr_series is None: - log.warn("No SNR time series found, so we are creating a zero-noise " - "SNR time series from the whitened template's autocorrelation " - "sequence. The sky localization uncertainty may be " - "underestimated.") - - acors, sample_rates = zip( - *[filter.autocorrelation(_, max_abs_t) for _ in HS]) - sample_rate = sample_rates[0] - deltaT = 1 / sample_rate - nsamples = len(acors[0]) - assert all(sample_rate == _ for _ in sample_rates) - assert all(nsamples == len(_) for _ in acors) - nsamples = nsamples * 2 - 1 - - snr_series = [] - for acor, single in zip(acors, singles): - series = lal.CreateCOMPLEX8TimeSeries( - 'fake SNR', 0, 0, deltaT, lal.StrainUnit, nsamples) - series.epoch = single.time - 0.5 * (nsamples - 1) * deltaT - acor = np.concatenate((np.conj(acor[:0:-1]), acor)) - series.data.data = single.snr * filter.exp_i(single.phase) * acor - snr_series.append(series) - - # Ensure that all of the SNR time series have the same sample rate. - # FIXME: for now, the Python wrapper expects all of the SNR time sries to - # also be the same length. 
- deltaT = snr_series[0].deltaT - sample_rate = 1 / deltaT - if any(deltaT != series.deltaT for series in snr_series): - raise ValueError('BAYESTAR does not yet support SNR time series with ' - 'mixed sample rates') - - # Ensure that all of the SNR time series have odd lengths. - if any(len(series.data.data) % 2 == 0 for series in snr_series): - raise ValueError('SNR time series must have odd lengths') - - # Trim time series to the desired length. - max_abs_n = int(np.ceil(max_abs_t * sample_rate)) - desired_length = 2 * max_abs_n - 1 - for i, series in enumerate(snr_series): - length = len(series.data.data) - if length > desired_length: - snr_series[i] = lal.CutCOMPLEX8TimeSeries( - series, length // 2 + 1 - max_abs_n, desired_length) - - # FIXME: for now, the Python wrapper expects all of the SNR time sries to - # also be the same length. - nsamples = len(snr_series[0].data.data) - if any(nsamples != len(series.data.data) for series in snr_series): - raise ValueError('BAYESTAR does not yet support SNR time series of ' - 'mixed lengths') - - # Perform sanity checks that the middle sample of the SNR time series match - # the sngl_inspiral records. Relax valid interval slightly from - # +/- 0.5 deltaT to +/- 0.6 deltaT for floating point roundoff error. - for single, series in zip(singles, snr_series): - if np.abs(0.5 * (nsamples - 1) * series.deltaT - + float(series.epoch - single.time)) >= 0.6 * deltaT: - raise ValueError('BAYESTAR expects the SNR time series to be ' - 'centered on the single-detector trigger times') - - # Extract the TOAs in GPS nanoseconds from the SNR time series, assuming - # that the trigger happened in the middle. - toas_ns = [series.epoch.ns() + 1e9 * 0.5 * (len(series.data.data) - 1) - * series.deltaT for series in snr_series] - - # Collect all of the SNR series in one array. - snr_series = np.vstack([series.data.data for series in snr_series]) - - # Center times of arrival and compute GMST at mean arrival time. - # Pre-center in integer nanoseconds to preserve precision of - # initial datatype. - epoch = sum(toas_ns) // len(toas_ns) - toas = 1e-9 * (np.asarray(toas_ns) - epoch) - # FIXME: np.average does not yet support masked arrays. - # Replace with np.average when numpy 1.13.0 is available. - mean_toa = np.sum(toas * weights) / np.sum(weights) - toas -= mean_toa - epoch += int(np.round(1e9 * mean_toa)) - epoch = lal.LIGOTimeGPS(0, int(epoch)) - gmst = lal.GreenwichMeanSiderealTime(epoch) - - # Translate SNR time series back to time of first sample. - toas -= 0.5 * (nsamples - 1) * deltaT - - # If minimum distance is not specified, then default to 0 Mpc. - if min_distance is None: - min_distance = 0 - - # If maximum distance is not specified, then default to the SNR=4 - # horizon distance of the most sensitive detector. - if max_distance is None: - max_distance = max(horizons) / 4 - - # If prior_distance_power is not specified, then default to 2 - # (p(r) ~ r^2, uniform in volume). - if prior_distance_power is None: - prior_distance_power = 2 - - # Raise an exception if 0 Mpc is the minimum effective distance and the - # prior is of the form r**k for k<0 - if min_distance == 0 and prior_distance_power < 0: - raise ValueError(('Prior is a power law r^k with k={}, ' - 'undefined at min_distance=0') - .format(prior_distance_power)) - - # Time and run sky localization. 
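    # (Editor's annotation, not part of the original file: the
    # 'toa_phoa_snr' branch evaluates the sky map with the _sky_map C
    # extension, while 'toa_phoa_snr_mcmc' samples the same likelihood with
    # emcee via localize_emcee above.)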
- log.debug('starting computationally-intensive section') - if method == 'toa_phoa_snr': - skymap, log_bci, log_bsn = _sky_map.toa_phoa_snr( - min_distance, max_distance, prior_distance_power, cosmology, gmst, - sample_rate, toas, snr_series, responses, locations, horizons) - skymap = Table(skymap) - skymap.meta['log_bci'] = log_bci - skymap.meta['log_bsn'] = log_bsn - elif method == 'toa_phoa_snr_mcmc': - skymap = localize_emcee( - logl=_sky_map.log_likelihood_toa_phoa_snr, - loglargs=(gmst, sample_rate, toas, snr_series, responses, locations, - horizons), - logp=toa_phoa_snr_log_prior, - logpargs=(min_distance, max_distance, prior_distance_power, - max_abs_t), - xmin=[0, -1, min_distance, -1, 0, 0], - xmax=[2*np.pi, 1, max_distance, 1, 2*np.pi, 2 * max_abs_t], - nside=nside, chain_dump=chain_dump) - else: - raise ValueError('Unrecognized method: %s' % method) - - # Convert distance moments to parameters - distmean = skymap.columns.pop('DISTMEAN') - diststd = skymap.columns.pop('DISTSTD') - skymap['DISTMU'], skymap['DISTSIGMA'], skymap['DISTNORM'] = \ - distance.moments_to_parameters(distmean, diststd) - - # Add marginal distance moments - good = np.isfinite(distmean) & np.isfinite(diststd) - prob = (moc.uniq2pixarea(skymap['UNIQ']) * skymap['PROBDENSITY'])[good] - distmean = distmean[good] - diststd = diststd[good] - rbar = (prob * distmean).sum() - r2bar = (prob * (np.square(diststd) + np.square(distmean))).sum() - skymap.meta['distmean'] = rbar - skymap.meta['diststd'] = np.sqrt(r2bar - np.square(rbar)) - - log.debug('finished computationally-intensive section') - end_time = lal.GPSTimeNow() - - # Fill in metadata and return. - program, _ = os.path.splitext(os.path.basename(sys.argv[0])) - skymap.meta['creator'] = 'BAYESTAR' - skymap.meta['origin'] = 'LIGO/Virgo' - skymap.meta['vcs_info'] = vcs_info - skymap.meta['gps_time'] = float(epoch) - skymap.meta['runtime'] = float(end_time - start_time) - skymap.meta['instruments'] = {single.detector for single in singles} - skymap.meta['gps_creation_time'] = end_time - skymap.meta['history'] = [ - '', - 'Generated by calling the following Python function:', - '{}.{}{}'.format(__name__, frame.f_code.co_name, argstr), - '', - 'This was the command line that started the program:', - ' '.join([program] + sys.argv[1:])] - - return skymap - def rasterize(skymap): skymap = Table(moc.rasterize(skymap), meta=skymap.meta) @@ -410,11 +54,3 @@ def derasterize(skymap): column.unit = old_unit skymap.add_column(Column(uniq, name='UNIQ'), 0) return skymap - - -def test(): - """Run BAYESTAR C unit tests. - >>> test() - 0 - """ - return int(_sky_map.test()) diff --git a/lalinference/python/lalinference/bayestar/timing.py b/lalinference/python/lalinference/bayestar/timing.py deleted file mode 100644 index e18c164232..0000000000 --- a/lalinference/python/lalinference/bayestar/timing.py +++ /dev/null @@ -1,215 +0,0 @@ -# -*- coding: utf-8 -# -# Copyright (C) 2013-2015 Leo Singer -# -# This program is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by the -# Free Software Foundation; either version 2 of the License, or (at your -# option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General -# Public License for more details. 
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-#
-from __future__ import division
-"""
-Functions for predicting timing accuracy of matched filters.
-"""
-
-
-import logging
-import numpy as np
-import lal
-import lalsimulation
-from scipy import interpolate
-from scipy import linalg
-
-from lalinference.bayestar.deprecation import warn
-warn('ligo.skymap.bayestar.filter')
-
-log = logging.getLogger('BAYESTAR')
-
-
-_noise_psd_funcs = {}
-
-
-class vectorize_swig_psd_func(object):
-    """Create a vectorized Numpy function from a SWIG-wrapped PSD function.
-    SWIG does not provide enough information for Numpy to determine the
-    number of input arguments, so we can't just use np.vectorize."""
-
-    def __init__(self, str):
-        self.__func = getattr(lalsimulation, str + 'Ptr')
-        self.__npyfunc = np.frompyfunc(getattr(lalsimulation, str), 1, 1)
-
-    def __call__(self, f):
-        fa = np.asarray(f)
-        df = np.diff(fa)
-        if fa.ndim == 1 and df.size > 1 and np.all(df[0] == df[1:]):
-            fa = np.concatenate((fa, [fa[-1] + df[0]]))
-            ret = lal.CreateREAL8FrequencySeries(
-                None, 0, fa[0], df[0], lal.DimensionlessUnit, fa.size)
-            lalsimulation.SimNoisePSD(ret, 0, self.__func)
-            ret = ret.data.data[:-1]
-        else:
-            ret = self.__npyfunc(f)
-        if not np.isscalar(ret):
-            ret = ret.astype(float)
-        return ret
-
-
-for _ifos, _func in (
-    (("H1", "H2", "L1", "I1"), 'SimNoisePSDaLIGOZeroDetHighPower'),
-    (("V1",), 'SimNoisePSDAdvVirgo'),
-    (("K1",), 'SimNoisePSDKAGRA')
-):
-    _func = vectorize_swig_psd_func(_func)
-    for _ifo in _ifos:
-        _noise_psd_funcs[_ifo] = _func
-
-
-def get_noise_psd_func(ifo):
-    """Find a function that describes the given interferometer's noise PSD."""
-    return _noise_psd_funcs[ifo]
-
-
-class InterpolatedPSD(interpolate.interp1d):
-    """Create a (linear in log-log) interpolating function for a discretely
-    sampled power spectrum S(f)."""
-
-    def __init__(self, f, S, f_high_truncate=1.0):
-        assert f_high_truncate <= 1.0
-        f = np.asarray(f)
-        S = np.asarray(S)
-
-        # Exclude DC if present
-        if f[0] == 0:
-            f = f[1:]
-            S = S[1:]
-        # FIXME: This is a hack to fix an issue with the detection pipeline's
-        # PSD conditioning. Remove this when the issue is fixed upstream.
-        if f_high_truncate < 1.0:
-            log.warn(
-                'Truncating PSD at %g of maximum frequency to suppress '
-                'rolloff artifacts. This option may be removed in the future.',
-                f_high_truncate)
-            keep = (f <= f_high_truncate * max(f))
-            f = f[keep]
-            S = S[keep]
-        super(InterpolatedPSD, self).__init__(
-            np.log(f), np.log(S),
-            kind='linear', bounds_error=False, fill_value=np.inf)
-        self._f_min = min(f)
-        self._f_max = max(f)
-
-    def __call__(self, f):
-        f_min = np.min(f)
-        f_max = np.max(f)
-        if f_min < self._f_min:
-            log.warn('Assuming PSD is infinite at %g Hz because PSD is only '
-                     'sampled down to %g Hz', f_min, self._f_min)
-        if f_max > self._f_max:
-            log.warn('Assuming PSD is infinite at %g Hz because PSD is only '
-                     'sampled up to %g Hz', f_max, self._f_max)
-        return np.where(
-            (f >= self._f_min) & (f <= self._f_max),
-            np.exp(super(InterpolatedPSD, self).__call__(np.log(f))), np.inf)
-
-
-class SignalModel(object):
-    """Class to speed up computation of signal/noise-weighted integrals and
-    Barankin and Cramér-Rao lower bounds on time and phase estimation.
-
-
-    Note that the autocorrelation series and the moments are related,
-    as shown below.
- - Create signal model: - >>> from . import filter - >>> sngl = lambda: None - >>> H = filter.sngl_inspiral_psd( - ... 'TaylorF2threePointFivePN', mass1=1.4, mass2=1.4) - >>> S = get_noise_psd_func('H1') - >>> W = filter.signal_psd_series(H, S) - >>> sm = SignalModel(W) - - Compute one-sided autocorrelation function: - >>> out_duration = 0.1 - >>> a, sample_rate = filter.autocorrelation(W, out_duration) - - Restore negative time lags using symmetry: - >>> a = np.concatenate((a[:0:-1].conj(), a)) - - Compute the first 2 frequency moments by taking derivatives of the - autocorrelation sequence using centered finite differences. - The nth frequency moment should be given by (-1j)^n a^(n)(t). - >>> acor_moments = [] - >>> for i in range(2): - ... acor_moments.append(a[len(a) // 2]) - ... a = -0.5j * sample_rate * (a[2:] - a[:-2]) - >>> assert np.all(np.isreal(acor_moments)) - >>> acor_moments = np.real(acor_moments) - - Compute the first 2 frequency moments using this class. - >>> quad_moments = [sm.get_sn_moment(i) for i in range(2)] - - Compare them. - >>> for i, (am, qm) in enumerate(zip(acor_moments, quad_moments)): - ... assert np.allclose(am, qm, rtol=0.05) - """ - - def __init__(self, h): - """Create a TaylorF2 signal model with the given masses, PSD function - S(f), PN amplitude order, and low-frequency cutoff.""" - - # Find indices of first and last nonzero samples. - nonzero = np.flatnonzero(h.data.data) - first_nonzero = nonzero[0] - last_nonzero = nonzero[-1] - - # Frequency sample points - self.dw = 2 * np.pi * h.deltaF - f = h.f0 + h.deltaF * np.arange(first_nonzero, last_nonzero + 1) - self.w = 2 * np.pi * f - - # Throw away leading and trailing zeros. - h = h.data.data[first_nonzero:last_nonzero + 1] - - self.denom_integrand = 4 / (2 * np.pi) * h - self.den = np.trapz(self.denom_integrand, dx=self.dw) - - def get_horizon_distance(self, snr_thresh=1): - return np.sqrt(self.den) / snr_thresh - - def get_sn_average(self, func): - """Get the average of a function of angular frequency, weighted by the - signal to noise per unit angular frequency.""" - num = np.trapz(func(self.w) * self.denom_integrand, dx=self.dw) - return num / self.den - - def get_sn_moment(self, power): - """Get the average of angular frequency to the given power, weighted by - the signal to noise per unit frequency.""" - return self.get_sn_average(lambda w: w**power) - - def get_crb(self, snr): - """Get the Cramér-Rao bound, or inverse Fisher information matrix, - describing the phase and time estimation covariance.""" - w1 = self.get_sn_moment(1) - w2 = self.get_sn_moment(2) - I = np.asarray(((1, -w1), (-w1, w2))) - return linalg.inv(I) / np.square(snr) - - # FIXME: np.vectorize doesn't work on unbound instance methods. The - # excluded keyword, added in Numpy 1.7, could be used here to exclude the - # zeroth argument, self. 
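    # (Editor's annotation, not part of the original file: np.frompyfunc
    # broadcasts the scalar helper over array-valued `snr`, but returns an
    # object-dtype array; callers that need floats should cast the result,
    # e.g. np.asarray(sm.get_crb_toa_uncert(snrs), dtype=float).)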
- def __get_crb_toa_uncert(self, snr): - return np.sqrt(self.get_crb(snr)[1, 1]) - - def get_crb_toa_uncert(self, snr): - return np.frompyfunc(self.__get_crb_toa_uncert, 1, 1)(snr) diff --git a/lalinference/python/lalinference/io/Makefile.am b/lalinference/python/lalinference/io/Makefile.am index 8010ba988a..837b41cb51 100644 --- a/lalinference/python/lalinference/io/Makefile.am +++ b/lalinference/python/lalinference/io/Makefile.am @@ -3,8 +3,6 @@ MOSTLYCLEANFILES = EXTRA_DIST = include $(top_srcdir)/gnuscripts/lalsuite_python.am -SUBDIRS = events - if HAVE_PYTHON pymoduledir = $(pkgpythondir)/io diff --git a/lalinference/python/lalinference/io/events/Makefile.am b/lalinference/python/lalinference/io/events/Makefile.am deleted file mode 100644 index be5ca6d74e..0000000000 --- a/lalinference/python/lalinference/io/events/Makefile.am +++ /dev/null @@ -1,20 +0,0 @@ -BUILT_SOURCES = -MOSTLYCLEANFILES = -EXTRA_DIST = -include $(top_srcdir)/gnuscripts/lalsuite_python.am - -if HAVE_PYTHON - -pymoduledir = $(pkgpythondir)/io/events - -pymodule_PYTHON = \ - __init__.py \ - base.py \ - detector_disabled.py \ - ligolw.py \ - gracedb.py \ - hdf.py \ - magic.py \ - sqlite.py - -endif diff --git a/lalinference/python/lalinference/io/events/__init__.py b/lalinference/python/lalinference/io/events/__init__.py deleted file mode 100644 index 1dca09ce57..0000000000 --- a/lalinference/python/lalinference/io/events/__init__.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright (C) 2017-2018 Leo Singer -# -# This program is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by the -# Free Software Foundation; either version 2 of the License, or (at your -# option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General -# Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -# -from __future__ import absolute_import -import os -import pkgutil -import six - -__all__ = () - -# Import all symbols from all submodules of this module. -for _, module, _ in pkgutil.iter_modules([os.path.dirname(__file__)]): - six.exec_('from . import {0};' - '__all__ += getattr({0}, "__all__", ());' - 'from .{0} import *'.format(module)) - del module - -# Clean up -del os, pkgutil, six - -from lalinference.bayestar.deprecation import warn -warn('ligo.skymap.io.events') diff --git a/lalinference/python/lalinference/io/events/base.py b/lalinference/python/lalinference/io/events/base.py deleted file mode 100644 index 6ab6e14c7a..0000000000 --- a/lalinference/python/lalinference/io/events/base.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright (C) 2017 Leo Singer -# -# This program is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by the -# Free Software Foundation; either version 2 of the License, or (at your -# option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General -# Public License for more details. 
-# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -# -""" -Base classes for reading events from search pipelines. -""" - -from abc import ABCMeta, abstractproperty -from collections import Mapping -import six - -__all__ = ('EventSource', 'Event', 'SingleEvent') - - -def _fmt(obj, keys): - kvs = ', '.join('{}={!r}'.format(key, getattr(obj, key)) for key in keys) - return '<{}({})>'.format(obj.__class__.__name__, kvs) - - -class EventSource(Mapping): - """Abstraction of a source of coincident events.""" - - def __str__(self): - try: - length = len(self) - except NotImplementedError: - contents = '...' - else: - contents = '...{} items...'.format(length) - return '<{}({{{}}})>'.format(self.__class__.__name__, contents) - - def __repr__(self): - try: - len(self) - except NotImplementedError: - contents = '...' - else: - contents = ', '.join('{}: {!r}'.format(key, value) - for key, value in self.items()) - return '{}({{{}}})'.format(self.__class__.__name__, contents) - - -class Event(six.with_metaclass(ABCMeta)): - """Abstraction of a coincident trigger.""" - - @abstractproperty - def singles(self): - """Tuple of single-detector triggers.""" - raise NotImplementedError - - @abstractproperty - def template_args(self): - """Dictionary of template parameters.""" - raise NotImplementedError - - __str_keys = ('singles',) - - def __str__(self): - return _fmt(self, self.__str_keys) - - __repr__ = __str__ - - -class SingleEvent(six.with_metaclass(ABCMeta)): - """Abstraction of a single-detector trigger.""" - - @abstractproperty - def detector(self): - """Instrument name (e.g. 'H1')""" - raise NotImplementedError - - @abstractproperty - def snr(self): - """Signal to noise ratio (float)""" - raise NotImplementedError - - @abstractproperty - def phase(self): - """Phase on arrival (float)""" - raise NotImplementedError - - @abstractproperty - def time(self): - """Time on arrival (float, GPS)""" - raise NotImplementedError - - @abstractproperty - def zerolag_time(self): - """Time on arrival in zero-lag data, without time slides applied - (float, GPS)""" - raise NotImplementedError - - @abstractproperty - def psd(self): - """PSD (REAL8FrequencySeries)""" - raise NotImplementedError - - @property - def snr_series(self): - """SNR time series (COMPLEX8TimeSeries)""" - return None - - __str_keys = ('detector', 'snr', 'phase', 'time') - - def __str__(self): - keys = self.__str_keys - if self.time != self.zerolag_time: - keys += ('zerolag_time',) - return _fmt(self, keys) - - __repr__ = __str__ - -from lalinference.bayestar.deprecation import warn -warn('ligo.skymap.io.events.base') diff --git a/lalinference/python/lalinference/io/events/detector_disabled.py b/lalinference/python/lalinference/io/events/detector_disabled.py deleted file mode 100644 index f6c0b29d57..0000000000 --- a/lalinference/python/lalinference/io/events/detector_disabled.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright (C) 2017 Leo Singer -# -# This program is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by the -# Free Software Foundation; either version 2 of the License, or (at your -# option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General -# Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -# -""" -Modify events by artificially disabling specified detectors. -""" -from .base import * - -__all__ = ('DetectorDisabledEventSource', 'DetectorDisabledError') - - -class DetectorDisabledError(ValueError): - pass - - -class DetectorDisabledEventSource(EventSource): - - def __init__(self, base_source, disabled_detectors, raises=True): - self.base_source = base_source - self.disabled_detectors = set(disabled_detectors) - self.raises = raises - - def __iter__(self): - return iter(self.base_source) - - def __getitem__(self, key): - return DetectorDisabledEvent(self, self.base_source[key]) - - def __len__(self): - return len(self.base_source) - - -class DetectorDisabledEvent(Event): - - def __init__(self, source, base_event): - self.source = source - self.base_event = base_event - - @property - def singles(self): - disabled_detectors = self.source.disabled_detectors - if self.source.raises: - detectors = {s.detector for s in self.base_event.singles} - if not detectors & disabled_detectors: - raise DetectorDisabledError( - 'Disabling detectors {{{}}} would have no effect on this ' - 'event with detectors {{{}}}'.format( - ' '.join(disabled_detectors), - ' '.join(detectors))) - if not detectors - disabled_detectors: - raise DetectorDisabledError( - 'Disabling detectors {{{}}} would exclude all data for ' - 'this event with detectors {{{}}}'.format( - ' '.join(disabled_detectors), - ' '.join(detectors))) - return tuple(s for s in self.base_event.singles - if s.detector not in disabled_detectors) - - @property - def template_args(self): - return self.base_event.template_args - -open = DetectorDisabledEventSource - -from lalinference.bayestar.deprecation import warn -warn('ligo.skymap.io.events.detector_disabled') diff --git a/lalinference/python/lalinference/io/events/gracedb.py b/lalinference/python/lalinference/io/events/gracedb.py deleted file mode 100644 index 7abef440c7..0000000000 --- a/lalinference/python/lalinference/io/events/gracedb.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright (C) 2017 Leo Singer -# -# This program is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by the -# Free Software Foundation; either version 2 of the License, or (at your -# option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General -# Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -# -""" -Read events from a list of GraceDB IDs. 
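-
-Example (a sketch; the GraceDB ID shown is hypothetical):
-
->>> source = GraceDBEventSource(['G123456'])  # doctest: +SKIP
->>> event = source['G123456']                 # doctest: +SKIP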
-""" -from .base import * -from .ligolw import * - -__all__ = ('GraceDBEventSource',) - - -class GraceDBEventSource(EventSource): - - def __init__(self, graceids, client=None): - if client is None: - from ligo.gracedb.rest import GraceDb - client = GraceDb() - self._client = client - self._graceids = graceids - - def __iter__(self): - return iter(self._graceids) - - def __getitem__(self, graceid): - coinc_file = self._client.files(graceid, 'coinc.xml') - psd_file = self._client.files(graceid, 'psd.xml.gz') - event, = LigoLWEventSource( - coinc_file, psd_file=psd_file, coinc_def=None).values() - return event - - def __len__(self): - try: - return len(self._graceids) - except TypeError: - raise NotImplementedError - - -open = GraceDBEventSource - -from lalinference.bayestar.deprecation import warn -warn('ligo.skymap.io.events.gracedb') diff --git a/lalinference/python/lalinference/io/events/hdf.py b/lalinference/python/lalinference/io/events/hdf.py deleted file mode 100644 index 75013987c1..0000000000 --- a/lalinference/python/lalinference/io/events/hdf.py +++ /dev/null @@ -1,237 +0,0 @@ -# Copyright (C) 2017 Leo Singer -# -# This program is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by the -# Free Software Foundation; either version 2 of the License, or (at your -# option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General -# Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -# -""" -Read events from PyCBC-style HDF5 output. -""" -from .base import * -from operator import itemgetter -from itertools import groupby -import h5py -import numpy as np -import lal -from glue.segments import segment, segmentlist - -__all__ = ('HDFEventSource',) - - -class _psd_segment(segment): - - def __new__(cls, psd, *args): - return segment.__new__(cls, *args) - - def __init__(self, psd, *args): - self.psd = psd - - -def _hdf_file(f): - if isinstance(f, h5py.File): - return f - elif hasattr(f, 'read') and hasattr(f, 'name'): - return h5py.File(f.name, 'r') - else: - return h5py.File(f, 'r') - - -def _classify_hdf_file(f, sample): - if sample in f: - return 'coincs' - for key, value in f.items(): - if isinstance(value, h5py.Group): - if 'psds' in value: - return 'psds' - if 'snr' in value and 'coa_phase' in value and 'end_time' in value: - return 'triggers' - if 'parameters' in f.attrs: - return 'bank' - raise ValueError('Unrecognized PyCBC file type') - - -class HDFEventSource(EventSource): - - def __init__(self, *files, **kwargs): - sample = kwargs.get('sample', 'foreground') - - # Open the files and split them into coinc files, bank files, psds, - # and triggers. 
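-        # A sketch of the grouping performed below (contents illustrative):
-        # sorting the (type, file) pairs by type and applying groupby yields
-        #     files == {'coincs': [...], 'bank': [...],
-        #               'psds': [...], 'triggers': [...]}
-        # so that each category can be unpacked and validated separately.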
- key = itemgetter(0) - files = [_hdf_file(f) for f in files] - files = sorted( - [(_classify_hdf_file(f, sample), f) for f in files], key=key) - files = {key: list(v[1] for v in value) - for key, value in groupby(files, key)} - - try: - coinc_file, = files['coincs'] - except (KeyError, ValueError): - raise ValueError('You must provide exactly one coinc file.') - try: - bank_file, = files['bank'] - except (KeyError, ValueError): - raise ValueError( - 'You must provide exactly one template bank file.') - try: - psd_files = files['psds'] - except KeyError: - raise ValueError('You must provide PSD files.') - try: - trigger_files = files['triggers'] - except KeyError: - raise ValueError('You must provide trigger files.') - - self._bank = bank_file - - key_prefix = 'detector_' - detector_nums, self._ifos = zip(*sorted( - (int(key[len(key_prefix):]), value) - for key, value in coinc_file.attrs.items() - if key.startswith(key_prefix))) - - coinc_group = coinc_file[sample] - self._timeslide_interval = coinc_file.attrs.get( - 'timeslide_interval', 0) - self._template_ids = coinc_group['template_id'] - self._timeslide_ids = coinc_group.get( - 'timeslide_id', np.zeros(len(self))) - self._trigger_ids = [ - coinc_group['trigger_id{}'.format(detector_num)] - for detector_num in detector_nums] - - triggers = {} - for f in trigger_files: - (ifo, group), = f.items() - triggers[ifo] = [ - group['snr'], group['coa_phase'], group['end_time']] - self._triggers = tuple(triggers[ifo] for ifo in self._ifos) - - psdseglistdict = {} - for psd_file in psd_files: - (ifo, group), = psd_file.items() - psd = [group['psds'][str(i)] for i in range(len(group['psds']))] - psdseglistdict[ifo] = segmentlist( - _psd_segment(*segargs) for segargs in zip( - psd, group['start_time'], group['end_time'])) - self._psds = [psdseglistdict[ifo] for ifo in self._ifos] - - def __getitem__(self, id): - return HDFEvent(self, id) - - def __iter__(self): - return iter(range(len(self))) - - def __len__(self): - return len(self._template_ids) - - -class HDFEvent(Event): - - def __init__(self, source, id): - self._source = source - self._id = id - - @property - def singles(self): - return tuple( - HDFSingleEvent( - ifo, self._id, i, trigger_ids[self._id], - self._source._timeslide_interval, triggers, - self._source._timeslide_ids, psds - ) - for i, (ifo, trigger_ids, triggers, psds) in enumerate(zip( - self._source._ifos, self._source._trigger_ids, - self._source._triggers, self._source._psds - )) - ) - - @property - def template_args(self): - bank = self._source._bank - bank_id = self._source._template_ids[self._id] - return {key: value[bank_id] for key, value in bank.items()} - - -class HDFSingleEvent(SingleEvent): - - def __init__( - self, detector, _coinc_id, _detector_num, _trigger_id, - _timeslide_interval, _triggers, _timeslide_ids, _psds): - self._detector = detector - self._coinc_id = _coinc_id - self._detector_num = _detector_num - self._trigger_id = _trigger_id - self._timeslide_interval = _timeslide_interval - self._triggers = _triggers - self._timeslide_ids = _timeslide_ids - self._psds = _psds - - @property - def detector(self): - return self._detector - - @property - def snr(self): - return self._triggers[0][self._trigger_id] - - @property - def phase(self): - return self._triggers[1][self._trigger_id] - - @property - def time(self): - value = self.zerolag_time - - # PyCBC does not specify which detector is time-shifted in time slides. 
- # Since PyCBC's coincidence format currently supports only two - # detectors, we arbitrarily apply half of the time slide to each - # detector. - shift = self._timeslide_ids[self._coinc_id] * self._timeslide_interval - if self._detector_num == 0: - value -= 0.5 * shift - elif self._detector_num == 1: - value += 0.5 * shift - else: - raise RuntimeError('This line should not be reached') - return value - - @property - def zerolag_time(self): - return self._triggers[2][self._trigger_id] - - @property - def psd(self): - try: - psd = self._psds[self._psds.find(self.zerolag_time)].psd - except ValueError: - raise ValueError( - 'No PSD found for detector {} at zero-lag GPS time {}'.format( - self.detector, self.zerolag_time)) - - dyn_range_fac = psd.file.attrs['dynamic_range_factor'] - flow = psd.file.attrs['low_frequency_cutoff'] - df = psd.attrs['delta_f'] - kmin = int(flow / df) - - fseries = lal.CreateREAL8FrequencySeries( - 'psd', 0, kmin * df, df, - lal.DimensionlessUnit, len(psd.value) - kmin) - fseries.data.data = psd.value[kmin:] / np.square(dyn_range_fac) - return fseries - - -open = HDFEventSource - -from lalinference.bayestar.deprecation import warn -warn('ligo.skymap.io.events.hdf') diff --git a/lalinference/python/lalinference/io/events/ligolw.py b/lalinference/python/lalinference/io/events/ligolw.py deleted file mode 100644 index 85df39664a..0000000000 --- a/lalinference/python/lalinference/io/events/ligolw.py +++ /dev/null @@ -1,287 +0,0 @@ -# Copyright (C) 2017 Leo Singer -# -# This program is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by the -# Free Software Foundation; either version 2 of the License, or (at your -# option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General -# Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -# -""" -Read events from pipedown/GstLal-style XML output. 
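-
-Example (a sketch; the file names are hypothetical):
-
->>> source = LigoLWEventSource('coinc.xml', psd_file='psd.xml.gz')  # doctest: +SKIP
->>> for event_id, event in source.items():  # doctest: +SKIP
-...     print(event_id, event.singles)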
-""" -from .base import * -from ...bayestar.decorator import memoized -from collections import OrderedDict, defaultdict -import errno -import logging -import operator -import os.path -from itertools import groupby -import six -import lal -import lal.series -from lalinspiral.thinca import InspiralCoincDef -from glue.ligolw import array, lsctables, param -from glue.ligolw.ligolw import Element, LIGOLWContentHandler, LIGO_LW -from glue.ligolw.lsctables import ( - CoincDefTable, CoincMapTable, CoincTable, ProcessTable, ProcessParamsTable, - SnglInspiralID, SnglInspiralTable, TimeSlideID, TimeSlideTable) -from glue.ligolw.table import get_table -from glue.ligolw.utils import load_filename, load_fileobj - -__all__ = ('LigoLWEventSource',) - -log = logging.getLogger('BAYESTAR') - - -@array.use_in -@param.use_in -@lsctables.use_in -class _ContentHandler(LIGOLWContentHandler): - pass - - -def _read_xml(f, fallbackpath=None): - if f is None: - doc = filename = None - elif isinstance(f, Element): - doc = f - filename = '' - elif isinstance(f, six.string_types): - try: - doc = load_filename(f, contenthandler=_ContentHandler) - except IOError as e: - if e.errno == errno.ENOENT and fallbackpath and not os.path.isabs(f): - f = os.path.join(fallbackpath, f) - doc = load_filename(f, contenthandler=_ContentHandler) - else: - raise - filename = f - else: - doc, _ = load_fileobj(f, contenthandler=_ContentHandler) - try: - filename = f.name - except AttributeError: - filename = '' - return doc, filename - - -PHASE_CONVENTION_WARNING = """\ -Using anti-FINDCHIRP phase convention; inverting phases. This is currently \ -the default and it is appropriate for gstlal and MBTA but not pycbc as of \ -observing run 1 ("O1"). The default setting is likely to change in the future.\ -""" - - -class LigoLWEventSource(OrderedDict, EventSource): - - def __init__(self, f, psd_file=None, coinc_def=InspiralCoincDef, **kwargs): - doc, filename = _read_xml(f) - self._fallbackpath = os.path.dirname(filename) if filename else None - self._psds_for_file = memoized(self._psds_for_file) - super(LigoLWEventSource, self).__init__( - self._make_events(doc, psd_file, coinc_def)) - - _template_keys = '''mass1 mass2 - spin1x spin1y spin1z spin2x spin2y spin2z'''.split() - - _invert_phases = { - 'pycbc': False, - 'gstlal_inspiral': True, - 'bayestar_realize_coincs': True, - 'MBTAOnline': True - } - - @classmethod - def _phase_convention(cls, program): - try: - return cls._invert_phases[program] - except KeyError: - raise KeyError( - ('The pipeline "{}" is unknown, so the phase ' - 'convention could not be deduced.').format(program)) - - def _psds_for_file(self, f): - doc, _ = _read_xml(f, self._fallbackpath) - return lal.series.read_psd_xmldoc(doc, root_name=None) - - def _make_events(self, doc, psd_file, coinc_def): - # Look up necessary tables. - coinc_table = get_table(doc, CoincTable.tableName) - coinc_map_table = get_table(doc, CoincMapTable.tableName) - sngl_inspiral_table = get_table(doc, SnglInspiralTable.tableName) - try: - time_slide_table = get_table(doc, TimeSlideTable.tableName) - except ValueError: - offsets_by_time_slide_id = None - else: - offsets_by_time_slide_id = time_slide_table.as_dict() - - # Indices to speed up lookups by ID. 
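-        # (For example, a two-detector coincidence ends up mapped as
-        #     event_ids_by_coinc_event_id[coinc_event_id] == (id_H1, id_L1)
-        # and sngl_inspirals_by_event_id[id_H1] recovers the matching row
-        # in constant time; the names id_H1 and id_L1 are illustrative.)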
- key = operator.attrgetter('coinc_event_id') - event_ids_by_coinc_event_id = { - coinc_event_id: - tuple(coinc_map.event_id for coinc_map in coinc_maps) - for coinc_event_id, coinc_maps - in groupby(sorted(coinc_map_table, key=key), key=key)} - sngl_inspirals_by_event_id = { - row.event_id: row for row in sngl_inspiral_table} - - # Filter rows by coinc_def if requested. - if coinc_def is not None: - coinc_def_table = get_table(doc, CoincDefTable.tableName) - coinc_def_ids = { - row.coinc_def_id for row in coinc_def_table - if (row.search, row.search_coinc_type) == - (coinc_def.search, coinc_def.search_coinc_type)} - coinc_table = ( - row for row in coinc_table - if row.coinc_def_id in coinc_def_ids) - - snr_dict = dict(self._snr_series_by_sngl_inspiral(doc)) - - process_table = get_table(doc, ProcessTable.tableName) - program_for_process_id = { - row.process_id: row.program for row in process_table} - - try: - process_params_table = get_table(doc, ProcessParamsTable.tableName) - except ValueError: - psd_filenames_by_process_id = {} - else: - psd_filenames_by_process_id = { - process_param.process_id: process_param.value - for process_param in process_params_table - if process_param.param == '--reference-psd'} - - for coinc in coinc_table: - coinc_event_id = coinc.coinc_event_id - coinc_event_num = int(coinc_event_id) - sngls = [sngl_inspirals_by_event_id[event_id] for event_id - in event_ids_by_coinc_event_id[coinc_event_id]] - if offsets_by_time_slide_id is None and coinc.time_slide_id == TimeSlideID(0): - log.warn( - 'Time slide record is missing for %s, ' - 'guessing that this is zero-lag', coinc.time_slide_id) - offsets = defaultdict(float) - else: - offsets = offsets_by_time_slide_id[coinc.time_slide_id] - - template_args = [ - {key: getattr(sngl, key) for key in self._template_keys} - for sngl in sngls] - if any(d != template_args[0] for d in template_args[1:]): - raise ValueError( - 'Template arguments are not identical for all detectors!') - template_args = template_args[0] - - invert_phases = self._phase_convention( - program_for_process_id[coinc.process_id]) - if invert_phases: - log.warn(PHASE_CONVENTION_WARNING) - - singles = tuple(LigoLWSingleEvent( - self, sngl.ifo, sngl.snr, sngl.coa_phase, - float(sngl.end + offsets[sngl.ifo]), float(sngl.end), - psd_file or psd_filenames_by_process_id.get(sngl.process_id), - snr_dict.get(sngl.event_id), invert_phases) - for sngl in sngls) - - event = LigoLWEvent(coinc_event_num, singles, template_args) - - yield coinc_event_num, event - - @classmethod - def _snr_series_by_sngl_inspiral(cls, doc): - for elem in doc.getElementsByTagName(LIGO_LW.tagName): - try: - if elem.Name != lal.COMPLEX8TimeSeries.__name__: - continue - array.get_array(elem, 'snr') - event_id = param.get_pyvalue(elem, 'event_id') - if not isinstance(event_id, SnglInspiralID): - continue - except (AttributeError, ValueError): - continue - else: - yield event_id, lal.series.parse_COMPLEX8TimeSeries(elem) - - -class LigoLWEvent(Event): - - def __init__(self, id, singles, template_args): - self._id = id - self._singles = singles - self._template_args = template_args - - @property - def singles(self): - return self._singles - - @property - def template_args(self): - return self._template_args - - -class LigoLWSingleEvent(SingleEvent): - - def __init__(self, source, detector, snr, phase, time, zerolag_time, - psd_file, snr_series, invert_phases): - self._source = source - self._detector = detector - self._snr = snr - self._phase = phase - self._time = time - 
self._zerolag_time = zerolag_time - self._psd_file = psd_file - self._snr_series = snr_series - self._invert_phases = invert_phases - - @property - def detector(self): - return self._detector - - @property - def snr(self): - return self._snr - - @property - def phase(self): - value = self._phase - if value is not None and self._invert_phases: - value *= -1 - return value - - @property - def time(self): - return self._time - - @property - def zerolag_time(self): - return self._zerolag_time - - @property - def psd(self): - return self._source._psds_for_file(self._psd_file)[self._detector] - - @property - def snr_series(self): - value = self._snr_series - if self._invert_phases and value is not None: - value = lal.CutCOMPLEX8TimeSeries(value, 0, len(value.data.data)) - value.data.data = value.data.data.conj() - return value - - -open = LigoLWEventSource - -from lalinference.bayestar.deprecation import warn -warn('ligo.skymap.io.events.ligolw') diff --git a/lalinference/python/lalinference/io/events/magic.py b/lalinference/python/lalinference/io/events/magic.py deleted file mode 100644 index 019b30ed6e..0000000000 --- a/lalinference/python/lalinference/io/events/magic.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright (C) 2017 Leo Singer -# -# This program is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by the -# Free Software Foundation; either version 2 of the License, or (at your -# option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General -# Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -# -""" -Read events from either HDF or LIGO-LW files. -""" -import os -import sqlite3 -from subprocess import check_output - -from glue.ligolw.ligolw import LIGO_LW -import h5py - -from . import hdf, ligolw, sqlite - -__all__ = ('MagicEventSource', 'open') - - -def _get_file_type(f): - """Determine the file type by calling the POSIX ``file`` utility. - - Parameters - ---------- - f : file, str - A file object or the path to a file - - Returns - ------- - filetype : bytes - A string describing the file type - """ - try: - f.read - except AttributeError: - filetype = check_output( - ['file', f], env=dict(os.environ, POSIXLY_CORRECT='1')) - else: - filetype = check_output( - ['file', '-'], env=dict(os.environ, POSIXLY_CORRECT='1'), stdin=f) - f.seek(0) - _, _, filetype = filetype.partition(b': ') - return filetype.strip() - - -def MagicEventSource(f, *args, **kwargs): - """ - Read events from either HDF or LIGO-LW files. The format is determined - using the POSIX `file` command, which determines the file by looking for - 'magic' byte strings (hence the name of this module). 
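-
-    Example (a sketch; the file name is hypothetical):
-
-    >>> source = MagicEventSource('coinc.xml.gz')  # doctest: +SKIP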
-    """
-    if isinstance(f, h5py.File):
-        opener = hdf.open
-    elif isinstance(f, sqlite3.Connection):
-        opener = sqlite.open
-    elif isinstance(f, LIGO_LW):
-        opener = ligolw.open
-    else:
-        filetype = _get_file_type(f)
-        if filetype == b'Hierarchical Data Format (version 5) data':
-            opener = hdf.open
-        elif filetype.startswith(b'SQLite 3.x database'):
-            opener = sqlite.open
-        elif filetype.startswith(b'XML') or filetype.startswith(b'gzip'):
-            opener = ligolw.open
-        else:
-            raise IOError('Unknown file format')
-    return opener(f, *args, **kwargs)
-
-
-open = MagicEventSource
-
-from lalinference.bayestar.deprecation import warn
-warn('ligo.skymap.io.events.magic')
diff --git a/lalinference/python/lalinference/io/events/sqlite.py b/lalinference/python/lalinference/io/events/sqlite.py
deleted file mode 100644
index 4a1accd4a9..0000000000
--- a/lalinference/python/lalinference/io/events/sqlite.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# Copyright (C) 2017 Leo Singer
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the
-# Free Software Foundation; either version 2 of the License, or (at your
-# option) any later version.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
-# Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-#
-"""
-Read events from GstLal-style SQLite output.
-"""
-import os
-import sqlite3
-import sys
-
-from glue.ligolw import dbtables
-
-from ...util import sqlite
-from .ligolw import LigoLWEventSource
-
-__all__ = ('SQLiteEventSource',)
-
-
-class SQLiteEventSource(LigoLWEventSource):
-
-    def __init__(self, f, *args, **kwargs):
-        if isinstance(f, sqlite3.Connection):
-            db = f
-            filename = sqlite.get_filename(f)
-        else:
-            if hasattr(f, 'read'):
-                filename = f.name
-                f.close()
-            else:
-                filename = f
-            db = sqlite.open(filename, 'r')
-        super(SQLiteEventSource, self).__init__(dbtables.get_xml(db),
-                                                *args, **kwargs)
-        self._fallbackpath = os.path.dirname(filename) if filename else None
-
-
-open = SQLiteEventSource
-
-from lalinference.bayestar.deprecation import warn
-warn('ligo.skymap.io.events.sqlite')
diff --git a/lalinference/python/lalinference_average_skymaps.py b/lalinference/python/lalinference_average_skymaps.py
deleted file mode 100644
index 2ac74fad8b..0000000000
--- a/lalinference/python/lalinference_average_skymaps.py
+++ /dev/null
@@ -1,84 +0,0 @@
-#
-# Copyright (C) 2017 Leo Singer
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-"""
-Average several sky maps using the procedure established by the Burst group in:
- * LIGO-T1700050-v7 (https://dcc.ligo.org/LIGO-T1700050-v7)
- * LIGO-T1700078-v2 (https://dcc.ligo.org/LIGO-T1700078-v2)
-
-The sky maps are first upsampled to a common resolution and then averaged
-pixel-by-pixel. Some FITS header values are copied from the first input
-sky map.
-"""
-__author__ = "Leo Singer <leo.singer@ligo.org>"
-
-
-# Command line interface
-import argparse
-from lalinference.bayestar import command
-parser = command.ArgumentParser()
-parser.add_argument(
-    'input', metavar='INPUT.fits[.gz]', type=argparse.FileType('rb'),
-    nargs='+', help='Input FITS files')
-parser.add_argument(
-    '-o', '--output', metavar='OUTPUT.fits[.gz]',
-    help='Output FITS file [default: derived from input filenames]')
-opts = parser.parse_args()
-
-
-# Late imports
-import os.path
-import healpy as hp
-import numpy as np
-from lalinference.io import fits
-
-
-# Read all input sky maps
-probs, metas = zip(*(fits.read_sky_map(file.name, nest=True)
-                     for file in opts.input))
-
-# Determine output HEALPix resolution
-npix = max(len(prob) for prob in probs)
-nside = hp.npix2nside(npix)
-
-# Average sky maps. Upsampling with power=-2 divides each parent pixel's
-# probability evenly among its children, preserving the total probability.
-prob = np.mean([hp.ud_grade(prob, nside,
-                            order_in='NESTED', order_out='NESTED',
-                            power=-2) for prob in probs], axis=0)
-
-# Describe the processing of this file
-history = ['Arithmetic mean of the following {} sky maps:'.format(len(probs))]
-history += [' ' + file.name for file in opts.input]
-
-# Create metadata dictionary for FITS header
-meta = dict(creator=parser.prog, nest=True, history=history)
-
-# Copy some header values from the first sky map
-copy_keys = ('objid', 'url', 'instruments', 'gps_time')
-meta.update({key: metas[0][key] for key in copy_keys if key in metas[0]})
-
-# Determine output filename
-if opts.output is not None:
-    output = opts.output
-else:
-    output = '_plus_'.join(os.path.split(file.name)[1].partition('.fits')[0]
-                           for file in opts.input) + '.fits.gz'
-
-# Write output filename to stdout
-print(output)
-
-# Write output file
-fits.write_sky_map(output, prob, **meta)
--
GitLab