...
 
......@@ -3,7 +3,7 @@
#
AC_INIT([gstlal-burst],[0.1.0],[gstlal-discuss@ligo.org],[gstlal-burst])
AC_INIT([gstlal-burst],[0.1.1],[gstlal-discuss@ligo.org],[gstlal-burst])
AC_COPYRIGHT([Copyright (C) The authors (see source code for details)])
# a file whose existence can be used to check that we are in the
# top-level directory of the source tree
......
gstlal-burst (0.1.1) unstable; urgency=low
* Updated gstlal_feature_aggregator, gstlal_feature_monitor to deal with
ligo-scald API change
-- Patrick Godwin <patrick.godwin@ligo.org> Sun, 03 Mar 2019 21:27:15 -0500
gstlal-burst (0.1.0) unstable; urgency=low
* Add feature extraction toolkit
......
......@@ -9,6 +9,7 @@ dist_bin_SCRIPTS = \
gstlal_inspiral_calc_likelihood \
gstlal_inspiral_calc_rank_pdfs \
gstlal_inspiral_coinc_extractor \
gstlal_inspiral_compress_ranking_stat \
gstlal_inspiral_add_dt_dphi_snr_ratio_pdfs \
gstlal_inspiral_create_dt_dphi_snr_ratio_pdfs \
gstlal_inspiral_create_dt_dphi_snr_ratio_pdfs_dag \
......@@ -25,6 +26,7 @@ dist_bin_SCRIPTS = \
gstlal_inspiral_lvalert_sim_equiv \
gstlal_inspiral_lvalert_sngls_plotter \
gstlal_inspiral_lvalert_snrtimeseries_plotter \
gstlal_inspiral_lvalert_uberplotter \
gstlal_inspiral_make_snr_pdf \
gstlal_inspiral_marginalize_likelihood \
gstlal_inspiral_marginalize_likelihoods_online \
......
......@@ -88,7 +88,7 @@ def group_templates(templates, n, overlap = 0):
def parse_command_line():
parser = OptionParser()
parser.add_option("--output-path", metavar = "path", default = ".", help = "Set the path to the directory where output files will be written. Default is \".\".")
parser.add_option("--output-full-bank-file", metavar = "path", default = "gstlal_bank.xml.gz", help = "Set the path to output the bank. Default is gstlal_bank.xml.gz")
parser.add_option("--output-full-bank-file", metavar = "path", help = "Set the path to output the bank.")
parser.add_option("--output-cache", metavar = "file", help = "Set the file name for the output cache.")
parser.add_option("--n", metavar = "count", type = "int", help = "Set the number of templates per output file (required). It will be rounded to make all sub banks approximately the same size.")
parser.add_option("--overlap", default = 0, metavar = "count", type = "int", help = "overlap the templates in each file by this amount, must be even")
......@@ -146,7 +146,8 @@ sngl_inspiral_table = lsctables.SnglInspiralTable.get_table(xmldoc)
# FIXME
#process = ligolw_process.register_to_xmldoc(xmldoc, program = "gstlal_bank_splitter", paramdict = options.__dict__, comment = "Assign template IDs")
ligolw_utils.write_filename(xmldoc, options.output_full_bank_file, gz = options.output_full_bank_file.endswith('gz'), verbose = options.verbose)
if options.output_full_bank_file is not None:
ligolw_utils.write_filename(xmldoc, options.output_full_bank_file, gz = options.output_full_bank_file.endswith('gz'), verbose = options.verbose)
# Bin by Chi
sngl_inspiral_table.sort(key = lambda row: spawaveform.computechi(row.mass1, row.mass2, row.spin1z, row.spin2z))
......
......@@ -253,6 +253,8 @@ def parse_command_line():
parser.add_option_group(group)
group = OptionGroup(parser, "Ranking Statistic Options", "Adjust ranking statistic behaviour")
group.add_option("--cap-singles", action = "store_true", help = "Cap singles to 1 / livetime if computing FAR. No effect otherwise")
group.add_option("--FAR-trialsfactor", metavar = "trials", type = "float", default = 1.0, help = "Add trials factor to FAR before uploading to gracedb")
group.add_option("--chisq-type", metavar = "type", default = "autochisq", help = "Choose the type of chisq computation to perform. Must be one of (autochisq|timeslicechisq). The default is autochisq.")
group.add_option("--coincidence-threshold", metavar = "seconds", type = "float", default = 0.005, help = "Set the coincidence window in seconds (default = 0.005 s). The light-travel time between instruments will be added automatically in the coincidence test.")
group.add_option("--min-instruments", metavar = "count", type = "int", default = 2, help = "Set the minimum number of instruments that must contribute triggers to form a candidate (default = 2).")
......@@ -835,6 +837,8 @@ for output_file_number, (svd_bank_url_dict, output_url, ranking_stat_output_url,
tag = options.job_tag,
kafka_server = options.output_kafka_server,
cluster = True,	# options.data_source in ("lvshm", "framexmit") -- if used in place of True, we only cluster when running online
cap_singles = options.cap_singles,
FAR_trialsfactor = options.FAR_trialsfactor,
verbose = options.verbose
)
if options.verbose:
......
......@@ -48,8 +48,8 @@ N = sum(len(lsctables.SnglInspiralTable.get_table(xmldoc)) for fname, xmldoc in
# N get too close to the number of available IDs to keep the probability of
# two ID sequences being the same small.
assert N < 50000000, "too many templates: increase size of draw space"
ids = sorted(random.sample(xrange(99999999), N))
assert N < 5000000, "too many templates: increase size of draw space"
ids = sorted(random.sample(xrange(9999999), N))
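Why the headroom matters (a sketch of my own, not part of the change): random.sample() already guarantees uniqueness within one draw, so the concern is two independent invocations producing the same sorted sequence; that probability is 1 / C(M, N), which only becomes appreciable as N approaches the pool size M.
import math

def ln_n_subsets(M, N):
	# natural log of C(M, N), the number of distinct sorted ID sequences
	return math.lgamma(M + 1) - math.lgamma(N + 1) - math.lgamma(M - N + 1)

# with the new limits, ln C(9999999, 5000000) ~ 6.9e6, so the collision
# probability exp(-6.9e6) is vanishingly small
print ln_n_subsets(9999999, 5000000)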
# assign the IDs and write back to disk
......
#!/usr/bin/env python
#
# Copyright (C) 2019 Kipp Cannon
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
import math
from optparse import OptionParser
import sys
import numpy
from ligo.lw import utils as ligolw_utils
from gstlal import far
__author__ = "Kipp Cannon <kipp.cannon@ligo.org>"
__version__ = "git id %s" % "" # FIXME
__date__ = "" # FIXME
#
# =============================================================================
#
# Command Line
#
# =============================================================================
#
def parse_command_line():
	parser = OptionParser(
		version = "Name: %%prog\n%s" % ""	# FIXME
	)
	parser.add_option("-t", "--threshold", type = "float", default = 0.03, help = "Only keep horizon distance values that differ by this much, fractionally, from their neighbours (default = 0.03).")
	parser.add_option("--remove-horizon-deviations", action = "store_true", help = "Remove horizon entries that display an uncharacteristic deviation in sensitivity from the non-zero mean.")
	parser.add_option("--deviation-percent", type = "float", default = 0.50, help = "Remove horizon entries that deviate by this fraction from the non-zero mean.")
	parser.add_option("-v", "--verbose", action = "store_true", help = "Be verbose.")
	options, filenames = parser.parse_args()
	process_params = dict(options.__dict__)
	return options, process_params, filenames
#
# =============================================================================
#
# Main
#
# =============================================================================
#
#
# command line
#
options, process_params, filenames = parse_command_line()
#
# loop over ranking statistic files
#
for filename in filenames:
	#
	# load file
	#

	xmldoc = ligolw_utils.load_filename(filename, verbose = options.verbose, contenthandler = far.RankingStat.LIGOLWContentHandler)

	#
	# extract ranking statistic object, and erase from XML tree
	#

	rankingstat, rankingstatpdf = far.parse_likelihood_control_doc(xmldoc)
	if rankingstatpdf is not None and options.verbose:
		print >>sys.stderr, "WARNING: \"%s\" contains a RankingStatPDF object, it is not a pure ranking statistic file, you might be using this program on the wrong files." % filename
	# FIXME: don't hard-code object name
	name = u"gstlal_inspiral_likelihood"
	elem = rankingstat.get_xml_root(xmldoc, name)
	elem.parentNode.removeChild(elem)
	elem.unlink()
	if rankingstatpdf is not None:
		# only present in impure files (see warning above)
		elem = rankingstatpdf.get_xml_root(xmldoc, name)
		elem.parentNode.removeChild(elem)
		elem.unlink()

	#
	# compress horizon distance history.  the outer loop makes a list
	# to ensure no problems modifying the object being iterated over
	#

	abs_ln_thresh = math.log1p(options.threshold)
	for instrument, horizon_history in list(rankingstat.numerator.horizon_history.items()):
		# GPS time / distance pairs
		items = horizon_history.items()
		if options.remove_horizon_deviations:
			values = numpy.array(items)[:,1]
			mean_horizon = values[values != 0].mean()
			items = [item for item in items if item[1] < (mean_horizon * (1. + options.deviation_percent))]
		# compress array
		j = 1
		for i in range(1, len(items) - 1):
			values = items[j - 1][1], items[i][1], items[i + 1][1]
			# remove distances that are non-zero and differ
			# fractionally from both neighbours by less than
			# the selected threshold.  always keep the first
			# and last values
			if values[0] > 0. and values[1] > 0. and values[2] > 0. and abs(math.log(values[1] / values[0])) < abs_ln_thresh and abs(math.log(values[1] / values[2])) < abs_ln_thresh:
				continue
			# remove distances that are 0 and surrounded by 0
			# on both sides (basically the same as the last
			# test, but we can't take log(0)).
			if values == (0., 0., 0.):
				continue
			items[j] = items[i]
			j += 1
		del items[j:]
		if options.verbose:
			print >>sys.stderr, "\"%s\": %s horizon history reduced to %.3g%% of original size" % (filename, instrument, 100. * j / (i + 1.))
		# replace
		rankingstat.numerator.horizon_history[instrument] = type(horizon_history)(items)

	#
	# re-insert into XML tree
	#

	far.gen_likelihood_control_doc(xmldoc, rankingstat, rankingstatpdf)

	#
	# write to disk
	#

	ligolw_utils.write_filename(xmldoc, filename, gz = filename.endswith(".gz"), verbose = options.verbose)
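For reference, a typical invocation (file name assumed, my own illustration):

gstlal_inspiral_compress_ranking_stat --verbose --threshold 0.03 --remove-horizon-deviations --deviation-percent 0.5 0000_rankingstat.xml.gz

And a toy rerun of the keep/drop rule on a plain list of (GPS time, horizon distance) pairs, a sketch of my own that mirrors the loop above:

import math

def compress(items, threshold = 0.03):
	abs_ln_thresh = math.log1p(threshold)
	out = [items[0]]	# always keep the first sample
	for i in range(1, len(items) - 1):
		a, b, c = out[-1][1], items[i][1], items[i + 1][1]
		# drop non-zero samples within the fractional threshold of
		# both neighbours, and zeros surrounded by zeros
		if a > 0. and b > 0. and c > 0. and abs(math.log(b / a)) < abs_ln_thresh and abs(math.log(b / c)) < abs_ln_thresh:
			continue
		if (a, b, c) == (0., 0., 0.):
			continue
		out.append(items[i])
	out.append(items[-1])	# always keep the last sample
	return out

# the slowly-drifting samples are dropped; the jump at t = 5 is kept
print compress([(0, 100.), (1, 100.5), (2, 101.), (3, 100.2), (4, 99.9), (5, 130.), (6, 131.)])
# [(0, 100.0), (4, 99.9), (5, 130.0), (6, 131.0)]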
......@@ -48,6 +48,19 @@ from gstlal import far
from gstlal import lvalert_helper
from gstlal import plotfar
import matplotlib
matplotlib.rcParams.update({
"font.size": 10.0,
"axes.titlesize": 10.0,
"axes.labelsize": 10.0,
"xtick.labelsize": 8.0,
"ytick.labelsize": 8.0,
"legend.fontsize": 8.0,
"figure.dpi": 100,
"savefig.dpi": 100,
"text.usetex": True
})
#
# =============================================================================
......
......@@ -46,16 +46,16 @@ from gstlal import lvalert_helper
from gstlal import plotpsd
from ligo.gracedb import rest as gracedb
plotpsd.matplotlib.rcParams.update({
import matplotlib
matplotlib.rcParams.update({
"font.size": 10.0,
"axes.titlesize": 10.0,
"axes.labelsize": 10.0,
"xtick.labelsize": 8.0,
"ytick.labelsize": 8.0,
"legend.fontsize": 8.0,
"figure.dpi": 300,
"savefig.dpi": 300,
"figure.dpi": 100,
"savefig.dpi": 100,
"text.usetex": True,
"path.simplify": True
})
......
......@@ -46,16 +46,16 @@ from gstlal import plotpsd
from ligo.gracedb import rest as gracedb
plotpsd.matplotlib.rcParams.update({
import matplotlib
matplotlib.rcParams.update({
"font.size": 10.0,
"axes.titlesize": 10.0,
"axes.labelsize": 10.0,
"xtick.labelsize": 8.0,
"ytick.labelsize": 8.0,
"legend.fontsize": 8.0,
"lines.linewidth": 0.75,
"figure.dpi": 300,
"savefig.dpi": 300,
"figure.dpi": 100,
"savefig.dpi": 100,
"text.usetex": True,
"path.simplify": True
})
......@@ -150,7 +150,7 @@ for graceid in graceids:
# PSD plot
#
fig = plotpsd.plot_psds(psds, coinc_xmldoc, plot_width = 2400)
fig = plotpsd.plot_psds(psds, coinc_xmldoc, plot_width = 800)
fig.tight_layout()
filename = "%s_psd.%s" % (graceid, options.format)
......@@ -164,7 +164,7 @@ for graceid in graceids:
# Cumulative SNRs plot
#
fig = plotpsd.plot_cumulative_snrs(psds, coinc_xmldoc, plot_width = 2400)
fig = plotpsd.plot_cumulative_snrs(psds, coinc_xmldoc, plot_width = 800)
fig.tight_layout()
filename = "%s_cumulative_snrs.%s" % (graceid, options.format)
......
......@@ -29,8 +29,8 @@ matplotlib.rcParams.update({
"xtick.labelsize": 10.0,
"ytick.labelsize": 10.0,
"legend.fontsize": 10.0,
"figure.dpi": 600,
"savefig.dpi": 600,
"figure.dpi": 100,
"savefig.dpi": 100,
"text.usetex": True
})
from optparse import OptionParser
......
......@@ -321,6 +321,8 @@ class CoincDatabase(object):
# =============================================================================
#
def sim_get_chirp_eff_dist(sim, instrument):
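	# rescale the effective distance by (mchirp / 1.22 Msun)**(5./6.);
	# 1.22 Msun is approximately the chirp mass of a 1.4+1.4 Msun binary.
	# a stand-in for the sim.get_chirp_eff_dist() row method used by the
	# calls replaced below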
return getattr(sim, "eff_dist_%s" % instrument[0].lower()) * sim.mchirp**(5./6.) / 1.22**(5./6.)
def roman(i, arabics = (1000,900,500,400,100,90,50,40,10,9,5,4,1), romans = ("m","cm","d","cd","c","xc","l","xl","x","ix","v","iv","i")):
if not arabics:
......@@ -678,7 +680,7 @@ FROM
"""):
sim = contents.sim_inspiral_table.row_from_cols(values)
del sim.process_id, sim.source, sim.simulation_id
instruments = frozenset(instrument for instrument, segments in contents.seglists.items() if sim.get_time_geocent() in segments)
instruments = frozenset(instrument for instrument, segments in contents.seglists.items() if sim.time_geocent in segments)
self.injections.setdefault(sim.waveform, []).append(sim)
@staticmethod
......@@ -764,7 +766,7 @@ FROM
""", (self.far_thresh if self.far_thresh is not None else float("+inf"),)):
sim = contents.sim_inspiral_table.row_from_cols(values)
del sim.process_id, sim.source, sim.simulation_id
if sim.get_time_geocent() in zerolag_segments:
if sim.time_geocent in zerolag_segments:
# any nonzero non-z spin -> precession_bool = True -> precessing injection
# all-zero non-z spin -> precession_bool = False -> non-precessing injection
precession_bool = any(s != 0.0 for s in (sim.spin1x, sim.spin1y, sim.spin2x, sim.spin2y))
......@@ -785,14 +787,14 @@ FROM
# function does not work
def decisive_distance(sim, instruments):
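	# the decisive distance is the second-smallest effective distance
	# over the operating instruments, i.e. the one that decides whether
	# a two-detector coincidence can form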
if len(instruments) > 1:
return sorted(sim.get_eff_dist(instrument) for instrument in instruments)[1]
return sorted(sim_get_chirp_eff_dist(sim,instrument) for instrument in instruments)[1]
else:
return sim.get_eff_dist(list(instruments)[0])
return sim_get_chirp_eff_dist(sim,list(instruments)[0])
def decisive_chirp_distance(sim, instruments):
if len(instruments) > 1:
return sorted(sim.get_chirp_eff_dist(instrument) for instrument in instruments)[1]
return sorted(sim_get_chirp_eff_dist(sim,instrument) for instrument in instruments)[1]
else:
return sim.get_chirp_eff_dist(list(instruments)[0])
return sim_get_chirp_eff_dist(sim,list(instruments)[0])
def decisive_charsnr(sim, oninstruments):
if len(oninstruments) > 3:
......@@ -816,7 +818,7 @@ FROM
(r"$\textrm{Chirp Decisive Distance vs.\ Eta (With %s Operating)}$" % ", ".join(sorted(self.on_instruments)), r"$\eta$", lambda sim: sim.eta, r"$\mathrm{Decisive} D_{\mathrm{chirp, eff}}$ ($\mathrm{Mpc}$)", decisive_chirp_distance, "chirpdist_vs_eta"),
(r"$\textrm{Decisive Distance vs.\ Total Mass (With %s Operating)}$" % ", ".join(sorted(self.on_instruments)), r"$M_{\mathrm{total}}$ ($\mathrm{M}_{\odot}$)", lambda sim: sim.mass1 + sim.mass2, r"$\mathrm{Decisive} D_{\mathrm{eff}}$ ($\mathrm{Mpc}$)", decisive_distance, "deff_vs_mtotal"),
(r"$\textrm{Decisive Distance vs.\ Effective Spin (With %s Operating)}$" % ", ".join(sorted(self.on_instruments)), r"$\chi$", lambda sim: (sim.spin1z*sim.mass1 + sim.spin2z*sim.mass2)/(sim.mass1 + sim.mass2), r"$\mathrm{Decisive} D_{\mathrm{eff}}$ ($\mathrm{Mpc}$)", decisive_distance, "deff_vs_chi"),
(r"$\textrm{Decisive Distance vs.\ Time (With %s Operating)}$" % ", ".join(sorted(self.on_instruments)), r"GPS Time (s)", lambda sim: sim.get_time_geocent(), r"$\mathrm{Decisive} D_{\mathrm{eff}}$ ($\mathrm{Mpc}$)", decisive_distance, "deff_vs_t")
(r"$\textrm{Decisive Distance vs.\ Time (With %s Operating)}$" % ", ".join(sorted(self.on_instruments)), r"GPS Time (s)", lambda sim: sim.time_geocent, r"$\mathrm{Decisive} D_{\mathrm{eff}}$ ($\mathrm{Mpc}$)", decisive_distance, "deff_vs_t")
)):
fig, axes = create_plot(x_label, y_label)
......@@ -845,7 +847,7 @@ FROM
(r"Decisive Characteristic SNR vs.\ Total Mass (With %s Operating)" % ", ".join(sorted(self.on_instruments)), r"$M_{\mathrm{total}}$ ($\mathrm{M}_{\odot}$)", lambda sim: sim.mass1 + sim.mass2, r"Decisive Characteristic SNR", "dec_expsnr_vs_mtotal"),
(r"Decisive Characteristic SNR vs.\ Eta (With %s Operating)" % ", ".join(sorted(self.on_instruments)), r"$\eta$", lambda sim: sim.eta, r"Decisive Characteristic SNR", "dec_expsnr_vs_eta"),
(r"Decisive Characteristic SNR vs.\ Effective Spin (With %s Operating)" % ", ".join(sorted(self.on_instruments)), r"$\chi$", lambda sim: (sim.spin1z*sim.mass1 + sim.spin2z*sim.mass2)/(sim.mass1 + sim.mass2), r"Decisive Characteristic SNR", "dec_expsnr_vs_chi"),
(r"Decisive Characteristic SNR vs.\ Time (With %s Operating)" % ", ".join(sorted(self.on_instruments)), r"GPS Time (s)", lambda sim: sim.get_time_geocent(), r"Decisivive Characteristic SNR", "dec_expsnr_vs_t")
(r"Decisive Characteristic SNR vs.\ Time (With %s Operating)" % ", ".join(sorted(self.on_instruments)), r"GPS Time (s)", lambda sim: sim.time_geocent, r"Decisivive Characteristic SNR", "dec_expsnr_vs_t")
)):
fig, axes = create_plot(x_label, y_label)
legend = []
......
......@@ -3,7 +3,7 @@
#
AC_INIT([gstlal-inspiral],[1.6.1],[gstlal-discuss@ligo.org],[gstlal-inspiral])
AC_INIT([gstlal-inspiral],[1.6.8],[gstlal-discuss@ligo.org],[gstlal-inspiral])
AC_COPYRIGHT([Copyright (C) The authors (see source code for details)])
# a file whose existence can be used to check that we are in the
# top-level directory of the source tree
......@@ -230,6 +230,7 @@ to your GI_TYPELIB_PATH environment variable.])
#
AC_SUBST([MIN_NUMPY_VERSION], [1.7.0])
AX_PYTHON_MODULE(numpy, fatal)
NUMPY_CFLAGS=-I`$PYTHON -c "import numpy;print (numpy.get_include());"`
old_CFLAGS="$CFLAGS"
......@@ -266,6 +267,7 @@ AX_PYTHON_GLUE([$MIN_GLUE_VERSION])
AC_SUBST([MIN_LIGO_SEGMENTS_VERSION], [1.2.0])
AX_PYTHON_LIGO_SEGMENTS([$MIN_LIGO_SEGMENTS_VERSION])
AC_SUBST([MIN_LIGO_LW_VERSION], [1.5.3])
AX_PYTHON_LIGO_LW([$MIN_LIGO_LW_VERSION])
#
......@@ -274,7 +276,7 @@ AC_SUBST([MIN_LIGO_LW_VERSION], [1.5.3])
AC_SUBST([MIN_GSTLAL_VERSION], [1.5.0])
AC_SUBST([MIN_GSTLALUGLY_VERSION], [1.6.0])
AC_SUBST([MIN_GSTLALUGLY_VERSION], [1.6.5])
PKG_CHECK_MODULES([GSTLAL], [gstlal >= ${MIN_GSTLAL_VERSION}])
AC_SUBST([GSTLAL_VERSION], [`$PKG_CONFIG --modversion gstlal`])
AX_GSTLAL_SPLIT_VERSION([GSTLAL_VERSION])
......
gstlal-inspiral (1.6.8-1) unstable; urgency=low
* lloidhandler: aggregate max SNRs from each IFO rather than across ifos -
Fixes weird correlations in SNR heat map - addresses request to provide
gstlal background for Virgo
* gstlal_ll_inspiral_trigger_aggregator: port over to changed topic schema,
allow for auth + https
* gstlal_ll_inspiral_pipe: add option to turn on auth + https for trigger
aggregator
-- Alexander Pace <alexander.pace@ligo.org> Thu, 08 Aug 2019 08:37:59 -0700
gstlal-inspiral (1.6.7-1) unstable; urgency=low
* inspiral.py: make gstlal's pastro the production pastro
-- Alexander Pace <alexander.pace@ligo.org> Thu, 25 Jul 2019 13:53:03 -0700
gstlal-inspiral (1.6.6-1) unstable; urgency=low
* gstlal_ll_inspiral_pipe: allow aggregators to be load-balanced, scale
based on number of jobs they process
* lloidhandler.py: change how kafka topics/partitions are arranged, remove
data decimation for kafka topics
* gstlal_inspiral_compress_ranking_stat: added option to remove large
deviations in horizon history.
* p_astro_gstlal.py changes
* lloidhandler: truncate ranking data even more since we are still causing
gracedb problems with the new analysis
-- Alexander Pace <alexander.pace@ligo.org> Thu, 18 Jul 2019 09:19:04 -0700
gstlal-inspiral (1.6.5-1) unstable; urgency=low
* gstlal_ll_inspiral_pipe: fix repeated option in gstlal_ll_dq jobs causing
issues when auth/https is disabled
* gstlal_inspiral_compress_ranking_stat
* Added p_astro module
* lloidhandler: reduce size of ranking stat to gracedb
* gstlal_ll_inspiral_pipe fixes
-- Alexander Pace <alexander.pace@ligo.org> Wed, 05 Jun 2019 09:53:10 -0700
gstlal-inspiral (1.6.4-1) unstable; urgency=low
* lvalert_plots: reduce pressure on gracedb file server
* plotfar.py: fix ValueError for missing instrument
* inspiral.py, lloidhandler.py: only provide sub-threshold snr time series
if the detector is on at coalescence (Jolien's review request)
* cbc_template_fir: moving_median speed improvement
* gstlal_itacac.c: actually set the snr threshold when computing chisq
* lloidhandler: reduce some of the data going to kafka
* gstlal_ll_inspiral_pipe: support external kafka service, remove daily
pages - they will be replaced by scale-out aggregators as future proofing
* Revert "normalize the background only using values at likelihood ratios
greater than the threshold"
-- Alexander Pace <alexander.pace@ligo.org> Thu, 28 Mar 2019 07:23:36 -0700
gstlal-inspiral (1.6.3-1) unstable; urgency=low
* inspiral.py: Fix bug in subthreshold trigger channel name
* inspiral.py: Fix bug causing time shift in subthreshold snr time series
* inspiral.py: changed ranking stat tag for gracedb uploads
* cbc_template_fir: multiple changes
* Bug fixes and performance improvements
-- Alexander Pace <alexander.pace@ligo.org> Sun, 17 Mar 2019 09:55:37 -0700
gstlal-inspiral (1.6.2-1) unstable; urgency=low
* Enforce that appended zeros in subthreshold trigger generation have same
dtype as snr time series
-- Alexander Pace <alexander.pace@ligo.org> Sun, 03 Mar 2019 21:51:57 -0500
gstlal-inspiral (1.6.1-1) unstable; urgency=low
* Packaging differences for rpms: disabling mass model.
......
......@@ -27,7 +27,7 @@ Build-Depends:
python-all-dev (>= @MIN_PYTHON_VERSION@),
python-glue (>= @MIN_GLUE_VERSION@),
python-glue-ligolw-tools,
python-gobject-dev
python-gobject-2-dev
Package: gstlal-inspiral
Architecture: any
......@@ -51,7 +51,8 @@ Depends: ${shlibs:Depends}, ${misc:Depends}, ${python:Depends},
python-ligo-gracedb (>= 1.11),
python-ligo-lw (>= @MIN_LIGO_LW_VERSION@),
python-ligo-segments (>= @MIN_LIGO_SEGMENTS_VERSION@),
python-numpy,
python-ligo-scald,
python-numpy (>= @MIN_NUMPY_VERSION@),
python-scipy
Description: GStreamer for GW data analysis (inspiral parts)
This package provides a variety of gstreamer elements for
......
......@@ -3,7 +3,7 @@
dh $@ --with=python2
override_dh_auto_configure:
dh_auto_configure -- --enable-gtk-doc
dh_auto_configure -- --enable-gtk-doc --disable-massmodel
override_dh_auto_test:
export PYTHONPATH=$$(pwd)/python; \
......
......@@ -774,12 +774,12 @@ static void generate_triggers(GSTLALItacac *itacac, GSTLALItacacPad *itacacpad,
if(itacac->peak_type == GSTLAL_PEAK_DOUBLE_COMPLEX) {
/* extract data around peak for chisq calculation */
gstlal_double_complex_series_around_peak(this_maxdata, (double complex *) itacacpad->data->data + peak_finding_start * this_maxdata->channels, (double complex *) this_snr_mat, this_maxdata->pad);
gstlal_autocorrelation_chi2((double *) this_chi2, (double complex *) this_snr_mat, autocorrelation_length(itacacpad), -((int) autocorrelation_length(itacacpad)) / 2, 0.0, itacacpad->autocorrelation_matrix, itacacpad->autocorrelation_mask, itacacpad->autocorrelation_norm);
gstlal_autocorrelation_chi2((double *) this_chi2, (double complex *) this_snr_mat, autocorrelation_length(itacacpad), -((int) autocorrelation_length(itacacpad)) / 2, itacacpad->snr_thresh, itacacpad->autocorrelation_matrix, itacacpad->autocorrelation_mask, itacacpad->autocorrelation_norm);
} else if (itacac->peak_type == GSTLAL_PEAK_COMPLEX) {
/* extract data around peak for chisq calculation */
gstlal_float_complex_series_around_peak(this_maxdata, (float complex *) itacacpad->data->data + peak_finding_start * this_maxdata->channels, (float complex *) this_snr_mat, this_maxdata->pad);
gstlal_autocorrelation_chi2_float((float *) this_chi2, (float complex *) this_snr_mat, autocorrelation_length(itacacpad), -((int) autocorrelation_length(itacacpad)) / 2, 0.0, itacacpad->autocorrelation_matrix, itacacpad->autocorrelation_mask, itacacpad->autocorrelation_norm);
gstlal_autocorrelation_chi2_float((float *) this_chi2, (float complex *) this_snr_mat, autocorrelation_length(itacacpad), -((int) autocorrelation_length(itacacpad)) / 2, itacacpad->snr_thresh, itacacpad->autocorrelation_matrix, itacacpad->autocorrelation_mask, itacacpad->autocorrelation_norm);
} else
g_assert_not_reached();
}
......
......@@ -18,7 +18,7 @@ Requires: %{gstreamername}-plugins-base >= @MIN_GSTREAMER_VERSION@
Requires: %{gstreamername}-plugins-good >= @MIN_GSTREAMER_VERSION@
Requires: %{gstreamername}-plugins-bad-free
Requires: h5py
Requires: numpy
Requires: numpy >= @MIN_NUMPY_VERSION@
Requires: scipy
Requires: lal >= @MIN_LAL_VERSION@
Requires: lal-python >= @MIN_LAL_VERSION@
......@@ -28,6 +28,13 @@ Requires: lalinspiral-python >= @MIN_LALINSPIRAL_VERSION@
Requires: gsl
Requires: ligo-gracedb >= 1.11
Requires: python-%{gstreamername}
Requires: python2-lal >= @MIN_LAL_VERSION@
Requires: python2-lalinspiral >= @MIN_LALINSPIRAL_VERSION@
Requires: python-ligo-lw >= @MIN_LIGO_LW_VERSION@
Requires: python2-ligo-segments >= @MIN_LIGO_SEGMENTS_VERSION@
Requires: python2-ligo-scald
Requires: numpy >= @MIN_NUMPY_VERSION@
Requires: scipy
BuildRequires: doxygen >= @MIN_DOXYGEN_VERSION@
BuildRequires: gobject-introspection-devel >= @MIN_GOBJECT_INTROSPECTION_VERSION@
BuildRequires: graphviz
......
......@@ -33,7 +33,8 @@ pkgpython_PYTHON = \
streamthinca.py \
svd_bank.py \
templates.py \
webpage.py
webpage.py \
p_astro_gstlal.py
pkgpyexec_LTLIBRARIES = _rate_estimation.la _snglinspiraltable.la _spawaveform.la
......
......@@ -199,9 +199,27 @@ def compute_autocorrelation_mask( autocorrelation ):
def movingmedian(interval, window_size):
	interval = list(interval)
	import bisect
	tmp = numpy.copy(interval)
	A = None
	As = None
	prev = None
	for i in range(window_size, len(interval)-window_size):
		tmp[i] = numpy.median(interval[i-window_size:i+window_size])	# old direct computation, replaced by the incremental version below
		if A is None:
			# first window: sort it once
			A = interval[i-window_size:i+window_size]
			ix = numpy.argsort(A)
			As = list(numpy.array(A)[ix])
		else:
			# slide the window: keep a sorted copy up to date with
			# bisect.insort instead of re-sorting every iteration
			newdata = interval[i+window_size-1]
			A = A + [newdata]
			bisect.insort(As, newdata)
		if len(As) % 2:
			tmp[i] = As[len(As)/2]
		else:
			tmp[i] = (As[len(As)/2-1] + As[len(As)/2]) / 2.
		# retire the oldest sample from both copies of the window
		prev = A.pop(0)
		del As[bisect.bisect_left(As, prev)]
	return tmp
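A quick self-check of the incremental version (my own sketch, not from the source): the running sorted-window median must agree with the direct numpy computation it replaces. Keeping the window sorted with bisect avoids re-sorting all 2*window_size samples per output point, which is the "moving_median speed improvement" noted in the changelog.

import numpy

data = list(numpy.random.randn(200))
w = 10
out = movingmedian(data, w)
for i in range(w, len(data) - w):
	assert numpy.allclose(out[i], numpy.median(data[i - w:i + w]))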
......
......@@ -177,11 +177,8 @@ class RankingStat(snglcoinc.LnLikelihoodRatioMixin):
# full ln L ranking stat. we define the ranking statistic
# to be the largest ln L from all allowed subsets of
# triggers
# FIXME: temporarily disabled due to performance concerns.
# just chain to parent class
return lnP + super(RankingStat, self).__call__(**kwargs)
#return max(super(RankingStat, self).__call__(**kwargs) for kwargs in kwarggen(min_instruments = self.min_instruments, **kwargs))
# triggers.  Maximizes over higher-than-double IFO combos.
return lnP + (super(RankingStat, self).__call__(**kwargs) if len(kwargs["snrs"]) == 1 else max(super(RankingStat, self).__call__(**kwargs) for kwargs in kwarggen(min_instruments = max(2, self.min_instruments), **kwargs)))
@property
def template_ids(self):
......@@ -799,11 +796,25 @@ WHERE
return health >= 1.
@classmethod
def get_xml_root(cls, xml, name):
"""
Sub-classes can use this in their overrides of the
.from_xml() method to find the root element of the XML
serialization.
"""
name = u"%s:%s" % (name, cls.ligo_lw_name_suffix)
xml = [elem for elem in xml.getElementsByTagName(ligolw.LIGO_LW.tagName) if elem.hasAttribute(u"Name") and elem.Name == name]
if len(xml) != 1:
raise ValueError("XML tree must contain exactly one %s element named %s" % (ligolw.LIGO_LW.tagName, name))
return xml[0]
@classmethod
def from_xml(cls, xml, name):
# find the root of the XML tree containing the
# serialization of this object
xml, = [elem for elem in xml.getElementsByTagName(ligolw.LIGO_LW.tagName) if elem.hasAttribute(u"Name") and elem.Name == u"%s:%s" % (name, cls.ligo_lw_name_suffix)]
xml = cls.get_xml_root(xml, name)
# create a mostly uninitialized instance
self = cls(None)
# populate from XML
......
......@@ -121,9 +121,20 @@ def do_it_to(xmldoc):
# the table_name column
newrowtype = newtable.RowType
def newrow(row, coinc_id_ilwdcls = ilwdchar_tables["coinc_event"]["coinc_event_id"]):
# FIXME this is probably a dumb way to do this,
# but it shouldn't matter once we have no
# reason to convert back to ilwdchar
if "event_id" in ilwdchar_tables[row.table_name]:
event_id = ilwdchar_tables[row.table_name]["event_id"](row.event_id)
elif "simulation_id" in ilwdchar_tables[row.table_name]:
event_id = ilwdchar_tables[row.table_name]["simulation_id"](row.event_id)
elif "coinc_event_id" in ilwdchar_tables[row.table_name]:
event_id = ilwdchar_tables[row.table_name]["coinc_event_id"](row.event_id)
else:
raise KeyError("event_id, simulation_id or coinc_event_id not in " + ilwdchar_tables[row.table_name])
return newrowtype(
table_name = row.table_name,
event_id = ilwdchar_tables[row.table_name]["event_id"](row.event_id),
event_id = event_id,
coinc_event_id = coinc_id_ilwdcls(row.coinc_event_id)
)
......
......@@ -82,6 +82,7 @@ from ligo.lw.utils import segments as ligolw_segments
from gstlal import bottle
from gstlal import far
from gstlal import inspiral
from gstlal import p_astro_gstlal
from gstlal import pipeio
from gstlal import simplehandler
from gstlal import streamthinca
......@@ -184,7 +185,11 @@ class EyeCandy(object):
if self.kafka_server is not None:
from kafka import KafkaProducer
self.producer = KafkaProducer(bootstrap_servers=[self.kafka_server], value_serializer=lambda m: json.dumps(m).encode('utf-8'))
self.producer = KafkaProducer(
bootstrap_servers=[self.kafka_server],
key_serializer=lambda m: json.dumps(m).encode('utf-8'),
value_serializer=lambda m: json.dumps(m).encode('utf-8'),
)
else:
self.producer = None
......@@ -199,11 +204,16 @@ class EyeCandy(object):
def update(self, events, last_coincs):
self.ram_history.append((float(lal.UTCToGPS(time.gmtime())), (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss + resource.getrusage(resource.RUSAGE_CHILDREN).ru_maxrss) / 1048576.)) # GB
if events:
max_snr_event = max(events, key = lambda event: event.snr)
self.ifo_snr_history[max_snr_event.ifo].append((float(max_snr_event.end), max_snr_event.snr))
if self.producer is not None:
for ii, column in enumerate(["time", "data"]):
self.kafka_data["%s_snr_history" % max_snr_event.ifo][column].append(float(self.ifo_snr_history[max_snr_event.ifo][-1][ii]))
maxevents = {}
for event in events:
if (event.ifo not in maxevents) or (event.snr > maxevents[event.ifo].snr):
maxevents[event.ifo] = event
for ifo, event in maxevents.items():
t, snr = float(event.end), event.snr
self.ifo_snr_history[ifo].append((t, snr))
if self.producer is not None:
self.kafka_data["%s_snr_history" % ifo]["time"].append(t)
self.kafka_data["%s_snr_history" % ifo]["data"].append(snr)
if last_coincs:
coinc_inspiral_index = last_coincs.coinc_inspiral_index
coinc_event_index = last_coincs.coinc_event_index
......@@ -260,6 +270,7 @@ class EyeCandy(object):
t = inspiral.now()
if self.time_since_last_state is None:
self.time_since_last_state = t
# NOTE: only dump to kafka once per second
if self.producer is not None and (t - self.time_since_last_state) >= 1:
self.time_since_last_state = t
for ii, column in enumerate(["time", "data"]):
......@@ -284,9 +295,14 @@ class EyeCandy(object):
self.kafka_data["%s_strain_dropped" % instrument]["time"].append(float(t))
self.kafka_data["%s_strain_dropped" % instrument]["data"].append(elem.get_property("add") / 16384.)
# Send and flush all of the kafka messages and clear the data
self.producer.send(self.tag, self.kafka_data)
self.producer.flush()
# Send all of the kafka messages and clear the data
#self.producer.send(self.tag, self.kafka_data)
for route in self.kafka_data.keys():
self.producer.send(route, key=self.tag, value=self.kafka_data[route])
# calling flush() here would force the send, but it blocks; not a good
# idea for production running, since we value latency over getting
# metric data out
#self.producer.flush()
for route in self.kafka_data.keys():
self.kafka_data[route] = {'time': [], 'data': []}
self.kafka_data["coinc"] = []
......@@ -620,7 +636,7 @@ class Handler(simplehandler.Handler):
dumps of segment information, trigger files and background
distribution statistics.
"""
def __init__(self, mainloop, pipeline, coincs_document, rankingstat, horizon_distance_func, gracedbwrapper, zerolag_rankingstatpdf_url = None, rankingstatpdf_url = None, ranking_stat_output_url = None, ranking_stat_input_url = None, likelihood_snapshot_interval = None, sngls_snr_threshold = None, tag = "", kafka_server = "10.14.0.112:9092", cluster = False, verbose = False):
def __init__(self, mainloop, pipeline, coincs_document, rankingstat, horizon_distance_func, gracedbwrapper, zerolag_rankingstatpdf_url = None, rankingstatpdf_url = None, ranking_stat_output_url = None, ranking_stat_input_url = None, likelihood_snapshot_interval = None, sngls_snr_threshold = None, tag = "", kafka_server = "10.14.0.112:9092", cluster = False, cap_singles = False, FAR_trialsfactor = 1.0, verbose = False):
"""!
@param mainloop The main application's event loop
@param pipeline The gstreamer pipeline that is being
......@@ -645,6 +661,8 @@ class Handler(simplehandler.Handler):
self.likelihood_snapshot_interval = likelihood_snapshot_interval
self.likelihood_snapshot_timestamp = None
self.cluster = cluster
self.cap_singles = cap_singles
self.FAR_trialsfactor = FAR_trialsfactor
self.gracedbwrapper = gracedbwrapper
# FIXME: detangle this
......@@ -1093,7 +1111,7 @@ class Handler(simplehandler.Handler):
if not self.stream_thinca.push(instrument, [event for event in events if event.ifo == instrument], buf_timestamp):
continue
flushed_sngls = self.stream_thinca.pull(self.rankingstat, fapfar = self.fapfar, zerolag_rankingstatpdf = self.zerolag_rankingstatpdf, coinc_sieve = self.rankingstat.fast_path_cut_from_triggers, cluster = self.cluster)
flushed_sngls = self.stream_thinca.pull(self.rankingstat, fapfar = self.fapfar, zerolag_rankingstatpdf = self.zerolag_rankingstatpdf, coinc_sieve = self.rankingstat.fast_path_cut_from_triggers, cluster = self.cluster, cap_singles = self.cap_singles, FAR_trialsfactor = self.FAR_trialsfactor)
self.coincs_document.commit()
# do GraceDB alerts and update eye candy
......@@ -1241,7 +1259,7 @@ class Handler(simplehandler.Handler):
return outstr
def __get_rankingstat_xmldoc(self):
def __get_rankingstat_xmldoc(self, clipped = False):
# generate a ranking statistic output document. NOTE: if
# we are in possession of ranking statistic PDFs then those
# are included in the output. this allows a single
......@@ -1255,10 +1273,35 @@ class Handler(simplehandler.Handler):
xmldoc = ligolw.Document()
xmldoc.appendChild(ligolw.LIGO_LW())
process = ligolw_process.register_to_xmldoc(xmldoc, u"gstlal_inspiral", paramdict = {}, ifos = self.rankingstat.instruments)
far.gen_likelihood_control_doc(xmldoc, self.rankingstat, self.rankingstatpdf)
# FIXME: don't do this. find a way to reduce the storage
# requirements of the horizon distance history and then go
# back to uploading the full file to gracedb
if clipped:
rankingstat = self.rankingstat.copy()
try:
endtime = rankingstat.numerator.horizon_history.maxkey()
except ValueError:
# empty horizon history
pass
else:
# keep the last day of history
endtime -= 86400. * 1
for history in rankingstat.numerator.horizon_history.values():
del history[:endtime]
else:
rankingstat = self.rankingstat
far.gen_likelihood_control_doc(xmldoc, rankingstat, self.rankingstatpdf)
ligolw_process.set_process_end_time(process)
return xmldoc
def __get_p_astro_json(self, lr, m1, m2, snr, far):
return p_astro_gstlal.compute_p_astro(lr, m1, m2, snr, far, self.rankingstatpdf.copy())
def __get_rankingstat_xmldoc_for_gracedb(self):
# FIXME: remove this wrapper when the horizon history
# encoding is replaced with something that uses less space
return self.__get_rankingstat_xmldoc(clipped = True)
def web_get_rankingstat(self):
with self.lock:
......@@ -1292,7 +1335,7 @@ class Handler(simplehandler.Handler):
# whatever triggers remain in the queues, and processes
# them
flushed_sngls = self.stream_thinca.pull(self.rankingstat, fapfar = self.fapfar, zerolag_rankingstatpdf = self.zerolag_rankingstatpdf, coinc_sieve = self.rankingstat.fast_path_cut_from_triggers, flush = True, cluster = self.cluster)
flushed_sngls = self.stream_thinca.pull(self.rankingstat, fapfar = self.fapfar, zerolag_rankingstatpdf = self.zerolag_rankingstatpdf, coinc_sieve = self.rankingstat.fast_path_cut_from_triggers, flush = True, cluster = self.cluster, cap_singles = self.cap_singles, FAR_trialsfactor = self.FAR_trialsfactor)
self.coincs_document.commit()
# do GraceDB alerts
......@@ -1319,7 +1362,7 @@ class Handler(simplehandler.Handler):
assert self.fapfar is not None
# do alerts
self.gracedbwrapper.do_alerts(last_coincs, self.psds, self.__get_rankingstat_xmldoc)
self.gracedbwrapper.do_alerts(last_coincs, self.psds, self.__get_rankingstat_xmldoc_for_gracedb, self.segmentstracker.seglistdicts, self.__get_p_astro_json)
def web_get_sngls_snr_threshold(self):
......
......@@ -417,7 +417,10 @@ def plot_horizon_distance_vs_time(rankingstat, (tlo, thi), masses = (1.4, 1.4),
if tref is not None:
x -= float(tref)
axes.plot(x, y, color = plotutil.colour_from_instruments([instrument]), label = "%s" % instrument)
yhi = max(max(y), yhi)
try:
yhi = max(max(y), yhi)
except ValueError:
pass
if tref is not None:
axes.set_xlabel("Time From GPS %.2f (s)" % float(tref))
else:
......
......@@ -257,7 +257,7 @@ class StreamThinca(object):
return self.time_slide_graph.push(instrument, events, t_complete)
def pull(self, rankingstat, fapfar = None, zerolag_rankingstatpdf = None, coinc_sieve = None, flush = False, cluster = False, cap_singles = True):
def pull(self, rankingstat, fapfar = None, zerolag_rankingstatpdf = None, coinc_sieve = None, flush = False, cluster = False, cap_singles = False, FAR_trialsfactor = 1.0):
# NOTE: rankingstat is not used to compute the ranking
# statistic, it supplies the detector livetime segment
# lists to determine which triggers are eligible for
......@@ -335,9 +335,9 @@ class StreamThinca(object):
if fapfar is not None:
# FIXME: add proper columns to
# store these values in
coinc_inspiral.combined_far = fapfar.far_from_rank(coinc.likelihood)
coinc_inspiral.combined_far = fapfar.far_from_rank(coinc.likelihood) * FAR_trialsfactor
if len(events) == 1 and cap_singles and coinc_inspiral.combined_far < 1. / fapfar.livetime:
coinc_inspiral.combined_far = 1. / fapfar.livetime
coinc_inspiral.combined_far = 1. / fapfar.livetime
coinc_inspiral.false_alarm_rate = fapfar.fap_from_rank(coinc.likelihood)
if zerolag_rankingstatpdf is not None and coinc.likelihood is not None:
zerolag_rankingstatpdf.zero_lag_lr_lnpdf.count[coinc.likelihood,] += 1
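Concretely (numbers assumed, a sketch of my own): with FAR_trialsfactor = 2 and 5e6 s of livetime, a single-detector candidate ranked at a FAR of 1e-8 Hz ends up at the cap:

livetime = 5.0e6			# seconds of analyzed livetime (assumed)
combined_far = 1.0e-8 * 2.0		# far_from_rank() output times FAR_trialsfactor
if combined_far < 1. / livetime:	# cap_singles enabled, len(events) == 1
	combined_far = 1. / livetime	# floored at 2e-7 Hz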
......
SHELL := /bin/bash # Use bash syntax
########################
# User/Accounting Tags #
########################
# Set the accounting tag from https://ldas-gridmon.ligo.caltech.edu/ldg_accounting/user
ACCOUNTING_GROUP=ligo.dev.o3.cbc.em.gstlalonline
ACCOUNTING_USER=patrick.godwin
ANALYSIS_TAG = er14
CONDOR_UNIVERSE=local
##################
# Kafka Settings #
##################
# kafka options
KAFKA_NODE = cbc.ldas.cit
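# resolve KAFKA_NODE to an address: keep the last field of each answer
# line printed by host(1)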
KAFKA_HOSTNAME := $(shell host $(KAFKA_NODE) | awk 'NF>1{print $$NF}')
KAFKA_PORT = 9182
ZOOKEEPER_PORT = 2271
############
# Workflow #
############
all : kafka_broker_$(ANALYSIS_TAG).dag
@echo "launch kafka dag: condor_submit_dag kafka_broker_$(ANALYSIS_TAG).dag"
kafka_broker_$(ANALYSIS_TAG).dag :
gstlal_kafka_dag \
--analysis-tag $(ANALYSIS_TAG) \
--kafka-hostname $(KAFKA_HOSTNAME) \
--kafka-port $(KAFKA_PORT) \
--zookeeper-port $(ZOOKEEPER_PORT) \
--condor-universe $(CONDOR_UNIVERSE) \
--condor-command=accounting_group=$(ACCOUNTING_GROUP) \
--condor-command=accounting_group_user=$(ACCOUNTING_USER) \
clean :
-rm -rvf *.sub *.dag* *.cache *.sh logs *.ini *.txt
clean-all :
-rm -rvf *.sub *.dag* *.cache *.sh logs *.ini *.txt kafka* zookeeper*
ACCOUNTING_GROUP=ligo.dev.o3.cbc.em.gstlalonline
ACCOUNTING_USER=cody.messick
ACCOUNTING_USER=patrick.godwin
CONDOR_INSPIRAL_ALLOCATION=Online_CBC_gstlal_Skylake_inspiral
CONDOR_OTHER_ALLOCATION=Online_CBC_gstlal_Skylake_other
# bank settings
H1_BANK_CACHE = ../svd/replay/hyper/H1_bank.cache
L1_BANK_CACHE = ../svd/replay/hyper/L1_bank.cache
V1_BANK_CACHE = ../svd/replay/hyper/V1_bank.cache
BANK = ../bank/hyper/gstlal_hyperbank.xml.gz
H1_BANK_CACHE = /home/gstlalcbc/observing/3/online/svd/psd1April2019/H1_bank.cache
L1_BANK_CACHE = /home/gstlalcbc/observing/3/online/svd/psd1April2019/L1_bank.cache
V1_BANK_CACHE = /home/gstlalcbc/observing/3/online/svd/psd1April2019/V1_bank.cache
BANK = /home/gstlalcbc/observing/3/online/bank/hyper/gstlal_hyperbank.xml.gz
# FIXME FIXME don't hardcode the H1 bank cache here
RANKING_STATS := $(shell for n in $$(seq -f '%04.f' $$(wc -l ../svd/replay/hyper/H1_bank.cache | awk '{print $$1}')); do echo "$${n}_rankingstat.xml.gz"; done)
RANKING_STATS := $(shell for n in $$(seq -f '%04.f' $$(wc -l /home/gstlalcbc/observing/3/online/svd/H1_bank.cache | awk '{print $$1}')); do echo "$${n}_rankingstat.xml.gz"; done)
WEBDIR=$(HOME)/public_html/replay/last_2_wks_o2/hyper/
GSTLALSHAREDIR=/home/gstlalcbc/engineering/14/code/master_icc_190212/git/gstlal/gstlal-inspiral/share
# web settings
ANALYSIS_TAG = o3
WEBAPP_NAME=inspiral_$(ANALYSIS_TAG)
GSTLALSHAREDIR=$(LAL_PATH)/../git/gstlal/gstlal-inspiral/share
GSTLAL_FIR_WHITEN=0
# kafka settings
KAFKA_HOSTNAME=10.14.0.112
KAFKA_HOSTNAME=cbc.ldas.cit:9182
# aggregator settings
DATA_BACKEND=influx
#DATA_BACKEND=hdf5
INFLUX_HOSTNAME=10.9.0.112
INFLUX_HOSTNAME=10.14.0.100
INFLUX_PORT=8086
INFLUX_DATABASE_NAME=gstlal_inspiral
INFLUX_DATABASE_NAME=gstlal_inspiral_$(ANALYSIS_TAG)
# gracedb info
GRACEDB=playground
ifeq ($(GRACEDB),production)
GRACEDB_SERVICE_URL:=https://gracedb.ligo.org/api/
LVALERT_SERVER_URL:=lvalert.cgca.uwm.edu
else
GRACEDB_SERVICE_URL:=https://gracedb-playground.ligo.org/api/
LVALERT_SERVER_URL:=lvalert-playground.cgca.uwm.edu
endif
# data settings
H1CHANNEL=GDS-CALIB_STRAIN_O2Replay
L1CHANNEL=GDS-CALIB_STRAIN_O2Replay
V1CHANNEL=Hrec_hoft_16384Hz_O2Replay
H1CHANNEL=GDS-CALIB_STRAIN
L1CHANNEL=GDS-CALIB_STRAIN
V1CHANNEL=Hrec_hoft_16384Hz
H1STATECHANNEL=GDS-CALIB_STATE_VECTOR
L1STATECHANNEL=GDS-CALIB_STATE_VECTOR
V1STATECHANNEL=Hrec_STATE_VECTOR
#H1INJSTATECHANNEL=GDS-CALIB_STATE_VECTOR
#L1INJSTATECHANNEL=GDS-CALIB_STATE_VECTOR
#V1INJSTATECHANNEL=Hrec_STATE_VECTOR
V1STATECHANNEL=DQ_ANALYSIS_STATE_VECTOR
H1DQCHANNEL=DMT-DQ_VECTOR
L1DQCHANNEL=DMT-DQ_VECTOR
V1DQCHANNEL=DQ_ANALYSIS_STATE_VECTOR
#H1INJDQCHANNEL=DMT-DQ_VECTOR
#L1INJDQCHANNEL=DMT-DQ_VECTOR
#V1INJDQCHANNEL=DQ_ANALYSIS_STATE_VECTOR
H1SHM=R1LHO_Data
L1SHM=R1LLO_Data
V1SHM=R1VIRGO_Data
H1SHM=X1LHO_Data
L1SHM=X1LLO_Data
V1SHM=X1VIRGO_Data
# FIXME currently bit 0 is not required due to an issue with misreported calib stat
# set bits 1,5,6,7,8 for non-injection jobs (adds up to dec 482)
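# the replacement values below decode as dec 3 = bits 0,1 (H1/L1) and
# dec 2 = bit 1 (V1); presumably the O3 state-vector bit assignments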
......@@ -82,9 +90,9 @@ H1play :
--dq-channel-name=H1=$(H1DQCHANNEL) \
--state-channel-name=H1=$(H1STATECHANNEL) \
--shared-memory-partition=H1=$(H1SHM) \
--state-vector-on-bits=H1=482 \
--state-vector-on-bits=H1=3 \
--state-vector-off-bits=H1=0 \
--dq-vector-on-bits=H1=7 \
--dq-vector-on-bits=H1=0 \
--dq-vector-off-bits=H1=0 \
--data-source lvshm \
--output /dev/stderr \
......@@ -97,9 +105,9 @@ L1play :
--dq-channel-name=L1=$(L1DQCHANNEL) \
--state-channel-name=L1=$(L1STATECHANNEL) \
--shared-memory-partition=L1=$(L1SHM) \
--state-vector-on-bits=L1=482 \
--state-vector-on-bits=L1=3 \
--state-vector-off-bits=L1=0 \
--dq-vector-on-bits=L1=7 \
--dq-vector-on-bits=L1=0 \
--dq-vector-off-bits=L1=0 \
--data-source lvshm \
--output /dev/stderr \
......@@ -111,7 +119,7 @@ V1play :
--dq-channel-name=V1=$(V1DQCHANNEL) \
--state-channel-name=V1=$(V1STATECHANNEL) \
--shared-memory-partition=V1=$(V1SHM) \
--state-vector-on-bits=V1=4095 \
--state-vector-on-bits=V1=2 \
--state-vector-off-bits=V1=0 \
--dq-vector-on-bits=V1=0 \
--dq-vector-off-bits=V1=0 \
......@@ -123,7 +131,6 @@ tisi.xml :
lalapps_gen_timeslides --instrument=H1=0:0:0 --instrument=L1=0:0:0 --instrument=V1=0:0:0 $@
dag : plots ll_simplify_and_cluster.sql ll_simplify.sql tisi.xml rankingstat.cache zerolag_rankingstat_pdf.cache online-web-deploy
# FIXME disable virgo for now!!!
gstlal_ll_inspiral_pipe \
--bank-cache H1=$(H1_BANK_CACHE),L1=$(L1_BANK_CACHE),V1=$(V1_BANK_CACHE) \
--max-jobs 1000 \
......@@ -142,14 +149,14 @@ dag : plots ll_simplify_and_cluster.sql ll_simplify.sql tisi.xml rankingstat.cac
--shared-memory-partition=L1=$(L1SHM) \
--shared-memory-partition=V1=$(V1SHM) \
--framexmit-iface=10.14.0.1 \
--state-vector-on-bits=H1=482 \
--state-vector-on-bits=L1=482 \
--state-vector-on-bits=V1=4095 \
--state-vector-on-bits=H1=3 \
--state-vector-on-bits=L1=3 \
--state-vector-on-bits=V1=2 \
--state-vector-off-bits=H1=0 \
--state-vector-off-bits=L1=0 \
--state-vector-off-bits=V1=0 \
--dq-vector-on-bits=H1=7 \
--dq-vector-on-bits=L1=7 \
--dq-vector-on-bits=H1=0 \
--dq-vector-on-bits=L1=0 \
--dq-vector-on-bits=V1=0 \
--dq-vector-off-bits=H1=0 \
--dq-vector-off-bits=L1=0 \
......@@ -160,34 +167,32 @@ dag : plots ll_simplify_and_cluster.sql ll_simplify.sql tisi.xml rankingstat.cac
--marginalized-likelihood-file rankingstat_pdf.xml.gz \
--gracedb-group CBC \
--gracedb-search AllSky \
--gracedb-service-url https://gracedb-playground.ligo.org/api/ \
--gracedb-service-url $(GRACEDB_SERVICE_URL) \
--lvalert-server-url $(LVALERT_SERVER_URL) \
--ht-gate-threshold 50 \
--data-source lvshm \
--shared-memory-assumed-duration 1 \
--likelihood-snapshot-interval 14400 \
--lvalert-listener-program gstlal_inspiral_lvalert_background_plotter \
--lvalert-listener-program gstlal_inspiral_lvalert_psd_plotter \
--inspiral-condor-command '+Online_CBC_gstlal_Skylake_inspiral=True' \
--inspiral-condor-command 'Requirements=(TARGET.Online_CBC_gstlal_Skylake_inspiral=?=True)' \
--inspiral-condor-command '+$(CONDOR_INSPIRAL_ALLOCATION)=True' \
--inspiral-condor-command 'Requirements=(TARGET.$(CONDOR_NODE_ALLOCATION)=?=True)' \
--inspiral-condor-command 'accounting_group = $(ACCOUNTING_GROUP)' \
--inspiral-condor-command 'accounting_group_user = $(ACCOUNTING_USER)' \
--inspiral-condor-command 'request_cpus = 2' \
--inspiral-condor-command 'request_memory = 7000' \
--non-inspiral-condor-command '+Online_CBC_gstlal_Skylake_other=True' \
--non-inspiral-condor-command 'Requirements=(TARGET.Online_CBC_gstlal_Skylake_other=?=True)' \
--non-inspiral-condor-command '+$(CONDOR_OTHER_ALLOCATION)=True' \
--non-inspiral-condor-command 'Requirements=(TARGET.$(CONDOR_OTHER_ALLOCATION)=?=True)' \
--non-inspiral-condor-command 'accounting_group = $(ACCOUNTING_GROUP)' \
--non-inspiral-condor-command 'accounting_group_user = $(ACCOUNTING_USER)' \
--non-inspiral-condor-command 'request_cpus = 2' \
--non-inspiral-condor-command 'request_memory = 7000' \
--local-condor-command 'accounting_group = $(ACCOUNTING_GROUP)' \
--local-condor-command 'accounting_group_user = $(ACCOUNTING_USER)' \
--web-dir $(WEBDIR) \
--min-instruments 1 \
--state-backup-destination gstlalcbc@pcdev3.phys.uwm.edu:/home/gstlalcbc/observing/3/uber_state_backup \
--time-slide-file tisi.xml \
--gracedb-far-threshold -1 \
--output-kafka-server $(KAFKA_HOSTNAME):9093 \
--zookeeper-port 2183 \
--gracedb-far-threshold 2.78e-4 \
--analysis-tag $(ANALYSIS_TAG) \
--output-kafka-server $(KAFKA_HOSTNAME)\
--zookeeper-port 2271 \
--agg-data-backend $(DATA_BACKEND) \
--influx-hostname $(INFLUX_HOSTNAME) \
--influx-port $(INFLUX_PORT) \
......@@ -196,13 +201,11 @@ dag : plots ll_simplify_and_cluster.sql ll_simplify.sql tisi.xml rankingstat.cac
#python ~/trim_online_dag.py trigger_pipe.dag
#python ~/noretries.py trigger_pipe.dag
# submit: condor_submit_dag trigger_pipe.dag
online-web-deploy : inspiral.yml
scald deploy -c inspiral.yml -o ~/public_html
scald deploy -c inspiral.yml -o ~/public_html -n $(WEBAPP_NAME) --add-egg-cache
inspiral.yml:
ln -s $(GSTLALSHAREDIR)/O3/$@ .
cp $(GSTLALSHAREDIR)/O3/$@ .
ll_simplify.sql :
ln -s $(GSTLALSHAREDIR)/$@ .
......@@ -212,7 +215,6 @@ ll_simplify_and_cluster.sql :
plots:
mkdir plots
mkdir -p $(WEBDIR)
set-min-instruments:
gstlal_ll_inspiral_gracedb_min_instruments --min-instruments 2 0*registry.txt
......@@ -222,8 +224,9 @@ set-min-instruments-2:
set-far-thresh :
gstlal_ll_inspiral_gracedb_threshold \
--gracedb-far-threshold 1e-4 \
--gracedb-far-threshold 3e-4 \
0*registry.txt
get-far-thresh :
gstlal_ll_inspiral_gracedb_threshold \
0*registry.txt
......@@ -252,19 +255,16 @@ gstlal_mass_model.h5 : $(BANK)
--instrument V1 \
--min-instruments 1 \
--coincidence-threshold 0.005 \