Commit da100213 authored by Patrick Godwin's avatar Patrick Godwin

remove H1-H2 ifo functionality throughout

parent 2bdd4e33
......@@ -403,8 +403,8 @@ class DataSourceInfo(object):
else:
self.channel_subsets = partition_channels_to_subsets(self.channel_dict, self.max_streams, self.min_sample_rate, self.max_sample_rate)
## A dictionary for shared memory partition, e.g., {"H1": "LHO_Data", "H2": "LHO_Data", "L1": "LLO_Data", "V1": "VIRGO_Data"}
self.shm_part_dict = {"H1": "LHO_Data", "H2": "LHO_Data", "L1": "LLO_Data", "V1": "VIRGO_Data"}
## A dictionary for shared memory partition, e.g., {"H1": "LHO_Data", "L1": "LLO_Data", "V1": "VIRGO_Data"}
self.shm_part_dict = {"H1": "LHO_Data", "L1": "LLO_Data", "V1": "VIRGO_Data"}
if options.shared_memory_partition is not None:
self.shm_part_dict.update( datasource.channel_dict_from_channel_list(options.shared_memory_partition) )
......
......@@ -247,7 +247,7 @@ def parse_command_line():
group.add_option("--control-peak-time", metavar = "seconds", type = "int", help = "Set a time window in seconds to find peaks in the control signal (optional, default is to disable composite detection statistic).")
group.add_option("--output", metavar = "filename", action = "append", default = [], help = "Set the name of the LIGO light-weight XML output file *.{xml,xml.gz} or an SQLite database *.sqlite (required). Can be given multiple times. Exactly as many output files must be specified as svd-bank files will be processed (see --svd-bank).")
group.add_option("--output-cache", metavar = "filename", help = "Provide a cache of output files. This can be used instead of giving multiple --output options. Cannot be combined with --output.")
group.add_option("--svd-bank", metavar = "filename", action = "append", default = [], help = "Set the name of the LIGO light-weight XML file from which to load the svd bank for a given instrument in the form ifo:file. These can be given as a comma separated list such as H1:file1,H2:file2,L1:file3 to analyze multiple instruments. This option can be given multiple times, unless --data-source is lvshm or framexmit in which case it must be given exactly once. If given multiple times, the banks will be processed one-by-one, in order. At least one svd bank for at least 2 detectors is required, but see also --svd-bank-cache.")
group.add_option("--svd-bank", metavar = "filename", action = "append", default = [], help = "Set the name of the LIGO light-weight XML file from which to load the svd bank for a given instrument in the form ifo:file. These can be given as a comma separated list such as H1:file1,L1:file2,V1:file3 to analyze multiple instruments. This option can be given multiple times, unless --data-source is lvshm or framexmit in which case it must be given exactly once. If given multiple times, the banks will be processed one-by-one, in order. At least one svd bank for at least 2 detectors is required, but see also --svd-bank-cache.")
group.add_option("--svd-bank-cache", metavar = "filename", help = "Provide a cache file of svd-bank files. This can be used instead of giving multiple --svd-bank options. Cannot be combined with --svd-bank options.")
# NOTE: the clustering SQL scripts search for this option in the
# process_params table to determine the threshold below which it
......
......@@ -164,13 +164,13 @@ def parse_command_line():
parser.add_option_group(group)
group = OptionGroup(parser, "Template Options", "Choose a template from a SVD bank file / a bank file (see svd_bank_snr.Bank).")
group.add_option("--svd-bank", metavar = "filename", help = "A LIGO light-weight xml / xml.gz file containing svd bank information. These can be given as a comma separated list such as H1:file1,H2:file2,L1:file3 to analyze multiple instruments (require)." )
group.add_option("--svd-bank", metavar = "filename", help = "A LIGO light-weight xml / xml.gz file containing svd bank information. These can be given as a comma separated list such as H1:file1,L1:file2,V1:file3 to analyze multiple instruments (require)." )
group.add_option("--coinc", metavar = "filename", help = "The coinc.xml file associated with --svd-bank. This is used to find the --bank-number and --row-number for a particular event. If given, the --bank-number and --row-number will be overwritten. (optional)")
group.add_option("--bank-number", type = "int", help = "Bank id is of the form <int>ID_<int>N where N is the sub bank id. (optional). SNRs in all banks will be used if it is not given.")
group.add_option("--bank-counts", type = "int", default = 1, help = "Number of banks to used; Counted from --bank-number; default = 1")
group.add_option("--row-number", type = "int", help = "The row number of the template (optional). All the SNRs will be outputed if it is not given.")
group.add_option("--row-counts", type = "int", default = 1, help = "Number of rows to be outputted; Counted from --row-number; default = 1")
group.add_option("--bank", metavar = "filename", help = "LIGO light-weight xml.gz file(s) containing only one template. These can be given as a comma separated list such as H1:file1,H2:file2,L1:file3. Expecting one template only for each file (require).")
group.add_option("--bank", metavar = "filename", help = "LIGO light-weight xml.gz file(s) containing only one template. These can be given as a comma separated list such as H1:file1,L1:file2,V1:file3. Expecting one template only for each file (require).")
parser.add_option_group(group)
group = OptionGroup(parser, "Data Quality Options", "Adjust data quality handling")
......
......@@ -99,7 +99,7 @@ def parse_command_line():
parser.add_option("--samples-max-256", type = "int", default = 1024, help = "The maximum number of samples to use for time slices with frequencies above 256Hz, default 1024")
parser.add_option("--samples-max-64", type = "int", default = 2048, help = "The maximum number of samples to use for time slices with frequencies above 64Hz, default 2048")
parser.add_option("--samples-max", type = "int", default = 4096, help = "The maximum number of samples to use for time slices with frequencies below 64Hz, default 4096")
parser.add_option("--bank-cache", metavar = "filenames", action = "append", help = "Set the bank cache files in format H1=H1.cache,H2=H2.cache, etc.. (can be given multiple times)")
parser.add_option("--bank-cache", metavar = "filenames", action = "append", help = "Set the bank cache files in format H1=H1.cache,L1=L1.cache, etc.. (can be given multiple times)")
parser.add_option("--tolerance", metavar = "float", type = "float", default = 0.9999, help = "set the SVD tolerance, default 0.9999")
parser.add_option("--flow", metavar = "num", type = "float", action = "append", help = "set the low frequency cutoff. Can be given multiple times.")
parser.add_option("--fmax", metavar = "num", type = "float", default = 1600, help = "set the max frequency cutoff, default 1600 (Hz)")
......
......@@ -49,7 +49,7 @@ from lalburst import calc_likelihood
def parse_command_line():
parser = OptionParser()
parser.add_option("--svd-bank", metavar = "filename", action = "append", default = [], help = "Set the name of the LIGO light-weight XML file from which to load the svd bank for a given instrument in the form ifo:file, These can be given as a comma separated list such as H1:file1,H2:file2,L1:file3 to analyze multiple instruments. This option can be given multiple times in order to analyze bank serially. At least one svd bank for at least 2 detectors is required.")
parser.add_option("--svd-bank", metavar = "filename", action = "append", default = [], help = "Set the name of the LIGO light-weight XML file from which to load the svd bank for a given instrument in the form ifo:file, These can be given as a comma separated list such as H1:file1,L1:file2,V1:file3 to analyze multiple instruments. This option can be given multiple times in order to analyze bank serially. At least one svd bank for at least 2 detectors is required.")
parser.add_option("--svd-bank-cache", metavar = "filename", help = "Provide a cache file of svd-bank files")
parser.add_option("--likelihood-file", metavar = "filename", action = "append", help = "Set the name of the likelihood ratio data file to use. Can be given more than once. File names should file T050017 naming convention, with an integer corresponding to split bank it is associated with (e.g. H1L1-0_DIST_STATS-1127307417-3805.xml.gz)")
parser.add_option("--global-likelihood-file", metavar = "filename", action = "append", help = "Set the name of a likelihood file not associated with a specific split ban. Can be given multiple times.")
......
......@@ -100,7 +100,7 @@ def parse_command_line():
# caches
parser.add_option("--dist-stats-cache", metavar = "filename", help = "Set the cache file for dist stats")
parser.add_option("--lloid-cache", metavar = "filename", help = "Set the cache file for LLOID")
parser.add_option("--bank-cache", metavar = "filenames", action = "append", help = "Set the bank cache files in format H1=H1.cache,H2=H2.cache, etc.. (can be given multiple times)")
parser.add_option("--bank-cache", metavar = "filenames", action = "append", help = "Set the bank cache files in format H1=H1.cache,L1=L1.cache, etc.. (can be given multiple times)")
# Data from a zero lag run in the case of an injection-only run.
parser.add_option("--svd-bank-cache", metavar = "filename", help = "Set the cache file for svd banks (required iff running injection-only analysis)")
......
......@@ -151,7 +151,7 @@ injrecovery += report.ImageGrid("Horizon distance", grid_size=1, footer=imgfoote
#
injrecovery += report.Header("Injection Missed/Found")
injrecovery += report.Description("""Measuring the found and missed injections as a function of various parameters aids in diagnosing the pipeline as well as providing the expected sensitivity of the pipeline to real signals. The plots in this section show the missed and found injections as a for the various IFO times for coincident triggers. We allow double coincident events so some categories can have multiple types of found injections (for example H1L1 and H1H2L1 triggers in H1H2L1 time). Because of ambiguity concerning the time of an injection and the injection window it is occasionally possible to find an injection in more detectors than what the "time" refers to. For example, an injection's geocentric end time might be in H1L1 time but that might occur near a boundary where H2 was also on. Thus one could find an H1L1 injection in H1H2L1 time.""")
injrecovery += report.Description("""Measuring the found and missed injections as a function of various parameters aids in diagnosing the pipeline as well as providing the expected sensitivity of the pipeline to real signals. The plots in this section show the missed and found injections as a for the various IFO times for coincident triggers. We allow double coincident events so some categories can have multiple types of found injections (for example H1L1 and H1L1V1 triggers in H1L1V1 time). Because of ambiguity concerning the time of an injection and the injection window it is occasionally possible to find an injection in more detectors than what the "time" refers to. For example, an injection's geocentric end time might be in H1L1 time but that might occur near a boundary where V1 was also on. Thus one could find an H1L1 injection in H1L1V1 time.""")
injrecovery += report.Header("Found / Missed Summary Table")
for tag in opts.output_user_tag:
......
......@@ -129,7 +129,7 @@ def parse_command_line():
parser.add_option("--analysis-tag", metavar = "name", help = "Set the name of the analysis, used to distinguish between different DAGs running simultaneously.")
parser.add_option("--psd-fft-length", metavar = "s", default = 32, type = "int", help = "FFT length, default 32s. Note that 50% will be used for zero-padding.")
parser.add_option("--reference-psd", metavar = "filename", help = "Set the reference psd file.")
parser.add_option("--bank-cache", metavar = "filenames", help = "Set the bank cache files in format H1=H1.cache,H2=H2.cache, etc..")
parser.add_option("--bank-cache", metavar = "filenames", help = "Set the bank cache files in format H1=H1.cache,L1=L1.cache, etc..")
parser.add_option("--min-instruments", metavar = "count", type = "int", default = 2, help = "Set the minimum number of instruments that must contribute triggers to form a candidate (default = 2).")
parser.add_option("--inj-channel-name", metavar = "name", default=[], action = "append", help = "Set the name of the injection channel to process for given mass bins (optional). 0000:0002:IFO1=CHANNEL-NAME1,IFO2=CHANNEL-NAME2 can be given multiple times.")
parser.add_option("--inj-state-channel-name", metavar = "name", default=[], action = "append", help = "Set the name of the injection state channel to process (required if --inj-channel-name set).")
......
......@@ -123,7 +123,7 @@ def parse_svdbank_string(bank_string):
"""
parses strings of form
H1:bank1.xml,H2:bank2.xml,L1:bank3.xml
H1:bank1.xml,L1:bank2.xml,V1:bank3.xml
into a dictionary of lists of bank files.
"""
......@@ -142,7 +142,7 @@ def parse_iirbank_string(bank_string):
"""
parses strings of form
H1:bank1.xml,H2:bank2.xml,L1:bank3.xml,H2:bank4.xml,...
H1:bank1.xml,L1:bank2.xml,V1:bank3.xml,H1:bank4.xml,...
into a dictionary of lists of bank files.
"""
......
......@@ -650,9 +650,8 @@ def mkLLOIDmulti(pipeline, detectors, banks, psd, psd_fft_length = 32, ht_gate_t
control_branch = {}
for instrument, bank in [(instrument, bank) for instrument, banklist in banks.items() for bank in banklist]:
suffix = "%s%s" % (instrument, (bank.logname and "_%s" % bank.logname or ""))
if instrument != "H2":
control_branch[(instrument, bank.bank_id)] = mkcontrolsnksrc(pipeline, max(bank.get_rates()), verbose = verbose, suffix = suffix, control_peak_samples = control_peak_time * max(bank.get_rates()))
#pipeparts.mknxydumpsink(pipeline, pipeparts.mkqueue(pipeline, control_branch[(instrument, bank.bank_id)][1]), "control_%s.dump" % suffix, segment = nxydump_segment)
control_branch[(instrument, bank.bank_id)] = mkcontrolsnksrc(pipeline, max(bank.get_rates()), verbose = verbose, suffix = suffix, control_peak_samples = control_peak_time * max(bank.get_rates()))
#pipeparts.mknxydumpsink(pipeline, pipeparts.mkqueue(pipeline, control_branch[(instrument, bank.bank_id)][1]), "control_%s.dump" % suffix, segment = nxydump_segment)
else:
control_branch = None
......@@ -664,10 +663,7 @@ def mkLLOIDmulti(pipeline, detectors, banks, psd, psd_fft_length = 32, ht_gate_t
for i, (instrument, bank) in enumerate([(instrument, bank) for instrument, banklist in banks.items() for bank in banklist]):
suffix = "%s%s" % (instrument, (bank.logname and "_%s" % bank.logname or ""))
if control_branch is not None:
if instrument != "H2":
control_snksrc = control_branch[(instrument, bank.bank_id)]
else:
control_snksrc = (None, control_branch[("H1", bank.bank_id)][1])
control_snksrc = control_branch[(instrument, bank.bank_id)]
else:
control_snksrc = (None, None)
if chisq_type == 'timeslicechisq':
......
......@@ -6,5 +6,5 @@ PYTHON = $(shell env which python3)
pkgpythondir = $(plugindir)/python
pkgpyexecdir = $(pkgpythondir)
#pkgpython_PYTHON = lal_channelgram.py lal_checktimestamps.py lal_fakeadvvirgosrc.py lal_fakeadvligosrc.py lal_fakeligosrc.py lal_histogramplot.py lal_lho_coherent_null.py lal_spectrumplot.py
#pkgpython_PYTHON = lal_channelgram.py lal_checktimestamps.py lal_fakeadvvirgosrc.py lal_fakeadvligosrc.py lal_fakeligosrc.py lal_histogramplot.py lal_spectrumplot.py
pkgpython_PYTHON = lal_checktimestamps.py lal_spectrumplot.py
# Copyright (C) 2012 Madeline Wade
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""Produce LHO coherent and null data streams"""
__author__ = "Madeline Wade <madeline.wade@ligo.org>"
import scipy.fftpack
import numpy
import gobject
gobject.threads_init()
import pygtk
pygtk.require('2.0')
import pygst
pygst.require('0.10')
import gst
from gstlal import pipeparts
#
# =============================================================================
#
# Functions
#
# =============================================================================
#
class lal_lho_coherent_null(gst.Bin):
	# NOTE: deliberately no class docstring -- __gstdetails__ below reads
	# __doc__, which must resolve to the *module* docstring ("Produce LHO
	# coherent and null data streams"), not a class-level one.
	__gstdetails__ = (
		'LHO Coherent and Null Streams',
		'Filter',
		__doc__,
		__author__
	)

	# GObject properties exposed by this element.  The H1/H2 impulse
	# responses and latencies are forwarded to the two internal
	# lal_firbank elements (see do_set_property / do_get_property).
	__gproperties__ = {
		'block-stride' : (
			gobject.TYPE_UINT,
			'block stride',
			'block stride for fir bank',
			1, gobject.G_MAXUINT, 1024,
			gobject.PARAM_READWRITE
		),
		'H1-impulse' : (
			gobject.TYPE_PYOBJECT,
			'H1 impulse',
			'impulse response for H1',
			gobject.PARAM_READWRITE
		),
		'H2-impulse' : (
			gobject.TYPE_PYOBJECT,
			'H2 impulse',
			'impulse response for H2',
			gobject.PARAM_READWRITE
		),
		'H1-latency' : (
			gobject.TYPE_UINT,
			'H1 latency',
			'latency for H1',
			0, gobject.G_MAXUINT, 0,
			gobject.PARAM_READWRITE
		),
		'H2-latency' : (
			gobject.TYPE_UINT,
			'H2 latency',
			'latency for H2',
			0, gobject.G_MAXUINT, 0,
			gobject.PARAM_READWRITE
		)
	}

	def do_set_property(self, prop, val):
		"""Forward property writes to the internal FIR bank elements.

		block-stride is applied to both banks; the H1-*/H2-* properties
		map onto the corresponding bank's fir-matrix / latency.
		"""
		if prop.name == "block-stride":
			self.H1firfilter.set_property("block-stride", val)
			self.H2firfilter.set_property("block-stride", val)
		elif prop.name == "H1-impulse":
			# fir-matrix is a matrix of kernels; wrap the single kernel
			# in a one-row list.
			self.H1firfilter.set_property("fir-matrix", [val])
		elif prop.name == "H2-impulse":
			self.H2firfilter.set_property("fir-matrix", [val])
		elif prop.name == "H1-latency":
			self.H1firfilter.set_property("latency", val)
		elif prop.name == "H2-latency":
			self.H2firfilter.set_property("latency", val)
		else:
			# unreachable unless __gproperties__ and this method drift apart
			raise AssertionError

	def do_get_property(self, prop):
		"""Read properties back from the internal FIR bank elements.

		block-stride is reported from the H1 bank (both banks are kept
		in sync by do_set_property).
		"""
		if prop.name == "block-stride":
			return self.H1firfilter.get_property("block-stride")
		elif prop.name == "H1-impulse":
			# unwrap the one-row fir-matrix back to a single kernel
			return self.H1firfilter.get_property("fir-matrix")[0]
		elif prop.name == "H2-impulse":
			return self.H2firfilter.get_property("fir-matrix")[0]
		elif prop.name == "H1-latency":
			return self.H1firfilter.get_property("latency")
		elif prop.name == "H2-latency":
			return self.H2firfilter.get_property("latency")
		else:
			raise AssertionError

	def __init__(self):
		"""Assemble the internal pipeline.

		Topology:  H1sink -> tee -> firbank -+-> adder -> COHsrc
		           H2sink -> tee -> firbank -+
		plus a null stream formed from the *unfiltered* tees:
		           H1 tee ----------+-> adder -> NULLsrc
		           H2 tee -> (x -1)-+
		"""
		super(lal_lho_coherent_null, self).__init__()

		# tee off sources
		H1tee = gst.element_factory_make("tee")
		self.add(H1tee)
		H2tee = gst.element_factory_make("tee")
		self.add(H2tee)

		# expose the tee sinks as this bin's input pads
		self.add_pad(gst.GhostPad("H1sink", H1tee.get_pad("sink")))
		self.add_pad(gst.GhostPad("H2sink", H2tee.get_pad("sink")))

		# apply fir filter to H1 data
		self.H1firfilter = H1head = pipeparts.mkfirbank(self, H1tee)

		# apply fir filter to H2 data
		self.H2firfilter = H2head = pipeparts.mkfirbank(self, H2tee)

		#
		# create coherent stream:  sum of the two filtered streams
		#

		COHhead = gst.element_factory_make("lal_adder")
		COHhead.set_property("sync", True)
		self.add(COHhead)
		pipeparts.mkqueue(self, H1head).link(COHhead)
		pipeparts.mkqueue(self, H2head).link(COHhead)

		#
		# create null stream:  H1 minus H2 of the raw (unfiltered) inputs
		#

		NULLhead = gst.element_factory_make("lal_adder")
		NULLhead.set_property("sync", True)
		self.add(NULLhead)
		pipeparts.mkqueue(self, H1tee).link(NULLhead)
		# negate H2 so the adder forms a difference
		pipeparts.mkaudioamplify(self, pipeparts.mkqueue(self, H2tee), -1).link(NULLhead)

		# expose the two sums as this bin's output pads
		self.add_pad(gst.GhostPad("COHsrc", COHhead.get_pad("src")))
		self.add_pad(gst.GhostPad("NULLsrc", NULLhead.get_pad("src")))
gobject.type_register(lal_lho_coherent_null)
__gstelementfactory__ = (
lal_lho_coherent_null.__name__,
gst.RANK_NONE,
lal_lho_coherent_null
)
......@@ -11,7 +11,6 @@ pkgpythondir = $(pkgpyexecdir)
pkgpython_PYTHON = \
__init__.py \
bottle.py \
coherent_null.py \
dagfile.py \
dagparts.py \
datasource.py \
......
# Copyright (C) 2012 Madeline Wade
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
## @file
## @package coherent_null
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
import scipy.fftpack
import numpy
import math
import lal
#
# =============================================================================
#
# Functions
#
# =============================================================================
#
def factors_to_fir_kernel(coh_facs):
	"""
	Compute a finite impulse-response filter kernel from a power
	spectral density conforming to the LAL normalization convention,
	such that if zero-mean unit-variance Gaussian random noise is fed
	into an FIR filter using the kernel the filter's output will
	possess the given PSD.  The PSD must be provided as a
	REAL8FrequencySeries object (see lal's swig binding documentation).

	The return value is the tuple (kernel, latency, sample rate).  The
	kernel is a numpy array containing the filter kernel, the latency
	is the filter latency in samples (an int) and the sample rate is
	in Hz.
	"""
	#
	# this could be relaxed with some work
	#
	assert coh_facs.f0 == 0.0

	#
	# extract the PSD bins and determine sample rate for kernel
	#
	data = coh_facs.data.data
	sample_rate = 2 * int(round(coh_facs.f0 + len(data) * coh_facs.deltaF))

	#
	# compute the FIR kernel.  it always has an odd number of samples
	# and no DC offset.
	#
	data[0] = data[-1] = 0.0
	# zero-stuff the spectrum onto a grid twice as fine so the inverse
	# transform yields a kernel of odd length 2*len(data) - 1
	tmp = numpy.zeros((2 * len(data) - 1,), dtype = data.dtype)
	tmp[0] = data[0]
	tmp[1::2] = data[1:]
	data = tmp
	del tmp
	kernel = scipy.fftpack.irfft(data)
	# centre the kernel.  NOTE: use floor division -- true division
	# would hand numpy.roll a float shift (TypeError on Python 3)
	kernel = numpy.roll(kernel, (len(kernel) - 1) // 2)

	#
	# apply a Tukey window whose flat bit is 50% of the kernel.
	# preserve the FIR kernel's square magnitude
	#
	norm_before = numpy.dot(kernel, kernel)
	kernel *= lal.CreateTukeyREAL8Window(len(kernel), .5).data.data
	kernel *= math.sqrt(norm_before / numpy.dot(kernel, kernel))

	#
	# the kernel's latency.  floor division keeps this an integer
	# sample count on Python 3
	#
	latency = (len(kernel) + 1) // 2 - 1

	#
	# done
	#
	return kernel, latency, sample_rate
def psd_to_impulse_response(PSD1, PSD2):
	"""
	Compute coherent-combination FIR kernels from two detector PSDs.

	PSD1 and PSD2 are REAL8FrequencySeries objects on identical
	frequency grids.  The coherent weighting factors
	PSD2/(PSD1+PSD2) and PSD1/(PSD1+PSD2) are converted to FIR
	kernels with factors_to_fir_kernel().

	Returns (H1_impulse, H1_latency, H2_impulse, H2_latency, sample_rate).
	"""
	assert PSD1.f0 == PSD2.f0
	assert PSD1.deltaF == PSD2.deltaF
	assert len(PSD1.data.data) == len(PSD2.data.data)

	coh_facs_H1 = lal.CreateREAL8FrequencySeries(
		name = "",
		epoch = PSD1.epoch,
		f0 = PSD1.f0,
		deltaF = PSD1.deltaF,
		sampleUnits = lal.DimensionlessUnit,
		length = len(PSD1.data.data)
	)
	coh_facs_H1.data.data = PSD2.data.data / (PSD1.data.data + PSD2.data.data)
	# zero DC and Nyquist bins
	coh_facs_H1.data.data[0] = coh_facs_H1.data.data[-1] = 0.0

	# construct the H2 series the same way as H1.  the original code
	# used lal.REAL8FrequencySeries(...) and assigned the factors to
	# .data (not .data.data), but factors_to_fir_kernel() reads
	# coh_facs.data.data, so the series must be allocated with
	# CreateREAL8FrequencySeries and filled via .data.data
	coh_facs_H2 = lal.CreateREAL8FrequencySeries(
		name = "",
		epoch = PSD2.epoch,
		f0 = PSD2.f0,
		deltaF = PSD2.deltaF,
		sampleUnits = lal.DimensionlessUnit,
		length = len(PSD2.data.data)
	)
	coh_facs_H2.data.data = PSD1.data.data / (PSD1.data.data + PSD2.data.data)
	# zero DC and Nyquist bins
	coh_facs_H2.data.data[0] = coh_facs_H2.data.data[-1] = 0.0

	#
	# set up fir filter
	#
	H1_impulse, H1_latency, H1_srate = factors_to_fir_kernel(coh_facs_H1)
	H2_impulse, H2_latency, H2_srate = factors_to_fir_kernel(coh_facs_H2)

	# both kernels were built on the same frequency grid, so the
	# implied sample rates must agree
	assert H1_srate == H2_srate

	return H1_impulse, H1_latency, H2_impulse, H2_latency, H1_srate
......@@ -72,8 +72,8 @@ def channel_dict_from_channel_list(channel_list):
Examples:
>>> channel_dict_from_channel_list(["H1=LSC-STRAIN", "H2=SOMETHING-ELSE"])
{'H1': 'LSC-STRAIN', 'H2': 'SOMETHING-ELSE'}
>>> channel_dict_from_channel_list(["H1=LSC-STRAIN", "L1=SOMETHING-ELSE"])
{'H1': 'LSC-STRAIN', 'L1': 'SOMETHING-ELSE'}
"""
return dict(instrument_channel.split("=") for instrument_channel in channel_list)
......@@ -111,14 +111,14 @@ def pipeline_channel_list_from_channel_dict(channel_dict, ifos = None, opt = "ch
Examples:
>>> pipeline_channel_list_from_channel_dict({'H2': 'SOMETHING-ELSE', 'H1': 'LSC-STRAIN'})
'H2=SOMETHING-ELSE --channel-name=H1=LSC-STRAIN '
>>> pipeline_channel_list_from_channel_dict({'L1': 'SOMETHING-ELSE', 'H1': 'LSC-STRAIN'})
'L1=SOMETHING-ELSE --channel-name=H1=LSC-STRAIN '
>>> pipeline_channel_list_from_channel_dict({'H2': 'SOMETHING-ELSE', 'H1': 'LSC-STRAIN'}, ifos=["H1"])
>>> pipeline_channel_list_from_channel_dict({'L1': 'SOMETHING-ELSE', 'H1': 'LSC-STRAIN'}, ifos=["H1"])
'H1=LSC-STRAIN '
>>> pipeline_channel_list_from_channel_dict({'H2': 'SOMETHING-ELSE', 'H1': 'LSC-STRAIN'}, opt="test-string")
'H2=SOMETHING-ELSE --test-string=H1=LSC-STRAIN '
>>> pipeline_channel_list_from_channel_dict({'L1': 'SOMETHING-ELSE', 'H1': 'LSC-STRAIN'}, opt="test-string")
'L1=SOMETHING-ELSE --test-string=H1=LSC-STRAIN '
"""
outstr = ""
if ifos is None:
......@@ -145,14 +145,14 @@ def pipeline_channel_list_from_channel_dict_with_node_range(channel_dict, node =
Examples:
>>> pipeline_channel_list_from_channel_dict_with_node_range({'0000': {'H2': 'SOMETHING-ELSE', 'H1': 'LSC-STRAIN'}}, node=0)
'H2=SOMETHING-ELSE --channel-name=H1=LSC-STRAIN '
>>> pipeline_channel_list_from_channel_dict_with_node_range({'0000': {'L1': 'SOMETHING-ELSE', 'H1': 'LSC-STRAIN'}}, node=0)
'L1=SOMETHING-ELSE --channel-name=H1=LSC-STRAIN '
>>> pipeline_channel_list_from_channel_dict_with_node_range({'0000': {'H2': 'SOMETHING-ELSE', 'H1': 'LSC-STRAIN'}}, node=0, ifos=["H1"])
>>> pipeline_channel_list_from_channel_dict_with_node_range({'0000': {'L1': 'SOMETHING-ELSE', 'H1': 'LSC-STRAIN'}}, node=0, ifos=["H1"])
'H1=LSC-STRAIN '
>>> pipeline_channel_list_from_channel_dict_with_node_range({'0000': {'H2': 'SOMETHING-ELSE', 'H1': 'LSC-STRAIN'}}, node=0, opt="test-string")
'H2=SOMETHING-ELSE --test-string=H1=LSC-STRAIN '
>>> pipeline_channel_list_from_channel_dict_with_node_range({'0000': {'L1': 'SOMETHING-ELSE', 'H1': 'LSC-STRAIN'}}, node=0, opt="test-string")
'L1=SOMETHING-ELSE --test-string=H1=LSC-STRAIN '
"""
outstr = ""
node = str(node).zfill(4)
......@@ -189,7 +189,6 @@ def injection_dict_from_channel_list_with_node_range(injection_list):
# Used as the default argument to state_vector_on_off_dict_from_bit_lists()
state_vector_on_off_dict = {
"H1" : [0x7, 0x160],
"H2" : [0x7, 0x160],
"L1" : [0x7, 0x160],
"V1" : [0x67, 0x100]
}
......@@ -199,7 +198,6 @@ state_vector_on_off_dict = {
# Used as the default argument to dq_vector_on_off_dict_from_bit_lists()
dq_vector_on_off_dict = {
"H1" : [0x7, 0x0],
"H2" : [0x7, 0x0],
"L1" : [0x7, 0x0],
"V1" : [0x7, 0x0]
}
......@@ -220,7 +218,7 @@ def state_vector_on_off_dict_from_bit_lists(on_bit_list, off_bit_list, state_vec
>>> on_bit_list = ["V1=7", "H1=7", "L1=7"]
>>> off_bit_list = ["V1=256", "H1=352", "L1=352"]
>>> state_vector_on_off_dict_from_bit_lists(on_bit_list, off_bit_list)
{'H1': [7, 352], 'H2': [7, 352], 'L1': [7, 352], 'V1': [7, 256]}
{'H1': [7, 352], 'L1': [7, 352], 'V1': [7, 256]}
>>> state_vector_on_off_dict_from_bit_lists(on_bit_list, off_bit_list,{})
{'V1': [7, 256], 'H1': [7, 352], 'L1': [7, 352]}
......@@ -256,9 +254,9 @@ def state_vector_on_off_list_from_bits_dict(bit_dict):
Examples:
>>> state_vector_on_off_dict = {"H1":[0x7, 0x160], "H2":[0x7, 0x160], "L1":[0x7, 0x160], "V1":[0x67, 0x100]}
>>> state_vector_on_off_dict = {"H1":[0x7, 0x160], "L1":[0x7, 0x160], "V1":[0x67, 0x100]}
>>> state_vector_on_off_list_from_bits_dict(state_vector_on_off_dict)
('H1=7 --state-vector-on-bits=H2=7 --state-vector-on-bits=L1=7 --state-vector-on-bits=V1=103 ', 'H1=352 --state-vector-off-bits=H2=352 --state-vector-off-bits=L1=352 --state-vector-off-bits=V1=256 ')
('H1=7 --state-vector-on-bits=L1=7 --state-vector-on-bits=V1=103 ', 'H1=352 --state-vector-off-bits=L1=352 --state-vector-off-bits=V1=256 ')
"""
onstr = ""
......@@ -443,7 +441,7 @@ class GWDataSourceInfo(object):
## A dictionary of the requested channels, e.g., {"H1":"LDAS-STRAIN", "L1":"LDAS-STRAIN"}
self.channel_dict = channel_dict_from_channel_list(options.channel_name)
## A dictionary for shared memory partition, e.g., {"H1": "LHO_Data", "H2": "LHO_Data", "L1": "LLO_Data", "V1": "VIRGO_Data"}
## A dictionary for shared memory partition, e.g., {"H1": "LHO_Data", "L1": "LLO_Data", "V1": "VIRGO_Data"}
self.shm_part_dict = {"H1": "LHO_Data", "L1": "LLO_Data", "V1": "VIRGO_Data"}
if options.shared_memory_partition is not None:
self.shm_part_dict.update( channel_dict_from_channel_list(options.shared_memory_partition) )
......@@ -495,9 +493,9 @@ class GWDataSourceInfo(object):
## if no frame segments provided, set them to an empty segment list dictionary
self.frame_segments = segments.segmentlistdict((instrument, None) for instrument in self.channel_dict)
## DQ and state vector channel dictionary, e.g., { "H1": "LLD-DQ_VECTOR", "H2": "LLD-DQ_VECTOR","L1": "LLD-DQ_VECTOR", "V1": "LLD-DQ_VECTOR" }
self.state_channel_dict = { "H1": "LLD-DQ_VECTOR", "H2": "LLD-DQ_VECTOR","L1": "LLD-DQ_VECTOR", "V1": "LLD-DQ_VECTOR" }
self.dq_channel_dict = { "H1": "DMT-DQ_VECTOR", "H2": "DMT-DQ_VECTOR","L1": "DMT-DQ_VECTOR", "V1": "DMT-DQ_VECTOR" }
## DQ and state vector channel dictionary, e.g., { "H1": "LLD-DQ_VECTOR", "L1": "LLD-DQ_VECTOR", "V1": "LLD-DQ_VECTOR" }
self.state_channel_dict = { "H1": "LLD-DQ_VECTOR", "L1": "LLD-DQ_VECTOR", "V1": "LLD-DQ_VECTOR" }
self.dq_channel_dict = { "H1": "DMT-DQ_VECTOR", "L1": "DMT-DQ_VECTOR", "V1": "DMT-DQ_VECTOR" }
if options.state_channel_name is not None:
state_channel_dict_from_options = channel_dict_from_channel_list( options.state_channel_name )
......@@ -509,7 +507,7 @@ class GWDataSourceInfo(object):
instrument = list(dq_channel_dict_from_options.keys())[0]
self.dq_channel_dict.update( dq_channel_dict_from_options )
## Dictionary of state vector on, off bits like {"H1" : [0x7, 0x160], "H2" : [0x7, 0x160], "L1" : [0x7, 0x160], "V1" : [0x67, 0x100]}
## Dictionary of state vector on, off bits like {"H1" : [0x7, 0x160], "L1" : [0x7, 0x160], "V1" : [0x67, 0x100]}
self.state_vector_on_off_bits = state_vector_on_off_dict_from_bit_lists(options.state_vector_on_bits, options.state_vector_off_bits, state_vector_on_off_dict)
self.dq_vector_on_off_bits = state_vector_on_off_dict_from_bit_lists(options.dq_vector_on_bits, options.dq_vector_off_bits, dq_vector_on_off_dict)
......
......@@ -980,15 +980,6 @@ def mktrigger(pipeline, src, n, autocorrelation_matrix = None, mask_matrix = Non
def mklatency(pipeline, src, name = None, silent = False):
return mkgeneric(pipeline, src, "lal_latency", name = name, silent = silent)
def mklhocoherentnull(pipeline, H1src, H2src, H1_impulse, H1_latency, H2_impulse, H2_latency, srate):
elem = mkgeneric(pipeline, None, "lal_lho_coherent_null", block_stride = srate, H1_impulse = H1_impulse, H2_impulse = H2_impulse, H1_latency = H1_latency, H2_latency = H2_latency)
for peer, padname in ((H1src, "H1sink"), (H2src, "H2sink")):
if isinstance(peer, Gst.Pad):
peer.get_parent_element().link_pads(peer, elem, padname)
elif peer is not None:
peer.link_pads(None, elem, padname)
return elem
def mkcomputegamma(pipeline, dctrl, exc, cos, sin, **properties):
elem = mkgeneric(pipeline, None, "lal_compute_gamma", **properties)
for peer, padname in ((dctrl, "dctrl_sink"), (exc, "exc_sink"), (cos, "cos"), (sin, "sin")):
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment