Commit a93bf092 authored by Kipp Cannon

gstlal-inspiral: remove whitespace from ends of lines

parent c003fb17
@@ -367,7 +367,7 @@ def parse_command_line():
options.ranking_stat_output = [None] * len(options.output)
required_urls = [options.time_slide_file]
for svd_bank_set in svd_banks:
required_urls += svd_bank_set.values()
for filename in (options.veto_segments_file, options.injections, options.blind_injections, options.reference_psd):
if filename:
@@ -719,7 +719,7 @@ for output_file_number, (svd_bank_url_dict, output_url, ranking_stat_output_url,
pipeline = Gst.Pipeline(name="gstlal_inspiral")
mainloop = GObject.MainLoop()
triggersrc = lloidparts.mkLLOIDmulti(
pipeline,
detectors = detectors,
@@ -736,7 +736,7 @@ for output_file_number, (svd_bank_url_dict, output_url, ranking_stat_output_url,
fir_stride = options.fir_stride,
reconstruction_segment_list = reconstruction_segment_list
)
if options.verbose:
print >>sys.stderr, "done"
@@ -874,7 +874,7 @@ for output_file_number, (svd_bank_url_dict, output_url, ranking_stat_output_url,
if options.verbose:
print >>sys.stderr, "running pipeline ..."
mainloop.run()
#
# write output file
......
@@ -227,14 +227,14 @@ def condition_psd(psd, newdeltaF, minfs = (35.0, 40.0), maxfs = (1800., 2048.),
kmax = int(minfs[1] / newdeltaF)
psddata[:kmin] = float('Inf')
psddata[kmin:kmax] /= numpy.sin(numpy.arange(kmax-kmin) / (kmax-kmin-1.) * numpy.pi / 2.0)**4
kmin = int(maxfs[0] / newdeltaF)
kmax = int(maxfs[1] / newdeltaF)
psddata[kmax:] = float('Inf')
psddata[kmin:kmax] /= numpy.cos(numpy.arange(kmax-kmin) / (kmax-kmin-1.) * numpy.pi / 2.0)**4
psd.data.data = psddata
#
# compute the psd horizon after conditioning and renormalize
#
......
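The hunk above is the band-edge conditioning of the PSD. A minimal standalone sketch of that tapering step, assuming a bare numpy array psddata with frequency resolution deltaF (the real condition_psd() also recomputes the horizon distance afterwards, which is omitted here):

import numpy

def taper_psd_edges(psddata, deltaF, minfs = (35.0, 40.0), maxfs = (1800., 2048.)):
	# below the low-frequency band edge the PSD is set to Inf so the
	# whitener assigns those bins zero weight; over [minfs[0], minfs[1])
	# divide by sin^4 to roll the response on smoothly
	kmin, kmax = int(minfs[0] / deltaF), int(minfs[1] / deltaF)
	psddata[:kmin] = float('Inf')
	psddata[kmin:kmax] /= numpy.sin(numpy.arange(kmax - kmin) / (kmax - kmin - 1.) * numpy.pi / 2.0)**4
	# mirror image at the high-frequency band edge, rolling off with cos^4
	kmin, kmax = int(maxfs[0] / deltaF), int(maxfs[1] / deltaF)
	psddata[kmax:] = float('Inf')
	psddata[kmin:kmax] /= numpy.cos(numpy.arange(kmax - kmin) / (kmax - kmin - 1.) * numpy.pi / 2.0)**4
	return psddata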
@@ -76,8 +76,8 @@ def waveform(m1, m2, fLow, fhigh, sampleRate):
deltaT = 1.0 / sampleRate
T = spawaveform.chirptime(m1, m2, 4, fLow, fhigh)
tc = -spawaveform.chirptime(m1, m2, 4, fhigh)
# the last sampling point of any waveform is always set
# at abs(t) >= delta. this is to avoid ill-condition of
# frequency when abs(t) < 1e-5
n_start = math.floor((tc-T) / deltaT + 0.5)
n_end = min(math.floor(tc/deltaT), -1)
@@ -349,7 +349,7 @@ def makeiirbank(xmldoc, sampleRate = None, padding=1.1, epsilon=0.02, alpha=.99,
root.appendChild(array.from_array('autocorrelation_bank_real', autocorrelation_bank.real))
root.appendChild(array.from_array('autocorrelation_bank_imag', -autocorrelation_bank.imag))
root.appendChild(array.from_array('autocorrelation_mask', autocorrelation_mask))
return A, B, D, snrvec
def crosscorr(a, b, autocorrelation_length = 201):
......
@@ -124,7 +124,7 @@ class Handler(simplehandler.Handler):
#
# =============================================================================
#
# Modified Version of mkbasicsrc from datasource.py
#
# =============================================================================
#
......
@@ -163,7 +163,7 @@ def get_min_far_inspiral_injections(connection, segments = None, table_name = "c
# now actually remove the missed injections
for k in found_injections:
del missed_injections[k]
return found_injections.values(), total_injections.values(), missed_injections.values()
......
@@ -116,10 +116,10 @@ def now():
def parse_svdbank_string(bank_string):
"""
parses strings of form
H1:bank1.xml,H2:bank2.xml,L1:bank3.xml
into a dictionary of lists of bank files.
"""
out = {}
@@ -135,10 +135,10 @@ def parse_svdbank_string(bank_string):
def parse_iirbank_string(bank_string):
"""
parses strings of form
H1:bank1.xml,H2:bank2.xml,L1:bank3.xml,H2:bank4.xml,...
into a dictionary of lists of bank files.
"""
out = {}
......
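Both docstrings describe the same colon/comma split. A hedged sketch of the parsing (the repeated H2 in the second example suggests repeated instruments accumulate into the list; parse_svdbank_string may instead reject duplicate instruments, so treat this as illustrative only):

def parse_bank_string(bank_string):
	# "H1:bank1.xml,H2:bank2.xml,H2:bank4.xml"
	# -> {'H1': ['bank1.xml'], 'H2': ['bank2.xml', 'bank4.xml']}
	out = {}
	for token in bank_string.strip().split(','):
		ifo, bank = token.split(':', 1)
		out.setdefault(ifo, []).append(bank)
	return out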
@@ -14,7 +14,7 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
##
# @file
#
# A file that contains the inspiral_pipe module code; used to construct condor dags
@@ -290,7 +290,7 @@ def pipeline_dot_py_append_opts_hack(opt, vals):
>>> pipeline_dot_py_append_opts_hack("my-favorite-option", [1,2,3])
'1 --my-favorite-option 2 --my-favorite-option 3'
"""
"""
out = str(vals[0])
for v in vals[1:]:
out += " --%s %s" % (opt, str(v))
@@ -311,7 +311,7 @@ def group(inlist, parts):
>>> B = [2,3]
>>> for g in group(A,B):
... print g
...
[0, 1]
[2, 3]
[4, 5]
@@ -424,7 +424,7 @@ def group_T050017_filename_from_T050017_files(cache_entries, extension, path = N
the same IFO, for example the template bank cache could contain template bank
files from H1 and template bank files from L1.
"""
# Check that every file has same observatory.
observatories = [cache_entries[0].observatory]
for entry in cache_entries[1:]:
if entry.observatory == observatories[0]:
......
@@ -14,7 +14,7 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
##
# @file
#
# A file that contains the lloidparts module code; Roughly speaking it
@@ -44,7 +44,7 @@
#
#
# #### Functions/classes not reviewed since they will be moved
# - DetectorData
# - mkSPIIRmulti
# - mkSPIIRhoftToSnrSlices
# - mkLLOIDSnrSlicesToTimeSliceChisq
@@ -93,7 +93,7 @@ from gstlal import pipeparts
##
# A "sum-of-squares" aggregator
#
# _Gstreamer graph describing this function:_
#
# @dot
@@ -132,7 +132,7 @@ def mkcontrolsnksrc(pipeline, rate, verbose = False, suffix = None, control_peak
@param rate An integer representing the target sample rate of the resulting src
@param verbose Make verbose
@param suffix Log name for verbosity
@param control_peak_samples If nonzero, this would do peakfinding on the control signal with the window specified by this parameter. The peak finding would give a single sample of "on" state at the peak. This will cause far less CPU to be used if you only want to reconstruct SNR around the peak of the control signal.
"""
#
# start with an adder and caps filter to select a sample rate
@@ -205,8 +205,8 @@ def mkcontrolsnksrc(pipeline, rate, verbose = False, suffix = None, control_peak
# "mkcontrolsnksrc()" [URL="\ref mkcontrolsnksrc()"];
# lal_sumsquares [URL="\ref pipeparts.mksumsquares()"];
# audioresample [URL="\ref pipeparts.mkresample()"];
#
#
# lal_checktimestamps1 -> tee [label="iff control_snk, control_src are not None"];
# tee -> lal_sumsquares -> queue2;
# queue2 -> lal_checktimestamps3;
@@ -232,7 +232,7 @@ def mkLLOIDbranch(pipeline, src, bank, bank_fragment, (control_snk, control_src)
Specifically this implements the filtering of multirate svd basis and
(conditional) resampling and reconstruction of the physical SNR
@param pipeline The gstreamer pipeline in which to place this graph
@param src The source of data for this graph provided by a gstreamer element
@param bank The template bank class
......
@@ -123,13 +123,13 @@ td {
border-radius: 25px;
border: 2px solid gray;
background: white;
padding: 20px;
width: 97%;
box-shadow: 10px 10px 5px #888888;
}
.tabs {
position: relative;
min-height: 10in; /* This part sucks */
clear: both;
margin: 25px 0;
@@ -140,16 +140,16 @@ td {
}
.tab label {
background: #eee;
padding: 10px;
border: 1px solid #ccc;
margin-left: -1px;
position: relative;
left: 1px;
}
.tab [type=radio] {
display: none;
}
.content {
@@ -160,7 +160,7 @@ td {
right: 0;
bottom: 0;
padding: 20px;
border: 1px solid #ccc;
}
[type=radio]:checked ~ label {
@@ -245,7 +245,7 @@ class GstlalWebSummary(object):
def status(self):
valid_latency = self.valid_latency()
if self.oldest_data() > 1800:
return 2, "<em class=red>SOME DATA OLDER THAN %d seconds</em>" % self.oldest_data()
if not valid_latency:
return 1, "<em class=red>NO COINCIDENT EVENTS FOUND!</em>"
if self.missed["latency_history"]:
@@ -323,7 +323,7 @@ class GstlalWebSummary(object):
return fig, h
def finish_plot(self, ylim):
plt.grid(color=(0.1,0.4,0.5), linewidth=2)
ticks = ["%s : %s " % (id, reg) for (id, reg) in sorted(self.registry.items())]
plt.xticks(numpy.arange(len(ticks))+.3, ticks, rotation=90, fontsize = 10)
plt.xlim([0, len(ticks)])
@@ -375,7 +375,7 @@ class GstlalWebSummary(object):
max_y = 1
missed_x = range(len(missed))
missed_y = numpy.ones(len(missed_x)) * max_y
h.bar(missed_x, missed_y, color='r', alpha=0.9, linewidth=2)
h.bar(found_x, found_y, color='w', alpha=0.9, linewidth=2)
h.bar(found_x, time_y, color='w', alpha=0.7, linewidth=2)
@@ -447,17 +447,17 @@ class GstlalWebSummary(object):
return self.finish_plot([0.9 * min_y, max_y])
def plot_ram(self, fig, h, found, missed):
found_x = range(len(found))
found_y = numpy.log10(numpy.array([found[k][0,1] for k in sorted(found)]))
try:
max_y, min_y = max(found_y), min(found_y)
except ValueError:
max_y, min_y = (1,0)
missed_x = range(len(missed))
missed_y = numpy.ones(len(missed_x)) * max_y
h.bar(missed_x, missed_y, color='r', alpha=0.9, linewidth=2)
h.bar(found_x, found_y, color='w', alpha=0.9, linewidth=2)
plt.title("max RAM usage (GB)")
@@ -475,7 +475,7 @@ class GstlalWebSummary(object):
#
# Single Node plots
#
def livetime_pie(self):
out = ""
......
@@ -328,7 +328,7 @@ def plot_snr_joint_pdf(snrpdf, instruments, horizon_distances, min_instruments,
axes.set_title(r"$\ln P(%s | \{%s\}, \mathrm{signal})$" % (", ".join("\mathrm{SNR}_{\mathrm{%s}}" % instrument for instrument in instruments), ", ".join("{D_{\mathrm{H}}}_{\mathrm{%s}}=%.3g" % item for item in sorted(horizon_distances.items()))))
fig.tight_layout(pad = .8)
return fig
def plot_likelihood_ratio_pdf(rankingstatpdf, (xlo, xhi), title, which = "noise"):
fig, axes = init_plot((8., 8. / plotutil.golden_ratio))
......
@@ -53,13 +53,13 @@ lsctables.use_in(ligolwcontenthandler)
def parse_segments_xml(path):
llwsegments = ligolw_segments.LigolwSegments(ligolw_utils.load_filename(path,contenthandler=ligolwcontenthandler))
seglistdicts = { #'frame gate': llwsegments.get_by_name(u'framesegments')}
'h(t) gate': llwsegments.get_by_name(u'whitehtsegments'),
'state vector': llwsegments.get_by_name(u'statevectorsegments'),
'trigger buffers': llwsegments.get_by_name(u'triggersegments') }
# FIXME This needs to be generalized to more than two IFOs
seglistdicts['joint segments'] = {'H1L1': seglistdicts['state vector'].intersection(['H1','L1'])}
return seglistdicts
def plot_segments_history(seglistdicts, segments_to_plot = ['trigger buffers', 'h(t) gate', 'state vector'], t_max = None, length = 86400., labelspacing = 10800., colors = {'H1': numpy.array((1.0, 0.0, 0.0)), 'L1': numpy.array((0.0, 0.8, 0.0)), 'V1': numpy.array((1.0, 0.0, 1.0)), 'H1L1': numpy.array((.5, .5, .5))}, fig = None, axes = None):
if fig is None:
......
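The 'joint segments' entry above relies on segmentlistdict.intersection(). An illustrative example using the ligo.segments package (the import path is an assumption; this code historically used glue.segments, which exposes the same API):

from ligo import segments

d = segments.segmentlistdict({
	'H1': segments.segmentlist([segments.segment(0, 10)]),
	'L1': segments.segmentlist([segments.segment(5, 15)]),
})
# segments during which both instruments are on
print d.intersection(['H1', 'L1'])	# [segment(5, 10)]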
@@ -76,7 +76,7 @@ def plot_range(found_inj, missed_inj, seg_bins, (tlo, thi), (dlo, dhi), horizon_
fig = plt.figure()
if axes is None:
axes = fig.add_subplot(111)
# FIXME Add number of distance bins as option
ndbins = rate.NDBins((rate.LinearBins(dlo, dhi, int(dhi - dlo + 1)), rate.IrregularBins(seg_bins)))
vol, err = imr_utils.compute_search_volume_in_bins([f[1] for f in found_inj], missed_inj + [f[1] for f in found_inj], ndbins, lambda sim: (sim.distance, sim.geocent_end_time))
......
@@ -17,7 +17,7 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
##
# @file
#
# A file that contains the rate estimation code.
......
@@ -64,7 +64,7 @@ def imrchirptime(m1, m2, fLower, chi, a_hat = 0.98, e_folds = 10):
tau = 2 * (m1+m2) * 5e-6 * (0.7 + 1.4187 * (1-a_hat)**-0.4990) / (1.5251 - 1.1568 * (1-a_hat)**0.1292)
from (7-9) of LIGO-P1300156.
@param m1 Mass 1
@param m2 Mass 2
......
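A hedged restatement of the damping-time formula quoted in the docstring above, with the factor 2 * M * 5e-6 approximating the total mass in seconds (G Msun / c^3 is about 4.9e-6 s); imrchirptime() presumably adds e_folds of this time to the inspiral chirp time from fLower:

def ringdown_efold_time(m1, m2, a_hat = 0.98):
	# damping time (seconds) of the dominant ringdown mode of a remnant
	# with dimensionless spin a_hat, per (7-9) of LIGO-P1300156
	return 2 * (m1 + m2) * 5e-6 * (0.7 + 1.4187 * (1 - a_hat)**-0.4990) / (1.5251 - 1.1568 * (1 - a_hat)**0.1292)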
@@ -89,9 +89,9 @@ The probabilities are factored in the following way:
.. math::
P(\\vec{\\rho}, \\vec{t}, \\vec{\phi}, \\vec{O} | \\vec{D_H}, s)
=
\underbrace{P(\\vec{\\rho}, \\vec{t}, \\vec{\phi} | \\vec{O}, \\vec{D_H}, s)}_{\mathrm{1:\,TimePhaseSNR()}}
\\times
\underbrace{P(\\vec{O} | \\vec{D_H}, s)}_{\mathrm{2:\,p\_of\_instruments\_given\_horizons()}}
where:
@@ -166,7 +166,7 @@ and triples. *Ideally* false alarm probabilities for all doubles would be
higher than all triples, but this is not a requirement since noise and
especially glitches can cause a triple to be ranked below a double. The plot
below shows that at least the trend is correct. NOTE we are not currently
computing doubles and triples and picking the best.
|O2_O3_LR_double_vs_triple|
@@ -200,7 +200,7 @@ code with the previous implementation of additional dt and dphi terms (green)
and finally the current implementation (blue). The improvement of the present
implementation is consistent with the above injection results and this further
demonstrates that the reimplementation has "done no harm" to the O2
configuration.
|O2_O3_LR_ROC|
@@ -210,7 +210,7 @@ Review Status
Do no harm check of O2 results (Complete)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- Comparing runs before and after (done)
- Checking the probabilities returned by new code and old code to show consistent results (done)
Check of error assumptions (For O3)
@@ -1060,9 +1060,9 @@ class TimePhaseSNR(object):
e.g.,
.. math::
P(\\vec{D_{\mathrm{eff}}} / D_{\mathrm{eff}\,0}, \\vec{t} - t_0, \\vec{\phi} - \phi_0 | \\vec{O}, s)
\\times
|\\vec{\\rho}|^{-4}
\equiv
P(\\vec\lambda|\\vec{O}, s)
\\times
@@ -1089,8 +1089,8 @@ class TimePhaseSNR(object):
.. math::
P(\\vec{\lambda} | \\vec{O}, \\vec{\lambda_{mi}})
=
\\frac{1}{\sqrt{(2\pi)^k |\pmb{\Sigma}|}}
\exp{ \left[ -\\frac{1}{2} \\vec{\Delta\lambda}^T \, \pmb{\Sigma}^{-1} \, \\vec{\Delta\lambda} \\right] }
where :math:`\\vec{\Delta\lambda_i} = \\vec{\lambda} -
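A hedged numpy sketch of evaluating the log of this k-dimensional Gaussian; dlam and Sigma are stand-in names for the offset vector Delta-lambda and the covariance matrix, not identifiers from this class:

import numpy

def ln_gaussian(dlam, Sigma):
	# ln P = -(k ln 2pi + ln |Sigma|) / 2 - dlam^T Sigma^-1 dlam / 2
	k = len(dlam)
	sign, logdet = numpy.linalg.slogdet(Sigma)
	assert sign > 0, "covariance must be positive definite"
	return -0.5 * (k * numpy.log(2. * numpy.pi) + logdet) - 0.5 * numpy.dot(dlam, numpy.linalg.solve(Sigma, dlam))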
@@ -1473,7 +1473,7 @@ class p_of_instruments_given_horizons(object):
self.histograms = histograms
# NOTE we end up pushing any value outside of our
# histogram to just be the value in the last(first)
# bin, so we track those center values here.
self.first_center = histograms.values()[0].centres()[0][0]
self.last_center = histograms.values()[0].centres()[0][-1]
else:
@@ -1489,7 +1489,7 @@ class p_of_instruments_given_horizons(object):
# NOTE we end up pushing any value outside of our
# histogram to just be the value in the last(first)
# bin, so we track those center values here.
self.first_center = histograms.values()[0].centres()[0][0]
self.last_center = histograms.values()[0].centres()[0][-1]
# The sky tile resolution here is lower than the
@@ -1629,7 +1629,7 @@ class InspiralExtrinsics(object):
data files with :py:class:`TimePhaseSNR` and :py:class:`p_of_instruments_given_horizons`.
This class is used to compute p_of_instruments_given_horizons
and the probability of getting time phase and snrs from
a given instrument combination. The argument min_instruments will be
used to normalize the p_of_instruments_given_horizons to set the probability of
a combination with fewer than min_instruments to be 0.
......
@@ -409,7 +409,7 @@ class LnSignalDensity(LnLRDensity):
"""
# FIXME: this is still busted since the rewrite
# FIXME need to add dt and dphi
#
# retrieve horizon distance from history if not given
# explicitly. retrieve SNR threshold from class attribute
......
@@ -26,7 +26,7 @@
#
# #### Actions
# - Consider a study of how to supply the svd / time slice boundaries
#
## @package svd_bank
@@ -165,7 +165,7 @@ class Bank(object):
time_slices,
autocorrelation_length = autocorrelation_length,
verbose = verbose)
# Include signal inspiral table
sngl_inspiral_table = lsctables.SnglInspiralTable.get_table(bank_xmldoc)
self.sngl_inspiral_table = sngl_inspiral_table.copy()
@@ -273,7 +273,7 @@ def write_bank(filename, banks, cliplefts = None, cliprights = None, verbose = F
# FIXME FIXME FIXME move this clipping stuff to the Bank class
# set the right clipping index
clipright = len(bank.sngl_inspiral_table) - clipright
# Apply clipping option to sngl inspiral table
# put the bank table into the output document
new_sngl_table = lsctables.New(lsctables.SnglInspiralTable, bank.sngl_inspiral_table.columnnames)
@@ -352,14 +352,14 @@ def read_banks(filename, contenthandler, verbose = False):
# FIXME in principle this could be different for each bank included in
# this file, but we only put one in the file for now
# FIXME, right now there is only one instrument so we just pull out the
# only psd there is
# FIXME enable this again in the future, but right now we don't use it,
# and it makes the files very slow to read
#processed_psd = lal.series.read_psd_xmldoc(xmldoc).values()[0]
processed_psd = None
for root in (elem for elem in xmldoc.getElementsByTagName(ligolw.LIGO_LW.tagName) if elem.hasAttribute(u"Name") and elem.Name == "gstlal_svd_bank_Bank"):
# Create new SVD bank object
bank = Bank.__new__(Bank)
......
@@ -26,7 +26,7 @@
# #### Actions
#
# - Performance of time slices could be improved by e.g., not using powers of 2 for time slice sample size. This might also simplify the code
#
## @package templates
@@ -222,7 +222,7 @@ def normalized_autocorrelation(fseries, revplan):
data = tseries.data.data
tseries.data.data = data / data[0]
return tseries
# Round a number up to the nearest power of 2
def ceil_pow_2(x):
@@ -317,7 +317,7 @@ def time_slices(
segment_samples_max = samples_max_64
else:
segment_samples_max = samples_max
if segment_samples_min > segment_samples_max:
raise ValueError("The input template bank must have fewer than %d templates, but had %d." % (segment_samples_max, 2 * len(sngl_inspiral_rows)))
......
@@ -36,7 +36,7 @@ class tabs(elem):
except TypeError:
self.content += [content]
return self
class tab(elem):
def __init__(self, href, div, text, charts=[], active = False):
@@ -45,7 +45,7 @@ class tab(elem):
elem.__init__(self, tag="li", content = [elem("a", [text], """ href=#%s class="tablinks" onclick="openGstlalTab(event, '%s',%s)" """ % (href, div, ",".join(charts)) )], attributes = "")
else:
elem.__init__(self, tag="li", content = [elem("a", [text], """ href=#%s class="tablinks" onclick="openGstlalTab(event, '%s')" """ % (href, div) )], attributes = "")
def __call__(self, content=[]):
return elem("div", content, """ id="%s" class="tabcontent" """ % self.div)
@@ -59,15 +59,15 @@ class image_glob(elem):
for img in sorted(glob.glob(globpat)):
td += [elem("a", [elem("img", [], """ src="%s" width=500 """ % img)], """ class="fancybox" href="%s" rel="group" """ % img)]
self.content = [cap, tr]
class page(object):
def __init__(self, title="cbc web page", path='./',
css=["https://cdnjs.cloudflare.com/ajax/libs/fancybox/2.1.5/jquery.fancybox.css"
],
script=["https://cdnjs.cloudflare.com/ajax/libs/jquery/3.1.1/jquery.min.js",
"https://cdnjs.cloudflare.com/ajax/libs/fancybox/2.1.5/jquery.fancybox.js",
"https://www.gstatic.com/charts/loader.js"
],
content = None, header_content = None, verbose=False):
if content is None:
content = []
......
@@ -61,9 +61,9 @@ __version__ = "$Revision$" #FIXME
def which(prog):
which = subprocess.Popen(['which',prog], stdout=subprocess.PIPE)
out = which.stdout.read().strip()
if not out:
print >>sys.stderr, "ERROR: could not find %s in your path, have you built the proper software and sourced the proper environment scripts?" % (prog,)
raise ValueError
return out
@@ -199,7 +199,7 @@ def breakupsegs(seglist, maxextent, overlap):
for bigseg in seglist:
newseglist.extend(breakupseg(bigseg, maxextent, overlap))
return newseglist
def breakupseglists(seglists, maxextent, overlap):
for instrument, seglist in seglists.iteritems():
......
@@ -106,7 +106,7 @@ def pipeline_channel_list_from_channel_dict(channel_dict, ifos = None, opt = "ch
that encodes the other instances of the option.
- override --channel-name with a different option by setting opt.
- restrict the ifo keys to a subset of the channel_dict by
setting ifos
Examples:
@@ -188,7 +188,7 @@ def injection_dict_from_channel_list_with_node_range(injection_list):
## #### Default dictionary of state vector on/off bits by ifo
# Used as the default argument to state_vector_on_off_dict_from_bit_lists()
state_vector_on_off_dict = {
"H1" : [0x7, 0x160],
"H1" : [0x7, 0x160],
"H2" : [0x7, 0x160],
"L1" : [0x7, 0x160],
"V1" : [0x67, 0x100]
@@ -249,7 +249,7 @@ def state_vector_on_off_list_from_bits_dict(bit_dict):
"""
Produce a tuple of useful command lines from a dictionary of on / off state
vector bits keyed by detector
FIXME: This function exists to work around pipeline.py's inability to
give the same option more than once by producing a string to pass as an argument
that encodes the other instances of the option.
......@@ -278,8 +278,8 @@ def state_vector_on_off_list_from_bits_dict(bit_dict):
# Look-up table to map instrument name to framexmit multicast address and
# port
#
# used in mkbasicsrc()
#
# FIXME: this is only here temporarily while we test this approach to data
# aquisition. obviously we can't hard-code this stuff
#
@@ -336,7 +336,7 @@ def framexmit_list_from_framexmit_dict(framexmit_dict, ifos = None, opt = "frame
def pipeline_seek_for_gps(pipeline, gps_start_time, gps_end_time, flags = Gst.SeekFlags.FLUSH):
"""
Create a new seek event, i.e., Gst.Event.new_seek() for a given
gps_start_time and gps_end_time, with optional flags.
@param gps_start_time start time as LIGOTimeGPS, double or float
@param gps_end_time end time as LIGOTimeGPS, double or float
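A hedged sketch of the underlying GStreamer call (the gstlal wrapper above may map GPS times to stream time differently); positions in Gst.Format.TIME are nanoseconds, so GPS seconds are scaled by Gst.SECOND:

from gi.repository import Gst

def seek_event_for_gps(gps_start_time, gps_end_time, flags = Gst.SeekFlags.FLUSH):
	return Gst.Event.new_seek(
		1.0,			# playback rate
		Gst.Format.TIME,	# positions below are in nanoseconds
		flags,
		Gst.SeekType.SET, int(float(gps_start_time) * Gst.SECOND),
		Gst.SeekType.SET, int(float(gps_end_time) * Gst.SECOND))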
@@ -419,7 +419,7 @@ class GWDataSourceInfo(object):
def __init__(self, options):
"""!