Skip to content
Snippets Groups Projects
Commit 80eebff8 authored by Kipp Cannon's avatar Kipp Cannon
Browse files

streamthinca: add latency_tolerance feature

- allow coinc processing to be deferred until the oldest trigger in the
  queues reaches some minimum age to condense coinc processing into fewer,
  less frequent, intervals at the expense of latency
parent c48469df
No related branches found
No related tags found
1 merge request: !144 "streamthinca: add latency_tolerance feature"
Pipeline #345169 passed with warnings
......@@ -235,7 +235,7 @@ class timereversebackgroundcollector(object):
class StreamThinca(object):
def __init__(self, xmldoc, process_id, delta_t, min_instruments = 2, sngls_snr_threshold = None, background_collector_type = "normal"):
def __init__(self, xmldoc, process_id, delta_t, min_instruments = 2, sngls_snr_threshold = None, background_collector_type = "normal", latency_tolerance = 0.):
if background_collector_type not in ("normal", "time_reverse"):
raise ValueError("background_collector_type can only be 'normal' or 'time_reverse'")
self.background_collector_type = background_collector_type
......@@ -243,6 +243,7 @@ class StreamThinca(object):
self.delta_t = delta_t
self.min_instruments = min_instruments
self.sngls_snr_threshold = sngls_snr_threshold
self.latency_tolerance = latency_tolerance
self.set_xmldoc(xmldoc, process_id)
self.clustered_sngl_ids = set()
......@@ -267,9 +268,17 @@ class StreamThinca(object):
Push new triggers into the coinc engine. Returns True if
the coinc engine's internal state has changed in a way that
might enable new candidates to be constructed, False if
not.
not. If latency_tolerance is not 0, then triggers
are allowed to accumulate in the internal queues until the
oldest is latency_tolerance older than t_complete before
this method reports that candidates can be extracted from
the graph. This concentrates the time spent in the Python
coincidence code into smaller intervals, which allows the
gstreamer code to achieve greater parallelism, which
achieves more efficient CPU use at the expense of longer
latencies.
"""
return self.time_slide_graph.push(instrument, events, t_complete)
return self.time_slide_graph.push(instrument, events, t_complete) and (t_complete - self.time_slide_graph.age() >= self.latency_tolerance)
def pull(self, rankingstat, fapfar = None, zerolag_rankingstatpdf = None, coinc_sieve = None, flush = False, cluster = False, cap_singles = False, FAR_trialsfactor = 1.0, template_id_time_map = None):
......
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment