Commit c2c825b8 authored by Kipp Cannon

gstlal_inspiral: remove --min-log-L feature

- currently nonsense, and redundant since the addition of the sum-of-SNR^2
  cut.  The latter will be fixed, cleaned up, and made to incorporate this
  feature.
parent 2a2acf5b
Showing 18 additions and 39 deletions
@@ -257,8 +257,7 @@ def parse_command_line():
 	group.add_option("--chisq-type", metavar = "type", default = "autochisq", help = "Choose the type of chisq computation to perform. Must be one of (autochisq|timeslicechisq). The default is autochisq.")
 	group.add_option("--coincidence-threshold", metavar = "seconds", type = "float", default = 0.005, help = "Set the coincidence window in seconds (default = 0.005 s). The light-travel time between instruments will be added automatically in the coincidence test.")
 	group.add_option("--min-instruments", metavar = "count", type = "int", default = 2, help = "Set the minimum number of instruments that must contribute triggers to form a candidate (default = 2).")
-	group.add_option("--min-log-L", metavar = "log likelihood ratio", type = "float", help = "Discard candidates that get assigned log likelihood ratios below this threshold (default = keep all). When used without --ranking-stat-input, the cut is decided based on an internal approximate ranking statistic.")
-	group.add_option("--ranking-stat-input", metavar = "url", help = "Set the URL from which to load a ranking statistic definition. When this is enabled, signal candidates will have ranking statistic values assigned on-the-fly, and the --min-log-L cut will be applied based on the assigned values. Required when --data-source is lvshm or framexmit; must also set --likelihood-snapshot-interval.")
+	group.add_option("--ranking-stat-input", metavar = "url", help = "Set the URL from which to load a ranking statistic definition. When this is enabled, signal candidates will have ranking statistic values assigned on-the-fly. Required when --data-source is lvshm or framexmit; must also set --likelihood-snapshot-interval.")
 	group.add_option("--ranking-stat-output", metavar = "filename", action = "append", default = [], help = "Set the name of the file to which to write ranking statistic data collected from triggers (optional). Can be given more than once. If given, exactly as many must be provided as there are --svd-bank options and they will be writen to in order.")
 	group.add_option("--ranking-stat-output-cache", metavar = "filename", help = "Provide a cache of ranking statistic output files. This can be used instead of giving multiple --ranking-stat-output options. Cannot be combined with --ranking-stat-output.")
 	group.add_option("--likelihood-snapshot-interval", type = "float", metavar = "seconds", help = "How often to snapshot candidate and ranking statistic data to disk when running online.")
@@ -822,7 +821,6 @@ for output_file_number, (svd_bank_url_dict, output_url, ranking_stat_output_url,
 		rankingstatpdf_url = options.ranking_stat_pdf,
 		zerolag_rankingstatpdf_url = zerolag_rankingstat_pdf,
 		likelihood_snapshot_interval = options.likelihood_snapshot_interval,
-		min_log_L = options.min_log_L,
 		sngls_snr_threshold = options.singles_threshold,
 		tag = options.job_tag,
 		kafka_server = options.output_kafka_server,
...
@@ -357,7 +357,6 @@ def inspiral_node_gen(gstlalInspiralJob, gstlalInspiralInjJob, dag, svd_nodes, s
 			"data-source":"frames",
 			"local-frame-caching":"",
 			"min-instruments":options.min_instruments,
-			"min-log-L":options.min_log_L,
 			"reference-likelihood-file":options.reference_likelihood_file
 		},
 		input_files = { "time-slide-file":options.time_slide_file,
@@ -430,7 +429,6 @@ def inspiral_node_gen(gstlalInspiralJob, gstlalInspiralInjJob, dag, svd_nodes, s
 			"data-source":"frames",
 			"local-frame-caching":"",
 			"min-instruments":options.min_instruments,
-			"min-log-L":options.min_log_L,
 			"reference-likelihood-file":options.reference_likelihood_file
 		},
 		input_files = { "time-slide-file":options.inj_time_slide_file,
@@ -1038,8 +1036,7 @@ def parse_command_line():
 	parser.add_option("--control-peak-time", type="int", default = 8, metavar = "secs", help = "Set the peak finding time for the control signal, default 8")
 	parser.add_option("--coincidence-threshold", metavar = "value", type = "float", default = 0.005, help = "Set the coincidence window in seconds (default = 0.005). The light-travel time between instruments will be added automatically in the coincidence test.")
 	parser.add_option("--min-instruments", metavar = "count", type = "int", default = 2, help = "Set the minimum number of instruments that must contribute triggers to form a candidate (default = 2).")
-	parser.add_option("--min-log-L", metavar = "log likelihood ratio", type = "float", help = "Discard candidates that get assigned log likelihood ratios below this threshold (default = keep all).")
-	parser.add_option("--reference-likelihood-file", metavar = "file", help = "Set a reference likelihood file to compute initial likelihood ratios for the min-log-L cut. Required")
+	parser.add_option("--reference-likelihood-file", metavar = "file", help = "Set a reference likelihood file to compute initial likelihood ratios. Required")
 	parser.add_option("--num-banks", metavar = "str", help = "The number of parallel subbanks per gstlal_inspiral job. can be given as a list like 1,2,3,4 then it will split up the bank cache into N groups with M banks each.")
 	parser.add_option("--max-inspiral-jobs", type="int", metavar = "jobs", help = "Set the maximum number of gstlal_inspiral jobs to run simultaneously, default no constraint.")
 	parser.add_option("--ht-gate-threshold", type="float", help="set a threshold on whitened h(t) to veto glitches")
...
@@ -199,7 +199,6 @@ def parse_command_line():
 	parser.add_option("--reference-psd", metavar = "filename", help = "Set the reference psd file.")
 	parser.add_option("--bank-cache", metavar = "filenames", help = "Set the bank cache files in format H1=H1.cache,H2=H2.cache, etc..")
 	parser.add_option("--min-instruments", metavar = "count", type = "int", default = 2, help = "Set the minimum number of instruments that must contribute triggers to form a candidate (default = 2).")
-	parser.add_option("--min-log-L", metavar = "log likelihood ratio", type = "float", help = "Discard candidates that get assigned log likelihood ratios below this threshold (default = keep all).")
 	parser.add_option("--inj-channel-name", metavar = "name", default=[], action = "append", help = "Set the name of the injection channel to process for given mass bins (optional). 0000:0002:IFO1=CHANNEL-NAME1,IFO2=CHANNEL-NAME2 can be given multiple times.")
 	parser.add_option("--inj-state-channel-name", metavar = "name", default=[], action = "append", help = "Set the name of the injection state channel to process (required if --inj-channel-name set).")
 	parser.add_option("--inj-dq-channel-name", metavar = "name", default=[], action = "append", help = "Set the name of the injection DQ channel to process (required if --inj-channel-name set).")
@@ -459,7 +458,6 @@ for num_insp_nodes, (svd_banks, likefile, zerolikefile) in enumerate(zip(bank_gr
 			"job-tag":jobTags[-1],
 			"likelihood-snapshot-interval":options.likelihood_snapshot_interval,
 			"min-instruments":options.min_instruments,
-			"min-log-L":options.min_log_L,
 			"time-slide-file":options.time_slide_file,
 			"output-kafka-server": options.output_kafka_server
 		}
@@ -522,7 +520,6 @@ for num_insp_nodes, (svd_banks, likefile, zerolikefile) in enumerate(zip(bank_gr
 			"job-tag":inj_jobTags[-1],
 			"likelihood-snapshot-interval":options.likelihood_snapshot_interval,
 			"min-instruments":options.min_instruments,
-			"min-log-L":options.min_log_L,
 			"time-slide-file":options.time_slide_file
 		}
 		common_opts.update(datasource_opts)
...
@@ -574,7 +574,7 @@ class Handler(simplehandler.Handler):
 	dumps of segment information, trigger files and background
 	distribution statistics.
 	"""
-	def __init__(self, mainloop, pipeline, coincs_document, rankingstat, horizon_distance_func, gracedbwrapper, zerolag_rankingstatpdf_url = None, rankingstatpdf_url = None, ranking_stat_output_url = None, ranking_stat_input_url = None, likelihood_snapshot_interval = None, min_log_L = None, sngls_snr_threshold = None, tag = "", kafka_server = "10.14.0.112:9092", verbose = False):
+	def __init__(self, mainloop, pipeline, coincs_document, rankingstat, horizon_distance_func, gracedbwrapper, zerolag_rankingstatpdf_url = None, rankingstatpdf_url = None, ranking_stat_output_url = None, ranking_stat_input_url = None, likelihood_snapshot_interval = None, sngls_snr_threshold = None, tag = "", kafka_server = "10.14.0.112:9092", verbose = False):
 		"""!
 		@param mainloop The main application's event loop
 		@param pipeline The gstreamer pipeline that is being
@@ -634,7 +634,6 @@ class Handler(simplehandler.Handler):
 			coincs_document.process_id,
 			delta_t = rankingstat.delta_t,
 			min_instruments = rankingstat.min_instruments,
-			min_log_L = min_log_L,
 			sngls_snr_threshold = sngls_snr_threshold
 		)
@@ -669,10 +668,7 @@ class Handler(simplehandler.Handler):
 		#
 		# if we have been supplied with external ranking statistic
 		# information then use it to enable ranking statistic
-		# assignment in streamthinca. otherwise, if we have not
-		# been and yet we have been asked to apply the min log L
-		# cut anyway then enable ranking statistic assignment using
-		# the dataless ranking statistic variant
+		# assignment in streamthinca.
 		#
 		if self.ranking_stat_input_url is not None:
@@ -684,7 +680,11 @@ class Handler(simplehandler.Handler):
 			self.stream_thinca.ln_lr_from_triggers = None
 			if self.verbose:
 				print >>sys.stderr, "ranking statistic assignment DISABLED"
-		elif min_log_L is not None:
+		elif False:
+			# FIXME: move sum-of-SNR^2 cut into this object's
+			# .__call__() and then use as coinc sieve function
+			# instead. left here temporarily to remember how
+			# to initialize it
 			self.stream_thinca.ln_lr_from_triggers = far.DatalessRankingStat(
 				template_ids = rankingstat.template_ids,
 				instruments = rankingstat.instruments,
...
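The FIXME in the hunk above points at the intended replacement for the removed cut: the sum-of-SNR^2 test would move into the ranking statistic object's .__call__() and be handed to streamthinca as a "coinc sieve", a callable that takes one candidate's triggers and decides whether to keep it. A minimal sketch of that shape, assuming hypothetical names (SumOfSNR2Sieve, the events argument, and the .snr attribute are illustrative, not gstlal's actual API):

	class SumOfSNR2Sieve(object):
		# hypothetical coinc sieve: keep a candidate only if the sum
		# of its triggers' SNR^2 clears a threshold
		def __init__(self, min_snr2):
			self.min_snr2 = min_snr2

		def __call__(self, events):
			# events = the single-detector triggers forming one candidate
			return sum(event.snr**2. for event in events) >= self.min_snr2

	# usage sketch: streamthinca would call sieve(events) once per candidate
	sieve = SumOfSNR2Sieve(min_snr2 = 49.)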
@@ -235,11 +235,10 @@ def lower_bound_in_seglist(seglist, x):
 class StreamThinca(object):
-	def __init__(self, xmldoc, process_id, delta_t, min_instruments = 2, min_log_L = None, sngls_snr_threshold = None):
+	def __init__(self, xmldoc, process_id, delta_t, min_instruments = 2, sngls_snr_threshold = None):
 		self.ln_lr_from_triggers = None
 		self.delta_t = delta_t
 		self.min_instruments = min_instruments
-		self.min_log_L = min_log_L
 		self.sngls_snr_threshold = sngls_snr_threshold
 		self.set_xmldoc(xmldoc, process_id)
@@ -367,14 +366,9 @@ class StreamThinca(object):
 			if zerolag_rankingstatpdf is not None and coinc.likelihood is not None:
 				zerolag_rankingstatpdf.zero_lag_lr_lnpdf.count[coinc.likelihood,] += 1
-			# if min_log_L is None, this test always passes,
-			# regardless of the value of .likelihood, be it
-			# None, some number, -inf or even nan.
-			if coinc.likelihood >= self.min_log_L:
-				# finally, append coinc to tables
-				self.coinc_tables.append_coinc(coinc, coincmaps, coinc_inspiral)
-				self.last_coincs.add(events, coinc, coincmaps, coinc_inspiral)
+			# finally, append coinc to tables
+			self.coinc_tables.append_coinc(coinc, coincmaps, coinc_inspiral)
+			self.last_coincs.add(events, coinc, coincmaps, coinc_inspiral)
 		# add singles that were not used for any candidates to the
 		# noise model
@@ -386,9 +380,9 @@ class StreamThinca(object):
 		# add any triggers that have been used in coincidences for
 		# the first time to the sngl_inspiral table
 		# FIXME: because this information comes from the
-		# coincidence code, which is not aware of the min_log_L cut
-		# or the clustering, we record a lot of singles that aren't
-		# really used for any (retained) coincs.
+		# coincidence code, which is not aware of the clustering,
+		# we record a lot of singles that aren't really used for
+		# any (retained) coincs.
 		self.sngl_inspiral_table.extend(newly_reported)
...
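For reference, the comment deleted in the hunk above describes real Python 2 behavior (this code base still uses "print >>" syntax, so Python 2 comparison rules apply): None orders below every other value, so with min_log_L left at its default of None the threshold test can never reject a candidate. A minimal illustration of that claim, runnable under CPython 2 only:

	# under CPython 2, any value compares >= None, including NaN
	min_log_L = None
	for likelihood in (None, 42.0, float("-inf"), float("nan")):
		assert likelihood >= min_log_L  # always True, so the cut is a no-op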
@@ -213,7 +213,6 @@ dag : segments.xml.gz vetoes.xml.gz frame.cache inj_tisi.xml tisi.xml plots $(WE
 		--singles-threshold 100.0 \
 		--request-cpu 2 \
 		--request-memory 5GB \
-		--min-log-L 4. \
 		--min-instruments $(MIN_IFOS)
 	sed -i '1s/^/JOBSTATE_LOG logs\/trigger_pipe.jobstate.log\n/' trigger_pipe.dag
...
@@ -171,7 +171,6 @@ dag : segments.xml.gz vetoes.xml.gz frame.cache inj_tisi.xml tisi.xml plots $(WE
 		$(ADDITIONAL_DAG_OPTIONS) \
 		$(CONDOR_COMMANDS) \
 		--ht-gate-threshold-linear 0.8:15.0-45.0:100.0 \
-		--min-log-L -50.0 \
 		--singles-threshold 100.0 \
 		--request-cpu 2 \
 		--request-memory 5GB \
...
@@ -182,7 +182,6 @@ dag : segments.xml.gz vetoes.xml.gz frame.cache inj_tisi.xml tisi.xml plots $(WE
 		$(ADDITIONAL_DAG_OPTIONS) \
 		$(CONDOR_COMMANDS) \
 		--ht-gate-threshold-linear 0.8:15.0-45.0:100.0 \
-		--min-log-L -50.0 \
 		--singles-threshold 100.0 \
 		--request-cpu 2 \
 		--request-memory 5GB \
...
@@ -121,7 +121,6 @@ dag : svd_bank.cache dist_stats.cache $(REF_PSD) $(ZERO_LAG_DB) $(SEGMENTS) $(VE
 		--reference-likelihood-file reference_likelihood_file.xml.gz \
 		--request-cpu 4 \
 		--request-memory 7GB \
-		--min-log-L -50.0 \
 		--min-instruments 1 \
 		$(ADDITIONAL_DAG_OPTIONS)
 	sed -i '1s/^/JOBSTATE_LOG logs\/trigger_pipe.jobstate.log\n/' trigger_pipe.dag
...
@@ -152,7 +152,6 @@ dag : marginalized_likelihood.xml.gz prior.cache plots ll_simplify_and_cluster.s
 		--injection-file $(NSBHINJFILE) \
 		--injection-file $(BBHINJFILE) \
 		--time-slide-file tisi.xml
-	#--min-log-L -50.0 \
 ll_simplify_and_cluster.sql:
 	wget http://versions.ligo.org/cgit/gstlal/plain/gstlal-inspiral/share/ll_simplify_and_cluster.sql
...
@@ -147,8 +147,7 @@ dag : marginalized_likelihood.xml.gz prior.cache plots ll_simplify_and_cluster.s
 		--injection-file $(BNSINJFILE) \
 		--injection-file $(NSBHINJFILE) \
 		--injection-file $(BBHINJFILE) \
-		--time-slide-file tisi.xml \
-		--min-log-L 4
+		--time-slide-file tisi.xml
 ll_simplify_and_cluster.sql:
 	wget http://versions.ligo.org/cgit/gstlal/plain/gstlal-inspiral/share/ll_simplify_and_cluster.sql
...
@@ -107,8 +107,7 @@ dag : plots ll_simplify_and_cluster.sql ll_simplify.sql tisi.xml rankingstat.cac
 		--web-dir $(WEBDIR) \
 		--min-instruments 1 \
 		--state-backup-destination gstlalcbc@pcdev3.phys.uwm.edu:/home/gstlalcbc/observing/2/uber_state_backup \
-		--time-slide-file tisi.xml \
-		--min-log-L 4
+		--time-slide-file tisi.xml
 	#python ~/trim_online_dag.py trigger_pipe.dag
 	#python ~/noretries.py trigger_pipe.dag
...