From 198f25c2384d79384cd66c195456db9b76274ce3 Mon Sep 17 00:00:00 2001
From: Kipp Cannon <kipp.cannon@ligo.org>
Date: Sun, 15 Apr 2018 11:07:20 -0500
Subject: [PATCH] gstlal_inspiral: rename --likelihood-file options

- --likelihood-file is renamed to --ranking-stat-output to make its function clearer
- --likelihood-file-cache is renamed to --ranking-stat-output-cache to retain the symmetry
- --reference-likelihood-file is renamed to --ranking-stat-input to make its function clearer
---
 gstlal-inspiral/bin/gstlal_inspiral           | 68 +++++++++----------
 gstlal-inspiral/bin/gstlal_inspiral_pipe      |  2 +-
 ...inspiral_recompute_online_far_from_gracedb |  2 +-
 gstlal-inspiral/bin/gstlal_ll_inspiral_pipe   |  4 +-
 gstlal-inspiral/python/inspiral.py            | 59 +++++++---------
 5 files changed, 62 insertions(+), 73 deletions(-)

diff --git a/gstlal-inspiral/bin/gstlal_inspiral b/gstlal-inspiral/bin/gstlal_inspiral
index 0652f05ade..78581937e9 100755
--- a/gstlal-inspiral/bin/gstlal_inspiral
+++ b/gstlal-inspiral/bin/gstlal_inspiral
@@ -138,7 +138,7 @@
 # + `--chisq-type" [type]: Choose the type of chisq computation to perform. Must be one of (autochisq|timeslicechisq). The default is autochisq.
 # + `--coincidence-threshold` [seconds] (float): Set the coincidence window in seconds (default = 0.005 s). The light-travel time between instruments will be added automatically in the coincidence test.
 # + `--min-instruments` [count] (int): Set the minimum number of instruments that must contribute triggers to form a candidate (default = 2).
-# + `--min-log-L` [log likelihood ratio] (float): Discard candidates that get assigned log likelihood ratios below this threshold (default = keep all).
+# + `--min-log-L` [log likelihood ratio] (float): Discard candidates that get assigned log likelihood ratios below this threshold (default = keep all). When used without --ranking-stat-input, the cut is applied using an internal approximate ranking statistic.
 # + `--write-pipeline` [filename]: Write a DOT graph description of the as-built pipeline to this file (optional). The environment variable GST_DEBUG_DUMP_DOT_DIR must be set for this option to work.
 # + `--comment`: Set the string to be recorded in comment and tag columns in various places in the output file (optional).
 # + `--check-time-stamps`: Turn on time stamp checking.
@@ -146,11 +146,11 @@
 # + `--tmp-space` [path]: Path to a directory suitable for use as a work area while manipulating the database file. The database file will be worked on in this directory, and then moved to the final location when complete. This option is intended to improve performance when running in a networked environment, where there might be a local disk with higher bandwidth than is available to the filesystem on which the final output will reside.
 # + `--blind-injections` [filename]: Set the name of an injection file that will be added to the data without saving the sim_inspiral_table or otherwise processing the data differently. Has the effect of having hidden signals in the input data. --injections must not be specified in this case.
 # + `--job-tag`: Set the string to identify this job and register the resources it provides on a node. Should be 4 digits of the form 0001, 0002, etc..
-# + `--likelihood-file` [filename]: Set the name of the file to which to write ranking statistic data collected from triggers (optional). Can be given more than once. If given, exactly as many must be provided as there are --svd-bank options and they will be writen to in order.
-# + `--likelihood-file-cache` [filename]: Provide a cache of likelihood files. This can be used instead of giving multiple --likelihood-file options. Cannot be combined with --likelihood-file.
-# + `--reference-likelihood-file` [filename]: Set the name of the likelihood ratio data file to use for ranking events. Can only use with --data-source lvshm or framexmit, must also set --likelihood-snapshot-interval.
+# + `--ranking-stat-output` [filename]: Set the name of the file to which to write ranking statistic data collected from triggers (optional). Can be given more than once. If given, exactly as many must be provided as there are --svd-bank options and they will be written to in order.
+# + `--ranking-stat-output-cache` [filename]: Provide a cache of ranking statistic output files. This can be used instead of giving multiple --ranking-stat-output options. Cannot be combined with --ranking-stat-output.
+# + `--ranking-stat-input` [url]: Set the URL from which to load a ranking statistic definition. When this is enabled, signal candidates will have ranking statistic values assigned on-the-fly, and the --min-log-L cut will be applied based on the assigned values. Can only be used with --data-source lvshm or framexmit; must also set --likelihood-snapshot-interval.
 # + `--zerolag-rankingstatpdf-filename` [filename]: Record a histogram of the likelihood ratio ranking statistic values assigned to zero-lag candidates in this XML file, which must exist at start up and contain a RankingStatPDF object. The counts will be added to the file. Optional. Can be given multiple times.
-# + `--likelihood-snapshot-interval` [seconds] (float): How often to reread the marginalized likelihoood data. If --likelihood-file is provided, the likelihood file will be overwritten by a snapshot of the trigger files and a duplicate snapshot will be generated to keep a record of past ranking statistics.
+# + `--likelihood-snapshot-interval` [seconds] (float): How often to snapshot candidate and ranking statistic data to disk when running online.
 # + `--ranking-stat-pdf` [url]: Set the URL from which to load the ranking statistic PDF. This is used to compute false-alarm probabilities and false-alarm rates and is required for online operation (when --data-source is framexmit or lvshm). It is forbidden for offline operation (all other data sources).
 # + `--gracedb-far-threshold` (float): False-alarm rate threshold for gracedb uploads in Hertz (default = do not upload to gracedb).
 # + `--gracedb-search`: Name of search to provide in GracedB uploads (default is LowMass).
@@ -296,12 +296,12 @@ def parse_command_line():
 	group.add_option("--chisq-type", metavar = "type", default = "autochisq", help = "Choose the type of chisq computation to perform. Must be one of (autochisq|timeslicechisq). The default is autochisq.")
 	group.add_option("--coincidence-threshold", metavar = "seconds", type = "float", default = 0.005, help = "Set the coincidence window in seconds (default = 0.005 s). The light-travel time between instruments will be added automatically in the coincidence test.")
 	group.add_option("--min-instruments", metavar = "count", type = "int", default = 2, help = "Set the minimum number of instruments that must contribute triggers to form a candidate (default = 2).")
-	group.add_option("--min-log-L", metavar = "log likelihood ratio", type = "float", help = "Discard candidates that get assigned log likelihood ratios below this threshold (default = keep all).")
-	group.add_option("--likelihood-file", metavar = "filename", action = "append", default = [], help = "Set the name of the file to which to write ranking statistic data collected from triggers (optional). Can be given more than once. If given, exactly as many must be provided as there are --svd-bank options and they will be writen to in order.")
-	group.add_option("--likelihood-file-cache", metavar = "filename", help = "Provide a cache of likelihood files. This can be used instead of giving multiple --likelihood-file options. Cannot be combined with --likelihood-file.")
-	group.add_option("--likelihood-snapshot-interval", type = "float", metavar = "seconds", help = "How often to reread the marginalized likelihoood data. If --likelihood-file is provided, the likelihood file will be overwritten by a snapshot of the trigger files and a duplicate snapshot will be generated to keep a record of past ranking statistics.")
+	group.add_option("--min-log-L", metavar = "log likelihood ratio", type = "float", help = "Discard candidates that get assigned log likelihood ratios below this threshold (default = keep all). When used without --ranking-stat-input, the cut is applied using an internal approximate ranking statistic.")
+	group.add_option("--ranking-stat-input", metavar = "url", help = "Set the URL from which to load a ranking statistic definition. When this is enabled, signal candidates will have ranking statistic values assigned on-the-fly, and the --min-log-L cut will be applied based on the assigned values. Can only be used with --data-source lvshm or framexmit; must also set --likelihood-snapshot-interval.")
+	group.add_option("--ranking-stat-output", metavar = "filename", action = "append", default = [], help = "Set the name of the file to which to write ranking statistic data collected from triggers (optional). Can be given more than once. If given, exactly as many must be provided as there are --svd-bank options and they will be written to in order.")
+	group.add_option("--ranking-stat-output-cache", metavar = "filename", help = "Provide a cache of ranking statistic output files. This can be used instead of giving multiple --ranking-stat-output options. Cannot be combined with --ranking-stat-output.")
+	group.add_option("--likelihood-snapshot-interval", type = "float", metavar = "seconds", help = "How often to snapshot candidate and ranking statistic data to disk when running online.")
 	group.add_option("--ranking-stat-pdf", metavar = "url", help = "Set the URL from which to load the ranking statistic PDF. This is used to compute false-alarm probabilities and false-alarm rates and is required for online operation (when --data-source is framexmit or lvshm). It is forbidden for offline operation (all other data sources)")
-	group.add_option("--reference-likelihood-file", metavar = "filename", help = "Set the name of the likelihood ratio data file to use for ranking events. Can only use with --data-source lvshm or framexmit, must also set --likelihood-snapshot-interval.")
 	group.add_option("--time-slide-file", metavar = "filename", help = "Set the name of the xml file to get time slide offsets (required).")
 	group.add_option("--zerolag-rankingstatpdf-filename", metavar = "filename", action = "append", help = "Record a histogram of the likelihood ratio ranking statistic values assigned to zero-lag candidates in this XML file, which must exist at start up and contain a RankingStatPDF object. The counts will be added to the file. Optional. Can be given multiple times.")
 	parser.add_option_group(group)
@@ -378,14 +378,14 @@ def parse_command_line():
 	else:
 		missing_options.append("either --output-cache or at least one --output")
 
-	if options.likelihood_file:
-		if options.likelihood_file_cache:
-			raise ValueError("cannot supply both --likelihood-file and --likelihood-file-cache")
-	elif options.likelihood_file_cache:
+	if options.ranking_stat_output:
+		if options.ranking_stat_output_cache:
+			raise ValueError("cannot supply both --ranking-stat-output and --ranking-stat-output-cache")
+	elif options.ranking_stat_output_cache:
 		# do this out-of-place to preserve process_params' contents
-		options.likelihood_file = [CacheEntry(line).url for line in open(options.likelihood_file_cache)]
-		if not options.likelihood_file:
-			raise ValueError("--likelihood-file-cache is empty")
+		options.ranking_stat_output = [CacheEntry(line).url for line in open(options.ranking_stat_output_cache)]
+		if not options.ranking_stat_output:
+			raise ValueError("--ranking-stat-output-cache is empty")
 
 	if not options.time_slide_file:
 		missing_options.append("--time-slide-file")
@@ -399,10 +399,10 @@ def parse_command_line():
 
 	if len(svd_banks) != len(options.output):
 		raise ValueError("must supply exactly as many --svd-bank options as --output")
-	if options.likelihood_file and len(options.likelihood_file) != len(options.output):
-		raise ValueError("must supply either none or exactly as many --likelihood-file options as --output")
-	if options.likelihood_snapshot_interval and not options.likelihood_file:
-		raise ValueError("must set --likelihood-file when --likelihood-snapshot-interval is set")
+	if options.ranking_stat_output and len(options.ranking_stat_output) != len(options.output):
+		raise ValueError("must supply either none or exactly as many --ranking-stat-output options as --output")
+	if options.likelihood_snapshot_interval and not options.ranking_stat_output:
+		raise ValueError("must set --ranking-stat-output when --likelihood-snapshot-interval is set")
 
 	required_urls = [options.time_slide_file]
 	for svd_bank_set in svd_banks:
@@ -463,9 +463,9 @@
 	if options.likelihood_snapshot_interval is not None and options.likelihood_snapshot_interval <= 0.:
 		raise ValueError("--likelihood-snapshot-interval cannot be <= 0")
 
-	if options.reference_likelihood_file:
+	if options.ranking_stat_input:
 		if not options.likelihood_snapshot_interval:
-			raise ValueError("must set --likelihood-snapshot-interval when --reference-likelihood-file is set")
+			raise ValueError("must set --likelihood-snapshot-interval when --ranking-stat-input is set")
 
 	if not options.ht_gate_threshold:
 		# default threshold is +inf = disable feature.
@@ -625,7 +625,7 @@ else:
 #
 
 
-for output_file_number, (svd_bank_url_dict, output_url, likelihood_url, zerolag_rankingstatpdf_filename, ht_gate_threshold) in enumerate(zip(svd_banks, options.output, options.likelihood_file, options.zerolag_rankingstatpdf_filename, options.ht_gate_threshold)):
+for output_file_number, (svd_bank_url_dict, output_url, ranking_stat_output_url, zerolag_rankingstatpdf_filename, ht_gate_threshold) in enumerate(zip(svd_banks, options.output, options.ranking_stat_output, options.zerolag_rankingstatpdf_filename, options.ht_gate_threshold)):
 	#
 	# Checkpointing only supported for gzip files in offline analysis
 	# FIXME Implement a means by which to check for sqlite file
@@ -638,8 +638,8 @@ for output_file_number, (svd_bank_url_dict, output_url, likelihood_url, zerolag_
 			# lines uses less memory
 			for line in gzip.open(ligolw_utils.local_path_from_url(output_url)):
 				pass
-			if likelihood_url is not None:
-				for line in gzip.open(ligolw_utils.local_path_from_url(likelihood_url)):
+			if ranking_stat_output_url is not None:
+				for line in gzip.open(ligolw_utils.local_path_from_url(ranking_stat_output_url)):
 					pass
 			# File is OK and there is no need to process it,
 			# skip ahead in the loop
@@ -767,16 +767,16 @@
 
 
 	if options.data_source in ("lvshm", "framexmit"):
-		assert likelihood_url is not None
-		rankingstat, _ = far.parse_likelihood_control_doc(ligolw_utils.load_url(likelihood_url, verbose = options.verbose, contenthandler = far.RankingStat.LIGOLWContentHandler))
+		assert ranking_stat_output_url is not None
+		rankingstat, _ = far.parse_likelihood_control_doc(ligolw_utils.load_url(ranking_stat_output_url, verbose = options.verbose, contenthandler = far.RankingStat.LIGOLWContentHandler))
 		if rankingstat is None:
-			raise ValueError("\"%s\" does not contain parameter distribution data" % likelihood_url)
+			raise ValueError("\"%s\" does not contain parameter distribution data" % ranking_stat_output_url)
 		if rankingstat.delta_t != options.coincidence_threshold:
-			raise ValueError("\"%s\" is for delta_t=%g, we need %g" % (likelihood_url, rankingstat.denominator.delta_t, options.coincidence_threshold))
+			raise ValueError("\"%s\" is for delta_t=%g, we need %g" % (ranking_stat_output_url, rankingstat.denominator.delta_t, options.coincidence_threshold))
 		if rankingstat.min_instruments != options.min_instruments:
-			raise ValueError("\"%s\" is for min instruments = %d but we need %d" % (likelihood_url, rankingstat.denominator.min_instruments, options.min_instruments))
+			raise ValueError("\"%s\" is for min instruments = %d but we need %d" % (ranking_stat_output_url, rankingstat.denominator.min_instruments, options.min_instruments))
 		if rankingstat.instruments != all_instruments:
-			raise ValueError("\"%s\" is for %s but we need %s" % (likelihood_url, ", ".join(sorted(rankingstat.instruments)), ", ".join(sorted(all_instruments))))
+			raise ValueError("\"%s\" is for %s but we need %s" % (ranking_stat_output_url, ", ".join(sorted(rankingstat.instruments)), ", ".join(sorted(all_instruments))))
 		if rankingstat.template_ids is None:
 			rankingstat.template_ids = template_ids
 		elif rankingstat.template_ids != template_ids:
@@ -809,9 +809,9 @@ for output_file_number, (svd_bank_url_dict, output_url, likelihood_url, zerolag_
 		pipeline = pipeline,
 		rankingstat = rankingstat,
 		zerolag_rankingstatpdf_filename = zerolag_rankingstatpdf_filename,
+		ranking_stat_input_url = options.ranking_stat_input,
+		ranking_stat_output_url = ranking_stat_output_url,
 		rankingstatpdf_url = options.ranking_stat_pdf,
-		likelihood_url = likelihood_url,
-		reference_likelihood_url = options.reference_likelihood_file,
 		likelihood_snapshot_interval = options.likelihood_snapshot_interval,
 		thinca_interval = options.thinca_interval,
 		min_log_L = options.min_log_L,
diff --git a/gstlal-inspiral/bin/gstlal_inspiral_pipe b/gstlal-inspiral/bin/gstlal_inspiral_pipe
index 4abe96c386..50466bee8e 100755
--- a/gstlal-inspiral/bin/gstlal_inspiral_pipe
+++ b/gstlal-inspiral/bin/gstlal_inspiral_pipe
@@ -403,7 +403,7 @@ def inspiral_node_gen(gstlalInspiralJob, gstlalInspiralInjJob, dag, svd_nodes, s
 			input_cache_files = {"svd-bank-cache":svd_bank_cache_maker(svd_bank_strings)},
 			output_cache_files = {
 				"output-cache":output_names,
-				"likelihood-file-cache":dist_stat_names
+				"ranking-stat-output-cache":dist_stat_names
 			}
 		)
 		# Set a post script to check for file integrity
diff --git a/gstlal-inspiral/bin/gstlal_inspiral_recompute_online_far_from_gracedb b/gstlal-inspiral/bin/gstlal_inspiral_recompute_online_far_from_gracedb
index fdc60be53f..28b487f6bb 100755
--- a/gstlal-inspiral/bin/gstlal_inspiral_recompute_online_far_from_gracedb
+++ b/gstlal-inspiral/bin/gstlal_inspiral_recompute_online_far_from_gracedb
@@ -74,7 +74,7 @@ def get_likelihood_files(gid_list, gracedb):
 	marg_files_dict = {}
 	for gid in gid_list:
 		coinc_xmldoc = lvalert_helper.get_coinc_xmldoc(gracedb, gid)
-		likelihood_files_dict.setdefault(ligolw_process.get_process_params(coinc_xmldoc, "gstlal_inspiral", "--likelihood-file")[0], []).append([gid, coinc_xmldoc])
+		likelihood_files_dict.setdefault(ligolw_process.get_process_params(coinc_xmldoc, "gstlal_inspiral", "--ranking-stat-output")[0], []).append([gid, coinc_xmldoc])
 		marg_files_dict.setdefault(ligolw_process.get_process_params(coinc_xmldoc, "gstlal_inspiral", "--ranking-stat-pdf")[0], []).append(gid)
 
 	if len(marg_files_dict.keys()) > 1:
diff --git a/gstlal-inspiral/bin/gstlal_ll_inspiral_pipe b/gstlal-inspiral/bin/gstlal_ll_inspiral_pipe
index 82424be971..cf4e8717a8 100755
--- a/gstlal-inspiral/bin/gstlal_ll_inspiral_pipe
+++ b/gstlal-inspiral/bin/gstlal_ll_inspiral_pipe
@@ -424,7 +424,7 @@ for num_insp_nodes, (svd_banks, likefile, zerolikefile) in enumerate(zip(bank_gr
 			},
 		input_files = {"ranking-stat-pdf":options.marginalized_likelihood_file},
 		output_files = {"output":"not_used.xml.gz",
-			"likelihood-file":likefile,
+			"ranking-stat-output":likefile,
 			"zerolag-rankingstatpdf-filename":zerolikefile,
 			}
 		)
@@ -469,7 +469,7 @@ for num_insp_nodes, (svd_banks, likefile, zerolikefile) in enumerate(zip(bank_gr
 			"time-slide-file":options.time_slide_file
 			},
 		input_files = {"ranking-stat-pdf":options.marginalized_likelihood_file,
-			"reference-likelihood-file":[likefile]},
+			"ranking-stat-input":[likefile]},
 		output_files = {"output":"not_used.xml.gz",
 			}
 		)
diff --git a/gstlal-inspiral/python/inspiral.py b/gstlal-inspiral/python/inspiral.py
index 15e80e7a21..e293ae27e7 100644
--- a/gstlal-inspiral/python/inspiral.py
+++ b/gstlal-inspiral/python/inspiral.py
@@ -470,7 +470,7 @@ class CoincsDocument(object):
 
 
 class Data(object):
-	def __init__(self, coincs_document, pipeline, rankingstat, zerolag_rankingstatpdf_filename = None, rankingstatpdf_url = None, likelihood_url = None, reference_likelihood_url = None, likelihood_snapshot_interval = None, thinca_interval = 50.0, min_log_L = None, sngls_snr_threshold = None, gracedb_far_threshold = None, gracedb_min_instruments = None, gracedb_group = "Test", gracedb_search = "LowMass", gracedb_pipeline = "gstlal", gracedb_service_url = "https://gracedb.ligo.org/api/", upload_auxiliary_data_to_gracedb = True, verbose = False):
+	def __init__(self, coincs_document, pipeline, rankingstat, zerolag_rankingstatpdf_filename = None, rankingstatpdf_url = None, ranking_stat_output_url = None, ranking_stat_input_url = None, likelihood_snapshot_interval = None, thinca_interval = 50.0, min_log_L = None, sngls_snr_threshold = None, gracedb_far_threshold = None, gracedb_min_instruments = None, gracedb_group = "Test", gracedb_search = "LowMass", gracedb_pipeline = "gstlal", gracedb_service_url = "https://gracedb.ligo.org/api/", upload_auxiliary_data_to_gracedb = True, verbose = False):
 		#
 		# initialize
 		#
@@ -540,8 +540,8 @@ class Data(object):
 		#
 		# setup likelihood ratio book-keeping.
 		#
-		# in online mode, if reference_likelihood_url is set then
-		# on each snapshot interval, and before providing stream
+		# in online mode, if ranking_stat_input_url is set then on
+		# each snapshot interval, and before providing stream
 		# thinca with its ranking statistic information, the
 		# current rankingstat object is replaced with the contents
 		# of that file. this is intended to be used by trigger
@@ -549,10 +549,9 @@ class Data(object):
 		# analysis to import ranking statistic information from
 		# their non-injection cousins instead of using whatever
 		# statistics they've collected internally.
-		# reference_likelihood_url is not used when running
-		# offline.
+		# ranking_stat_input_url is not used when running offline.
 		#
-		# likelihood_url provides the name of the file to which the
+		# ranking_stat_output_url provides the name of the file to which the
 		# internally-collected ranking statistic information is to
 		# be written whenever output is written to disk. if set to
 		# None, then only the trigger file will be written, no
@@ -562,8 +561,8 @@ class Data(object):
 		# they produce nonsense.
 		#
 
-		self.likelihood_url = likelihood_url
-		self.reference_likelihood_url = reference_likelihood_url
+		self.ranking_stat_output_url = ranking_stat_output_url
+		self.ranking_stat_input_url = ranking_stat_input_url
 		self.rankingstat = rankingstat
 
 		#
@@ -698,7 +697,7 @@ class Data(object):
 			if self.likelihood_snapshot_interval is not None and (self.likelihood_snapshot_timestamp is None or buf_timestamp - self.likelihood_snapshot_timestamp >= self.likelihood_snapshot_interval):
 				self.likelihood_snapshot_timestamp = buf_timestamp
 
-				# if a reference likelihood file is given,
+				# if a ranking statistic source url is set,
 				# overwrite rankingstat with its contents.
 				# FIXME There is currently no guarantee
 				# that the reference_likelihood_file on
@@ -707,11 +706,11 @@
 				# not have that large of an effect. The
 				# data loaded should never be older than
 				# the snapshot before last
-				if self.reference_likelihood_url is not None:
+				if self.ranking_stat_input_url is not None:
 					params_before = self.rankingstat.template_ids, self.rankingstat.instruments, self.rankingstat.min_instruments, self.rankingstat.delta_t
-					self.rankingstat, _ = far.parse_likelihood_control_doc(ligolw_utils.load_url(self.reference_likelihood_url, verbose = self.verbose, contenthandler = far.RankingStat.LIGOLWContentHandler))
+					self.rankingstat, _ = far.parse_likelihood_control_doc(ligolw_utils.load_url(self.ranking_stat_input_url, verbose = self.verbose, contenthandler = far.RankingStat.LIGOLWContentHandler))
 					if params_before != (self.rankingstat.template_ids, self.rankingstat.instruments, self.rankingstat.min_instruments, self.rankingstat.delta_t):
-						raise ValueError("'%s' contains incompatible ranking statistic configuration" % self.reference_likelihood_url)
+						raise ValueError("'%s' contains incompatible ranking statistic configuration" % self.ranking_stat_input_url)
 
 				# post a checkpoint message.
 				# FIXME: make sure this triggers
@@ -722,11 +721,11 @@
 				# should be responsible for it somehow, no?
 				# NOTE: self.snapshot_output_url() writes
 				# the current rankingstat object to the
-				# location identified by .likelihood_url,
+				# location identified by .ranking_stat_output_url,
 				# so if that is either not set or at least
 				# set to a different name than
-				# .reference_likelihood_url the file that
-				# has just been loaded above will not be
+				# .ranking_stat_input_url the file that has
+				# just been loaded above will not be
 				# overwritten.
 				self.pipeline.get_bus().post(message_new_checkpoint(self.pipeline, timestamp = buf_timestamp.ns()))
 
@@ -830,7 +829,7 @@
 			# necessary for this test to be super precisely
 			# defined.
 			for event in itertools.chain(self.stream_thinca.add_events(self.coincs_document.xmldoc, self.coincs_document.process_id, events, buf_timestamp, snr_segments, fapfar = self.fapfar), self.stream_thinca.last_coincs.single_sngl_inspirals() if self.stream_thinca.last_coincs else ()):
-				if self.likelihood_url is None:
+				if self.ranking_stat_output_url is None:
 					continue
 				assert event.end in one_or_more_instruments, "trigger at time (%s) with no SNR (%s)" % (str(event.end), str(one_or_more_instruments))
 				if event.end in two_or_more_instruments:
@@ -838,7 +837,7 @@
 			self.coincs_document.commit()
 
 			# update zero-lag bin counts in rankingstat.
-			if self.stream_thinca.last_coincs and self.likelihood_url is not None:
+			if self.stream_thinca.last_coincs and self.ranking_stat_output_url is not None:
 				for coinc_event_id, coinc_event in self.stream_thinca.last_coincs.coinc_event_index.items():
 					if coinc_event.time_slide_id in self.stream_thinca.last_coincs.zero_lag_time_slide_ids:
 						for event in self.stream_thinca.last_coincs.sngl_inspirals(coinc_event_id):
@@ -940,7 +939,7 @@
 			ratebinlists = self.rankingstat.denominator.triggerrates.values()
 
 			for event in self.stream_thinca.flush(self.coincs_document.xmldoc, self.coincs_document.process_id, snr_segments, fapfar = self.fapfar):
-				if self.likelihood_url is None:
+				if self.ranking_stat_output_url is None:
 					continue
 				assert event.end in one_or_more_instruments, "trigger at time (%s) with no SNR (%s)" % (str(event.end), str(one_or_more_instruments))
 				if event.end in two_or_more_instruments:
@@ -948,7 +947,7 @@
 			self.coincs_document.commit()
 
 			# update zero-lag bin counts in rankingstat.
-			if self.stream_thinca.last_coincs and self.likelihood_url is not None:
+			if self.stream_thinca.last_coincs and self.ranking_stat_output_url is not None:
 				for coinc_event_id, coinc_event in self.stream_thinca.last_coincs.coinc_event_index.items():
 					if coinc_event.time_slide_id in self.stream_thinca.last_coincs.zero_lag_time_slide_ids:
 						for event in self.stream_thinca.last_coincs.sngl_inspirals(coinc_event_id):
@@ -1293,18 +1292,8 @@ class Data(object):
 		# can't be used anymore
 		del self.coincs_document
 
-	def __write_likelihood_url(self, url, description, snapshot = False, verbose = False):
-		# write the parameter PDF file. NOTE; this file contains
-		# raw bin counts, and might or might not contain smoothed,
-		# normalized, PDF arrays but if it does they will not
-		# necessarily correspond to the bin counts.
-		#
-		# the parameter PDF arrays cannot be re-computed here
-		# because it would interfer with their use by stream
-		# thinca. we want to know precisely when the arrays get
-		# updated so we can have a hope of computing the likelihood
-		# ratio PDFs correctly.
-
+	def __write_ranking_stat_url(self, url, description, snapshot = False, verbose = False):
+		# write the ranking statistic file.
 		ligolw_utils.write_url(self.__get_likelihood_xmldoc(), ligolw_utils.local_path_from_url(url), gz = (url or "stdout").endswith(".gz"), verbose = verbose, trap_signals = None)
 		# Snapshots get their own custom file and path
 		if snapshot:
@@ -1317,8 +1306,8 @@
 	def write_output_url(self, url = None, description = "", verbose = False):
 		with self.lock:
 			self.__write_output_url(url = url, verbose = verbose)
-			if self.likelihood_url is not None:
-				self.__write_likelihood_url(self.likelihood_url, description, verbose = verbose)
+			if self.ranking_stat_output_url is not None:
+				self.__write_ranking_stat_url(self.ranking_stat_output_url, description, verbose = verbose)
 
 	def snapshot_output_url(self, description, extension, verbose = False):
 		with self.lock:
@@ -1329,8 +1318,8 @@
 			fname = self.T050017_filename(description, extension)
 			fname = os.path.join(subdir_from_T050017_filename(fname), fname)
 			self.__write_output_url(url = fname, verbose = verbose)
-			if self.likelihood_url is not None:
-				self.__write_likelihood_url(self.likelihood_url, description, snapshot = True, verbose = verbose)
+			if self.ranking_stat_output_url is not None:
+				self.__write_ranking_stat_url(self.ranking_stat_output_url, description, snapshot = True, verbose = verbose)
 			if self.zerolag_rankingstatpdf is not None:
 				self.__write_zero_lag_ranking_stats_file(self.zerolag_rankingstatpdf_filename, verbose = verbose)
 			self.coincs_document = coincs_document
-- 
GitLab
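For reviewers who want to see the renamed option contract in isolation, here is a minimal sketch distilled from the hunks above. It is not part of the patch: the option names, the CacheEntry cache expansion, and the error messages follow the patch verbatim, but the standalone scaffolding (a bare OptionParser rather than the script's option groups, the function name parse_ranking_stat_options, and the lal.utils import path) are assumptions made purely for illustration.

from optparse import OptionParser

from lal.utils import CacheEntry	# assumed import path for LAL's cache-entry helper


def parse_ranking_stat_options(argv = None):
	# hypothetical standalone parser; the real script attaches these
	# options to an OptionGroup inside its parse_command_line()
	parser = OptionParser()
	parser.add_option("--ranking-stat-output", metavar = "filename", action = "append", default = [], help = "File to which to write ranking statistic data; one per --svd-bank.")
	parser.add_option("--ranking-stat-output-cache", metavar = "filename", help = "LAL cache listing the ranking statistic output files.")
	parser.add_option("--ranking-stat-input", metavar = "url", help = "URL from which to load a ranking statistic definition (online only).")
	parser.add_option("--likelihood-snapshot-interval", metavar = "seconds", type = "float", help = "How often to snapshot candidate and ranking statistic data to disk.")
	options, _ = parser.parse_args(argv)

	# --ranking-stat-output and --ranking-stat-output-cache are mutually
	# exclusive; the cache, when given, is expanded out-of-place into a
	# list of URLs so process_params still records what was supplied
	if options.ranking_stat_output:
		if options.ranking_stat_output_cache:
			raise ValueError("cannot supply both --ranking-stat-output and --ranking-stat-output-cache")
	elif options.ranking_stat_output_cache:
		options.ranking_stat_output = [CacheEntry(line).url for line in open(options.ranking_stat_output_cache)]
		if not options.ranking_stat_output:
			raise ValueError("--ranking-stat-output-cache is empty")

	# --ranking-stat-input implies the ranking statistic definition is
	# re-read on each snapshot, so a snapshot interval is mandatory
	if options.ranking_stat_input and not options.likelihood_snapshot_interval:
		raise ValueError("must set --likelihood-snapshot-interval when --ranking-stat-input is set")

	return options

Under the renamed interface, an online injection job would therefore pass --ranking-stat-input (typically pointing at the ranking statistic file of its non-injection counterpart, per the comment block in inspiral.py) together with --likelihood-snapshot-interval, while an offline job supplies one --ranking-stat-output per --svd-bank, or a single --ranking-stat-output-cache in their place.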