Commit 339f17af, authored by Patrick Godwin

fix backwards compatibility with inspiral_pipe-based workflows

parent 1f82704c
Merge request !41: DAG Workflow Overhaul + OSG DAG support
@@ -116,7 +116,12 @@ injs, inj_files = [], []
 if options.check_vt:
     connection = {}
     for f in options.injection_database:
-        inj_file = CacheEntry.from_T050017(f).description.rpartition(f"GSTLAL_TRIGGER_DATABASE")[2].lstrip("_")
+        # get injection tag from filename, supporting
+        # both old and new filename conventions
+        if "GSTLAL_TRIGGER_DATABASE" in f:
+            inj_file = CacheEntry.from_T050017(f).description.rpartition(f"GSTLAL_TRIGGER_DATABASE")[2].lstrip("_")
+        else:
+            inj_file = f.split("-")[1].replace("ALL_LLOID_","")
         inj_files.append(inj_file)
         connection[inj_file] = sqlite3.connect(f)
         injs += lsctables.SimInspiralTable.get_table(dbtables.get_xml(connection[inj_file]))
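For context, both naming conventions reduce to the same injection tag. A minimal sketch with hypothetical filenames, substituting plain string handling for lal's CacheEntry.from_T050017 so it runs standalone:

# New-style T050017 name: the tag follows "GSTLAL_TRIGGER_DATABASE" in the
# description field (the second "-"-delimited token). Filename is hypothetical.
new_style = "H1L1-GSTLAL_TRIGGER_DATABASE_BNS-1000000000-10000.sqlite"
description = new_style.split("-")[1]  # stands in for CacheEntry.from_T050017(f).description
print(description.rpartition("GSTLAL_TRIGGER_DATABASE")[2].lstrip("_"))  # -> BNS

# Old inspiral_pipe-style name: the tag is the second token minus "ALL_LLOID_".
old_style = "H1L1-ALL_LLOID_BNS-1000000000-10000.sqlite"
print(old_style.split("-")[1].replace("ALL_LLOID_", ""))  # -> BNS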
@@ -62,6 +62,7 @@ from gstlal import dagparts
 from gstlal import datasource
 from gstlal import inspiral
 from gstlal import svd_bank
+from gstlal.datafind import DataType

 warnings.warn(
@@ -407,12 +408,13 @@ def injection_template_match_layer(dag, jobs, parent_nodes, options, instruments
     sim_name = sim_tag_from_inj_file(inj)
     inj_tmplt_match_nodes[sim_name] = {}
     for nsplit in range(options.num_split_inj_files):
+        inj_filename = DataType.SPLIT_INJECTIONS.filename("H1K1L1V1", segments.segment(0, 0), f"{nsplit:04d}", sim_name)
         inj_tmplt_match_nodes[sim_name][nsplit] = dagparts.DAGNode(
             jobs['injTmpltMatch'],
             dag,
             parent_nodes = parent_nodes,
             input_files = {
-                "injection-file": "%s/%s_INJ_SPLIT_%04d.xml"%(jobs['injSplitter'].output_path, sim_name, nsplit),
+                "injection-file": os.path.join(jobs['injSplitter'].output_path, inj_filename),
                 "template-bank": options.template_bank
             },
             output_files = {"output": "%s/%s-INJECTION_TEMPLATE_MATCH_%s_%04d.xml.gz"%(jobs['injTmpltMatch'].output_path, instruments, sim_name, nsplit)}
@@ -720,12 +722,13 @@ def inspiral_layer(dag, jobs, psd_nodes, svd_nodes, segsdict, options, channel_d

 def expected_snr_layer(dag, jobs, ref_psd_parent_nodes, options, num_split_inj_snr_jobs):
     ligolw_add_nodes = []
     for inj in options.injections:
+        sim_tag = sim_tag_from_inj_file(inj.split(":")[-1])
         inj_snr_nodes = []
         inj_splitter_node = dagparts.DAGNode(jobs['injSplitter'], dag, parent_nodes=[],
             opts = {
                 "output-path":jobs['injSplitter'].output_path,
-                "usertag": sim_tag_from_inj_file(inj.split(":")[-1]),
+                "usertag": sim_tag,
                 "nsplit": num_split_inj_snr_jobs
             },
             input_files = {"": inj.split(":")[-1]}
@@ -733,8 +736,10 @@ def expected_snr_layer(dag, jobs, ref_psd_parent_nodes, options, num_split_inj_s
         inj_splitter_node.set_priority(98)

         # FIXME Use machinery in inspiral_pipe.py to create reference_psd.cache
-        injection_files = ["%s/%s_INJ_SPLIT_%04d.xml" % (jobs['injSplitter'].output_path, sim_tag_from_inj_file(inj.split(":")[-1]), i) for i in range(num_split_inj_snr_jobs)]
-        for injection_file in injection_files:
+        injection_files = []
+        for i in range(num_split_inj_snr_jobs):
+            inj_filename = DataType.SPLIT_INJECTIONS.filename("H1K1L1V1", segments.segment(0, 0), f"{i:04d}", sim_tag)
+            injection_file = os.path.join(jobs['injSplitter'].output_path, inj_filename)
             injSNRnode = dagparts.DAGNode(jobs['gstlalInjSnr'], dag, parent_nodes=ref_psd_parent_nodes + [inj_splitter_node],
                 # FIXME somehow choose the actual flow based on mass?
                 # max(flow) is chosen for performance not
@@ -748,6 +753,7 @@ def expected_snr_layer(dag, jobs, ref_psd_parent_nodes, options, num_split_inj_s
             )
             injSNRnode.set_priority(98)
             inj_snr_nodes.append(injSNRnode)
+            injection_files.append(injection_file)

         addnode = dagparts.DAGNode(jobs['ligolwAdd'], dag, parent_nodes=inj_snr_nodes,
             input_files = {"": ' '.join(injection_files)},
@@ -1225,7 +1231,7 @@ def sql_cluster_and_merge_layer(dag, jobs, likelihood_nodes, ligolw_add_nodes, o
             injdb = dagparts.T050017_filename(instruments, 'PART_LLOID_%s_CHUNK_%04d' % (sim_tag_from_inj_file(injections), chunk), boundary_seg, '.sqlite')

             # merge and cluster
-            clusternode = merge_cluster_layer(dag, jobs, injnodes, injdb, [node.input_files[""] for node in injnodes], options.cluster_sql_file)
+            clusternode = merge_cluster_layer(dag, jobs, injnodes, injdb, [node.input_files[""] for node in injnodes], options.injection_sql_file)
             final_nodes.append(clusternode)

         # Setup the final output names, etc.
@@ -1239,7 +1245,7 @@ def sql_cluster_and_merge_layer(dag, jobs, likelihood_nodes, ligolw_add_nodes, o
         parent_nodes = final_nodes + ligolw_add_nodes
         input_files = (vetoes + [options.frame_segments_file, injections])
         input_cache_files = [node.input_files[""] for node in final_nodes]
-        clusternode = merge_cluster_layer(dag, jobs, parent_nodes, injdb, input_cache_files, options.cluster_sql_file, input_files=input_files)
+        clusternode = merge_cluster_layer(dag, jobs, parent_nodes, injdb, input_cache_files, options.injection_sql_file, input_files=input_files)

         clusternode = dagparts.DAGNode(jobs['toXML'], dag, parent_nodes = [clusternode],
             opts = {"tmp-space":dagparts.condor_scratch_space()},
@@ -1379,8 +1385,11 @@ def make_mc_vtplot_layer(dag, jobs, parent_nodes, add_parent_node, options, inst
     lnlrcdf_cache = dagparts.group_T050017_filename_from_T050017_files(lnlrcdf_caches, '.xml.gz', path = jobs['makeMcVtplot'].output_path)
     input_files = {
         "ranking-stat-pdf": "post_marginalized_likelihood.xml.gz",
-        "": inj
     }
+    if injdbs:
+        input_files["injection-database"] = inj
+    else:
+        input_files["injection-files"] = inj

     if injdbs:
         job = jobs['makeMcVtplotCheck']
@@ -1407,7 +1416,7 @@ def horizon_dist_layer(dag, jobs, psd_nodes, options, boundary_seg, output_dir,
     dagparts.DAGNode(jobs['horizon'], dag,
         parent_nodes = list(psd_nodes.values()),
         input_files = {"":[node.output_files["write-psd"] for node in psd_nodes.values()]},
-        output_files = {"":dagparts.T050017_filename(instruments, "HORIZON", boundary_seg, '.png', path = output_dir)}
+        output_files = {"output":dagparts.T050017_filename(instruments, "HORIZON", boundary_seg, '.png', path = output_dir)}
     )
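Throughout these layers the keys of input_files/output_files appear to map to command-line option names, an empty key being passed as a bare positional argument, so this change moves the horizon plot path from a positional argument to an --output option. A simplified sketch of that convention (not the actual dagparts implementation):

def args_from_files(files):
    # hypothetical reduction of the dict-to-arguments convention:
    # a named key becomes "--key value"; an empty key is passed bare
    args = []
    for key, value in files.items():
        if key:
            args.extend(["--%s" % key, str(value)])
        else:
            args.append(str(value))
    return args

print(args_from_files({"": "horizon.png"}))        # ['horizon.png']
print(args_from_files({"output": "horizon.png"}))  # ['--output', 'horizon.png']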