Commit ecf7d006 authored by Cody Messick

Cache files used in offline inspiral analysis now point to URLs instead of paths (i.e. file://localhost/path instead of path)
parent 85a41172
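
For reference, a minimal sketch of the path-to-URL conversion pattern this commit applies throughout the pipeline code. The filename below is hypothetical, and the CacheEntry import path (glue.lal) is an assumption based on how gstlal typically builds T050017 cache entries:

	import os
	from glue.lal import CacheEntry  # assumed import path

	# Hypothetical T050017-style filename (IFO-DESCRIPTION-GPSSTART-DURATION.ext)
	filename = "H1-SVD_BANK-966384015-4096.xml.gz"

	# Old behaviour: build the cache entry from the bare path.
	# New behaviour: convert the absolute path to a file://localhost URL first.
	url = "file://localhost%s" % os.path.abspath(filename)
	entry = CacheEntry.from_T050017(url)
	print entry.url  # file://localhost/<cwd>/H1-SVD_BANK-966384015-4096.xml.gz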
@@ -224,8 +224,8 @@ def svd_bank_cache_maker(svd_bank_strings, counter, injection = False):
 	svd_cache_entries = []
 	parsed_svd_bank_strings = [inspiral.parse_svdbank_string(single_svd_bank_string) for single_svd_bank_string in svd_bank_strings]
 	for svd_bank_parsed_dict in parsed_svd_bank_strings:
-		for url in svd_bank_parsed_dict.itervalues():
-			svd_cache_entries.append(CacheEntry.from_T050017(url))
+		for filename in svd_bank_parsed_dict.itervalues():
+			svd_cache_entries.append(CacheEntry.from_T050017("file://localhost%s" % os.path.abspath(filename)))
 	return [svd_cache_entry.url for svd_cache_entry in svd_cache_entries]
@@ -456,7 +456,7 @@ def rank_and_merge(dag, createPriorDistStatsJob, calcRankPDFsJob, calcLikelihood
 	# Merging all the dbs from the same sub bank
 	for subbank, inputs in enumerate([node.input_files[""] for node in nodes]):
-		db = inspiral_pipe.group_T050017_filename_from_T050017_files([CacheEntry.from_T050017(url) for url in inputs], '.sqlite')
+		db = inspiral_pipe.group_T050017_filename_from_T050017_files([CacheEntry.from_T050017("file://localhost%s" % os.path.abspath(filename)) for filename in inputs], '.sqlite')
 		sqlitenode = inspiral_pipe.generic_node(toSqliteJob, dag, parent_nodes = merge_nodes,
 			opts = {"replace":"", "tmp-space":inspiral_pipe.condor_scratch_space()},
 			input_cache_files = {"input-cache":inputs},
@@ -481,7 +481,7 @@ def rank_and_merge(dag, createPriorDistStatsJob, calcRankPDFsJob, calcLikelihood
 	# Merging all the dbs from the same sub bank and injection run
 	for subbank, inputs in enumerate([node.input_files[""] for node in nodes]):
-		injdb = inspiral_pipe.group_T050017_filename_from_T050017_files([CacheEntry.from_T050017(url) for url in inputs], '.sqlite')
+		injdb = inspiral_pipe.group_T050017_filename_from_T050017_files([CacheEntry.from_T050017("file://localhost%s" % os.path.abspath(filename)) for filename in inputs], '.sqlite')
 		sqlitenode = inspiral_pipe.generic_node(toSqliteJob, dag, parent_nodes = merge_nodes,
 			opts = {"replace":"", "tmp-space":inspiral_pipe.condor_scratch_space()},
 			input_cache_files = {"input-cache":inputs},
@@ -506,7 +506,7 @@ def finalize_runs(dag, lalappsRunSqliteJob, toXMLJob, ligolwInspinjFindJob, toSq
 	# Process the chirp mass bins in chunks to paralellize the merging process
 	for chunk, dbs in enumerate(chunks([node.input_files[""] for node in innodes[None]], 20)):
 		# Merge the final non injection database into chunks
-		noninjdb = inspiral_pipe.group_T050017_filename_from_T050017_files([CacheEntry.from_T050017(url) for url in dbs], '.sqlite')
+		noninjdb = inspiral_pipe.group_T050017_filename_from_T050017_files([CacheEntry.from_T050017("file://localhost%s" % os.path.abspath(filename)) for filename in dbs], '.sqlite')
 		sqlitenode = inspiral_pipe.generic_node(toSqliteJob, dag, parent_nodes = innodes[None],
 			opts = {"replace":"", "tmp-space":inspiral_pipe.condor_scratch_space()},
 			input_cache_files = {"input-cache": dbs},
@@ -547,7 +547,7 @@ def finalize_runs(dag, lalappsRunSqliteJob, toXMLJob, ligolwInspinjFindJob, toSq
 	for chunk, dbs in enumerate(chunks([node.input_files[""] for node in thisinjnodes], 20)):
 		# Setup the final output names, etc.
-		injdb = inspiral_pipe.group_T050017_filename_from_T050017_files([CacheEntry.from_T050017(url) for url in dbs], '.sqlite')
+		injdb = inspiral_pipe.group_T050017_filename_from_T050017_files([CacheEntry.from_T050017("file://localhost%s" % os.path.abspath(filename)) for filename in dbs], '.sqlite')
 		# merge
@@ -620,7 +620,7 @@ def compute_FAP(marginalizeJob, gstlalInspiralComputeFarFromSnrChisqHistogramsJo
 	margnodes = []
 	margnum = 16
 	for i,n in enumerate(range(0, len(margin), margnum)):
-		margout.append(inspiral_pipe.group_T050017_filename_from_T050017_files([CacheEntry.from_T050017(url) for url in margin[n:n+margnum]], '.xml.gz'))
+		margout.append(inspiral_pipe.group_T050017_filename_from_T050017_files([CacheEntry.from_T050017("file://localhost%s" % os.path.abspath(filename)) for filename in margin[n:n+margnum]], '.xml.gz'))
 		margnodes.append(inspiral_pipe.generic_node(marginalizeJob, dag, parent_nodes = final_sqlite_nodes + rankpdf_nodes,
 			output_files = {"output":margout[-1]},
 			input_cache_files = {"likelihood-cache":margin[n:n+margnum]}
@@ -249,7 +249,7 @@ class generic_node(InspiralNode):
 		# is handled by gstlal_inspiral_pipe directly
 		for opt, val in input_cache_files.items():
-			cache_entries = [lal.CacheEntry.from_T050017(url) for url in val]
+			cache_entries = [lal.CacheEntry.from_T050017("file://localhost%s" % os.path.abspath(filename)) for filename in val]
 			cache_file_name = group_T050017_filename_from_T050017_files(cache_entries, '.cache', path = job.tag_base)
 			with open(cache_file_name, "w") as cache_file:
 				lal.Cache(cache_entries).tofile(cache_file)
@@ -258,7 +258,7 @@ class generic_node(InspiralNode):
 			self.cache_inputs.setdefault(opt, []).append(cache_file_name)
 		for opt, val in output_cache_files.items():
-			cache_entries = [lal.CacheEntry.from_T050017(url) for url in val]
+			cache_entries = [lal.CacheEntry.from_T050017("file://localhost%s" % os.path.abspath(filename)) for filename in val]
 			cache_file_name = group_T050017_filename_from_T050017_files(cache_entries, '.cache', path = job.tag_base)
 			with open(cache_file_name, "w") as cache_file:
 				lal.Cache(cache_entries).tofile(cache_file)
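
To illustrate the downstream effect, here is a rough sketch of how generic_node's cache writing behaves with the new URLs. The filenames are hypothetical and the glue.lal import is assumed; each line of the written .cache file ends with the entry's URL, so jobs reading the cache now receive file://localhost/... URLs rather than bare relative paths:

	import os
	from glue import lal  # assumed import, matching the lal.CacheEntry usage above

	# Hypothetical T050017-style inputs, standing in for node.input_files[""] values.
	val = ["H1-LLOID-966384015-4096.sqlite", "L1-LLOID-966384015-4096.sqlite"]

	# Same pattern as the commit: absolute path -> file://localhost URL -> CacheEntry.
	cache_entries = [lal.CacheEntry.from_T050017("file://localhost%s" % os.path.abspath(f)) for f in val]

	with open("example.cache", "w") as cache_file:
		# Serializes one line per entry, ending in the file://localhost URL.
		lal.Cache(cache_entries).tofile(cache_file)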