
Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (48)
Showing 2169 additions and 2858 deletions
......@@ -17,7 +17,7 @@ dist_bin_SCRIPTS = \
gstlal_inspiral_create_dt_dphi_snr_ratio_pdfs_dag \
gstlal_inspiral_create_p_of_ifos_given_horizon \
gstlal_inspiral_create_prior_diststats \
gstlal_inspiral_dag \
gstlal_inspiral_destagger_injections \
gstlal_inspiral_dlrs_diag \
gstlal_inspiral_grid_bank \
gstlal_inspiral_iir_bank_pipe \
......@@ -49,8 +49,9 @@ dist_bin_SCRIPTS = \
gstlal_inspiral_rate_posterior \
gstlal_inspiral_recompute_online_far \
gstlal_inspiral_recompute_online_far_from_gracedb \
gstlal_inspiral_rerank_pipe \
gstlal_inspiral_reset_likelihood \
gstlal_inspiral_split_injections \
gstlal_inspiral_combine_injection_sets \
gstlal_inspiral_svd_bank_pipe \
gstlal_ll_inspiral_calculate_range \
gstlal_ll_inspiral_daily_page \
......
......@@ -147,6 +147,7 @@ import socket
import StringIO
import sys
import tempfile
import time
import uuid
import warnings
......@@ -158,6 +159,7 @@ Gst.init(None)
import lal
from lal import LIGOTimeGPS
from lal import UTCToGPS
from lal.utils import CacheEntry
from ligo.gracedb.rest import DEFAULT_SERVICE_URL as gracedb_default_service_url
......@@ -181,6 +183,8 @@ from gstlal import servicediscovery
from gstlal import simulation
from gstlal import svd_bank
GSTLAL_PROCESS_START_TIME = UTCToGPS(time.gmtime())
@lsctables.use_in
class LIGOLWContentHandler(ligolw.LIGOLWContentHandler):
pass
......@@ -790,7 +794,6 @@ for output_file_number, (svd_bank_url_dict, output_url, ranking_stat_output_url,
rankingstat.numerator.set_horizon_factors(horizon_factors)
if rankingstat is None:
rankingstat = far.RankingStat(template_ids = template_ids, instruments = all_instruments, delta_t = options.coincidence_threshold, min_instruments = options.min_instruments, horizon_factors = horizon_factors)
rankingstat.numerator.add_signal_model()
#
......@@ -806,6 +809,7 @@ for output_file_number, (svd_bank_url_dict, output_url, ranking_stat_output_url,
coincs_document = inspiral.CoincsDocument(
url = output_url or "%s-%s_LLOID-%d-%d.xml.gz" % (lsctables.instrumentsproperty.set(detectors.channel_dict.keys()).replace(",", ""), options.job_tag, int(detectors.seg[0]), int(abs(detectors.seg))),
process_params = process_params,
process_start_time = GSTLAL_PROCESS_START_TIME,
comment = options.comment,
instruments = rankingstat.instruments,
seg = detectors.seg,
......
......@@ -65,7 +65,7 @@ def create_split_injection(processmap, process_params_dict, endtimesim_tuples, m
xmldoc.childNodes[-1].appendChild(siminspiraltable)
# FIXME add this to process params table
#process = ligolw_process.register_to_xmldoc(xmldoc, "gstlal_inspiral_split_injections", param_dict)
#process = ligolw_process.register_to_xmldoc(xmldoc, "gstlal_inspiral_combine_injection_sets", param_dict)
process_ids = {}
siminspiraltimemap = {}
......@@ -93,6 +93,7 @@ def parse_command_line():
parser.add_option("--injection-rate-tolerance", type = "float", default = 0.1, help = "Acceptable tolerance for target injection rate, files will be written to disk once a splitting has been found such that the injection rate is target-injection-rate +- (injection-rate-tolerance*target-injection-rate) (default: 0.1)")
parser.add_option("--generate-inj-string-only", action = "store_true", help = "Do not combine files, generate an injection string specifying which chirp mass bounds to use to search for the provided injection files")
parser.add_option("--single-output", action = "store_true", help = "Produce a single output file containing all injections")
parser.add_option("--injection-str-output", default = "injection_str.txt", help = "Write injection string used in analysis Makefile to this file. [Default: injection_str.txt]")
# FIXME Add option checking so num-output and target-injection-rate cannot both be provided
......@@ -159,4 +160,5 @@ else:
create_split_injection(processmap, process_params_dict, [siminspiralmap[tuple(k for k in key)] for key in keys], keys[0][0], keys[-1][0], "%s_%04d.xml" %( options.output_tag, i) if not options.single_output else "%s.xml" % options.output_tag, options)
mchirp_str += mchirp_injection_str(keys, "%s_%04d.xml" %( options.output_tag, i) if not options.single_output else "%s.xml" % options.output_tag)
print 'MCHIRP_INJECTIONS := %s' % mchirp_str[:-1]
with open(options.injection_str_output, 'w') as f:
f.write(mchirp_str[:-1])
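The new --injection-str-output option writes to disk the same MCHIRP_INJECTIONS string that was previously only printed, so an analysis Makefile can pick it up from a file. A minimal, hypothetical consumer sketch, assuming each whitespace-separated token has the X:Y:filename form described by the --injections option of gstlal_inspiral_rerank_pipe below:

# Illustrative only: parse the file written by --injection-str-output
# (injection_str.txt is just that option's default name).
with open("injection_str.txt") as f:
    tokens = f.read().split()
for token in tokens:
    mchirp_lo, mchirp_hi, filename = token.split(":", 2)
    print("%s covers %s <= mchirp < %s" % (filename, mchirp_lo, mchirp_hi))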
......@@ -71,7 +71,6 @@ def parse_command_line():
version = "Name: %%prog\n%s" % "" # FIXME
)
parser.add_option("-v", "--verbose", action = "store_true", help = "Be verbose.")
parser.add_option("-s", "--synthesize-numerator", action = "store_true", help = "Synthesize a numerator (SNR, \\chi^2) distribution.")
# FIXME: default must be identical to gstlal_inspiral's default
parser.add_option("--coincidence-threshold", metavar = "value", type = "float", default = 0.005, help = "Set the coincidence window in seconds (default = 0.005). The light-travel time between instruments will be added automatically in the coincidence test.")
# FIXME: default must be identical to gstlal_inspiral's default
......@@ -83,6 +82,7 @@ def parse_command_line():
parser.add_option("--svd-file", metavar = "filename", help = "The SVD file to read the template ids from")
parser.add_option("--mass-model-file", metavar = "filename", help = "The mass model file to read from (hdf5 format)")
parser.add_option("--dtdphi-file", metavar = "filename", help = "dtdphi snr ratio pdfs to read from (hdf5 format). Default passed by gstlal_inspiral_pipe, but not when run as a standalone program.")
parser.add_option("--idq-file", metavar = "filename", help = "idq glitch file (hdf5 format)")
parser.add_option("--psd-xml", type = "string", help = "Specify a PSD to use for computing template bandwidth. Required if df is bandwidth")
options, filenames = parser.parse_args()
......@@ -120,8 +120,8 @@ def parse_command_line():
bandwidths += [templates.bandwidth(row.mass1, row.mass2, row.spin1z, row.spin2z, f_min = 10.0, f_max = row.f_final, delta_f = 0.25, psd = psd[ifo])]
horizon_factors.update(bank.horizon_factors)
if options.df == "bandwidth":
# NOTE the 4000 is tuned by looking at real data distributions
options.df = int(4000. / min(bandwidths))
# don't let the bandwidth get too small
options.df = max(int(min(bandwidths)) + 1, 10)
return options, process_params, filenames, template_ids, horizon_factors
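For comparison, a minimal sketch (with made-up bandwidth values) of how the --df default derived from the template bandwidths changes with this patch when options.df == "bandwidth":

# Illustrative only: old vs. new derivation of the chi^2 degrees-of-freedom
# parameter from the minimum template bandwidth (in Hz).
bandwidths = [12.7, 55.3, 230.0]            # hypothetical per-template bandwidths
old_df = int(4000. / min(bandwidths))       # 4000 / 12.7 -> 314
new_df = max(int(min(bandwidths)) + 1, 10)  # 12 + 1 -> 13, never below 10
print(old_df, new_df)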
......@@ -158,13 +158,13 @@ process = ligolw_process.register_to_xmldoc(xmldoc, u"gstlal_inspiral_create_pri
#
rankingstat = far.RankingStat(template_ids = template_ids, instruments = options.instrument, delta_t = options.coincidence_threshold, min_instruments = options.min_instruments, population_model_file = options.mass_model_file, dtdphi_file = options.dtdphi_file, horizon_factors = horizon_factors)
rankingstat = far.RankingStat(template_ids = template_ids, instruments = options.instrument, delta_t = options.coincidence_threshold, min_instruments = options.min_instruments, population_model_file = options.mass_model_file, dtdphi_file = options.dtdphi_file, horizon_factors = horizon_factors, idq_file = options.idq_file)
if options.background_prior > 0:
rankingstat.denominator.add_noise_model(number_of_events = options.background_prior, df = int(options.df))
rankingstat.denominator.add_noise_model(number_of_events = options.background_prior)
if options.synthesize_numerator:
rankingstat.numerator.add_signal_model()
# Add the numerator
rankingstat.numerator.add_signal_model(df = int(options.df))
#
......
This diff is collapsed.
#!/usr/bin/env python
from ligo.lw import dbtables
from ligo.lw import lsctables
from ligo.lw import utils as ligolw_utils
from ligo.lw.ligolw import LIGOLWContentHandler
from ligo.lw.utils import process as ligolw_process
from ligo.lw.utils import segments as ligolw_segments
from optparse import OptionParser
from os import path
import numpy
import sqlite3
import sys
@lsctables.use_in
class ligolwcontenthandler(LIGOLWContentHandler):
pass
def parse_command_line():
parser = OptionParser(usage="%prog [options] database")
parser.add_option("--injection-file", "-i", metavar = "filename", action = "append", help = "An original injection file that was used as input to gstlal_inspiral_combine_injection_sets. Option can be provided multiple times. All of the injection files that were combined using gstlal_inspiral_combine_injection_sets must be provided.")
parser.add_option("--tmp-space", "-t", metavar = "path", default = None, help = "Path to a directory suitable for use as a work area while manipulating the database file. The database file will be worked on in this directory, and then moved to the final location when complete. This option is intended to improve performance when running in a networked environment, where there might be a local disk with higher bandwidth than is available to the filesystem on which the final output will reside.")
parser.add_option("--verbose", "-v", action = "store_true", help = "Be verbose.")
options, filenames = parser.parse_args()
if len(filenames) != 1:
raise ValueError("Must provide exactly one database, %d provided (%s)" % (len(filenames), ', '.join(filenames)))
return options, filenames[0]
def read_inj_files(options):
inj_dict = {}
for inj in options.injection_file:
xmldoc = ligolw_utils.load_filename(inj, contenthandler = ligolwcontenthandler, verbose = options.verbose)
start_time = 0
dt = 0
for processparam in lsctables.ProcessParamsTable.get_table(xmldoc):
if processparam.param == "--gps-start-time":
if start_time != 0:
raise ValueError("injection file %s contains more than one --gps-start-time entry in ProcessParams table" % inj)
start_time = int(processparam.value)
elif processparam.param == "--time-step":
if dt != 0:
raise ValueError("injection file %s contains more than one --time-step entry in ProcessParams table" % inj)
# Have to convert to float first because int() can't parse a string in scientific notation
dt = int(float(processparam.value))
if start_time == 0:
raise ValueError("injection file %s does not contain --gps-start-time entry in ProcessParams table" % inj)
if dt == 0:
raise ValueError("injection file %s does not contain --time-step entry in ProcessParams table" % inj)
# FIXME Need a standardized naming convention, this could easily fail
inj_dict[path.basename(inj).split('.')[0].split('-')[0]] = (start_time, dt)
xmldoc.unlink()
return inj_dict
def set_up(cursor, options):
inj_dict = read_inj_files(options)
# NOTE NOTE NOTE We only need the integer geocenter time because this
# is only used to associate injections with the original injection
# files. We need the nanoseconds for the detector times to determine
# which injection a coinc_inspiral that hasn't already been associated with an
# injection is closest to
# FIXME Will need to modify this to include Kagra
db_inj_times = set((t for t in cursor.execute('SELECT geocent_end_time, h_end_time+1e-9*h_end_time_ns, l_end_time+1e-9*l_end_time_ns, v_end_time+1e-9*v_end_time_ns, simulation_id FROM sim_inspiral ORDER BY geocent_end_time ASC').fetchall()))
sim_id_dict = {}
# files we care about have four types of coincidences; these are their descriptions
#sngl_inspiral<-->sngl_inspiral coincidences
#sim_inspiral<-->sngl_inspiral coincidences
#sim_inspiral<-->coinc_event coincidences (exact)
#sim_inspiral<-->coinc_event coincidences (nearby)
exact_inj_coinc_def_id = cursor.execute('SELECT coinc_def_id FROM coinc_definer WHERE description == "sim_inspiral<-->coinc_event coincidences (exact)"').fetchone()
nearby_inj_coinc_def_id = cursor.execute('SELECT coinc_def_id FROM coinc_definer WHERE description == "sim_inspiral<-->coinc_event coincidences (nearby)"').fetchone()
inj_trigger_coinc_def_id = cursor.execute('SELECT coinc_def_id FROM coinc_definer WHERE description == "sim_inspiral<-->sngl_inspiral coincidences"').fetchone()
coinc_inspiral_coinc_def_id = cursor.execute('SELECT coinc_def_id FROM coinc_definer WHERE description == "sngl_inspiral<-->sngl_inspiral coincidences"').fetchone()
# parse the sim_inspiral table into different injection sets
for inj_basename, (start_time, dt) in inj_dict.items():
for geocent_end_time, h_end_time, l_end_time, v_end_time, sim_id in db_inj_times:
tref = geocent_end_time - start_time
# Check if this injection is within 2 seconds of where an injection from this injection file would fall if every injection were spaced by dt
if tref % dt <= 2 or tref % dt == dt - 2 or tref % dt == dt - 1:
sim_id_dict.setdefault(inj_basename, []).append((geocent_end_time, h_end_time, l_end_time, v_end_time, sim_id))
if inj_basename in sim_id_dict.keys():
db_inj_times -= set(sim_id_dict[inj_basename])
return sim_id_dict, exact_inj_coinc_def_id, nearby_inj_coinc_def_id, inj_trigger_coinc_def_id, coinc_inspiral_coinc_def_id
options, db = parse_command_line()
split_db_name = path.basename(db).split('-')
working_filename = dbtables.get_connection_filename(db, tmp_path = options.tmp_space, verbose = options.verbose)
connection = sqlite3.connect(working_filename)
cursor = connection.cursor()
original_number_coinc_events = int(cursor.execute('SELECT count(*) FROM coinc_event').fetchone()[0])
new_number_coinc_events = 0
sim_id_dict, exact_inj_coinc_def_id, nearby_inj_coinc_def_id, inj_trigger_coinc_def_id, coinc_inspiral_coinc_def_id = set_up(cursor, options)
for i, sim_key in enumerate(sim_id_dict.keys()):
if i != 0:
working_filename = dbtables.get_connection_filename(db, tmp_path = options.tmp_space, verbose = True)
connection = sqlite3.connect(working_filename)
cursor = connection.cursor()
# register program in process table
xmldoc = dbtables.get_xml(connection)
# FIXME Make this not ugly
process_params_dict = dict(("--injection-file", inj_file) for inj_file in options.injection_file)
process_params_dict["--verbose"] = options.verbose
process_params_dict["--tmp-space"] = options.tmp_space
process = ligolw_process.register_to_xmldoc(xmldoc, u"gstlal_inspiral_destagger_injections", process_params_dict)
relevant_sim_ids = [sim_id for (g_end_time, h_end_time, l_end_time, v_end_time, sim_id) in sim_id_dict[sim_key]]
irrelevant_sim_ids = [sim_id for key in (set(sim_id_dict.keys()) - set((sim_key,))) for (g_end_time, h_end_time, l_end_time, v_end_time, sim_id) in sim_id_dict[key]]
irrelevant_keys = set(sim_id_dict.keys()) - set((sim_key,))
sim_times_relevant_sim_ids = {"H1": numpy.array([h_end_time for (g_end_time, h_end_time, l_end_time, v_end_time, sim_id) in sim_id_dict[sim_key]]), "L1": numpy.array([l_end_time for (g_end_time, h_end_time, l_end_time, v_end_time, sim_id) in sim_id_dict[sim_key]]), "V1": numpy.array([v_end_time for (g_end_time, h_end_time, l_end_time, v_end_time, sim_id) in sim_id_dict[sim_key]])}
sim_times_irrelevant_sim_ids = {"H1": numpy.array([h_end_time for key in irrelevant_keys for (g_end_time, h_end_time, l_end_time, v_end_time, sim_id) in sim_id_dict[key]]), "L1": numpy.array([l_end_time for key in irrelevant_keys for (g_end_time, h_end_time, l_end_time, v_end_time, sim_id) in sim_id_dict[key]]), "V1": numpy.array([v_end_time for key in irrelevant_keys for (g_end_time, h_end_time, l_end_time, v_end_time, sim_id) in sim_id_dict[key]])}
if options.verbose:
print >>sys.stderr, "Cleaning up exact coincidences"
# clean up exact coincidences
for coinc_event_id, in cursor.execute('SELECT coinc_event_id FROM coinc_event WHERE coinc_def_id == ?', exact_inj_coinc_def_id).fetchall():
# each coinc_event_id is a tuple, so no need to put it in another tuple for the cursor.execute()
sim_id = cursor.execute('SELECT event_id FROM coinc_event_map WHERE coinc_event_id == ? AND table_name == "sim_inspiral"', (coinc_event_id,)).fetchall()
assert len(sim_id) == 1, "%d sim_ids coincident with coinc" % len(sim_id)
# FIXME this needs to be generalized to use all of the files
if sim_id[0][0] not in relevant_sim_ids:
cursor.execute('DELETE FROM coinc_event WHERE coinc_event_id == ?', (coinc_event_id,))
if options.verbose:
print >>sys.stderr, "Cleaning up nearby coincidences"
# clean up nearby coincidences
for coinc_event_id, in cursor.execute('SELECT coinc_event_id FROM coinc_event WHERE coinc_def_id == ?', nearby_inj_coinc_def_id).fetchall():
sim_id = cursor.execute('SELECT event_id FROM coinc_event_map WHERE coinc_event_id == ? AND table_name == "sim_inspiral"', (coinc_event_id,)).fetchall()
assert len(sim_id) == 1, "multiple sim_ids coincident with coinc"
# FIXME this needs to be generalized to use all of the files
if sim_id[0][0] not in relevant_sim_ids:
cursor.execute('DELETE FROM coinc_event WHERE coinc_event_id == ?', (coinc_event_id,))
if options.verbose:
print >>sys.stderr, "Cleaning up sim_inspiral sngl_inspiral coincidences"
# clean up sim_inspiral sngl_inspiral coincs
for coinc_event_id, in cursor.execute('SELECT coinc_event_id FROM coinc_event WHERE coinc_def_id == ?', inj_trigger_coinc_def_id).fetchall():
sim_id = cursor.execute('SELECT event_id FROM coinc_event_map WHERE coinc_event_id == ? AND table_name == "sim_inspiral"', (coinc_event_id,)).fetchall()
assert len(sim_id) == 1, "%d sim_ids coincident with coinc" % len(sim_id)
# FIXME this needs to be generalized to use all of the files
if sim_id[0][0] not in relevant_sim_ids:
cursor.execute('DELETE FROM coinc_event WHERE coinc_event_id == ?', (coinc_event_id,))
if options.verbose:
print >>sys.stderr, "Cleaning up sim_inspiral coinc_inspiral coincidences"
# clean up coinc_inspirals
for coinc_event_id, in cursor.execute('SELECT coinc_event_id FROM coinc_event WHERE coinc_def_id == ?', coinc_inspiral_coinc_def_id).fetchall():
# first check if this is coincident with an injection
sim_coinc_event_id = cursor.execute('SELECT coinc_event_id FROM coinc_event_map WHERE table_name == "coinc_event" AND event_id == ?', (coinc_event_id,)).fetchall()
if len(sim_coinc_event_id) >= 1:
sim_ids = []
for sim_coinc_event_id in sim_coinc_event_id:
sim_ids.extend(cursor.execute('SELECT event_id FROM coinc_event_map WHERE coinc_event_id == ? AND table_name == "sim_inspiral"', sim_coinc_event_id).fetchall())
if len(sim_ids) > 1:
for sim_id in sim_ids[1:]:
# This should always be true, just putting this here to make sure I haven't missed some corner case I don't know about
assert sim_ids[0] == sim_id
if sim_ids[0][0] not in relevant_sim_ids:
cursor.execute('DELETE FROM coinc_event WHERE coinc_event_id == ?', (coinc_event_id,))
else:
# not coincident with any injections, we need to find the closest time
ifos = cursor.execute('SELECT ifos FROM coinc_inspiral WHERE coinc_event_id == ?', (coinc_event_id,)).fetchall()
assert len(ifos) == 1
# ifos is the comma-separated list of participating ifos, e.g. H1,L1,V1, which fetchall() returns as [(u"H1,L1,V1",)]
first_ifo_alphabetically = sorted(ifos[0][0].split(','))[0]
end_time = cursor.execute('SELECT end_time+1e-9*end_time_ns FROM sngl_inspiral WHERE ifo == ? and event_id IN (SELECT event_id FROM coinc_event_map WHERE coinc_event_id == ? and table_name = "sngl_inspiral")', (first_ifo_alphabetically, coinc_event_id)).fetchall()
assert len(end_time) == 1
relevance_test = abs(sim_times_relevant_sim_ids[first_ifo_alphabetically] - end_time[0])
irrelevance_test = abs(sim_times_irrelevant_sim_ids[first_ifo_alphabetically] - end_time[0])
if min(irrelevance_test) < min(relevance_test):
# These coinc triggers are closer to an injection we don't care about than an injection we do care about
cursor.execute('DELETE FROM coinc_event WHERE coinc_event_id == ?', (coinc_event_id,))
if options.verbose:
print >>sys.stderr, "Cleaning up coinc_inspiral table"
# now clean up coinc_inspiral and coinc_event_map tables
cursor.execute('DELETE FROM coinc_inspiral WHERE coinc_event_id NOT IN (SELECT coinc_event_id FROM coinc_event)')
if options.verbose:
print >>sys.stderr, "Cleaning up coinc_event_map table"
cursor.execute('DELETE FROM coinc_event_map WHERE coinc_event_id NOT IN (SELECT coinc_event_id FROM coinc_event)')
if options.verbose:
print >>sys.stderr, "Cleaning up sngl_inspiral table"
# clean up sngl_inspiral table
cursor.execute('DELETE FROM sngl_inspiral WHERE event_id NOT IN (SELECT event_id FROM coinc_event_map WHERE table_name == "sngl_inspiral")')
if options.verbose:
print >>sys.stderr, "Cleaning up sim_inspiral table"
# finally, clean up the sim_inspiral table
cursor.executemany('DELETE FROM sim_inspiral WHERE simulation_id == ?', [(sim_id,) for sim_id in irrelevant_sim_ids])
if options.verbose:
print >>sys.stderr, "Vacuuming"
cursor.execute('VACUUM')
# Set process end time
ligolw_process.set_process_end_time(process)
cursor.execute("UPDATE process SET end_time = ? WHERE process_id == ?", (process.end_time, process.process_id))
if options.verbose:
print >>sys.stderr, "Committing"
connection.commit()
new_number_coinc_events += int(connection.execute('SELECT count(*) FROM coinc_event').fetchone()[0])
connection.close()
# Write destaggered db to disk
new_db_name = '%s-ALL_LLOID_%s_%s-%s-%s' % (split_db_name[0], sim_key, split_db_name[1].split('_')[-1], split_db_name[2], split_db_name[3])
dbtables.put_connection_filename(new_db_name, working_filename, verbose = True)
if original_number_coinc_events != new_number_coinc_events:
raise RuntimeError("Number of entries in coinc_event table in original document does not match the number of entries in the output coinc_event_tables. There were %d entries originally, and %d were output. Something has gone terribly wrong, and the output documents should not be trusted." % (original_number_coinc_events, new_number_coinc_events))
else:
print >>sys.stderr, "Confirmed there were %d entries in the original coinc_event table, and %d entries were written to disk in the new coinc_event tables." % (original_number_coinc_events, new_number_coinc_events)
This diff is collapsed.
......@@ -98,13 +98,13 @@ def mtotal(row):
return row.mass1 + row.mass2
def eta(row):
return row.mass1 * row.mass2 / row.mtotal**2.
return row.mass1 * row.mass2 / mtotal(row)**2.
def mchirp(row):
return row.mtotal * row.eta**0.6
return mtotal(row) * row.eta**0.6
def chi(row):
return (row.mass1 * row.spin1z + row.mass2 * row.spin2z) / row.mtotal
return (row.mass1 * row.spin1z + row.mass2 * row.spin2z) / mtotal(row)
def marker_and_size(n):
if n > 2000:
......
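The corrected helpers compute the standard combinations M = m1 + m2, eta = m1 m2 / M^2, mchirp = M eta^(3/5) and the mass-weighted aligned spin chi = (m1 s1z + m2 s2z) / M. A quick numerical check with hypothetical parameters:

# Illustrative only: the quantities the fixed helpers return, for a
# hypothetical (1.4, 1.4) Msun binary with small aligned spins.
m1, m2, s1z, s2z = 1.4, 1.4, 0.05, -0.02
mtot = m1 + m2                       # 2.8
eta = m1 * m2 / mtot**2.             # 0.25 for equal masses
mchirp = mtot * eta**0.6             # 2.8 * 0.25**0.6 ~ 1.22
chi = (m1 * s1z + m2 * s2z) / mtot   # ~ 0.015
print(mtot, eta, mchirp, chi)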
......@@ -338,6 +338,12 @@ def roman(i, arabics = (1000,900,500,400,100,90,50,40,10,9,5,4,1), romans = ("m"
return roman(i, arabics[1:], romans[1:])
return romans[0] + roman(i - arabics[0], arabics, romans)
def truncate(f, digits, si=True):
assert digits > 0, 'number of digits must be positive'
if si:
return float(('%10.' + str(digits) + 'e') % f)
else:
return float(('%10.' + str(digits) + 'f') % f)
#
# width is in mm, default aspect ratio is the golden ratio
......@@ -643,12 +649,12 @@ SELECT distinct_ifos.ifos, count(*) FROM coinc_inspiral JOIN distinct_ifos ON (d
# being out of sync.
#
continue
row = [rank] + [float(v) for v in values[:7]] + list(values[7:9])
row = [rank] + [truncate(float(v), 3) for v in values[:4]] + [truncate(float(values[4]), 6, si=False)] + [truncate(float(v), 3) for v in values[5:7]] + list(values[7:9])
# values[9] is a string that is e.g., H1:4.8993754:1.0139208:2.061641:1.145543 L1:8.2582664:1.1890973:2.061641:1.145543
ifodict = {"H1": [-1,-1,-1,-1,-1,-1], "L1": [-1,-1,-1,-1,-1,-1], "V1": [-1,-1,-1,-1,-1,-1]}
for ifo_row in values[9].split():
ifodata = ifo_row.split(":")
ifodict[ifodata[0]] = [float(v) for v in ifodata[1:]]
ifodict[ifodata[0]] = [truncate(float(v), 3) for v in ifodata[1:]]
row.extend(ifodict["H1"])
row.extend(ifodict["L1"])
row.extend(ifodict["V1"])
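The truncate() helper added above rounds table entries for display: with si=True it keeps digits+1 significant figures by round-tripping through %e formatting, while si=False keeps digits decimal places via %f. A self-contained sketch of the same behaviour:

# Illustrative only: mirrors the truncate() helper defined above (sans assert).
def truncate(f, digits, si=True):
    return float(('%10.' + str(digits) + ('e' if si else 'f')) % f)

print(truncate(12.3456789, 3))             # -> 12.35      (4 significant figures)
print(truncate(0.000123456, 3))            # -> 0.0001235
print(truncate(0.000123456, 6, si=False))  # -> 0.000123   (6 decimal places)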
......@@ -830,18 +836,29 @@ FROM
fig, axes = create_plot(x_label, y_label)
legend = []
ifo_count_summary = {''.join(sorted(combo)): '---' for num_ifos in range(1, len(self.missed_found_plots.instruments)+1) for combo in sorted(list(itertools.combinations(self.missed_found_plots.instruments, num_ifos)), reverse=True)}
ifo_count_summary['missed'] = '---'
for participating_instruments, sims in sorted(self.found_in.items(), key = (lambda x: lsctables.instrumentsproperty.set(x[0]))):
if cnt == 0:
self.missed_found_plots.injection_summary_data.append(["Found", "".join(sorted(self.on_instruments)), "".join(sorted(participating_instruments)), len(sims)])
ifo_count_summary["".join(sorted(participating_instruments))] = len(sims)
legend.append("Found in %s" % ", ".join(sorted(participating_instruments)))
axes.semilogy([x_func(sim) for sim in sims], [y_func(sim, participating_instruments) for sim in sims], ".")
if missed:
if cnt == 0:
self.missed_found_plots.injection_summary_data.append(["Missed", "".join(sorted(self.on_instruments)), "---", len(missed)])
ifo_count_summary["missed"] = len(missed)
for rank, sim in enumerate(missed):
self.missed_found_plots.missed_summary_data.append(["".join(sorted(self.on_instruments)), sim.waveform, float(sim.time_at_instrument("H1", {"H1": 0.0})), float(sim.time_at_instrument("L1", {"L1": 0.0})), float(sim.time_at_instrument("V1", {"V1": 0.0})), sim.mass1, sim.mass2, sim.spin1x, sim.spin1y, sim.spin1z, sim.spin2x, sim.spin2y, sim.spin2z, sim.distance, decisive_chirp_distance(sim, self.on_instruments), sim.inclination, sim.alpha4, sim.alpha5, sim.alpha6, decisive_charsnr(sim, self.on_instruments)])
legend.append("Missed")
axes.semilogy([x_func(sim) for sim in missed], [y_func(sim, self.on_instruments) for sim in missed], "k.")
if cnt == 0:
total_count = 0
for ifos in ifo_count_summary.keys():
if ifo_count_summary[ifos] != '---':
total_count += ifo_count_summary[ifos]
ifo_count_summary['total'] = total_count
ifo_columns = sorted([key for key in ifo_count_summary.keys() if 'missed' not in key and 'total' not in key], key=lambda item: (-len(item), item)) + ['missed', 'total']
missed_found_row = ["".join(sorted(self.on_instruments))] + [ifo_count_summary[ifo_col] for ifo_col in ifo_columns]
self.missed_found_plots.injection_summary_data.append(missed_found_row)
if legend:
......@@ -880,23 +897,29 @@ FROM
self.remove_precession = remove_precession
self.isolate_precession = isolate_precession
self.snr_segments = snr_segments
self.instruments = set()
def add_contents(self, contents):
self.base = contents.base
if contents.sim_inspiral_table is None:
# no injections
return
for on_instruments in contents.on_instruments_combos:
self.instruments.update(on_instruments)
for on_instruments in contents.on_instruments_combos:
if on_instruments not in self.plots:
self.plots[on_instruments] = self.MissedFound(on_instruments, self.far_thresh, self, self.remove_precession, self.isolate_precession, self.snr_segments)
self.plots[on_instruments].add_contents(contents)
def finish(self):
for on_instruments, plot in self.plots.items():
for on_instruments in sorted(self.plots.keys(), key=lambda item: (-len(item), item)):
plot = self.plots[on_instruments]
for fig, filename_fragment, is_open_box in plot.finish():
yield fig, "%s_%s" % (filename_fragment, "".join(sorted(on_instruments))), is_open_box
save_table(self.base + "_injection_summary.json", ["Found || Missed", "On Instruments", "Participating IFOs", "count"], self.injection_summary_data)
ifo_columns = [''.join(sorted(combo)) for num_ifos in range(1, len(self.instruments)+1) for combo in sorted(list(itertools.combinations(self.instruments, num_ifos)), reverse=True)]
ifo_columns.sort(key=lambda item: (-len(item), item))
save_table(self.base + "_injection_summary.json", ["On Instruments"] + ifo_columns + ['Missed', 'Total'], self.injection_summary_data)
save_table(self.base + "_missed_summary.json", ["On Instruments", "Waveform", "H1 time", "L1 time", "V1 time", "m1", "m2", "S_1x", "S_1y", "S_1z", "S_2x", "S_2y", "S_2z", "D (Mpc)", "Decisive D chirp,eff (Mpc)" , "Inclination", "H1 SNR", "L1 SNR", "V1 SNR", "Decisive SNR"], self.missed_summary_data)
......
#!/usr/bin/env python
#
# Copyright (C) 2011-2014 Chad Hanna
# Copyright (C) 2019 Patrick Godwin
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
### The offline gstlal inspiral workflow generator; use it to make HTCondor DAGs to rerank triggers
###
### Usage:
### ------
###
### It is rare that you would invoke this program in a standalone mode. Usually
### the inputs are complicated and best automated via a Makefile.
"""
This program makes a dag to rerank triggers from offline runs
"""
__author__ = 'Chad Hanna <chad.hanna@ligo.org>, Patrick Godwin <patrick.godwin@ligo.org>'
#----------------------------------------------------------
### imports
from collections import OrderedDict
from optparse import OptionParser
import os
import numpy
from lal.utils import CacheEntry
from ligo import segments
from ligo.lw import ligolw
from ligo.lw import lsctables
import ligo.lw.utils as ligolw_utils
from gstlal import inspiral_pipe
from gstlal import dagparts
from gstlal import datasource
from gstlal import paths as gstlal_config_paths
#----------------------------------------------------------
### ligolw initialization
class LIGOLWContentHandler(ligolw.LIGOLWContentHandler):
pass
lsctables.use_in(LIGOLWContentHandler)
#----------------------------------------------------------
### command line options
def parse_command_line():
parser = OptionParser(description = __doc__)
parser.add_option("--gps-start-time", metavar = "seconds", help = "Set the start time of the segment to analyze in GPS seconds. Required unless --data-source=lvshm")
parser.add_option("--gps-end-time", metavar = "seconds", help = "Set the end time of the segment to analyze in GPS seconds. Required unless --data-source=lvshm")
# mass model options
parser.add_option("--template-bank", metavar = "filename", help = "Set the template bank xml file.")
parser.add_option("--mass-model", metavar = "filename", help = "Set the name of the mass model. Options are 'narrow-bns', 'broad-bns', 'bbh', 'ligo', 'detected-logm', 'uniform-template', or 'file'")
parser.add_option("--mass-model-file", metavar = "filename", help = "Set the name of the mass model file, e.g., mass_model.h5. Required if --mass-model=file")
# dtdphi option
parser.add_option("--dtdphi-file", metavar = "filename", action = "append", help = "dtdphi snr ratio pdfs to read from (hdf5 format)")
# idq option
parser.add_option("--idq-file", metavar = "filename", action = "append", help = "idq glitch file (hdf5 format)")
# trigger generation options
parser.add_option("--web-dir", metavar = "directory", help = "Set the web directory like /home/USER/public_html")
parser.add_option("--coincidence-threshold", metavar = "value", type = "float", default = 0.005, help = "Set the coincidence window in seconds (default = 0.005). The light-travel time between instruments will be added automatically in the coincidence test.")
parser.add_option("--min-instruments", metavar = "count", type = "int", default = 2, help = "Set the minimum number of instruments that must contribute triggers to form a candidate (default = 2).")
parser.add_option("--reference-likelihood-file", metavar = "file", help = "Set a reference likelihood file to compute initial likelihood ratios. Required")
parser.add_option("--verbose", action = "store_true", help = "Be verbose")
parser.add_option("--ranking-stat-samples", metavar = "N", default = 2**24, type = "int", help = "Construct ranking statistic histograms by drawing this many samples from the ranking statistic generator (default = 2^24).")
# veto options
parser.add_option("--vetoes", metavar = "filename", help = "Set the veto xml file.")
parser.add_option("--frame-segments-file", metavar = "filename", help = "Set the name of the LIGO light-weight XML file from which to load frame segments. Optional iff --data-source=frames")
parser.add_option("--frame-segments-name", metavar = "name", help = "Set the name of the segments to extract from the segment tables. Required iff --frame-segments-file is given")
parser.add_option("--injections", action = "append", help = "append injection files to analyze. Must prepend filename with X:Y:, where X and Y are floats, e.g. 1.2:3.1:filename, so that the injections are only searched for in regions of the template bank with X <= chirp mass < Y.")
parser.add_option("--analysis-path", metavar = "path", help = "Set the path to the analysis you want to rerank.")
# caches
parser.add_option("--dist-stats-cache", metavar = "filename", help = "Set the cache file for dist stats")
parser.add_option("--lloid-cache", metavar = "filename", help = "Set the cache file for LLOID")
parser.add_option("--bank-cache", metavar = "filenames", action = "append", help = "Set the bank cache files in format H1=H1.cache,H2=H2.cache, etc.. (can be given multiple times)")
# Data from a zero lag run in the case of an injection-only run.
parser.add_option("--svd-bank-cache", metavar = "filename", help = "Set the cache file for svd banks (required iff running injection-only analysis)")
# NOTE: delete unless we want db metadata. injection db?
parser.add_option("--non-injection-db", metavar = "filename", help = "Set the non injection data base file (required iff running injection-only analysis)")
parser.add_option("--marginalized-likelihood-file", metavar = "filename", help = "Set the marginalized likelihood file (required iff running injection-only analysis)")
parser.add_option("--marginalized-likelihood-with-zerolag-file", metavar = "filename", help = "Set the marginalized likelihood with zerolag file (required iff running injection-only analysis)")
# Condor commands
parser.add_option("--condor-command", action = "append", default = [], metavar = "command=value", help = "set condor commands of the form command=value; can be given multiple times")
options, filenames = parser.parse_args()
if options.template_bank:
bank_xmldoc = ligolw_utils.load_filename(options.template_bank, verbose = options.verbose, contenthandler = LIGOLWContentHandler)
sngl_inspiral_table = lsctables.SnglInspiralTable.get_table(bank_xmldoc)
assert len(sngl_inspiral_table) == len(set([r.template_id for r in sngl_inspiral_table])), "Template bank ids are not unique"
if options.mass_model not in ("narrow-bns", "broad-bns", "bbh", "ligo", "detected-logm", "uniform-template", "file"):
raise ValueError("--mass-model must be 'narrow-bns', 'broad-bns', 'bbh', 'ligo', 'detected-logm', 'uniform-template', or 'file'")
if options.mass_model == "file" and not options.mass_model_file:
raise ValueError("--mass-model-file must be provided if --mass-model=file")
if not options.dtdphi_file:
options.dtdphi_file = [os.path.join(gstlal_config_paths["pkgdatadir"], "inspiral_dtdphi_pdf.h5")] * len(options.bank_cache)
if len(options.dtdphi_file) != len(options.bank_cache):
raise ValueError("You must provide as many dtdphi files as banks")
#FIXME a hack to find the sql paths
share_path = os.path.split(dagparts.which('gstlal_inspiral'))[0].replace('bin', 'share/gstlal')
options.cluster_sql_file = os.path.join(share_path, 'simplify_and_cluster.sql')
options.snr_cluster_sql_file = os.path.join(share_path, 'snr_simplify_and_cluster.sql')
options.injection_snr_cluster_sql_file = os.path.join(share_path, 'inj_snr_simplify_and_cluster.sql')
options.injection_sql_file = os.path.join(share_path, 'inj_simplify_and_cluster.sql')
options.injection_proc_sql_file = os.path.join(share_path, 'simplify_proc_table_in_inj_file.sql')
return options, filenames
#----------------------------------------------------------
### DAG utilities
def set_up_jobs(options):
jobs = {}
# default condor options
default_condor_opts = OrderedDict()
default_condor_opts['want_graceful_removal'] = "True"
default_condor_opts['kill_sig'] = "15"
default_condor_opts['request_cpus'] = "1"
default_condor_opts['+MemoryUsage'] = "( 1000 ) * 2 / 3"
default_condor_opts['request_memory'] = "( MemoryUsage ) * 3 / 2"
default_condor_opts['periodic_hold'] = "( MemoryUsage >= ( ( RequestMemory ) * 3 / 2 ) )"
default_condor_opts['periodic_release'] = "(JobStatus == 5) && ((CurrentTime - EnteredCurrentStatus) > 180) && (HoldReasonCode != 34)"
# job-specific condor options
calc_rank_pdf_condor_opts = default_condor_opts.copy()
calc_rank_pdf_condor_opts['+MemoryUsage'] = "( 3000 ) * 2 / 3"
calc_rank_pdf_condor_opts['request_cpus'] = "4"
inj_snr_condor_opts = default_condor_opts.copy()
inj_snr_condor_opts['+MemoryUsage'] = "( 2000 ) * 2 / 3"
inj_snr_condor_opts['request_cpus'] = "2"
marg_condor_opts = default_condor_opts.copy()
marg_condor_opts['+MemoryUsage'] = "( 2000 ) * 2 / 3"
# set condor commands
base_condor_commands = dagparts.condor_command_dict_from_opts(options.condor_command, default_condor_opts)
calc_rank_pdf_condor_commands = dagparts.condor_command_dict_from_opts(options.condor_command, calc_rank_pdf_condor_opts)
inj_snr_condor_commands = dagparts.condor_command_dict_from_opts(options.condor_command, inj_snr_condor_opts)
marg_condor_commands = dagparts.condor_command_dict_from_opts(options.condor_command, marg_condor_opts)
sh_condor_commands = dagparts.condor_command_dict_from_opts(options.condor_command, {"want_graceful_removal":"True", "kill_sig":"15"})
# jobs only needed to find paths
# NOTE: find a better way to do this?
jobs['svd'] = dagparts.DAGJob("gstlal_svd_bank", condor_commands = base_condor_commands)
jobs['medianPSD'] = dagparts.DAGJob("gstlal_median_of_psds", condor_commands = base_condor_commands)
# set up rest of jobs
jobs['model'] = dagparts.DAGJob("gstlal_inspiral_mass_model", condor_commands = base_condor_commands)
jobs['modelAdd'] = dagparts.DAGJob("gstlal_inspiral_add_mass_models", condor_commands = base_condor_commands)
jobs['horizon'] = dagparts.DAGJob("gstlal_plot_psd_horizon", condor_commands = base_condor_commands)
jobs['createPriorDistStats'] = dagparts.DAGJob("gstlal_inspiral_create_prior_diststats", condor_commands = base_condor_commands)
jobs['calcRankPDFs'] = dagparts.DAGJob("gstlal_inspiral_calc_rank_pdfs", condor_commands = calc_rank_pdf_condor_commands)
jobs['calcRankPDFsWithZerolag'] = dagparts.DAGJob("gstlal_inspiral_calc_rank_pdfs", tag_base="gstlal_inspiral_calc_rank_pdfs_with_zerolag", condor_commands=calc_rank_pdf_condor_commands)
jobs['calcLikelihood'] = dagparts.DAGJob("gstlal_inspiral_calc_likelihood", condor_commands = base_condor_commands)
jobs['marginalize'] = dagparts.DAGJob("gstlal_inspiral_marginalize_likelihood", condor_commands = marg_condor_commands)
jobs['marginalizeWithZerolag'] = dagparts.DAGJob("gstlal_inspiral_marginalize_likelihood", tag_base="gstlal_inspiral_marginalize_likelihood_with_zerolag", condor_commands=base_condor_commands)
jobs['injSplitter'] = dagparts.DAGJob("gstlal_injsplitter", tag_base="gstlal_injsplitter", condor_commands = base_condor_commands)
jobs['gstlalInjSnr'] = dagparts.DAGJob("gstlal_inspiral_injection_snr", condor_commands = inj_snr_condor_commands)
jobs['ligolwAdd'] = dagparts.DAGJob("ligolw_add", condor_commands = base_condor_commands)
jobs['calcLikelihoodInj'] = dagparts.DAGJob("gstlal_inspiral_calc_likelihood", tag_base='gstlal_inspiral_calc_likelihood_inj', condor_commands=base_condor_commands)
jobs['ComputeFarFromSnrChisqHistograms'] = dagparts.DAGJob("gstlal_compute_far_from_snr_chisq_histograms", condor_commands = base_condor_commands)
jobs['ligolwInspinjFind'] = dagparts.DAGJob("lalapps_inspinjfind", condor_commands = base_condor_commands)
jobs['toSqlite'] = dagparts.DAGJob("ligolw_sqlite", tag_base = "ligolw_sqlite_from_xml", condor_commands = base_condor_commands)
jobs['toSqliteNoCache'] = dagparts.DAGJob("ligolw_sqlite", tag_base = "ligolw_sqlite_from_xml_final", condor_commands = base_condor_commands)
jobs['toXML'] = dagparts.DAGJob("ligolw_sqlite", tag_base = "ligolw_sqlite_to_xml", condor_commands = base_condor_commands)
jobs['lalappsRunSqlite'] = dagparts.DAGJob("lalapps_run_sqlite", condor_commands = base_condor_commands)
jobs['plotSummary'] = dagparts.DAGJob("gstlal_inspiral_plotsummary", condor_commands = base_condor_commands)
jobs['plotSummaryIsolatePrecession'] = dagparts.DAGJob("gstlal_inspiral_plotsummary", tag_base="gstlal_inspiral_plotsummary_isolated_precession", condor_commands=base_condor_commands)
jobs['plotSnglInjSummary'] = dagparts.DAGJob("gstlal_inspiral_plotsummary", tag_base = "gstlal_inspiral_plotsummary_inj", condor_commands=base_condor_commands)
jobs['plotSnglInjSummaryIsolatePrecession'] = dagparts.DAGJob("gstlal_inspiral_plotsummary", tag_base="gstlal_inspiral_plotsummary_isolated_precession_inj", condor_commands=base_condor_commands)
jobs['plotSensitivity'] = dagparts.DAGJob("gstlal_inspiral_plot_sensitivity", condor_commands = base_condor_commands)
jobs['summaryPage'] = dagparts.DAGJob("gstlal_inspiral_summary_page", condor_commands = base_condor_commands)
jobs['plotBackground'] = dagparts.DAGJob("gstlal_inspiral_plot_background", condor_commands = base_condor_commands)
jobs['cp'] = dagparts.DAGJob("cp", tag_base = "cp", condor_commands = sh_condor_commands)
jobs['rm'] = dagparts.DAGJob("rm", tag_base = "rm_intermediate_merger_products", condor_commands = sh_condor_commands)
return jobs
#----------------------------------------------------------
### main
if __name__ == '__main__':
options, filenames = parse_command_line()
jobs = set_up_jobs(options)
# load analysis output from run
lloid_output, lloid_diststats, svd_dtdphi_map, instrument_set = inspiral_pipe.load_analysis_output(options)
instruments = "".join(sorted(instrument_set))
# load reference psd
boundary_seg = segments.segment(int(options.gps_start_time), int(options.gps_end_time))
gpsmod5 = str(int(boundary_seg[0]))[:5]
ref_psd_path = os.path.join(options.analysis_path, inspiral_pipe.subdir_path([jobs['medianPSD'].output_path, gpsmod5]))
reference_psd = dagparts.T050017_filename(instruments, "REFERENCE_PSD", boundary_seg, '.xml.gz', path = ref_psd_path)
# output directories
output_dir = "plots"
if not os.path.exists("logs"):
os.mkdir("logs")
#
# Set up DAG architecture
#
dag = dagparts.DAG("trigger_rerank_pipe")
# generate xml integrity checker (if requested) and pre-script to back up data
#inspiral_pipe.set_up_scripts(options)
# mass model job
model_node, model_file = inspiral_pipe.mass_model_layer(dag, jobs, [], instruments, options, boundary_seg, reference_psd)
# marginalize jobs
marg_nodes = inspiral_pipe.marginalize_layer(dag, jobs, [], lloid_output, lloid_diststats, options, boundary_seg, instrument_set, model_node, model_file, reference_psd, svd_dtdphi_map, options.idq_file)
# calc rank PDF jobs
rankpdf_nodes, rankpdf_zerolag_nodes = inspiral_pipe.calc_rank_pdf_layer(dag, jobs, marg_nodes, options, boundary_seg, instrument_set)
# final marginalization step
final_marg_nodes, margfiles_to_delete = inspiral_pipe.final_marginalize_layer(dag, jobs, rankpdf_nodes, rankpdf_zerolag_nodes, options)
# likelihood jobs
likelihood_nodes = inspiral_pipe.likelihood_layer(dag, jobs, marg_nodes, lloid_output, lloid_diststats, options, boundary_seg, instrument_set)
# Setup clustering and/or merging
# after all of the likelihood ranking and preclustering is finished put everything into single databases based on the injection file (or lack thereof)
injdbs, noninjdb, final_sqlite_nodes, dbs_to_delete = inspiral_pipe.sql_cluster_and_merge_layer(dag, jobs, likelihood_nodes, [], options, boundary_seg, instruments)
# Compute FAR
farnode = inspiral_pipe.compute_far_layer(dag, jobs, final_marg_nodes, injdbs, noninjdb, final_sqlite_nodes, options)
# make summary plots
plotnodes = inspiral_pipe.summary_plot_layer(dag, jobs, farnode, options, injdbs, noninjdb, output_dir)
# make a web page
inspiral_pipe.summary_page_layer(dag, jobs, plotnodes, options, boundary_seg, injdbs, output_dir)
# rm intermediate merger products
inspiral_pipe.clean_merger_products_layer(dag, jobs, plotnodes, dbs_to_delete, margfiles_to_delete)
#
# generate DAG files
#
dag.write_sub_files()
dag.write_dag()
dag.write_script()
dag.write_cache()
......@@ -132,8 +132,8 @@ class RankingStat(snglcoinc.LnLikelihoodRatioMixin):
# network SNR threshold
network_snrsq_threshold = 49.0
def __init__(self, template_ids = None, instruments = frozenset(("H1", "L1", "V1")), population_model_file = None, dtdphi_file = None, min_instruments = 1, delta_t = 0.005, horizon_factors = None):
self.numerator = inspiral_lr.LnSignalDensity(template_ids = template_ids, instruments = instruments, delta_t = delta_t, population_model_file = population_model_file, dtdphi_file = dtdphi_file, min_instruments = min_instruments, horizon_factors = horizon_factors)
def __init__(self, template_ids = None, instruments = frozenset(("H1", "L1", "V1")), population_model_file = None, dtdphi_file = None, min_instruments = 1, delta_t = 0.005, horizon_factors = None, idq_file = None):
self.numerator = inspiral_lr.LnSignalDensity(template_ids = template_ids, instruments = instruments, delta_t = delta_t, population_model_file = population_model_file, dtdphi_file = dtdphi_file, min_instruments = min_instruments, horizon_factors = horizon_factors, idq_file = idq_file)
self.denominator = inspiral_lr.LnNoiseDensity(template_ids = template_ids, instruments = instruments, delta_t = delta_t, min_instruments = min_instruments)
self.zerolag = inspiral_lr.LnLRDensity(template_ids = template_ids, instruments = instruments, delta_t = delta_t, min_instruments = min_instruments)
......@@ -173,7 +173,7 @@ class RankingStat(snglcoinc.LnLikelihoodRatioMixin):
# FIXME NOTE
# Here we put in a penalty for single detector triggers.
# This is a tuned parameter.
lnP = 0. if len(kwargs["snrs"]) > 1 else -14.
lnP = 0. if len(kwargs["snrs"]) > 1 else -4.
# full ln L ranking stat. we define the ranking statistic
# to be the largest ln L from all allowed subsets of
......@@ -211,9 +211,17 @@ class RankingStat(snglcoinc.LnLikelihoodRatioMixin):
return self.numerator.population_model_file
@property
def dtdphi_file(self, **kwargs):
def dtdphi_file(self):
return self.numerator.dtdphi_file
@property
def idq_file(self):
return self.numerator.idq_file
@property
def horizon_factors(self):
return self.numerator.horizon_factors
@property
def segmentlists(self):
return self.denominator.segmentlists
......@@ -403,6 +411,7 @@ class DatalessRankingStat(RankingStat):
self.numerator = inspiral_lr.DatalessLnSignalDensity(*args, **kwargs)
kwargs.pop("population_model_file", None)
kwargs.pop("dtdphi_file", None)
kwargs.pop("idq_file", None)
self.denominator = inspiral_lr.DatalessLnNoiseDensity(*args, **kwargs)
def finish(self):
......
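The single-detector penalty above is an additive term in ln L, so relaxing it from -14 to -4 substantially reduces how strongly single-detector candidates are down-weighted. A quick check of the two multiplicative factors on the likelihood ratio:

# Illustrative only: the likelihood-ratio factors implied by the two penalties.
import math
print(math.exp(-14.))  # ~ 8.3e-07  (old)
print(math.exp(-4.))   # ~ 1.8e-02  (new, about exp(10) ~ 22000 times larger)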
......@@ -134,9 +134,9 @@ def mkbasicsrc(pipeline, gw_data_source_info, instrument, verbose = False):
if gw_data_source_info.data_source == "frames":
if instrument == "V1":
#FIXME Hack because virgo often just uses "V" in the file names rather than "V1". We need to sieve on "V"
src = pipeparts.mklalcachesrc(pipeline, location = gw_data_source_info.frame_cache, cache_src_regex = "V")
src = pipeparts.mklalcachesrc(pipeline, blocksize = 1048576, use_mmap = False, location = gw_data_source_info.frame_cache, cache_src_regex = "V")
else:
src = pipeparts.mklalcachesrc(pipeline, location = gw_data_source_info.frame_cache, cache_src_regex = instrument[0], cache_dsc_regex = instrument)
src = pipeparts.mklalcachesrc(pipeline, blocksize = 1048576, use_mmap = False, location = gw_data_source_info.frame_cache, cache_src_regex = instrument[0], cache_dsc_regex = instrument)
demux = pipeparts.mkframecppchanneldemux(pipeline, src, do_file_checksum = False, channel_list = map("%s:%s".__mod__, gw_data_source_info.channel_dict.items()))
pipeparts.framecpp_channeldemux_set_units(demux, dict.fromkeys(demux.get_property("channel-list"), "strain"))
# allow frame reading and decoding to occur in a different thread
......
......@@ -313,12 +313,12 @@ def chisq_distribution(df, non_centralities, size):
class CoincsDocument(object):
sngl_inspiral_columns = ("process:process_id", "ifo", "end_time", "end_time_ns", "eff_distance", "coa_phase", "mass1", "mass2", "snr", "chisq", "chisq_dof", "bank_chisq", "bank_chisq_dof", "sigmasq", "spin1x", "spin1y", "spin1z", "spin2x", "spin2y", "spin2z", "template_duration", "event_id", "Gamma0", "Gamma1")
def __init__(self, url, process_params, comment, instruments, seg, offsetvectors, injection_filename = None, tmp_path = None, replace_file = None, verbose = False):
def __init__(self, url, process_params, process_start_time, comment, instruments, seg, offsetvectors, injection_filename = None, tmp_path = None, replace_file = None, verbose = False):
#
# how to make another like us
#
self.get_another = lambda: CoincsDocument(url = url, process_params = process_params, comment = comment, instruments = instruments, seg = seg, offsetvectors = offsetvectors, injection_filename = injection_filename, tmp_path = tmp_path, replace_file = replace_file, verbose = verbose)
self.get_another = lambda: CoincsDocument(url = url, process_params = process_params, process_start_time = process_start_time, comment = comment, instruments = instruments, seg = seg, offsetvectors = offsetvectors, injection_filename = injection_filename, tmp_path = tmp_path, replace_file = replace_file, verbose = verbose)
#
# url
......@@ -332,7 +332,16 @@ class CoincsDocument(object):
self.xmldoc = ligolw.Document()
self.xmldoc.appendChild(ligolw.LIGO_LW())
# NOTE FIXME override the process start time. Since gstlal
# inspiral can produce several output files from the same
# process, this can bloat the process metadata by a factor of 50
# even though the same process generated the files. It is a
# major contributor to the size of merged databases and xml
# files. A patch to ligolw_process.register_to_xmldoc would be
# welcomed
self.process = ligolw_process.register_to_xmldoc(self.xmldoc, u"gstlal_inspiral", process_params, comment = comment, ifos = instruments)
if process_start_time is not None:
self.process.start_time = process_start_time
self.xmldoc.childNodes[-1].appendChild(lsctables.New(lsctables.SnglInspiralTable, columns = self.sngl_inspiral_columns))
self.xmldoc.childNodes[-1].appendChild(lsctables.New(lsctables.CoincDefTable))
self.xmldoc.childNodes[-1].appendChild(lsctables.New(lsctables.CoincTable))
......
This diff is collapsed.
......@@ -972,6 +972,10 @@ class Handler(simplehandler.Handler):
# FIXME: ugly way to get the instrument
instruments = set([event.ifo for event in events])
# FIXME calculate a chisq weighted SNR and store it in the bank_chisq column
for event in events:
event.bank_chisq = event.snr / ((1 + max(1., event.chisq)**3)/2.0)**(1./5.)
# extract segment. move the segment's upper
# boundary to include all triggers. ARGH the 1 ns
# offset is needed for the respective trigger to be
......
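The quantity written into the bank_chisq column is a chi^2-weighted SNR of the form rho / [(1 + max(1, chi^2)^3) / 2]^(1/5): triggers with chi^2 <= 1 keep their SNR, while high-chi^2 triggers are suppressed. A small numerical illustration with hypothetical values:

# Illustrative only: the per-trigger re-weighting applied in the handler above.
def reweighted_snr(snr, chisq):
    return snr / ((1. + max(1., chisq)**3) / 2.)**(1. / 5.)

print(reweighted_snr(10., 0.8))  # chisq <= 1 -> 10.0 (unchanged)
print(reweighted_snr(10., 2.0))  # (1 + 8)/2 = 4.5, 4.5**0.2 ~ 1.35 -> ~7.4
print(reweighted_snr(10., 5.0))  # (1 + 125)/2 = 63, 63**0.2 ~ 2.29 -> ~4.4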
......@@ -942,6 +942,49 @@ class NumeratorSNRCHIPDF(rate.BinnedLnPDF):
snr, dsnr = lnpdf.bins[0].centres()[snrindices], lnpdf.bins[0].upper()[snrindices] - lnpdf.bins[0].lower()[snrindices]
rcoss, drcoss = lnpdf.bins[1].centres()[rcossindices], lnpdf.bins[1].upper()[rcossindices] - lnpdf.bins[1].lower()[rcossindices]
snr2 = snr**2.
ncparam_per_pf = snr2
# takes into account the dependence of the mean on the noncentrality parameter
snrchi2 = numpy.outer(snr2 * df * (1.0 + max(pfs)), rcoss)
arr = numpy.zeros_like(lnpdf.array)
for pf in pfs:
if progressbar is not None:
progressbar.increment()
arr[snrindices, rcossindices] += gstlalstats.ncx2pdf(snrchi2, df, numpy.array([pf * ncparam_per_pf]).T)
# convert to counts by multiplying by bin volume, and also
# multiply by an SNR power law
arr[snrindices, rcossindices] *= numpy.outer(dsnr / snr**inv_snr_pow, drcoss)
# normalize to a total count of n
arr *= n / arr.sum()
# add to lnpdf
lnpdf.array += arr
@staticmethod
def add_glitch_model(lnpdf, n, prefactors_range, df, inv_snr_pow = 4., snr_min = 3.5, progressbar = None):
if df <= 0.:
raise ValueError("require df >= 0: %s" % repr(df))
pfs = numpy.linspace(prefactors_range[0], prefactors_range[1], 100)
if progressbar is not None:
progressbar.max = len(pfs)
# FIXME: except for the low-SNR cut, the slicing is done
# to work around various overflow and loss-of-precision
# issues in the extreme parts of the domain of definition.
# it would be nice to identify the causes of these and
# either fix them or ignore them one-by-one with a comment
# explaining why it's OK to ignore the ones being ignored.
# for example, computing snrchi2 by exponentiating the sum
# of the logs of the terms might permit its evaluation
# everywhere on the domain. can ncx2pdf() be made to work
# everywhere?
snrindices, rcossindices = lnpdf.bins[snr_min:1e10, 1e-10:1e10]
snr, dsnr = lnpdf.bins[0].centres()[snrindices], lnpdf.bins[0].upper()[snrindices] - lnpdf.bins[0].lower()[snrindices]
rcoss, drcoss = lnpdf.bins[1].centres()[rcossindices], lnpdf.bins[1].upper()[rcossindices] - lnpdf.bins[1].lower()[rcossindices]
snr2 = snr**2.
snrchi2 = numpy.outer(snr2, rcoss) * df
......
......@@ -80,6 +80,24 @@ __all__ = [
TYPICAL_HORIZON_DISTANCE = 150.
# utility function to make an idq interpolator from an h5 file
def idq_interp(idq_file):
# set a default function to simply return 0
out = {}
for ifo in ("H1", "L1", "V1", "K1"):
out[ifo] = lambda x: numpy.zeros(len(x))
if idq_file is not None:
import h5py
from scipy.interpolate import interp1d
f = h5py.File(idq_file, "r")
for ifo in f:
# Pad the boundaries - FIXME remove
t = [0.] + list(numpy.array(f[ifo]["time"])) + [2000000000.]
d = [0.] + list(numpy.array(f[ ifo]["data"])) + [0.]
out[ifo] = interp1d(t, d)
return out
#
# =============================================================================
#
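idq_interp() returns one callable per detector: a function that returns zeros when no file is given, or a scipy interp1d built from the per-ifo time/data datasets when an hdf5 file is supplied. A minimal usage sketch, assuming the function above is in scope and using a hypothetical file name:

# Illustrative only: query the per-detector iDQ glitch log-likelihood
# interpolators at a few GPS times.
import numpy
lnl = idq_interp(None)                          # no file: every ifo maps to zeros
print(lnl["H1"](numpy.array([1187008882.4])))   # -> [ 0.]
lnl = idq_interp("H1L1-IDQ_GLITCH.h5")          # hypothetical hdf5 file
t = 1187008882.4
print(max(lnl["H1"]([t - 1., t, t + 1.])))      # same +/- 1 s sampling as LnSignalDensity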
......@@ -292,9 +310,10 @@ class LnLRDensity(snglcoinc.LnLRDensity):
class LnSignalDensity(LnLRDensity):
def __init__(self, *args, **kwargs):
population_model_file = kwargs.pop("population_model_file", None)
dtdphi_file = kwargs.pop("dtdphi_file", None)
self.population_model_file = kwargs.pop("population_model_file", None)
self.dtdphi_file = kwargs.pop("dtdphi_file", None)
self.horizon_factors = kwargs.pop("horizon_factors", None)
self.idq_file = kwargs.pop("idq_file", None)
super(LnSignalDensity, self).__init__(*args, **kwargs)
# install SNR, chi^2 PDF (one for all instruments)
self.densities = {
......@@ -304,14 +323,15 @@ class LnSignalDensity(LnLRDensity):
# record of horizon distances for all instruments in the
# network
self.horizon_history = horizonhistory.HorizonHistories((instrument, horizonhistory.NearestLeafTree()) for instrument in self.instruments)
self.population_model_file = population_model_file
self.dtdphi_file = dtdphi_file
# source population model
# FIXME: introduce a mechanism for selecting the file
self.population_model = inspiral_intrinsics.SourcePopulationModel(self.template_ids, filename = self.population_model_file)
if self.dtdphi_file is not None:
self.InspiralExtrinsics = inspiral_extrinsics.InspiralExtrinsics(self.min_instruments, self.dtdphi_file)
# initialize an iDQ glitch log-likelihood interpolator
self.idq_glitch_lnl = idq_interp(self.idq_file)
def set_horizon_factors(self, horizon_factors):
self.horizon_factors = horizon_factors
......@@ -374,6 +394,14 @@ class LnSignalDensity(LnLRDensity):
# evalute the (snr, \chi^2 | snr) PDFs (same for all
# instruments)
interp = self.interps["snr_chi"]
# Evaluate the IDQ glitch probability # FIXME put in denominator
for ifo, seg in segments.items():
# FIXME don't just use the last segment, somehow include whole template duration?
t = float(seg[1])
# NOTE choose max over +-1 seconds because the sampling is only at 1 Hz.
lnP -= max(self.idq_glitch_lnl[ifo]([t-1., t, t+1.]))
return lnP + sum(interp(snrs[instrument], chi2_over_snr2) for instrument, chi2_over_snr2 in chi2s_over_snr2s.items())
def __iadd__(self, other):
......@@ -387,6 +415,10 @@ class LnSignalDensity(LnLRDensity):
raise ValueError("incompatible dtdphi files")
if self.dtdphi_file is None and other.dtdphi_file is not None:
self.dtdphi_file = other.dtdphi_file
if self.idq_file is not None and other.idq_file is not None and other.idq_file != self.idq_file:
raise ValueError("incompatible idq files")
if self.idq_file is None and other.idq_file is not None:
self.idq_file = other.idq_file
if self.horizon_factors is not None and other.horizon_factors is not None and other.horizon_factors != self.horizon_factors:
# require that the horizon factors be the same within 1%
for k in self.horizon_factors:
......@@ -405,10 +437,12 @@ class LnSignalDensity(LnLRDensity):
new.horizon_history = self.horizon_history.copy()
new.population_model_file = self.population_model_file
new.dtdphi_file = self.dtdphi_file
new.idq_file = self.idq_file
# okay to use references because read-only data
new.population_model = self.population_model
new.InspiralExtrinsics = self.InspiralExtrinsics
new.horizon_factors = self.horizon_factors
new.idq_glitch_lnl = self.idq_glitch_lnl
return new
def local_mean_horizon_distance(self, gps, window = segments.segment(-32., +2.)):
......@@ -423,10 +457,10 @@ class LnSignalDensity(LnLRDensity):
vtdict = self.horizon_history.functional_integral_dict(window.shift(float(gps)), lambda D: D**3.)
return dict((instrument, (vt / t)**(1./3.)) for instrument, vt in vtdict.items())
def add_signal_model(self, prefactors_range = (0.001, 0.01), df = 400, inv_snr_pow = 4.):
def add_signal_model(self, prefactors_range = (0.001, 0.30), df = 100, inv_snr_pow = 4.):
# normalize to a very large fiducial signal count.  this count makes the
# density estimation code choose a suitable kernel size
inspiral_extrinsics.NumeratorSNRCHIPDF.add_signal_model(self.densities["snr_chi"], 10000000., prefactors_range, df, inv_snr_pow = inv_snr_pow, snr_min = self.snr_min)
inspiral_extrinsics.NumeratorSNRCHIPDF.add_signal_model(self.densities["snr_chi"], 1e12, prefactors_range, df, inv_snr_pow = inv_snr_pow, snr_min = self.snr_min)
self.densities["snr_chi"].normalize()
def candidate_count_model(self, rate = 1000.):
......@@ -554,6 +588,7 @@ class LnSignalDensity(LnLRDensity):
xml.appendChild(ligolw_param.Param.from_pyvalue(u"population_model_file", self.population_model_file))
xml.appendChild(ligolw_param.Param.from_pyvalue(u"horizon_factors", json.dumps(self.horizon_factors) if self.horizon_factors is not None else None))
xml.appendChild(ligolw_param.Param.from_pyvalue(u"dtdphi_file", self.dtdphi_file))
xml.appendChild(ligolw_param.Param.from_pyvalue(u"idq_file", self.idq_file))
return xml
@classmethod
......@@ -563,6 +598,12 @@ class LnSignalDensity(LnLRDensity):
self.horizon_history = horizonhistory.HorizonHistories.from_xml(xml, u"horizon_history")
self.population_model_file = ligolw_param.get_pyvalue(xml, u"population_model_file")
self.dtdphi_file = ligolw_param.get_pyvalue(xml, u"dtdphi_file")
# FIXME this should be removed once there are no legacy files left
try:
self.idq_file = ligolw_param.get_pyvalue(xml, u"idq_file")
except ValueError as e:
print >> sys.stderr, "idq file not found", e
self.idq_file = None
self.horizon_factors = ligolw_param.get_pyvalue(xml, u"horizon_factors")
if self.horizon_factors is not None:
# FIXME, how do we properly decode the json, I assume something in ligolw can do this?
......@@ -573,6 +614,7 @@ class LnSignalDensity(LnLRDensity):
self.population_model = inspiral_intrinsics.SourcePopulationModel(self.template_ids, filename = self.population_model_file)
if self.dtdphi_file is not None:
self.InspiralExtrinsics = inspiral_extrinsics.InspiralExtrinsics(self.min_instruments, self.dtdphi_file)
self.idq_glitch_lnl = idq_interp(self.idq_file)
return self
......@@ -797,7 +839,7 @@ class LnNoiseDensity(LnLRDensity):
# added
self.interps = dict((key, (pdf + self.lnzerolagdensity.densities[key]).mkinterp()) for key, pdf in self.densities.items())
def add_noise_model(self, number_of_events = 10000, prefactors_range = (2.0, 100.), df = 40, inv_snr_pow = 2.):
def add_noise_model(self, number_of_events = 1):
#
# populate snr,chi2 binnings with a slope to force
# higher-SNR events to be assesed to be more significant
......@@ -816,22 +858,18 @@ class LnNoiseDensity(LnLRDensity):
rcoss, drcoss = lnpdf.bins[1].centres()[rcossindices], lnpdf.bins[1].upper()[rcossindices] - lnpdf.bins[1].lower()[rcossindices]
prcoss = numpy.ones(len(rcoss))
# This adds a faint power law that falls off just faster than GWs
psnr = 1e-12 * snr**-6 #(1. + 10**6) / (1. + snr**6)
# This adds a faint power law that falls off faster than GWs
psnr = snr**-12
psnr = numpy.outer(psnr, numpy.ones(len(rcoss)))
# NOTE the magic numbers are just tuned from real data
psnrdcoss = numpy.outer(numpy.exp(-4. * (snr - 4.5)**2) * dsnr, numpy.exp(-(rcoss - .06)**2 / (1e-4)) * drcoss)
arr[snrindices, rcossindices] = psnrdcoss + psnr
arr[snrindices, rcossindices] = psnr
# normalize to the requested count. give 99% of the
# requested events to this portion of the model
arr *= 0.99 * number_of_events / arr.sum()
arr *= number_of_events / arr.sum()
for lnpdf in self.densities.values():
# add in the 99% noise model
# add in the noise model
lnpdf.array += arr
# add 1% from the "glitch model"
inspiral_extrinsics.NumeratorSNRCHIPDF.add_signal_model(lnpdf, n = 0.01 * number_of_events, prefactors_range = prefactors_range, df = df, inv_snr_pow = inv_snr_pow, snr_min = self.snr_min)
# re-normalize
lnpdf.normalize()
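# Illustrative arithmetic for the slope above:  a noise density falling as
# snr**-12 drops by 2**12 = 4096 when the SNR doubles from 5 to 10, whereas
# a snr**-4 signal-like falloff drops by only 2**4 = 16, so higher-SNR
# candidates are still ranked as more significant.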
......
......@@ -211,12 +211,13 @@ class backgroundcollector(object):
elif len(event_ids) == 1:
self.zerolag_singles.update(event_ids)
def pull(self, snr_min, two_or_more_instruments, flushed_events):
def pull(self, snr_min, hl_on, flushed_events):
index = dict((id(event), event) for event in flushed_events)
flushed_ids = set(index)
background_ids = self.timeshifted_coincs & flushed_ids
self.timeshifted_coincs -= flushed_ids
background_ids |= set(event_id for event_id in self.zerolag_singles & flushed_ids if float(index[event_id].end) in two_or_more_instruments)
# always add Virgo and KAGRA zero-lag singles to the background; H1/L1 singles are added only when both H1 and L1 were on
background_ids |= set(event_id for event_id in self.zerolag_singles & flushed_ids if ((float(index[event_id].end) in hl_on) or (index[event_id].ifo not in ("H1", "L1"))))
self.zerolag_singles -= flushed_ids
return [event for event in map(index.__getitem__, background_ids) if event.snr >= snr_min]
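# Illustrative behaviour of the selection above (times hypothetical):  with
# hl_on covering [100, 200), a zero-lag H1 single ending at GPS 150 enters
# the background, an H1 single ending at GPS 250 does not, and V1 or K1
# singles enter regardless of when they occur (subject to the snr_min cut).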
......@@ -349,11 +350,14 @@ class StreamThinca(object):
# add selected singles to the noise model
if flushed:
# times when at least 2 instruments were generating
# times when H and L were generating
# SNR. used to select zero-lag singles for
# inclusion in the denominator.
two_or_more_instruments = segmentsUtils.vote(snr_segments.values(), 2)
if "H1" in snr_segments and "L1" in snr_segments:
hl_on = snr_segments["H1"] & snr_segments["L1"]
else:
hl_on = segments.segmentlist([])
# FIXME: this is needed to work around rounding
# problems in safety checks below, trying to
# compare GPS trigger times to float segment
......@@ -361,9 +365,9 @@ class StreamThinca(object):
# precision to know if triggers near the edge are
# in or out). it would be better not to have to
# screw around like this.
two_or_more_instruments.protract(1e-3) # 1 ms
hl_on.protract(1e-3) # 1 ms
for event in self.backgroundcollector.pull(rankingstat.snr_min, two_or_more_instruments, flushed):
for event in self.backgroundcollector.pull(rankingstat.snr_min, hl_on, flushed):
rankingstat.denominator.increment(event)
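# Sketch of the segment arithmetic above, assuming ligo.segments semantics
# (values hypothetical):
#
#   h1 = segments.segmentlist([segments.segment(100., 200.)])
#   l1 = segments.segmentlist([segments.segment(150., 250.)])
#   hl_on = h1 & l1		# -> [segment(150., 200.)]
#   hl_on.protract(1e-3)	# -> [segment(149.999, 200.001)]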
# add any triggers that have been used in coincidences for
......
......@@ -107,7 +107,7 @@ all : split
# N injection sets / Time step / M output split files
# 10. / 100. / 2. = 0.05
split : $(BNS_INJECTION_1) $(BNS_INJECTION_2) $(NSBH_INJECTION_1) $(NSBH_INJECTION_2) $(NSBH_INJECTION_3) $(BBH_INJECTION_1) $(BBH_INJECTION_2) $(IMBH_INJECTION_1) $(IMBH_INJECTION_2) $(IMBH_INJECTION_3)
gstlal_inspiral_split_injections $^ -r 0.05
gstlal_inspiral_combine_injection_sets $^ -r 0.05
# BNS 1
$(BNS_INJECTION_1) :
......
......@@ -108,9 +108,9 @@ deps_env.sh :
@echo 'unset GST_PLUGIN_PATH PYTHONPATH' > $@
@echo 'unset LD_LIBRARY_PATH LIBRARY_PATH LD_PRELOAD' >> $@
@echo 'DEPS_PATH=${PWD}/opt' >> $@
@echo 'export LDMKLFLAGS=" -L${MKLROOT}/lib/intel64 -lmkl_rt -lpthread -lm -ldl"' >> $@
@echo 'export GCCLDMKLFLAGS=" -L${MKLROOT}/lib/intel64 -Wl,--no-as-needed -lmkl_rt -lpthread -lm -ldl"' >> $@
@echo 'export GCCFLAGS="-fPIC -O3 -m64 -I$${MKLROOT}/include -I$${DEPS_PATH}/include"' >> $@
@echo 'export LDMKLFLAGS=" -L$${MKLROOT}/lib/intel64 -lmkl_rt -lpthread -limf -ldl"' >> $@
@echo 'export GCCLDMKLFLAGS=" -L$${MKLROOT}/lib/intel64 -Wl,--no-as-needed -lmkl_rt -lpthread -limf -ldl"' >> $@
@echo 'export GCCFLAGS="-fPIC -O3 -march=native -I$${MKLROOT}/include -I$${DEPS_PATH}/include"' >> $@
@echo 'export CFLAGS=$${GCCFLAGS}' >> $@
@if [[ ${CLUSTER} == *"ligo.caltech.edu" ]] ; then \
echo "source ${ICC_CIT} intel64" >> $@ ; \
......@@ -136,7 +136,7 @@ deps_env.sh :
@echo 'export MKL_THREADING_LAYER=SEQUENTIAL' >> $@
@echo 'export MKL_INTERFACE_LAYER=LP64' >> $@
@echo '# Force explicit linking of optimized FFTW libraries:' >> $@
@echo 'LDFLAGS="-lfftw3 -lfftw3f -lfftw3_threads -lfftw3f_threads $${LDFLAGS_INTEL} -L$${DEPS_PATH}/opt/lib "' >> $@
@echo 'LDFLAGS="-l:libfftw3.so -l:libfftw3f.so -l:libfftw3_threads.so -l:libfftw3f_threads.so $${LDFLAGS_INTEL} -L$${DEPS_PATH}/opt/lib "' >> $@
@echo '# These are environment variables that do get exported' >> $@
@echo 'PATH=$${DEPS_PATH}/bin:$${PATH}' >> $@
@echo 'PKG_CONFIG_PATH=$${DEPS_PATH}/lib/pkgconfig:$${DEPS_PATH}/lib64/pkgconfig:$${PKG_CONFIG_PATH}' >> $@
......@@ -206,7 +206,7 @@ logs/ldas-tools-framecpp.txt : $(LOGS_DIR)/ldas-tools-al.txt $(LDASTOOLSFRAMECPP
$(LDASTOOLSFRAMECPP_TARGET) : $(LDASTOOLSAL_TARGET)
@echo $(GREEN)ldas-tools-framecpp$(WHITE) 1>&2
tar -xf $(TAR_DIR)/$(LDASTOOLSFRAMECPP).tar.gz -C $(SRC_DIR)
cd $(SRC_DIR)/$(LDASTOOLSFRAMECPP) && ./configure --prefix=$(INSTALL_DIR) --without-doxygen CC="gcc" CXX="g++" CFLAGS="$(GCCFLAGS)" LDFLAGS="$(GCCLDMKLFLAGS)"
cd $(SRC_DIR)/$(LDASTOOLSFRAMECPP) && ./configure --prefix=$(INSTALL_DIR) --without-doxygen
cd $(SRC_DIR)/$(LDASTOOLSFRAMECPP) && make && make install
# ldas-tools-al
......@@ -218,7 +218,7 @@ logs/ldas-tools-al.txt : $(LOGS_DIR)/swig.txt $(LDASTOOLSAL_TARGET)
$(LDASTOOLSAL_TARGET) : $(SWIG_TARGET)
@echo $(GREEN)ldas-tools-al$(WHITE) 1>&2
tar -xf $(TAR_DIR)/$(LDASTOOLSAL).tar.gz -C $(SRC_DIR)
cd $(SRC_DIR)/$(LDASTOOLSAL) && ./configure --prefix=$(INSTALL_DIR) --without-doxygen CC="gcc" CXX="g++" CFLAGS="$(GCCFLAGS)" LDFLAGS="$(GCCLDMKLFLAGS)"
cd $(SRC_DIR)/$(LDASTOOLSAL) && ./configure --prefix=$(INSTALL_DIR) --without-doxygen
cd $(SRC_DIR)/$(LDASTOOLSAL) && make -j$(CORES) && make install -j$(CORES)
# swig
......