Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (12)
Showing with 72 additions and 33 deletions
......@@ -60,15 +60,15 @@ stages:
- |
cat <<EOF > Dockerfile
FROM igwn/base:conda
COPY gstlal/share/conda/envs/qualified-linux-64.lock .
COPY gstlal/share/conda/envs/lock/gstlal-dev-linux-64.lock .
SHELL ["/bin/bash", "-c"]
RUN conda config --set always_yes yes
RUN conda config --add channels conda-forge
RUN conda update -n base conda && \
conda clean -af
RUN conda create -n build-env --file qualified-linux-64.lock --force && \
RUN conda create -n build-env --file gstlal-dev-linux-64.lock --force && \
conda clean -af
RUN rm -f qualified-linux-64.lock
RUN rm -f gstlal-dev-linux-64.lock
ENV PKG_CONFIG_PATH $CONDA_PREFIX/lib/pkgconfig
ENV GST_PLUGIN_PATH $CONDA_PREFIX/lib/gstreamer-1.0
ENTRYPOINT bash
......@@ -252,7 +252,7 @@ test:offline:
needs:
- level0:rpm:gstlal
- level1:rpm:gstlal-ugly
#- level2:rpm:gstlal-calibration
- level2:rpm:gstlal-calibration
- level2:rpm:gstlal-inspiral
- level2:rpm:gstlal-burst
- test:gstlal
......@@ -317,7 +317,7 @@ docker:el7:
needs:
- level0:rpm:gstlal
- level1:rpm:gstlal-ugly
#- level2:rpm:gstlal-calibration
- level2:rpm:gstlal-calibration
- level2:rpm:gstlal-inspiral
- level2:rpm:gstlal-burst
only:
......
......@@ -42,7 +42,13 @@ This package contains the plugins and shared libraries required to run the gstla
%package devel
Summary: Files and documentation needed for compiling gstlal-calibration based plugins and programs.
Group: LSC Software/Data Analysis
Requires: %{name} = %{version} gstlal-devel >= @MIN_GSTLAL_VERSION@ python-devel >= @MIN_PYTHON_VERSION@ %{gstreamername}-devel >= @MIN_GSTREAMER_VERSION@ %{gstreamername}-plugins-base-devel >= @MIN_GSTREAMER_VERSION@ lal-devel >= @MIN_LAL_VERSION@ lalmetaio-devel >= @MIN_LALMETAIO_VERSION@ gsl-devel
Requires: gstlal-devel >= @MIN_GSTLAL_VERSION@
Requires: python3-devel >= @MIN_PYTHON_VERSION@
Requires: %{gstreamername}-devel >= @MIN_GSTREAMER_VERSION@
Requires: %{gstreamername}-plugins-base-devel >= @MIN_GSTREAMER_VERSION@
Requires: lal-devel >= @MIN_LAL_VERSION@
Requires: lalmetaio-devel >= @MIN_LALMETAIO_VERSION@
Requires: gsl-devel
%description devel
This package contains the files needed for building gstlal-calibration based
plugins and programs.
......
......@@ -1379,7 +1379,7 @@ def freqresp(filt, delay_samples = 0, samples_per_lobe = 8, return_double = Fals
#
def asd(data, sr, fft_samples, fft_spacing, window = 'blackman', freq_res = 1.0):
def asd(data, sr, fft_samples, fft_spacing, window = 'blackman', freq_res = 1.0, fast = True):
# How many FFTs will we take?
if len(data) < fft_samples:
......@@ -1405,9 +1405,14 @@ def asd(data, sr, fft_samples, fft_spacing, window = 'blackman', freq_res = 1.0)
win = Blackman(fft_samples)
# Compute the ASD
asd = abs(rfft(win * data[:fft_samples]))
for i in range(1, num_ffts):
asd += abs(rfft(win * data[i * fft_spacing : i * fft_spacing + fft_samples]))
if fast:
asd = abs(np.fft.rfft(win * data[:fft_samples]))
for i in range(1, num_ffts):
asd += abs(np.fft.rfft(win * data[i * fft_spacing : i * fft_spacing + fft_samples]))
else:
asd = abs(rfft(win * data[:fft_samples]))
for i in range(1, num_ffts):
asd += abs(rfft(win * data[i * fft_spacing : i * fft_spacing + fft_samples]))
asd /= num_ffts * sr * np.sqrt((float(fft_samples) / sr))
return np.float64(asd)
......
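The fast keyword added to asd() switches between numpy.fft.rfft and whatever rfft the surrounding module already imports, while keeping the same segment averaging and normalization. A minimal sketch of that averaging, using only NumPy and dummy white-noise data (not the gstlal implementation):

    import numpy as np
    from numpy.fft import rfft

    def averaged_asd(data, sr, fft_samples, fft_spacing):
        # Welch-style average of windowed FFT magnitudes, normalized to an ASD,
        # mirroring the structure of the hunk above.
        window = np.blackman(fft_samples)
        num_ffts = (len(data) - fft_samples) // fft_spacing + 1
        asd = abs(rfft(window * data[:fft_samples]))
        for i in range(1, num_ffts):
            segment = data[i * fft_spacing : i * fft_spacing + fft_samples]
            asd += abs(rfft(window * segment))
        # same normalization as above: average, then divide by sr * sqrt(fft_samples / sr)
        asd /= num_ffts * sr * np.sqrt(float(fft_samples) / sr)
        return np.asarray(asd, dtype=np.float64)

    # usage: ASD of white noise sampled at 1024 Hz
    rng = np.random.default_rng(0)
    spectrum = averaged_asd(rng.standard_normal(16384), sr=1024, fft_samples=1024, fft_spacing=512)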
......@@ -801,7 +801,7 @@ for output_file_number, (svd_bank_url_dict, output_url, ranking_stat_output_url,
@bottle.route("/bank.txt")
def get_filter_length_and_chirpmass(banks = banks):
bank = banks.values()[0][0] #FIXME maybe shouldn't just take the first ones
bank = list(banks.values())[0][0] #FIXME maybe shouldn't just take the first ones
yield '%.14g %.4g %.4g' % (float(inspiral.now()), bank.filter_length, bank.sngl_inspiral_table[0].mchirp)
......
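This and several later hunks are the same Python 3 port: dict.values() now returns a view that cannot be indexed, so it is wrapped in list() before taking the first element. A standalone illustration with dummy data (not gstlal objects):

    banks = {"H1": ["bank0", "bank1"], "L1": ["bank2"]}

    # Python 2: banks.values()[0] worked because values() returned a list.
    # Python 3: values() returns a dict_values view, which is not subscriptable,
    # so it has to be materialized first.
    first = list(banks.values())[0][0]   # -> "bank0"

    # Taking "the first" value still relies on dict ordering being meaningful,
    # which is why the original line keeps its FIXME.
    print(first)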
......@@ -389,7 +389,7 @@ else:
# Choose to optionally reconstruct segments around chosen segement.
# Reconstruct segment around injections is disable.
# if options.injections:
# offset_padding = max(math.ceil(abs(row.end))+1 for bank in banks_dict.values()[0] for row in bank.sngl_inspiral_table)
# offset_padding = max(math.ceil(abs(row.end))+1 for bank in list(banks_dict.values())[0] for row in bank.sngl_inspiral_table)
# reconstruction_segment_list = simulation.sim_inspiral_to_segment_list(options.injections, pad = offset_padding)
if options.reconstruction_segment:
reconstruction_segment_list = segments.segmentlist()
......@@ -516,9 +516,9 @@ if options.coinc_output != None:
raise ValueError("--time-slide-file names %s but have channel names for %s" % (", ".join(sorted(all_instruments)), ", ".join(sorted(gw_data_source_info.channel_dict))))
# Load template ids and horizon_factors
sngl_inspiral_table = banks_dict.values()[0][options.bank_number or 0].sngl_inspiral_table.copy()
sngl_inspiral_table = list(banks_dict.values())[0][options.bank_number or 0].sngl_inspiral_table.copy()
horizon_factors = {}
for bank in banks_dict.values()[0]:
for bank in list(banks_dict.values())[0]:
sngl_inspiral_table.extend(bank.sngl_inspiral_table)
horizon_factors.update(bank.horizon_factors)
template_ids = frozenset(row.template_id for row in sngl_inspiral_table)
......@@ -567,7 +567,7 @@ if options.coinc_output != None:
pipeline,
coincs_document,
rankingstat,
banks_dict.values()[0][options.bank_number].horizon_distance_func,
list(banks_dict.values())[0][options.bank_number].horizon_distance_func,
gracedbwrapper = inspiral.GracedBWrapper(
rankingstat.instruments,
far_threshold = options.gracedb_far_threshold,
......
......@@ -100,7 +100,7 @@ for filename in filenames:
rankingstat, rankingstatpdf = far.parse_likelihood_control_doc(xmldoc)
if rankingstatpdf is not None and options.verbose:
print >>sys.stderr, "WARNING: \"%s\" contains a RankingStatPDF object, it is not a pure ranking statistic file, you might be using this program on the wrong files." % filename
print("WARNING: \"%s\" contains a RankingStatPDF object, it is not a pure ranking statistic file, you might be using this program on the wrong files." % filename, file=sys.stderr)
# FIXME: don't hard-code object name
name = u"gstlal_inspiral_likelihood"
elem = rankingstat.get_xml_root(xmldoc, name)
......@@ -144,8 +144,7 @@ for filename in filenames:
j += 1
del items[j:]
if options.verbose:
print >>sys.stderr, "\"%s\": %s horizon history reduced to %.3g%% of original size" % (filename, instrument, 100. * j / (i + 1.))
print("\"%s\": %s horizon history reduced to %.3g%% of original size" % (filename, instrument, 100. * j / (i + 1.)), file=sys.stderr)
# replace
rankingstat.numerator.horizon_history[instrument] = type(horizon_history)(items)
......
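Both hunks above are the same Python 2 to 3 change: the print >>sys.stderr statement becomes the print() function with file=sys.stderr. An equivalent standalone form, with placeholder values for the formatted fields:

    import sys

    filename, instrument, i, j = "ranking_stat.xml.gz", "H1", 99, 42
    print("\"%s\": %s horizon history reduced to %.3g%% of original size"
          % (filename, instrument, 100. * j / (i + 1.)), file=sys.stderr)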
......@@ -115,6 +115,8 @@ def parse_command_line():
bandwidths = []
if options.svd_file.endswith(".xml") or options.svd_file.endswith(".xml.gz"):
for n, bank in enumerate(svd_bank.read_banks(options.svd_file, contenthandler = LIGOLWContentHandler, verbose = options.verbose)):
if bank.bank_type != "signal_model":
continue
template_ids += [row.template_id for row in bank.sngl_inspiral_table]
# FIXME don't hard code
if options.df == "bandwidth":
......
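This hunk makes the per-bank loop skip entries whose bank_type is not "signal_model", so time-reversed background templates do not contribute template IDs or bandwidths. A tiny standalone illustration of the pattern (the Bank tuple and its fields are stand-ins for the svd_bank objects):

    from collections import namedtuple

    # stand-in for the objects yielded by svd_bank.read_banks(); only the two
    # attributes used here are modelled
    Bank = namedtuple("Bank", ["bank_type", "template_ids"])

    banks = [
        Bank("signal_model", [1, 2, 3]),
        Bank("noise_model", [-1, -2, -3]),   # time-reversed background templates
    ]

    template_ids = []
    for bank in banks:
        if bank.bank_type != "signal_model":
            continue   # ignore noise-model banks when summarizing the search bank
        template_ids += bank.template_ids

    print(template_ids)   # [1, 2, 3]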
......@@ -129,10 +129,10 @@ for gid in gid_list:
# # Find the template (to retrieve the autocorrelation later)
#
banknum = None
for i, bank in enumerate(banks.values()[0]):
for i, bank in enumerate(list(banks.values())[0]):
for j, row in enumerate(bank.sngl_inspiral_table):
# The templates should all have the same template_id, so just grab one
if row.Gamma0 == eventid_trigger_dict.values()[0].Gamma0:
if row.Gamma0 == list(eventid_trigger_dict.values())[0].Gamma0:
banknum = i
tmpltnum = j
break
......
......@@ -112,7 +112,7 @@ def parse_command_line():
parser.add_option("--fmax", metavar = "num", type = "float", default = 1600, help = "set the max frequency cutoff, default 1600 (Hz)")
parser.add_option("--sample-rate", metavar = "Hz", type = "int", help = "Set the sample rate. If not set, the sample rate will be based on the template frequency. The sample rate must be at least twice the highest frequency in the templates. If provided it must be a power of two")
parser.add_option("--identity-transform", action = "store_true", help = "Use identity transform, i.e. no SVD")
parser.add_option("--append-time-reversed-template", action = "store_true", help = "Append time reversed template to the svd bank files (optional).")
parser.add_option("--mchirp-threshold", default = 0.0, type = "float", help = "Set the mchirp threshold below which the time reversed templates will be appended to the svd bank file for background estimation. Default to 0.0, which effectively turns off this feature.")
# trigger generation options
parser.add_option("--vetoes", metavar = "filename", help = "Set the veto xml file.")
......
......@@ -87,7 +87,7 @@ if len(snrs_groups) != len(acs_groups):
#
#=============================================================================================
# This is only useful when SNRs are complex time series.
if numpy.iscomplexobj(SNRs_dict.values()[0][0].data.data):
if numpy.iscomplexobj(list(SNRs_dict.values())[0][0].data.data):
row = 0
for snrs_group, acs_group in zip(snrs_groups, acs_groups):
figure = plotsnr.plot_snr_with_ac(dict(zip(SNRs_dict.keys(), zip(snrs_group))), dict(zip(autocorrelations_dict.keys(), zip(acs_group))), width = options.width, ref_trigger_time = options.center, verbose = options.verbose)
......
......@@ -314,7 +314,7 @@ if options.verbose and credible_intervals is not None:
print >>sys.stderr, "rate posterior mean = %g signals/experiment" % rate_estimation.mean_from_lnpdf(signal_rate_ln_pdf)
print >>sys.stderr, "rate posterior median = %g signals/experiment" % rate_estimation.median_from_lnpdf(signal_rate_ln_pdf)
# all modes are the same, pick one and report it
print >>sys.stderr, "maximum-likelihood rate = %g signals/experiment" % credible_intervals.values()[0][0]
print >>sys.stderr, "maximum-likelihood rate = %g signals/experiment" % list(credible_intervals.values())[0][0]
for cred, (mode, lo, hi) in sorted(credible_intervals.items()):
print >>sys.stderr, "%g%% credible interval = [%g, %g] signals/experiment" % (cred * 100., lo, hi)
......
......@@ -294,7 +294,7 @@ def get_subbank_maps(svd_banks, options):
subbank_likelihood_file_map_index = {}
for svd_bank in svd_banks:
bank_id = None
banks = inspiral.parse_bank_files(svd_bank, verbose = options.verbose).values()[0]
banks = list(inspiral.parse_bank_files(svd_bank, verbose = options.verbose).values())[0]
for bank in banks:
#
# Create a dict keyed by bank id and populated by a
......
......@@ -404,10 +404,10 @@ class EventPlotter(events.EventProcessor):
# # Find the template (to retrieve the autocorrelation later)
#
banknum = None
for i, bank in enumerate(banks.values()[0]):
for i, bank in enumerate(list(banks.values())[0]):
for j, row in enumerate(bank.sngl_inspiral_table):
# The templates should all have the same template_id, so just grab one
if row.Gamma0 == eventid_trigger_dict.values()[0].Gamma0:
if row.Gamma0 == list(eventid_trigger_dict.values())[0].Gamma0:
banknum = i
tmpltnum = j
break
......
......@@ -87,7 +87,7 @@ parser = OptionParser(description = __doc__)
parser.add_option("--flow", metavar = "Hz", type = "float", default = 40.0, help = "Set the template low-frequency cut-off (default = 40.0).")
parser.add_option("--sample-rate", metavar = "Hz", type = "int", help = "Set the sample rate. If not set, the sample rate will be based on the template frequency. The sample rate must be at least twice the highest frequency in the templates. If provided it must be a power of two")
parser.add_option("--identity-transform", action = "store_true", default = False, help = "Do not perform an SVD; instead, use the original templates as the analyzing templates.")
parser.add_option("--append-time-reversed-template", action= "store_true", help = "A shortcut for appending time reversed template bank to the output file without manually adding --bank-type. (optional; cannot combined with --bank-type.)")
parser.add_option("--append-time-reversed-template", action = "store_true", help = "A shortcut for appending time reversed template bank to the output file without manually adding --bank-type. (optional; cannot combined with --bank-type.)")
parser.add_option("--bank-type", type= "string", metavar = "N", action = "append", default = [], help = "Define the type of the template bank: is it used to produce signal candidates or it is used to produce noise candidate? Use 'noise_model' to indicate that it's for noise candidates or 'signal_model' to indicate for signal candidates (default). (optional; if provided, it must be as many as --template-bank).")
parser.add_option("--padding", metavar = "pad", type = "float", default = 1.5, help = "Fractional amount to pad time slices.")
parser.add_option("--svd-tolerance", metavar = "match", type = "float", default = 0.9995, help = "Set the SVD reconstruction tolerance (default = 0.9995).")
......
......@@ -21,6 +21,8 @@ import os
from typing import Iterable
import numpy
import yaml
from yaml.loader import SafeLoader
from lal import rate
from lal.utils import CacheEntry
......@@ -1491,6 +1493,7 @@ def collect_metrics_layer(config, dag):
# timeseries metrics
agg_metrics = dagutil.groups(agg_metrics, max(max_agg_jobs // (4 * num_jobs), 1))
seg_metrics = dagutil.groups(seg_metrics, max(max_agg_jobs // (4 * num_jobs), 1))
for metrics in itertools.chain(agg_metrics, seg_metrics):
for i, _ in enumerate(agg_job_bounds):
arguments = list(common_opts)
......@@ -1507,6 +1510,28 @@ def collect_metrics_layer(config, dag):
else:
metric_layer += Node(arguments=arguments)
# add optional test suite metrics
if config.metrics.test_suite_config:
with open(config.metrics.test_suite_config, 'r') as f:
testsuite_config = yaml.load(f, Loader=SafeLoader)
testsuite_metrics = [metric for metric in testsuite_config["schemas"]]
testsuite_metrics = dagutil.groups(testsuite_metrics, max(max_agg_jobs // (4 * num_jobs), 1))
for metrics in itertools.chain(testsuite_metrics):
testsuite_arguments = [
Argument("command", "aggregate"),
Option("config", config.metrics.test_suite_config),
Option("uri", f"kafka://{config.tag}@{config.services.kafka_server}"),
]
testsuite_arguments.extend([
Option("data-type", "timeseries"),
Option("topic", [f"gstlal.{config.tag}.testsuite.{metric}" for metric in metrics]),
Option("schema", metrics)
])
metric_layer += Node(arguments=testsuite_arguments)
if metric_layer.nodes:
dag.attach(metric_leader_layer)
dag.attach(metric_layer)
......
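The new block reads an optional test-suite configuration with PyYAML's SafeLoader, takes the metric names from its schemas section, and chunks them so each aggregation node is given a bounded set of Kafka topics. A minimal standalone sketch of that read-and-group step; the config text, tag, and groups() helper here are stand-ins for the real config file and dagutil.groups:

    import yaml
    from yaml.loader import SafeLoader

    config_text = """
    schemas:
      snr_history: {}
      latency_history: {}
      far_history: {}
    """

    testsuite_config = yaml.load(config_text, Loader=SafeLoader)
    metrics = [metric for metric in testsuite_config["schemas"]]

    def groups(items, size):
        # stand-in for dagutil.groups: split a list into chunks of at most `size`
        return [items[i:i + size] for i in range(0, len(items), size)]

    tag = "online-test"
    for group in groups(metrics, 2):
        topics = [f"gstlal.{tag}.testsuite.{metric}" for metric in group]
        print(topics)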
......@@ -490,7 +490,6 @@ def svd_layer(dag, jobs, parent_nodes, psd, bank_cache, options, seg, output_dir
"svd-tolerance":options.tolerance,
"flow":options.flow[j],
"sample-rate":options.sample_rate,
"append-time-reversed-template":options.append_time_reversed_template,
"clipleft":clipleft,
"clipright":clipright,
"samples-min":options.samples_min[j],
......@@ -507,6 +506,8 @@ def svd_layer(dag, jobs, parent_nodes, psd, bank_cache, options, seg, output_dir
input_cache_file_name = os.path.basename(svd_bank_name).replace(".xml.gz", ".cache"),
output_files = {"write-svd":svd_bank_name},
)
if new_template_mchirp_dict['%04d' % (i+bin_offset,)][1] < options.mchirp_threshold:
svdnode.add_var_arg("--append-time-reversed-template")
# impose a priority to help with depth first submission
svdnode.set_priority(99)
......
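Combined with the new --mchirp-threshold option introduced earlier, this hunk passes --append-time-reversed-template to the SVD job only for sub-banks whose chirp-mass entry falls below the threshold. A small sketch of the gating logic, assuming (as the diff suggests) that new_template_mchirp_dict maps zero-padded bin labels to tuples whose second element is the chirp mass being compared:

    mchirp_threshold = 1.73                             # example value; the default 0.0 disables the feature
    new_template_mchirp_dict = {"0004": (2.10, 1.20)}   # hypothetical bin label -> mchirp tuple

    bin_label = "%04d" % 4
    extra_args = []
    if new_template_mchirp_dict[bin_label][1] < mchirp_threshold:
        # low-chirp-mass sub-bank: also build time-reversed templates for background estimation
        extra_args.append("--append-time-reversed-template")
    print(extra_args)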
......@@ -1714,8 +1714,8 @@ class p_of_instruments_given_horizons(object):
# NOTE we end up clipping any value outside of our
# histogram to just be the value in the last(first)
# bin, so we track those center values here.
self.first_center = self.histograms.values()[0].centres()[0][0]
self.last_center = self.histograms.values()[0].centres()[0][-1]
self.first_center = list(self.histograms.values())[0].centres()[0][0]
self.last_center = list(self.histograms.values())[0].centres()[0][-1]
# Now we start the monte carlo simulation of a bunch of
# signals distributed uniformly in the volume of space
......
......@@ -509,6 +509,7 @@ def read_banks(filename, contenthandler, verbose = False):
banks.append(bank)
template_id, func = horizon_distance_func(banks)
template_id = abs(template_id) # make sure horizon_distance_func did not pick the noise model template
horizon_norm = None
for bank in banks:
if template_id in bank.horizon_factors and bank.bank_type == "signal_model":
......
......@@ -648,9 +648,9 @@ def scan_svd_banks_for_row(coinc_xmldoc, banks_dict):
bank_number = None
row_number = None
for i, bank in enumerate(banks_dict.values()[0]):
for i, bank in enumerate(list(banks_dict.values())[0]):
for j, row in enumerate(bank.sngl_inspiral_table):
if row.template_id == eventid_trigger_dict.values()[0].template_id:
if row.template_id == list(eventid_trigger_dict.values())[0].template_id:
bank_number = i
row_number = j
break
......
......@@ -34,7 +34,7 @@ class AcCounts(object):
def normalize(self):
# FIXME this does no error checking that category keys are consistent
self.norm = dict((cat, 0.) for cat in self.counts.values()[0].keys())
self.norm = dict((cat, 0.) for cat in list(self.counts.values())[0].keys())
for b in self.counts:
for cat in self.norm:
self.norm[cat] += self.counts[b][cat]
......