Verified commit 40533a90, authored by Duncan Macleod

lalburst: updated print statements for python3

parent 76716266
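
The change applies the standard print-function migration throughout: each touched module gains `from __future__ import print_function` (so the new syntax still runs under Python 2), `print >>stream, ...` becomes `print(..., file=stream)`, and a Python 2 trailing comma, which suppressed the newline, becomes `end=' '`. A minimal sketch of the three patterns as they appear in this commit; the counters `n`, `N` and the cache file name are illustrative stand-ins, not values from the commit:

    from __future__ import print_function  # makes print() a function on Python 2 as well

    import sys

    # Python 2:  print >>sys.stderr, "indexing ..."
    print("indexing ...", file=sys.stderr)

    # Python 2:  print >>sys.stderr, "\t%.1f%%\r" % (100.0 * n / N),
    # The trailing comma suppressed the newline; end=' ' reproduces that,
    # keeping the carriage-return progress meter on one line.
    n, N = 5, 10  # illustrative loop counters, not from the commit
    print("\t%.1f%%\r" % (100.0 * n / N), end=' ', file=sys.stderr)

    # Python 2:  print >>f, str(cacheentry)
    # The same form writes to an ordinary file object.
    with open("example.cache", "w") as f:  # hypothetical output file
        print("H1 S5 815901601 576 file://localhost/tmp/example.xml", file=f)
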
@@ -24,6 +24,9 @@
 #
+from __future__ import print_function
 import math
 import sys
@@ -235,7 +238,7 @@ WHERE
 self.found_x.append(sim.frequency)
 self.found_y.append(amplitude)
 elif found:
-print >>sys.stderr, "odd, injection %s was found in %s but not injected..." % (sim.simulation_id, "+".join(self.instruments))
+print("odd, injection %s was found in %s but not injected..." % (sim.simulation_id, "+".join(self.instruments)), file=sys.stderr)
 def _bin_events(self, binning = None):
 # called internally by finish()
@@ -268,8 +271,8 @@ WHERE
 # program will take too long to run
 raise ValueError("smoothing filter too large (not enough injections)")
-print >>sys.stderr, "The smoothing window for %s is %g x %g bins" % ("+".join(self.instruments), self.window_size_x, self.window_size_y),
-print >>sys.stderr, "which is %g%% x %g%% of the binning" % (100.0 * self.window_size_x / binning[0].n, 100.0 * self.window_size_y / binning[1].n)
+print("The smoothing window for %s is %g x %g bins" % ("+".join(self.instruments), self.window_size_x, self.window_size_y), end=' ', file=sys.stderr)
+print("which is %g%% x %g%% of the binning" % (100.0 * self.window_size_x / binning[0].n, 100.0 * self.window_size_y / binning[1].n), file=sys.stderr)
 def finish(self, binning = None):
 # compute the binning if needed, and set the injections
@@ -279,11 +282,11 @@ WHERE
 self._bin_events(binning)
 # smooth the efficiency data.
-print >>sys.stderr, "Sum of numerator bins before smoothing = %g" % self.efficiency.numerator.array.sum()
-print >>sys.stderr, "Sum of denominator bins before smoothing = %g" % self.efficiency.denominator.array.sum()
+print("Sum of numerator bins before smoothing = %g" % self.efficiency.numerator.array.sum(), file=sys.stderr)
+print("Sum of denominator bins before smoothing = %g" % self.efficiency.denominator.array.sum(), file=sys.stderr)
 rate.filter_binned_ratios(self.efficiency, rate.gaussian_window(self.window_size_x, self.window_size_y))
-print >>sys.stderr, "Sum of numerator bins after smoothing = %g" % self.efficiency.numerator.array.sum()
-print >>sys.stderr, "Sum of denominator bins after smoothing = %g" % self.efficiency.denominator.array.sum()
+print("Sum of numerator bins after smoothing = %g" % self.efficiency.numerator.array.sum(), file=sys.stderr)
+print("Sum of denominator bins after smoothing = %g" % self.efficiency.denominator.array.sum(), file=sys.stderr)
 # regularize to prevent divide-by-zero errors
 self.efficiency.regularize()
......
@@ -23,6 +23,9 @@
 #
+from __future__ import print_function
 import itertools
 import math
 import matplotlib
@@ -243,18 +246,18 @@ def summarize_coinc_database(contents, filename = None):
 else:
 filename = "%s: " % filename
 cursor = contents.connection.cursor()
-print >>sys.stderr, "%sdatabase stats:" % filename
+print("%sdatabase stats:" % filename, file=sys.stderr)
 for instrument, seglist in sorted(contents.seglists.items()):
-print >>sys.stderr, "\t%s%s livetime: %g s (%g%% vetoed)" % (filename, instrument, abs(seglist), 100.0 * float(abs(instrument in contents.vetoseglists and (seglist & contents.vetoseglists[instrument]) or 0.0)) / float(abs(seglist)))
+print("\t%s%s livetime: %g s (%g%% vetoed)" % (filename, instrument, abs(seglist), 100.0 * float(abs(instrument in contents.vetoseglists and (seglist & contents.vetoseglists[instrument]) or 0.0)) / float(abs(seglist))), file=sys.stderr)
 if contents.sngl_burst_table is not None:
-print >>sys.stderr, "\t%sburst events: %d" % (filename, len(contents.sngl_burst_table))
+print("\t%sburst events: %d" % (filename, len(contents.sngl_burst_table)), file=sys.stderr)
 if contents.sim_burst_table is not None:
-print >>sys.stderr, "\t%sburst injections: %d" % (filename, len(contents.sim_burst_table))
+print("\t%sburst injections: %d" % (filename, len(contents.sim_burst_table)), file=sys.stderr)
 if contents.time_slide_table is not None:
-print >>sys.stderr, "\t%stime slides: %d" % (filename, cursor.execute("SELECT COUNT(DISTINCT(time_slide_id)) FROM time_slide").fetchone()[0])
+print("\t%stime slides: %d" % (filename, cursor.execute("SELECT COUNT(DISTINCT(time_slide_id)) FROM time_slide").fetchone()[0]), file=sys.stderr)
 if contents.coinc_def_table is not None:
 for description, n in cursor.execute("SELECT description, COUNT(*) FROM coinc_definer NATURAL JOIN coinc_event GROUP BY coinc_def_id ORDER BY description"):
-print >>sys.stderr, "\t%s%s: %d" % (filename, description, n)
+print("\t%s%s: %d" % (filename, description, n), file=sys.stderr)
 cursor.close()
@@ -306,15 +309,15 @@ def time_slides_livetime(seglists, time_slides, verbose = False):
 old_offsets = seglists.offsets.copy()
 N = len(time_slides)
 if verbose:
-print >>sys.stderr, "computing the live time for %d time slides:" % N
+print("computing the live time for %d time slides:" % N, file=sys.stderr)
 for n, time_slide in enumerate(time_slides):
 if verbose:
-print >>sys.stderr, "\t%.1f%%\r" % (100.0 * n / N),
+print("\t%.1f%%\r" % (100.0 * n / N), end=' ', file=sys.stderr)
 seglists.offsets.update(time_slide)
 livetime += float(abs(seglists.intersection(time_slide.keys())))
 seglists.offsets.update(old_offsets)
 if verbose:
-print >>sys.stderr, "\t100.0%"
+print("\t100.0%", file=sys.stderr)
 return livetime
......
@@ -36,6 +36,9 @@ again using the standard coincidence infrastructure.
 """
+from __future__ import print_function
 import bisect
 import sys
@@ -598,7 +601,7 @@ def binjfind(xmldoc, process, search, snglcomparefunc, nearcoinccomparefunc, ver
 #
 if verbose:
-print >>sys.stderr, "indexing ..."
+print("indexing ...", file=sys.stderr)
 b_b_def = {
 "StringCusp": burca.StringCuspBBCoincDef,
@@ -715,25 +718,25 @@ def binjfind(xmldoc, process, search, snglcomparefunc, nearcoinccomparefunc, ver
 #
 if verbose:
-print >>sys.stderr, "constructing %s:" % sb_b_def.description
+print("constructing %s:" % sb_b_def.description, file=sys.stderr)
 for n, sim in enumerate(contents.simbursttable):
 if verbose:
-print >>sys.stderr, "\t%.1f%%\r" % (100.0 * n / N),
+print("\t%.1f%%\r" % (100.0 * n / N), end=' ', file=sys.stderr)
 events = find_sngl_burst_matches(contents, sim, snglcomparefunc, burst_peak_time_window)
 if events:
 add_sim_burst_coinc(contents, sim, events, contents.sb_b_coinc_def_id)
 if verbose:
-print >>sys.stderr, "\t100.0%"
+print("\t100.0%", file=sys.stderr)
 #
 # Find sim_burst <--> coinc_event coincidences.
 #
 if verbose:
-print >>sys.stderr, "constructing %s and %s:" % (sb_c_e_def.description, sb_c_n_def.description)
+print("constructing %s and %s:" % (sb_c_e_def.description, sb_c_n_def.description), file=sys.stderr)
 for n, sim in enumerate(contents.simbursttable):
 if verbose:
-print >>sys.stderr, "\t%.1f%%\r" % (100.0 * n / N),
+print("\t%.1f%%\r" % (100.0 * n / N), end=' ', file=sys.stderr)
 offsetvector = contents.offsetvectors[sim.time_slide_id]
 coincs = contents.coincs_near_peaktime(sim.time_geocent, coinc_peak_time_window, offsetvector)
 exact_coinc_event_ids = find_exact_coinc_matches(coincs, sim, snglcomparefunc, contents.seglists, offsetvector)
@@ -744,9 +747,9 @@ def binjfind(xmldoc, process, search, snglcomparefunc, nearcoinccomparefunc, ver
 if near_coinc_event_ids:
 add_sim_coinc_coinc(contents, sim, near_coinc_event_ids, contents.sb_c_n_coinc_def_id)
 if verbose:
-print >>sys.stderr, "\t100.0%"
+print("\t100.0%", file=sys.stderr)
 elif verbose:
-print >>sys.stderr, "no %s table in document, skipping" % lsctables.SimBurstTable.tableName
+print("no %s table in document, skipping" % lsctables.SimBurstTable.tableName, file=sys.stderr)
 #
 # Search for sim_inspiral <--> * coincidences
@@ -760,25 +763,25 @@ def binjfind(xmldoc, process, search, snglcomparefunc, nearcoinccomparefunc, ver
 #
 if verbose:
-print >>sys.stderr, "constructing %s:" % si_b_def.description
+print("constructing %s:" % si_b_def.description, file=sys.stderr)
 for n, sim in enumerate(contents.siminspiraltable):
 if verbose:
-print >>sys.stderr, "\t%.1f%%\r" % (100.0 * n / N),
+print("\t%.1f%%\r" % (100.0 * n / N), end=' ', file=sys.stderr)
 events = find_sngl_burst_matches(contents, sim, snglcomparefunc, burst_peak_time_window)
 if events:
 add_sim_burst_coinc(contents, sim, events, contents.si_b_coinc_def_id)
 if verbose:
-print >>sys.stderr, "\t100.0%"
+print("\t100.0%", file=sys.stderr)
 #
 # Find sim_inspiral <--> coinc_event coincidences.
 #
 if verbose:
-print >>sys.stderr, "constructing %s and %s:" % (si_c_e_def.description, si_c_n_def.description)
+print("constructing %s and %s:" % (si_c_e_def.description, si_c_n_def.description), file=sys.stderr)
 for n, sim in enumerate(contents.siminspiraltable):
 if verbose:
-print >>sys.stderr, "\t%.1f%%\r" % (100.0 * n / N),
+print("\t%.1f%%\r" % (100.0 * n / N), end=' ', file=sys.stderr)
 offsetvector = contents.offsetvectors[sim.time_slide_id]
 coincs = contents.coincs_near_peaktime(sim.time_geocent, coinc_peak_time_window, offsetvector)
 exact_coinc_event_ids = find_exact_coinc_matches(coincs, sim, snglcomparefunc, contents.seglists, offsetvector)
@@ -789,9 +792,9 @@ def binjfind(xmldoc, process, search, snglcomparefunc, nearcoinccomparefunc, ver
 if near_coinc_event_ids:
 add_sim_coinc_coinc(contents, sim, near_coinc_event_ids, contents.si_c_n_coinc_def_id)
 if verbose:
-print >>sys.stderr, "\t100.0%"
+print("\t100.0%", file=sys.stderr)
 elif verbose:
-print >>sys.stderr, "no %s table in document, skipping" % lsctables.SimInspiralTable.tableName
+print("no %s table in document, skipping" % lsctables.SimInspiralTable.tableName, file=sys.stderr)
 #
 # Done.
......
@@ -24,6 +24,9 @@
 #
+from __future__ import print_function
 import math
 import sys
@@ -365,7 +368,7 @@ def bucluster(
 except ValueError:
 # no-op: document does not contain a sngl_burst table
 if verbose:
-print >>sys.stderr, "document does not contain a sngl_burst table, skipping ..."
+print("document does not contain a sngl_burst table, skipping ...", file=sys.stderr)
 return xmldoc, False
 seglists = ligolw_search_summary.segmentlistdict_fromsearchsummary(xmldoc, program = program).coalesce()
@@ -374,7 +377,7 @@ def bucluster(
 #
 if verbose:
-print >>sys.stderr, "pre-processing ..."
+print("pre-processing ...", file=sys.stderr)
 preprocess_output = prefunc(sngl_burst_table)
 #
@@ -388,7 +391,7 @@ def bucluster(
 #
 if verbose:
-print >>sys.stderr, "post-processing ..."
+print("post-processing ...", file=sys.stderr)
 postfunc(sngl_burst_table, preprocess_output)
 #
......
@@ -24,6 +24,9 @@
 #
+from __future__ import print_function
 from bisect import bisect_left, bisect_right
 import math
 import sys
@@ -299,7 +302,7 @@ def burca(
 #
 if verbose:
-print >>sys.stderr, "indexing ..."
+print("indexing ...", file=sys.stderr)
 coinc_tables = CoincTables(xmldoc, coinc_definer_row)
 #
......
@@ -24,6 +24,9 @@
 #
+from __future__ import print_function
 import copy
 import itertools
 import math
@@ -198,7 +201,7 @@ class BurcaCoincParamsDistributions(snglcoinc.LnLikelihoodRatioMixin):
 self = None
 for n, filename in enumerate(filenames, 1):
 if verbose:
-print >>sys.stderr, "%d/%d:" % (n, len(filenames)),
+print("%d/%d:" % (n, len(filenames)), end=' ', file=sys.stderr)
 xmldoc = ligolw_utils.load_filename(filename, verbose = verbose, contenthandler = cls.contenthandler)
 if self is None:
 self = cls.from_xml(xmldoc, name)
......
@@ -29,6 +29,9 @@ LIGO Light-Weight XML coincidence analysis front end.
 """
+from __future__ import print_function
 import math
 import sys
@@ -63,7 +66,7 @@ def load_cache(filename, verbose = False):
 taken from stdin.
 """
 if verbose:
-print >>sys.stderr, "reading %s ..." % (filename or "stdin")
+print("reading %s ..." % (filename or "stdin"), file=sys.stderr)
 if filename is not None:
 f = open(filename)
 else:
@@ -356,7 +359,7 @@ def split_bins(cafepacker, extentlimit, verbose = False):
 extents = [origbin.extent[0]] + [LIGOTimeGPS(origbin.extent[0] + i * float(abs(origbin.extent)) / n) for i in range(1, n)] + [origbin.extent[1]]
 if verbose:
-print >>sys.stderr, "\tsplitting cache spanning %s at %s" % (str(origbin.extent), ", ".join(str(extent) for extent in extents[1:-1]))
+print("\tsplitting cache spanning %s at %s" % (str(origbin.extent), ", ".join(str(extent) for extent in extents[1:-1])), file=sys.stderr)
 extents = [segments.segment(*bounds) for bounds in zip(extents[:-1], extents[1:])]
 #
@@ -439,11 +442,11 @@ def write_caches(base, bins, instruments = None, verbose = False):
 filename = pattern % (base, n)
 filenames.append(filename)
 if verbose:
-print >>sys.stderr, "writing %s ..." % filename
+print("writing %s ..." % filename, file=sys.stderr)
 f = open(filename, "w")
 for cacheentry in bin.objects:
 if instruments is None or (instruments & set(cacheentry.segmentlistdict)):
-print >>f, str(cacheentry)
+print(str(cacheentry), file=f)
 return filenames
@@ -484,7 +487,7 @@ def ligolw_cafe(cache, offset_vectors, verbose = False, extentlimit = None):
 #
 if verbose:
-print >>sys.stderr, "computing segment list ..."
+print("computing segment list ...", file=sys.stderr)
 seglists = cache_to_seglistdict(cache)
 #
@@ -507,7 +510,7 @@ def ligolw_cafe(cache, offset_vectors, verbose = False, extentlimit = None):
 #
 if verbose:
-print >>sys.stderr, "filtering input cache ..."
+print("filtering input cache ...", file=sys.stderr)
 cache = [c for c in cache if seglists.intersects_all(c.segmentlistdict)]
 #
@@ -516,7 +519,7 @@ def ligolw_cafe(cache, offset_vectors, verbose = False, extentlimit = None):
 #
 if verbose:
-print >>sys.stderr, "sorting input cache ..."
+print("sorting input cache ...", file=sys.stderr)
 cache.sort(key = lambda x: x.segment)
 #
@@ -529,13 +532,13 @@ def ligolw_cafe(cache, offset_vectors, verbose = False, extentlimit = None):
 packer = CafePacker(outputcaches)
 packer.set_offset_vectors(offset_vectors)
 if verbose:
-print >>sys.stderr, "packing files (considering %s offset vectors) ..." % len(offset_vectors)
+print("packing files (considering %s offset vectors) ..." % len(offset_vectors), file=sys.stderr)
 for n, cacheentry in enumerate(cache):
 if verbose and not n % 13:
-print >>sys.stderr, "\t%.1f%%\t(%d files, %d caches)\r" % (100.0 * n / len(cache), n + 1, len(outputcaches)),
+print("\t%.1f%%\t(%d files, %d caches)\r" % (100.0 * n / len(cache), n + 1, len(outputcaches)), end=' ', file=sys.stderr)
 packer.pack(cacheentry)
 if verbose:
-print >>sys.stderr, "\t100.0%%\t(%d files, %d caches)" % (len(cache), len(outputcaches))
+print("\t100.0%%\t(%d files, %d caches)" % (len(cache), len(outputcaches)), file=sys.stderr)
 #
 # Split caches with extent more than extentlimit
@@ -543,17 +546,17 @@ def ligolw_cafe(cache, offset_vectors, verbose = False, extentlimit = None):
 if extentlimit is not None:
 if verbose:
-print >>sys.stderr, "splitting caches with extent greater than %g s ..." % extentlimit
+print("splitting caches with extent greater than %g s ..." % extentlimit, file=sys.stderr)
 split_bins(packer, extentlimit, verbose = verbose)
 if verbose:
-print >>sys.stderr, "\t\t(%d files, %d caches)" % (len(cache), len(outputcaches))
+print("\t\t(%d files, %d caches)" % (len(cache), len(outputcaches)), file=sys.stderr)
 #
 # Sort output caches
 #
 if verbose:
-print >>sys.stderr, "sorting output caches ..."
+print("sorting output caches ...", file=sys.stderr)
 for cache in outputcaches:
 cache.objects.sort()
......
@@ -24,6 +24,9 @@
 #
+from __future__ import print_function
 import sys
 import traceback
@@ -89,7 +92,7 @@ def assign_likelihood_ratios(connection, coinc_def_id, offset_vectors, vetosegli
 #
 if verbose:
-print >>sys.stderr, "computing likelihood ratios ..."
+print("computing likelihood ratios ...", file=sys.stderr)
 connection.cursor().execute("""
 UPDATE
......
@@ -24,6 +24,9 @@
 #
+from __future__ import print_function
 try:
 from fpconst import NegInf
 except ImportError:
@@ -373,7 +376,7 @@ def load_likelihood_data(filenames, verbose = False):
 seglists = None
 for n, filename in enumerate(filenames, 1):
 if verbose:
-print >>sys.stderr, "%d/%d:" % (n, len(filenames)),
+print("%d/%d:" % (n, len(filenames)), end=' ', file=sys.stderr)
 xmldoc = ligolw_utils.load_filename(filename, verbose = verbose, contenthandler = StringCoincParamsDistributions.LIGOLWContentHandler)
 this_coinc_params = StringCoincParamsDistributions.from_xml(xmldoc, u"string_cusp_likelihood")
 this_seglists = lsctables.SearchSummaryTable.get_table(xmldoc).get_out_segmentlistdict(lsctables.ProcessTable.get_table(xmldoc).get_ids_by_program(u"lalapps_string_meas_likelihood")).coalesce()
@@ -413,17 +416,17 @@ def time_slides_livetime(seglists, time_slides, min_instruments, verbose = False
 seglists = seglists.copy() # don't modify original
 N = len(time_slides)
 if verbose:
-print >>sys.stderr, "computing the live time for %d time slides:" % N
+print("computing the live time for %d time slides:" % N, file=sys.stderr)
 for n, time_slide in enumerate(time_slides):
 if verbose:
-print >>sys.stderr, "\t%.1f%%\r" % (100.0 * n / N),
+print("\t%.1f%%\r" % (100.0 * n / N), end=' ', file=sys.stderr)
 seglists.offsets.update(time_slide)
 if clip is None:
 livetime += float(abs(segmentsUtils.vote(seglists.values(), min_instruments)))
 else:
 livetime += float(abs(segmentsUtils.vote((seglists & clip).values(), min_instruments)))
 if verbose:
-print >>sys.stderr, "\t100.0%"
+print("\t100.0%", file=sys.stderr)
 return livetime
@@ -440,10 +443,10 @@ def time_slides_livetime_for_instrument_combo(seglists, time_slides, instruments
 offseglists = seglists.copy(keys = set(seglists) - set(instruments))
 N = len(time_slides)
 if verbose:
-print >>sys.stderr, "computing the live time for %s in %d time slides:" % (", ".join(instruments), N)
+print("computing the live time for %s in %d time slides:" % (", ".join(instruments), N), file=sys.stderr)
 for n, time_slide in enumerate(time_slides):
 if verbose:
-print >>sys.stderr, "\t%.1f%%\r" % (100.0 * n / N),
+print("\t%.1f%%\r" % (100.0 * n / N), end=' ', file=sys.stderr)
 onseglists.offsets.update(time_slide)
 offseglists.offsets.update(time_slide)
 if clip is None:
@@ -451,7 +454,7 @@ def time_slides_livetime_for_instrument_combo(seglists, time_slides, instruments
 else:
 livetime += float(abs((onseglists & clip).intersection(onseglists.keys()) - offseglists.union(offseglists.keys())))
 if verbose:
-print >>sys.stderr, "\t100.0%"
+print("\t100.0%", file=sys.stderr)
 return livetime
......