
first iteration of __isub__ method in RankingStat

Merged: Leo Tsukada requested to merge offline_new-workflow-rankingstat-isub into offline_new-workflow
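For orientation: the hunks below add __isub__ methods, which hook into Python's in-place subtraction protocol so that counts collected from one stretch of data can be removed from an accumulated ranking statistic with the -= operator. As a generic illustration of that protocol only (a hypothetical toy class, not the RankingStat API), defining __isub__ and returning self is what makes "a -= b" mutate a in place:

# Hypothetical toy class illustrating the __isub__ protocol implemented
# below for the ranking statistic classes.
class TallyStub(object):
	def __init__(self, counts):
		self.counts = dict(counts)

	def __isub__(self, other):
		# remove other's counts from our own, in place
		for key, n in other.counts.items():
			self.counts[key] -= n
		return self  # must return self so "a -= b" rebinds a to it

a = TallyStub({"H1": 5, "L1": 3})
b = TallyStub({"H1": 2, "L1": 1})
a -= b           # calls a.__isub__(b)
print(a.counts)  # {'H1': 3, 'L1': 2}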
@@ -200,6 +200,47 @@ class LnLRDensity(snglcoinc.LnLRDensity):
			pass
		return self
	def __isub__(self, other):
		if type(other) != type(self):
			raise TypeError(other)
		# template_id set mismatch is allowed in the special case
		# that one or the other is None to make it possible to
		# construct generic seed objects providing initialization
		# data for the ranking statistics.
		if self.template_ids is not None and other.template_ids is not None and self.template_ids != other.template_ids:
			raise ValueError("incompatible template IDs")
		if self.instruments != other.instruments:
			raise ValueError("incompatible instrument sets")
		if self.min_instruments != other.min_instruments:
			raise ValueError("incompatible minimum number of instruments")
		if self.delta_t != other.delta_t:
			raise ValueError("incompatible delta_t coincidence thresholds")
		self.use_bankchisq = self.use_bankchisq or other.use_bankchisq
		if self.template_ids is None and other.template_ids is not None:
			self.template_ids = frozenset(other.template_ids)
		for ifo, count_tracker in other.count_tracker.items():
			for gpstime, counts in count_tracker.items():
				if gpstime in self.count_tracker[ifo]:
					assert set(other.count_tracker[ifo][gpstime]).issubset(set(self.count_tracker[ifo][gpstime]))
					for count in other.count_tracker[ifo][gpstime]:
						ind = numpy.where(self.count_tracker[ifo][gpstime] == count)
						self.count_tracker[ifo][gpstime] = numpy.delete(self.count_tracker[ifo][gpstime], ind)
				else:
					raise ValueError("the count tracker does not have a record to subtract.")
		for ifo, count_tracker in other.count_tracker_bankchi.items():
			for gpstime, counts in count_tracker.items():
				if gpstime in self.count_tracker_bankchi[ifo]:
					assert set(other.count_tracker_bankchi[ifo][gpstime]).issubset(set(self.count_tracker_bankchi[ifo][gpstime]))
					for count in other.count_tracker_bankchi[ifo][gpstime]:
						ind = numpy.where(self.count_tracker_bankchi[ifo][gpstime] == count)
						self.count_tracker_bankchi[ifo][gpstime] = numpy.delete(self.count_tracker_bankchi[ifo][gpstime], ind)
				else:
					raise ValueError("the count tracker does not have a record to subtract.")
		return self
	# size and duration: useful stuff for the count tracker
	def time_key(self, time):
		size = 10	# granularity for tracking counts
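For context, the count-removal loops above rely on numpy.where to locate the matching entries and numpy.delete to drop them, after asserting that every count being removed is already present. A minimal standalone sketch of that pattern, using made-up arrays rather than the real count_tracker structure:

import numpy

# hypothetical stand-ins for self.count_tracker[ifo][gpstime] and
# other.count_tracker[ifo][gpstime]
tracked = numpy.array([2, 5, 7, 9])
to_remove = numpy.array([5, 9])

# same precondition as the assert above: every count being removed
# must already be recorded in the tracked array
assert set(to_remove).issubset(set(tracked))
for count in to_remove:
	ind = numpy.where(tracked == count)[0]	# indices of matching entries
	tracked = numpy.delete(tracked, ind)

print(tracked)	# [2 7]

Note that numpy.where returns every matching index, so if a count were repeated all of its occurrences would be removed at once.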
@@ -659,6 +700,52 @@ class LnSignalDensity(LnLRDensity):
		return self
	def __isub__(self, other):
		super(LnSignalDensity, self).__isub__(other)
		self.horizon_history -= other.horizon_history
		if self.population_model_file is not None and other.population_model_file is not None and other.population_model_file != self.population_model_file:
			raise ValueError("incompatible mass model file names")
		if self.population_model_file is None and other.population_model_file is not None:
			self.population_model_file = other.population_model_file
		if self.dtdphi_file is not None and other.dtdphi_file is not None and other.dtdphi_file != self.dtdphi_file:
			raise ValueError("incompatible dtdphi files")
		if self.dtdphi_file is None and other.dtdphi_file is not None:
			self.dtdphi_file = other.dtdphi_file
		if self.idq_file is not None and other.idq_file is not None and other.idq_file != self.idq_file:
			raise ValueError("incompatible idq files")
		if self.idq_file is None and other.idq_file is not None:
			self.idq_file = other.idq_file
		if self.horizon_factors is not None and other.horizon_factors is not None and other.horizon_factors != self.horizon_factors:
			# require the horizon factors to agree to within 1%
			try:
				# FIXME:  newer dist stat files store ifo-dependent horizon factors.
				for ifo in self.horizon_factors:
					for k in self.horizon_factors[ifo]:
						if not 0.99 < self.horizon_factors[ifo][k] / other.horizon_factors[ifo][k] < 1.01:
							raise ValueError("incompatible horizon_factors")
			except TypeError:
				# FIXME:  fall back to the old layout (horizon factors
				# constant across ifos) for older dist stat files.  This
				# fallback should be removed once the old format is no
				# longer in use.
				for k in self.horizon_factors:
					if not 0.99 < self.horizon_factors[k] / other.horizon_factors[k] < 1.01:
						raise ValueError("incompatible horizon_factors")
		if self.horizon_factors is None and other.horizon_factors is not None:
			self.horizon_factors = other.horizon_factors
		if hasattr(self, "skip_kde") and hasattr(other, "skip_kde"):
			assert self.skip_kde == other.skip_kde, "the skip_kde attribute must be consistent when both numerator objects have it."
		elif hasattr(other, "skip_kde"):
			self.skip_kde = other.skip_kde
		# NOTE:  this assumes the signal model is identical between self
		# and other and simply keeps self's.  Should we add a check to
		# assert that, and if so, how?
		try:
			del self.interps
		except AttributeError:
			pass
		return self
	def increment(self, *args, **kwargs):
		raise NotImplementedError
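The horizon-factor check above tolerates up to 1% disagreement per entry and uses the TypeError handler to fall back to the older flat (ifo-independent) layout. An illustrative standalone version of that comparison, with hypothetical dictionaries rather than the actual dist stat file contents:

def horizon_factors_compatible(mine, theirs):
	# newer layout: {ifo: {key: factor}}; older layout: {key: factor}.
	# With the older layout the inner loop hits a float and raises
	# TypeError, which triggers the fallback, mirroring the except
	# clause above.
	try:
		return all(
			0.99 < mine[ifo][k] / theirs[ifo][k] < 1.01
			for ifo in mine for k in mine[ifo]
		)
	except TypeError:
		return all(0.99 < mine[k] / theirs[k] < 1.01 for k in mine)

print(horizon_factors_compatible({"H1": {0: 100.0}}, {"H1": {0: 100.5}}))	# True
print(horizon_factors_compatible({0: 100.0}, {0: 110.0}))			# False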
@@ -1036,6 +1123,22 @@ class LnNoiseDensity(LnLRDensity):
		self.triggerrates += other.triggerrates
		return self
	def __isub__(self, other):
		super(LnNoiseDensity, self).__isub__(other)
		self.triggerrates -= other.triggerrates
		for key, lnpdf in self.densities.items():
			# NOTE:  BinnedLnPDF does not define an __isub__ method,
			# so the subtraction has to operate on the .array
			# attribute directly.
			lnpdf.array -= other.densities[key].array
			assert (lnpdf.array >= 0).all(), "SNR-chisq histogram for %s in the denominator contains negative counts.  Check that you are subtracting the right histogram." % key
			lnpdf.normalize()
		try:
			del self.interps
		except AttributeError:
			pass
		return self
	def copy(self):
		new = super(LnNoiseDensity, self).copy()
		new.triggerrates = self.triggerrates.copy()
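Because BinnedLnPDF supplies no __isub__ of its own, the denominator subtraction above operates directly on the underlying count arrays and then renormalizes each PDF. A hedged sketch of that step, with plain numpy arrays standing in for the .array attributes:

import numpy

# stand-ins for lnpdf.array and other.densities[key].array
denominator = numpy.array([[4., 2.], [1., 3.]])
to_subtract = numpy.array([[1., 1.], [0., 2.]])

denominator -= to_subtract
# same sanity check as the assert above: a negative bin count means
# the wrong histogram was subtracted
assert (denominator >= 0).all()
print(denominator)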