Skip to content
Snippets Groups Projects
Commit ca55c845 authored by Daichi Tsuna's avatar Daichi Tsuna
Browse files

cs_triggergen: add amplitude calculation & less PSD updates

parent b5b3a1cf
No related branches found
No related tags found
No related merge requests found
...@@ -81,11 +81,35 @@ options, filenames = parse_command_line() ...@@ -81,11 +81,35 @@ options, filenames = parse_command_line()
# handler for obtaining psd # handler for obtaining psd
# #
class PSDHandler(simplehandler.Handler): class PipelineHandler(simplehandler.Handler):
def __init__(self, mainloop, pipeline, firbank): def __init__(self, mainloop, pipeline, template_bank, firbank):
simplehandler.Handler.__init__(self,mainloop, pipeline) simplehandler.Handler.__init__(self, mainloop, pipeline)
self.template_bank = template_bank
self.firbank = firbank self.firbank = firbank
self.triggergen = triggergen self.triggergen = triggergen
# use central_freq to uniquely identify templates
self.sigma = dict((row.central_freq, 0.0) for row in template_bank)
# counter for controlling how often we update PSD
self.update_psd = 0
def appsink_new_buffer(self, elem):
buf = elem.emit("pull-sample").get_buffer()
events = []
for i in range(buf.n_memory()):
memory = buf.peek_memory(i)
result, mapinfo = memory.map(Gst.MapFlags.READ)
assert result
if mapinfo.data:
events.extend(snglbursttable.GSTLALSnglBurst.from_buffer(mapinfo.data))
memory.unmap(mapinfo)
# put info of each event in the sngl burst table
for event in events:
event.process_id = process.process_id
event.event_id = sngl_burst_table.get_next_id()
event.amplitude = event.snr / self.sigma[event.central_freq]
sngl_burst_table.append(event)
# event counter
search_summary.nevents += 1
def do_on_message(self, bus, message): def do_on_message(self, bus, message):
if message.type == Gst.MessageType.ELEMENT and message.get_structure().get_name() == "spectrum": if message.type == Gst.MessageType.ELEMENT and message.get_structure().get_name() == "spectrum":
...@@ -95,73 +119,87 @@ class PSDHandler(simplehandler.Handler): ...@@ -95,73 +119,87 @@ class PSDHandler(simplehandler.Handler):
stability = float(message.src.get_property("n-samples")) / message.src.get_property("average-samples") stability = float(message.src.get_property("n-samples")) / message.src.get_property("average-samples")
if stability > 0.3: if stability > 0.3:
print >> sys.stderr, "At GPS time", timestamp, "PSD stable" if self.update_psd > 0:
template_t = [None] * len(template_bank_table) # do nothing, just decrease the counter
autocorr = [None] * len(template_bank_table) self.update_psd -= 1
# make templates, whiten, put into firbank else:
for i, row in enumerate(template_bank_table): # PSD counter reached zero
# linearly polarized, so just use plus mode time series print >> sys.stderr, "At GPS time", timestamp, "updating PSD"
template_t[i], _ = lalsimulation.GenerateStringCusp(1.0,row.central_freq,1.0/options.sample_rate) # NOTE this initialization determines how often the PSD gets updated. This should be given by the user, or we can think of other fancier updates.
# zero-pad it to 32 seconds to obtain same deltaF as the PSD self.update_psd = 10
template_t[i] = lal.ResizeREAL8TimeSeries(template_t[i],-int(32*options.sample_rate - template_t[i].data.length)//2,int(32*options.sample_rate)) template_t = [None] * len(self.template_bank)
# setup of frequency domain autocorr = [None] * len(self.template_bank)
length = template_t[i].data.length # make templates, whiten, put into firbank
duration = float(length) / options.sample_rate # FIXME Currently works only for cusps. this for-loop needs to be revisited when searching for other sources (kinks, ...)
epoch = -(length-1) // 2 /options.sample_rate for i, row in enumerate(self.template_bank):
template_f = lal.CreateCOMPLEX16FrequencySeries("template_freq", LIGOTimeGPS(epoch), psd.f0, 1.0/duration, lal.Unit("strain s"), length // 2 + 1) # Obtain cusp waveform. A cusp signal is linearly polarized, so just use plus mode time series
fplan = lal.CreateForwardREAL8FFTPlan(length,0) template_t[i], _ = lalsimulation.GenerateStringCusp(1.0,row.central_freq,1.0/options.sample_rate)
# FFT to frequency domain # zero-pad it to 32 seconds to obtain same deltaF as the PSD
lal.REAL8TimeFreqFFT(template_f,template_t[i],fplan) template_t[i] = lal.ResizeREAL8TimeSeries(template_t[i],-int(32*options.sample_rate - template_t[i].data.length)//2,int(32*options.sample_rate))
# set DC and Nyquist to zero # setup of frequency domain
template_f.data.data[0] = 0.0 length = template_t[i].data.length
template_f.data.data[template_f.data.length-1] = 0.0 duration = float(length) / options.sample_rate
# whiten epoch = -(length-1) // 2 / options.sample_rate
template_f = lal.WhitenCOMPLEX16FrequencySeries(template_f,psd) template_f = lal.CreateCOMPLEX16FrequencySeries("template_freq", LIGOTimeGPS(epoch), psd.f0, 1.0/duration, lal.Unit("strain s"), length // 2 + 1)
# obtain autocorr time series by squaring template and inverse FFT it fplan = lal.CreateForwardREAL8FFTPlan(length,0)
template_f_squared = lal.CreateCOMPLEX16FrequencySeries("whitened template_freq squared", LIGOTimeGPS(epoch), psd.f0, 1.0/duration, lal.Unit("s"), length // 2 + 1) # FFT to frequency domain
autocorr_t = lal.CreateREAL8TimeSeries("autocorr_time", LIGOTimeGPS(epoch), psd.f0, 1.0 / options.sample_rate, lal.Unit("strain"), length) lal.REAL8TimeFreqFFT(template_f,template_t[i],fplan)
rplan = lal.CreateReverseREAL8FFTPlan(length,0) # set DC and Nyquist to zero
template_f_squared.data.data = abs(template_f.data.data)**2 template_f.data.data[0] = 0.0
lal.REAL8FreqTimeFFT(autocorr_t,template_f_squared,rplan) template_f.data.data[template_f.data.length-1] = 0.0
# normalize autocorrelation by central (maximum) value # whiten
autocorr_t.data.data /= numpy.max(autocorr_t.data.data) assert template_f.deltaF == psd.deltaF, "freq interval not same between template and PSD"
autocorr_t = autocorr_t.data.data template_f = lal.WhitenCOMPLEX16FrequencySeries(template_f,psd)
max_index = numpy.argmax(autocorr_t) # Obtain the normalization for getting the amplitude of signal from SNR
# find the index of the third extremum for the template with lowest high-f cutoff. # Integrate over frequency range covered by template. Note that template_f is already whitened.
# we do this because we know that the template with the lowest high-f cutoff will have sigmasq = 0.0
# the largest chi2_index. sigmasq = numpy.trapz(4.0 * template_f.data.data**2, dx = psd.deltaF)
if i == 0: self.sigma[row.central_freq] = numpy.sqrt(sigmasq.real)
extr_ctr = 0 # obtain autocorr time series by squaring template and inverse FFT it
chi2_index = 0 template_f_squared = lal.CreateCOMPLEX16FrequencySeries("whitened template_freq squared", LIGOTimeGPS(epoch), psd.f0, 1.0/duration, lal.Unit("s"), length // 2 + 1)
for j in range(max_index+1, len(autocorr_t)): autocorr_t = lal.CreateREAL8TimeSeries("autocorr_time", LIGOTimeGPS(epoch), psd.f0, 1.0 / options.sample_rate, lal.Unit("strain"), length)
slope1 = autocorr_t[j+1] - autocorr_t[j] rplan = lal.CreateReverseREAL8FFTPlan(length,0)
slope0 = autocorr_t[j] - autocorr_t[j-1] template_f_squared.data.data = abs(template_f.data.data)**2
chi2_index += 1 lal.REAL8FreqTimeFFT(autocorr_t,template_f_squared,rplan)
if(slope1 * slope0 < 0): # normalize autocorrelation by central (maximum) value
extr_ctr += 1 autocorr_t.data.data /= numpy.max(autocorr_t.data.data)
if(extr_ctr == 2): autocorr_t = autocorr_t.data.data
break max_index = numpy.argmax(autocorr_t)
assert extr_ctr == 2, 'could not find 3rd extremum' # find the index of the third extremum for the template with lowest high-f cutoff.
# extract the part within the third extremum, setting the peak to be the center. # we don't want to do this for all templates, because we know that
autocorr[i] = numpy.concatenate((autocorr_t[1:(chi2_index+1)][::-1], autocorr_t[:(chi2_index+1)])) # the template with the lowest high-f cutoff will have the largest chi2_index.
assert len(autocorr[i])%2==1, 'autocorr must have odd number of samples' if i == 0:
# Inverse FFT template bank back to time domain extr_ctr = 0
template_t[i] = lal.CreateREAL8TimeSeries("whitened template time", LIGOTimeGPS(epoch), psd.f0, 1.0 / options.sample_rate, lal.Unit("strain"), length) chi2_index = 0
lal.REAL8FreqTimeFFT(template_t[i],template_f,rplan) for j in range(max_index+1, len(autocorr_t)):
# normalize slope1 = autocorr_t[j+1] - autocorr_t[j]
template_t[i].data.data /= numpy.sqrt(numpy.dot(template_t[i].data.data, template_t[i].data.data)) slope0 = autocorr_t[j] - autocorr_t[j-1]
template_t[i] = template_t[i].data.data chi2_index += 1
firbank.set_property("latency", -(len(template_t[0]) - 1) // 2) if(slope1 * slope0 < 0):
firbank.set_property("fir_matrix", template_t) extr_ctr += 1
triggergen.set_property("autocorrelation_matrix", autocorr) if(extr_ctr == 2):
self.firbank = firbank break
self.triggergen = triggergen assert extr_ctr == 2, 'could not find 3rd extremum'
# extract the part within the third extremum, setting the peak to be the center.
autocorr[i] = numpy.concatenate((autocorr_t[1:(chi2_index+1)][::-1], autocorr_t[:(chi2_index+1)]))
assert len(autocorr[i])%2==1, 'autocorr must have odd number of samples'
# Inverse FFT template bank back to time domain
template_t[i] = lal.CreateREAL8TimeSeries("whitened template time", LIGOTimeGPS(epoch), psd.f0, 1.0 / options.sample_rate, lal.Unit("strain"), length)
lal.REAL8FreqTimeFFT(template_t[i],template_f,rplan)
# normalize
template_t[i].data.data /= numpy.sqrt(numpy.dot(template_t[i].data.data, template_t[i].data.data))
template_t[i] = template_t[i].data.data
firbank.set_property("latency", -(len(template_t[0]) - 1) // 2)
firbank.set_property("fir_matrix", template_t)
triggergen.set_property("autocorrelation_matrix", autocorr)
self.firbank = firbank
self.triggergen = triggergen
else: else:
# use templates with all zeros during burn-in period, that way we won't get any triggers. # use templates with all zeros during burn-in period, that way we won't get any triggers.
print >> sys.stderr, "At GPS time", timestamp, "burn in period" print >> sys.stderr, "At GPS time", timestamp, "burn in period"
template = [None] * len(template_bank_table) template = [None] * len(self.template_bank)
autocorr = [None] * len(template_bank_table) autocorr = [None] * len(self.template_bank)
for i, row in enumerate(template_bank_table): for i, row in enumerate(self.template_bank):
template[i], _ = lalsimulation.GenerateStringCusp(1.0,30,1.0/options.sample_rate) template[i], _ = lalsimulation.GenerateStringCusp(1.0,30,1.0/options.sample_rate)
template[i] = lal.ResizeREAL8TimeSeries(template[i], -int(32*options.sample_rate - template[i].data.length)//2 ,int(32*options.sample_rate)) template[i] = lal.ResizeREAL8TimeSeries(template[i], -int(32*options.sample_rate - template[i].data.length)//2 ,int(32*options.sample_rate))
template[i] = template[i].data.data template[i] = template[i].data.data
...@@ -252,6 +290,23 @@ process = ligolw_process.register_to_xmldoc(xmldoc, "StringSearch", options.__di ...@@ -252,6 +290,23 @@ process = ligolw_process.register_to_xmldoc(xmldoc, "StringSearch", options.__di
sngl_burst_table = lsctables.New(lsctables.SnglBurstTable, ["process:process_id", "event_id","ifo","search","channel","start_time","start_time_ns","peak_time","peak_time_ns","duration","central_freq","bandwidth","amplitude","snr","confidence","chisq","chisq_dof"]) sngl_burst_table = lsctables.New(lsctables.SnglBurstTable, ["process:process_id", "event_id","ifo","search","channel","start_time","start_time_ns","peak_time","peak_time_ns","duration","central_freq","bandwidth","amplitude","snr","confidence","chisq","chisq_dof"])
xmldoc.childNodes[-1].appendChild(sngl_burst_table) xmldoc.childNodes[-1].appendChild(sngl_burst_table)
#
# append search_summary table
# nevents will be filled out later
#
search_summary_table = lsctables.New(lsctables.SearchSummaryTable, ["process:process_id", "comment", "ifos", "in_start_time", "in_start_time_ns", "in_end_time", "in_end_time_ns", "out_start_time", "out_start_time_ns", "out_end_time", "out_end_time_ns", "nevents", "nnodes"])
xmldoc.childNodes[-1].appendChild(search_summary_table)
search_summary = lsctables.SearchSummary()
search_summary.process_id = process.process_id
if options.user_tag:
search_summary.comment = options.user_tag
search_summary.ifos = template_bank_table[0].ifo
search_summary.out_start = search_summary.in_start = LIGOTimeGPS(options.gps_start_time)
search_summary.out_end = search_summary.in_end = LIGOTimeGPS(options.gps_end_time)
search_summary.nnodes = 1
search_summary.nevents = 0
# #
# trigger generator # trigger generator
...@@ -260,26 +315,15 @@ xmldoc.childNodes[-1].appendChild(sngl_burst_table) ...@@ -260,26 +315,15 @@ xmldoc.childNodes[-1].appendChild(sngl_burst_table)
head = triggergen = pipeparts.mkgeneric(pipeline, head, "lal_string_triggergen", threshold = options.threshold, cluster = options.cluster_events, bank_filename = options.template_bank, autocorrelation_matrix = numpy.zeros((len(template_bank_table), 403),dtype=numpy.float64)) head = triggergen = pipeparts.mkgeneric(pipeline, head, "lal_string_triggergen", threshold = options.threshold, cluster = options.cluster_events, bank_filename = options.template_bank, autocorrelation_matrix = numpy.zeros((len(template_bank_table), 403),dtype=numpy.float64))
mainloop = GObject.MainLoop()
handler = PipelineHandler(mainloop, pipeline, template_bank_table, firbank)
# #
# appsync # appsync
# #
def appsink_new_buffer(elem): appsync = pipeparts.AppSync(appsink_new_buffer = handler.appsink_new_buffer)
buf = elem.emit("pull-sample").get_buffer()
events = []
for i in range(buf.n_memory()):
memory = buf.peek_memory(i)
result, mapinfo = memory.map(Gst.MapFlags.READ)
assert result
if mapinfo.data:
events.extend(snglbursttable.GSTLALSnglBurst.from_buffer(mapinfo.data))
memory.unmap(mapinfo)
for event in events:
event.process_id = process.process_id
event.event_id = sngl_burst_table.get_next_id()
sngl_burst_table.append(event)
appsync = pipeparts.AppSync(appsink_new_buffer = appsink_new_buffer)
appsync.add_sink(pipeline, head, caps = Gst.Caps.from_string("application/x-lal-snglburst")) appsync.add_sink(pipeline, head, caps = Gst.Caps.from_string("application/x-lal-snglburst"))
...@@ -295,18 +339,15 @@ options.gps_start_time = LIGOTimeGPS(options.gps_start_time) ...@@ -295,18 +339,15 @@ options.gps_start_time = LIGOTimeGPS(options.gps_start_time)
options.gps_end_time = LIGOTimeGPS(options.gps_end_time) options.gps_end_time = LIGOTimeGPS(options.gps_end_time)
datasource.pipeline_seek_for_gps(pipeline, options.gps_start_time, options.gps_end_time); datasource.pipeline_seek_for_gps(pipeline, options.gps_start_time, options.gps_end_time);
if pipeline.set_state(Gst.State.PLAYING) != Gst.StateChangeReturn.SUCCESS: if pipeline.set_state(Gst.State.PLAYING) != Gst.StateChangeReturn.SUCCESS:
raise RuntimeError("pipeline did not enter playing state") raise RuntimeError("pipeline did not enter playing state")
mainloop = GObject.MainLoop()
handler = PSDHandler(mainloop, pipeline, firbank)
mainloop.run() mainloop.run()
# #
# write output to disk # write output to disk
# #
search_summary_table.append(search_summary)
ligolw_utils.write_filename(xmldoc, options.output, gz = (options.output or "stdout").endswith(".gz"), verbose = options.verbose) ligolw_utils.write_filename(xmldoc, options.output, gz = (options.output or "stdout").endswith(".gz"), verbose = options.verbose)
Loading...
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment