Commit 029bed8e authored by Aaron Viets

lal_smoothkappas: allow for negative latency (or any latency)

parent d5a59d09
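A minimal sketch (not part of the commit) of the new waste_samples handling shown in the first hunk below: the latency fraction is no longer restricted to [0, 1], so the sample count is computed as a signed int and clamped at zero before the EOS flush. The filter lengths here are stand-ins taken from the updated test script (128 s median and 1 s average at 16 Hz).

# Sketch only: reproduce the waste_samples arithmetic from sink_event().
array_size = 128 * 16      # running-median length in samples (assumed)
avg_array_size = 1 * 16    # running-average length in samples (assumed)
for filter_latency in (1.0, 0.5, 0.0, -1.0):
    waste_samples = int(filter_latency * (array_size + avg_array_size - 2))
    flushed = max(waste_samples, 0)  # the new clamp: EOS flush only if positive
    print("latency %+5.2f -> waste_samples = %6d, flushed at EOS = %d" % (filter_latency, waste_samples, flushed))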
@@ -486,20 +486,21 @@ static gboolean sink_event(GstBaseTransform *trans, GstEvent *event) {
gboolean success = TRUE;
GST_DEBUG_OBJECT(element, "Got %s event on sink pad", GST_EVENT_TYPE_NAME(event));
-guint64 waste_samples = (guint64) (element->filter_latency * (element->array_size + element->avg_array_size - 2));
+int waste_samples = (int) (element->filter_latency * (element->array_size + element->avg_array_size - 2));
+waste_samples = waste_samples > 0 ? waste_samples : 0;
if(GST_EVENT_TYPE(event) == GST_EVENT_EOS && waste_samples > 0) {
/* Trick the function by passing it fake input data */
void *fake = g_malloc(waste_samples * element->unit_size);
void *data = g_malloc(waste_samples * element->unit_size);
GstFlowReturn result;
if(element->data_type == GSTLAL_SMOOTHKAPPAS_F32) {
-result = smooth_buffer_float((float *) fake, waste_samples, (float *) data, waste_samples, element->fifo_array_re, element->avg_array_re, element->default_kappa_re, &element->current_median_re, element->maximum_offset_re, element->array_size, element->avg_array_size, &element->index_re, &element->index_im, &element->avg_index_re, &element->avg_index_im, &element->num_bad_in_avg_re, TRUE, element->default_to_median, element->track_bad_kappa, &element->samples_in_filter, element->no_default);
+result = smooth_buffer_float((float *) fake, (guint64) waste_samples, (float *) data, (guint64) waste_samples, element->fifo_array_re, element->avg_array_re, element->default_kappa_re, &element->current_median_re, element->maximum_offset_re, element->array_size, element->avg_array_size, &element->index_re, &element->index_im, &element->avg_index_re, &element->avg_index_im, &element->num_bad_in_avg_re, TRUE, element->default_to_median, element->track_bad_kappa, &element->samples_in_filter, element->no_default);
} else if(element->data_type == GSTLAL_SMOOTHKAPPAS_F64) {
-result = smooth_buffer_double((double *) fake, waste_samples, (double *) data, waste_samples, element->fifo_array_re, element->avg_array_re, element->default_kappa_re, &element->current_median_re, element->maximum_offset_re, element->array_size, element->avg_array_size, &element->index_re, &element->index_im, &element->avg_index_re, &element->avg_index_im, &element->num_bad_in_avg_re, TRUE, element->default_to_median, element->track_bad_kappa, &element->samples_in_filter, element->no_default);
+result = smooth_buffer_double((double *) fake, (guint64) waste_samples, (double *) data, (guint64) waste_samples, element->fifo_array_re, element->avg_array_re, element->default_kappa_re, &element->current_median_re, element->maximum_offset_re, element->array_size, element->avg_array_size, &element->index_re, &element->index_im, &element->avg_index_re, &element->avg_index_im, &element->num_bad_in_avg_re, TRUE, element->default_to_median, element->track_bad_kappa, &element->samples_in_filter, element->no_default);
} else if(element->data_type == GSTLAL_SMOOTHKAPPAS_Z64) {
-result = smooth_complex_buffer_float((float complex *) fake, waste_samples, (float complex *) data, waste_samples, element->fifo_array_re, element->fifo_array_im, element->avg_array_re, element->avg_array_im, element->default_kappa_re, element->default_kappa_im, &element->current_median_re, &element->current_median_im, element->maximum_offset_re, element->maximum_offset_im, element->array_size, element->avg_array_size, &element->index_re, &element->index_im, &element->avg_index_re, &element->avg_index_im, &element->num_bad_in_avg_re, &element->num_bad_in_avg_im, TRUE, element->default_to_median, element->track_bad_kappa, &element->samples_in_filter, element->no_default);
+result = smooth_complex_buffer_float((float complex *) fake, (guint64) waste_samples, (float complex *) data, (guint64) waste_samples, element->fifo_array_re, element->fifo_array_im, element->avg_array_re, element->avg_array_im, element->default_kappa_re, element->default_kappa_im, &element->current_median_re, &element->current_median_im, element->maximum_offset_re, element->maximum_offset_im, element->array_size, element->avg_array_size, &element->index_re, &element->index_im, &element->avg_index_re, &element->avg_index_im, &element->num_bad_in_avg_re, &element->num_bad_in_avg_im, TRUE, element->default_to_median, element->track_bad_kappa, &element->samples_in_filter, element->no_default);
} else if(element->data_type == GSTLAL_SMOOTHKAPPAS_Z128) {
-result = smooth_complex_buffer_double((double complex *) fake, waste_samples, (double complex *) data, waste_samples, element->fifo_array_re, element->fifo_array_im, element->avg_array_re, element->avg_array_im, element->default_kappa_re, element->default_kappa_im, &element->current_median_re, &element->current_median_im, element->maximum_offset_re, element->maximum_offset_im, element->array_size, element->avg_array_size, &element->index_re, &element->index_im, &element->avg_index_re, &element->avg_index_im, &element->num_bad_in_avg_re, &element->num_bad_in_avg_im, TRUE, element->default_to_median, element->track_bad_kappa, &element->samples_in_filter, element->no_default);
+result = smooth_complex_buffer_double((double complex *) fake, (guint64) waste_samples, (double complex *) data, (guint64) waste_samples, element->fifo_array_re, element->fifo_array_im, element->avg_array_re, element->avg_array_im, element->default_kappa_re, element->default_kappa_im, &element->current_median_re, &element->current_median_im, element->maximum_offset_re, element->maximum_offset_im, element->array_size, element->avg_array_size, &element->index_re, &element->index_im, &element->avg_index_re, &element->avg_index_im, &element->num_bad_in_avg_re, &element->num_bad_in_avg_im, TRUE, element->default_to_median, element->track_bad_kappa, &element->samples_in_filter, element->no_default);
} else {
result = GST_FLOW_ERROR;
success = FALSE;
@@ -509,9 +510,9 @@ static gboolean sink_event(GstBaseTransform *trans, GstEvent *event) {
if(result == GST_FLOW_OK) {
GstBuffer *buf;
-buf = gst_buffer_new_wrapped(data, waste_samples * element->unit_size);
+buf = gst_buffer_new_wrapped(data, (guint64) waste_samples * element->unit_size);
-set_metadata(element, buf, waste_samples);
+set_metadata(element, buf, (guint64) waste_samples);
/* push buffer downstream */
GST_DEBUG_OBJECT(element, "pushing final buffer %" GST_BUFFER_BOUNDARIES_FORMAT, GST_BUFFER_BOUNDARIES_ARGS(buf));
@@ -554,6 +555,7 @@ static gboolean transform_size(GstBaseTransform *trans, GstPadDirection directio
/* How many samples do we need to throw away based on the filter latency? */
int waste_samples = (int) (element->filter_latency * (element->array_size + element->avg_array_size - 2));
+waste_samples = waste_samples > 0 ? waste_samples : 0;
switch(direction) {
case GST_PAD_SRC:
@@ -608,9 +610,8 @@ static GstFlowReturn transform(GstBaseTransform *trans, GstBuffer *inbuf, GstBuf
*/
if(G_UNLIKELY(GST_BUFFER_IS_DISCONT(inbuf) || GST_BUFFER_OFFSET(inbuf) != element->next_in_offset || !GST_CLOCK_TIME_IS_VALID(element->t0))) {
-guint64 shift_samples = (guint64) (waste_samples < element->samples_in_filter ? waste_samples : element->samples_in_filter);
-element->offset0 = element->next_out_offset = GST_BUFFER_OFFSET(inbuf) - shift_samples;
-element->t0 = GST_BUFFER_PTS(inbuf) - gst_util_uint64_scale_int_round(shift_samples, GST_SECOND, element->rate);
+element->offset0 = element->next_out_offset = GST_BUFFER_OFFSET(inbuf) + (waste_samples < 0 ? (guint64) (-waste_samples) : 0);
+element->t0 = GST_BUFFER_PTS(inbuf) + (waste_samples < 0 ? (guint64) (((-waste_samples) * GST_SECOND + 0.5) / element->rate) : 0);
element->need_discont = TRUE;
element->avg_index_re = ((GST_BUFFER_PTS(inbuf) * element->rate) / 1000000000) % element->avg_array_size;
element->avg_index_im = ((GST_BUFFER_PTS(inbuf) * element->rate) / 1000000000) % element->avg_array_size;
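For negative latencies, the hunk above now advances the output offset and timestamp on a discontinuity instead of shifting them back by the filter delay. A rough Python equivalent of the new t0 adjustment (a sketch, not taken from the source):

GST_SECOND = 1000000000  # nanoseconds per second, as in GStreamer

def t0_advance_ns(waste_samples, rate):
    # Zero unless waste_samples is negative, mirroring the ternary in the C line.
    if waste_samples >= 0:
        return 0
    # Same arithmetic as above: scale to nanoseconds, add 0.5, divide by the
    # sample rate, then truncate.
    return int(((-waste_samples) * GST_SECOND + 0.5) / rate)

# A 128 s median + 1 s average at 16 Hz gives a 2062-sample filter; a latency
# fraction of -1 then advances t0 by 128.875 s.
print(t0_advance_ns(-2062, 16))  # 128875000000 ns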
@@ -968,7 +969,7 @@ static void gstlal_smoothkappas_class_init(GSTLALSmoothKappasClass *klass)
"The latency associated with the smoothing process, as a fraction of the\n\t\t\t"
"total length of the running median + average. If 0, there is no latency.\n\t\t\t"
"If 1, the latency is the length of the running median + average.",
-0.0, 1.0, 0.0,
+-G_MAXDOUBLE, G_MAXDOUBLE, 0.0,
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS | G_PARAM_CONSTRUCT
)
);
@@ -35,11 +35,11 @@ matplotlib.rcParams['font.size'] = 22
matplotlib.rcParams['legend.fontsize'] = 18
matplotlib.rcParams['mathtext.default'] = 'regular'
import matplotlib.pyplot as plt
+from matplotlib import cm
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
-GObject.threads_init()
Gst.init(None)
import lal
@@ -63,13 +63,15 @@ freq = 10.3 # Hz
test_duration = 400000 # seconds
amplitude_mod = 0.000
phase_mod = 0.000
-avg_time = 512 # seconds
-other_lines = [10.301, 10.295, 10.31, 10.28] # Hz
+avg_time = 1 # seconds
+med_time = 128 # seconds
+other_lines = []#[10.301, 10.295, 10.31, 10.28] # Hz
other_mod = 0.000
fft_length = 16000 * 64
fft_overlap = 8000 * 64
num_ffts = (test_duration - 1000 - 8000) // (16000 - 8000)
tf_length = fft_length // 2 + 1
+filter_latencies = np.arange(0.5, -2.1, -0.25)
#
# =============================================================================
@@ -93,6 +95,7 @@ def line_subtraction_ringing_01(pipeline, name):
# The witness channel used to subtract the line, in this case a pure sinusoid.
witness = test_common.test_src(pipeline, rate = 64, test_duration = test_duration, wave = 0, freq = freq, src_suffix = '1')
+witness = pipeparts.mktee(pipeline, witness)
# The signal in the data, which may not be a pure sinusoid, but could have amplitude and/or phase modulation.
amplitude = test_common.test_src(pipeline, rate = 16, test_duration = test_duration, wave = 0, src_suffix = '2', volume = 0)
@@ -143,22 +146,26 @@ def line_subtraction_ringing_01(pipeline, name):
strain = pipeparts.mktee(pipeline, calibration_parts.mkadder(pipeline, calibration_parts.list_srcs(pipeline, strain, signal)))
# Subtract signal using same function that is used in gstlal_compute_strain
-clean_strain = calibration_parts.remove_lines_with_witnesses(pipeline, strain, [[witness]], [[freq]], [0], [], filter_latency = 1, compute_rate = 16, rate_out = 64, num_median = 1 * 16, num_avg = avg_time * 16, noisesub_gate_bit = None)
+clean_strains = []
+for filter_latency in filter_latencies:
+	clean_strains.append(calibration_parts.remove_lines_with_witnesses(pipeline, strain, [[witness]], [[freq]], [0], [], [], filter_latency = filter_latency, compute_rate = 16, rate_out = 64, num_median = med_time * 16, num_avg = avg_time * 16, noisesub_gate_bit = None))
#strain = calibration_parts.mkresample(pipeline, strain, 4, False, "audio/x-raw, format=F64LE, rate=64")
#clean_strain = calibration_parts.mkresample(pipeline, clean_strain, 4, False, "audio/x-raw, format=F64LE, rate=64")
# Remove the initial data
strain = calibration_parts.mkinsertgap(pipeline, strain, insert_gap = False, chop_length = 600 * 1000000000)
-clean_strain = calibration_parts.mkinsertgap(pipeline, clean_strain, insert_gap = False, chop_length = 600 * 1000000000)
strain = pipeparts.mktee(pipeline, pipeparts.mkprogressreport(pipeline, strain, "strain"))
-clean_strain = pipeparts.mktee(pipeline, pipeparts.mkprogressreport(pipeline, clean_strain, "clean_strain"))
-# Compute ASDs
-pipeparts.mkgeneric(pipeline, strain, "lal_asd", fft_samples = 16000 * 64, overlap_samples = 8000 * 64, window_type = 3, filename = "strainASD_%davg.txt" % avg_time)
-pipeparts.mkgeneric(pipeline, clean_strain, "lal_asd", fft_samples = 16000 * 64, overlap_samples = 8000 * 64, window_type = 3, filename = "clean_strainASD_%davg.txt" % avg_time)
-# Compute transfer function
-interleaved = calibration_parts.mkinterleave(pipeline, [clean_strain, strain])
-calibration_parts.mktransferfunction(pipeline, interleaved, fft_length = fft_length, fft_overlap = fft_overlap, num_ffts = num_ffts, use_median = False, update_samples = 1e15, fft_window_type = 3, filename = "lineSubRingingTF_%davg.txt" % avg_time)
+# ASD for strain
+pipeparts.mkgeneric(pipeline, strain, "lal_asd", fft_samples = 16000 * 64, overlap_samples = 8000 * 64, window_type = 3, filename = "strainASD_%dmed_%davg.txt" % (med_time, avg_time))
+for i in range(len(clean_strains)):
+	clean_strains[i] = calibration_parts.mkinsertgap(pipeline, clean_strains[i], insert_gap = False, chop_length = 600 * 1000000000)
+	clean_strains[i] = pipeparts.mktee(pipeline, pipeparts.mkprogressreport(pipeline, clean_strains[i], "clean_strain_%d" % i))
+	# Compute ASDs
+	pipeparts.mkgeneric(pipeline, clean_strains[i], "lal_asd", fft_samples = 16000 * 64, overlap_samples = 8000 * 64, window_type = 3, filename = "clean_strainASD_%dmed_%davg_%d.txt" % (med_time, avg_time, i))
+	# Compute transfer function
+	interleaved = calibration_parts.mkinterleave(pipeline, [clean_strains[i], strain])
+	calibration_parts.mktransferfunction(pipeline, interleaved, fft_length = fft_length, fft_overlap = fft_overlap, num_ffts = num_ffts, use_median = False, update_samples = 1e15, fft_window_type = 3, filename = "lineSubRingingTF_%dmed_%davg_%d.txt" % (med_time, avg_time, i))
#
# done
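For reference, the sweep defined above, np.arange(0.5, -2.1, -0.25), yields eleven latency fractions (0.5, 0.25, ..., -2.0), so the loops above build eleven cleaned-strain branches and write one ASD file and one transfer-function file per branch, indexed by position in filter_latencies. A small sketch of the resulting file names, reusing the script's med_time and avg_time values:

import numpy as np
filter_latencies = np.arange(0.5, -2.1, -0.25)  # 0.5, 0.25, ..., -2.0
med_time, avg_time = 128, 1
for i, lat in enumerate(filter_latencies):
    print("latency %+.2f -> clean_strainASD_%dmed_%davg_%d.txt, lineSubRingingTF_%dmed_%davg_%d.txt" % (lat, med_time, avg_time, i, med_time, avg_time, i))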
@@ -178,74 +185,91 @@ def line_subtraction_ringing_01(pipeline, name):
test_common.build_and_run(line_subtraction_ringing_01, "line_subtraction_ringing_01")
# Get ASD data
-print("Loading ASD txt file 1 of 2")
-strain = np.loadtxt("strainASD_%davg.txt" % avg_time)
-print("Done loading ASD txt file 1 of 2")
+print("Loading ASD txt file 1 of %d" % (1 + len(filter_latencies)))
+strain = np.loadtxt("strainASD_%dmed_%davg.txt" % (med_time, avg_time))
+print("Done loading ASD txt file 1 of %d" % (1 + len(filter_latencies)))
fvec = np.transpose(strain)[0]
strain = np.transpose(strain)[1]
-print("Loading ASD txt file 2 of 2")
-clean = np.loadtxt("clean_strainASD_%davg.txt" % avg_time)
-print("Done loading ASD txt file 2 of 2")
-clean = np.transpose(clean)[1]
+clean = []
+for i in range(len(filter_latencies)):
+	print("Loading ASD txt file %d of %d" % (2 + i, 1 + len(filter_latencies)))
+	clean.append(np.loadtxt("clean_strainASD_%dmed_%davg_%d.txt" % (med_time, avg_time, i)))
+	print("Done loading ASD txt file %d of %d" % (2 + i, 1 + len(filter_latencies)))
+	clean[i] = np.transpose(clean[i])[1]
# Get TF data
print("Loading TF txt file")
-# Remove unwanted lines from TF file, and re-format wanted lines
-f = open("lineSubRingingTF_%davg.txt" % avg_time, "r")
-lines = f.readlines()
-f.close()
-os.system("rm lineSubRingingTF_%davg.txt" % avg_time)
-f = open("lineSubRingingTF_%davg.txt" % avg_time, "w")
-for j in range(3, 3 + tf_length):
-	f.write(lines[j].replace(' + ', '\t').replace(' - ', '\t-').replace('i', ''))
-f.close()
-TF = np.loadtxt("lineSubRingingTF_%davg.txt" % avg_time)
-print("Done loading TF txt file")
TFfvec = []
TFmag = []
TFphase = []
-for j in range(0, len(TF)):
-	TFfvec.append(TF[j][0])
-	tf_at_f = (TF[j][1] + 1j * TF[j][2])
-	TFmag.append(abs(tf_at_f))
-	TFphase.append(np.angle(tf_at_f) * 180.0 / np.pi)
+# Remove unwanted lines from TF file, and re-format wanted lines
+for i in range(len(filter_latencies)):
+	f = open("lineSubRingingTF_%dmed_%davg_%d.txt" % (med_time, avg_time, i), "r")
+	lines = f.readlines()
+	f.close()
+	os.system("rm lineSubRingingTF_%dmed_%davg_%d.txt" % (med_time, avg_time, i))
+	f = open("lineSubRingingTF_%dmed_%davg_%d.txt" % (med_time, avg_time, i), "w")
+	for j in range(3, 3 + tf_length):
+		f.write(lines[j].replace(' + ', '\t').replace(' - ', '\t-').replace('i', ''))
+	f.close()
+	TF = np.loadtxt("lineSubRingingTF_%dmed_%davg_%d.txt" % (med_time, avg_time, i))
+	TFfvec = []
+	TFmag.append([])
+	TFphase.append([])
+	for j in range(0, len(TF)):
+		TFfvec.append(TF[j][0])
+		tf_at_f = (TF[j][1] + 1j * TF[j][2])
+		TFmag[i].append(abs(tf_at_f))
+		TFphase[i].append(np.angle(tf_at_f) * 180.0 / np.pi)
+print("Done loading TF txt files")
# Plot ASDs against frequency
plt.figure(figsize = (10, 6))
-colors = ['red', 'blue']
-labels = ['strain', 'clean']
-plt.plot(fvec, strain, colors[0], linewidth = 0.75)
-plt.plot(fvec, clean, colors[1], linewidth = 0.75)
-patches = [mpatches.Patch(color = colors[j], label = r'$%s$' % labels[j]) for j in range(len(labels))]
+colors = ['red'] + [cm.viridis(x) for x in np.linspace(0, 1, len(filter_latencies))]
+labels = ['strain']
+for lat in filter_latencies:
+	labels.append("%0.2f latency" % lat)
+plt.plot(fvec, strain, color = colors[0], linewidth = 0.75)
+for i in range(len(filter_latencies)):
+	plt.plot(fvec, clean[i], color = colors[1 + i], linewidth = 0.75)
+patches = [mpatches.Patch(color = colors[j], label = r'$%s$' % labels[j]) for j in range(len(colors))]
plt.legend(handles = patches, loc = 'upper right', ncol = 1)
-plt.title('ASD with %d s Average' % (avg_time))
+plt.title('ASD %d s Median, %d s Average' % (med_time, avg_time))
plt.ylabel(r'${\rm ASD}\ \left[{\rm strain / }\sqrt{\rm Hz}\right]$')
plt.xlabel(r'${\rm Frequency \ [Hz]}$')
-ticks_and_grid(plt.gca(), xmin = freq - 0.03, xmax = freq + 0.02, ymin = 0.0001, ymax = 10000, xscale = 'linear', yscale = 'log')
+ticks_and_grid(plt.gca(), xmin = freq - 0.01, xmax = freq + 0.01, ymin = 0.0001, ymax = 10000, xscale = 'linear', yscale = 'log')
if any(other_lines):
-	plt.savefig("lineSubRingingOtherLinesASD_%davg.png" % (avg_time))
+	plt.savefig("lineSubRingingOtherLinesASD_%dmed_%davg.png" % (med_time, avg_time))
else:
-	plt.savefig("lineSubRingingASD_%davg.png" % (avg_time))
+	plt.savefig("lineSubRingingASD_%dmed_%davg.png" % (med_time, avg_time))
# Plot transfer function against frequency
plt.figure(figsize = (10, 10))
+colors = [cm.viridis(x) for x in np.linspace(0, 1, len(filter_latencies))]
+labels = []
plt.subplot(211)
-plt.plot(TFfvec, TFmag, 'green', linewidth = 0.75)
-ticks_and_grid(plt.gca(), xmin = freq - 0.03, xmax = freq + 0.02, ymin = 0, ymax = 2, xscale = 'linear', yscale = 'linear')
+for i in range(len(filter_latencies)):
+	labels.append("%0.2f latency" % filter_latencies[i])
+	plt.plot(TFfvec, TFmag[i], color = colors[i], linewidth = 0.75)
+patches = [mpatches.Patch(color = colors[j], label = r'$%s$' % labels[j]) for j in range(len(colors))]
+plt.legend(handles = patches, loc = 'upper right', ncol = 1)
+ticks_and_grid(plt.gca(), xmin = freq - 0.01, xmax = freq + 0.01, ymin = 0, ymax = 2, xscale = 'linear', yscale = 'linear')
plt.ylabel('Magnitude')
-plt.title('TF with %d s Average' % (avg_time))
+plt.title('TF %d s Median %d s Avg' % (med_time, avg_time))
plt.subplot(212)
-plt.plot(TFfvec, TFphase, 'green', linewidth = 0.75)
-ticks_and_grid(plt.gca(), xmin = freq - 0.03, xmax = freq + 0.02, ymin = -180, ymax = 180, xscale = 'linear', yscale = 'linear')
+for i in range(len(filter_latencies)):
+	plt.plot(TFfvec, TFphase[i], color = colors[i], linewidth = 0.75)
+ticks_and_grid(plt.gca(), xmin = freq - 0.01, xmax = freq + 0.01, ymin = -180, ymax = 180, xscale = 'linear', yscale = 'linear')
plt.ylabel('Phase [deg]')
plt.xlabel('Frequency [Hz]')
if any(other_lines):
-	plt.savefig("lineSubRingingOtherLinesTF_%davg.png" % (avg_time))
+	plt.savefig("lineSubRingingOtherLinesTF_%dmed_%davg.png" % (med_time, avg_time))
else:
-	plt.savefig("lineSubRingingTF_%davg.png" % (avg_time))
+	plt.savefig("lineSubRingingTF_%dmed_%davg.png" % (med_time, avg_time))
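An aside on the transfer-function post-processing in the last hunk: the TF text files evidently store complex entries as 'a + bi' / 'a - bi', and the loop skips the first three header lines and rewrites each remaining entry as two tab-separated real columns before np.loadtxt reads the file back. A standalone illustration of that replace() chain on a made-up line:

# Illustration only; the sample line below is made up.
line = "1.030000e+01\t9.981234e-01 - 4.200000e-03i"
print(line.replace(' + ', '\t').replace(' - ', '\t-').replace('i', ''))
# -> 1.030000e+01    9.981234e-01    -4.200000e-03
# The spaces around '+' and '-' keep exponents like 'e+01' and 'e-03' intact.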