diff --git a/gstlal-calibration/bin/gstlal_compute_strain b/gstlal-calibration/bin/gstlal_compute_strain
index a149bfd06f16eb31f1b311903b2fb5cdc7902695..328165cab4710ebde20bd970ce52bf49b31d0824 100755
--- a/gstlal-calibration/bin/gstlal_compute_strain
+++ b/gstlal-calibration/bin/gstlal_compute_strain
@@ -575,6 +575,26 @@ if factors_from_filters_file or compute_calib_statevector:
 		if factors_from_filters_file and (compute_kappapum or compute_kappauim):
 			raise ValueError("Cannot compute kappa_PUM or kappa_UIM, as the needed EPICS are not contained in the specified filters file.")
 
+# Load the high-pass filters for h(t)
+if "ctrl_highpass" in filters:
+	act_highpass = filters["ctrl_highpass"]
+	act_highpass_delay = int(filters['ctrl_highpass_delay'])
+elif "actuation_highpass" in filters:
+	act_highpass = filters["actuation_highpass"]
+	act_highpass_delay = int(filters['actuation_highpass_delay'])
+else:
+	act_highpass = []
+if "res_highpass" in filters:
+	invsens_highpass = filters["res_highpass"]
+	invsens_highpass_delay = int(filters['res_highpass_delay'])
+	invsens_highpass_sr = int(filters['res_highpass_sr']) if 'res_highpass_sr' in filters else hoft_sr
+elif "inv_sensing_highpass" in filters:
+	invsens_highpass = filters["inv_sensing_highpass"]
+	invsens_highpass_delay = int(filters['invsens_highpass_delay'])
+	invsens_highpass_sr = int(filters['invsens_highpass_sr']) if 'invsens_highpass_sr' in filters else hoft_sr
+else:
+	invsens_highpass = []
+
 # If we're performing partial calibration, load the deltal filters
 if CalibrationConfigs["calibrationmode"] == "Partial":
 	reschaindelay = int(filters["res_corr_delay"])
@@ -600,20 +620,6 @@ if CalibrationConfigs["calibrationmode"] == "Partial":
 		resdewhitendelay = int(filters["deltal_res_dewhiten_delay"])
 		resdewhiten = filters["deltal_res_dewhiten"]
 
-	# Load the high-pass filter for h(t)
-	try:
-		act_highpass_delay = int(filters['ctrl_highpass_delay'])
-		invsens_highpass_delay = int(filters['res_highpass_delay'])
-		act_highpass = filters["ctrl_highpass"]
-		invsens_highpass = filters["res_highpass"]
-	except:
-		act_highpass = []
-		invsens_highpass = []
-	try:
-		invsens_highpass_sr = int(filters['res_highpass_sr'])
-	except:
-		invsens_highpass_sr = hoft_sr
-
 # If we're performing full calibration, load the actuation, sensing filters
 if CalibrationConfigs["calibrationmode"] == "Full":
 	tstchainsr = int(filters["actuation_tst_sr"])
@@ -645,20 +651,6 @@ if CalibrationConfigs["calibrationmode"] == "Full":
 		resdewhitendelay = int(filters["dewhiten_err_delay"])
 		resdewhiten = filters["dewhiten_err"]
 
-	# Load the high-pass filter for h(t)
-	try:
-		act_highpass_delay = int(filters['actuation_highpass_delay'])
-		invsens_highpass_delay = int(filters['invsens_highpass_delay'])
-		act_highpass = filters["actuation_highpass"]
-		invsens_highpass = filters["inv_sensing_highpass"]
-	except:
-		act_highpass = []
-		invsens_highpass = []
-	try:
-		invsens_highpass_sr = int(filters['res_highpass_sr'])
-	except:
-		invsens_highpass_sr = hoft_sr
-
 # If we are reading EPICS from the frames and want to remove calibration lines, account for the fact that not all EPICS were available at all times.
 if not factors_from_filters_file and InputConfigs["datasource"] == "frames" and ((instrument == "H1" and gps_start_time < 1175976256) or (instrument == "L1" and gps_start_time < 1179588864)) and "tstexc_linefreq" in act_line_removal_dict.keys():
 	del act_line_removal_dict["tstexc_linefreq"]
@@ -930,19 +922,19 @@ for key in headkeys:
 	elif key.startswith("EP6_") or key.startswith("EP11_"):
 		C_epics_dict[key] = calibration_parts.caps_and_progress(pipeline, head_dict[key], ref_factors_caps, key)
 		C_epics_dict[key] = calibration_parts.mkresample(pipeline, C_epics_dict[key], 0, False, compute_calib_factors_caps)
-		C_epics_dict[key] = (pipeparts.mktee(pipeline, C_epics_dict[key]), float(filters[key]))
+		C_epics_dict[key] = (pipeparts.mktee(pipeline, C_epics_dict[key]), float(filters[key]) if key in filters else numpy.inf)
 	elif key.startswith("EP7_") or key.startswith("EP12_"):
 		D_epics_dict[key] = calibration_parts.caps_and_progress(pipeline, head_dict[key], ref_factors_caps, key)
 		D_epics_dict[key] = calibration_parts.mkresample(pipeline, D_epics_dict[key], 0, False, compute_calib_factors_caps)
-		D_epics_dict[key] = (pipeparts.mktee(pipeline, D_epics_dict[key]), float(filters[key]))
+		D_epics_dict[key] = (pipeparts.mktee(pipeline, D_epics_dict[key]), float(filters[key]) if key in filters else numpy.inf)
 	elif key.startswith("EP1_") or key.startswith("EP2_") or key.startswith("EP15_") or key.startswith("EP22_"):
 		misc_epics_dict[key] = calibration_parts.caps_and_progress(pipeline, head_dict[key], ref_factors_caps, key)
 		misc_epics_dict[key] = calibration_parts.mkresample(pipeline, misc_epics_dict[key], 0, False, compute_calib_factors_caps)
-		misc_epics_dict[key] = (pipeparts.mktee(pipeline, misc_epics_dict[key]), float(filters[key]))
+		misc_epics_dict[key] = (pipeparts.mktee(pipeline, misc_epics_dict[key]), float(filters[key]) if key in filters else numpy.inf)
 	elif key.startswith("EP"):
 		A_epics_dict[key] = calibration_parts.caps_and_progress(pipeline, head_dict[key], ref_factors_caps, key)
 		A_epics_dict[key] = calibration_parts.mkresample(pipeline, A_epics_dict[key], 0, False, compute_calib_factors_caps)
-		A_epics_dict[key] = (pipeparts.mktee(pipeline, A_epics_dict[key]), float(filters[key]))
+		A_epics_dict[key] = (pipeparts.mktee(pipeline, A_epics_dict[key]), float(filters[key]) if key in filters else numpy.inf)
 
 if use_coherence:
 	if compute_kappatst or compute_kappapum or compute_kappauim or compute_kappapu or compute_kappac or compute_fcc or compute_fs or compute_srcq:
@@ -1666,7 +1658,7 @@ if test_filters:
 	tst_tf_dur = gps_end_time - tst_tf_start - tst_filter_latency - 10 if InputConfigs["datasource"] == "frames" else 300
 	tst_tf_dur = tst_tf_dur - tst_tf_dur % 4 - 2
 	num_tst_ffts = int(tst_tf_dur / 2)
-	pipeparts.mkgeneric(pipeline, tst_tf, "lal_transferfunction", fft_length = 4 * tstchainsr, fft_overlap = 2 * tstchainsr, num_ffts = num_tst_ffts, use_median = True, update_samples = 1e15, update_delay_samples = tst_tf_delay * tstchainsr, filename = "tst_filters_transfer_function_%d-%d.txt" % (tst_tf_start, tst_tf_dur), name = "tst_filters_tf")
+	pipeparts.mkgeneric(pipeline, tst_tf, "lal_transferfunction", fft_length = 4 * tstchainsr, fft_overlap = 2 * tstchainsr, num_ffts = num_tst_ffts, use_median = True, update_samples = 1e15, update_delay_samples = tst_tf_delay * tstchainsr, filename = "%s_tst_filters_transfer_function_%d-%d.txt" % (filters_name.split('/')[-1].replace('.', '_'), tst_tf_start, tst_tf_dur), name = "tst_filters_tf")
 
 # apply kappa_tst if we haven't already
 if apply_kappatst and not apply_complex_kappatst:
@@ -1728,7 +1720,7 @@ if apply_kappapum or apply_kappauim or apply_complex_kappapum or apply_complex_k
 		pum_tf_dur = gps_end_time - pum_tf_start - pum_filter_latency - 10 if InputConfigs["datasource"] == "frames" else 300
 		pum_tf_dur = pum_tf_dur - pum_tf_dur % 4 - 2
 		num_pum_ffts = int(pum_tf_dur / 2)
-		pipeparts.mkgeneric(pipeline, pum_tf, "lal_transferfunction", fft_length = 4 * pumchainsr, fft_overlap = 2 * pumchainsr, num_ffts = num_pum_ffts, use_median = True, update_samples = 1e15, update_delay_samples = pum_tf_delay * pumchainsr, filename = "pum_filters_transfer_function_%d-%d.txt" % (pum_tf_start, pum_tf_dur), name = "pum_filters_tf")
+		pipeparts.mkgeneric(pipeline, pum_tf, "lal_transferfunction", fft_length = 4 * pumchainsr, fft_overlap = 2 * pumchainsr, num_ffts = num_pum_ffts, use_median = True, update_samples = 1e15, update_delay_samples = pum_tf_delay * pumchainsr, filename = "%s_pum_filters_transfer_function_%d-%d.txt" % (filters_name.split('/')[-1].replace('.', '_'), pum_tf_start, pum_tf_dur), name = "pum_filters_tf")
 
 	if apply_complex_kappauim:
 		if filter_latency_factor == 0:
@@ -1757,7 +1749,7 @@ if apply_kappapum or apply_kappauim or apply_complex_kappapum or apply_complex_k
 		uim_tf_dur = gps_end_time - uim_tf_start - uim_filter_latency - 10 if InputConfigs["datasource"] == "frames" else 300
 		uim_tf_dur = uim_tf_dur - uim_tf_dur % 4 - 2
 		num_uim_ffts = int(uim_tf_dur / 2)
-		pipeparts.mkgeneric(pipeline, uim_tf, "lal_transferfunction", fft_length = 4 * uimchainsr, fft_overlap = 2 * uimchainsr, num_ffts = num_uim_ffts, use_median = True, update_samples = 1e15, update_delay_samples = uim_tf_delay * uimchainsr, filename = "uim_filters_transfer_function_%d-%d.txt" % (uim_tf_start, uim_tf_dur), name = "uim_filters_tf")
+		pipeparts.mkgeneric(pipeline, uim_tf, "lal_transferfunction", fft_length = 4 * uimchainsr, fft_overlap = 2 * uimchainsr, num_ffts = num_uim_ffts, use_median = True, update_samples = 1e15, update_delay_samples = uim_tf_delay * uimchainsr, filename = "%s_uim_filters_transfer_function_%d-%d.txt" % (filters_name.split('/')[-1].replace('.', '_'), uim_tf_start, uim_tf_dur), name = "uim_filters_tf")
 
 	# apply kappa_pum if we haven't already
 	if apply_kappapum and not apply_complex_kappapum:
@@ -1823,7 +1815,7 @@ else:
 		pumuim_tf_dur = gps_end_time - pumuim_tf_start - pumuim_filter_latency - 10 if InputConfigs["datasource"] == "frames" else 300
 		pumuim_tf_dur = pumuim_tf_dur - pumuim_tf_dur % 4 - 2
 		num_pumuim_ffts = int(pumuim_tf_dur / 2)
-		pipeparts.mkgeneric(pipeline, pumuim_tf, "lal_transferfunction", fft_length = 4 * pumuimchainsr, fft_overlap = 2 * pumuimchainsr, num_ffts = num_pumuim_ffts, use_median = True, update_samples = 1e15, update_delay_samples = pumuim_tf_delay * pumuimchainsr, filename = "pumuim_filters_transfer_function_%d-%d.txt" % (pumuim_tf_start, pumuim_tf_dur), name = "pumuim_filters_tf")
+		pipeparts.mkgeneric(pipeline, pumuim_tf, "lal_transferfunction", fft_length = 4 * pumuimchainsr, fft_overlap = 2 * pumuimchainsr, num_ffts = num_pumuim_ffts, use_median = True, update_samples = 1e15, update_delay_samples = pumuim_tf_delay * pumuimchainsr, filename = "%s_pumuim_filters_transfer_function_%d-%d.txt" % (filters_name.split('/')[-1].replace('.', '_'), pumuim_tf_start, pumuim_tf_dur), name = "pumuim_filters_tf")
 
 	# apply kappa_pu if we haven't already
 	if apply_kappapu and not apply_complex_kappapu:
@@ -1922,7 +1914,7 @@ if test_filters:
 	res_tf_dur = gps_end_time - res_tf_start - res_filter_latency - 10 if InputConfigs["datasource"] == "frames" else 300
 	res_tf_dur = res_tf_dur - res_tf_dur % 4 - 2
 	num_res_ffts = int(res_tf_dur / 2)
-	pipeparts.mkgeneric(pipeline, res_tf, "lal_transferfunction", fft_length = 4 * hoft_sr, fft_overlap = 2 * hoft_sr, num_ffts = num_res_ffts, use_median = True, update_samples = 1e15, update_delay_samples = res_tf_delay * hoft_sr, filename = "res_filters_transfer_function_%d-%d.txt" % (res_tf_start, res_tf_dur), name = "res_filters_tf")
+	pipeparts.mkgeneric(pipeline, res_tf, "lal_transferfunction", fft_length = 4 * hoft_sr, fft_overlap = 2 * hoft_sr, num_ffts = num_res_ffts, use_median = True, update_samples = 1e15, update_delay_samples = res_tf_delay * hoft_sr, filename = "%s_res_filters_transfer_function_%d-%d.txt" % (filters_name.split('/')[-1].replace('.', '_'), res_tf_start, res_tf_dur), name = "res_filters_tf")
 
 # Apply \kappa_c if we haven't already
 if apply_kappac and not (apply_fcc or apply_fs or apply_srcq):
@@ -1951,7 +1943,7 @@ if test_filters:
 	response_dur = gps_end_time - response_start - max(res_filter_latency, tst_filter_latency) - 10 if InputConfigs["datasource"] == "frames" else 300
 	response_dur = response_dur - response_dur % 4 - 2
 	num_response_ffts = int(response_dur / 2)
-	pipeparts.mkgeneric(pipeline, response_function, "lal_transferfunction", fft_length = 4 * hoft_sr, fft_overlap = 2 * hoft_sr, num_ffts = num_response_ffts, use_median = True, update_samples = 1e15, update_delay_samples = response_delay * hoft_sr, filename = "filters_response_transfer_function_%d-%d.txt" % (response_start, response_dur), name = "response_function")
+	pipeparts.mkgeneric(pipeline, response_function, "lal_transferfunction", fft_length = 4 * hoft_sr, fft_overlap = 2 * hoft_sr, num_ffts = num_response_ffts, use_median = True, update_samples = 1e15, update_delay_samples = response_delay * hoft_sr, filename = "%s_response_filters_transfer_function_%d-%d.txt" % (filters_name.split('/')[-1].replace('.', '_'), response_start, response_dur), name = "response_function")
 # Divide by L in a way that is compatitble with old and new filters files, since old filter files don't recored "arm length"
 try:
 	strain = pipeparts.mkaudioamplify(pipeline, strain, 1.0/float(filters["arm_length"]))
diff --git a/gstlal-calibration/config_files/O2/H1/tests/H1DCS_AllCorrections_Cleaning.ini b/gstlal-calibration/config_files/O2/H1/tests/H1DCS_AllCorrections_Cleaning.ini
index 000989b5e0fcb47c4990d2bc273776837fb96234..563d53a802fe942dfbaa967c312e5e18db7e8c83 100644
--- a/gstlal-calibration/config_files/O2/H1/tests/H1DCS_AllCorrections_Cleaning.ini
+++ b/gstlal-calibration/config_files/O2/H1/tests/H1DCS_AllCorrections_Cleaning.ini
@@ -1,6 +1,6 @@
 [InputConfigurations]
 # Filters file containing calibration FIR filters, relative to the directory gstlal-calibration/tests/check_calibration/, from which the pipeline is expected to be run
-FiltersFileName: H1DCS_newsrcline_1173225472.npz
+FiltersFileName: H1DCS_newsrcline_1173225472_v2.npz
 # Data source should be set to frames or lvshm
 DataSource: frames
 FileChecksum: No
diff --git a/gstlal-calibration/config_files/O2/H1/tests/H1DCS_AllCorrections_Cleaning_TEST.ini b/gstlal-calibration/config_files/O2/H1/tests/H1DCS_AllCorrections_Cleaning_TEST.ini
index 6b27800539ed4aa8ed259ccf4cd7683ef695ca2c..1de4a29639e3f0b6617281200e9199ec8ca54f9e 100644
--- a/gstlal-calibration/config_files/O2/H1/tests/H1DCS_AllCorrections_Cleaning_TEST.ini
+++ b/gstlal-calibration/config_files/O2/H1/tests/H1DCS_AllCorrections_Cleaning_TEST.ini
@@ -1,6 +1,6 @@
 [InputConfigurations]
 # Filters file containing calibration FIR filters, relative to the directory gstlal-calibration/tests/check_calibration/, from which the pipeline is expected to be run
-FiltersFileName: H1DCS_newsrcline_1173225472.npz
+FiltersFileName: H1DCS_newsrcline_1173225472_v2.npz
 # Data source should be set to frames or lvshm
 DataSource: frames
 FileChecksum: No
diff --git a/gstlal-calibration/tests/check_calibration/Makefile b/gstlal-calibration/tests/check_calibration/Makefile
index 402fe71e3a295df4aa6631c43d16a15ab9590bee..4c6a018894cf1312b91fb7852731923b0c34524a 100644
--- a/gstlal-calibration/tests/check_calibration/Makefile
+++ b/gstlal-calibration/tests/check_calibration/Makefile
@@ -27,7 +27,7 @@ GDSTESTCONFIGS = ../../config_files/PreER13/H1/H1GDS_TEST_1225558818.ini
 DCSTESTCONFIGS = ../../config_files/O2/H1/tests/H1DCS_AllCorrections_Cleaning_TEST.ini
 GDSSHMCONFIGS = ../../config_files/PreER13/H1/tests/H1GDS_1222058826_shm2frames.ini
 
-all: noise_subtraction_ASD_DCS noise_subtraction_tf_DCS
+all: noise_subtraction_ASD_DCS noise_subtraction_tf_DCS filters_tf_DCS
 
 ###############################################
 ### These commands should change less often ###
@@ -105,6 +105,9 @@ pcal_DCS_transfer_functions: $(IFO)1_easy_raw_frames.cache $(IFO)1_hoft_DCS_fram
 lines_ratio_DCS: $(IFO)1_hoft_DCS_frames.cache
 	python demod_ratio_timeseries.py --ifo $(IFO)1 --gps-end-time $(PLOT_END) --gps-start-time $(PLOT_START) --denominator-frame-cache $(IFO)1_hoft_DCS_frames.cache --numerator-frame-cache $(IFO)1_hoft_DCS_frames.cache --denominator-channel-name 'DCS-CALIB_STRAIN' --numerator-channel-name 'DCS-CALIB_STRAIN_CLEAN' --frequencies '35.9,36.7,331.9,1083.7;60,120,180' --magnitude-ranges '0.0,0.1;0.0,1.0' --phase-ranges '-180.0,180.0;-180.0,180.0' --plot-titles '$(IFO)1 Calibration Line Subtraction;$(IFO)1 Power Mains Line Subtraction'
 
+filters_tf_DCS: $(IFO)1_hoft_DCS_frames.cache
+	python plot_filters_transfer_function.py --tf-frequency-min 0.5 --tf-frequency-max 8192 --ratio-frequency-min 10 --ratio-frequency-max 8192 --ratio-magnitude-min 0.7 --ratio-magnitude-max 1.3 --tf-phase-min -180 --tf-phase-max 180 --ratio-phase-min -20 --ratio-phase-max 20
+
 latency_test: $(IFO)1_hoft_GDS_SHM_frames.cache
 	python latency_plot.py --intime-file gstlal_compute_strain_timestamps_in.txt --outtime-file gstlal_compute_strain_timestamps_out.txt --plot-filename-prefix $(IFO)1GDS_latency --plot-title '$(IFO)1 Calibration Latency vs Time'
 
diff --git a/gstlal-calibration/tests/check_calibration/pcal2darm_timeseries.py b/gstlal-calibration/tests/check_calibration/pcal2darm_timeseries.py
index d4f9494eacd323d6fc8ce077c8ac953fc36c8809..141562a3f171686f409633a10fc1704ed68cff8f 100644
--- a/gstlal-calibration/tests/check_calibration/pcal2darm_timeseries.py
+++ b/gstlal-calibration/tests/check_calibration/pcal2darm_timeseries.py
@@ -111,20 +111,33 @@ InputConfigs = ConfigSectionMap("InputConfigurations")
 
 # Search the directory tree for files with names matching the one we want.
 filters_name = InputConfigs["filtersfilename"]
-filters_paths = []
-print "\nSearching for %s ..." % filters_name
-# Check the user's home directory
-for dirpath, dirs, files in os.walk(os.environ['HOME']):
-	if filters_name in files:
-		# We prefer filters that came directly from a GDSFilters directory of the calibration SVN
-		if dirpath.count("GDSFilters") > 0:
-			filters_paths.insert(0, os.path.join(dirpath, filters_name))
-		else:
-			filters_paths.append(os.path.join(dirpath, filters_name))
-if not len(filters_paths):
-	raise ValueError("Cannot find filters file %s in home directory %s or in /ligo/svncommon/CalSVN/aligocalibration/trunk/Runs/*/GDSFilters", (filters_name, os.environ['HOME']))
-print "Loading calibration filters from %s\n" % filters_paths[0]
-filters = numpy.load(filters_paths[0])
+if filters_name.count('/') > 0:
+	# Then the path to the filters file was given
+	filters = numpy.load(filters_name)
+else:
+	# We need to search for the filters file
+	filters_paths = []
+	print "\nSearching for %s ..." % filters_name
+	# Check the user's home directory
+	for dirpath, dirs, files in os.walk(os.environ['HOME']):
+		if filters_name in files:
+			# We prefer filters that came directly from a GDSFilters directory of the calibration SVN
+			if dirpath.count("GDSFilters") > 0:
+				filters_paths.insert(0, os.path.join(dirpath, filters_name))
+			else:
+				filters_paths.append(os.path.join(dirpath, filters_name))
+	# Check if there is a checkout of the entire calibration SVN
+	for dirpath, dirs, files in os.walk('/ligo/svncommon/CalSVN/aligocalibration/trunk/Runs/'):
+		if filters_name in files:
+			# We prefer filters that came directly from a GDSFilters directory of the calibration SVN
+			if dirpath.count("GDSFilters") > 0:
+				filters_paths.insert(0, os.path.join(dirpath, filters_name))
+			else:
+				filters_paths.append(os.path.join(dirpath, filters_name))
+	if not len(filters_paths):
+		raise ValueError("Cannot find filters file %s in home directory %s or in /ligo/svncommon/CalSVN/aligocalibration/trunk/Runs/*/GDSFilters" % (filters_name, os.environ['HOME']))
+	print "Loading calibration filters from %s\n" % filters_paths[0]
+	filters = numpy.load(filters_paths[0])
 
 ifo = options.ifo
 
diff --git a/gstlal-calibration/tests/check_calibration/plot_filters_transfer_function.py b/gstlal-calibration/tests/check_calibration/plot_filters_transfer_function.py
new file mode 100644
index 0000000000000000000000000000000000000000..8cf162e14554471685d21d5a724f830c0bcbeea1
--- /dev/null
+++ b/gstlal-calibration/tests/check_calibration/plot_filters_transfer_function.py
@@ -0,0 +1,287 @@
+#!/usr/bin/env python
+# Copyright (C) 2018  Aaron Viets
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+#
+# =============================================================================
+#
+#				   Preamble
+#
+# =============================================================================
+#
+
+
+import sys
+import os
+import numpy
+from math import pi
+import resource
+import matplotlib
+matplotlib.rcParams['font.family'] = 'Times New Roman'
+matplotlib.rcParams['font.size'] = 22
+matplotlib.rcParams['legend.fontsize'] = 18
+matplotlib.rcParams['mathtext.default'] = 'regular'
+matplotlib.use('Agg')
+import glob
+import matplotlib.pyplot as plt
+
+from optparse import OptionParser, Option
+
+parser = OptionParser()
+parser.add_option("--tf-file-directory", metavar = "directory", default = '.', help = "location of txt files with transfer functions (Default is current directory, '.')")
+parser.add_option("--model-jump-delay", metavar = "seconds", type = float, default = 0.000061035, help = "Time delay in time-stamping DARM_ERR (Default is one 16384-Hz clock cycle)")
+parser.add_option("--tf-frequency-min", type = float, default = -1, help = "Minimum frequency for transfer function plots (Default is to disable)")
+parser.add_option("--tf-frequency-max", type = float, default = -1, help = "Maximum frequency for transfer function plots (Default is to disable)")
+parser.add_option("--tf-frequency-scale", default = "log", help = "Frequency scale for transfer function plots (linear or log, default is log)")
+parser.add_option("--tf-magnitude-min", type = float, default = -1, help = "Minimum magnitude for transfer function plots (Default is to disable)")
+parser.add_option("--tf-magnitude-max", type = float, default = -1, help = "Maximum magnitude for transfer function plots (Default is to disable)")
+parser.add_option("--tf-magnitude-scale", default = "log", help = "Magnitude scale for transfer function plots (linear or log, default is log)")
+parser.add_option("--tf-phase-min", metavar = "degrees", type = float, default = 1000, help = "Minimum phase for transfer function plots, in degrees (Default is to disable)")
+parser.add_option("--tf-phase-max", metavar = "degrees", type = float, default = 1000, help = "Maximum phase for transfer function plots, in degrees (Default is to disable)")
+parser.add_option("--ratio-frequency-min", type = float, default = -1, help = "Minimum frequency for transfer function ratio plots (Default is to disable)")
+parser.add_option("--ratio-frequency-max", type = float, default = -1, help = "Maximum frequency for transfer function ratio plots (Default is to disable)")
+parser.add_option("--ratio-frequency-scale", default = "log", help = "Frequency scale for transfer function ratio plots (linear or log, default is log)")
+parser.add_option("--ratio-magnitude-min", type = float, default = -1, help = "Minimum magnitude for transfer function ratio plots (Default is to disable)")
+parser.add_option("--ratio-magnitude-max", type = float, default = -1, help = "Maximum magnitude for transfer function ratio plots (Default is to disable)")
+parser.add_option("--ratio-magnitude-scale", default = "linear", help = "Magnitude scale for transfer function ratio plots (linear or log, default is linear)")
+parser.add_option("--ratio-phase-min", metavar = "degrees", type = float, default = 1000, help = "Minimum phase for transfer function ratio plots, in degrees (Default is to disable)")
+parser.add_option("--ratio-phase-max", metavar = "degrees", type = float, default = 1000, help = "Maximum phase for transfer function ratio plots, in degrees (Default is to disable)")
+
+options, filenames = parser.parse_args()
+
+#
+# Load in the filters file that contains filter coefficients, etc.
+# Search the directory tree for files with names matching the one we want.
+#
+
+# Identify any files that have filters transfer functions in them
+# Join the directory into each path so the isfile() test (and the later open/
+# savefig calls) work when --tf-file-directory is not the current directory
+tf_files = [os.path.join(options.tf_file_directory, f) for f in os.listdir(options.tf_file_directory) if (os.path.isfile(os.path.join(options.tf_file_directory, f)) and ('GDS' in f or 'DCS' in f) and 'npz' in f and 'filters_transfer_function' in f and '.txt' in f)]
+for tf_file in tf_files:
+	filters_name = os.path.basename(tf_file).split('_npz')[0] + '.npz'
+	# Search the directory tree for filters files with names matching the one we want.
+	filters_paths = []
+	print "\nSearching for %s ..." % filters_name
+	# Check the user's home directory
+	for dirpath, dirs, files in os.walk(os.environ['HOME']):
+		if filters_name in files:
+			# We prefer filters that came directly from a GDSFilters directory of the calibration SVN
+			if dirpath.count("GDSFilters") > 0:
+				filters_paths.insert(0, os.path.join(dirpath, filters_name))
+			else:
+				filters_paths.append(os.path.join(dirpath, filters_name))
+	# Check if there is a checkout of the entire calibration SVN
+	for dirpath, dirs, files in os.walk('/ligo/svncommon/CalSVN/aligocalibration/trunk/Runs/'):
+		if filters_name in files:
+			# We prefer filters that came directly from a GDSFilters directory of the calibration SVN
+			if dirpath.count("GDSFilters") > 0:
+				filters_paths.insert(0, os.path.join(dirpath, filters_name))
+			else:
+				filters_paths.append(os.path.join(dirpath, filters_name))
+	if not len(filters_paths):
+		raise ValueError("Cannot find filters file %s in home directory %s or in /ligo/svncommon/CalSVN/aligocalibration/trunk/Runs/*/GDSFilters" % (filters_name, os.environ['HOME']))
+	print "Loading calibration filters from %s\n" % filters_paths[0]
+	filters = numpy.load(filters_paths[0])
+
+	# Determine what transfer function this is
+	if '_tst_' in tf_file and 'DCS' in tf_file:
+		plot_title = "TST Transfer Function"
+		model_name = "tst_model" if "tst_model" in filters else None
+	elif '_tst_' in tf_file and 'GDS' in tf_file:
+		plot_title = "TST Correction Transfer Function"
+		model_name = "ctrl_corr_model" if "ctrl_corr_model" in filters else None
+	elif '_pum_' in tf_file and 'DCS' in tf_file:
+		plot_title = "PUM Transfer Function"
+		model_name = "pum_model" if "pum_model" in filters else None
+	elif '_pum_' in tf_file and 'GDS' in tf_file:
+		plot_title = "PUM Correction Transfer Function"
+		model_name = "ctrl_corr_model" if "ctrl_corr_model" in filters else None
+	elif '_uim_' in tf_file and 'DCS' in tf_file:
+		plot_title = "UIM Transfer Function"
+		model_name = "uim_model" if "uim_model" in filters else None
+	elif '_uim_' in tf_file and 'GDS' in tf_file:
+		plot_title = "UIM Correction Transfer Function"
+		model_name = "ctrl_corr_model" if "ctrl_corr_model" in filters else None
+	elif '_pumuim_' in tf_file and 'DCS' in tf_file:
+		plot_title = "PUM/UIM Transfer Function"
+		model_name = "pumuim_model" if "pumuim_model" in filters else None
+	elif '_pumuim_' in tf_file and 'GDS' in tf_file:
+		plot_title = "PUM/UIM Correction Transfer Function"
+		model_name = "ctrl_corr_model" if "ctrl_corr_model" in filters else None
+	elif '_res_' in tf_file and 'DCS' in tf_file:
+		plot_title = "Inverse Sensing Transfer Function"
+		model_name = "invsens_model" if "invsens_model" in filters else None
+	elif '_res_' in tf_file and 'GDS' in tf_file:
+		plot_title = "Inverse Sensing Correction Transfer Function"
+		model_name = "res_corr_model" if "res_corr_model" in filters else None
+	elif '_response_' in tf_file:
+		plot_title = "Response Function"
+		model_name = "response_function" if "response_function" in filters else None
+	else:
+		plot_title = "Transfer Function"
+		model_name = None
+
+	ifo = ''
+	if 'H1' in tf_file:
+		ifo = 'H1 '
+	if 'L1' in tf_file:
+		ifo = 'L1 '
+
+	# Remove unwanted lines from transfer function file, and re-format wanted lines
+	f = open(tf_file, 'r')
+	lines = f.readlines()
+	f.close()
+	tf_length = len(lines) - 5
+	f = open(tf_file.replace('.txt', '_reformatted.txt'), 'w')
+	for j in range(3, 3 + tf_length):
+		f.write(lines[j].replace(' + ', '\t').replace(' - ', '\t-').replace('i', ''))
+	f.close()
+
+	# Read data from re-formatted file and find frequency vector, magnitude, and phase
+	data = numpy.loadtxt(tf_file.replace('.txt', '_reformatted.txt'))
+	os.remove(tf_file.replace('.txt', '_reformatted.txt'))
+	filters_tf = []
+	frequency = []
+	magnitude = []
+	phase = []
+	df = data[1][0] - data[0][0] # frequency spacing
+	for j in range(0, len(data)):
+		frequency.append(data[j][0])
+		tf_at_f = (data[j][1] + 1j * data[j][2]) * numpy.exp(-2j * numpy.pi * data[j][0] * options.model_jump_delay)
+		filters_tf.append(tf_at_f)
+		magnitude.append(abs(tf_at_f))
+		phase.append(numpy.angle(tf_at_f) * 180.0 / numpy.pi)
+
+	# Find frequency-domain models in filters file if they are present and resample if necessary
+	if model_name is not None:
+		model = filters[model_name]
+		model_tf = []
+		model_magnitude = []
+		model_phase = []
+		ratio = []
+		ratio_magnitude = []
+		ratio_phase = []
+		# Check the frequency spacing of the model
+		model_df = model[0][1] - model[0][0]
+		cadence = df / model_df
+		index = 0
+		# This is a linear resampler (it just connects the dots with straight lines)
+		while index < tf_length:
+			before_idx = int(numpy.floor(cadence * index))
+			after_idx = int(numpy.ceil(cadence * index + 1e-10))
+			# Check if we've hit the end of the model transfer function
+			if after_idx >= len(model[0]):
+				if before_idx == cadence * index:
+					model_tf.append(model[1][before_idx] + 1j * model[2][before_idx])
+					model_magnitude.append(abs(model_tf[index]))
+					model_phase.append(numpy.angle(model_tf[index]) * 180.0 / numpy.pi)
+					ratio.append(filters_tf[index] / model_tf[index])
+					ratio_magnitude.append(abs(ratio[index]))
+					ratio_phase.append(numpy.angle(ratio[index]) * 180.0 / numpy.pi)
+				index = tf_length
+			else:
+				before = model[1][before_idx] + 1j * model[2][before_idx]
+				after = model[1][after_idx] + 1j * model[2][after_idx]
+				before_weight = after_idx - cadence * index
+				after_weight = cadence * index - before_idx
+				model_tf.append(before_weight * before + after_weight * after)
+				model_magnitude.append(abs(model_tf[index]))
+				model_phase.append(numpy.angle(model_tf[index]) * 180.0 / numpy.pi)
+				ratio.append(filters_tf[index] / model_tf[index])
+				ratio_magnitude.append(abs(ratio[index]))
+				ratio_phase.append(numpy.angle(ratio[index]) * 180.0 / numpy.pi)
+				index += 1
+
+	# Filter transfer function plots
+	plt.figure(figsize = (10, 10))
+	if model_name is not None:
+		plt.subplot(211)
+		plt.plot(frequency, model_magnitude, 'r', linewidth = 0.5, label = 'Model')
+		leg = plt.legend(fancybox = True)
+		leg.get_frame().set_alpha(0.5)
+		plt.gca().set_xscale(options.tf_frequency_scale)
+		plt.gca().set_yscale(options.tf_magnitude_scale)
+		plt.title(plot_title)
+		plt.ylabel('Magnitude')
+		if options.tf_frequency_max > 0:
+			plt.xlim(options.tf_frequency_min, options.tf_frequency_max)
+		if options.tf_magnitude_max > 0:
+			plt.ylim(options.tf_magnitude_min, options.tf_magnitude_max)
+		plt.grid(True)
+		ax = plt.subplot(212)
+		ax.set_xscale(options.tf_frequency_scale)
+		plt.plot(frequency, model_phase, 'r', linewidth = 0.5)
+		plt.ylabel('Phase [deg]')
+		plt.xlabel('Frequency [Hz]')
+		if options.tf_frequency_max > 0:
+			plt.xlim(options.tf_frequency_min, options.tf_frequency_max)
+		if options.tf_phase_max < 1000:
+			plt.ylim(options.tf_phase_min, options.tf_phase_max)
+		plt.grid(True)
+	plt.subplot(211)
+	plt.plot(frequency, magnitude, 'g', linewidth = 0.5, label = 'Filters')
+	leg = plt.legend(fancybox = True)
+	leg.get_frame().set_alpha(0.5)
+	plt.gca().set_xscale(options.tf_frequency_scale)
+	plt.gca().set_yscale(options.tf_magnitude_scale)
+	plt.title(plot_title)
+	plt.ylabel('Magnitude')
+	if options.tf_frequency_max > 0:
+		plt.xlim(options.tf_frequency_min, options.tf_frequency_max)
+	if options.tf_magnitude_max > 0:
+		plt.ylim(options.tf_magnitude_min, options.tf_magnitude_max)
+	plt.grid(True)
+	ax = plt.subplot(212)
+	ax.set_xscale(options.tf_frequency_scale)
+	plt.plot(frequency, phase, 'g', linewidth = 0.5)
+	plt.ylabel('Phase [deg]')
+	plt.xlabel('Frequency [Hz]')
+	if options.tf_frequency_max > 0:
+		plt.xlim(options.tf_frequency_min, options.tf_frequency_max)
+	if options.tf_phase_max < 1000:
+		plt.ylim(options.tf_phase_min, options.tf_phase_max)
+	plt.grid(True)
+	plt.savefig(tf_file.replace('.txt', '.png'))
+	plt.savefig(tf_file.replace('.txt', '.pdf'))
+
+	# Plots of the ratio filters / model
+	if model_name is not None:
+		plt.figure(figsize = (10, 10))
+		plt.subplot(211)
+		plt.plot(frequency, ratio_magnitude, 'b', linewidth = 0.5, label = 'Filters / Model')
+		leg = plt.legend(fancybox = True)
+		leg.get_frame().set_alpha(0.5)
+		plt.gca().set_xscale(options.ratio_frequency_scale)
+		plt.gca().set_yscale(options.ratio_magnitude_scale)
+		plt.title(plot_title)
+		plt.ylabel('Magnitude')
+		if options.ratio_frequency_max > 0:
+			plt.xlim(options.ratio_frequency_min, options.ratio_frequency_max)
+		if options.ratio_magnitude_max > 0:
+			plt.ylim(options.ratio_magnitude_min, options.ratio_magnitude_max)
+		plt.grid(True)
+		ax = plt.subplot(212)
+		ax.set_xscale(options.ratio_frequency_scale)
+		plt.plot(frequency, ratio_phase, 'b', linewidth = 0.5)
+		plt.ylabel('Phase [deg]')
+		plt.xlabel('Frequency [Hz]')
+		if options.ratio_frequency_max > 0:
+			plt.xlim(options.ratio_frequency_min, options.ratio_frequency_max)
+		if options.ratio_phase_max < 1000:
+			plt.ylim(options.ratio_phase_min, options.ratio_phase_max)
+		plt.grid(True)
+		plt.savefig(tf_file.replace('.txt', '_ratio.png'))
+		plt.savefig(tf_file.replace('.txt', '_ratio.pdf'))
+
diff --git a/gstlal-calibration/tests/check_calibration/plot_transfer_function.py b/gstlal-calibration/tests/check_calibration/plot_transfer_function.py
index ee29d8824c961a01671fc05054dfa682bd6ed140..b954060f443557cbb3528d4a949967eb37b8ccfe 100644
--- a/gstlal-calibration/tests/check_calibration/plot_transfer_function.py
+++ b/gstlal-calibration/tests/check_calibration/plot_transfer_function.py
@@ -149,20 +149,33 @@ if options.config_file is not None:
 
 	# Search the directory tree for files with names matching the one we want.
 	filters_name = InputConfigs["filtersfilename"]
-	filters_paths = []
-	print "\nSearching for %s ..." % filters_name
-	# Check the user's home directory
-	for dirpath, dirs, files in os.walk(os.environ['HOME']):
-		if filters_name in files:
-			# We prefer filters that came directly from a GDSFilters directory of the calibration SVN
-			if dirpath.count("GDSFilters") > 0:
-				filters_paths.insert(0, os.path.join(dirpath, filters_name))
-			else:
-				filters_paths.append(os.path.join(dirpath, filters_name))
-	if not len(filters_paths):
-		raise ValueError("Cannot find filters file %s in home directory %s or in /ligo/svncommon/CalSVN/aligocalibration/trunk/Runs/*/GDSFilters", (filters_name, os.environ['HOME']))
-	print "Loading calibration filters from %s\n" % filters_paths[0]
-	filters = numpy.load(filters_paths[0])
+	if filters_name.count('/') > 0:
+		# Then the path to the filters file was given
+		filters = numpy.load(filters_name)
+	else:
+		# We need to search for the filters file
+		filters_paths = []
+		print "\nSearching for %s ..." % filters_name
+		# Check the user's home directory
+		for dirpath, dirs, files in os.walk(os.environ['HOME']):
+			if filters_name in files:
+				# We prefer filters that came directly from a GDSFilters directory of the calibration SVN
+				if dirpath.count("GDSFilters") > 0:
+					filters_paths.insert(0, os.path.join(dirpath, filters_name))
+				else:
+					filters_paths.append(os.path.join(dirpath, filters_name))
+		# Check if there is a checkout of the entire calibration SVN
+		for dirpath, dirs, files in os.walk('/ligo/svncommon/CalSVN/aligocalibration/trunk/Runs/'):
+			if filters_name in files:
+				# We prefer filters that came directly from a GDSFilters directory of the calibration SVN
+				if dirpath.count("GDSFilters") > 0:
+					filters_paths.insert(0, os.path.join(dirpath, filters_name))
+				else:
+					filters_paths.append(os.path.join(dirpath, filters_name))
+		if not len(filters_paths):
+			raise ValueError("Cannot find filters file %s in home directory %s or in /ligo/svncommon/CalSVN/aligocalibration/trunk/Runs/*/GDSFilters", (filters_name, os.environ['HOME']))
+		print "Loading calibration filters from %s\n" % filters_paths[0]
+		filters = numpy.load(filters_paths[0])
 
 # Get the corrections for numerators and denominator, and resample if necessary
 num_corr = []
diff --git a/gstlal-calibration/tests/fill_silence_test.py b/gstlal-calibration/tests/fill_silence_test.py
new file mode 100755
index 0000000000000000000000000000000000000000..a5857f80a834c666e7b1900bdfb344d722799898
--- /dev/null
+++ b/gstlal-calibration/tests/fill_silence_test.py
@@ -0,0 +1,128 @@
+#!/usr/bin/env python
+# Copyright (C) 2016  Aaron Viets
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+#
+# =============================================================================
+#
+#				   Preamble
+#
+# =============================================================================
+#
+
+
+import random
+import sys
+import os
+import numpy
+import time
+from math import pi
+import resource
+import datetime
+import time
+import matplotlib
+matplotlib.rcParams['font.family'] = 'Times New Roman'
+matplotlib.rcParams['font.size'] = 22
+matplotlib.rcParams['legend.fontsize'] = 18
+matplotlib.rcParams['mathtext.default'] = 'regular'
+matplotlib.use('Agg')
+import glob
+import matplotlib.pyplot as plt
+
+from optparse import OptionParser, Option
+import ConfigParser
+
+import gi
+gi.require_version('Gst', '1.0')
+from gi.repository import GObject, Gst
+GObject.threads_init()
+Gst.init(None)
+
+import lal
+from lal import LIGOTimeGPS
+
+from gstlal import pipeparts
+from gstlal import calibration_parts
+from gstlal import simplehandler
+from gstlal import datasource
+
+from glue.ligolw import ligolw
+from glue.ligolw import array
+from glue.ligolw import param
+from glue.ligolw.utils import segments as ligolw_segments
+array.use_in(ligolw.LIGOLWContentHandler)
+param.use_in(ligolw.LIGOLWContentHandler)
+from glue.ligolw import utils
+from ligo import segments
+from gstlal import test_common
+
+
+#
+# =============================================================================
+#
+#				  Pipelines
+#
+# =============================================================================
+#
+
+def fill_silence_test_01(pipeline, name, line_sep = 0.5):
+	#
+	# This test is intended to help get rid of error messages associated with adding two streams that have different start times
+	#
+
+	rate = 16		# Hz
+	buffer_length = 1.0	# seconds
+	test_duration = 300.0	# seconds
+	filter_latency = 1.0
+
+	#
+	# build pipeline
+	#
+
+	head = test_common.test_src(pipeline, rate = rate, test_duration = test_duration, wave = 5)
+	head = pipeparts.mktee(pipeline, head)
+	smooth = calibration_parts.mkcomplexfirbank(pipeline, head, latency = int((rate * 40 - 1) * filter_latency + 0.5), fir_matrix = [numpy.ones(rate * 40)], time_domain = True)
+	smooth = calibration_parts.mkcomplexfirbank(pipeline, smooth, latency = 23, fir_matrix = [numpy.ones(45)], time_domain = True)
+	#smooth = pipeparts.mkgeneric(pipeline, head, "lal_smoothkappas", array_size = rate * 128, avg_array_size = rate * 10, filter_latency = 1)
+	#smooth = pipeparts.mkgeneric(pipeline, smooth, "splitcounter", name = "smooth")
+	#head = pipeparts.mkgeneric(pipeline, head, "splitcounter", name = "unsmooth")
+	#channelmux_input_dict = {}
+	#channelmux_input_dict["unsmooth"] = calibration_parts.mkqueue(pipeline, head)
+	#channelmux_input_dict["smooth"] = calibration_parts.mkqueue(pipeline, smooth)
+	#mux = pipeparts.mkframecppchannelmux(pipeline, channelmux_input_dict, frame_duration = 64, frames_per_file = 1, compression_scheme = 6, compression_level = 3)
+	head = calibration_parts.mkadder(pipeline, calibration_parts.list_srcs(pipeline, head, smooth))
+	#mux = pipeparts.mkgeneric(pipeline, mux, "splitcounter", name = "sum")
+	#head = calibration_parts.mkgate(pipeline, smooth, head, 0)
+	pipeparts.mknxydumpsink(pipeline, head, "%s_out.txt" % name)
+	#pipeparts.mkframecppfilesink(pipeline, mux, frame_type = "H1DCS", instrument = "H1")
+
+	#
+	# done
+	#
+	
+	return pipeline
+	
+#
+# =============================================================================
+#
+#				     Main
+#
+# =============================================================================
+#
+
+test_common.build_and_run(fill_silence_test_01, "fill_silence_test_01")
+
+