diff --git a/bin/gstlal_compute_strain b/bin/gstlal_compute_strain
index 19e269fa12a739d24c43e8feb496b2650518b423..41abc922306a9ac7b3cd40b0f04205ebda317fe2 100755
--- a/bin/gstlal_compute_strain
+++ b/bin/gstlal_compute_strain
@@ -1104,7 +1104,7 @@ if(test_latency or InputConfigs["datasource"] == "lvshm" and kafka_server is not
 	pipeparts.mkfakesink(pipeline, src_latency)
 
 if ci_latency_dir is not None:
-	src = pipeparts.mkgeneric(pipeline, src, "splitcounter", filename = "%s/input_unit_timestamps.txt" % ci_latency_dir)
+	src = pipeparts.mkgeneric(pipeline, src, "splitcounter", filename = "%s/input_unix_timestamps.txt" % ci_latency_dir)
 
 #
 # Hook up the relevant channels to the demuxer
diff --git a/tests/tests_pytest/act2darm_timeseries.py b/tests/tests_pytest/act2darm_timeseries.py
index f1c74922b351acfc7f5d0230d746b74d295ed14d..4f2c84f0fbcd7159f4b6b0aa11fed2d722a51cd1 100755
--- a/tests/tests_pytest/act2darm_timeseries.py
+++ b/tests/tests_pytest/act2darm_timeseries.py
@@ -31,8 +31,6 @@ import resource
 import datetime
 import glob
 
-import configparser
-
 import gi
 gi.require_version('Gst', '1.0')
 from gi.repository import GObject, Gst
@@ -54,31 +52,19 @@ from gstlalcalibration import test_common
 from tests.tests_pytest.error import rms
 from tests.tests_pytest.plot import plot_act
 
-from optparse import OptionParser, Option
-import configparser
-
-parser = OptionParser()
-
-parser.add_option("--gps-start-time", metavar = "seconds", type = int, default = 1370674240, help = "GPS time at which to start processing data")
-parser.add_option("--gps-end-time", metavar = "seconds", type = int, default = 1370678400, help = "GPS time at which to stop processing data")
-parser.add_option("--ifo", metavar = "name", default = "H1", help = "Name of the interferometer (IFO), e.g., H1, L1")
-
-options, filenames = parser.parse_args()
 
-ifo = options.ifo
+gps_start_time = 1404304248
+gps_end_time = 1404308216
+ifo = 'H1'
 
 
-#ifo = 'H1'
-
 #
 # Load in the filters file that contains filter coefficients, etc.
 #
 
-#filters = numpy.load("tests/tests_pytest/filters/gstlal_compute_strain_C00_filters_H1.npz")
-filters = numpy.load("tests/tests_pytest/filters/gstlal_compute_strain_C00_filters_H1_20230613_ci.npz")
+filters = numpy.load("tests/tests_pytest/filters/gstlal_compute_strain_C00_filters_H1_20240330T211519Z.npz")
 
 # Set up gstlal frame cache list
-#gstlal_frame_cache_list = ['tests/tests_pytest/filters/GDS_Approx_frames.cache', 'tests/tests_pytest/filters/GDS_Exact_frames.cache']
 gstlal_frame_cache_list = ['tests/tests_pytest/GDS_Approx_frames.cache', 'tests/tests_pytest/GDS_Exact_frames.cache']
 gstlal_channels = ['GDS-CALIB_STRAIN', 'GDS-CALIB_STRAIN']
 
@@ -116,7 +102,7 @@ except:
 
 # demodulation and averaging parameters
 filter_time = 20
-average_time = 128
+median_time = 128
 rate_out = 1
 
 #
@@ -159,10 +145,10 @@ def act2darm(pipeline, name):
                         demodulated_deltal = calibration_parts.demodulate(pipeline, deltal, frequencies[i], True, rate_out, filter_time, 0.5)
 		        # Take ratio \DeltaL(f) / act(f)
                         deltaL_over_act = calibration_parts.complex_division(pipeline, demodulated_deltal, demodulated_act_list[i])
-			# Take a running average
-                        deltaL_over_act = pipeparts.mkgeneric(pipeline, deltaL_over_act, "lal_smoothkappas", array_size = int(rate_out * average_time), no_default = True, filter_latency = 0.5)
-			# The first samples are not averaged.  Remove only half, since sometimes early data is important.
-                        deltaL_over_act = calibration_parts.mkinsertgap(pipeline, deltaL_over_act, insert_gap = False, chop_length = 500000000 * average_time)
+			# Take a running median
+                        deltaL_over_act = pipeparts.mkgeneric(pipeline, deltaL_over_act, "lal_smoothkappas", array_size = int(rate_out * median_time), no_default = True, filter_latency = 0.5)
+			# The first samples do not use a full median window.  Remove only half of them, since early data is sometimes important.
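+			# (chop_length is in nanoseconds: 500000000 ns * median_time removes half of the median window)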
+                        deltaL_over_act = calibration_parts.mkinsertgap(pipeline, deltaL_over_act, insert_gap = False, chop_length = 500000000 * median_time)
 			# Find the magnitude
                         deltaL_over_act = pipeparts.mktee(pipeline, deltaL_over_act)
                         magnitude = pipeparts.mkgeneric(pipeline, deltaL_over_act, "cabs")
@@ -190,30 +176,10 @@ def act2darm(pipeline, name):
 # =============================================================================
 #
 
-#test_common.build_and_run(act2darm, "act2darm")
-#class TestAct2darm:
-#        """act2darm test class"""
 
-#        def test_act2darm(self):
-#                """Test act2darm"""
 def Act2darm():
-    test_common.build_and_run(act2darm, "act2darm", segment = segments.segment((LIGOTimeGPS(0, 1000000000 * options.gps_start_time), LIGOTimeGPS(0, 1000000000 * options.gps_end_time))))
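+    # LIGOTimeGPS takes (seconds, nanoseconds), hence the factor of 1e9 on the GPS times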
+    test_common.build_and_run(act2darm, "act2darm", segment = segments.segment((LIGOTimeGPS(0, 1000000000 * gps_start_time), LIGOTimeGPS(0, 1000000000 * gps_end_time))))
     rms('A')
     rms('A', 'E')
     plot_act()
 
-#test_common.build_and_run(act2darm, "act2darm", segment = segments.segment((LIGOTimeGPS(0, 1000000000 * options.gps_start_time), LIGOTimeGPS(0, 1000000000 * options.gps_end_time))))
-#rms('A')
-#rms('A', 'E')
-#plot_act
-
-#for i in range(len(gstlal_frame_cache_list)):
-#	for j in range(len(frequencies)):
-#	       standard_data = numpy.loadtxt("%s_%s_over_Act_standard_%d_at_%0.1fHz.txt" % (ifo, channel.replace(' ', '_'), cache_num, frequencies[i]))
-#
-#frequencies.sort()
-
-#rms('A')
-#rms('A', 'E')
-#plot_pcal('A')
-#plot_pcal('E')
diff --git a/tests/tests_pytest/error.py b/tests/tests_pytest/error.py
index 2488e74a07f763cacac16cf4032f20dd70a6d134..2b0391e2780c34550ad562c32a08ae5ff10ee72b 100644
--- a/tests/tests_pytest/error.py
+++ b/tests/tests_pytest/error.py
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# Copyright (C) 2018  Aaron Viets, Alexander Bartoletti
+# Copyright (C) 2023  Alexander Bartoletti
 #
 # This program is free software; you can redistribute it and/or modify it
 # under the terms of the GNU General Public License as published by the
diff --git a/tests/tests_pytest/latency.py b/tests/tests_pytest/latency.py
index 0749b980b579e24724200a6e60445a8d39dfbc9f..5106bc073419484c4d18f2a232106fea235b269c 100644
--- a/tests/tests_pytest/latency.py
+++ b/tests/tests_pytest/latency.py
@@ -1,8 +1,10 @@
+
 import numpy as np
+import sys
 
 def Latency():
-    inp = np.loadtxt('input_unix_timestamps.txt')
-    outp = np.loadtxt('output_unix_timestamps.txt')
+    inp = np.loadtxt('tests/tests_pytest/input_unix_timestamps.txt')
+    outp = np.loadtxt('tests/tests_pytest/output_unix_timestamps.txt')
         
     inp = np.transpose(inp)
     outp = np.transpose(outp)
@@ -12,13 +14,34 @@ def Latency():
     i_gps = inp[0]
     o_gps = outp[0]
 
-    e_file = open('tests/tests_pytest/error_results.txt', 'a')
-    
     i_lst = [list(i_gps).index(x) for x in list(np.intersect1d(i_gps,o_gps))]   # indices of input timestamps that also appear in the output; keep only these
     lat = o_real - i_real[i_lst]
-    e_file.write("Median latency = %f" % np.median(lat))
+
+    # Drop the first 100 latency samples to allow pipeline startup processes to settle
+    lat = lat[100:]
+    # Check for an increasing trend
+    var = lat - np.mean(lat)
+    # lat_increase estimates the latency increase in seconds from start to finish
+    lat_increase = 0.0
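+    # (approximately the least-squares linear-trend slope of lat multiplied by the number of
+    # samples, since sum((i - (n-1)/2)^2) = n(n^2-1)/12 ~ n^3/12)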
+    for i in range(len(var)):
+        lat_increase += var[i] * 12 * (i - (len(var) - 1) / 2.0) / len(var) / len(var)
+
+    e_file = open('tests/tests_pytest/error_results.txt', 'a')
+    e_file.write("Mean latency = %fs\n" % np.mean(lat))
+    e_file.write("Max latency = %fs\n" % max(lat))
+    e_file.write("%d of %d frames with latency over 6s\n" % (sum(i > 6 for i in lat), len(lat)))
+    e_file.write("Latency increased by %fs over %fs of data\n" % (lat_increase, o_gps[-1] - o_gps[0]))
     e_file.close()
 
-    assert np.median(lat) < 6
-    #assert np.percentile(lat, 99) < 10
-    #assert np.max(lat) < 10
+    print("Mean latency = {}s".format(np.mean(lat)), file = sys.stderr)
+    print("Max latency = {}s".format(max(lat)), file = sys.stderr)
+    print("{} of {} frames with latency over 6s".format(sum(i > 6 for i in lat), len(lat)), file = sys.stderr)
+    print("Latency increased by {}s over {}s of data".format(lat_increase, o_gps[-1] - o_gps[0]), file = sys.stderr)
+
+    assert np.mean(lat) < 5
+    # There should be no large excursions (This may occasionally fail)
+    assert max(lat) < 10
+    # There should not be numerous small excursions
+    assert sum(i > 6 for i in lat) < 10
+    assert lat_increase < 0.5
+
diff --git a/tests/tests_pytest/pcal2darm_timeseries.py b/tests/tests_pytest/pcal2darm_timeseries.py
index 61ee4b7e51899e9c35a964da11cca0bb85162c55..8640d87b4701978bc55b2643bb6203ed49c912c1 100755
--- a/tests/tests_pytest/pcal2darm_timeseries.py
+++ b/tests/tests_pytest/pcal2darm_timeseries.py
@@ -31,8 +31,6 @@ import resource
 import datetime
 import glob
 
-import configparser
-
 import gi
 gi.require_version('Gst', '1.0')
 from gi.repository import GObject, Gst
@@ -52,31 +50,18 @@ from gstlalcalibration import test_common
 from tests.tests_pytest.error import rms
 from tests.tests_pytest.plot import plot_pcal
 
-from optparse import OptionParser, Option
-import configparser
-
-parser = OptionParser()
-
-parser.add_option("--gps-start-time", metavar = "seconds", type = int, default = 1370674240, help = "GPS time at which to start processing data")
-parser.add_option("--gps-end-time", metavar = "seconds", type = int, default = 1370678400, help = "GPS time at which to stop processing data")
-parser.add_option("--ifo", metavar = "name", default = "H1", help = "Name of the interferometer (IFO), e.g., H1, L1")
-
-options, filenames = parser.parse_args()
 
-ifo = options.ifo
-
-
-#ifo = 'H1'
+gps_start_time = 1404304248
+gps_end_time = 1404308216
+ifo = 'H1'
 
 #
 # Load in the filters file that contains filter coefficients, etc.
 #
 
-#filters = numpy.load("tests/tests_pytest/filters/gstlal_compute_strain_C00_filters_H1.npz")
-filters = numpy.load("tests/tests_pytest/filters/gstlal_compute_strain_C00_filters_H1_20230613_ci.npz")
+filters = numpy.load("tests/tests_pytest/filters/gstlal_compute_strain_C00_filters_H1_20240330T211519Z.npz")
 
 # Set up gstlal frame cache list
-#gstlal_frame_cache_list = ['tests/tests_pytest/filters/GDS_Approx_frames.cache', 'tests/tests_pytest/filters/GDS_Exact_frames.cache']
 gstlal_frame_cache_list = ['tests/tests_pytest/GDS_Approx_frames.cache', 'tests/tests_pytest/GDS_Exact_frames.cache']
 gstlal_channels = ['GDS-CALIB_STRAIN', 'GDS-CALIB_STRAIN']
 
@@ -137,7 +122,7 @@ except:
 
 # demodulation and averaging parameters
 filter_time = 20
-average_time = 128
+median_time = 128
 rate_out = 1
 
 #
@@ -189,10 +174,10 @@ def pcal2darm(pipeline, name):
 			demodulated_deltal = calibration_parts.demodulate(pipeline, deltal, all_frequencies[i], True, rate_out, filter_time, 0.5)
 			# Take ratio \DeltaL(f) / pcal(f)
 			deltaL_over_pcal = calibration_parts.complex_division(pipeline, demodulated_deltal, demodulated_pcal_list[i])
-			# Take a running average
-			deltaL_over_pcal = pipeparts.mkgeneric(pipeline, deltaL_over_pcal, "lal_smoothkappas", array_size = int(rate_out * average_time), no_default = True, filter_latency = 0.5)
-			# The first samples are not averaged.  Remove only half, since sometimes early data is important.
-			deltaL_over_pcal = calibration_parts.mkinsertgap(pipeline, deltaL_over_pcal, insert_gap = False, chop_length = 500000000 * average_time)
+			# Take a running median
+			deltaL_over_pcal = pipeparts.mkgeneric(pipeline, deltaL_over_pcal, "lal_smoothkappas", array_size = int(rate_out * median_time), no_default = True, filter_latency = 0.5)
+			# The first samples do not use a full median window.  Remove only half of them, since early data is sometimes important.
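+			# (chop_length is in nanoseconds: 500000000 ns * median_time removes half of the median window)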
+			deltaL_over_pcal = calibration_parts.mkinsertgap(pipeline, deltaL_over_pcal, insert_gap = False, chop_length = 500000000 * median_time)
 			# Find the magnitude
 			deltaL_over_pcal = pipeparts.mktee(pipeline, deltaL_over_pcal)
 			magnitude = pipeparts.mkgeneric(pipeline, deltaL_over_pcal, "cabs")
@@ -220,9 +205,9 @@ def pcal2darm(pipeline, name):
 # =============================================================================
 #
 
-#test_common.build_and_run(pcal2darm, "pcal2darm", segment = segments.segment((LIGOTimeGPS(0, 1000000000 * options.gps_start_time), LIGOTimeGPS(0, 1000000000 * options.gps_end_time))))
 def Pcal2darm():
-    test_common.build_and_run(pcal2darm, "pcal2darm", segment = segments.segment((LIGOTimeGPS(0, 1000000000 * options.gps_start_time), LIGOTimeGPS(0, 1000000000 * options.gps_end_time))))
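+    # LIGOTimeGPS takes (seconds, nanoseconds), hence the factor of 1e9 on the GPS times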
+    test_common.build_and_run(pcal2darm, "pcal2darm", segment = segments.segment((LIGOTimeGPS(0, 1000000000 * gps_start_time), LIGOTimeGPS(0, 1000000000 * gps_end_time))))
     rms('P')
     rms('P', 'E')
     plot_pcal()
+
diff --git a/tests/tests_pytest/test_order/test_order.py b/tests/tests_pytest/test_order/test_order.py
index 16a6182d3d851c3aca99ad3b116dbec60306e08f..8e6dcb13cc2442d14547ace09c2639045ab3442b 100644
--- a/tests/tests_pytest/test_order/test_order.py
+++ b/tests/tests_pytest/test_order/test_order.py
@@ -7,7 +7,7 @@ import os
 #from tests.tests_pytest.pcal2darm_timeseries import Pcal2darm
 from tests.tests_pytest.run_calib_pipeline import Run_calib
 from tests.tests_pytest.diff import Diff
-#from tests.tests_pytest.latency import Latency
+from tests.tests_pytest.latency import Latency
 
 
 def test_Calib_Pipeline():
@@ -27,8 +27,8 @@ def test_pcal2darm():
 '''
 def test_diff():
     Diff()
-'''
+
 def test_latency():
     Latency()
-'''
+