diff --git a/gstlal-calibration/bin/gstlal_compute_strain b/gstlal-calibration/bin/gstlal_compute_strain
index 6ce335b62a44e6b91e86af505343d4c375447853..3f5b0c763405a34c82aa412db633782cbe5158c2 100755
--- a/gstlal-calibration/bin/gstlal_compute_strain
+++ b/gstlal-calibration/bin/gstlal_compute_strain
@@ -293,9 +293,9 @@ expected_kappapum_imag = float(TDCFConfigs["expectedkappapumimag"])
 expected_kappauim_real = float(TDCFConfigs["expectedkappauimreal"])
 expected_kappauim_imag = float(TDCFConfigs["expectedkappauimimag"])
 expected_kappac = float(TDCFConfigs["expectedkappac"])
-expected_fcc = float(TDCFConfigs["expectedfcc"])
-expected_fs = float(TDCFConfigs["expectedfs"])
-expected_srcq = float(TDCFConfigs["expectedsrcq"])
+expected_fcc = float(TDCFConfigs["expectedfcc"]) if "expectedfcc" in TDCFConfigs else 0.0
+expected_fs = float(TDCFConfigs["expectedfs"]) if "expectedfs" in TDCFConfigs else 0.0
+expected_srcq = float(TDCFConfigs["expectedsrcq"]) if "expectedsrcq" in TDCFConfigs else 0.0
 kappatst_real_var = float(TDCFConfigs["kappatstrealvar"])
 kappatst_imag_var = float(TDCFConfigs["kappatstimagvar"])
 kappapu_real_var = float(TDCFConfigs["kappapurealvar"])
@@ -489,12 +489,18 @@ try:
 	fcc_default = float(filters["fcc"])
 except:
 	fcc_default = expected_fcc
+	if fcc_default == 0.0:
+		print("Warning: Could not find expected fcc in filters file or config file.  Setting to zero.")
 try:
 	fs_default = float(filters["fs"])
 	srcQ_default = float(filters["srcQ"])
 except:
 	fs_default = expected_fs
+	if fs_default == 0:
+		print("Warning: Could not find expected fs in filters file or config file.  Setting to zero.")
 	srcQ_default = expected_srcq
+	if srcQ_default == 0:
+		print("Warning: Could not find expected srcQ in filters file or config file.  Setting to zero.")
 
 # Load kappa dewhitening factors
 if dewhitening:
@@ -972,7 +978,7 @@ if ChannelNames["witnesschannellist"] != "None":
 if compute_calib_statevector:
 	noisesub_gate_channel = ChannelNames["noisesubgatechannel"] if "noisesubgatechannel" in ChannelNames else lownoise_channel_name 
 	noisesub_gate_bitmask = int(Bitmasks["noisesubgatebitmask"]) if "noisesubgatebitmask" in Bitmasks else lownoise_bitmask
-if compute_calib_statevector and (any(line_witness_channel_list) or any(witness_channel_list)) and noisesub_gate_channel != obsintent_channel_name and noisesub_gate_channel != lownoise_channel_name and noisesub_gate_channel != hwinj_channel_name and noisesub_gate_bitmask > 0 and noisesub_gate_channel != "None":
+if compute_calib_statevector and (any(line_witness_channel_list) or any(witness_channel_list)) and noisesub_gate_channel != obsintent_channel_name and noisesub_gate_channel != lownoise_channel_name and noisesub_gate_channel != hwinj_channel_name and noisesub_gate_channel not in numpy.concatenate(filterclock_channel_list) and noisesub_gate_bitmask > 0 and noisesub_gate_channel != "None":
 	channel_list.append((instrument, noisesub_gate_channel))
 	headkeys.append("noisesubgatechannel")
 
@@ -2184,7 +2190,11 @@ if compute_calib_statevector:
 		for key in headkeys:
 			for j in range(len(filterclock_channel_list[i])):
 				if ((filterclock_channel_list[i][j] != obsintent_channel_name and filterclock_channel_list[i][j] != lownoise_channel_name) and key in filterclock_channel_list[i]):
-					filterclock_channels.append(calibration_parts.mkqueue(pipeline, pipeparts.mkcapsfilter(pipeline, pipeparts.mkgeneric(pipeline, calibration_parts.caps_and_progress(pipeline, head_dict[key], filterclock_caps[i],key), "lal_logicalundersample", required_on = filterclock_bitmask_list[i], status_out = 1), calibstate_caps)))
+					filterclock_channel = calibration_parts.caps_and_progress(pipeline, head_dict[key], filterclock_caps[i], key)
+					if filterclock_channel_list[i][j] == noisesub_gate_channel:
+						filterclock_channel = noisesub_gate_tee = pipeparts.mktee(pipeline, filterclock_channel)
+					filterclock_channel = calibration_parts.mkqueue(pipeline, pipeparts.mkcapsfilter(pipeline, pipeparts.mkgeneric(pipeline, filterclock_channel, "lal_logicalundersample", required_on = filterclock_bitmask_list[i], status_out = 1), calibstate_caps))
+					filterclock_channels.append(filterclock_channel)
 				elif (filterclock_channel_list[i][j] == obsintent_channel_name and key == "obsintent"):
 					filterclock_channels.append(calibration_parts.mkqueue(pipeline, pipeparts.mkcapsfilter(pipeline, pipeparts.mkgeneric(pipeline, obsintenttee, "lal_logicalundersample", required_on = pow(2,obs_intent_bitnum), status_out = 1), calibstate_caps)))
 				elif (filterclock_channel_list[i][j] == lownoise_channel_name and key == "lownoise"):
@@ -2570,7 +2580,7 @@ if remove_cal_lines:
 
 # Set up gating for the power mains and noise subtraction
 if compute_calib_statevector and (any(line_witness_channel_list) or any(witness_channel_list)) and noisesub_gate_bitmask > 0 and noisesub_gate_channel != "None":
-	noisesubgate = obsintentchanneltee if noisesub_gate_channel == obsintent_channel_name else lownoisechanneltee if noisesub_gate_channel == lownoise_channel_name else hwinjtee if noisesub_gate_channel == hwinj_channel_name else calibration_parts.caps_and_progress(pipeline, head_dict["noisesubgatechannel"], "audio/x-raw, format=U32LE, channels=1, channel-mask=(bitmask)0x0", noisesub_gate_channel)
+	noisesubgate = obsintentchanneltee if noisesub_gate_channel == obsintent_channel_name else lownoisechanneltee if noisesub_gate_channel == lownoise_channel_name else hwinjchanneltee if noisesub_gate_channel == hwinj_channel_name else noisesub_gate_tee if noisesub_gate_channel in numpy.concatenate(filterclock_channel_list) else calibration_parts.caps_and_progress(pipeline, head_dict["noisesubgatechannel"], "audio/x-raw, format=U32LE, channels=1, channel-mask=(bitmask)0x0", noisesub_gate_channel)
 	noisesubgate = pipeparts.mkgeneric(pipeline, noisesubgate, "lal_logicalundersample", required_on = noisesub_gate_bitmask, status_out = pow(2,noise_sub_gate_bitnum))
 	noisesubgate = pipeparts.mkcapsfilter(pipeline, noisesubgate, calibstate_caps)
 	noisesubgatetee = pipeparts.mktee(pipeline, noisesubgate)
diff --git a/gstlal-ugly/bin/Makefile.am b/gstlal-ugly/bin/Makefile.am
index f702ef57f84b07250bcadd356488f1b9d80a2d2f..8693b0f5fd46c441a9d96656ac4e34e5da3b5015 100644
--- a/gstlal-ugly/bin/Makefile.am
+++ b/gstlal-ugly/bin/Makefile.am
@@ -29,5 +29,6 @@ dist_bin_SCRIPTS = \
 	gstlal_ll_dq \
 	gstlal_condor_top \
 	gstlal_injsplitter \
+	gstlal_kafka_dag \
 	gstlal_reduce_dag \
 	gstlal_dag_run_time
diff --git a/gstlal-ugly/bin/gstlal_kafka_dag b/gstlal-ugly/bin/gstlal_kafka_dag
new file mode 100755
index 0000000000000000000000000000000000000000..38ac6760e2f981808bb0245f34fb9edf9ba550f6
--- /dev/null
+++ b/gstlal-ugly/bin/gstlal_kafka_dag
@@ -0,0 +1,218 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2018--2019  Chad Hanna, Patrick Godwin
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+
+### This program creates an HTCondor DAG that runs the persistent kafka and
+### zookeeper services used by low-latency, online gstlal_inspiral jobs; see gstlal_ll_trigger_pipe
+
+"""
+This program makes a dag for persistent kafka/zookeeper services
+"""
+
+__author__ = 'Chad Hanna <channa@caltech.edu>, Patrick Godwin <patrick.godwin@ligo.org>'
+
+#
+# import standard modules
+#
+
+import os
+from optparse import OptionParser
+
+#
+# import the modules we need to build the pipeline
+#
+
+from gstlal import dagparts
+
+#
+# configuration file templates
+#
+
+ZOOKEEPER_TEMPLATE = """
+# the directory where the snapshot is stored.
+dataDir=%s
+# the port at which the clients will connect
+clientPort=%d
+# the per-ip limit on the number of client connections (0 disables the limit)
+maxClientCnxns=%d
+"""
+
+KAFKA_TEMPLATE = """
+broker.id=0
+listeners = PLAINTEXT://%s:%d
+background.threads=100
+num.network.threads=50
+num.io.threads=80
+log.cleaner.threads=10
+socket.send.buffer.bytes=102400
+socket.receive.buffer.bytes=102400
+socket.request.max.bytes=104857600
+queued.max.requests=10000
+log.dirs=%s
+num.partitions=1
+num.recovery.threads.per.data.dir=1
+auto.create.topics.enable=true
+offsets.topic.replication.factor=1
+transaction.state.log.replication.factor=1
+transaction.state.log.min.isr=1
+log.flush.interval.ms=300000
+log.retention.ms=100000
+log.roll.ms = 1000000
+log.segment.bytes=1073741824
+log.retention.check.interval.ms=300000
+zookeeper.connect=%s
+zookeeper.connection.timeout.ms=6000
+group.initial.rebalance.delay.ms=0
+"""
+
+KAFKA_ENV_TEMPLATE = """
+KAFKA_JVM_PERFORMANCE_OPTS="-server -XX:MetaspaceSize=96m -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:G1HeapRegionSize=16M -XX:MinMetaspaceFreeRatio=50 -XX:MaxMetaspaceFreeRatio=80"
+KAFKA_HEAP_OPTS="-Xms8G -Xmx8G"
+export KAFKA_HEAP_OPTS KAFKA_JVM_PERFORMANCE_OPTS
+"""
+
+#
+# job classes
+#
+
+class ZookeeperJob(dagparts.DAGJob):
+	"""
+	A zookeeper job
+	"""
+	def __init__(self, program = "zookeeper-server-start.sh", tag_base = "zookeeper-server-start", datadir = os.path.join(dagparts.log_path(), "zookeeper"), port = 2181, maxclients = 0, universe = "local", condor_commands = {}):
+		"""
+		"""
+		dagparts.DAGJob.__init__(self, program, tag_base = tag_base, universe = universe, condor_commands = condor_commands)
+
+		try:
+			os.mkdir(datadir)
+		except OSError:
+			pass
+		with open("zookeeper.properties", "w") as f:
+			f.write(ZOOKEEPER_TEMPLATE%(datadir, port, maxclients))
+
+
+class KafkaJob(dagparts.DAGJob):
+	"""
+	A kafka job
+	"""
+	def __init__(self, program = "kafka-server-start.sh", tag_base = "kafka-server-start", logdir = os.path.join(dagparts.log_path(), "kafka"), hostname = "10.14.0.112", port = 9092, zookeeperaddr = "localhost:2181", universe = "local", condor_commands = {}):
+		"""
+		"""
+		dagparts.DAGJob.__init__(self, program, tag_base = tag_base, universe = universe, condor_commands = condor_commands)
+
+		try:
+			os.mkdir(logdir)
+		except OSError:
+			pass
+		with open("kafka.properties", "w") as f:
+			f.write(KAFKA_TEMPLATE%(hostname, port, logdir, zookeeperaddr))
+
+
+#
+# Parse the command line
+#
+
+
+def parse_command_line():
+	parser = OptionParser(description = __doc__)
+
+	parser.add_option("--condor-command", action = "append", default = [], metavar = "command=value", help = "set condor commands of the form command=value; can be given multiple times")
+	parser.add_option("--zookeeper-port", type = "int", metavar = "number", help = "Set the zookeeper port. default: 2181", default = 2181)
+	parser.add_option("--kafka-hostname", metavar = "hostname", help = "Set the hostname on which kafka/zookeeper will be running.")
+	parser.add_option("--kafka-port", type = "int", metavar = "number", help = "Set the kafka port. default: 9092", default = 9092)
+	parser.add_option("--condor-universe", default = "local", metavar = "universe", help = "set the condor universe to run jobs in the DAG; options are local/vanilla, default: local")
+	parser.add_option("--condor-accounting-user", metavar = "user", help = "set the condor accounting user.")
+	parser.add_option("--condor-accounting-group", metavar = "group", help = "set the condor accounting group.")
+
+	options, filenames = parser.parse_args()
+
+	return options, filenames
+
+
+#
+# MAIN
+#
+
+
+options, filenames = parse_command_line()
+
+print('making logs directory...')
+try:
+	os.mkdir("logs")
+except OSError:
+	pass
+dag = dagparts.DAG("kafka_broker")
+
+#
+# setup kafka/zookeeper jobs and nodes
+#
+
+condor_options = {
+	"accounting_group_user": options.condor_accounting_user,
+	"accounting_group": options.condor_accounting_group,
+	"want_graceful_removal": "True",
+	"kill_sig": "15"
+}
+
+if options.condor_universe == 'vanilla':
+	condor_options.update({
+		"request_memory": "10GB",
+		"request_cpus": 2,
+	})
+condor_commands = dagparts.condor_command_dict_from_opts(options.condor_command, condor_options)
+
+zookeeper_job = ZookeeperJob(
+	"zookeeper-server-start.sh",
+	tag_base = "zookeeper-server-start",
+	condor_commands = condor_commands,
+	port = options.zookeeper_port
+)
+kafka_job = KafkaJob(
+	"kafka-server-start.sh",
+	tag_base = "kafka-server-start",
+	condor_commands = condor_commands,
+	hostname = options.kafka_hostname,
+	port = options.kafka_port,
+	zookeeperaddr = "localhost:%d" % options.zookeeper_port
+)
+
+zookeeper_node = dagparts.DAGNode(zookeeper_job, dag, [], opts = {"":"zookeeper.properties"})
+kafka_node = dagparts.DAGNode(kafka_job, dag, [], opts = {"":"kafka.properties"})
+
+#
+# Write out the dag and other files
+#
+
+print('making sub files...')
+dag.write_sub_files()
+
+# we probably want these jobs to retry indefinitely on dedicated nodes so that a user
+# can intervene and fix a problem without having to bring the DAG down and back up.
+[node.set_retry(10000) for node in dag.get_nodes()]
+
+print('making DAG...')
+dag.write_dag()
+dag.write_script()
+
+print('making env script...')
+with open('kafka.env', 'w') as f:
+	f.write(KAFKA_ENV_TEMPLATE)
+
+
+print('source kafka env with: source kafka.env')
+print('launch DAG with: condor_submit_dag kafka_broker.dag')
diff --git a/gstlal/lib/gstlal/gstlal.c b/gstlal/lib/gstlal/gstlal.c
index 2060489408dfdd85a637f05de92fceea8025d40b..5f27915a7316459a3fbb93f0c7e2e8ed6d0ed8e7 100644
--- a/gstlal/lib/gstlal/gstlal.c
+++ b/gstlal/lib/gstlal/gstlal.c
@@ -225,6 +225,73 @@ GValueArray *gstlal_g_value_array_from_uint64s(const guint64 *src, gint n)
 }
 
 
+/**
+ * gstlal_floats_from_g_value_array:
+ * @va: the #GValueArray from which to copy the elements
+ * @dest:  address of memory large enough to hold elements, or %NULL
+ * @n:  address of integer that will be set to the number of elements, or
+ * %NULL
+ *
+ * Convert a #GValueArray of floats to an array of floats.  If @dest is
+ * %NULL then new memory will be allocated otherwise the floats are copied
+ * into the memory pointed to by @dest, which must be large enough to hold
+ * them.  If memory is allocated by this function, free with g_free().  If
+ * @n is not %NULL it is set to the number of elements in the array.
+ *
+ * Returns: @dest or the address of the newly-allocated memory on success,
+ * or %NULL on failure.
+ */
+
+
+gfloat *gstlal_floats_from_g_value_array(GValueArray *va, gfloat *dest, gint *n)
+{
+	guint i;
+
+	if(!va)
+		return NULL;
+	if(!dest)
+		dest = g_new(gfloat, va->n_values);
+	if(!dest)
+		return NULL;
+	if(n)
+		*n = va->n_values;
+	for(i = 0; i < va->n_values; i++)
+		dest[i] = g_value_get_float(g_value_array_get_nth(va, i));
+	return dest;
+}
+
+
+/**
+ * gstlal_g_value_array_from_floats:
+ * @src:  start of array
+ * @n: number of elements in array
+ *
+ * Build a #GValueArray from an array of floats.
+ *
+ * Returns: newly-allocated #GValueArray object or %NULL on failure.
+ */
+
+
+GValueArray *gstlal_g_value_array_from_floats(const gfloat *src, gint n)
+{
+	GValueArray *va;
+	GValue v = G_VALUE_INIT;
+	gint i;
+	g_value_init(&v, G_TYPE_FLOAT);
+
+	if(!src)
+		return NULL;
+	va = g_value_array_new(n);
+	if(!va)
+		return NULL;
+	for(i = 0; i < n; i++) {
+		g_value_set_float(&v, src[i]);
+		g_value_array_append(va, &v);
+	}
+	return va;
+}
+
+
 /**
  * gstlal_doubles_from_g_value_array:
  * @va: the #GValueArray from which to copy the elements
@@ -331,6 +398,46 @@ GValueArray *gstlal_g_value_array_from_gsl_vector_int(const gsl_vector_int *vect
 }
 
 
+/**
+ * gstlal_gsl_vector_float_from_g_value_array:
+ * @va:  #GValueArray of floats
+ *
+ * Build a #gsl_vector_float from a #GValueArray of floats.
+ *
+ * Returns:  the newly-allocated #gsl_vector_float or %NULL on failure.
+ */
+
+
+gsl_vector_float *gstlal_gsl_vector_float_from_g_value_array(GValueArray *va)
+{
+	gsl_vector_float *vector = gsl_vector_float_alloc(va->n_values);
+	if(!vector)
+		return NULL;
+	if(!gstlal_floats_from_g_value_array(va, gsl_vector_float_ptr(vector, 0), NULL)) {
+		gsl_vector_float_free(vector);
+		return NULL;
+	}
+	return vector;
+}
+
+
+/**
+ * gstlal_g_value_array_from_gsl_vector_float:
+ * @vector:  #gsl_vector_float
+ *
+ * Build a #GValueArray of floats from a #gsl_vector_float.
+ *
+ * Returns:  the newly-allocated #GValueArray of floats or %NULL on
+ * failure.
+ */
+
+
+GValueArray *gstlal_g_value_array_from_gsl_vector_float(const gsl_vector_float *vector)
+{
+	return gstlal_g_value_array_from_floats(gsl_vector_float_const_ptr(vector, 0), vector->size);
+}
+
+
 /**
  * gstlal_gsl_vector_from_g_value_array:
  * @va:  #GValueArray of doubles
@@ -550,19 +657,50 @@ gsl_matrix_ulong *gstlal_gsl_matrix_ulong_from_g_value_array(GValueArray *va)
 	return matrix;
 }
 
+
 /**
- * gstlal_gsl_matrix_from_g_value_array:
- * @va:  #GValueArray of #GValueArrays of double
+ * gstlal_g_value_array_from_gsl_matrix_ulong:
+ * @matrix: a #gsl_matrix_ulong
  *
- * Build a #gsl_matrix from a #GValueArray of #GValueArrays of doubles.
+ * Build a #GValueArray of #GValueArrays of #guint64 from a
+ * #gsl_matrix_ulong.
  *
- * Returns:  the newly-allocated #gsl_matrix or %NULL on failure.
+ * Returns:  the newly-allocated #GValueArray of newly-allocated
+ * #GValueArrays of #guint64s or %NULL on failure.
  */
 
 
-gsl_matrix *gstlal_gsl_matrix_from_g_value_array(GValueArray *va)
+GValueArray *gstlal_g_value_array_from_gsl_matrix_ulong(const gsl_matrix_ulong *matrix)
 {
-	gsl_matrix *matrix;
+	GValueArray *va;
+	GValue v = G_VALUE_INIT;
+	guint i;
+	g_value_init(&v, G_TYPE_VALUE_ARRAY);
+
+	va = g_value_array_new(matrix->size1);
+	if(!va)
+		return NULL;
+	for(i = 0; i < matrix->size1; i++) {
+		g_value_take_boxed(&v, gstlal_g_value_array_from_uint64s((guint64*) gsl_matrix_ulong_const_ptr(matrix, i, 0), matrix->size2));
+		g_value_array_append(va, &v);
+	}
+	return va;
+}
+
+
+/**
+ * gstlal_gsl_matrix_float_from_g_value_array:
+ * @va:  #GValueArray of #GValueArrays of float
+ *
+ * Build a #gsl_matrix_float from a #GValueArray of #GValueArrays of floats.
+ *
+ * Returns:  the newly-allocated #gsl_matrix_float or %NULL on failure.
+ */
+
+
+gsl_matrix_float *gstlal_gsl_matrix_float_from_g_value_array(GValueArray *va)
+{
+	gsl_matrix_float *matrix;
 	GValueArray *row;
 	guint rows, cols;
 	guint i;
@@ -572,29 +710,29 @@ gsl_matrix *gstlal_gsl_matrix_from_g_value_array(GValueArray *va)
 	rows = va->n_values;
 	if(!rows)
 		/* 0x0 matrix */
-		return gsl_matrix_alloc(0, 0);
+		return gsl_matrix_float_alloc(0, 0);
 
 	row = g_value_get_boxed(g_value_array_get_nth(va, 0));
 	cols = row->n_values;
-	matrix = gsl_matrix_alloc(rows, cols);
+	matrix = gsl_matrix_float_alloc(rows, cols);
 	if(!matrix)
 		/* allocation failure */
 		return NULL;
-	if(!gstlal_doubles_from_g_value_array(row, gsl_matrix_ptr(matrix, 0, 0), NULL)) {
+	if(!gstlal_floats_from_g_value_array(row, gsl_matrix_float_ptr(matrix, 0, 0), NULL)) {
 		/* row conversion failure */
-		gsl_matrix_free(matrix);
+		gsl_matrix_float_free(matrix);
 		return NULL;
 	}
 	for(i = 1; i < rows; i++) {
 		row = g_value_get_boxed(g_value_array_get_nth(va, i));
 		if(row->n_values != cols) {
 			/* one of the rows has the wrong number of columns */
-			gsl_matrix_free(matrix);
+			gsl_matrix_float_free(matrix);
 			return NULL;
 		}
-		if(!gstlal_doubles_from_g_value_array(row, gsl_matrix_ptr(matrix, i, 0), NULL)) {
+		if(!gstlal_floats_from_g_value_array(row, gsl_matrix_float_ptr(matrix, i, 0), NULL)) {
 			/* row conversion failure */
-			gsl_matrix_free(matrix);
+			gsl_matrix_float_free(matrix);
 			return NULL;
 		}
 	}
@@ -604,18 +742,17 @@ gsl_matrix *gstlal_gsl_matrix_from_g_value_array(GValueArray *va)
 
 
 /**
- * gstlal_g_value_array_from_gsl_matrix_ulong:
- * @matrix: a #gsl_matrix_ulong
+ * gstlal_g_value_array_from_gsl_matrix_float:
+ * @matrix: a #gsl_matrix_float
  *
- * Build a #GValueArray of #GValueArrays of #guin64 from a
- * #gsl_matrix_ulong.
+ * Build a #GValueArray of #GValueArrays of floats from a #gsl_matrix_float.
  *
  * Returns:  the newly-allocated #GValueArray of newly-allocated
- * #GValueArrays of #guint64s or %NULL on failure.
+ * #GValueArrays of floats or %NULL on failure.
  */
 
 
-GValueArray *gstlal_g_value_array_from_gsl_matrix_ulong(const gsl_matrix_ulong *matrix)
+GValueArray *gstlal_g_value_array_from_gsl_matrix_float(const gsl_matrix_float *matrix)
 {
 	GValueArray *va;
 	GValue v = G_VALUE_INIT;
@@ -626,13 +763,66 @@ GValueArray *gstlal_g_value_array_from_gsl_matrix_ulong(const gsl_matrix_ulong *
 	if(!va)
 		return NULL;
 	for(i = 0; i < matrix->size1; i++) {
-		g_value_take_boxed(&v, gstlal_g_value_array_from_uint64s((guint64*) gsl_matrix_ulong_const_ptr(matrix, i, 0), matrix->size2));
+		g_value_take_boxed(&v, gstlal_g_value_array_from_floats(gsl_matrix_float_const_ptr(matrix, i, 0), matrix->size2));
 		g_value_array_append(va, &v);
 	}
 	return va;
 }
 
 
+/**
+ * gstlal_gsl_matrix_from_g_value_array:
+ * @va:  #GValueArray of #GValueArrays of double
+ *
+ * Build a #gsl_matrix from a #GValueArray of #GValueArrays of doubles.
+ *
+ * Returns:  the newly-allocated #gsl_matrix or %NULL on failure.
+ */
+
+
+gsl_matrix *gstlal_gsl_matrix_from_g_value_array(GValueArray *va)
+{
+	gsl_matrix *matrix;
+	GValueArray *row;
+	guint rows, cols;
+	guint i;
+
+	if(!va)
+		return NULL;
+	rows = va->n_values;
+	if(!rows)
+		/* 0x0 matrix */
+		return gsl_matrix_alloc(0, 0);
+
+	row = g_value_get_boxed(g_value_array_get_nth(va, 0));
+	cols = row->n_values;
+	matrix = gsl_matrix_alloc(rows, cols);
+	if(!matrix)
+		/* allocation failure */
+		return NULL;
+	if(!gstlal_doubles_from_g_value_array(row, gsl_matrix_ptr(matrix, 0, 0), NULL)) {
+		/* row conversion failure */
+		gsl_matrix_free(matrix);
+		return NULL;
+	}
+	for(i = 1; i < rows; i++) {
+		row = g_value_get_boxed(g_value_array_get_nth(va, i));
+		if(row->n_values != cols) {
+			/* one of the rows has the wrong number of columns */
+			gsl_matrix_free(matrix);
+			return NULL;
+		}
+		if(!gstlal_doubles_from_g_value_array(row, gsl_matrix_ptr(matrix, i, 0), NULL)) {
+			/* row conversion failure */
+			gsl_matrix_free(matrix);
+			return NULL;
+		}
+	}
+
+	return matrix;
+}
+
+
 /**
  * gstlal_g_value_array_from_gsl_matrix:
  * @matrix: a #gsl_matrix
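
For reference, a minimal usage sketch of the new float helpers added above (assuming the usual <gstlal/gstlal.h> include path; round_trip_example() is a hypothetical illustration, not part of the patch): it packs a gsl_matrix_float into a GValueArray of GValueArrays of floats, as one would when exposing a float matrix as a boxed element property, and unpacks it again.

#include <gsl/gsl_matrix_float.h>
#include <gstlal/gstlal.h>

static void round_trip_example(void)
{
	/* build a small float matrix */
	gsl_matrix_float *m = gsl_matrix_float_calloc(2, 3);
	gsl_matrix_float_set(m, 0, 1, 0.5);

	/* pack it into a GValueArray of GValueArrays of floats,
	 * e.g. to hand to g_object_set() as a boxed property value */
	GValueArray *va = gstlal_g_value_array_from_gsl_matrix_float(m);

	/* unpack it again into a newly-allocated gsl_matrix_float */
	gsl_matrix_float *copy = gstlal_gsl_matrix_float_from_g_value_array(va);

	g_value_array_free(va);
	gsl_matrix_float_free(copy);
	gsl_matrix_float_free(m);
}
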
diff --git a/gstlal/lib/gstlal/gstlal.h b/gstlal/lib/gstlal/gstlal.h
index ecf180b29a175d73032790a6531e54cf74eec4b8..bace696a972e7473b340f82a9d1832ae5a0c8a40 100644
--- a/gstlal/lib/gstlal/gstlal.h
+++ b/gstlal/lib/gstlal/gstlal.h
@@ -27,7 +27,9 @@
 #include <gst/gst.h>
 #include <gst/audio/audio.h>
 #include <gsl/gsl_vector.h>
+#include <gsl/gsl_vector_float.h>
 #include <gsl/gsl_matrix.h>
+#include <gsl/gsl_matrix_float.h>
 
 
 #include <lal/LALDatatypes.h>
@@ -93,6 +95,15 @@ GValueArray *gstlal_g_value_array_from_gsl_matrix_ulong(const gsl_matrix_ulong *
 GValueArray *gstlal_g_value_array_from_uint64s(const guint64 *src, gint n);
 
 
+/* float type */
+GValueArray *gstlal_g_value_array_from_floats(const gfloat *src, gint n);
+gfloat *gstlal_floats_from_g_value_array(GValueArray *va, gfloat *dest, gint *n);
+gsl_vector_float *gstlal_gsl_vector_float_from_g_value_array(GValueArray *va);
+GValueArray *gstlal_g_value_array_from_gsl_vector_float(const gsl_vector_float *vector);
+gsl_matrix_float *gstlal_gsl_matrix_float_from_g_value_array(GValueArray *va);
+GValueArray *gstlal_g_value_array_from_gsl_matrix_float(const gsl_matrix_float *matrix);
+
+
 /* double type */
 GValueArray *gstlal_g_value_array_from_doubles(const gdouble *src, gint n);
 gdouble *gstlal_doubles_from_g_value_array(GValueArray *va, gdouble *dest, gint *n);