Skip to content
Snippets Groups Projects
Commit 177bdb22 authored by Duncan Meacher's avatar Duncan Meacher
Browse files

Added ETG offline pipeline gen tool

parent 1b4c07a1
No related branches found
No related tags found
No related merge requests found
......@@ -33,4 +33,5 @@ dist_bin_SCRIPTS = \
gstlal_ll_dq \
gstlal_ll_inspiral_state \
gstlal_condor_top \
gstlal_etg
gstlal_etg \
gstlal_etg_pipe
......@@ -144,34 +144,6 @@ def phi_ql(phi_min, phi_max, q_min, q_max, mismatch = 0.2):
for l in range(int(nphi)):
yield (phi_min*(phi_max/phi_min)**((0.5+l)/nphi), q)
def parse_command_line():
	"""
	Construct the option parser for the online trigger generator and
	return the parsed (options, filenames) pair.
	"""
	parser = OptionParser(description = __doc__)

	# append the common multichannel data-source options before our own
	multichannel_datasource.append_options(parser)

	parser.add_option(
		"--out-path", metavar = "path", default = ".",
		help = "Write to this path. Default = .")
	parser.add_option(
		"--description", metavar = "string", default = "GSTLAL_IDQ_TRIGGERS",
		help = "Set the filename description in which to save the output.")
	parser.add_option(
		"--cadence", type = "int", default = 32,
		help = "Rate at which to write trigger files to disk. Default = 32 seconds.")
	parser.add_option(
		"--disable-web-service", action = "store_true",
		help = "If set, disables web service that allows monitoring of PSDS of aux channels.")
	parser.add_option(
		"-v", "--verbose", action = "store_true",
		help = "Be verbose.")
	parser.add_option(
		"--triggers-from-dataframe", action = "store_true", default = False,
		help = "If set, will output iDQ-compatible triggers to disk straight from dataframe once every cadence")
	parser.add_option(
		"-m", "--mismatch", type = "float", default = 0.2,
		help = "Mismatch between templates, mismatch = 1 - minimal match. Default = 0.2.")
	parser.add_option(
		"-q", "--qhigh", type = "float", default = 20,
		help = "Q high value for half sine-gaussian waveforms. Default = 20.")
	parser.add_option(
		"-l", "--latency", action = "store_true",
		help = "Print latency to output ascii file. Temporary.")
	parser.add_option(
		"--save-hdf", action = "store_true", default = False,
		help = "If set, will save hdf5 files to disk straight from dataframe once every cadence")

	# parse the arguments; parse_args already returns the (options,
	# filenames) tuple callers expect
	return parser.parse_args()
####################
#
# classes
......@@ -565,6 +537,40 @@ class LinkedAppSync(pipeparts.AppSync):
self.appsinks[elem] = None
self.appsink_new_buffer(elem, self.sink_dict)
###############################
#
# command line parser
#
###############################
def parse_command_line():
	"""
	Parse the command line arguments for the online gstlal_etg trigger
	generator.

	Returns:
		the (options, filenames) tuple produced by
		OptionParser.parse_args()
	"""
	parser = OptionParser(description = __doc__)
	#
	# First append the datasource common options
	#
	multichannel_datasource.append_options(parser)
	parser.add_option("--out-path", metavar = "path", default = ".", help = "Write to this path. Default = .")
	parser.add_option("--description", metavar = "string", default = "GSTLAL_IDQ_TRIGGERS", help = "Set the filename description in which to save the output.")
	parser.add_option("--cadence", type = "int", default = 32, help = "Rate at which to write trigger files to disk. Default = 32 seconds.")
	parser.add_option("--disable-web-service", action = "store_true", help = "If set, disables web service that allows monitoring of PSDS of aux channels.")
	parser.add_option("-v", "--verbose", action = "store_true", help = "Be verbose.")
	parser.add_option("--triggers-from-dataframe", action = "store_true", default = False, help = "If set, will output iDQ-compatible triggers to disk straight from dataframe once every cadence")
	parser.add_option("-m", "--mismatch", type = "float", default = 0.2, help = "Mismatch between templates, mismatch = 1 - minimal match. Default = 0.2.")
	parser.add_option("-q", "--qhigh", type = "float", default = 20, help = "Q high value for half sine-gaussian waveforms. Default = 20.")
	parser.add_option("-l", "--latency", action = "store_true", help = "Print latency to output ascii file. Temporary.")
	parser.add_option("--save-hdf", action = "store_true", default = False, help = "If set, will save hdf5 files to disk straight from dataframe once every cadence")
	#
	# parse the arguments and sanity check
	#
	options, filenames = parser.parse_args()
	return options, filenames
####################
#
......
#!/usr/bin/env python
#
# Copyright (C) 2011-2017 Chad Hanna, Duncan Meacher
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This program makes a dag to run gstlal_etg offline
"""
__author__ = 'Duncan Meacher <duncan.meacher@ligo.org>'
##############################################################################
# import standard modules and append the lalapps prefix to the python path
import sys, os, stat
import itertools
import numpy
import math
from optparse import OptionParser
##############################################################################
# import the modules we need to build the pipeline
import lal
import lal.series
from lal.utils import CacheEntry
from glue import pipeline
from glue.lal import Cache
from glue import segments
from glue.ligolw import ligolw
from glue.ligolw import lsctables
import glue.ligolw.utils as ligolw_utils
import glue.ligolw.utils.segments as ligolw_segments
from gstlal import inspiral, inspiral_pipe
from gstlal import dagparts as gstlaldagparts
from gstlal import datasource
from gstlal import multichannel_datasource
class LIGOLWContentHandler(ligolw.LIGOLWContentHandler):
	# Content handler used when loading LIGO_LW XML documents; no extra
	# behaviour is added beyond what lsctables.use_in() attaches below.
	pass
# allow documents parsed with this handler to use the lsctables table
# definitions
lsctables.use_in(LIGOLWContentHandler)
#
# get a dictionary of all the channels per gstlal_etg job
#
def etg_node_gen(gstlalETGJob, dag, parent_nodes, options, channels, data_source_info):
	"""
	Partition the requested channels into groups of at most
	options.streams streams (one stream per channel per sampling rate)
	and add one gstlal_etg node to the DAG for each group.

	Returns a dictionary mapping the last channel of each group to the
	node that processes that group.
	"""
	etg_nodes = {}
	cumsum_rates = 0	# running stream count for the current group
	outstr = ""		# accumulated --channel-name option value(s)
	out_index = 0		# 1-based index used to name each job's output dir
	for ii, channel in enumerate(channels,1):
		samp_rate = data_source_info.channel_dict[channel]['fsamp']
		# octave-spaced analysis rates between 32 Hz and 2048 Hz,
		# clipped to the channel's native sampling rate
		max_samp_rate = min(2048, int(samp_rate))
		min_samp_rate = min(32, max_samp_rate)
		# number of octave-spaced rates from min to max, inclusive
		n_rates = int(numpy.log2(max_samp_rate/min_samp_rate) + 1)
		cumsum_rates += n_rates
		outstr = outstr + channel + ":" + str(int(samp_rate))
		if cumsum_rates < options.streams:
			# room left in this group: chain the next channel on as
			# a further --channel-name option.  NOTE(review): this
			# relies on generic_node pasting the string verbatim
			# onto the command line without quoting -- confirm.
			outstr = outstr + " --channel-name="
		if cumsum_rates >= options.streams or ii == len(data_source_info.channel_dict.keys()):
			# group is full, or this was the last channel: emit a
			# gstlal_etg node for the accumulated channels
			out_index += 1
			outpath = options.out_path + "/gstlal_etg/gstlal_etg_%04d" % out_index
			etg_nodes[channel] = \
				inspiral_pipe.generic_node(gstlalETGJob, dag, parent_nodes = parent_nodes,
					opts = {"gps-start-time":options.gps_start_time,
						"gps-end-time":options.gps_end_time,
						"data-source":"frames",
						"channel-name":outstr,
						"mismatch":options.mismatch,
						"qhigh":options.qhigh,
						"cadence":options.cadence,
						#"triggers-from-dataframe":"",
						"disable-web-service":""
						},
					input_files = {"frame-cache":options.frame_cache},
					output_files = {"out-path":outpath}
					)
			# start a fresh group
			cumsum_rates = 0
			outstr = ""
	return etg_nodes
#
# Command line parser
#
def parse_command_line():
	"""
	Parse command line options for the offline gstlal_etg DAG generator.

	Returns:
		the (options, filenames) tuple produced by
		OptionParser.parse_args()
	"""
	parser = OptionParser(description = __doc__)

	# generic data source options
	#datasource.append_options(parser)
	multichannel_datasource.append_options(parser)

	# trigger generation options
	parser.add_option("-v", "--verbose", action = "store_true", help = "Be verbose.")
	parser.add_option("--triggers-from-dataframe", action = "store_true", default = False,
		help = "If set, will output iDQ-compatible triggers to disk straight from dataframe once every cadence")
	parser.add_option("--disable-web-service", action = "store_true", help = "If set, disables web service that allows monitoring of PSDS of aux channels.")
	parser.add_option("--description", metavar = "string", default = "GSTLAL_IDQ_TRIGGERS", help = "Set the filename description in which to save the output.")
	parser.add_option("--cadence", type = "int", default = 32, help = "Rate at which to write trigger files to disk. Default = 32 seconds.")
	parser.add_option("-m", "--mismatch", type = "float", default = 0.2, help = "Mismatch between templates, mismatch = 1 - minimal match. Default = 0.2.")
	parser.add_option("-q", "--qhigh", type = "float", default = 20, help = "Q high value for half sine-gaussian waveforms. Default = 20.")
	# the stream count is an integer quantity; was type = "float", which
	# silently accepted nonsensical fractional stream counts
	parser.add_option("-s", "--streams", type = "int", default = 100, help = "Number of streams to process per node. Default = 100.")
	parser.add_option("-l", "--latency", action = "store_true", help = "Print latency to output ascii file. Temporary.")
	parser.add_option("--save-hdf", action = "store_true", default = False, help = "If set, will save hdf5 files to disk straight from dataframe once every cadence")
	parser.add_option("--out-path", metavar = "path", default = ".", help = "Write to this path. Default = .")

	# Condor commands
	parser.add_option("--request-cpu", default = "2", metavar = "integer", help = "set the inspiral CPU count, default = 2")
	parser.add_option("--request-memory", default = "7GB", metavar = "integer", help = "set the inspiral memory, default = 7GB")
	parser.add_option("--condor-command", action = "append", default = [], metavar = "command=value", help = "set condor commands of the form command=value; can be given multiple times")

	options, filenames = parser.parse_args()

	return options, filenames
#
# Useful variables
#

options, filenames = parse_command_line()

# directory downstream plotting jobs write into
output_dir = "plots"

#
# Extract the channel list and instrument from the datasource options
#

data_source_info = multichannel_datasource.DataSourceInfo(options)
instrument = data_source_info.instrument
channels = data_source_info.channel_dict.keys()

#
# Setup the dag
#

try:
	os.mkdir("logs")
except OSError:
	# the logs directory already exists; unlike the previous bare
	# "except:", any other exception (e.g. KeyboardInterrupt) still
	# propagates
	pass
dag = inspiral_pipe.DAG("etg_pipe")

#
# setup the job classes
#

gstlalETGJob = inspiral_pipe.generic_job("gstlal_etg", condor_commands = inspiral_pipe.condor_command_dict_from_opts(options.condor_command, {"request_memory":options.request_memory, "request_cpus":options.request_cpu, "want_graceful_removal":"True", "kill_sig":"15"}))

#
# One gstlal_etg job per group of channels
#

etg_nodes = etg_node_gen(gstlalETGJob, dag, [], options, channels, data_source_info)

#
# all done
#

dag.write_sub_files()
dag.write_dag()
dag.write_script()
dag.write_cache()
......@@ -66,6 +66,31 @@ framexmit_ports = {
# misc useful functions
#
def channel_dict_from_channel_list(channel_list):
	"""!
	Build a dictionary of per-channel properties, keyed by channel name,
	from a list of "IFO:CHANNEL-NAME:RATE" strings such as those
	collected by an option parser option using the "append" action.

	Examples:

		>>> multichannel_datasource.channel_dict_from_channel_list(["H1:AUX-CHANNEL-NAME_1:2048", "H1:AUX-CHANNEL-NAME-2:512"])
		{'H1:AUX-CHANNEL-NAME_1': {'qhigh': None, 'ifo': 'H1', 'flow': None, 'fsamp': 2048.0, 'fhigh': None, 'frametype': None}, 'H1:AUX-CHANNEL-NAME-2': {'qhigh': None, 'ifo': 'H1', 'flow': None, 'fsamp': 512.0, 'fhigh': None, 'frametype': None}}
	"""
	channel_dict = {}
	for entry in channel_list:
		# entries are colon-delimited: instrument, channel, sampling rate
		ifo, name, rate = entry.split(':')
		channel_dict["%s:%s" % (ifo, name)] = {
			'fsamp': float(rate),
			'ifo': ifo,
			'flow': None,
			'fhigh': None,
			'qhigh': None,
			'frametype': None,
		}
	return channel_dict
def channel_dict_from_channel_file(channel_file):
"""!
Given a file of channel names with sampling rates, produce a dictionary keyed by ifo:
......@@ -193,8 +218,10 @@ class DataSourceInfo(object):
raise ValueError("can only give --frame-segments-file if --data-source=frames")
if options.frame_segments_name is not None and options.frame_segments_file is None:
raise ValueError("can only specify --frame-segments-name if --frame-segments-file is given")
if not options.channel_list:
raise ValueError("must specify a channel list in the form --channel-list=/path/to/file")
if not (options.channel_list or options.channel_name):
raise ValueError("must specify a channel list in the form --channel-list=/path/to/file or --channel-name=H1:AUX-CHANNEL-NAME:RATE --channel-name=H1:SOMETHING-ELSE:RATE")
if (options.channel_list and options.channel_name):
raise ValueError("must specify a channel list in the form --channel-list=/path/to/file or --channel-name=H1:AUX-CHANNEL-NAME:RATE --channel-name=H1:SOMETHING-ELSE:RATE")
## Generate a dictionary of requested channels from channel INI file
......@@ -212,12 +239,16 @@ class DataSourceInfo(object):
assert fidelity in self.known_fidelity, '--fidelity-exclude=%s is not understood. Must be one of %s'%(fidelity, ", ".join(self.known_fidelity))
# dictionary of the requested channels, e.g., {"H1": {"LDAS-STRAIN": 16384}, "L1": {"LDAS-STRAIN": 16384}}
name, self.extension = options.channel_list.rsplit('.', 1)
if self.extension == 'ini':
self.channel_dict = channel_dict_from_channel_ini(options)
else:
self.channel_dict = channel_dict_from_channel_file(options.channel_list)
if options.channel_list:
name, self.extension = options.channel_list.rsplit('.', 1)
if self.extension == 'ini':
self.channel_dict = channel_dict_from_channel_ini(options)
else:
self.channel_dict = channel_dict_from_channel_file(options.channel_list)
elif options.channel_name:
self.extension = 'none'
self.channel_dict = channel_dict_from_channel_list(options.channel_name)
# set instrument; it is assumed all channels from a given channel list are from the same instrument
self.instrument = self.channel_dict[next(iter(self.channel_dict))]['ifo']
......@@ -306,6 +337,10 @@ def append_options(parser):
File needs to be in format channel-name[spaces]sampling_rate with a new channel in each line.
Command given as --channel-list=location/to/file.
- --channel-name [string]
Set the name of the channels to process.
Can be given multiple times as --channel-name=IFO:AUX-CHANNEL-NAME:RATE
- --framexmit-addr [string]
Set the address of the framexmit service. Can be given
multiple times as --framexmit-addr=IFO=xxx.xxx.xxx.xxx:port
......@@ -345,6 +380,10 @@ def append_options(parser):
#### Typical usage case examples
-# Reading data from frames
--data-source=frames --gps-start-time=999999000 --gps-end-time=999999999 --channel-name=H1:AUX-CHANNEL-NAME:RATE
-# Reading online data via framexmit
--data-source=framexmit --channel-list=H1=location/to/file
......@@ -358,6 +397,7 @@ def append_options(parser):
group.add_option("--gps-end-time", metavar = "seconds", help = "Set the end time of the segment to analyze in GPS seconds. Required unless --data-source=lvshm")
group.add_option("--frame-cache", metavar = "filename", help = "Set the name of the LAL cache listing the LIGO-Virgo .gwf frame files (optional). This is required iff --data-source=frames")
group.add_option("--channel-list", type="string", metavar = "name", help = "Set the list of the channels to process. Command given as --channel-list=location/to/file")
group.add_option("--channel-name", metavar = "name", action = "append", help = "Set the name of the channels to process. Can be given multiple times as --channel-name=IFO:AUX-CHANNEL-NAME:RATE")
group.add_option("--framexmit-addr", metavar = "name", action = "append", help = "Set the address of the framexmit service. Can be given multiple times as --framexmit-addr=IFO=xxx.xxx.xxx.xxx:port")
group.add_option("--framexmit-iface", metavar = "name", help = "Set the multicast interface address of the framexmit service.")
group.add_option("--shared-memory-partition", metavar = "name", action = "append", help = "Set the name of the shared memory partition for a given instrument. Can be given multiple times as --shared-memory-partition=IFO=PARTITION-NAME")
......
SHELL := /bin/bash

# condor commands
# Set the accounting tag from https://ldas-gridmon.ligo.caltech.edu/ldg_accounting/user
ACCOUNTING_TAG=ligo.dev.o3.detchar.onlinedq.idq
GROUP_USER=duncan.meacher
CONDOR_COMMANDS:=--condor-command=accounting_group=$(ACCOUNTING_TAG) --condor-command=accounting_group_user=$(GROUP_USER)

#########################
# Triggering parameters #
#########################

# The GPS start time for analysis
START = 1176638000
# The GPS end time for analysis
STOP = 1176639000
OUTPATH = $(PWD)
# Number of streams (N_channels x N_rates_per_channel) that each processor will analyse
N_STREAMS = 100
# Allowed template mismatch, 1 - minimal match
MISMATCH = 0.2
# Upper bound on sine-gaussian Q
QHIGH = 40

# Detector; the cluster domain name selects which observatory's frames to find
CLUSTER:=$(shell hostname -d)
FRAME_TYPE=R

#################
# Web directory #
#################

# A user tag for the run
TAG = O2_C00
# Run number
RUN = run_1
# A web directory for output (note difference between cit+uwm and Atlas)
# cit & uwm
WEBDIR = ~/public_html/observing/$(TAG)/$(START)-$(STOP)-$(RUN)
# Atlas
#WEBDIR = ~/WWW/LSC/testing/$(TAG)/$(START)-$(STOP)-test_dag-$(RUN)

############
# Workflow #
############

# after the dag is generated, filter known-noisy XLAL/GSL/Generic messages
# out of each gstlal_etg command in the generated wrapper script
all : dag
	sed -i '/gstlal_etg / s/$$/ |& grep -v '\''XLAL\|GSL\|Generic'\''/' etg_pipe.sh
	@echo "Submit with: condor_submit_dag etg_pipe.dag"

# Run etg pipe to produce dag
dag : frame.cache plots channel_list.txt
	gstlal_etg_pipe \
		--data-source frames \
		--gps-start-time $(START) \
		--gps-end-time $(STOP) \
		--frame-cache frame.cache \
		--channel-list channel_list.txt \
		--out-path $(OUTPATH) \
		--streams $(N_STREAMS)\
		--mismatch $(MISMATCH) \
		--qhigh $(QHIGH) \
		$(CONDOR_COMMANDS) \
		--request-cpu 2 \
		--request-memory 5GB \
		--disable-web-service
#	--web-dir $(WEBDIR) \

# dump every channel recorded in the first frame of the cache
full_channel_list.txt : frame.cache
	FrChannels $$(head -n 1 $^ | awk '{ print $$5}' | sed -e "s@file://localhost@@g") > $@

frame.cache :
# FIXME force the observatory column to actually be instrument
	if [[ ${CLUSTER} == *"ligo-wa.caltech.edu" ]] ; then \
		gw_data_find -o H -t H1_$(FRAME_TYPE) -l -s $(START) -e $(STOP) --url-type file -O $@ ; \
	elif [[ ${CLUSTER} == *"ligo-la.caltech.edu" ]] ; then \
		gw_data_find -o L -t L1_$(FRAME_TYPE) -l -s $(START) -e $(STOP) --url-type file -O $@ ; \
	fi

# Make webpage directory and copy files across
#$(WEBDIR) : $(MAKEFILE_LIST)
#	mkdir -p $(WEBDIR)/OPEN-BOX
#	cp $(MAKEFILE_LIST) $@

# Makes local plots directory
plots :
	mkdir plots

clean :
	-rm -rvf *.sub *.dag* *.cache *.sh logs *.sqlite plots *.html Images *.css *.js
H1:CAL-CS_CARM_DELTAF_DQ 16384
H1:CAL-CS_LINE_SUM_DQ 16384
H1:CAL-DARM_CTRL_WHITEN_OUT_DBL_DQ 16384
H1:CAL-DARM_ERR_WHITEN_OUT_DBL_DQ 16384
H1:CAL-DELTAL_EXTERNAL_DQ 16384
H1:CAL-DELTAL_RESIDUAL_DBL_DQ 16384
H1:CAL-PCALX_FPGA_DTONE_IN1_DQ 16384
H1:CAL-PCALX_IRIGB_OUT_DQ 16384
H1:CAL-PCALX_RX_PD_OUT_DQ 16384
H1:CAL-PCALX_TX_PD_OUT_DQ 16384
H1:CAL-PCALY_EXC_SUM_DQ 16384
H1:CAL-PCALY_FPGA_DTONE_IN1_DQ 16384
H1:CAL-PCALY_IRIGB_OUT_DQ 16384
H1:CAL-PCALY_RX_PD_OUT_DQ 16384
H1:CAL-PCALY_TX_PD_OUT_DQ 16384
H1:IMC-F_OUT_DQ 16384
H1:IMC-I_OUT_DQ 16384
H1:LSC-ASAIR_A_RF45_I_ERR_DQ 16384
H1:LSC-ASAIR_A_RF45_Q_ERR_DQ 16384
H1:LSC-DARM_IN1_DQ 16384
H1:LSC-DARM_OUT_DQ 16384
H1:LSC-MCL_IN1_DQ 16384
H1:LSC-MCL_OUT_DQ 16384
H1:LSC-MICH_IN1_DQ 16384
H1:LSC-MICH_OUT_DQ 16384
H1:LSC-MOD_RF45_AM_AC_OUT_DQ 16384
H1:LSC-PRCL_IN1_DQ 16384
H1:LSC-PRCL_OUT_DQ 16384
H1:LSC-REFLAIR_A_RF45_I_ERR_DQ 16384
H1:LSC-REFLAIR_A_RF45_Q_ERR_DQ 16384
H1:LSC-REFLAIR_A_RF9_I_ERR_DQ 16384
H1:LSC-REFLAIR_A_RF9_Q_ERR_DQ 16384
H1:LSC-REFL_SERVO_CTRL_OUT_DQ 16384
H1:LSC-REFL_SERVO_ERR_OUT_DQ 16384
H1:LSC-SRCL_IN1_DQ 16384
H1:LSC-SRCL_OUT_DQ 16384
H1:OMC-DCPD_NORM_OUT_DQ 16384
H1:OMC-DCPD_NULL_OUT_DQ 16384
H1:OMC-DCPD_SUM_OUT_DQ 16384
H1:OMC-LSC_DITHER_OUT_DQ 16384
H1:OMC-LSC_I_OUT_DQ 16384
H1:OMC-PZT1_MON_AC_OUT_DQ 16384
H1:OMC-PZT2_MON_AC_OUT_DQ 16384
H1:PEM-CS_ACC_BEAMTUBE_MCTUBE_Y_DQ 16384
H1:PEM-CS_ACC_BSC1_ITMY_Y_DQ 16384
H1:PEM-CS_ACC_BSC3_ITMX_X_DQ 16384
H1:PEM-CS_ACC_HAM6_OMC_Z_DQ 16384
H1:PEM-CS_ACC_PSL_PERISCOPE_X_DQ 16384
H1:PEM-CS_ADC_4_30_16K_OUT_DQ 16384
H1:PEM-CS_ADC_4_31_16K_OUT_DQ 16384
H1:PEM-CS_MIC_EBAY_RACKS_DQ 16384
H1:PEM-CS_MIC_LVEA_BS_DQ 16384
H1:PEM-CS_MIC_LVEA_HAM7_DQ 16384
H1:PEM-CS_MIC_LVEA_INPUTOPTICS_DQ 16384
H1:PEM-CS_MIC_LVEA_OUTPUTOPTICS_DQ 16384
H1:PEM-CS_MIC_LVEA_VERTEX_DQ 16384
H1:PEM-CS_MIC_LVEA_XMANSPOOL_DQ 16384
H1:PEM-CS_MIC_LVEA_YMANSPOOL_DQ 16384
H1:PEM-CS_MIC_PSL_CENTER_DQ 16384
H1:PEM-CS_RADIO_EBAY_NARROWBAND_1_DQ 16384
H1:PEM-CS_RADIO_EBAY_NARROWBAND_2_DQ 16384
H1:PEM-CS_RADIO_LVEA_NARROWBAND_1_DQ 16384
H1:PEM-CS_RADIO_LVEA_NARROWBAND_2_DQ 16384
H1:PEM-CS_RADIO_ROOF1_BROADBAND_DQ 16384
H1:PEM-CS_RADIO_ROOF2_BROADBAND_DQ 16384
H1:PEM-CS_RADIO_ROOF3_BROADBAND_DQ 16384
H1:PEM-CS_RADIO_ROOF4_BROADBAND_DQ 16384
H1:PEM-EX_ACC_BSC9_ETMX_Y_DQ 16384
H1:PEM-EX_MIC_EBAY_RACKS_DQ 16384
H1:PEM-EX_MIC_VEA_MINUSX_DQ 16384
H1:PEM-EX_MIC_VEA_PLUSX_DQ 16384
H1:PEM-EY_ACC_BSC10_ETMY_X_DQ 16384
H1:PEM-EY_MIC_EBAY_RACKS_DQ 16384
H1:PEM-EY_MIC_VEA_MINUSY_DQ 16384
H1:PEM-EY_MIC_VEA_PLUSY_DQ 16384
H1:PSL-FSS_FAST_MON_OUT_DQ 16384
H1:PSL-FSS_MIXER_OUT_DQ 16384
H1:PSL-FSS_PC_MON_OUT_DQ 16384
H1:PSL-FSS_TPD_DC_OUT_DQ 16384
H1:PSL-ILS_HV_MON_OUT_DQ 16384
H1:PSL-ILS_MIXER_OUT_DQ 16384
H1:PSL-ISS_AOM_DRIVER_MON_OUT_DQ 16384
H1:PSL-ISS_PDA_REL_OUT_DQ 16384
H1:PSL-ISS_PDB_REL_OUT_DQ 16384
H1:PSL-OSC_PD_AMP_DC_OUT_DQ 16384
H1:PSL-OSC_PD_BP_DC_OUT_DQ 16384
H1:PSL-OSC_PD_INT_DC_OUT_DQ 16384
H1:PSL-OSC_PD_ISO_DC_OUT_DQ 16384
H1:PSL-PMC_HV_MON_OUT_DQ 16384
H1:PSL-PMC_MIXER_OUT_DQ 16384
H1:PSL-PWR_HPL_DC_OUT_DQ 16384
H1:PEM-CS_MAG_EBAY_LSCRACK_X_DQ 8192
H1:PEM-CS_MAG_EBAY_LSCRACK_Y_DQ 8192
H1:PEM-CS_MAG_EBAY_LSCRACK_Z_DQ 8192
H1:PEM-CS_MAG_EBAY_SUSRACK_X_DQ 8192
H1:PEM-CS_MAG_EBAY_SUSRACK_Y_DQ 8192
H1:PEM-CS_MAG_EBAY_SUSRACK_Z_DQ 8192
H1:PEM-CS_MAG_LVEA_INPUTOPTICS_X_DQ 8192
H1:PEM-CS_MAG_LVEA_INPUTOPTICS_Y_DQ 8192
H1:PEM-CS_MAG_LVEA_INPUTOPTICS_Z_DQ 8192
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment