import numpy as np
import matplotlib.pyplot as plt
from gwpy.timeseries import TimeSeries
from .. import logger
from .. import config
from .. import plotutils
DARM_CHANNEL = f'{config.IFO}:GDS-CALIB_STRAIN_CLEAN'
##############################################
def plot_darm(event):
"""Grabs DARM data from 8 minutes before LL and right before
LL and plots them
"""
# Only create DARM plot if we got a LL from NLN or above
if event.transition_index[0] < config.GRD_NOMINAL_STATE[0]:
logger.info('IFO not fully locked, DARM plot will not be created.')
return
plotutils.set_rcparams()
plt.rcParams['figure.figsize'] = (66, 24)
logger.info('Plotting DARM 8 minutes vs right before lockloss')
# Before time: between 8 and 9 minutes before LL
time_before = [int(event.gps) - (9*60), int(event.gps) - (8*60)]
# Right before LL time: 61 seconds to 1 second before LL
time_before_LL = [int(event.gps) - 61, int(event.gps) - 1]
# Grab the data
darm_before = TimeSeries.get(
DARM_CHANNEL,
time_before[0],
time_before[1],
frametype=f'{config.IFO}_HOFT_C00',
nproc=8,
verbose=True
)
darm_before_LL = TimeSeries.get(
DARM_CHANNEL,
time_before_LL[0],
time_before_LL[1],
frametype=f'{config.IFO}_HOFT_C00',
nproc=8,
verbose=True
)
# Calculate the Strain PSD
fft = 16
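# 16 s FFTs with 50% (8 s) overlap; the PSDs are cropped to the 10 Hz-5 kHz band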
psd_before = darm_before.psd(fft, fft/2.).crop(10, 5000)
psd_before_LL = darm_before_LL.psd(fft, fft/2.).crop(10, 5000)
# Plot DARM ASD comparison; pass the frequency arrays explicitly so the
# x-axis is in Hz (labels previously did not match the data they plotted)
plt.loglog(
    psd_before_LL.frequencies.value,
    np.sqrt(psd_before_LL.value),
    linewidth=1,
    color='red',
    label='Right before lockloss'
)
plt.loglog(
    psd_before.frequencies.value,
    np.sqrt(psd_before.value),
    linewidth=1,
    color='lightsteelblue',
    label='8 min before lockloss'
)
plt.xlim(10, 5000)
plt.xlabel('Frequency [Hz]')
plt.ylabel(u'ASD [1/\u221AHz]')
plt.title(f'DARM - {DARM_CHANNEL}')
plt.suptitle(
'Note: not helpful if not in NLN 9 mins before lockloss',
size=35
)
plt.grid(which='both')
plt.legend()
plt.tight_layout()
outfile_plot = 'darm.png'
outpath_plot = event.path(outfile_plot)
plt.savefig(outpath_plot, bbox_inches='tight')
import glob
from gwpy.segments import Segment, SegmentList
from .. import logger
from .. import config
from .. import data
##################################################
def find_shm_segments():
"""Returns a segment list covering the span of available frame data from /dev/shm.
"""
from lal.utils import CacheEntry
frames = glob.glob("/dev/shm/lldetchar/{}/*".format(config.IFO))
cache = map(CacheEntry.from_T050017, frames)
return SegmentList([e.segment for e in cache]).coalesce()
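# usage sketch (illustrative): a query span `seg` is fully covered by
# low-latency frames when
#   SegmentList([seg]) & find_shm_segments() == SegmentList([seg])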
##################################################
def discover_data(event):
    # use transition gps since it doesn't depend on refinement
    gps = event.transition_gps
    # use refine window to determine data query range
    # (gwdatafind requires integer segment times)
    segment = Segment(*config.REFINE_WINDOW).shift(int(gps))
    logger.info("querying for data in range: {} - {}".format(*segment))
    try:
        data.fetch([config.GRD_STATE_N_CHANNEL], segment)
    except RuntimeError:
        raise RuntimeError("data discovery timeout reached, data not found")
    logger.info("data available!")
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from gwpy.segments import Segment
from .. import logger
from .. import config
from .. import data
from .. import plotutils
GLITCH_SEARCH_WINDOW = [-2.0, 0]
GLITCH_CHANNEL = ['{}:SUS-ETMX_L3_MASTER_OUT_LR_DQ'.format(config.IFO)]
THRESH = 2 * (10 ** 4)  # amplitude threshold for finding glitches
LOW_THRESH = 2 * (10 ** 4)  # amplitude bound the channel must stay within after a glitch
LOW_TIME = 50 * (10 ** -3)  # minimum time [s] the channel must remain within LOW_THRESH
##############################################
def check_glitch(event):
"""
This will use a 200Hz high-passed verion of GLITCH_CHANNEL to check for
any glitches prior to lockloss in two steps:
1. Is there a glitch that goes above THRESH in the GLITCH_SEARCH_WINDOW
If this is true:
2. Does the channel remain within LOW_THRESH for at least LOW_TIME
"""
if config.IFO == 'L1':
logger.info("NOT SET UP FOR LLO.")
return
if event.transition_index[0] < config.GRD_NOMINAL_STATE[0]:
logger.info("IFO not fully locked, plugin will not run.")
return
plotutils.set_rcparams()
mod_window = [GLITCH_SEARCH_WINDOW[0], GLITCH_SEARCH_WINDOW[1]]
segment = Segment(mod_window).shift(event.gps)
buf = data.fetch(GLITCH_CHANNEL, segment)[0]
thresh_crossing = segment[1]
srate = buf.sample_rate
t = np.arange(segment[0], segment[1], 1 / srate)
highpass = signal.butter(4, 200, btype='highpass', output='sos', fs=srate)
hp_data = signal.sosfilt(highpass, buf.data)
if event.refined_gps:
lockloss_time = event.refined_gps
else:
lockloss_time = event.gps
thresh_ref = hp_data[np.where(t < lockloss_time - LOW_TIME)]
glitches = np.where((thresh_ref <= -THRESH) | (thresh_ref >= THRESH))[0]
# discard first 100ms of data to avoid filtering edge effects
glitches = glitches[glitches > 0.1 * srate]
if not any(glitches):
logger.info("No glitches found in {}".format(GLITCH_CHANNEL[0]))
else:
glitch_time = t[glitches[0]]
logger.info("Threshold crossed at time: {}".format(glitch_time))
low_ref = hp_data[np.where((t >= glitch_time) & (t < lockloss_time))]
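# count consecutive samples that stay within +/-LOW_THRESH; low_list
# collects the length of each such quiet run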
count = 0
low_list = []
for value in low_ref:
if -LOW_THRESH < value < LOW_THRESH:
count += 1
else:
# gone outside of LOW_THRESH threshold, add count to list and reset.
low_list.append(count)
count = 0
low_list.append(count)
thresh_crossing = min(glitch_time, thresh_crossing)
low_max_time = max(low_list) / srate
# Check that the channel remained within LOW_THRESH for at least LOW_TIME
logger.info("Channel remained within threshold for {}ms.".format(int(1000 * low_max_time)))
if (low_max_time >= LOW_TIME):
event.add_tag('ETM_GLITCH')
else:
logger.info('Channel did not remain within threshold, no ETM glitch found.')
# Make plots
fig, ax = plt.subplots(1, figsize=(22, 16))
ax.plot(
t - event.gps,
buf.data,
label=buf.channel,
alpha=0.8,
lw=2,
)
ax.grid()
ax.set_xlabel('Time [s] since lock loss at {}'.format(event.gps), labelpad=10)
ax.set_ylabel('Counts')
ax.legend(loc='best')
ax.set_title('Glitch check', y=1.04)
ax.set_xlim(segment[0] - event.gps, segment[1] - event.gps)
ax.set_ylim(-3.9 * (10 ** 5), 3.9 * (10 ** 5))
if thresh_crossing != segment[1]:
plotutils.set_thresh_crossing(ax, thresh_crossing, event.gps, segment)
fig.tight_layout()
fig.savefig(event.path('ETM_GLITCH.png'))
# Make High Passed data plot
fig1, ax1 = plt.subplots(1, figsize=(22, 16))
ax1.plot(
t - event.gps,
hp_data,
label='200Hz HP of {}'.format(buf.channel),
alpha=0.8,
lw=2,
)
ax1.axhline(
-THRESH,
linestyle='--',
color='black',
label='Glitch threshold',
lw=5,
)
ax1.axhline(
THRESH,
linestyle='--',
color='black',
lw=5,
)
ax1.grid()
ax1.set_xlabel('Time [s] since lock loss at {}'.format(event.gps), labelpad=10)
ax1.set_ylabel('Counts')
ax1.legend(loc='best')
ax1.set_title('Glitch check, 200Hz High Passed', y=1.04)
ax1.set_xlim(segment[0] - event.gps, segment[1] - event.gps)
ax1.set_ylim(1.9 * -THRESH, 1.9 * THRESH)
if thresh_crossing != segment[1]:
plotutils.set_thresh_crossing(ax1, thresh_crossing, event.gps, segment)
fig1.tight_layout()
fig1.savefig(event.path('ETM_GLITCH_HP.png'))
import numpy as np
import matplotlib.pyplot as plt
from gwpy.segments import Segment
from .. import logger
from .. import config
from .. import data
from .. import plotutils
FSS_SEARCH_WINDOW = [-5, 5]
FSS_CHANNELS = [
'{}:PSL-FSS_FAST_MON_OUT_DQ'.format(config.IFO)
]
FSS_THRESH = 3
##############################################
def check_fss(event):
"""Checks for FSS oscillations.
Checks FSS PD for counts below threshold and
creates a tag/plot if below said threshold.
"""
plotutils.set_rcparams()
mod_window = [FSS_SEARCH_WINDOW[0], FSS_SEARCH_WINDOW[1]]
segment = Segment(mod_window).shift(int(event.gps))
buf = data.fetch(FSS_CHANNELS, segment)[0]
thresh_crossing = segment[1]
srate = buf.sample_rate
t = np.arange(segment[0], segment[1], 1/srate)
thresh_ref = buf.data[np.where(t-event.gps <= -1)]
thresh_above = np.mean(thresh_ref)+FSS_THRESH
thresh_below = np.mean(thresh_ref)-FSS_THRESH
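# any excursion beyond mean +/- FSS_THRESH of the pre-lockloss data
# counts as an oscillation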
glitches = np.where((thresh_ref <= thresh_below) | (thresh_ref >= thresh_above))[0]
if glitches.size > 0:
glitch_time = t[glitches[0]]
thresh_crossing = min(glitch_time, thresh_crossing)
event.add_tag('FSS_OSCILLATION')
else:
logger.info('no fss oscillations')
fig, ax = plt.subplots(1, figsize=(22, 16))
t = np.arange(segment[0], segment[1], 1/srate)
ax.plot(
t-event.gps,
buf.data,
label=buf.channel,
alpha=0.8,
lw=2,
)
ax.axhline(
thresh_below,
linestyle='--',
color='black',
label='FSS oscillation threshold',
lw=5,
)
ax.axhline(
thresh_above,
linestyle='--',
color='black',
lw=5,
)
ax.grid()
ax.set_xlabel('Time [s] since lock loss at {}'.format(event.gps), labelpad=10)
ax.set_ylabel('Voltage [V]')
ax.legend(loc='best')
ax.set_title('FSS stability check', y=1.04)
ax.set_xlim(segment[0]-event.gps, segment[1]-event.gps)
if thresh_crossing != segment[1]:
plotutils.set_thresh_crossing(ax, thresh_crossing, event.gps, segment)
fig.tight_layout()
fig.savefig(event.path('fss.png'))
import os
import glob
import logging
import itertools
from collections import defaultdict
import h5py
import numpy as np
from scipy.interpolate import griddata
import matplotlib
from matplotlib import cm
from matplotlib import pyplot as plt
@@ -15,18 +15,18 @@ from mpl_toolkits.axes_grid1 import make_axes_locatable
from gwpy.segments import Segment, SegmentList
from .. import logger
from .. import config
__author__ = "Patrick Godwin (patrick.godwin@ligo.org)"
__description__ = "a plugin to analyze glitches around lockloss times"
##################################################
# NOTE: embed viridis colormap since it isn't available for
# matplotlib prior to 1.4.x
VIRIDIS_DATA = [[0.267004, 0.004874, 0.329415],
[0.268510, 0.009605, 0.335427],
@@ -288,9 +288,10 @@ VIRIDIS = ListedColormap(VIRIDIS_DATA, name='viridis')
##################################################
# NOTE: this has been taken and modified from iDQ, and will be
# removed when a standard function for grabbing gstlal triggers
# are incorporated into gwpy
def retrieve_triggers(event, trigger_path, channels=None):
"""
workhorse data discovery method for gstlal hdf5 files
@@ -302,7 +303,7 @@ def retrieve_triggers(event, trigger_path, channels=None):
analysis_seg = Segment(*config.PLOT_WINDOWS['WIDE']).shift(gps)
analysis_segs = SegmentList([analysis_seg])
# find triggers using directory patterns
basename = '{}-GSTLAL_IDQ_FEATURES'.format(config.IFO[0])
digits_start = str(analysis_seg[0])[:5]
digits_end = str(analysis_seg[1])[:5]
@@ -314,7 +315,7 @@ def retrieve_triggers(event, trigger_path, channels=None):
trigger_files = glob.iglob(os.path.join(trigger_path, glob_pattern))
cache = [CacheEntry.from_T050017(file_) for file_ in trigger_files]
# consider more directories if first 5 digits of gps do not match
if digits_start != digits_end:
glob_pattern = os.path.join(
basename,
@@ -324,25 +325,26 @@ def retrieve_triggers(event, trigger_path, channels=None):
trigger_files = glob.iglob(os.path.join(trigger_path, glob_pattern))
cache.extend([CacheEntry.from_T050017(file_) for file_ in trigger_files])
# data structure and dtype for query results
data = defaultdict(list)
sample_rate = None
columns = ['time', 'snr']
dtype = [(col, '<f8') for col in columns]
# iterate over hdf5 files
for entry in cache:
# determine which segments are relevant for this hdf5 file
relevant_segs = SegmentList([Segment(*entry.segment)]) & analysis_segs
if not relevant_segs:
continue
with h5py.File(entry.path, 'r') as file_obj:
# determine stride + sample rate of datasets
sample_rate = file_obj.attrs.get('sample_rate')
# query only channels in file
file_channels = set(file_obj.keys())
if channels is not None:
channels2query = file_channels & set(channels)
@@ -353,7 +355,7 @@ def retrieve_triggers(event, trigger_path, channels=None):
datasets = set(file_obj[channel].keys())
dset_segs = [dset2segment(dset) for dset in datasets]
# filter datasets with segments
relevant_datasets = [(dset, seg) for dset, seg in zip(datasets, dset_segs)
if relevant_segs.intersects_segment(seg)]
dset_span = np.sum([seg[1]-seg[0] for _, seg in relevant_datasets])
@@ -363,14 +365,14 @@ def retrieve_triggers(event, trigger_path, channels=None):
key = os.path.join(channel, relevant_datasets[0][0])
# preallocate space and initialize all values to nan
rows = np.empty((int(dset_span * sample_rate),), dtype=dtype)
if rows.size == 1:
rows[:] = tuple(np.nan for col in columns)
else:
rows[:] = np.nan
# populate allocated rows with datasets
idx_start = 0
for dataset, seg in relevant_datasets:
livetime = seg[1] - seg[0]
@@ -382,7 +384,7 @@ def retrieve_triggers(event, trigger_path, channels=None):
data[channel].append(rows)
# stack datasets vertically
for channel, datasets in data.items():
data[channel] = np.concatenate(datasets, axis=0)
@@ -405,7 +407,7 @@ def plot_snr_eventmap(event, triggers, sample_rate, zoom, normalize=False):
"""Produces event plot for several data channels; lines are colored by SNR.
"""
# get relevant info from event, config settings
gps = event.gps
window = config.GLITCH_PLOT_WINDOWS[zoom]
@@ -418,7 +420,7 @@ def plot_snr_eventmap(event, triggers, sample_rate, zoom, normalize=False):
glitch_channels = ['{}:{}'.format(config.IFO, chan) for chan in glitch_channels]
channels = set(glitch_channels) & set(triggers.keys())
# filter triggers based on plot window
filt_triggers = dict(triggers)
for channel in channels:
window_condition = np.logical_and(
@@ -428,52 +430,52 @@ def plot_snr_eventmap(event, triggers, sample_rate, zoom, normalize=False):
filt_triggers[channel] = filt_triggers[channel][np.where(window_condition)]
filt_triggers[channel]['time'] -= gps
# find out which channels have triggers to plot
channels &= set([ch for ch in filt_triggers.keys() if len(filt_triggers[ch]) > 0])
if len(channels) > 1:
logging.info("generating {} eventmap for {} subsystem".format(
logger.info("generating {} eventmap for {} subsystem".format(
zoom,
glitch_subsystem,
))
# sort channels by name
channels = sorted(list(channels))
# define figure, axes
fig = plt.figure(dpi=300)
axes = fig.add_subplot(111)
# set a colormap based on matplotlib version available
try:
cmap = cm.viridis
except AttributeError:
cmap = VIRIDIS
# set SNR bounds
channel_snr_max = {ch: max(filt_triggers[ch]['snr']) for ch in channels}
if normalize:
snr_min = 10**-5
snr_max = 1
else:
# NOTE: if max SNR over all channels is < 1, set range: (max(SNR), 1)
# else, set range: (1, max(SNR))
snr_min = min(1, max(channel_snr_max.values()))
snr_max = min(10**5, max(max(channel_snr_max.values()), 1))
# set default values in case of missing data
step_size = 1. / sample_rate
default_times = np.arange(window[0], window[1] + step_size, step_size)
default_snrs = np.ones(len(default_times)) * snr_min
# define colorbar and map colors to correct scaling
snr_norm = matplotlib.colors.LogNorm(vmin=snr_min, vmax=snr_max, clip=True)
# mapper = cm.ScalarMappable(norm=snr_norm, cmap=cmap)
# define channel and time indices
time_idx, channel_idx = np.meshgrid(default_times, np.arange(len(channels) + 1))
# generate grid points
time_pts = np.tile(default_times, len(channels))
ch_pts = np.repeat(np.arange(len(channels)), len(default_times))
snr_pts = np.array([])
@@ -487,21 +489,21 @@ def plot_snr_eventmap(event, triggers, sample_rate, zoom, normalize=False):
snrs[trgtime_idx] = filt_triggers[channel]['snr']
snr_pts = np.append(snr_pts, snrs)
# generate SNR indices from interpolated grid, SNR data
grid_pts = (time_pts, ch_pts)
interp_pts = (time_idx[:-1, :-1], channel_idx[:-1, :-1])
snr_idx = griddata(grid_pts, snr_pts, interp_pts, method='linear')
# plot heatmap
axes.pcolormesh(time_idx, channel_idx, snr_idx, cmap=cmap, norm=snr_norm)
# attach y axis to plot
axes.set_yticks(np.arange(len(channels)) + 0.5)
axes.set_yticklabels(list(channels))
axes.set_ylabel(r"Data Channels")
axes.set_ylim(0, len(channels))
# attach x axis to plot
axes.set_xlabel(r"Time [s] from lock loss at {:f}".format(gps))
axes.set_xlim((window[0], window[1]))
axes.grid(which='major', axis='x', color='lightgrey')
@@ -509,26 +511,26 @@ def plot_snr_eventmap(event, triggers, sample_rate, zoom, normalize=False):
# add vertical line to mark lock loss time
axes.axvline(
0,
color='black',
linestyle='--',
lw=1,
)
# attach colorbar to plot
divider = make_axes_locatable(axes)
cax = divider.append_axes( "right", size="5%", pad=0.1)
cax = divider.append_axes("right", size="5%", pad=0.1)
cbl = matplotlib.colorbar.ColorbarBase(
cax,
cmap=cmap,
norm=snr_norm,
orientation="vertical",
)
if normalize:
cbl.set_label(r"Normalized SNR (by channel)")
else:
cbl.set_label(r"SNR")
# misc plot formatting
axes.set_title(r"Eventmap for {} channels".format(glitch_subsystem))
plt.rc('font', size=10)
plt.rc('axes', titlesize=10, labelsize=10)
@@ -537,13 +539,13 @@ def plot_snr_eventmap(event, triggers, sample_rate, zoom, normalize=False):
plt.rc('legend', fontsize=10)
plt.tight_layout()
# write plot to disk
outfile = 'snr_eventmap_{}_{}.png'.format(glitch_subsystem, zoom)
outpath = event.path(outfile)
logger.debug(outpath)
fig.savefig(outpath, dpi=300)
else:
logging.info("No channels available for {} subsystem, skipping eventmap".format(
logger.info("No channels available for {} subsystem, skipping eventmap".format(
glitch_subsystem,
))
@@ -558,7 +560,7 @@ def analyze_glitches(event):
all_channels = list(itertools.chain.from_iterable(config.GLITCH_CHANNELS.values()))
all_channels = ['{}:{}'.format(config.IFO, chan) for chan in all_channels]
logging.info("finding triggers surrounding event")
logger.info("finding triggers surrounding event")
triggers, sample_rate = retrieve_triggers(
event,
trigger_path,
@@ -566,9 +568,10 @@
)
if triggers.keys():
logging.info("generating eventmaps")
logger.info("generating eventmaps")
for zoom in config.PLOT_WINDOWS.keys():
plot_snr_eventmap(event, triggers, sample_rate, zoom)
# no channels to analyze
else:
logger.warning("No triggers available for this event, skipping analysis")
import os
import datetime
import subprocess
import requests
import gpstime
from .. import logger
from .. import config
@@ -29,8 +30,8 @@ def retrieve_guardlog(event):
'--lines', '1000000',
GUARDLOG_NODES,
]
logging.info("retrieving guardian log...")
logging.debug(' '.join(cmd))
logger.info("retrieving guardian log...")
logger.debug(' '.join(cmd))
with open(event.path('guardlog.txt'), 'w') as f:
subprocess.call(
cmd,
@@ -50,7 +51,7 @@ def previous_state_name(event):
sys.load()
state_index = event.transition_index[0]
name = sys.index(state_index)
logging.info("previous guardian state name: {}".format(name))
logger.info("previous guardian state name: {}".format(name))
with open(event.path('previous_state_name'), 'w') as f:
f.write(name+'\n')
...
import numpy as np
import matplotlib.pyplot as plt
from gwpy.segments import Segment
from .. import logger
from .. import config
from .. import data
from .. import plotutils
#################################################
def power_in_ham6(event):
'''Integrates the power measured by the PEM-CS_ADC_5_19_2K_OUT_DQ channel
(i.e. the energy reaching HAM6) and compares it to the energy that gets
through the fast shutter'''
plotutils.set_rcparams()
gps = event.gps
ifo = config.IFO
window = [-3, 3]
if ifo == 'H1':
if gps < 1415034018:
logger.info('The HAM6 power monitor was not fully installed for '
'locklosses occurring before November 07, 2024 '
'17:00UTC (See LHO81080). Plugin not running.')
return
elif event.transition_index[0] < config.GRD_NOMINAL_STATE[0]:
logger.info('IFO was not in low noise. Plugin not running.')
return
elif ifo == 'L1' and gps < 1410812958:
logger.info('The HAM6 power monitor was not installed for '
'locklosses occurring before Sep 19, 2024 '
'15:29 CT. Plugin not running.')
return
if ifo == 'H1':
channels = [
f'{ifo}:PEM-CS_ADC_5_19_2K_OUT_DQ',
f'{ifo}:ASC-AS_A_DC_NSUM_OUT_DQ',
f'{ifo}:OMC-DCPD_SUM_OUT_DQ',
f'{ifo}:ISI-HAM6_GS13INF_V3_IN1_DQ'
]
# Get the data
segment = Segment(window).shift(gps)
bufs = data.fetch(channels, segment)
# Calibration
calPEM = 0.177 # [W/ct]
calASA = 1.15e-5 # From data before GPS=1415043099 lockloss
asa_sat_thresh = 130000 * calASA # ~1.5W; raw cts before calib
pmon_sat_thresh = 32000 * calPEM # ~5.66kW; rails at ~32k (1V)
elif ifo == 'L1':
channels = [
f'{ifo}:PEM-CS_ADC_4_11_OUT_DQ',
f'{ifo}:ASC-AS_A_DC_SUM_OUT_DQ',
f'{ifo}:OMC-DCPD_SUM_OUT_DQ',
f'{ifo}:ISI-HAM6_GS13INF_V3_IN1_DQ',
f'{ifo}:PEM-CS_ADC_4_11_GAIN'
]
# Get the data
segment = Segment(window).shift(gps)
bufs = data.fetch(channels, segment)
# Calibration
calPEM = 1 # calibrated in front end [W]
calASA = 1.562e-5 # Calibrated to same power as PEM during lockloss
asa_sat_thresh = (2**17)*calASA # ~2W; raw cts before calib
pmon_sat_thresh = (2**15)*bufs[4].data[0]*calPEM # ~5.66kW; rails at ~32k (1V)
# Filter through each channel and get data times
# PEM-CS_ADC_5_19_2K_OUT_DQ
pmon = bufs[0].data*calPEM
t_pmon = np.arange(0, len(pmon))/bufs[0].sample_rate + window[0]
# ASC-AS_A_DC_NSUM_OUT_DQ
asa = bufs[1].data
pasa = asa*calASA
t_asa = np.arange(0, len(asa))/bufs[1].sample_rate + window[0]
# OMC-DCPD_SUM_OUT_DQ
dcpd = bufs[2].data
t_dcpd = np.arange(0, len(dcpd))/bufs[2].sample_rate + window[0]
# ISI-HAM6_GS13INF_V3_IN1_DQ
gs13 = bufs[3].data
t_gs13 = np.arange(0, len(gs13))/bufs[3].sample_rate + window[0]
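# the HAM6 GS13 senses the mechanical impulse of the fast shutter
# closing, so its first large excursion marks the shutter time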
# Find fast shutter trigger time
if ifo == 'H1':
trigger = (np.abs(gs13) > 100).argmax()
elif ifo == 'L1':
trigger = (np.abs(gs13) > 500).argmax()
t_trigger = t_gs13[trigger]
# Integrate power monitor over t_trigger+[-0.1, +0.2]
pmon_integ_range = range(
(t_pmon >= t_trigger-0.1).argmax(),
(t_pmon > t_trigger+0.2).argmax()
)
e_ham6 = sum(pmon[pmon_integ_range])*(t_pmon[1]-t_pmon[0])
# Assume that the power while the shutter was closed is negligible
# Only integrate when the shutter is open
# If pmon doesn't saturate, shutter open/close status is approximated by:
# abs(p(ham6)/p(AS_A_DC_NSUM)) < pmon_sat_thresh/asa_sat_thresh = 3.8k
#
# This is because:
# When the shutter is closed, the ratio would be:
# ~1/trans_mirror ~ 1.4E4 > 3.8k
# (or larger if AS_A saturates but power monitor doesn't)
# When the shutter is open, the ratio would be
# ~1 < 3.8k
# (or larger if AS_A saturates but power monitor doesn't)
# pmon saturates earlier than AS_A when shutter is closed, because
# pmon_sat_thresh is 5.7kW and asa_sat_thresh/trans_mirror~20kW
#
# Therefore, unless pmon saturates,
# abs(p(ham6)/p(AS_A_DC_NSUM)) < pmon_sat_thresh/asa_sat_thresh = 3.8k
# is a good criterion for the shutter-open state
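# (numerically for H1: pmon_sat_thresh/asa_sat_thresh
#  = (32000*0.177 W) / (130000*1.15e-5 W) ~= 5664/1.5 ~= 3.8e3)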
# Get ratio between pmon and pasa - add 1e-20 to avoid /0
if ifo == 'H1':
pmon_pasa_ratio = np.divide(np.abs(pmon), np.abs(pasa) + 1E-20)
elif ifo == 'L1':
pmon_pasa_ratio = 1 # ASA already calibrated to PEM channel
ratio_thresh = pmon_sat_thresh/asa_sat_thresh
# Getting power downstream of fast shutter
p_after_FS = np.multiply(pmon, np.logical_and(pmon_pasa_ratio < ratio_thresh, pmon > 0.01))
# Integrate p_aft_fs over the same integration range for pmon
e_after_FS = sum(p_after_FS[pmon_integ_range])*(t_pmon[1]-t_pmon[0])
# Check if pmon is close to saturation
pmon_sat_check = np.max(pmon) > 0.9*pmon_sat_thresh
# Plot figures
logger.info('Creating HAM6 power plot')
# Update default font sizes
plt.rcParams['font.size'] = 20
plt.rcParams['axes.titlesize'] = 35
plt.rcParams['axes.labelsize'] = 20
plt.rcParams['xtick.labelsize'] = 20
plt.rcParams['ytick.labelsize'] = 20
plt.rcParams['legend.fontsize'] = 23
fig, (ax0, ax1, ax2) = plt.subplots(3, sharex=True, figsize=(27, 16), dpi=100)
# Plot PEM pmon, AS_A, and power after the fast shutter
p_peak = np.nanmax(pmon) # peak power
ax0.semilogy(
t_pmon,
pmon,
label=f'Lockloss power monitor (Energy: {e_ham6:.1f}J, Peak power: {p_peak:.1f}W)',
linewidth=5.5
)
ax0.semilogy(
t_asa,
pasa,
label='ASC-AS_A_DC_NSUM',
linewidth=5.5
)
if ifo == 'H1':
ax0.semilogy(
t_pmon,
p_after_FS,
label=f'Power after fast shutter (Projection, Energy: {e_after_FS:.1f}J)',
linewidth=2.5,
color='cyan'
)
ax0.axhline(
asa_sat_thresh,
label='AS_A Saturation Threshold',
color='k',
linestyle='--'
)
if trigger != 0:
ax0.set_xlim(t_trigger-0.15, t_trigger+0.18)
ax0.set_ylim(1E-4, p_peak*2)
ax0.set_ylabel('Power [W]')
# Display warning if pmon saturated
if pmon_sat_check:
logger.info('Power monitor might have railed!')
ax0.text(
t_trigger+0.1,
10,
'Power monitor might have railed!!',
color='red',
size=22
)
# Plot DCPDs
ax1.plot(
t_dcpd,
dcpd,
label='OMC-DCPD_SUM',
lw=4
)
ax1.set_ylabel('Current [mA]')
if ifo == 'H1':
ax1.set_ylim(35, 45)
elif ifo == 'L1':
ax1.set_ylim(15, 55)
# Plot GS13 for HAM6 that shows time of fast shutter close
ax2.plot(
t_gs13,
gs13,
label='HAM6_GS13INF_V3_IN1',
lw=4
)
ax2.set_ylabel('counts')
ax2.set_xlabel(f'Time [s] since Lockloss at {str(gps)}')
for ax in [ax0, ax1, ax2]:
ax.grid(True)
ax.legend(loc='lower left')
ax0.set_title('Power through HAM6')
fig.tight_layout(pad=0.05)
# Save plot to lockloss directory
outfile_plot = 'HAM6_power.png'
outpath_plot = event.path(outfile_plot)
fig.savefig(outpath_plot, bbox_inches='tight')
fig.clf()
import os
import logging
import numpy as np
from gwpy.segments import Segment
from .. import logger
from .. import config
from .. import data
##################################################
INITIAL_WINDOW = [-60, 0]
##################################################
def find_previous_state(event):
"""Collect info about previous guardian state
@@ -21,13 +25,10 @@
"""
channels = [config.GRD_STATE_N_CHANNEL]
# find the transition time
segment = Segment(0, 1).shift(event.id)
gbuf = data.fetch(channels, segment)[0]
lli = np.where(gbuf.data == event.transition_index[1])[0]
assert len(lli) > 0, "Lock loss not found at this time!"
state_end_gps = gbuf.tarray[lli[0]]
# note the transition time for old events
@@ -35,10 +36,11 @@
if not os.path.exists(state_end_file):
with open(state_end_file, 'w') as f:
f.write('{:f}\n'.format(state_end_gps))
assert state_end_gps == event.transition_gps, "State end time does not match transition_gps ({} != {}).".format(state_end_gps, event.transition_gps)
# find start of previous state
lockloss_found = False
check_3_101_2 = 0
check_3_2 = 0
state_start_gps = None
power = 1
window = [INITIAL_WINDOW[0], 1]
@@ -51,20 +53,94 @@
# check the transitions
for transition in reversed(transitions):
    transt = transition[0]
    transi = tuple(map(int, transition[1:]))
    if lockloss_found:
        if config.IFO == 'H1' and transi[0] >= 600:
            logger.info(
                "Previous state was still in NLN, continue searching "
                "for transition from state less than 600."
            )
            continue
        previous_index = event.transition_index[0]
        # Ignore matching up transi[1] with event.transition_index[0] if
        # state at lockloss was at or above 600
        if not (config.IFO == 'H1' and previous_index >= 600):
            assert transi[1] == previous_index, \
                "Transition history does not match lock loss " \
                f"transition ({transi[1]} != {previous_index})"
        state_start_gps = transt
        break
    # For H1, a X->3->101->2 transition is a common pattern
    # that shows up as 101->2 but we actually want to record
    # them as X->3. The following logic checks to see if
    # we're in one of these situations and updates the
    # transition_index information accordingly.
    if config.IFO == 'H1':
        if transi == (101, 2):
            # could be this case, so mark as candidate and
            # continue
            check_3_101_2 = 1
            continue
        elif check_3_101_2 == 1:
            if transi == (3, 101):
                # yup, this is it, note and continue
                check_3_101_2 = 2
                continue
            else:
                # nope, this is a regular 101->2 transition, so
                # note state 101 start time and break
                lockloss_found = True
                state_start_gps = transt
                break
        elif check_3_101_2 == 2:
            logger.info(
                f'Updating H1 lockloss for X->3->101->2 '
                f'transition: {transi}'
            )
            event.add_tag("FAST_DRMI")
            # update the transition_index information on disk
            # and clear cache so it's reloaded next access
            with open(event.path('transition_index'), 'w') as f:
                f.write('{} {}\n'.format(*transi))
            event._transition_index = None
            # don't continue so that we hit the lockloss_found
            # check below
        # If we don't have a X->3->101->2 LL, check if we maybe have
        # a X->3->2 LL
        if transi == (3, 2):
            # Mark as candidate for this type of LL
            check_3_2 = 1
            continue
        elif check_3_2 == 1:
            logger.info(
                f'Updating H1 lockloss for X->3->2 '
                f'transition: {transi}'
            )
            event.add_tag("FAST_DRMI")
            # update the transition_index information on disk
            # and clear cache so it's reloaded next access
            with open(event.path('transition_index'), 'w') as f:
                f.write('{} {}\n'.format(*transi))
            event._transition_index = None
            # don't continue so that we hit the lockloss_found
            # check below
    if transi == event.transition_index:
        logger.info(f'Lockloss found: {transi}')
        lockloss_found = True
assert lockloss_found, "lock loss not found in first segment"
assert lockloss_found, "Lock loss not found in first segment"
window = [edge * (2 ** power) + window[0] for edge in INITIAL_WINDOW]
power += 1
# write start of lock stretch to disk
previous_index, lockloss_index = event.transition_index
previous_state = (previous_index, state_start_gps, state_end_gps, lockloss_index)
logging.info("previous guardian state: {}".format(previous_state))
logger.info("Previous guardian state: {}".format(previous_state))
with open(event.path('previous_state'), 'w') as f:
f.write('{} {:f} {:f} {}\n'.format(*previous_state))
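# Example (hypothetical history, newest first) for an H1 event recorded
# as transition_index (101, 2):
#   (101, 2) -> candidate X->3->101->2 pattern, check_3_101_2 = 1
#   (3, 101) -> pattern confirmed, check_3_101_2 = 2
#   (565, 3) -> transition_index rewritten to (565, 3), FAST_DRMI tagged
# so the lock loss is recorded as 565->3 rather than 101->2.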
from gwpy.segments import Segment
from .. import logger
from .. import config
from .. import data
WINDOW = [-5, 0]
OBSERVE_CHANNEL = ['{}:GRD-IFO_OK'.format(config.IFO)]
IFO_MODE_CHANNEL = ['{}:ODC-OBSERVATORY_MODE'.format(config.IFO)]
# The Observatory mode is set by operators/commissioners to indicate when the
# IFO is in a non-Observing state. While GRD-IFO_OK is an absolute check that
# we are in Observing, ODC-OBSERVATORY_MODE is not guaranteed to represent the
# state of the Observatory.
##############################################
def check_ifo_mode(event):
"""Checks if IFO Observatory mode is set to MAINTENANCE, CALIBRATION,
or COMMISSIONING modes and creates appropriate tag. """
mode_window = [WINDOW[0], WINDOW[1]]
segment = Segment(mode_window).shift(int(event.gps))
observe_channel = data.fetch(OBSERVE_CHANNEL, segment)[0]
if bool(observe_channel.data[0]):
event.add_tag('OBSERVE')
return
mode_channel = data.fetch(IFO_MODE_CHANNEL, segment)[0]
# commissioners are noise hunting, experimenting with controls, creating alignment scripts, etc.
if any(mode_channel.data == 40):
event.add_tag('COMMISSIONING')
# all activities related to calibration, including calibration injections
elif any(mode_channel.data == 51):
event.add_tag('CALIBRATION')
# for Tuesday maintenance as well as hardware/software failures that must be fixed
elif any(mode_channel.data == 53) or any(mode_channel.data == 52):
event.add_tag('MAINTENANCE')
else:
logger.info('IFO mode was not in commissioning, calibration, or maintenance.')
import numpy as np
from gwpy.segments import Segment
from .. import logger
from .. import config
from .. import data
##############################################
def initial_alignment_check(event):
"""Checks to see if an initial alignment is needed based off the output of the ITM green cameras"""
if config.IFO == 'L1':
logger.info("followup is not configured or not applicable to LLO.")
return
if event.transition_index[0] > config.INIT_GRD_STATE[0]:
logger.info('Guardian state is too high to check for initial alignment.')
return
mod_window = [config.INITIAL_ALIGNMENT_SEARCH_WINDOW[0], config.INITIAL_ALIGNMENT_SEARCH_WINDOW[1]]
segment = Segment(mod_window).shift(int(event.gps))
itm_channels = data.fetch(config.ITM_CHANNELS, segment)
dof1_wfs = data.fetch(config.ALS_WFS_DOF1, segment)
dof2_wfs = data.fetch(config.ALS_WFS_DOF2, segment)
dof3_wfs = data.fetch(config.ALS_WFS_DOF3, segment)
# scale each set of WFS signals by its DOF scale factor and stack them
# (the original per-buffer loops kept only the last channel of each list)
dof1 = np.array([np.abs(buf.data) * config.DOF_1_SCALE for buf in dof1_wfs])
dof2 = np.array([np.abs(buf.data) * config.DOF_2_SCALE for buf in dof2_wfs])
dof3 = np.array([np.abs(buf.data) * config.DOF_3_SCALE for buf in dof3_wfs])
initial_alignment = False
wfs_arr = np.concatenate((dof1, dof2, dof3), axis=0)
wfs_max = np.amax(wfs_arr)
if wfs_max < config.WFS_THRESH:
for check_als in itm_channels:
chan_name = check_als.channel
check_als = check_als.data
signal = np.abs(check_als)
signal_max = np.amax(signal)
if signal_max >= config.INITIAL_ALIGNMENT_THRESH:
initial_alignment = True
logger.info('{} is above the threshold.'.format(chan_name))
if initial_alignment:
event.add_tag('INITIAL_ALIGNMENT')
else:
logger.info('An initial alignment is not needed.')
else:
logger.info('ALS WFS have not yet converged.')
import numpy as np
import matplotlib.pyplot as plt
from gwpy.segments import Segment
from .. import logger
from .. import config
from .. import data
from .. import plotutils
FSS_SEARCH_WINDOW = [-60, 5]
FSS_CHANNELS = [
'{}:PSL-ISS_AOM_DRIVER_MON_OUT_DQ'.format(config.IFO)
]
FSS_THRESH = 0.36
##############################################
def check_iss(event):
"""Checks for ISS AOM.
Checks ISS AOM for counts below threshold and
creates a tag/plot if below said threshold.
"""
plotutils.set_rcparams()
mod_window = [FSS_SEARCH_WINDOW[0], FSS_SEARCH_WINDOW[1]]
segment = Segment(mod_window).shift(int(event.gps))
buf = data.fetch(FSS_CHANNELS, segment)[0]
srate = buf.sample_rate
t = np.arange(segment[0], segment[1], 1/srate)
idxs = (t-event.gps) < -1
if config.IFO == 'L1':
    if np.min(buf.data[idxs]) <= FSS_THRESH:
        event.add_tag('ISS')
    else:
        logger.info('no ISS issue')
else:
    logger.info('tag not configured for LHO')
fig, ax = plt.subplots(1, figsize=(22, 16))
t = np.arange(segment[0], segment[1], 1/srate)
ax.plot(
t-event.gps,
buf.data,
label=buf.channel,
alpha=0.8,
lw=2,
)
if config.IFO == 'L1':
ax.axhline(
FSS_THRESH,
linestyle='--',
color='black',
label='ISS Threshold',
lw=5,
)
ax.grid()
ax.set_xlabel('Time [s] since lock loss at {}'.format(event.gps), labelpad=10)
ax.set_ylabel('Voltage [V]')
ax.legend(loc='best')
ax.set_title('ISS AOM', y=1.04)
ax.set_xlim(segment[0]-event.gps, segment[1]-event.gps)
fig.tight_layout()
fig.savefig(event.path('iss.png'))
import os
import logging
from collections import defaultdict
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from gwpy.segments import Segment
from .. import logger
from .. import config
from .. import data
from .. import plotutils
#################################################
def find_lpy(event):
"""Create length, pitch, and yaw plots
@@ -27,16 +29,15 @@ def find_lpy(event):
infile_csv = 'saturations.csv'
inpath_csv = event.path(infile_csv)
sat_channel = ''
if not os.path.exists(inpath_csv):
    logger.info('No saturating suspensions, plotting ETMX L3 LPY')
    sat_channel = f'{config.IFO}:SUS-ETMX_L3_MASTER_OUT_UR_DQ'
else:
    with open(inpath_csv, 'r') as f:
        first_sat = f.readline()
        if first_sat:
            sat_channel, sat_time = first_sat.split(' ', 1)
# generate the LPY mapping and channels to query from base channel
lpy_map = channel2lpy_coeffs(sat_channel)
@@ -44,7 +45,7 @@
channels = [get_sus_channel(base_channel, corner) for corner in lpy_map['corners']]
for window_type, window in config.PLOT_WINDOWS.items():
logger.info('Making {} LPY plot for {}'.format(window_type, base_channel))
segment = Segment(window).shift(gps)
bufs = data.fetch(channels, segment)
@@ -58,37 +59,43 @@
for title in titles:
for corner, coeff in lpy_map[title].items():
idx = channels.index(get_sus_channel(base_channel, corner))
# If ETMX, make threshold < saturation value so we can see better
if base_channel[0:11] == 'H1:SUS-ETMX':
    threshold = 524288
else:
    threshold = config.get_saturation_threshold(get_sus_channel(base_channel, corner), gps, config.IFO)
lpy[title] += (bufs[idx].data * coeff) / (4 * threshold)
colors = ['#2c7fb8', '#e66101', '#5e3c99']
fig, axs = plt.subplots(3, sharex=True, figsize=(27, 16))
for ax, dof, color, title in zip(axs, lpy.values(), colors, titles):
ax.plot(
t,
dof,
color=color,
label=title,
alpha=0.8,
lw=2,
)
if base_channel[0:11] == 'H1:SUS-ETMX':
label = '524288 cts (sat threshold = 134217728)'
else:
label = 'saturation threshold'
ax.axhline(
1,
color='red',
linestyle='--',
lw=3,
label=label,
)
ax.axhline(
-1,
color='red',
linestyle='--',
lw=3,
)
# offset = window[0] / 6 # window dependent offset from lock loss
offset = -0.5
ax.set_ylim(-1.2, 1.2)
ax.set_xlim(window[0], window[1])
ax.set_xlabel(
@@ -133,43 +140,153 @@ def channel2lpy_coeffs(channel):
if optic in ['ETMX', 'ETMY', 'ITMX', 'ITMY']:
if stage in ['M0']:
F2_gain = -1
F3_gain = 1
lpy_map['length'] = {'F2': -1 * F2_gain, 'F3': -1 * F3_gain}
lpy_map['pitch'] = {'F2': 1 * F2_gain, 'F3': 1 * F3_gain}
lpy_map['yaw'] = {'F2': 1 * F2_gain, 'F3': -1 * F3_gain}
lpy_map['corners'] = ['F2', 'F3']
elif stage in ['L1']:
if config.IFO == 'H1':
if optic in ['ETMX']:
UL_gain = -1
LL_gain = 1
UR_gain = -1
LR_gain = 1
else:
UL_gain = -1
LL_gain = 1
UR_gain = 1
LR_gain = -1
else:
UL_gain = -1
LL_gain = 1
UR_gain = 1
LR_gain = -1
lpy_map['length'] = {'UL': 1 * UL_gain, 'LL': 1 * LL_gain, 'UR': 1 * UR_gain, 'LR': 1 * LR_gain}
lpy_map['pitch'] = {'UL': 1 * UL_gain, 'LL': -1 * LL_gain, 'UR': 1 * UR_gain, 'LR': -1 * LR_gain}
lpy_map['yaw'] = {'UL': -1 * UL_gain, 'LL': -1 * LL_gain, 'UR': 1 * UR_gain, 'LR': 1 * LR_gain}
lpy_map['corners'] = ['UL', 'LL', 'UR', 'LR']
elif stage in ['L2']:
if config.IFO == 'H1':
UL_gain = 1
LL_gain = -1
UR_gain = -1
LR_gain = 1
else:
UL_gain = -1
LL_gain = 1
UR_gain = 1
LR_gain = -1
lpy_map['length'] = {'UL': 1 * UL_gain, 'LL': 1 * LL_gain, 'UR': 1 * UR_gain, 'LR': 1 * LR_gain}
lpy_map['pitch'] = {'UL': 1 * UL_gain, 'LL': -1 * LL_gain, 'UR': 1 * UR_gain, 'LR': -1 * LR_gain}
lpy_map['yaw'] = {'UL': -1 * UL_gain, 'LL': -1 * LL_gain, 'UR': 1 * UR_gain, 'LR': 1 * LR_gain}
lpy_map['corners'] = ['UL', 'LL', 'UR', 'LR']
elif stage in ['L3']:
UL_gain = 1
LL_gain = 1
UR_gain = 1
LR_gain = 1
lpy_map['length'] = {'UL': 1 * UL_gain, 'LL': 1 * LL_gain, 'UR': 1 * UR_gain, 'LR': 1 * LR_gain}
lpy_map['pitch'] = {'UL': 1 * UL_gain, 'LL': -1 * LL_gain, 'UR': 1 * UR_gain, 'LR': -1 * LR_gain}
lpy_map['yaw'] = {'UL': -1 * UL_gain, 'LL': -1 * LL_gain, 'UR': 1 * UR_gain, 'LR': 1 * LR_gain}
lpy_map['corners'] = ['UL', 'LL', 'UR', 'LR']
elif optic in ['MC1', 'MC2', 'MC3', 'PRM', 'PR2', 'PR3', 'SRM', 'SR2', 'SR3']:
if stage in ['M1']:
LF_gain = 1
RT_gain = -1
T2_gain = -1
T3_gain = 1
lpy_map['length'] = {'LF': 1 * LF_gain, 'RT': 1 * RT_gain}
lpy_map['pitch'] = {'T2': 1 * T2_gain, 'T3': -1 * T3_gain}
lpy_map['yaw'] = {'LF': -1 * LF_gain, 'RT': 1 * RT_gain}
lpy_map['corners'] = ['T2', 'T3', 'LF', 'RT']
elif stage in ['M2', 'M3']:
if config.IFO == 'L1':
UL_gain = -1
LL_gain = 1
UR_gain = 1
LR_gain = -1
if config.IFO == 'H1':
UL_gain = 1
LL_gain = -1
UR_gain = -1
LR_gain = 1
lpy_map['length'] = {'UL': 1 * UL_gain, 'LL': 1 * LL_gain, 'UR': 1 * UR_gain, 'LR': 1 * LR_gain}
lpy_map['pitch'] = {'UL': 1 * UL_gain, 'LL': -1 * LL_gain, 'UR': 1 * UR_gain, 'LR': -1 * LR_gain}
lpy_map['yaw'] = {'UL': -1 * UL_gain, 'LL': -1 * LL_gain, 'UR': 1 * UR_gain, 'LR': 1 * LR_gain}
lpy_map['corners'] = ['UL', 'LL', 'UR', 'LR']
elif optic in ['BS']:
if stage in ['M1']:
LF_gain = 1
RT_gain = -1
F2_gain = 1
F3_gain = -1
lpy_map['length'] = {'LF': 1 * LF_gain, 'RT': 1 * RT_gain}
lpy_map['pitch'] = {'F2': -1 * F2_gain, 'F3': -1 * F3_gain}
lpy_map['yaw'] = {'LF': -1 * LF_gain, 'RT': 1 * RT_gain}
lpy_map['corners'] = ['F2', 'F3']
elif stage in ['M2']:
if config.IFO == 'L1':
UL_gain = -1
LL_gain = 1
UR_gain = 1
LR_gain = -1
if config.IFO == 'H1':
UL_gain = 1
LL_gain = -1
UR_gain = -1
LR_gain = 1
lpy_map['length'] = {'UL': 1 * UL_gain, 'LL': 1 * LL_gain, 'UR': 1 * UR_gain, 'LR': 1 * LR_gain}
lpy_map['pitch'] = {'UL': 1 * UL_gain, 'LL': -1 * LL_gain, 'UR': 1 * UR_gain, 'LR': -1 * LR_gain}
lpy_map['yaw'] = {'UL': -1 * UL_gain, 'LL': -1 * LL_gain, 'UR': 1 * UR_gain, 'LR': 1 * LR_gain}
lpy_map['corners'] = ['UL', 'LL', 'UR', 'LR']
elif optic in ['OMC']:
if stage in ['M1']:
LF_gain = 1
RT_gain = -1
T2_gain = -1
T3_gain = 1
lpy_map['length'] = {'LF': 1 * LF_gain, 'RT': 1 * RT_gain}
lpy_map['pitch'] = {'T2': 1 * T2_gain, 'T3': -1 * T3_gain}
lpy_map['yaw'] = {'LF': -1 * LF_gain, 'RT': 1 * RT_gain}
lpy_map['corners'] = ['T2', 'T3', 'LF', 'RT']
elif optic in ['OM1', 'OM3', 'RM1']:
if stage in ['M1']:
UL_gain = 1
LL_gain = -1
UR_gain = -1
LR_gain = 1
lpy_map['length'] = {'UL': 1 * UL_gain, 'LL': 1 * LL_gain, 'UR': 1 * UR_gain, 'LR': 1 * LR_gain}
lpy_map['pitch'] = {'UL': 1 * UL_gain, 'LL': -1 * LL_gain, 'UR': 1 * UR_gain, 'LR': -1 * LR_gain}
lpy_map['yaw'] = {'UL': -1 * UL_gain, 'LL': -1 * LL_gain, 'UR': 1 * UR_gain, 'LR': 1 * LR_gain}
lpy_map['corners'] = ['UL', 'LL', 'UR', 'LR']
elif optic in ['RM2']:
if stage in ['M1']:
if config.IFO == 'L1':
UL_gain = 1
LL_gain = -1
UR_gain = -1
LR_gain = 1
if config.IFO == 'H1':
UL_gain = -1
LL_gain = 1
UR_gain = 1
LR_gain = -1
lpy_map['length'] = {'UL': 1 * UL_gain, 'LL': 1 * LL_gain, 'UR': 1 * UR_gain, 'LR': 1 * LR_gain}
lpy_map['pitch'] = {'UL': 1 * UL_gain, 'LL': -1 * LL_gain, 'UR': 1 * UR_gain, 'LR': -1 * LR_gain}
lpy_map['yaw'] = {'UL': -1 * UL_gain, 'LL': -1 * LL_gain, 'UR': 1 * UR_gain, 'LR': 1 * LR_gain}
lpy_map['corners'] = ['UL', 'LL', 'UR', 'LR']
elif optic in ['OM2', 'ZM1', 'ZM2']:
if stage in ['M1']:
UL_gain = -1
LL_gain = 1
UR_gain = 1
LR_gain = -1
lpy_map['length'] = {'UL': 1 * UL_gain, 'LL': 1 * LL_gain, 'UR': 1 * UR_gain, 'LR': 1 * LR_gain}
lpy_map['pitch'] = {'UL': 1 * UL_gain, 'LL': -1 * LL_gain, 'UR': 1 * UR_gain, 'LR': -1 * LR_gain}
lpy_map['yaw'] = {'UL': -1 * UL_gain, 'LL': -1 * LL_gain, 'UR': 1 * UR_gain, 'LR': 1 * LR_gain}
lpy_map['corners'] = ['UL', 'LL', 'UR', 'LR']
else:
raise RuntimeError("no LPY map for suspension '{}'.".format(channel))
...
import os
import numpy as np
import matplotlib.pyplot as plt
from gwpy.segments import Segment
@@ -8,8 +9,10 @@ from .. import config
from .. import data
from .. import plotutils
##############################################
def plot_lsc_asc(event):
"""Plot LSC/ASC channels
...
from gwpy.segments import Segment
from .. import config
from .. import data
##################################################
def check_observe(event):
"""Observe status at time of lock loss.
"""
# use transition gps since that doesn't depend on refinement
gps = event.transition_gps
window = [-1, 0]
segment = Segment(*window).shift(gps)
buf = data.fetch([config.IFO+':GRD-IFO_OK'], segment)[0]
if bool(buf.data[0]):
event.add_tag('OBSERVE')
import numpy as np
from gwpy.segments import Segment
from .. import logger
from .. import config
from .. import data
from .. import plotutils
##############################################
def check_omc_dcpd(event):
"""
Check if OMC DCPD is saturating.
Checks the OMC PD overflow channels for values greater than zero, up to one
second before the refined lockloss time, and creates the OMC_DCPD tag if
any are found.
"""
plotutils.set_rcparams()
mod_window = [config.OMC_DCPD_WINDOW[0], config.OMC_DCPD_WINDOW[1]]
segment = Segment(mod_window).shift(int(event.gps))
channels = data.fetch(config.OMC_DCPD_CHANNELS, segment)
saturating = False
srate = channels[0].sample_rate
for buf in channels:
t = np.arange(segment[0], segment[1], 1/srate)
before_loss = buf.data[np.where(t < event.gps-(config.OMC_DCPD_BUFFER))]
if any(before_loss > 0):
saturating = True
if saturating:
event.add_tag('OMC_DCPD')
else:
logger.info('OMC DCPD not saturating')
import logging
import itertools
import numpy as np
import matplotlib.pyplot as plt
from gwpy.segments import Segment, SegmentList
from gwpy.segments import DataQualityFlag, DataQualityDict
from .. import logger
from .. import config
from .. import data
#################################################
def find_overflows(event):
"""Create ADC overflow plots centered around the lock loss time.
@@ -24,7 +27,7 @@ def find_overflows(event):
plt.rcParams['xtick.labelsize'] = 16
plt.rcParams['ytick.labelsize'] = 16
logger.info('acquiring ADC overflow data')
segment = Segment(config.PLOT_WINDOWS['WIDE']).shift(gps)
all_bufs = data.fetch(generate_all_overflow_channels(), segment)
@@ -34,7 +37,7 @@ def find_overflows(event):
segment = Segment(window).shift(gps)
for subsystem in config.ADC_OVERFLOWS.keys():
logger.info('plotting {} ADC overflows for {} subsystem'.format(
window_type,
subsystem,
))
@@ -42,7 +45,7 @@ def find_overflows(event):
adc_id = config.ADC_OVERFLOWS[subsystem]['ADC_ID']
channels = generate_overflow_channels(subsystem, bit1)
bufs = [buf for buf in all_bufs if buf.channel in channels]
bufs.sort(key=lambda x: x.channel)
srate = bufs[0].sample_rate
dt = 1/srate
@@ -101,14 +104,13 @@ def find_overflows(event):
fig.savefig(outpath_plot, bbox_inches='tight')
fig.clf()
# aggregate overflows for a specific ADC together
union_name = '{}: ADC number {}'.format(subsystem, bit1)
overflows_overview[union_name] = overflows.union()
overflows_overview[union_name].name = union_name
# overview overflow plot
logger.info('plotting {} ADC overflows overview'.format(window_type))
fig = overflows_overview.plot(
facecolor='darkred',
edgecolor='darkred',
@@ -140,7 +142,9 @@ def find_overflows(event):
fig.clf()
OVERFLOW_TEMPLATE = '{}:FEC-{}_ADC_OVERFLOW_{}_{}'
def generate_overflow_channels(subsystem, bit1):
ifo = config.IFO
adc_id = config.ADC_OVERFLOWS[subsystem]['ADC_ID']
...
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from gwpy.segments import Segment
from gwpy.timeseries import TimeSeries
from .. import logger
from .. import config
from .. import data
from .. import plotutils
ASD_CHANNEL = [
f'{config.IFO}:OMC-PI_DCPD_64KHZ_AHF_DQ'
]
ASD_WINDOW = [-(8*60), 0]
##############################################
def check_pi(event):
"""Checks OMC downconverted signals 2, 6, 7 (H1) or
BAND 1-7 log channels (L1) for PIs.
Checks if PI band goes above threshold and
adds tag if above a certain threshold.
"""
plotutils.set_rcparams()
mod_window = [config.PI_SEARCH_WINDOW[0], config.PI_SEARCH_WINDOW[1]]
segment = Segment(mod_window).shift(int(event.gps))
PI_channels = data.fetch(config.PI_CHANNELS, segment)
mod_window_asd = [ASD_WINDOW[0], ASD_WINDOW[1]]
segment_asd = Segment(mod_window_asd).shift(int(event.gps))
ASD_data = data.fetch(ASD_CHANNEL, segment_asd)[0]
asd_timeseries = TimeSeries(
ASD_data.data,
t0=segment_asd[0],
dt=1/ASD_data.sample_rate
)
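# compare ASDs from two 66 s stretches of the 8 min window: its start
# (~8 min before lockloss) and its end (right before lockloss)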
time_frame = 33 * 2
fft_length = 16
overlap = fft_length / 2
before_pi = asd_timeseries.times.value - event.gps < ASD_WINDOW[0] + time_frame
during_pi = asd_timeseries.times.value - event.gps > -time_frame
asd_before = asd_timeseries[before_pi].asd(fftlength=fft_length, overlap=overlap)
asd_during = asd_timeseries[during_pi].asd(fftlength=fft_length, overlap=overlap)
saturating = False
for buf in PI_channels:
srate = buf.sample_rate
t = np.arange(segment[0], segment[1], 1/srate)
idxs = ((t-event.gps) < -2) & ((t-event.gps) > -20)
if np.max(buf.data[idxs]) > config.PI_SAT_THRESH:
saturating = True
if saturating:
    if config.IFO == 'H1':
        if event.transition_index[0] >= 600:
            logger.info('PI found')
            event.add_tag('PI_MONITOR')
    if config.IFO == 'L1':
        if event.transition_index[0] >= 1300:
            event.add_tag('PI_MONITOR')
else:
    logger.info('no PI')
gs = gridspec.GridSpec(2, 4)
gs.update(wspace=0.5)
fig = plt.figure(figsize=(22*3, 16*3))
ax1 = fig.add_subplot(gs[0, :2])
ax2 = fig.add_subplot(gs[0, 2:])
ax3 = fig.add_subplot(gs[1, :])
for idx, buf in enumerate(PI_channels):
srate = buf.sample_rate
t = np.arange(segment[0], segment[1], 1/srate)
# Plot all OMC PI channels on top two plots
for ax in [ax1, ax2]:
ax.plot(
t-event.gps,
buf.data,
label=f'{buf.channel[3:]}: {config.PI_DICT[buf.channel]}',
alpha=0.8,
lw=2,
)
# Configuring top two plots
for ax in [ax1, ax2]:
# Add dotted thresholds to the plots
ax.axhline(
config.PI_SAT_THRESH,
linestyle='--',
color='black',
label='PI threshold',
lw=5,
)
ax.axhline(
-config.PI_SAT_THRESH,
linestyle='--',
color='black',
lw=5,
)
ax.set_xlabel(f'Time [s] since lock loss at {event.gps}', labelpad=10)
ax.set_ylabel(config.PI_YLABEL)
ax.set_ylim(config.PI_YLIMS[0], config.PI_YLIMS[1])
ax.legend(loc='best')
ax.set_title('PI BLRMS Monitors')
ax.grid()
ax1.set_xlim(t[0]-event.gps, t[-1]-event.gps)
ax2.set_xlim(-20, 5)
ax3.plot(
asd_during.frequencies / 1000,
asd_during,
linewidth=1,
color='red',
label='Right before lockloss'
)
ax3.plot(
asd_before.frequencies / 1000, asd_before,
linewidth=1,
color='lightsteelblue',
label='8 min before lockloss'
)
ax3.set_yscale('log')
ax3.set_xscale('log')
ax3.set_xlim(5, 32)
ax3.set_title(f'ASD of {ASD_data.channel}')
ax3.set_ylabel('Magnitude')
ax3.set_xlabel('Frequency [kHz]')
ax3.legend(loc='best')
ax3.set_xticks(
np.arange(5, 32),
labels=['' if x % 5 != 0 else x for x in np.arange(5, 32)]
)
ax3.grid()
outfile_plot = 'PI_monitor.png'
outpath_plot = event.path(outfile_plot)
fig.savefig(outpath_plot, bbox_inches='tight')
import logging
import numpy as np
import matplotlib.pyplot as plt
from gwpy.segments import Segment
from .. import logger
from .. import config
from .. import data
##################################################
def plot_indicators(event, params, refined_gps=None, threshold=None):
"""Create graphs showing indicators of refined time.
"""
t0 = event.transition_gps
trans = event.transition_index
@@ -29,7 +34,7 @@ def plot_indicators(event, params, refined_gps=None, threshold=None):
isc_loss_time = event.transition_gps - t0
for window_type, window in config.REFINE_PLOT_WINDOWS.items():
logger.info('Making {} indicator plot'.format(window_type))
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
@@ -41,12 +46,7 @@ def plot_indicators(event, params, refined_gps=None, threshold=None):
# plot indicator
color = 'blue'
label = params['CHANNEL']
ax1.plot(t_ind-t0, y_ind, label=label,
color=color, alpha=0.8, linewidth=2)
ax1.grid(True)
ax1.set_ylabel(params['CHANNEL'])
@@ -76,9 +76,8 @@ def plot_indicators(event, params, refined_gps=None, threshold=None):
textcoords='offset points',
horizontalalignment='left',
verticalalignment=valign,
bbox=dict(boxstyle="round", fc="w", ec="green", alpha=0.95),
)
# add line for refined time
if refined_gps:
@@ -104,17 +103,13 @@ def plot_indicators(event, params, refined_gps=None, threshold=None):
horizontalalignment='center',
verticalalignment='bottom',
bbox=dict(boxstyle="round", fc="w", ec="red", alpha=0.95),
)
ax1.set_xlim(min(atx-1, window[0]), window[1])
# plot guardian
color = 'orange'
label = config.GRD_STATE_N_CHANNEL
ax2.plot(t_grd-t0, y_grd, label=label,
color=color, linewidth=2)
ax2.set_yticks(trans)
ylim = ax2.get_ylim()
@@ -124,80 +119,104 @@ def plot_indicators(event, params, refined_gps=None, threshold=None):
ax2.axhline(
trans[0],
label='state before lockloss',
color='orange',
linestyle='--',
linewidth=2,
)
ax2.axvline(
isc_loss_time,
label='lockloss time',
color='orange',
linestyle='--',
linewidth=2,
)
ax2.set_xlim(min(atx-1, window[0]), window[1])
fig.suptitle("Lock loss refinement for {}".format(event.id))
outfile = 'indicators_{}.png'.format(window_type)
outpath = event.path(outfile)
logger.debug(outpath)
fig.savefig(outpath)
def find_transition(channel, segment, std_threshold, max_time, minimum=None):
    """Find transition in channel

    `segment` is the time segment to search, `std_threshold` is the % std
    from mean defining the transition threshold, `max_time` is the latest
    GPS time the refined time may take (normally the event.gps lockloss
    time from the GRD state transition), and `minimum` is an optional
    minimum value that the channel will be checked against.

    Returns a `threshold`, `refined_gps` tuple.

    """
    refined_gps = None
    buf = data.fetch(channel, segment)[0]
# calculate mean and std using first 5 seconds of window
samples = int(buf.sample_rate * 5)
lock_data = buf.data[:samples]
lock_mean = np.mean(lock_data)
lock_stdd = np.std(lock_data - lock_mean, ddof=1)
logging.info("locked mean/stddev: {}/{}".format(lock_mean, lock_stdd))
logger.info("locked mean/stddev: {}/{}".format(lock_mean, lock_stdd))
# lock loss threshold is when moves past % threshold of std
threshold = lock_stdd * std_threshold + lock_mean
if std_threshold > 0:
threshold = min(threshold, max(buf.data))
else:
threshold = max(threshold, min(buf.data))
logging.info("threshold: {}".format(threshold))
logger.info("threshold: {}".format(threshold))
# if the mean is less than the nominal full lock value then abort,
# as this isn't a clean lock
if minimum and lock_mean < minimum:
logger.info("channel mean below minimum, unable to resolve time")
else:
if std_threshold > 0:
inds = np.where((buf.data > threshold) & (buf.tarray < max_time))[0]
else:
inds = np.where((buf.data < threshold) & (buf.tarray < max_time))[0]
if inds.any():
ind = np.min(inds)
refined_gps = buf.tarray[ind]
logging.info("refined time: {}".format(refined_gps))
logger.info("refined time: {}".format(refined_gps))
else:
logging.info("no threshold crossings, unable to resolve time")
logger.info("no threshold crossings, unable to resolve time")
return threshold, refined_gps
def refine_time(event):
"""Refine lock loss event time.
Find indicator channel to use, refine time with find_transition.
Create tag and graphs and add refined time to event.
"""
# find indicator channel to use based on guardian state
for index, params in sorted(config.INDICATORS.items(), reverse=True):
if event.transition_index[0] >= index:
break
window = config.REFINE_WINDOW
segment = Segment(*window).shift(event.transition_gps)
logger.info("refining time using channel: {}".format(params['CHANNEL']))
# find threshold and refined time using indicator channel
threshold, refined_gps = find_transition(
[params['CHANNEL'], ],
segment,
params['THRESHOLD'],
event.gps,
params['MINIMUM'],
)
if not refined_gps:
ts = 'nan'
@@ -208,10 +227,31 @@ def refine_time(event):
with open(event.path('refined_gps'), 'w') as f:
f.write('{}\n'.format(ts))
logging.info("plotting indicators...")
logger.info("plotting indicators...")
plot_indicators(
event,
params,
refined_gps=refined_gps,
threshold=threshold,
)
# if the refined time came from channel 'H1:ASC-AS_A_DC_NSUM_OUT_DQ',
# check whether 'H1:IMC-TRANS_OUT_DQ' loses lock at the same time.
if params['CHANNEL'] != 'H1:ASC-AS_A_DC_NSUM_OUT_DQ':
return
IMC_params = config.INDICATORS[0]
logger.info("refining time using channel: {}".format(IMC_params['CHANNEL']))
# find threshold and refined time using indicator channel
IMC_threshold, IMC_refined_gps = find_transition(
[IMC_params['CHANNEL'], ],
segment,
IMC_params['THRESHOLD'],
event.gps + 1,
IMC_params['MINIMUM'],
)
if IMC_refined_gps and refined_gps:
if IMC_refined_gps <= refined_gps + (50 * 10**-3):
logger.info("IMC refined time within 50ms of AS_A refined time.")
event.add_tag('IMC')
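# Example (illustrative): an AS_A refined time of 1370000000.000 and an IMC
# refined time of 1370000000.030 differ by 30 ms, within the 50 ms
# coincidence window, so the IMC tag would be added.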
from gwpy.segments import Segment
from .. import logger
from .. import config
from .. import data
WINDOW = [-3, 0]
CHANNEL = ['{}:GRD-SEI_BS_STATE_N'.format(config.IFO)]
##############################################
def check_sei_bs(event):
"""Checks if SEI_BS Guardian node is in transition state during lockloss and
creates a tag."""
if config.IFO == 'L1':
logger.info("followup is not configured or not applicable to LLO.")
return
mod_window = [WINDOW[0], WINDOW[1]]
segment = Segment(mod_window).shift(int(event.gps))
stage_2_channel = data.fetch(CHANNEL, segment)[0]
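# a value of -19 in GRD-SEI_BS_STATE_N is read here as the node being
# mid-transition (hence the SEI_BS_TRANS tag below)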
if any(stage_2_channel.data == -19):
event.add_tag('SEI_BS_TRANS')
else:
logger.info('SEI BS was not transitioning during lockloss')