diff --git a/.flake8 b/.flake8
index 9ea8d376b11f1e5217f9ae8bce77f626ccbd2a90..403152565e95fdd4f257628ca88c5066feff3ba2 100644
--- a/.flake8
+++ b/.flake8
@@ -1,6 +1,7 @@
 ; vim: set ft=dosini :
 
 [flake8]
+max-line-length = 120
 select =
 	# pycodestyle errors
 	E,
@@ -11,7 +12,7 @@ select =
 	# pycodestyle warnings
 	W,
 ignore =
-	# line break before binary operator, use W504
+	# line break before binary operator
 	W503,
 exclude =
 	__pycache__,
diff --git a/glue/LDBDWClient.py b/glue/LDBDWClient.py
index 24cf673515ff041614addf2191cee4f3e33a85eb..048365178dd5c89540e685585410e90d21576975 100644
--- a/glue/LDBDWClient.py
+++ b/glue/LDBDWClient.py
@@ -335,7 +335,7 @@ class LDBDClient(object):
     else:
         msg = "Insecure connection DOES NOT surpport INSERT."
         msg += '\nTo INSERT, authorized users please specify protocol "https" in your --segment-url argument.'
-        msg += '\nFor example, "--segment-url https://segdb.ligo.caltech.edu".'
+        msg += '\nFor example, "--segment-url https://segments.ligo.org".'
         raise LDBDClientException(msg)
 
     url = "/ldbd/insert.json"
@@ -371,7 +371,7 @@ class LDBDClient(object):
     else:
         msg = "Insecure connection DOES NOT surpport INSERTMAP."
         msg += '\nTo INSERTMAP, authorized users please specify protocol "https" in your --segment-url argument.'
-        msg += '\nFor example, "--segment-url https://segdb.ligo.caltech.edu".'
+        msg += '\nFor example, "--segment-url https://segments.ligo.org".'
         raise LDBDClientException(msg)
 
     url = "/ldbd/insertmap.json"
@@ -411,7 +411,7 @@ class LDBDClient(object):
     else:
         msg = "Insecure connection DOES NOT surpport INSERTDMT."
         msg += '\nTo INSERTDMT, authorized users please specify protocol "https" in your --segment-url argument.'
-        msg += '\nFor example, "--segment-url https://segdb.ligo.caltech.edu".'
+        msg += '\nFor example, "--segment-url https://segments.ligo.org".'
         raise LDBDClientException(msg)
 
     url = "/ldbd/insertdmt.json"
diff --git a/glue/gpstime.py b/glue/gpstime.py
index 9273d66b9075ac81d46c69b8728337754f49e3b3..e25ec7c8571d04ec9cd91137db6fd78905c9c9d6 100644
--- a/glue/gpstime.py
+++ b/glue/gpstime.py
@@ -39,14 +39,17 @@ as compared to some algorigthms found in the literature and on the web.
 """
 
 from __future__ import print_function
-__author__ = 'Duncan Brown <duncan@gravity.phys.uwm.edu>'
+
+import math
+import time
+import warnings
+
 from glue import git_version
+
+__author__ = 'Duncan Brown <duncan@gravity.phys.uwm.edu>'
 __date__ = git_version.date
 __version__ = git_version.id
 
-import time, math
-import warnings
-
 warnings.warn(
     "glue.gpstime is no longer maintained and will be removed in "
     "a future release of lscsoft-glue, please migrate your workflow "
@@ -58,38 +61,43 @@ secsInWeek = 604800
 secsInDay = 86400
 gpsEpoch = (1980, 1, 6, 0, 0, 0)  # (year, month, day, hh, mm, ss)
 
+
 def dayOfWeek(year, month, day):
     "returns day of week: 0=Sun, 1=Mon, .., 6=Sat"
-    hr = 12  #make sure you fall into right day, middle is save
+    hr = 12  # make sure you fall into the right day; the middle of the day is safe
     t = time.mktime((year, month, day, hr, 0, 0.0, 0, 0, -1))
     pyDow = time.localtime(t)[6]
     gpsDow = (pyDow + 1) % 7
     return gpsDow
 
+
 def gpsWeek(year, month, day):
     "returns (full) gpsWeek for given date (in UTC)"
-    hr = 12  #make sure you fall into right day, middle is save
+    hr = 12  # make sure you fall into the right day; the middle of the day is safe
     return gpsFromUTC(year, month, day, hr, 0, 0.0)[0]
 
 
 def julianDay(year, month, day):
     "returns julian day=day since Jan 1 of year"
-    hr = 12  #make sure you fall into right day, middle is save
+    hr = 12  # make sure you fall into the right day; the middle of the day is safe
     t = time.mktime((year, month, day, hr, 0, 0.0, 0, 0, -1))
     julDay = time.localtime(t)[7]
     return julDay
 
+
 def mkUTC(year, month, day, hour, min, sec):
     "similar to python's mktime but for utc"
     spec = [year, month, day, hour, min, sec] + [0, 0, 0]
     utc = time.mktime(spec) - time.timezone
     return utc
 
+
 def ymdhmsFromPyUTC(pyUTC):
     "returns tuple from a python time value in UTC"
     ymdhmsXXX = time.gmtime(pyUTC)
     return ymdhmsXXX[:-3]
 
+
 def wtFromUTCpy(pyUTC, leapSecs=14):
     """convenience function:
          allows to use python UTC times and
@@ -98,6 +106,7 @@ def wtFromUTCpy(pyUTC, leapSecs=14):
     wSowDSoD = gpsFromUTC(*ymdhms + (leapSecs,))
     return wSowDSoD[0:2]
 
+
 def gpsFromUTC(year, month, day, hour, min, sec, leapSecs=14):
     """converts UTC to: gpsWeek, secsOfWeek, gpsDay, secsOfDay
 
@@ -132,9 +141,9 @@ def gpsFromUTC(year, month, day, hour, min, sec, leapSecs=14):
     # Warning:  trouble if daylight savings flag is set to -1 or 1 !!!
     t = t + leapSecs
     tdiff = t - t0
-    gpsSOW = (tdiff % secsInWeek)  + secFract
-    gpsWeek = int(math.floor(tdiff/secsInWeek))
-    gpsDay = int(math.floor(gpsSOW/secsInDay))
+    gpsSOW = (tdiff % secsInWeek) + secFract
+    gpsWeek = int(math.floor(tdiff / secsInWeek))
+    gpsDay = int(math.floor(gpsSOW / secsInDay))
     gpsSOD = (gpsSOW % secsInDay)
     return (gpsWeek, gpsSOW, gpsDay, gpsSOD)
 
@@ -149,26 +158,27 @@ def UTCFromGps(gpsWeek, SOW, leapSecs=14):
     """
     secFract = SOW % 1
     epochTuple = gpsEpoch + (-1, -1, 0)
-    t0 = time.mktime(epochTuple) - time.timezone  #mktime is localtime, correct for UTC
+    t0 = time.mktime(epochTuple) - time.timezone  # mktime is localtime, correct for UTC
     tdiff = (gpsWeek * secsInWeek) + SOW - leapSecs
     t = t0 + tdiff
     (year, month, day, hh, mm, ss, dayOfWeek, julianDay, daylightsaving) = time.gmtime(t)
-    #use gmtime since localtime does not allow to switch off daylighsavings correction!!!
+    # use gmtime since localtime does not allow switching off the daylight-savings correction
     return (year, month, day, hh, mm, ss + secFract)
 
-def GpsSecondsFromPyUTC( pyUTC, leapSecs=14 ):
+
+def GpsSecondsFromPyUTC(pyUTC, leapSecs=14):
     """converts the python epoch to gps seconds
 
     pyEpoch = the python epoch from time.time()
     """
-    t = t=gpsFromUTC(*ymdhmsFromPyUTC( pyUTC ))
+    t = gpsFromUTC(*ymdhmsFromPyUTC(pyUTC))
     return int(t[0] * 60 * 60 * 24 * 7 + t[1])
 
 
-#===== Tests  =========================================
+# ===== Tests  =========================================
 
 def testTimeStuff():
-    print("-"*20)
+    print("-" * 20)
     print()
     print("The GPS Epoch when everything began (1980, 1, 6, 0, 0, 0, leapSecs=0)")
     (w, sow, d, sod) = gpsFromUTC(1980, 1, 6, 0, 0, 0, leapSecs=0)
@@ -188,30 +198,34 @@ def testTimeStuff():
     print("     and hopefully back:")
     print("**** %s, %s, %s, %s, %s, %s\n" % UTCFromGps(w, sow))
 
+
 def testJulD():
     print('2002, 10, 11 -> 284  ==??== ', julianDay(2002, 10, 11))
 
+
 def testGpsWeek():
     print('2002, 10, 11 -> 1187  ==??== ', gpsWeek(2002, 10, 11))
 
+
 def testDayOfWeek():
     print('2002, 10, 12 -> 6  ==??== ', dayOfWeek(2002, 10, 12))
     print('2002, 10, 6  -> 0  ==??== ', dayOfWeek(2002, 10, 6))
 
+
 def testPyUtilties():
     ymdhms = (2002, 10, 12, 8, 34, 12.3)
     print("testing for: ", ymdhms)
     pyUtc = mkUTC(*ymdhms)
-    back =  ymdhmsFromPyUTC(pyUtc)
+    back = ymdhmsFromPyUTC(pyUtc)
     print("yields     : ", back)
-#*********************** !!!!!!!!
-    #assert(ymdhms == back)
-    #! TODO: this works only with int seconds!!! fix!!!
+# *********************** !!!!!!!!
+    # assert(ymdhms == back)
+    # ! TODO: this works only with int seconds!!! fix!!!
     (w, t) = wtFromUTCpy(pyUtc)
-    print("week and time: ", (w,t))
+    print("week and time: ", (w, t))
 
 
-#===== Main =========================================
+# ===== Main =========================================
 if __name__ == "__main__":
     pass
     testTimeStuff()
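
Reviewer note: a minimal round-trip sketch of the glue.gpstime helpers touched
above (assumes lscsoft-glue is importable; leapSecs keeps its default of 14,
as in the module):

    import time
    from glue.gpstime import GpsSecondsFromPyUTC, UTCFromGps, gpsFromUTC

    # UTC -> (gpsWeek, secsOfWeek, gpsDay, secsOfDay) and back again
    w, sow, d, sod = gpsFromUTC(2002, 10, 12, 8, 34, 12.0)
    print(UTCFromGps(w, sow))                 # -> (2002, 10, 12, 8, 34, 12.0)

    # with the stray "t = t=" assignment fixed, this returns GPS seconds for "now"
    print(GpsSecondsFromPyUTC(time.time()))
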
diff --git a/glue/lal.py b/glue/lal.py
index bb510d6bf757bcff9d1b5081ecd2b7af73ffb22a..8ee16f8c750714a1790d022a10934d331c6a69b1 100644
--- a/glue/lal.py
+++ b/glue/lal.py
@@ -339,7 +339,7 @@ class LIGOTimeGPS(object):
 			seconds += 1
 			nanoseconds -= 1000000000
 		elif seconds > 0 and nanoseconds < 0:
-			seconds -=1
+			seconds -= 1
 			nanoseconds += 1000000000
 
 		slo = seconds % 131072
@@ -439,7 +439,7 @@ class Cache(list):
 	"""
 	An object representing a LAL cache file. Currently it is possible to
 	add anything to a Cache. This method should check that the thing you
-	are adding is a CacheEntry and throw and error if it is not.
+	are adding is a CacheEntry and throw an error if it is not.
 	"""
 	entry_class = CacheEntry
 
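
Reviewer note: the "seconds -= 1" fix above sits in the carry step that keeps
seconds and nanoseconds with a consistent sign before packing; a standalone
sketch of that normalisation:

    seconds, nanoseconds = 5, -200000000    # 4.8 s written with mixed signs
    if seconds < 0 and nanoseconds > 0:
        seconds += 1
        nanoseconds -= 1000000000
    elif seconds > 0 and nanoseconds < 0:
        seconds -= 1
        nanoseconds += 1000000000
    print(seconds, nanoseconds)             # -> 4 800000000
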
diff --git a/glue/ldbd.py b/glue/ldbd.py
index 07fd49c161bcf01b3d9406a19aeae6bbfd300591..6d7677f7e5648947916e4e398957ec9081158cfd 100644
--- a/glue/ldbd.py
+++ b/glue/ldbd.py
@@ -17,7 +17,7 @@
 
 """lightweight database dumper
 Copyright (C) 2003 Duncan Brown
-This file is part of the lightweight datapase dumper (ldbd)
+This file is part of the lightweight database dumper (ldbd)
 
 The ldbd module provides classes for manipulating LIGO metadata database
 tables.
@@ -39,251 +39,254 @@ __version__ = git_version.id
 
 
 class LIGOLWStream(csv.Dialect):
-  """
-  Create a csv parser dialect for parsing LIGO_LW streams
-  """
-  delimiter = ','
-  doublequote = False
-  escapechar = '\\'
-  lineterminator = '\n'
-  quotechar = '"'
-  quoting = csv.QUOTE_ALL
-  skipinitialspace = True
+    """
+    Create a csv parser dialect for parsing LIGO_LW streams
+    """
+    delimiter = ','
+    doublequote = False
+    escapechar = '\\'
+    lineterminator = '\n'
+    quotechar = '"'
+    quoting = csv.QUOTE_ALL
+    skipinitialspace = True
+
 
-csv.register_dialect("LIGOLWStream",LIGOLWStream)
+csv.register_dialect("LIGOLWStream", LIGOLWStream)
 
 
 class LIGOLwParseError(Exception):
-  """Error parsing LIGO lightweight XML file"""
-  pass
+    """Error parsing LIGO lightweight XML file"""
+    pass
 
 
 class Xlator(dict):
-  """
-  All in one multiple string substitution class from the python cookbook
-  """
-  def _make_regex(self):
     """
-    Build a re object based on keys in the current dictionary
+    All in one multiple string substitution class from the python cookbook
     """
-    return re.compile("|".join(map(re.escape, self.keys())))
+    def _make_regex(self):
+        """
+        Build a re object based on keys in the current dictionary
+        """
+        return re.compile("|".join(map(re.escape, self.keys())))
 
-  def __call__(self, match):
-    """
-    Handler invoked for each regex match
-    """
-    return self[match.group(0)]
+    def __call__(self, match):
+        """
+        Handler invoked for each regex match
+        """
+        return self[match.group(0)]
 
-  def xlat(self, text):
-    """
-    Translate text, returns the modified text
-    """
-    return self._make_regex().sub(self,text)
+    def xlat(self, text):
+        """
+        Translate text, returns the modified text
+        """
+        return self._make_regex().sub(self, text)
 
 
 class LIGOLwParser:
-  """
-  Provides methods for parsing the data from a LIGO lightweight XML
-  file parsed with pyRXP into a dictionary
-  """
-
-  def __init__(self):
-    """
-    Initializes a LIGO lightweight XML parser with the necessary
-    regular expressions and function for tuple translation
     """
-    self.tabrx = re.compile(r'(\A[a-z0-9_]+:|\A)([a-z0-9_]+):table\Z')
-    self.colrx = re.compile(r'(\A[a-z0-9_]+:|\A)([a-z0-9_]+:|\A)([a-z0-9_]+)\Z')
-    self.llsrx = re.compile(r'\A\s*"')
-    self.rlsrx = re.compile(r'"\s*\Z')
-    self.licrx = re.compile(r'\A\s+"')
-    self.ricrx = re.compile(r'"*\s*\Z')
-    self.octrx = re.compile(r'\A\\[0-9][0-9][0-9]')
-    self.dlmrx = re.compile(r'\\,')
-    self.unique = None
-    self.types = {
-      'int2s' : int,
-      'int_2s' : int,
-      'int4s' : int,
-      'int_4s' : int,
-      'int8s' : int,
-      'int_8s' : int,
-      'real4' : float,
-      'real_4' : float,
-      'real8' : float,
-      'real_8' : float,
-      'lstring' : self.__lstring,
-      'ilwd:char' : self.__ilwdchar,
-      'ilwd:char_u' : self.__ilwdchar
-    }
-    self.xmltostr = Xlator({ r'&amp;' : r'&', r'&gt;' : r'>', r'&lt;' : r'<','\\\\' : '\\'}) # Note: see https://www.gravity.phy.syr.edu/dokuwiki/doku.php?id=rpfisher:gluebughunt if this is confusing, the parser just cleanly handles the conversion of everything
-
-  def __lstring(self,lstr):
+    Provides methods for parsing the data from a LIGO lightweight XML
+    file parsed with pyRXP into a dictionary
     """
-    Returns a parsed lstring by stripping out and instances of
-    the escaped delimiter. Sometimes the raw lstring has whitespace
-    and a double quote at the beginning or end. If present, these
-    are removed.
-    """
-    lstr = self.llsrx.sub('',lstr)
-    lstr = self.rlsrx.sub('',lstr)
-    lstr = self.xmltostr.xlat(lstr)
-    lstr = self.dlmrx.sub(',',lstr)
-    return lstr
 
-  def __ilwdchar(self,istr):
-    """
-    If the ilwd:char field contains octal data, it is translated
-    to a binary string and returned. Otherwise a lookup is done
-    in the unique id dictionary and a binary string containing the
-    correct unique id is returned.
-    """
-    istr_orig = istr
-    istr = self.licrx.sub('',istr)
-    istr = self.ricrx.sub('',istr)
-    if self.octrx.match(istr):
-      exec("istr = '"+istr+"'")
-    else:
-      try:
-        istr = self.unique.lookup(istr)
-      except AttributeError:
-        if not self.unique:
-          istr = istr_orig
+    def __init__(self):
+        """
+        Initializes a LIGO lightweight XML parser with the necessary
+        regular expressions and function for tuple translation
+        """
+        self.tabrx = re.compile(r'(\A[a-z0-9_]+:|\A)([a-z0-9_]+):table\Z')
+        self.colrx = re.compile(r'(\A[a-z0-9_]+:|\A)([a-z0-9_]+:|\A)([a-z0-9_]+)\Z')
+        self.llsrx = re.compile(r'\A\s*"')
+        self.rlsrx = re.compile(r'"\s*\Z')
+        self.licrx = re.compile(r'\A\s+"')
+        self.ricrx = re.compile(r'"*\s*\Z')
+        self.octrx = re.compile(r'\A\\[0-9][0-9][0-9]')
+        self.dlmrx = re.compile(r'\\,')
+        self.unique = None
+        self.types = {
+            'int2s': int,
+            'int_2s': int,
+            'int4s': int,
+            'int_4s': int,
+            'int8s': int,
+            'int_8s': int,
+            'real4': float,
+            'real_4': float,
+            'real8': float,
+            'real_8': float,
+            'lstring': self.__lstring,
+            'ilwd:char': self.__ilwdchar,
+            'ilwd:char_u': self.__ilwdchar
+        }
+        # Note: see https://www.gravity.phy.syr.edu/dokuwiki/doku.php?id=rpfisher:gluebughunt if this is confusing, the parser just cleanly handles the conversion of everything   # noqa: E501
+        self.xmltostr = Xlator({ r'&amp;' : r'&', r'&gt;' : r'>', r'&lt;' : r'<','\\\\' : '\\'})   # noqa: E201,E202,E203,E231,E501
+
+    def __lstring(self, lstr):
+        """
+        Returns a parsed lstring by stripping out any instances of
+        the escaped delimiter. Sometimes the raw lstring has whitespace
+        and a double quote at the beginning or end. If present, these
+        are removed.
+        """
+        lstr = self.llsrx.sub('', lstr)
+        lstr = self.rlsrx.sub('', lstr)
+        lstr = self.xmltostr.xlat(lstr)
+        lstr = self.dlmrx.sub(',', lstr)
+        return lstr
+
+    def __ilwdchar(self, istr):
+        """
+        If the ilwd:char field contains octal data, it is translated
+        to a binary string and returned. Otherwise a lookup is done
+        in the unique id dictionary and a binary string containing the
+        correct unique id is returned.
+        """
+        istr_orig = istr
+        istr = self.licrx.sub('', istr)
+        istr = self.ricrx.sub('', istr)
+        if self.octrx.match(istr):
+            exec("istr = '" + istr + "'")
         else:
-          raise LIGOLwParseError('unique id table has not been initialized')
-    return istr
-
-  def parsetuple(self,xmltuple):
-    """
-    Parse an XML tuple returned by pyRXP into a dictionary
-    of LIGO metadata elements. The dictionary contains one
-    entry for each table found in the XML tuple.
-    """
-    # first extract all the table and columns from the tuple from the
-    # children of the ligo lightweight parent tuple
-    table = {}
-    tupleidx = 0
-    for tag in xmltuple[2]:
-      if tag[0] == 'Table' or tag[0] == 'table':
-        tab = tag[1]['Name'].lower()
-        try:
-          tab = self.tabrx.match(tab).group(2)
-        except AttributeError:
-          raise LIGOLwParseError('unable to parse a valid table name '+tab)
-        # initalize the table dictionary for this table
-        table[tab] = {
-          'pos' : tupleidx,
-          'column' : {},
-          'stream' : (),
-          'query' : ''
-          }
-        # parse for columns in the tables children
-        # look for the column name and type in the attributes
-        # store the index in which the columns were found as
-        # we need this to decode the stream later
-        for subtag in tag[2]:
-          if subtag[0] == 'Column' or subtag[0] == 'column':
-            col = subtag[1]['Name'].lower()
             try:
-              col = self.colrx.match(col).group(3)
+                istr = self.unique.lookup(istr)
             except AttributeError:
-              raise LIGOLwParseError('unable to parse a valid column name '+col)
-            try:
-              typ = subtag[1]['Type'].lower()
-            except KeyError:
-              raise LIGOLwParseError('type is missing for column '+col)
-            table[tab]['column'][col] = typ
-            table[tab].setdefault('orderedcol',[]).append(col)
-      tupleidx += 1
-
-    # now iterate the dictionary of tables we have created looking for streams
-    for tab in table.keys():
-      for tag in xmltuple[2][table[tab]['pos']][2]:
-        if tag[0] == 'Stream' or tag[0] == 'stream':
-          # store the stream delimiter and create the esacpe regex
-          try:
-            delim = tag[1]['Delimiter']
-          except KeyError:
-            raise LIGOLwParseError('stream is missing delimiter')
-          if delim != ',':
-            raise LIGOLwParseError('unable to handle stream delimiter: '+delim)
-
-          # If the result set is empty tag[2] is an empty array, which causes
-          # the next step to fail.  Add an empty string in this case.
-          if len(tag[2]) == 0:
-            tag[2].append("")
-
-          # if the final row of the table ends with a null entry, there will
-          # be an extra delimeter at the end of the stream,
-          # so we need to strip that out;
-          # see glue/ligolw/table.py#L469-L471 (as of glue-release-2.0.0)
-          raw = tag[2][0]
-          if raw.endswith(delim+delim):
-              raw = raw[:-len(delim)]
-
-          # strip newlines from the stream and parse it
-          stream = next(csv.reader([re.sub(r'\n','',raw)],LIGOLWStream))
-
-          # turn the csv stream into a list of lists
-          slen = len(stream)
-          ntyp = len(table[tab]['column'])
-          mlen, lft = divmod(slen,ntyp)
-          if lft != 0:
-            raise LIGOLwParseError('invalid stream length for given columns')
-          lst = [[None] * ntyp for i in range(mlen)]
-
-          # translate the stream data to the correct data types
-          for i in range(slen):
-            j, k = divmod(i,ntyp)
-            try:
-              thiscol = table[tab]['orderedcol'][k]
-              if len( stream[i] ) == 0:
-                lst[j][k] = None
-              else:
-                lst[j][k] = self.types[table[tab]['column'][thiscol]](stream[i])
-            except (KeyError, ValueError) as errmsg:
-              msg = "stream translation error (%s) " % str(errmsg)
-              msg += "for column %s in table %s: %s -> %s" \
-                % (tab,thiscol,stream[i],str(table[tab]))
-              raise LIGOLwParseError(msg)
-          table[tab]['stream'] = list(map(tuple,lst))
-
-    # return the created table to the caller
-    return table
+                if not self.unique:
+                    istr = istr_orig
+                else:
+                    raise LIGOLwParseError('unique id table has not been initialized')
+        return istr
+
+    def parsetuple(self, xmltuple):
+        """
+        Parse an XML tuple returned by pyRXP into a dictionary
+        of LIGO metadata elements. The dictionary contains one
+        entry for each table found in the XML tuple.
+        """
+        # first extract all the tables and columns from the
+        # children of the ligo lightweight parent tuple
+        table = {}
+        tupleidx = 0
+        for tag in xmltuple[2]:
+            if tag[0] == 'Table' or tag[0] == 'table':
+                tab = tag[1]['Name'].lower()
+                try:
+                    tab = self.tabrx.match(tab).group(2)
+                except AttributeError:
+                    raise LIGOLwParseError('unable to parse a valid table name ' + tab)
+                # initialize the table dictionary for this table
+                table[tab] = {
+                    'pos': tupleidx,
+                    'column': {},
+                    'stream': (),
+                    'query': ''
+                }
+                # parse for columns in the table's children
+                # look for the column name and type in the attributes
+                # store the index in which the columns were found, as
+                # we need this to decode the stream later
+                for subtag in tag[2]:
+                    if subtag[0] == 'Column' or subtag[0] == 'column':
+                        col = subtag[1]['Name'].lower()
+                        try:
+                            col = self.colrx.match(col).group(3)
+                        except AttributeError:
+                            raise LIGOLwParseError('unable to parse a valid column name ' + col)
+                        try:
+                            typ = subtag[1]['Type'].lower()
+                        except KeyError:
+                            raise LIGOLwParseError('type is missing for column ' + col)
+                        table[tab]['column'][col] = typ
+                        table[tab].setdefault('orderedcol', []).append(col)
+            tupleidx += 1
+
+        # now iterate the dictionary of tables we have created looking for streams
+        for tab in table.keys():
+            for tag in xmltuple[2][table[tab]['pos']][2]:
+                if tag[0] == 'Stream' or tag[0] == 'stream':
+                    # store the stream delimiter and create the escape regex
+                    try:
+                        delim = tag[1]['Delimiter']
+                    except KeyError:
+                        raise LIGOLwParseError('stream is missing delimiter')
+                    if delim != ',':
+                        raise LIGOLwParseError('unable to handle stream delimiter: ' + delim)
+
+                    # If the result set is empty, tag[2] is an empty array, which causes
+                    # the next step to fail.  Add an empty string in this case.
+                    if len(tag[2]) == 0:
+                        tag[2].append("")
+
+                    # if the final row of the table ends with a null entry, there will
+                    # be an extra delimiter at the end of the stream,
+                    # so we need to strip that out;
+                    # see glue/ligolw/table.py#L469-L471 (as of glue-release-2.0.0)
+                    raw = tag[2][0]
+                    if raw.endswith(delim + delim):
+                        raw = raw[:-len(delim)]
+
+                    # strip newlines from the stream and parse it
+                    stream = next(csv.reader([re.sub(r'\n', '', raw)], LIGOLWStream))
+
+                    # turn the csv stream into a list of lists
+                    slen = len(stream)
+                    ntyp = len(table[tab]['column'])
+                    mlen, lft = divmod(slen, ntyp)
+                    if lft != 0:
+                        raise LIGOLwParseError('invalid stream length for given columns')
+                    lst = [[None] * ntyp for i in range(mlen)]
+
+                    # translate the stream data to the correct data types
+                    for i in range(slen):
+                        j, k = divmod(i, ntyp)
+                        try:
+                            thiscol = table[tab]['orderedcol'][k]
+                            if len(stream[i]) == 0:
+                                lst[j][k] = None
+                            else:
+                                lst[j][k] = self.types[table[tab]['column'][thiscol]](stream[i])
+                        except (KeyError, ValueError) as errmsg:
+                            msg = "stream translation error (%s) " % str(errmsg)
+                            msg += "for column %s in table %s: %s -> %s" \
+                % (thiscol, tab, stream[i], str(table[tab]))
+                            raise LIGOLwParseError(msg)
+                    table[tab]['stream'] = list(map(tuple, lst))
+
+        # return the created table to the caller
+        return table
 
 
 class LIGOMetadata:
-  """
-  LIGO Metadata object class. Contains methods for parsing a LIGO
-  lightweight XML file and inserting it into a database, executing
-  and SQL query to retrive data from the database and writing it
-  to a LIGO lightweight XML file
-  """
-  def __init__(self,xmlparser=None,lwtparser=None):
-    """
-    Connects to the database and creates a cursor. Initializes the unique
-    id table for this LIGO lw document.
-
-    xmlparser = pyRXP XML to tuple parser object
-    lwtparser = LIGOLwParser object (tuple parser)
     """
-    self.xmlparser = xmlparser
-    self.lwtparser = lwtparser
-    if lwtparser:
-      self.lwtparser.unique = None
-    self.table = {}
-    self.strtoxml = Xlator({ r'&' : r'&amp;', r'>' : r'&gt;', r'<' : r'&lt;', '\\' : '\\\\', '\"' : '\\\"' }) # Note: see https://www.gravity.phy.syr.edu/dokuwiki/doku.php?id=rpfisher:gluebughunt if this is confusing, the parser just cleanly handles the conversion of everything
-
-  def parse(self,xml):
-    """
-    Parses an XML document into a form read for insertion into the database
-
-    xml = the xml document to be parsed
+    LIGO Metadata object class. Contains methods for parsing a LIGO
+    lightweight XML file and inserting it into a database, executing
+    an SQL query to retrieve data from the database and writing it
+    to a LIGO lightweight XML file
     """
-    if not self.xmlparser:
-      raise LIGOLwParseError("pyRXP parser not initialized")
-    if not self.lwtparser:
-      raise LIGOLwParseError("LIGO_LW tuple parser not initialized")
-    xml = "".join([x.strip() for x in xml.split('\n')])
-    ligolwtup = self.xmlparser(xml.encode("utf-8"))
-    self.table = self.lwtparser.parsetuple(ligolwtup)
+    def __init__(self, xmlparser=None, lwtparser=None):
+        """
+        Connects to the database and creates a cursor. Initializes the unique
+        id table for this LIGO lw document.
+
+        xmlparser = pyRXP XML to tuple parser object
+        lwtparser = LIGOLwParser object (tuple parser)
+        """
+        self.xmlparser = xmlparser
+        self.lwtparser = lwtparser
+        if lwtparser:
+            self.lwtparser.unique = None
+        self.table = {}
+        # Note: see https://www.gravity.phy.syr.edu/dokuwiki/doku.php?id=rpfisher:gluebughunt if this is confusing, the parser just cleanly handles the conversion of everything   # noqa: E501
+        self.strtoxml = Xlator({ r'&' : r'&amp;', r'>' : r'&gt;', r'<' : r'&lt;', '\\' : '\\\\', '\"' : '\\\"' })   # noqa: E201,E202,E203,E231,E501
+
+    def parse(self, xml):
+        """
+        Parses an XML document into a form ready for insertion into the database
+
+        xml = the xml document to be parsed
+        """
+        if not self.xmlparser:
+            raise LIGOLwParseError("pyRXP parser not initialized")
+        if not self.lwtparser:
+            raise LIGOLwParseError("LIGO_LW tuple parser not initialized")
+        xml = "".join([x.strip() for x in xml.split('\n')])
+        ligolwtup = self.xmlparser(xml.encode("utf-8"))
+        self.table = self.lwtparser.parsetuple(ligolwtup)
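
Reviewer note: a short sketch of the Xlator one-pass substitution reindented
above, using the same entity map the parser installs (assumes glue.ldbd
imports cleanly in your environment):

    from glue.ldbd import Xlator

    xmltostr = Xlator({r'&amp;': r'&', r'&gt;': r'>', r'&lt;': r'<', '\\\\': '\\'})
    print(xmltostr.xlat('a &lt; b &amp;&amp; b &gt; c'))   # -> a < b && b > c
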
diff --git a/glue/pidfile.py b/glue/pidfile.py
index c84949520de515957b433afbb9b95d204de3601f..1efa207c42dbed76e7633e007b66c56f6aad0d97 100644
--- a/glue/pidfile.py
+++ b/glue/pidfile.py
@@ -16,7 +16,7 @@ import glue.utils
 
 # __author__ = 'Peter Couvares <pfcouvar@syr.edu>'
 # __version__ = "git id %s" % git_version.id
-#__date__ = git_version.date
+# __date__ = git_version.date
 
 
 def get_lock(lockfile):
@@ -36,7 +36,7 @@ def get_lock(lockfile):
     # pidfile before reading it, to prevent two daemons from seeing a
     # stale lock at the same time, and both trying to run
     try:
-        fcntl.flock(pidfile.fileno(), fcntl.LOCK_EX|fcntl.LOCK_NB)
+        fcntl.flock(pidfile.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
     except IOError as e:
         raise RuntimeError("failed to lock %s: %s" % (lockfile, e))
 
@@ -49,8 +49,8 @@ def get_lock(lockfile):
             raise RuntimeError("pidfile %s contains pid (%s) of a running "
                                "process" % (lockfile, pidfile_pid))
         else:
-            print ("pidfile %s contains stale pid %s; writing new lock" %
-                   (lockfile, pidfile_pid))
+            print("pidfile %s contains stale pid %s; writing new lock" %
+                  (lockfile, pidfile_pid))
 
     # the pidfile didn't exist or was stale, so grab a new lock
     pidfile.truncate(0)
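
Reviewer note: LOCK_EX | LOCK_NB, as formatted above, is the standard
non-blocking exclusive flock; a minimal sketch (the lock path is illustrative,
not part of glue):

    import fcntl

    with open('/tmp/example.pid', 'w') as pidfile:   # hypothetical lock file
        try:
            # raises IOError at once if another process already holds the lock
            fcntl.flock(pidfile.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
            print('lock acquired')
        except IOError:
            print('lock is held by another process')
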
diff --git a/glue/pipeline.py b/glue/pipeline.py
index 49bd26e00ae80d687d48c5bdfeb748cc6379a989..b3ac6b3344542214b9d959edd7743e56d0910644 100644
--- a/glue/pipeline.py
+++ b/glue/pipeline.py
@@ -20,23 +20,24 @@ this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
 from __future__ import print_function
-__author__ = 'Duncan Brown <duncan@gravity.phys.uwm.edu>'
-from glue import git_version
-__date__ = git_version.date
-__version__ = git_version.id
+from six.moves import (configparser, StringIO)
 
+import itertools
+import math
 import os
-import sys
-import re
-import time
 import random
-import math
+import re
 import stat
-import itertools
-import ligo.segments
+import sys
+import time
 from hashlib import md5
 
-from six.moves import (configparser, StringIO)
+import ligo.segments
+from glue import git_version
+
+__author__ = 'Duncan Brown <duncan@gravity.phys.uwm.edu>'
+__date__ = git_version.date
+__version__ = git_version.id
 
 
 def s2play(t):
@@ -51,16 +52,28 @@ class CondorError(Exception):
   """Error thrown by Condor Jobs"""
   def __init__(self, args=None):
     self.args = args
+
+
 class CondorJobError(CondorError):
   pass
+
+
 class CondorSubmitError(CondorError):
   pass
+
+
 class CondorDAGError(CondorError):
   pass
+
+
 class CondorDAGJobError(CondorError):
   pass
+
+
 class CondorDAGNodeError(CondorError):
   pass
+
+
 class SegmentError(Exception):
   def __init__(self, args=None):
     self.args = args
@@ -406,15 +419,15 @@ class CondorJob(object):
       raise CondorSubmitError("Cannot open file " + self.__sub_file_path)
 
     if self.__universe == 'grid':
-      if self.__grid_type == None:
+      if self.__grid_type is None:
         raise CondorSubmitError('No grid type specified.')
       elif self.__grid_type == 'gt2':
-        if self.__grid_server == None:
+        if self.__grid_server is None:
           raise CondorSubmitError('No server specified for grid resource.')
       elif self.__grid_type == 'gt4':
-        if self.__grid_server == None:
+        if self.__grid_server is None:
           raise CondorSubmitError('No server specified for grid resource.')
-        if self.__grid_scheduler == None:
+        if self.__grid_scheduler is None:
           raise CondorSubmitError('No scheduler specified for grid resource.')
       else:
         raise CondorSubmitError('Unsupported grid resource.')
@@ -466,7 +479,6 @@ class CondorJob(object):
     subfile.close()
 
 
-
 class CondorDAGJob(CondorJob):
   """
   A Condor DAG job never notifies the user on completion and can have variable
@@ -551,7 +563,7 @@ class CondorDAGManJob(object):
     """
     self.__dag = dag
     self.__notification = None
-    self.__dag_directory= dir
+    self.__dag_directory = dir
 
   def create_node(self):
     """
@@ -638,7 +650,7 @@ class CondorDAGNode(object):
     self.__input_files = []
     self.__checkpoint_files = []
     self.__vds_group = None
-    if isinstance(job,CondorDAGJob) and job.get_universe()=='standard':
+    if isinstance(job, CondorDAGJob) and job.get_universe() == 'standard':
       self.__grid_start = 'none'
     else:
       self.__grid_start = None
@@ -964,14 +976,14 @@ class CondorDAGNode(object):
     Write the DAG entry for this node's category to the DAG file descriptor.
     @param fh: descriptor of open DAG file.
     """
-    fh.write( 'CATEGORY ' + self.__name + ' ' + self.__category +  '\n' )
+    fh.write('CATEGORY ' + self.__name + ' ' + self.__category + '\n')
 
   def write_priority(self,fh):
     """
     Write the DAG entry for this node's priority to the DAG file descriptor.
     @param fh: descriptor of open DAG file.
     """
-    fh.write( 'PRIORITY ' + self.__name + ' ' + self.__priority +  '\n' )
+    fh.write('PRIORITY ' + self.__name + ' ' + self.__priority + '\n')
 
   def write_vars(self,fh):
     """
@@ -1004,8 +1016,8 @@ class CondorDAGNode(object):
     @param fh: descriptor of open DAG file.
     """
     if self.__pre_script:
-      fh.write( 'SCRIPT PRE ' + str(self) + ' ' + self.__pre_script + ' ' +
-        ' '.join(self.__pre_script_args) + '\n' )
+      fh.write( 'SCRIPT PRE ' + str(self) + ' ' + self.__pre_script + ' '
+        + ' '.join(self.__pre_script_args) + '\n' )
 
   def write_post_script(self,fh):
     """
@@ -1013,8 +1025,8 @@ class CondorDAGNode(object):
     @param fh: descriptor of open DAG file.
     """
     if self.__post_script:
-      fh.write( 'SCRIPT POST ' + str(self) + ' ' + self.__post_script + ' ' +
-        ' '.join(self.__post_script_args) + '\n' )
+      fh.write( 'SCRIPT POST ' + str(self) + ' ' + self.__post_script + ' '
+        + ' '.join(self.__post_script_args) + '\n' )
 
   def write_input_files(self, fh):
     """
@@ -1268,7 +1280,7 @@ class CondorDAG(object):
     @param category: tuple containing type of jobs to set a maxjobs limit for
         and the maximum number of jobs of that type to run at once.
     """
-    fh.write( 'MAXJOBS ' + str(category[0]) + ' ' + str(category[1]) +  '\n' )
+    fh.write('MAXJOBS ' + str(category[0]) + ' ' + str(category[1]) + '\n')
 
   def write_sub_files(self):
     """
@@ -1419,8 +1431,8 @@ class AnalysisNode(object):
       self.add_var_opt('gps-start-time',time)
     self.__start = time
     self.__data_start = time
-    #if not self.__calibration and self.__ifo and self.__start > 0:
-    #  self.calibration()
+    # if not self.__calibration and self.__ifo and self.__start > 0:
+    #   self.calibration()
 
   def get_start(self):
     """
@@ -1624,11 +1636,11 @@ class AnalysisNode(object):
         a, b, c, d = lfn.split('.')[0].split('-')
         t_start = int(c)
         t_end = int(c) + int(d)
-        if (t_start <= (self.get_data_end()+self.get_pad_data()+int(d)+1) \
-          and t_end >= (self.get_data_start()-self.get_pad_data()-int(d)-1)):
+        if (t_start <= (self.get_data_end() + self.get_pad_data() + int(d) + 1)
+          and t_end >= (self.get_data_start() - self.get_pad_data() - int(d) - 1)):
           self.add_input_file(lfn)
       # set the frame type based on the LFNs returned by datafind
-      self.add_var_opt('frame-type',b)
+      self.add_var_opt('frame-type', b)
     else:
       raise CondorDAGNodeError("Unknown LFN cache format")
 
@@ -1637,11 +1649,11 @@ class AnalysisNode(object):
     Determine the path to the correct calibration cache file to use.
     """
     if self.__ifo and self.__start > 0:
-        cal_path = self.job().get_config('calibration','path')
+        cal_path = self.job().get_config('calibration', 'path')
 
         # check if this is S2: split calibration epochs
-        if ( self.__LHO2k.match(self.__ifo) and
-          (self.__start >= 729273613) and (self.__start <= 734367613) ):
+        if ( self.__LHO2k.match(self.__ifo)
+          and (self.__start >= 729273613) and (self.__start <= 734367613) ):
           if self.__start < int(
             self.job().get_config('calibration','H2-cal-epoch-boundary')):
             cal_file = self.job().get_config('calibration','H2-1')
@@ -1679,13 +1691,12 @@ class AnalysisNode(object):
     return self.__calibration_cache
 
 
-
 class AnalysisChunk(object):
   """
   An AnalysisChunk is the unit of data that a node works with, usually some
   subset of a ScienceSegment.
   """
-  def __init__(self, start, end, trig_start = 0, trig_end = 0):
+  def __init__(self, start, end, trig_start=0, trig_end=0):
     """
     @param start: GPS start time of the chunk.
     @param end: GPS end time of the chunk.
@@ -1777,7 +1788,6 @@ class AnalysisChunk(object):
     self.__trig_end = end
 
 
-
 class ScienceSegment(object):
   """
   A ScienceSegment is a period of time where the experimenters determine
@@ -1816,7 +1826,7 @@ class ScienceSegment(object):
 
   def __repr__(self):
     return '<ScienceSegment: id %d, start %d, end %d, dur %d, unused %d>' % (
-    self.id(),self.start(),self.end(),self.dur(),self.__unused)
+        self.id(),self.start(),self.end(),self.dur(),self.__unused)
 
   def make_chunks(self,length=0,overlap=0,play=0,sl=0,excl_play=0,pad_data=0):
     """
@@ -1843,12 +1853,12 @@ class ScienceSegment(object):
     increment = length - overlap
     while time_left >= length:
       end = start + length
-      if (not play) or (play and (((end-sl-excl_play-729273613) % 6370) <
-        (600+length-2*excl_play))):
+      if (not play) or (play and (((end - sl - excl_play - 729273613) % 6370)
+        < (600 + length - 2 * excl_play))):
         if (play == 2):
         # calculate the start of the playground preceeding the chunk end
           play_start = 729273613 + 6370 * \
-           math.floor((end-sl-excl_play-729273613) / 6370)
+           math.floor((end - sl - excl_play - 729273613) / 6370)
           play_end = play_start + 600
           trig_start = 0
           trig_end = 0
@@ -1861,21 +1871,21 @@ class ScienceSegment(object):
               trig_start = int(play_start)
             if play_end < end:
               trig_end = int(play_end)
-          self.__chunks.append(AnalysisChunk(start,end,trig_start,trig_end))
+          self.__chunks.append(AnalysisChunk(start, end, trig_start, trig_end))
         else:
-          self.__chunks.append(AnalysisChunk(start,end))
+          self.__chunks.append(AnalysisChunk(start, end))
       start += increment
       time_left -= increment
     self.__unused = time_left - overlap
 
-  def add_chunk(self,start,end,trig_start=0,trig_end=0):
+  def add_chunk(self, start, end, trig_start=0, trig_end=0):
     """
     Add an AnalysisChunk to the list associated with this ScienceSegment.
     @param start: GPS start time of chunk.
     @param end: GPS end time of chunk.
     @param trig_start: GPS start time for triggers from chunk
     """
-    self.__chunks.append(AnalysisChunk(start,end,trig_start,trig_end))
+    self.__chunks.append(AnalysisChunk(start, end, trig_start, trig_end))
 
   def unused(self):
     """
@@ -1978,19 +1988,19 @@ class ScienceData(object):
     @param slide_sec: Slide each ScienceSegment by::
 
       delta > 0:
-        [s,e] -> [s+delta,e].
+        [s, e] -> [s+delta, e].
       delta < 0:
-        [s,e] -> [s,e-delta].
+        [s, e] -> [s, e-delta].
 
     @param buffer: shrink the ScienceSegment::
 
-      [s,e] -> [s+buffer,e-buffer]
+      [s, e] -> [s+buffer, e-buffer]
     """
     self.__filename = filename
     octothorpe = re.compile(r'\A#')
     for line in open(filename):
       if not octothorpe.match(line) and int(line.split()[3]) >= min_length:
-        (id,st,en,du) = list(map(int,line.split()))
+        (id, st, en, du) = list(map(int, line.split()))
 
         # slide the data if doing a background estimation
         if slide_sec > 0:
@@ -2003,16 +2013,16 @@ class ScienceData(object):
         if buffer > 0:
           st += buffer
           en -= buffer
-          du -= 2*abs(buffer)
+          du -= 2 * abs(buffer)
 
-        x = ScienceSegment(tuple([id,st,en,du]))
+        x = ScienceSegment(tuple([id, st, en, du]))
         self.__sci_segs.append(x)
 
-  def append_from_tuple(self,seg_tuple):
+  def append_from_tuple(self, seg_tuple):
     x = ScienceSegment(seg_tuple)
     self.__sci_segs.append(x)
 
-  def tama_read(self,filename):
+  def tama_read(self, filename):
     """
     Parse the science segments from a tama list of locked segments contained in
                 file.
@@ -2029,8 +2039,7 @@ class ScienceData(object):
       x = ScienceSegment(tuple([id, start, end, dur]))
       self.__sci_segs.append(x)
 
-
-  def make_chunks(self,length,overlap=0,play=0,sl=0,excl_play=0,pad_data=0):
+  def make_chunks(self, length, overlap=0, play=0, sl=0, excl_play=0, pad_data=0):
     """
     Divide each ScienceSegment contained in this object into AnalysisChunks.
     @param length: length of chunk in seconds.
@@ -2069,13 +2078,13 @@ class ScienceData(object):
       if seg.unused() > min_length:
         end = seg.end() - pad_data
         start = end - length
-        if (not play) or (play and (((end-sl-excl_play-729273613)%6370) <
-          (600+length-2*excl_play))):
+        if (not play) or (play and (((end - sl - excl_play - 729273613) % 6370)
+          < (600 + length - 2 * excl_play))):
           trig_start = end - seg.unused() - trig_overlap
           if (play == 2):
             # calculate the start of the playground preceeding the chunk end
             play_start = 729273613 + 6370 * \
-              math.floor((end-sl-excl_play-729273613) / 6370)
+              math.floor((end - sl - excl_play - 729273613) / 6370)
             play_end = play_start + 600
             trig_end = 0
             if ( (play_end - 6370) > start ):
@@ -2110,8 +2119,8 @@ class ScienceData(object):
         start = seg.end() - seg.unused() - overlap
         end = seg.end()
         length = start - end
-        if (not play) or (play and (((end-sl-excl_play-729273613)%6370) <
-        (600+length-2*excl_play))):
+        if (not play) or (play and (((end - sl - excl_play - 729273613) % 6370)
+          < (600 + length - 2 * excl_play))):
           seg.add_chunk(start, end, start)
         seg.set_unused(0)
 
@@ -2132,17 +2141,17 @@ class ScienceData(object):
 
       if seg.unused() > max_length:
         # get number of max_length chunks
-        N = (seg_end - seg_start)/max_length
+        N = (seg_end - seg_start) // max_length
 
         # split into chunks of max_length
-        for i in range(N-1):
+        for i in range(N - 1):
           start = seg_start + (i * max_length)
           stop = start + max_length
           seg.add_chunk(start, stop)
 
         # optimise data usage for last 2 chunks
-        start = seg_start + ((N-1) * max_length)
-        middle = (start + seg_end)/2
+        start = seg_start + ((N - 1) * max_length)
+        middle = (start + seg_end) / 2
         seg.add_chunk(start, middle)
         seg.add_chunk(middle, seg_end)
         seg.set_unused(0)
@@ -2188,7 +2197,7 @@ class ScienceData(object):
           else:
             ostop = stop1
 
-          x = ScienceSegment(tuple([id, ostart, ostop, ostop-ostart]))
+          x = ScienceSegment(tuple([id, ostart, ostop, ostop - ostart]))
           outlist.append(x)
 
           if stop2 > stop1:
@@ -2209,8 +2218,6 @@ class ScienceData(object):
     self.__sci_segs = outlist
     return len(self)
 
-
-
   def union(self, other):
     """
     Replaces the ScienceSegments contained in this instance of ScienceData
@@ -2282,20 +2289,19 @@ class ScienceData(object):
       else:
          # flush the current output segment, and replace it with the
          # new segment
-         x = ScienceSegment(tuple([id,ostart,ostop,ostop-ostart]))
+         x = ScienceSegment(tuple([id, ostart, ostop, ostop - ostart]))
          seglist.append(x)
          ostart = ustart
          ostop = ustop
 
     # flush out the final output segment (if any)
     if ostart != -1:
-      x = ScienceSegment(tuple([id,ostart,ostop,ostop-ostart]))
+      x = ScienceSegment(tuple([id, ostart, ostop, ostop - ostart]))
       seglist.append(x)
 
     self.__sci_segs = seglist
     return len(self)
 
-
   def coalesce(self):
     """
     Coalesces any adjacent ScienceSegments. Returns the number of
@@ -2320,7 +2326,8 @@ class ScienceData(object):
       if start > ostop:
         # disconnected, so flush out the existing segment (if any)
         if ostop >= 0:
-          x = ScienceSegment(tuple([id,ostart,ostop,ostop-ostart]))
+          # the following line produces a flake8 issue with ostart; see https://git.ligo.org/lscsoft/glue/-/issues/37
+          x = ScienceSegment(tuple([id, ostart, ostop, ostop - ostart]))   # noqa: F821
           outlist.append(x)
         ostart = start  # noqa: F841
         ostop = stop  # noqa: F841
@@ -2330,13 +2337,12 @@ class ScienceData(object):
 
     # flush out the final segment (if any)
     if ostop >= 0:
-      x = ScienceSegment(tuple([id,ostart,ostop,ostop-ostart]))
+      x = ScienceSegment(tuple([id, ostart, ostop, ostop - ostart]))
       outlist.append(x)
 
     self.__sci_segs = outlist
     return len(self)
 
-
   def invert(self):
     """
     Inverts the ScienceSegments in the class (i.e. set NOT).  Returns the
@@ -2357,18 +2363,17 @@ class ScienceData(object):
       if start < 0 or stop < start or start < ostart:
         raise SegmentError("Invalid list")
       if start > 0:
-        x = ScienceSegment(tuple([0,ostart,start,start-ostart]))
+        x = ScienceSegment(tuple([0, ostart, start, start - ostart]))
         outlist.append(x)
       ostart = stop
 
     if ostart < 1999999999:
-      x = ScienceSegment(tuple([0,ostart,1999999999,1999999999-ostart]))
+      x = ScienceSegment(tuple([0, ostart, 1999999999, 1999999999 - ostart]))
       outlist.append(x)
 
     self.__sci_segs = outlist
     return len(self)
 
-
   def play(self):
     """
     Keep only times in ScienceSegments which are in the playground
@@ -2387,8 +2392,8 @@ class ScienceData(object):
       id = seg.id()
 
       # select first playground segment which ends after start of seg
-      play_start = begin_s2+play_space*( 1 +
-        int((start - begin_s2 - play_len)/play_space) )
+      play_start = begin_s2 + play_space * ( 1
+        + int((start - begin_s2 - play_len) / play_space) )
 
       while play_start < stop:
         if play_start > start:
@@ -2396,7 +2401,6 @@ class ScienceData(object):
         else:
           ostart = start
 
-
         play_stop = play_start + play_len
 
         if play_stop < stop:
@@ -2404,7 +2408,7 @@ class ScienceData(object):
         else:
           ostop = stop
 
-        x = ScienceSegment(tuple([id, ostart, ostop, ostop-ostart]))
+        x = ScienceSegment(tuple([id, ostart, ostop, ostop - ostart]))
         outlist.append(x)
 
         # step forward
@@ -2414,7 +2418,6 @@ class ScienceData(object):
     self.__sci_segs = outlist
     return len(self)
 
-
   def intersect_3(self, second, third):
     """
     Intersection routine for three inputs.  Built out of the intersect,
@@ -2439,7 +2442,7 @@ class ScienceData(object):
     """
       Split the segments in the list is subsegments at least as long as dt
     """
-    outlist=[]
+    outlist = []
     for seg in self:
       start = seg.start()
       stop = seg.end()
@@ -2450,8 +2453,8 @@ class ScienceData(object):
         if tmpstop > stop:
           tmpstop = stop
         elif tmpstop + dt > stop:
-          tmpstop = int( (start + stop)/2 )
-        x = ScienceSegment(tuple([id,start,tmpstop,tmpstop-start]))
+          tmpstop = int( (start + stop) / 2 )
+        x = ScienceSegment(tuple([id, start, tmpstop, tmpstop - start]))
         outlist.append(x)
         start = tmpstop
 
@@ -2460,7 +2463,6 @@ class ScienceData(object):
     return len(self)
 
 
-
 class LsyncCache(object):
   def __init__(self,path):
     # location of the cache file
@@ -2468,7 +2470,7 @@ class LsyncCache(object):
 
     # dictionary where the keys are data types like 'gwf', 'sft', 'xml'
     # and the values are dictionaries
-    self.cache = {'gwf': None, 'sft' : None, 'xml' : None}
+    self.cache = {'gwf': None, 'sft': None, 'xml': None}
 
     # for each type create a dictionary where keys are sites and values
     # are dictionaries
@@ -2486,7 +2488,8 @@ class LsyncCache(object):
     """
     Each line of the frame cache file is like the following:
 
-    /frames/E13/LHO/frames/hoftMon_H1/H-H1_DMT_C00_L2-9246,H,H1_DMT_C00_L2,1,16 1240664820 6231 {924600000 924646720 924646784 924647472 924647712 924700000}
+    /frames/E13/LHO/frames/hoftMon_H1/H-H1_DMT_C00_L2-9246,H,H1_DMT_C00_L2,1,16 1240664820 6231 \
+      {924600000 924646720 924646784 924647472 924647712 924700000}
 
     The description is as follows:
 
@@ -2603,7 +2606,7 @@ class LsyncCache(object):
           # loop through the times and create paths
           for t in times:
             if search.intersects(ligo.segments.segment(t, t + dur)):
-              lfn =  "%s-%s-%d-%d.gwf" % (site, frameType, t, dur)
+              lfn = "%s-%s-%d-%d.gwf" % (site, frameType, t, dur)
               lfnDict[lfn] = None
 
     # sort the LFNs to deliver URLs in GPS order
@@ -2656,8 +2659,10 @@ class LSCDataFindJob(CondorDAGJob, AnalysisJob):
 
     self.add_condor_cmd('getenv','True')
 
-    self.set_stderr_file(os.path.join(log_dir, 'datafind-$(macroobservatory)-$(macrotype)-$(macrogpsstarttime)-$(macrogpsendtime)-$(cluster)-$(process).err'))
-    self.set_stdout_file(os.path.join(log_dir, 'datafind-$(macroobservatory)-$(macrotype)-$(macrogpsstarttime)-$(macrogpsendtime)-$(cluster)-$(process).out'))
+    self.set_stderr_file(os.path.join(log_dir,
+        'datafind-$(macroobservatory)-$(macrotype)-$(macrogpsstarttime)-$(macrogpsendtime)-$(cluster)-$(process).err'))
+    self.set_stdout_file(os.path.join(log_dir,
+        'datafind-$(macroobservatory)-$(macrotype)-$(macrogpsstarttime)-$(macrogpsendtime)-$(cluster)-$(process).out'))
     self.set_sub_file('datafind.sub')
 
   def get_cache_dir(self):
@@ -2705,16 +2710,17 @@ class LSCDataFindNode(CondorDAGNode, AnalysisNode):
     once the ifo, start and end times have been set.
     """
     if self.__start and self.__end and self.__observatory and self.__type:
-      self.__output = os.path.join(self.__job.get_cache_dir(), self.__observatory + '-' + self.__type +'_CACHE' + '-' + str(self.__start) + '-' + str(self.__end - self.__start) + '.lcf')
+      self.__output = os.path.join(self.__job.get_cache_dir(), self.__observatory
+        + '-' + self.__type + '_CACHE' + '-' + str(self.__start) + '-' + str(self.__end - self.__start) + '.lcf')
       self.set_output(self.__output)
 
-  def set_start(self,time,pad = None):
+  def set_start(self, time, pad=None):
     """
     Set the start time of the datafind query.
     @param time: GPS start time of query.
     """
     if pad:
-      self.add_var_opt('gps-start-time', int(time)-int(pad))
+      self.add_var_opt('gps-start-time', int(time) - int(pad))
     else:
       self.add_var_opt('gps-start-time', int(time))
     self.__start = time
@@ -2774,7 +2780,7 @@ class LSCDataFindNode(CondorDAGNode, AnalysisNode):
     return self.__type
 
   def get_output_cache(self):
-    return  self.__output
+    return self.__output
 
   def get_output(self):
     """
@@ -3009,7 +3015,7 @@ class LigolwSqliteNode(SqliteNode):
     super(LigolwSqliteNode,self).__init__(job)
     self.__input_cache = None
     self.__xml_output = None
-    self.__xml_input   = None
+    self.__xml_input = None
 
   def set_input_cache(self, input_cache):
     """
@@ -3051,6 +3057,7 @@ class LigolwSqliteNode(SqliteNode):
     else:
       raise ValueError("no output xml file or database specified")
 
+
 class DeepCopyableConfigParser(configparser.ConfigParser):
     """
     The standard SafeConfigParser no longer supports deepcopy() as of python
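
Reviewer note: the rewrapped playground conditions above all encode the same
S2 convention (a 600 s playground every 6370 s starting at GPS 729273613); a
worked sketch with sl and excl_play left at zero (the helper name is
illustrative):

    import math

    PLAY_EPOCH, PLAY_SPACE = 729273613, 6370

    def play_start_before(end, sl=0, excl_play=0):
        # GPS start of the playground interval preceding `end`, as in make_chunks
        return PLAY_EPOCH + PLAY_SPACE * \
            math.floor((end - sl - excl_play - PLAY_EPOCH) / PLAY_SPACE)

    print(play_start_before(729280000))   # -> 729279983
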
diff --git a/glue/segmentdb/query_engine.py b/glue/segmentdb/query_engine.py
index 215ff41ae1d5b3e32409a9615df2b9852818de45..6dc8e9d67c0ee10805f54a2a2abbf45fe8f22ff6 100644
--- a/glue/segmentdb/query_engine.py
+++ b/glue/segmentdb/query_engine.py
@@ -19,7 +19,7 @@
 #
 # =============================================================================
 #
-#				   Preamble
+#				   Preamble                                    # noqa: E265
 #
 # =============================================================================
 #
@@ -39,89 +39,86 @@ except ImportError:
 from glue import ldbd
 
 from glue import git_version
+
 __date__ = git_version.date
 __version__ = git_version.id
-__author__  = "Larne Pekowsky <lppekows@physics.syr.edu>"
+__author__ = "Larne Pekowsky <lppekows@physics.syr.edu>"
 
 
 class QueryEngine:
-	"""Abstract class.  Provides query() method that returns something that
-	behaves like an array of tuples of results and a close() method that
-	does any needed cleanup."""
+    """Abstract class.  Provides query() method that returns something that
+    behaves like an array of tuples of results and a close() method that
+    does any needed cleanup."""
 
-	def query(sql):
-		return None
+    def query(sql):
+        return None
 
-	def close():
-		pass
+    def close():
+        pass
 
 
 class SqliteQueryEngine(QueryEngine):
-	"""QueryEngine for sqlite databases.  Really just a minimal wrapper
-	around cursor"""
+    """QueryEngine for sqlite databases.  Really just a minimal wrapper
+    around cursor"""
 
-	connection = None
-	cursor     = None
+    connection = None
+    cursor     = None   # noqa: E221
 
-	def __init__(self, connection):
-		self.connection = connection
+    def __init__(self, connection):
+        self.connection = connection
 
-	def query(self, sql):
-		self.cursor = self.connection.cursor().execute(sql)
-		ret         = []
+    def query(self, sql):
+        self.cursor = self.connection.cursor().execute(sql)
+        ret         = []   # noqa: E221
 
-		for row in self.cursor:
-			ret.append(row)
+        for row in self.cursor:
+            ret.append(row)
 
-		return ret
+        return ret
 
-	def close(self):
-		self.cursor.close()
-		del self.cursor
+    def close(self):
+        self.cursor.close()
+        del self.cursor
 
 
 class LdbdQueryEngine(QueryEngine):
-	"""QueryEngine for databses behind ldbd.  Parses ligolw that a query
-	returns into rows"""
-
-	xmlparser = None
-	lwtparser = None
-	ligomd    = None
-
-	rows      = None
-
-
-
-	def __init__(self, client):
-		def dtd_uri_callback(uri):
-			if uri == 'http://ldas-sw.ligo.caltech.edu/doc/ligolwAPI/html/ligolw_dtd.txt':
-				return 'file://localhost' + os.path.join( os.getenv("GLUE_PREFIX", sys.prefix), 'share', 'lscsoft-glue', 'ligolw_dtd.txt' )
-			else:
-				return uri
-
-		self.client	 = client
-		self.xmlparser      = pyRXP.Parser()
-		self.xmlparser.eoCB = dtd_uri_callback
-		self.lwtparser      = ldbd.LIGOLwParser()
-		self.ligomd	 = ldbd.LIGOMetadata(self.xmlparser, self.lwtparser)
-
-
-	def query(self, sql):
-		xml = self.client.query(sql)
-
-		# This is a kludge around bug 2317
-		try:
-			self.client.__disconnect__()
-			self.client.__connect__(self.client.host, self.client.port, self.client.identity)
-		except:
-			pass
-
-		self.ligomd.parse(xml)
-		res = self.ligomd.table
-		self.rows = self.ligomd.table[list(res.keys())[0]]['stream']
-
-		return self.rows
-
-	def close(self):
-		del self.rows
-
+    """QueryEngine for databses behind ldbd.  Parses ligolw that a query
+    returns into rows"""
+
+    xmlparser = None
+    lwtparser = None
+    ligomd    = None   # noqa: E221
+    rows      = None   # noqa: E221
+
+    def __init__(self, client):
+        def dtd_uri_callback(uri):
+            if uri == 'http://ldas-sw.ligo.caltech.edu/doc/ligolwAPI/html/ligolw_dtd.txt':
+                return 'file://localhost' + os.path.join(os.getenv("GLUE_PREFIX", sys.prefix),
+                                                         'share', 'lscsoft-glue', 'ligolw_dtd.txt')
+            else:
+                return uri
+
+        self.client         = client   # noqa: E221
+        self.xmlparser      = pyRXP.Parser()   # noqa: E221
+        self.xmlparser.eoCB = dtd_uri_callback
+        self.lwtparser      = ldbd.LIGOLwParser()   # noqa: E221
+        self.ligomd         = ldbd.LIGOMetadata(self.xmlparser, self.lwtparser)   # noqa: E221
+
+    def query(self, sql):
+        xml = self.client.query(sql)
+
+        # This is a kludge around bug 2317
+        try:
+            self.client.__disconnect__()
+            self.client.__connect__(self.client.host, self.client.port, self.client.identity)
+        except:
+            pass
+
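+        # parse the returned document and take the rows of its first (only)
+        # table, which the parser stores under the table's 'stream' key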
+        self.ligomd.parse(xml)
+        res = self.ligomd.table
+        self.rows = self.ligomd.table[list(res.keys())[0]]['stream']
+
+        return self.rows
+
+    def close(self):
+        del self.rows
diff --git a/glue/segmentdb/segmentdb_utils.py b/glue/segmentdb/segmentdb_utils.py
index 5e833f585299f2e44e070a19f1ce6af4107d4974..e1b179c2f53cb3082758766055d7a5a2a80aab71 100644
--- a/glue/segmentdb/segmentdb_utils.py
+++ b/glue/segmentdb/segmentdb_utils.py
@@ -16,19 +16,17 @@
 # with this program; if not, write to the Free Software Foundation, Inc.,
 # 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 
-import sys
+from six.moves import filter
+from six.moves import range
+
 import os
 import re
+import sys
 
-from ligo.segments import segment, segmentlist
 from glue.ligolw import lsctables
-from glue.ligolw import table
-from glue.segmentdb import query_engine
 from glue.ligolw import types as ligolwtypes
-from six.moves import filter
-from six.moves import map
-from six.moves import range
-
+from glue.segmentdb import query_engine
+from ligo.segments import segment, segmentlist
 
 
 #
@@ -54,62 +52,59 @@ def get_all_files_in_range(dirname, starttime, endtime, pad=64):
         else:
             return ret
 
-#    first_four_start = starttime / 100000
-#    first_four_end   = endtime   / 100000
-    first_four_start = starttime // 100000   # P3 fix
-    first_four_end   = endtime   // 100000   # P3 fix
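+    # directories in the hierarchy are named by the leading digits of the
+    # GPS time (time // 100000), so work out the range of names to descend into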
+    first_four_start = starttime // 100000
+    first_four_end   = endtime   // 100000   # noqa: E221
 
     # Screen for files starting with . and ending with .xml.*
     # i.e. those leftover by rsync
-    file_list=os.listdir(dirname)
+    file_list = os.listdir(dirname)
     file_list.sort()
     # traverse file_list in reverse order, so that if a filename is removed, the following file is not skipped;
     #   see https://github.com/ligovirgo/dqsegdb/issues/111 and https://git.ligo.org/lscsoft/glue/-/issues/25
     for filename in file_list[::-1]:
-        a = re.match(r"\..*\.xml\..*$",filename)
-        if a != None:
+        a = re.match(r"\..*\.xml\..*$", filename)
+        if a is not None:
             file_list.remove(a.group(0))
 
-    #for filename in os.listdir(dirname):
+    # for filename in os.listdir(dirname):
     for filename in file_list:
         if re.match(r'.*-[0-9]{5}$', filename):
             dirtime = int(filename[-5:])
             if dirtime >= first_four_start and dirtime <= first_four_end:
-                ret += get_all_files_in_range(os.path.join(dirname,filename), starttime, endtime, pad=pad)
+                ret += get_all_files_in_range(os.path.join(dirname, filename), starttime, endtime, pad=pad)
         elif re.match(r'.*-[0-9]{4}$', filename):
             dirtime = int(filename[-4:])
             if dirtime >= first_four_start and dirtime <= first_four_end:
-                ret += get_all_files_in_range(os.path.join(dirname,filename), starttime, endtime, pad=pad)
+                ret += get_all_files_in_range(os.path.join(dirname, filename), starttime, endtime, pad=pad)
         elif re.match(r'.*-[0-9]*-[0-9]*\.xml$', filename):
             file_time = int(filename.split('-')[-2])
-            if file_time >= (starttime-pad) and file_time <= (endtime+pad):
-                ret.append(os.path.join(dirname,filename))
-        elif os.path.isfile(os.path.join(dirname,filename)):
+            if file_time >= (starttime - pad) and file_time <= (endtime + pad):
+                ret.append(os.path.join(dirname, filename))
+        elif os.path.isfile(os.path.join(dirname, filename)):
             # Non .xml file, don't recurse:
             return ret
         else:
             # Keep recursing, we may be looking at directories of
             # ifos, each of which has directories with times
-            ret += get_all_files_in_range(os.path.join(dirname,filename), starttime, endtime, pad=pad)
+            ret += get_all_files_in_range(os.path.join(dirname, filename), starttime, endtime, pad=pad)
 
     return ret
 
 
-
 def setup_database(database_location):
     """ 1. Determine protocol"""
     try:
         # When no protocol is given:
         if database_location.find('://') == -1:
             msg = "Error: Please specify protocol in your --segment-url argument in the format PROTOCOL://HOST"
-            msg +="\nFor example: --segment-url https://segdb.ligo.caltech.edu"
+            msg += "\nFor example: --segment-url https://segments.ligo.org"
             msg += "\nSupported protocols include: http, https, ldbd, ldbdi"
             msg += "\nRun with --help for usage"
             raise ValueError(msg)
 
         # When wrong protocol is given:
         protocol = database_location[:database_location.find('://')].lower()
-        if protocol not in ("http","https","ldbd","ldbdi"):
+        if protocol not in ("http", "https", "ldbd", "ldbdi"):
             msg = "Error: protocol %s not supported" % protocol
             msg += "\nPlease specify correct protocol in your --segment-url argument in the format PROTOCOL://HOST"
             msg += "\nSupported protocols include: http, https, ldbd, ldbdi"
@@ -120,7 +115,7 @@ def setup_database(database_location):
         sys.exit(1)
 
     """ 2. Determine host and port"""
-    host_and_port = database_location[(len(protocol)+3):]
+    host_and_port = database_location[(len(protocol) + 3):]
     if host_and_port.find(':') < 0:
         # if no port number given, set default port respectively:
         host = host_and_port
@@ -151,9 +146,9 @@ def setup_database(database_location):
         else:
             identity = None
         try:
-            client = LDBDWClient.LDBDClient(host,port,protocol,identity)
+            client = LDBDWClient.LDBDClient(host, port, protocol, identity)
         except Exception as e:
-            sys.stderr.write("Unable to connect to LDBD Server at %s://%s:%d \n" % (protocol,host, port) + str(e))
+            sys.stderr.write("Unable to connect to LDBD Server at %s://%s:%d \n" % (protocol, host, port) + str(e))
             sys.exit(1)
 
     elif protocol.startswith('ldbd'):
@@ -164,9 +159,9 @@ def setup_database(database_location):
         else:
             identity = None
         try:
-            client = LDBDClient.LDBDClient(host,port,identity)
+            client = LDBDClient.LDBDClient(host, port, identity)
         except Exception as e:
-            sys.stderr.write("Unable to connect to LDBD Server at %s://%s:%d\n" % (protocol,host, port) + str(e))
+            sys.stderr.write("Unable to connect to LDBD Server at %s://%s:%d\n" % (protocol, host, port) + str(e))
             try:
                 if gsiserverutils.checkCredentials():
                     sys.stderr.write("Got the following error : \n" + str(e))
@@ -176,8 +171,7 @@ def setup_database(database_location):
             sys.exit(1)
 
     else:
-        raise ValueError( "invalid url for segment database" )
-
+        raise ValueError("invalid url for segment database")
 
     return client
 
@@ -191,15 +185,13 @@ def setup_database(database_location):
 #
 
 
-
 def query_segments(engine, table, segdefs):
     # each segdef is a list containing:
     #     ifo, name, version, start_time, end_time, start_pad, end_pad
 
-
     # The trivial case: if there's nothing to do, return no time
     if len(segdefs) == 0:
-        return [ segmentlist([]) ]
+        return [segmentlist([])]
 
     #
     # For the sake of efficiency we query the database for all the segdefs at once
@@ -217,9 +209,9 @@ def query_segments(engine, table, segdefs):
 
     clauses = [make_clause(table, segdef) for segdef in segdefs]
 
-    sql  = 'SELECT segment_definer.ifos, segment_definer.name, segment_definer.version, '
+    sql  = 'SELECT segment_definer.ifos, segment_definer.name, segment_definer.version, '   # noqa: E221
     sql += ' %s.start_time, %s.end_time ' % (table, table)
-    sql += ' FROM segment_definer, %s '   % table
+    sql += ' FROM segment_definer, %s ' % table
     sql += ' WHERE %s.segment_def_id = segment_definer.segment_def_id AND ' % table
 
     if engine.__class__ == query_engine.LdbdQueryEngine:
@@ -247,14 +239,14 @@ def query_segments(engine, table, segdefs):
     for segdef in segdefs:
         ifo, name, version, start_time, end_time, start_pad, end_pad = segdef
 
-        search_span      = segment(start_time, end_time)
+        search_span      = segment(start_time, end_time)   # noqa: E221
         search_span_list = segmentlist([search_span])
 
         # See whether the row belongs to the current segdef.  Name, ifo and version must match
         # and the padded segment must overlap with the range of the segdef.
         def matches(row):
-            return ( row[0].strip() == ifo and row[1] == name and int(row[2]) == int(version)
-                     and search_span.intersects(segment(row[3] + start_pad, row[4] + start_pad)) )
+            return (row[0].strip() == ifo and row[1] == name and int(row[2]) == int(version)
+                    and search_span.intersects(segment(row[3] + start_pad, row[4] + start_pad)))
 
         # Add the padding.  Segments may extend beyond the time of interest, chop off the excess.
         def pad_and_truncate(row_start, row_end):
@@ -266,7 +258,7 @@ def query_segments(engine, table, segdefs):
             # PR 2969: The above comment is incorrect.  Negative padding may cause
             # an empty intersection.
             if len(tmp) == 0:
-                return segment(0,0)
+                return segment(0, 0)
             else:
                 return tmp[0]
 
@@ -274,7 +266,7 @@ def query_segments(engine, table, segdefs):
         # not necessarily be disjoint, if the padding crosses gaps.  They are also not gauranteed to
         # be in order, since there's no ORDER BY in the query.  So the list needs to be coalesced
         # before arithmatic can be done with it.
-        result  = segmentlist( [pad_and_truncate(row[3], row[4]) for row in rows if matches(row)] ).coalesce()
+        result = segmentlist([pad_and_truncate(row[3], row[4]) for row in rows if matches(row)]).coalesce()
 
         # This is not needed: since each of the segments are constrained to be within the search
         # span the whole list must be as well.
@@ -295,11 +287,11 @@ def expand_version_number(engine, segdef):
     intervals = segmentlist([segment(start_time, end_time)])
 
     # Find the maximum version number
-    sql  = "SELECT max(version) FROM segment_definer "
+    sql  = "SELECT max(version) FROM segment_definer "   # noqa: E221
     sql += "WHERE  segment_definer.ifos = '%s' " % ifo
     sql += "AND   segment_definer.name = '%s' " % name
 
-    rows    = engine.query(sql)
+    rows = engine.query(sql)
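+    # old-style conditional expression: use the reported maximum version if
+    # the query returned one, otherwise fall back to version 1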
     try:
         version = len(rows[0]) and rows[0][0] or 1
     except:
@@ -312,7 +304,7 @@ def expand_version_number(engine, segdef):
             segs = query_segments(engine, 'segment_summary', [(ifo, name, version, interval[0], interval[1], 0, 0)])
 
             for seg in segs[0]:
-                results.append( (ifo, name, version, seg[0], seg[1], 0, 0) )
+                results.append((ifo, name, version, seg[0], seg[1], 0, 0))
 
         intervals.coalesce()
         intervals -= segs[0]
@@ -322,27 +314,26 @@ def expand_version_number(engine, segdef):
     return results
 
 
-
-
-def find_segments(doc, key, use_segment_table = True):
+def find_segments(doc, key, use_segment_table=True):
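+    # key has the form 'IFO', 'IFO:NAME' or 'IFO:NAME:VERSION'; missing
+    # name or version pieces act as the wildcard '*'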
     key_pieces = key.split(':')
     while len(key_pieces) < 3:
         key_pieces.append('*')
 
-    filter_func = lambda x: str(x.ifos) == key_pieces[0] and (str(x.name) == key_pieces[1] or key_pieces[1] == '*') and (str(x.version) == key_pieces[2] or key_pieces[2] == '*')
+    def filter_func(x):
+        return (str(x.ifos) == key_pieces[0]
+                and (str(x.name) == key_pieces[1] or key_pieces[1] == '*')
+                and (str(x.version) == key_pieces[2] or key_pieces[2] == '*'))
 
     # Find all segment definers matching the critieria
     seg_def_table = lsctables.SegmentDefTable.get_table(doc)
-    seg_defs      = list(filter(filter_func, seg_def_table))
-    seg_def_ids   = [str(x.segment_def_id) for x in seg_defs]
+    seg_defs      = list(filter(filter_func, seg_def_table))   # noqa: E221
+    seg_def_ids   = [str(x.segment_def_id) for x in seg_defs]   # noqa: E221
 
     # Find all segments belonging to those definers
     if use_segment_table:
-        seg_table     = lsctables.SegmentTable.get_table(doc)
-        seg_entries   = [x for x in seg_table if str(x.segment_def_id) in seg_def_ids]
+        seg_table     = lsctables.SegmentTable.get_table(doc)   # noqa: E221
+        seg_entries   = [x for x in seg_table if str(x.segment_def_id) in seg_def_ids]   # noqa: E221
     else:
         seg_sum_table = lsctables.SegmentSumTable.get_table(doc)
-        seg_entries   = [x for x in seg_sum_table if str(x.segment_def_id) in seg_def_ids]
+        seg_entries   = [x for x in seg_sum_table if str(x.segment_def_id) in seg_def_ids]   # noqa: E221
 
     # Combine into a segmentlist
     ret = segmentlist([segment(x.start_time, x.end_time) for x in seg_entries])
@@ -351,6 +342,7 @@ def find_segments(doc, key, use_segment_table = True):
 
     return ret
 
+
 #
 # =============================================================================
 #
@@ -358,6 +350,8 @@ def find_segments(doc, key, use_segment_table = True):
 #
 # =============================================================================
 #
+
+
 def ensure_segment_table(connection):
     """Ensures that the DB represented by connection posses a segment table.
     If not, creates one and prints a warning to stderr"""
@@ -366,13 +360,13 @@ def ensure_segment_table(connection):
 
     if count == 0:
         sys.stderr.write("WARNING: None of the loaded files contain a segment table\n")
-        theClass  = lsctables.TableByName['segment']
-        statement = "CREATE TABLE IF NOT EXISTS segment (" + ", ".join(["%s %s" % (key, ligolwtypes.ToSQLiteType[theClass.validcolumns[key]]) for key in theClass.validcolumns]) + ")"
+        theClass = lsctables.TableByName['segment']
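+        # build the CREATE TABLE statement from the segment table's declared
+        # columns, mapping each LIGO_LW type to its SQLite equivalent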
+        statement = "CREATE TABLE IF NOT EXISTS segment (" + ", ".join(["%s %s" % (key, ligolwtypes.ToSQLiteType[theClass.validcolumns[key]]) for key in theClass.validcolumns]) + ")"   # noqa: E501
 
         connection.cursor().execute(statement)
 
 
-
+#
 # =============================================================================
 #
 #                    Routines to write data to XML documents
@@ -380,44 +374,47 @@ def ensure_segment_table(connection):
 # =============================================================================
 #
 
+
 def add_to_segment_definer(xmldoc, proc_id, ifo, name, version, comment=''):
     try:
         seg_def_table = lsctables.SegmentDefTable.get_table(xmldoc)
     except:
-        seg_def_table = lsctables.New(lsctables.SegmentDefTable, columns = ["process_id", "segment_def_id", "ifos", "name", "version", "comment"])
+        seg_def_table = lsctables.New(lsctables.SegmentDefTable,
+                                      columns=["process_id", "segment_def_id", "ifos", "name", "version", "comment"])
         xmldoc.childNodes[0].appendChild(seg_def_table)
 
-    seg_def_id                     = seg_def_table.get_next_id()
-    segment_definer                = lsctables.SegmentDef()
-    segment_definer.process_id     = proc_id
+    seg_def_id                     = seg_def_table.get_next_id()   # noqa: E221
+    segment_definer                = lsctables.SegmentDef()   # noqa: E221
+    segment_definer.process_id     = proc_id   # noqa: E221
     segment_definer.segment_def_id = seg_def_id
-    segment_definer.ifos           = ifo
-    segment_definer.name           = name
-    segment_definer.version        = version
-    segment_definer.comment        = comment
+    segment_definer.ifos           = ifo   # noqa: E221
+    segment_definer.name           = name   # noqa: E221
+    segment_definer.version        = version   # noqa: E221
+    segment_definer.comment        = comment   # noqa: E221
 
     seg_def_table.append(segment_definer)
 
     return seg_def_id
 
 
-
 def add_to_segment(xmldoc, proc_id, seg_def_id, sgmtlist):
     try:
         segtable = lsctables.SegmentTable.get_table(xmldoc)
     except:
-        segtable = lsctables.New(lsctables.SegmentTable, columns = ["process_id", "segment_def_id", "segment_id", "start_time", "start_time_ns", "end_time", "end_time_ns"])
+        segtable = lsctables.New(lsctables.SegmentTable,
+                                 columns=["process_id", "segment_def_id", "segment_id",
+                                          "start_time", "start_time_ns", "end_time", "end_time_ns"])
         xmldoc.childNodes[0].appendChild(segtable)
 
     for seg in sgmtlist:
-        segment                = lsctables.Segment()
-        segment.process_id     = proc_id
+        segment                = lsctables.Segment()   # noqa: E221
+        segment.process_id     = proc_id   # noqa: E221
         segment.segment_def_id = seg_def_id
-        segment.segment_id     = segtable.get_next_id()
-        segment.start_time     = seg[0]
-        segment.start_time_ns  = 0
-        segment.end_time       = seg[1]
-        segment.end_time_ns    = 0
+        segment.segment_id     = segtable.get_next_id()   # noqa: E221
+        segment.start_time     = seg[0]   # noqa: E221
+        segment.start_time_ns  = 0   # noqa: E221
+        segment.end_time       = seg[1]   # noqa: E221
+        segment.end_time_ns    = 0   # noqa: E221
 
         segtable.append(segment)
 
@@ -426,19 +423,21 @@ def add_to_segment_summary(xmldoc, proc_id, seg_def_id, sgmtlist, comment=''):
     try:
         seg_sum_table = lsctables.SegmentSumTable.get_table(xmldoc)
     except:
-        seg_sum_table = lsctables.New(lsctables.SegmentSumTable, columns = ["process_id", "segment_def_id", "segment_sum_id", "start_time", "start_time_ns", "end_time", "end_time_ns", "comment"])
+        seg_sum_table = lsctables.New(lsctables.SegmentSumTable,
+                                      columns=["process_id", "segment_def_id", "segment_sum_id",
+                                               "start_time", "start_time_ns", "end_time", "end_time_ns", "comment"])
         xmldoc.childNodes[0].appendChild(seg_sum_table)
 
     for seg in sgmtlist:
-        segment_sum                = lsctables.SegmentSum()
-        segment_sum.process_id     = proc_id
+        segment_sum                = lsctables.SegmentSum()   # noqa: E221
+        segment_sum.process_id     = proc_id   # noqa: E221
         segment_sum.segment_def_id = seg_def_id
         segment_sum.segment_sum_id = seg_sum_table.get_next_id()
-        segment_sum.start_time     = seg[0]
-        segment_sum.start_time_ns  = 0
-        segment_sum.end_time       = seg[1]
-        segment_sum.end_time_ns    = 0
-        segment_sum.comment        = comment
+        segment_sum.start_time     = seg[0]   # noqa: E221
+        segment_sum.start_time_ns  = 0   # noqa: E221
+        segment_sum.end_time       = seg[1]   # noqa: E221
+        segment_sum.end_time_ns    = 0   # noqa: E221
+        segment_sum.comment        = comment   # noqa: E221
 
         seg_sum_table.append(segment_sum)
 
@@ -455,6 +454,7 @@ def add_segment_info(doc, proc_id, segdefs, segments, segment_summaries):
         if segments:
             add_to_segment(doc, proc_id, seg_def_id, segments[i])
 
+
 #
 # =============================================================================
 #
diff --git a/glue/text_progress_bar.py b/glue/text_progress_bar.py
index 0ea3219103cd81777d3cdcc48bab7c6a5225ba2f..170e0f635f51c1ec5ad5a568d89790bfeb0dbd99 100644
--- a/glue/text_progress_bar.py
+++ b/glue/text_progress_bar.py
@@ -19,12 +19,12 @@ Text-mode progress bars
 from __future__ import division, print_function, unicode_literals
 from six.moves import range
 
+import collections
 import locale
 import math
 import os
 import struct
 import sys
-import collections
 
 __all__ = ('ProgressBar', 'ProgressBarTheme')
 
@@ -200,10 +200,10 @@ class ProgressBar:
             iMajorMinor = int(math.ceil(progressFraction * barWidth))
 
             barSymbols = (
-                (self.sequence[-1] * iMajor) +
-                self.sequence[iMinor] +
-                (self.sequence[1] * (barWidth - iMajorMinor)))
-            progressFractionText = ('%.1f%%' % (100*progressFraction)).rjust(6)
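+                # completed cells, one partial cell, then the remainder of the bar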
+                (self.sequence[-1] * iMajor)
+                + self.sequence[iMinor]
+                + (self.sequence[1] * (barWidth - iMajorMinor)))
+            progressFractionText = ('%.1f%%' % (100 * progressFraction)).rjust(6)
 
         print(
             '\r\x1B[1m', label, '\x1B[0m', self.left_border, '\x1B[36m',
@@ -223,8 +223,8 @@ class ProgressBar:
             redraw = text != self.text
             self.text = text
         if value is not None:
-            redraw |= self.max == 0 or round(value/(0.0003*self.max)) != \
-                round(self.value/(0.0003*self.max))
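+            # redraw when the value has moved by at least ~0.03% of the
+            # maximum (or always, if the maximum is zero)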
+            redraw |= self.max == 0 or round(value / (0.0003 * self.max)) != \
+                round(self.value / (0.0003 * self.max))
             self.value = value
         if redraw:
             self.show()
diff --git a/glue/utils.py b/glue/utils.py
index a1de457232fff931ca7a09c57925b5de4a8ee580..9253d8f33104739a1b0406b832b2817e1a48f096 100644
--- a/glue/utils.py
+++ b/glue/utils.py
@@ -26,7 +26,7 @@ signal is faster and more unix-portable than looking in /proc).
 Inspired by Larz Wirzenius
 <http://stackoverflow.com/questions/1005972>
 """
-def pid_exists(pid):
+def pid_exists(pid):   # noqa: E302
     """ Returns true if the given pid exists, false otherwise. """
     try:
         # signal 0 is harmless and can be safely used to probe pid existence
@@ -47,11 +47,12 @@ os.makedirs(), produces no error if the path already exists.
 Inspired by Christos Georgiou
 <http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python>
 """
-def mkdir_p(path):
+def mkdir_p(path):   # noqa: E302
     try:
         os.makedirs(path)
     except OSError as exc:
         if exc.errno == errno.EEXIST:
             pass
-        else: raise
+        else:
+            raise
     return path
diff --git a/test/lal_verify.py b/test/lal_verify.py
index 84d0d5e1f448936ec448e483b27bf5c95242d5c1..cf364cfb417b1a3c8d286a5f563e2d059d77d112 100644
--- a/test/lal_verify.py
+++ b/test/lal_verify.py
@@ -1,10 +1,12 @@
 from __future__ import print_function
+from six.moves import range
+
 import doctest
 import random
 import sys
 import unittest
+
 from glue import lal
-from six.moves import range
 
 
 #
@@ -14,6 +16,7 @@ from six.moves import range
 def maxLIGOTimeGPS():
 	return lal.LIGOTimeGPS(2**32 - 1, 999999999)
 
+
 def randomLIGOTimeGPS():
 	return lal.LIGOTimeGPS(random.randint(-100000000, +100000000), random.randint(0, 999999999))
 
@@ -117,8 +120,8 @@ class test_LIGOTimeGPS(unittest.TestCase):
 		# FIXME:  mod tests fail, fix then enable
 		operators = {
 			"mul": (lal.LIGOTimeGPS.__mul__, swigLIGOTimeGPS.__mul__),
-			"div": (lal.LIGOTimeGPS.__div__, swigLIGOTimeGPS.__div__)#,
-			#"mod": (lal.LIGOTimeGPS.__mod__, swigLIGOTimeGPS.__mod__)
+			"div": (lal.LIGOTimeGPS.__div__, swigLIGOTimeGPS.__div__)  # ,
+			# "mod": (lal.LIGOTimeGPS.__mod__, swigLIGOTimeGPS.__mod__)
 		}
 
 		for i in range(100000):
@@ -128,7 +131,7 @@ class test_LIGOTimeGPS(unittest.TestCase):
 			try:
 				self.assertEqual(abs(op(arg1, arg2) - fromswig(swigop(toswig(arg1), arg2))) <= 1e-9, True)
 			except AssertionError:
-				raise AssertionError("%s(%s, %s) comparison failed: %s != %s" % (key, str(arg1), "%.17g" % arg2, str(op(arg1, arg2)), str(swigop(toswig(arg1), arg2))))
+				raise AssertionError("%s(%s, %s) comparison failed: %s != %s" % (key, str(arg1), "%.17g" % arg2, str(op(arg1, arg2)), str(swigop(toswig(arg1), arg2))))   # noqa: E501
 
 
 #