Commit face6485 authored by Duncan Macleod's avatar Duncan Macleod Committed by James Clark
Browse files

A few changes for python3

parent 425e2807
......@@ -17,6 +17,8 @@
# DAG Class definitions for bayeswave
from __future__ import print_function
import sys,os,subprocess
from glue import pipeline
......@@ -25,12 +27,18 @@ from glue.ligolw import utils as ligolw_utils
from glue.ligolw import lsctables
#import lalinspiral, lalburst
import ConfigParser
import itertools
import socket
import ast
import numpy as np
import random
try:
import configparser
except ImportError: # python < 3
import ConfigParser
else: # other python3 compatibility stuff
xrange = range
# XXX Hardcoded cvmfs frame root
CVMFS_FRAMES="/cvmfs/oasis.opensciencegrid.org/ligo/frames/"
......@@ -311,8 +319,7 @@ class eventTrigger:
try:
self.rho = event_info['extra_attributes']['MultiBurst']['snr']
except KeyError:
print >> sys.stderr, \
"graceDB UID %s has no MultiBurst snr attribute"%(graceid)
print("graceDB UID %s has no MultiBurst snr attribute"%(graceid), file=sys.stderr)
# Set time
self.trigger_time = event_info['gpstime']
......@@ -334,9 +341,8 @@ class eventTrigger:
self.flow = self.max_flow
except KeyError:
print >> sys.stderr, \
"graceDB UID %s has no MultiBurst central_freq attribute"%(graceid)
print >> sys.stderr, "...using default sample rate"
print("graceDB UID %s has no MultiBurst central_freq attribute"%(graceid), file=sys.stderr)
print("...using default sample rate", file=sys.stderr)
self.srate = self.default_srate
......@@ -390,7 +396,7 @@ class triggerList:
else:
# Fail
print >> sys.stdout, "don't know what to do."
print("don't know what to do.", file=sys.stdout)
sys.exit()
def parse_graceDB_triggers(self, cp, graceIDs):
......@@ -416,10 +422,10 @@ class triggerList:
try:
BW_chainLength=cp.getint('bayeswave_options','BW-chainLength')
except ConfigParser.NoOptionError:
except configparser.NoOptionError:
print >> sys.stdout, "Reading chainlength from files in %s"%(
cp.get('bayeswave_options','BW-path'))
print("Reading chainlength from files in %s"%(
cp.get('bayeswave_options','BW-path')), file=sys.stdout)
# O1 names:
if injtype=='glitch':
......@@ -470,12 +476,12 @@ class triggerList:
injection_times = sim_inspiral_table.get_column('geocent_end_time') + \
1e-9*sim_inspiral_table.get_column('geocent_end_time_ns')
print "..read %d injections"%len(injection_times)
print("..read %d injections"%len(injection_times))
triggers=[]
if followup_injections is None:
print 'downsampling to requested injections using events= in config'
print('downsampling to requested injections using events= in config')
# reduce to specified values
events=cp.get('injections', 'events')
......@@ -493,7 +499,7 @@ class triggerList:
# Parse the detected injections
print "downsampling to events listed in %s"%followup_injections
print("downsampling to events listed in %s"%followup_injections)
trigger_list_from_file = triggerList(cp,
trigger_file=followup_injections)
......@@ -534,19 +540,11 @@ class triggerList:
if 'V1' in ifo_list:
network+='V'
else:
print >> sys.stderr, \
"Error setting up timeslides from cWB trigger list. Please check IFO list"
# if 'H1' in ifo_list and 'L1' in ifo_list and 'V1' not in ifo_list:
# network='HL'
# elif 'H1' in ifo_list and 'L1' in ifo_list and 'V1' in ifo_list:
# network='HLV'
# else:
# print >> sys.stderr, \
# "Only HL and HLV networks currently supported for timeslides"
print >> sys.stdout, "Network: {}".format(network)
print >> sys.stdout, "Discarding rho<=%f"%rho_threshold
print("Error setting up timeslides from cWB trigger list. Please check IFO list.", file=sys.stderr)
print("Network: {}".format(network), file=sys.stdout)
print("Discarding rho<=%f"%rho_threshold, file=sys.stdout)
if network=='HL':
......@@ -694,8 +692,8 @@ class triggerList:
keepidx=random.sample(range(0,len(triggers)), nkeep)
triggers_out = [ triggers[i] for i in sorted(keepidx) ]
print >> sys.stdout, "Read %d triggers, following up %d"%(
nall, len(triggers_out))
print("Read %d triggers, following up %d"%(
nall, len(triggers_out)), file=sys.stdout)
return triggers_out
......@@ -734,16 +732,15 @@ class triggerList:
trigger_frequency=trigger_data[i,2]))
elif ncols==4:
print >> sys.stderr, \
"""WARNING: Looks like you're using an old style cwb trigger
list. Success is not guarenteed"""
print("""WARNING: Looks like you're using an old style cwb trigger
list. Success is not guarenteed""", file=sys.stderr)
# Trigger time, hl_lag, frequency, rho
try:
rho_threshold = cp.getfloat('input', 'rho-threshold')
except:
rho_threshold = rho_threshold
print >> sys.stdout, "Discarding rho<=%f"%rho_threshold
print("Discarding rho<=%f"%rho_threshold, file=sys.stdout)
for i in xrange(nrows):
# Apply rho threshold
......@@ -765,8 +762,8 @@ class triggerList:
keepidx=random.sample(range(0,len(triggers)), nkeep)
triggers_out = [ triggers[i] for i in sorted(keepidx) ]
print >> sys.stdout, "Read %d triggers, following up %d"%(
nrows, len(triggers_out))
print("Read %d triggers, following up %d"%(
nrows, len(triggers_out)), file=sys.stdout)
return triggers_out
......@@ -797,7 +794,7 @@ def condor_job_config(job_type,condor_job,config_parser):
try:
job_index = valid_job_types.index(job_type)
except ValueError:
print >> sys.stderr, "unrecognized job type"
print("unrecognized job type", file=sys.stderr)
# --- Set executable and choose singularity image
executable=config_parser.get('engine',job_type)
......@@ -815,14 +812,14 @@ def condor_job_config(job_type,condor_job,config_parser):
requires.append("(HAS_SINGULARITY=?=TRUE)")
print "Running with singularity(image={})".format(
config_parser.get('engine','singularity'))
print("Running with singularity(image={})".format(
config_parser.get('engine','singularity')))
try:
image = config_parser.get('engine','singularity').replace('"','')
except:
print """You requested a singularity run (--singularity) but did not
specify an image in the [engine] section of the config file"""
print("""You requested a singularity run (--singularity) but did not
specify an image in the [engine] section of the config file""")
sys.exit(-1)
pipeline.CondorDAGJob.__init__(condor_job,universe,executable)
......@@ -836,7 +833,7 @@ def condor_job_config(job_type,condor_job,config_parser):
condor_job.add_condor_cmd('+SingularityBindCVMFS', True)
# --- Perform file transfers
print >> sys.stdout, "Configuring file transfers (singularity requirement)"
print("Configuring file transfers (singularity requirement)", file=sys.stdout)
condor_job.add_condor_cmd('should_transfer_files', 'YES')
condor_job.add_condor_cmd('when_to_transfer_output', 'ON_EXIT_OR_EVICT')
......@@ -1465,7 +1462,7 @@ class megaskyNode(pipeline.CondorDAGNode, pipeline.AnalysisNode):
# Set eventnum if injection
def set_injevent(self, eventnum):
print 'eventnum:', eventnum
print('eventnum:', eventnum)
self.add_var_opt('eventnum', eventnum)
self.eventnum=eventnum
# Set work dir
......
......@@ -6,7 +6,7 @@ try:
import glue.ligolw.table
import glue.ligolw.lsctables
except:
print "WARNING: glue modules not found. Will not be able to read XML tables"
print("WARNING: glue modules not found. Will not be able to read XML tables")
filename = '/home/jkanner/baysewave/svn/trunk/burstinj/s6/BurstMDC-BRST_S6-Log.txt'
......@@ -30,7 +30,7 @@ class Mdc:
# -- Read a CBC XML table
if fileExtension == '.xml':
print "READING CBC XML TABLE"
print("READING CBC XML TABLE")
xmldoc = glue.ligolw.utils.load_filename(filename, verbose=False)
injs = glue.ligolw.table.get_table(xmldoc,glue.ligolw.lsctables.SimInspiralTable.tableName)
......@@ -69,7 +69,7 @@ class Mdc:
mdclog.close()
namelist = namestring.split()
print namelist
print(namelist)
data = np.recfromtxt(filename, names=namelist)
self.waveform = data['SimName']
self.hrss = data['SimHrss']
......@@ -81,9 +81,9 @@ class Mdc:
self.ecc = data['Internal_x']
def get_theta_phi(self, time):
print "Injection times:"
print self.gps
print "Trying to find this time: {0}".format(time)
print("Injection times:")
print(self.gps)
print("Trying to find this time: {0}".format(time))
n = np.where(np.abs(self.gps-time) < 0.1)
return (self.theta[n], self.phi[n])
......
......@@ -49,7 +49,7 @@ class BwbParams:
self.noise = float(spl[1])
self.noise_err = float(spl[2])
except:
print "WARNING: Could not find file evidence.dat"
print("WARNING: Could not find file evidence.dat")
# -- Read run file
......@@ -82,13 +82,13 @@ class BwbParams:
self.gps = float(cmdline[index+1])
# Determine if the job was glitchOnly, noiseOnly, or signalOnly
elif arg=='--glitchOnly':
print '\nThis run was executed with the --glitchOnly flag\n'
print('\nThis run was executed with the --glitchOnly flag\n')
self.restrictModel = 'glitch'
elif arg=='--noiseOnly':
print '\nThis run was executed with the --noiseOnly flag\n'
print('\nThis run was executed with the --noiseOnly flag\n')
self.restrictModel = 'noise'
elif arg=='--signalOnly':
print '\nThis run was executed with the --signalOnly flag\n'
print('\nThis run was executed with the --signalOnly flag\n')
self.restrictModel = 'signal'
elif arg=='--inj':
self.mdc = True
......
......@@ -15,6 +15,8 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import print_function
import numpy as np
import time
import sys
......@@ -73,7 +75,7 @@ def confirm(prompt=None, resp=False):
if not ans:
return resp
if ans not in ['y', 'Y', 'n', 'N']:
print 'please enter y or n.'
print('please enter y or n.')
continue
if ans == 'y' or ans == 'Y':
return True
......@@ -180,15 +182,15 @@ def parser():
(opts,args) = parser.parse_args()
if opts.workdir is None:
print >> sys.stderr, "ERROR: must specify --workdir"
print("ERROR: must specify --workdir", file=sys.stderr)
sys.exit()
if len(args)==0:
print >> sys.stderr, "ERROR: require config file"
print("ERROR: require config file", file=sys.stderr)
sys.exit()
if not os.path.isfile(args[0]):
print >> sys.stderr, "ERROR: config file %s does not exist"%args[0]
print("ERROR: config file %s does not exist"%args[0], file=sys.stderr)
sys.exit()
......@@ -213,7 +215,7 @@ workdir = opts.workdir
if os.path.exists(workdir):
# Prompt for confirmation to continue as this will overwrite existing
    # workflow files (but not results)
print >> sys.stderr, """
print("""
\nXXX DANGER XXX: path {} already exists.
Continuing workflow generation will OVERWRITE current workflow files
......@@ -223,14 +225,14 @@ if os.path.exists(workdir):
Proceeding is only recommended to re-run POSTPROCESSING.
**Sanity is not guarenteed** if re-running parent bayeswave jobs\n""".format(
workdir)
workdir), file=sys.stderr)
if not confirm(prompt='Proceed?', resp=False):
print >> sys.stderr, "You chose wisely, exiting"
print("You chose wisely, exiting", file=sys.stderr)
sys.exit()
else:
print >> sys.stdout, "making work-directory: %s"%workdir
print("making work-directory: %s"%workdir, file=sys.stdout)
os.makedirs(workdir)
os.makedirs(workdir+"/logs")
......@@ -262,8 +264,8 @@ if cp.has_option('engine', 'singularity'):
try:
cp.get('engine', 'singularity')
except:
print "Singularity job requested but no image was specified"
print "Please add (e.g.,) singularity=<image-path.img> to [engine]"
print("Singularity job requested but no image was specified")
print("Please add (e.g.,) singularity=<image-path.img> to [engine]")
exit(-1)
else:
cp.set('engine', 'use-singularity', str(False))
......@@ -320,12 +322,11 @@ if injfile is not None and cp.has_option('injections','nrhdf5'):
# Skip segment queries?
print >> sys.stdout, "Determining whether to do segment queries"
print("Determining whether to do segment queries", file=sys.stdout)
try:
skip_segment_queries = cp.getboolean('datafind','ignore-science-segments')
except ConfigParser.NoOptionError:
print >> sys.stdout, \
"No ignore-science-segments in [datafind], skipping segdb by default"
print("No ignore-science-segments in [datafind], skipping segdb by default", file=sys.stdout)
cp.set('datafind','ignore-science-segments', str(True))
skip_segment_queries=True
......@@ -373,14 +374,14 @@ if injfile is not None:
if cp.has_option('bayeswave_options','BW-inject'):
# Check the option is valid:
if cp.get('bayeswave_options','BW-inject') not in ['signal','glitch']:
print >> sys.stderr, "Error: BW-inject must be in ", ['signal','glitch']
print("Error: BW-inject must be in ", ['signal','glitch'], file=sys.stderr)
sys.exit()
#
# Perform internal injections drawn from the signal or glitch model
#
if opts.trigger_time is None:
opts.trigger_time=1126259462.392
print >> sys.stdout, "Setting trigger time to %f"%opts.trigger_time
print("Setting trigger time to %f"%opts.trigger_time, file=sys.stdout)
trigger_list = pipe_utils.triggerList(cp, gps_times=opts.trigger_time,
internal_injections=True)
......@@ -402,7 +403,7 @@ if opts.submit_to_gracedb:
else:
html_root = opts.html_root
if html_root is None:
print >> sys.stder, "demanding submit to gdb but no html-root"
print("demanding submit to gdb but no html-root", file=sys.stder)
sys.exit()
......@@ -410,7 +411,7 @@ if opts.submit_to_gracedb:
if not os.path.exists(html_root):
os.makedirs(html_root)
else:
print >> sys.stderr, "Warning: html-root %s exists"%html_root
print("Warning: html-root %s exists"%html_root, file=sys.stderr)
# Extract trigger times for readability. Add a systematic offset if required
......@@ -521,7 +522,7 @@ if (opts.cwb_trigger_list is not None) \
for ifo in ifo_list:
if cp.getboolean('datafind','sim-data'):
print >> sys.stdout, "Simulating noise"
print("Simulating noise", file=sys.stdout)
# Get the type of simulated data from the frame type list
# E.g., to simulate from LALSimAdLIGO put this in the config.ini:
......@@ -534,8 +535,7 @@ for ifo in ifo_list:
# If sim-data cache file is a reference PSD file, copy it to the work
# directory
if os.path.exists(sim_spectrum):
print >> sys.stdout, \
"Attempting to copy ASD file to datafind directory"
print("Attempting to copy ASD file to datafind directory", file=sys.stdout)
asd_path = os.path.join(datafind_dir,
os.path.basename(sim_spectrum))
shutil.copy(sim_spectrum, asd_path)
......@@ -558,8 +558,7 @@ for ifo in ifo_list:
cache_files[ifo]=os.path.join('datafind', '{0}.cache'.format(ifo))
if opts.skip_datafind:
print >> sys.stdout, \
"Copying cache files from [datafind], cache-files"
print("Copying cache files from [datafind], cache-files", file=sys.stdout)
manual_cache_files=ast.literal_eval(cp.get('datafind','cache-files'))
shutil.copy(manual_cache_files[ifo], cache_files[ifo])
......@@ -580,8 +579,8 @@ for ifo in ifo_list:
{cachefile}".format( o=ifo[0], frtype=frtype_list[ifo],
cachefile=cachefilefmt.format(ifo), gps_start_time=gps_start_time,
gps_end_time=gps_end_time, url_type=cp.get('datafind','url-type'))
print >> sys.stdout, "Calling LIGO data find ..."
print >> sys.stdout, ldfcmd
print("Calling LIGO data find ...", file=sys.stdout)
print(ldfcmd, file=sys.stdout)
subprocess.call(ldfcmd, shell=True)
......@@ -626,7 +625,7 @@ for ifo in ifo_list:
segfile.close()
if segmentList[ifo] == []:
print >> sys.stderr, "No matching segments for %s"%ifo
print("No matching segments for %s"%ifo, file=sys.stderr)
sys.exit()
os.chdir(curdir)
......@@ -637,7 +636,7 @@ for ifo in ifo_list:
# directory
if opts.copy_frames:
print "Setting up frame copying"
print("Setting up frame copying")
#
# Now we need to make a new, local cache file
......@@ -739,9 +738,9 @@ if opts.submit_to_gracedb: submitToGraceDB_job = pipe_utils.submitToGraceDB(cp)
try:
dataseed=cp.getint('input', 'dataseed')
except ConfigParser.NoOptionError:
print >> sys.stderr, "[input] section requires dataseed for sim data"
print >> sys.stderr, " (you need this in bayeswave_post, even if real data"
print >> sys.stderr, "...removing %s"%workdir
print("[input] section requires dataseed for sim data", file=sys.stderr)
print(" (you need this in bayeswave_post, even if real data", file=sys.stderr)
print("...removing %s"%workdir, file=sys.stderr)
os.chdir(topdir)
shutil.rmtree(workdir)
sys.exit()
......@@ -754,7 +753,7 @@ totaltrigs=0
for t,trigger in enumerate(trigger_list.triggers):
print >> sys.stdout, "---------------------------------------"
print("---------------------------------------", file=sys.stdout)
# Add systematic offset
trigger.trigger_time += opts.trigger_time_delta
......@@ -782,23 +781,23 @@ for t,trigger in enumerate(trigger_list.triggers):
unanalyzeable_jobs.append(bad_job)
print >> sys.stderr, "Warning: No matching %s segments for job %d of %d"%(
ifo, t+1, len(trigger_times))
print >> sys.stderr, bad_job
print("Warning: No matching %s segments for job %d of %d"%(
ifo, t+1, len(trigger_times)), file=sys.stderr)
print(bad_job, file=sys.stderr)
break
else:
if 'H1' in ifo_list:
print >> sys.stdout, """Adding node for GPS {0} ({1} of {2})
print( """Adding node for GPS {0} ({1} of {2})
L1-timeslide {3}, V-timeslide {4} """.format(
trigger.trigger_time, totaltrigs+1, len(trigger_times),
trigger.hl_time_lag, trigger.hv_time_lag)
trigger.hl_time_lag, trigger.hv_time_lag)), file=sys.stdout)
else:
print >> sys.stdout, """Adding node for GPS {0} ({1} of {2})
print("""Adding node for GPS {0} ({1} of {2})
L1-timeslide {3}, V-timeslide {4} """.format(
trigger.trigger_time, totaltrigs+1, len(trigger_times),
trigger.hl_time_lag, trigger.lv_time_lag)
trigger.hl_time_lag, trigger.lv_time_lag), file=sys.stdout)
if not cp.getboolean('datafind','sim-data'):
......@@ -939,8 +938,8 @@ for t,trigger in enumerate(trigger_list.triggers):
#
megasky_node.set_outputDir(outputDir)
if injfile is not None:
print 'Injfile is not none'
print 'adding event ', trigger.injevent
print('Injfile is not none')
print('adding event ', trigger.injevent)
megasky_node.set_injevent(trigger.injevent)
megaplot_node.set_outputDir(outputDir)
......@@ -1017,7 +1016,7 @@ os.chdir(topdir)
# print some summary info:
if len(trigger_times)-len(unanalyzeable_jobs)>0:
print """
print("""
Total number of requested trigger times: {ntrigs_desired}
Number of triggers successfully added to DAG: {ntrigs_added}
Number of triggers failing data criteria: {ntrigs_failed}
......@@ -1028,25 +1027,25 @@ if len(trigger_times)-len(unanalyzeable_jobs)>0:
""".format(ntrigs_desired=len(trigger_times),
ntrigs_added=len(trigger_times)-len(unanalyzeable_jobs),
ntrigs_failed=len(unanalyzeable_jobs),
workdir=workdir, dagfile=dag.get_dag_file())
workdir=workdir, dagfile=dag.get_dag_file()))
else:
print ""
print "No analyzeable jobs in requested time"
print("")
print("No analyzeable jobs in requested time")
if opts.condor_submit:
# Auto-submit dag by cd-ing into the work-directory and submitting
# chdir is useful with the OSG-friendly relative paths
print "Submitting DAG..."
print("Submitting DAG...")
os.chdir(workdir)
x = subprocess.Popen(['condor_submit_dag',dag.get_dag_file()])
x.wait()
if x.returncode==0:
print 'Submitted DAG file: ',dag.get_dag_file()
print('Submitted DAG file: ',dag.get_dag_file())
else:
print 'Unable to submit DAG file'
print('Unable to submit DAG file')
os.chdir(topdir)
......
......@@ -30,9 +30,9 @@ import sys
import re
import traceback
print 'Path to megaplot: '
print sys.argv[0]
print '\n'
print('Path to megaplot: ')
print(sys.argv[0])
print('\n')
# Allow navigation into specified working directory
......@@ -173,17 +173,17 @@ def readbwb():
# Determine the job name
elif arg=='--runName':
jobName = cmdline[index+1]
print "The job name is: {0}".format(jobName)
print("The job name is: {0}".format(jobName))
jobName = jobName+'_'
# Determine if the job was glitchOnly, noiseOnly, or signalOnly
elif arg=='--glitchOnly':
print '\nThis run was executed with the --glitchOnly flag\n'
print('\nThis run was executed with the --glitchOnly flag\n')
restrictModel = 'glitch'
elif arg=='--noiseOnly':
print '\nThis run was executed with the --noiseOnly flag\n'
print('\nThis run was executed with the --noiseOnly flag\n')
restrictModel = 'noise'
elif arg=='--signalOnly':
print '\nThis run was executed with the --signalOnly flag\n'
print('\nThis run was executed with the --signalOnly flag\n')
restrictModel = 'signal'
elif arg=='--inj':
injFlag = True
......@@ -221,7 +221,7 @@ def readbwb():
info = info + 'Injected SNR in detector {0} = {1}\n'.format(ifoNames[int(ifo)],snrList[-1])
bayeswave.close()
# -- Report to user
print "{0}".format(info)
print("{0}".format(info))
return(jobName, restrictModel, mdc, injFlag, bayeswaverunfile, ifoList, ifoNames, gps, snrList, info)
# --------------------------------------------------
......@@ -444,9 +444,9 @@ def plot_evidence(jobName, plotsDir):
err_sig_gl += err_sig_si