Commit 24322407 authored by Adam Mercer's avatar Adam Mercer

Merge branch 'lalapps-python3' into 'master'

Updated lalapps python syntax for python3

See merge request lscsoft/lalsuite!612
parents b0163c43 5d6a707b
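The changes in this merge request are the standard, mechanical Python 2 to 3 conversions: print statements become print() function calls (with "from __future__ import print_function" added near the top of each script so it still runs under Python 2), "except Error, err" becomes "except Error as err", and "raise Error, msg" becomes "raise Error(msg)". The sketch below is illustrative only and not part of the merge request; check_path() is a hypothetical helper written solely to show the converted idioms together.

from __future__ import print_function   # must appear before other code; keeps print() usable under Python 2
import sys

def check_path(path):
    """Hypothetical helper illustrating the idioms adopted throughout this merge request."""
    if not path:
        # old form: print >> sys.stderr, "No path specified."
        print("No path specified.", file=sys.stderr)
        sys.exit(1)
    try:
        fh = open(path, 'r')
    except IOError as err:                                    # old form: except IOError, err:
        raise ValueError("cannot open %s: %s" % (path, err))  # old form: raise ValueError, "cannot open ..."
    fh.close()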
......@@ -5,6 +5,8 @@ This script produces the condor submit and dag files to run
the noise comparison between h(t) and calibrated DARM_ERR
"""
+from __future__ import print_function
__author__ = 'Xavier Siemens<siemens@gravity.phys.uwm.edu>'
__date__ = '$Date$'
__version__ = '$Revision$'
......@@ -77,18 +79,18 @@ parser.add_option("-V", "--veto-list",action="store_true",default=False,\
df_pad=128
if not opts.config_file:
print >> sys.stderr, "No configuration file specified."
print >> sys.stderr, "Use --help for usage details."
print("No configuration file specified.", file=sys.stderr)
print("Use --help for usage details.", file=sys.stderr)
sys.exit(1)
if not opts.segment_filename:
print >> sys.stderr, "No segment filename specified."
print >> sys.stderr, "Use --help for usage details."
print("No segment filename specified.", file=sys.stderr)
print("Use --help for usage details.", file=sys.stderr)
sys.exit(1)
if not opts.basename:
print >> sys.stderr, "No dag file base name specified."
print >> sys.stderr, "Use --help for usage details."
print("No dag file base name specified.", file=sys.stderr)
print("Use --help for usage details.", file=sys.stderr)
sys.exit(1)
# create the config parser object and read in the ini file
......@@ -142,11 +144,11 @@ epochs = strain.EpochData(cp,opts)
epoch_cnt = 0;
# loop over the segments defined by the calibration epochs
print "\n"
print("\n")
for epoch in epochs.epoch_segs():
noise_output_files = []
noise_output_files2 = []
print "setting up jobs for calibration epoch " + str(epoch[1])+" - "+str(epoch[2]) + "..."
print("setting up jobs for calibration epoch " + str(epoch[1])+" - "+str(epoch[2]) + "...")
#output the epochs in their own directories
epoch_dir = 'EPOCH'+'-'+str(epoch[1])+'-'+str(epoch[2])
mkdir_node2 = strain.MkdirNode(mkdir_job,epoch_dir)
......@@ -270,7 +272,7 @@ if not opts.cat_noise_jobs:
if opts.write_dax: dag.write_pegasus_rls_cache(cp.get("ldgsubmitdax","gsiftp"),cp.get("ldgsubmitdax","pool"))
if opts.write_script: dag.write_script()
print "\nDAG contains " + str(len(dag.get_nodes())) + " nodes.\n"
print("\nDAG contains " + str(len(dag.get_nodes())) + " nodes.\n")
# write out a log file for this script
log_fh = open(opts.basename + '.pipeline.log', 'w')
......@@ -291,21 +293,21 @@ if not opts.cat_noise_jobs:
for seg in data:
for chunk in seg:
total_data += len(chunk)
print >> log_fh, "total data =", total_data
print("total data =", total_data, file=log_fh)
print >> log_fh, "\n===========================================\n"
print >> log_fh, data
print("\n===========================================\n", file=log_fh)
print(data, file=log_fh)
for seg in data:
-print >> log_fh, seg
+print(seg, file=log_fh)
for chunk in seg:
-print >> log_fh, chunk, 'length', int(chunk.end())-int(chunk.start())
+print(chunk, 'length', int(chunk.end())-int(chunk.start()), file=log_fh)
endgps=chunk.end()
if not opts.cat_noise_jobs:
# write a message telling the user that the DAG has been written
print "\nCreated a DAG file which can be submitted by executing"
print "\n condor_submit_dag", dag.get_dag_file()
print """\nfrom a condor submit machine (e.g. hydra.phys.uwm.edu)\n
print("\nCreated a DAG file which can be submitted by executing")
print("\n condor_submit_dag", dag.get_dag_file())
print("""\nfrom a condor submit machine (e.g. hydra.phys.uwm.edu)\n
If you are running LSCdataFind jobs, do not forget to initialize your grid
proxy certificate on the condor submit machine by running the commands
......@@ -328,7 +330,7 @@ if not opts.cat_noise_jobs:
Contact the administrator of your cluster to find the hostname and port of the
LSCdataFind server.
"""
""")
sys.exit(0)
......@@ -5,6 +5,8 @@ This script produces the condor submit and dag files to run
the standalone strain
"""
+from __future__ import print_function
__author__ = 'Xavier Siemens<siemens@gravity.phys.uwm.edu>'
__date__ = '$Date$'
__version__ = '$Revision$'
......@@ -34,7 +36,7 @@ Usage: lalapps_ring_pipe [options]
-f, --dag-file basename for .dag file (excluding the .dag)
-t, --aux-path path to auxiliary files
"""
-print >> sys.stderr, msg
+print(msg, file=sys.stderr)
# pasrse the command line options to figure out what we should do
shortop = "hv:s:e:S:f:t:a:b:"
......@@ -66,7 +68,7 @@ segment_filename = None
for o, a in opts:
if o in ("-v", "--version"):
print "$Id$"
print("$Id$")
sys.exit(0)
elif o in ("-h", "--help"):
usage()
......@@ -86,7 +88,7 @@ for o, a in opts:
elif o in ("-b", "--trig-end"):
pass
else:
print >> sys.stderr, "Unknown option:", o
print("Unknown option:", o, file=sys.stderr)
usage()
sys.exit(1)
......@@ -95,29 +97,29 @@ log_path = '/usr1/xsiemens/'
df_pad=128
if not config_file:
print >> sys.stderr, "No configuration file specified."
print >> sys.stderr, "Use --help for usage details."
print("No configuration file specified.", file=sys.stderr)
print("Use --help for usage details.", file=sys.stderr)
sys.exit(1)
if not log_path:
print >> sys.stderr, "No log file path specified."
print >> sys.stderr, "Use --help for usage details."
print("No log file path specified.", file=sys.stderr)
print("Use --help for usage details.", file=sys.stderr)
sys.exit(1)
if (not GPSStart or not GPSEnd) and not segment_filename:
print >> sys.stderr, "No GPS start time and end times or segment filename specified."
print >> sys.stderr, "Either GPS start time and end times, or a segment filename must be specified."
print >> sys.stderr, "Use --help for usage details."
print("No GPS start time and end times or segment filename specified.", file=sys.stderr)
print("Either GPS start time and end times, or a segment filename must be specified.", file=sys.stderr)
print("Use --help for usage details.", file=sys.stderr)
sys.exit(1)
if not basename:
print >> sys.stderr, "No dag file base name specified."
print >> sys.stderr, "Use --help for usage details."
print("No dag file base name specified.", file=sys.stderr)
print("Use --help for usage details.", file=sys.stderr)
sys.exit(1)
if not aux_path:
print >> sys.stderr, "No auxiliary file path specified."
print >> sys.stderr, "Use --help for usage details."
print("No auxiliary file path specified.", file=sys.stderr)
print("Use --help for usage details.", file=sys.stderr)
sys.exit(1)
# try and make a directory to store the cache files and job logs
......@@ -159,7 +161,7 @@ strain_job.set_sub_file( basename + '.strain' + subsuffix )
# if runnign on-line make segments filename
if running_online:
segment_file=open('strain_segment.txt',mode='w')
-print >> segment_file, '1',GPSStart,' ',GPSEnd,' ',int(GPSEnd)-int(GPSStart)
+print('1',GPSStart,' ',GPSEnd,' ',int(GPSEnd)-int(GPSStart), file=segment_file)
segment_file.close()
segment_filename='strain_segment.txt'
......@@ -206,7 +208,7 @@ for seg in data:
command = "/archive/home/xsiemens/lscsoft/glue/bin/LSCdataFindcheck --gps-start-time "+str(seg.start())+\
" --gps-end-time "+str(seg.end())+" "+df.get_output()
-print >> cachecheck_file, command
+print(command, file=cachecheck_file)
if prev_df:
df.add_parent(prev_df)
......@@ -222,11 +224,11 @@ for seg in data:
gps_str=str(chunk.start())
gps_time_first_four=gps_str[0]+gps_str[1]+gps_str[2]+gps_str[3]
try: os.mkdir(base_data_dirL1+'/'+ifo[0]+'-'+frametypeL1+'-'+gps_time_first_four)
-except OSError, err:
+except OSError as err:
import errno
#print "Warning:", err
try: os.mkdir(base_data_dirL2+'/'+ifo[0]+'-'+frametypeL2+'-'+gps_time_first_four)
-except OSError, err:
+except OSError as err:
import errno
#print "Warning:", err
......@@ -241,9 +243,9 @@ for seg in data:
strain1.add_parent(df)
dag.add_node(strain1)
-print >> framelist_file, 'ls '+directory+'/'+ifo[0] \
+print('ls '+directory+'/'+ifo[0] \
+'-'+frametype+'-'+str(int(chunk.start())+int(overlap))+'-' \
-+str(int(chunk.end())-int(chunk.start())-2*int(overlap))+'.gwf > /dev/null'
++str(int(chunk.end())-int(chunk.start())-2*int(overlap))+'.gwf > /dev/null', file=framelist_file)
cachecheck_file.close()
framelist_file.close()
......@@ -267,24 +269,24 @@ total_data = 0
for seg in data:
for chunk in seg:
total_data += len(chunk)
print >> log_fh, "total data =", total_data
print("total data =", total_data, file=log_fh)
print >> log_fh, "\n===========================================\n"
print >> log_fh, data
print("\n===========================================\n", file=log_fh)
print(data, file=log_fh)
for seg in data:
-print >> log_fh, seg
+print(seg, file=log_fh)
for chunk in seg:
-print >> log_fh, chunk, 'length', int(chunk.end())-int(chunk.start())
+print(chunk, 'length', int(chunk.end())-int(chunk.start()), file=log_fh)
endgps=chunk.end()
if running_online:
-print >> sys.stdout, seg.start()+overlap,int(chunk.end())-overlap
+print(seg.start()+overlap,int(chunk.end())-overlap)
if not running_online:
# write a message telling the user that the DAG has been written
print "\nCreated a DAG file which can be submitted by executing"
print "\n condor_submit_dag", dag.get_dag_file()
print """\nfrom a condor submit machine (e.g. hydra.phys.uwm.edu)\n
print("\nCreated a DAG file which can be submitted by executing")
print("\n condor_submit_dag", dag.get_dag_file())
print("""\nfrom a condor submit machine (e.g. hydra.phys.uwm.edu)\n
If you are running LSCdataFind jobs, do not forget to initialize your grid
proxy certificate on the condor submit machine by running the commands
......@@ -307,7 +309,7 @@ if not running_online:
Contact the administrator of your cluster to find the hostname and port of the
LSCdataFind server.
"""
""")
sys.exit(0)
......@@ -228,7 +228,7 @@ class NoiseNode(pipeline.CondorDAGNode, pipeline.AnalysisNode):
# set the frame type based on the LFNs returned by datafind
#self.add_var_opt('frame-type',b)
else:
raise CondorDAGNodeError, "Unknown LFN cache format"
raise CondorDAGNodeError("Unknown LFN cache format")
# Convenience functions to cat together noise output files.
def open_noise_cat_file(dir):
......@@ -300,11 +300,11 @@ def plot_systematics(filelist,cp,dir,epoch,dag,opts):
amphf[f] = 0.0
freqcnt = 0;
print "\tfirst pass through systematics files..."
print("\tfirst pass through systematics files...")
for file in flist:
try: input = open(file,'r')
except:
print "WARNING: file " + file + " doesn't exist"
print("WARNING: file " + file + " doesn't exist")
continue
for line in input.readlines():
tmp = line.split()
......@@ -394,12 +394,12 @@ def plot_systematics(filelist,cp,dir,epoch,dag,opts):
freqcnt = 0;
print "\tsecond pass through systematics files..."
print("\tsecond pass through systematics files...")
#Compute the moments of the distribution
for file in flist:
try: input = open(file,'r')
except:
print "WARNING: file " + file + " doesn't exist"
print("WARNING: file " + file + " doesn't exist")
continue
for line in input.readlines():
tmp = line.split()
......@@ -458,7 +458,7 @@ def plot_systematics(filelist,cp,dir,epoch,dag,opts):
fl.close()
# Plot the results
print "\tplotting..."
print("\tplotting...")
# Plot the systematic in magnitude
magfigname = "sys_mag"+epoch[1]+"-"+epoch[2]+".png"
figure(1)
......@@ -592,7 +592,7 @@ def plot_systematics(filelist,cp,dir,epoch,dag,opts):
page.write('<h3>Raw distribution of residual noise</h3><hr><br>\n')
for f in freq:
#time.sleep(10)
print "plotting "+str(f)
print("plotting "+str(f))
figname = "n_hist_"+str(f)+'_'+epoch[1]+"-"+epoch[2]+".png"
#figure(1)
plot(binVec,realHistVecs[f])
......@@ -647,7 +647,7 @@ def plot_noise_jobs(filelist,cp,dir,epoch,dag,qjob,opts):
for file in filelist:
try: input = open(file,'r')
except:
print "WARNING: file " + file + " doesn't exist"
print("WARNING: file " + file + " doesn't exist")
continue
#if STOP > 100: break
#STOP+=1
......@@ -769,7 +769,7 @@ def plot_noise_spec(specList,cp,dir,dag,qjob,qfile,tftuple):
A = array(specList,typecode='f')
figure(1)
pcolor(X,Y,A.transpose(),shading='flat',vmin=0.95,vmax=1.05)
print "...plotting qscan for " + start
print("...plotting qscan for " + start)
title('h(t) and h(f) power ratios per freq bin GPS '+start + '\n min = '+str(MIN) + ' max = '+str(MAX) )
xlabel('Time')
ylabel('Frequency')
......@@ -897,7 +897,7 @@ class qscanNode(pipeline.CondorDAGNode):
# just look at darm and h(t) for puny outliers.
self.add_file_arg(qfile)
else:
print ".....found 10% outlier running full qscan\n"
print(".....found 10% outlier running full qscan\n")
# run the standard qscan on outliers greater than 10%
self.add_file_arg(qfile+'FULL')
self.add_var_arg('@default')
......
......@@ -95,8 +95,8 @@ def create_bank(configcp, arguments):
arguments = arguments + ' --n 1 --check --print-bank --xml-output'
os.system('rm -f BE_Bank.dat BE_Bank.xml')
-print '###'
-print ' We are creating the template bank for sanity check. Please wait'
+print('###')
+print(' We are creating the template bank for sanity check. Please wait')
fp =open('BankEfficiency_createbank','w');
fp.write( configcp.get("main", "executable") + arguments +' \
1> ./log/bankefficiency_tmpltbank.out 2>./log/bankefficiency_tmpltbank.err'+'\n')
......@@ -105,9 +105,9 @@ def create_bank(configcp, arguments):
a=os.system('./BankEfficiency_createbank')
if a==0:
-print '... done (your parameters seems correct). See BE_Bank.xml file.'
+print('... done (your parameters seems correct). See BE_Bank.xml file.')
else:
-print '... failed (your parameters seems correct)'
+print('... failed (your parameters seems correct)')
sys.exit()
def create_dag_file(configcp):
......@@ -118,7 +118,7 @@ def create_dag_file(configcp):
"""
njobs = int(configcp.get("simulation", "njobs"))
-print '--- Generating the dag file'
+print('--- Generating the dag file')
fp=open('bep.dag', 'w')
for id in range(1,njobs+1,1):
fp.write('JOB '+str(id)+' bep.sub\n')
......@@ -130,7 +130,7 @@ def create_dag_file(configcp):
fp.write('PARENT ' + str(id)+' CHILD '+str(njobs+1)+'\n')
fp.close()
-print '... done'
+print('... done')
def create_finalise_condor(configcp):
"""
......@@ -177,13 +177,13 @@ def check_executable(configcp):
A routine to check that the executable is accessible
"""
try:
-print '--- Check that the executable ('+ configcp.get("main","executable") +')is present in '+path
+print('--- Check that the executable ('+ configcp.get("main","executable") +')is present in '+path)
f = open(configcp("main", "executable"), 'r')
f.close()
except:
-print '### Can not find ' + configcp("main", "executable")
+print('### Can not find ' + configcp("main", "executable"))
sys.exit()
-print '... executable found. Going ahead'
+print('... executable found. Going ahead')
......@@ -192,7 +192,7 @@ def parse_arguments():
"""
The user interface
"""
-print '--- Parsing user arguments'
+print('--- Parsing user arguments')
parser = OptionParser()
parser.add_option( "--config-file")
......@@ -210,13 +210,13 @@ configcp.read(options.config_file)
os.system('mkdir log')
arguments = create_condor_file(configcp)
print """
print("""
The condor script will use the following arguments
-------------------------------------------
"""
print arguments
print '\n--- The number of simulation requested is '+configcp.get("simulation", "ntrial")
print '--- They will be split into '+ configcp.get("simulation", "njobs")+' jobs'
""")
print(arguments)
print('\n--- The number of simulation requested is '+configcp.get("simulation", "ntrial"))
print('--- They will be split into '+ configcp.get("simulation", "njobs")+' jobs')
# create the condor file using the input parameter stored in BE
create_finalise_script(configcp)
......@@ -224,14 +224,14 @@ create_finalise_condor(configcp)
create_dag_file(configcp)
create_bank(configcp, arguments)
-print '--- Generating the prototype xml file for merging condor job'
+print('--- Generating the prototype xml file for merging condor job')
command = configcp.get("main", "executable") + ' ' + arguments +' --print-prototype \
1>./log/bankefficiency_prototype.out 2>./log/bankefficiency_prototype.err'
os.system(command)
-print '... done'
+print('... done')
time.sleep(.5)
print """--- In order to start the job, type
print("""--- In order to start the job, type
--------------------------------------------
condor_submit_dag -maxjobs 100 bep.dag
......@@ -243,7 +243,7 @@ condor_submit_dag -maxjobs 100 -f bep.dag
Once the dag is finished and all the job are completed, get back all
the results together within an xml file by using the script called : finalise.sh
-Ideally, this script should be put within the daga file"""
+Ideally, this script should be put within the daga file""")
create_finalise_script(configcp)
os.system('mkdir log')
......
......@@ -16,9 +16,9 @@ from pylab import *
try:
from scipy.sandbox import delaunay
except:
print """"Warning: the delaunay package could not be imported.
print(""""Warning: the delaunay package could not be imported.
Functionalities such as the contour or surf plots will not be available.
-Fix your configuration and packages."""
+Fix your configuration and packages.""")
#import numpy.core.ma as ma
from numpy import *
from math import log10
......@@ -177,12 +177,12 @@ class ReadXMLFiles:
file = open(filename, "r")
line = file.readline()
# first, we search for the requested table
if self.verbose : print "searching for the table "+table
if self.verbose : print("searching for the table "+table)
while line and line.find(table) < 0:
line = file.readline()
if self.verbose: print "reading the data..."
if self.verbose: print("reading the data...")
if not line:
return None
# create a dictionary using the parameters of the xml table as keys
......@@ -221,21 +221,21 @@ class ReadXMLFiles:
# check that the file was properly closed with a </stream> tag
if self.verbose: print "Found "+str(count-1)+" elements"
if self.verbose: print("Found "+str(count-1)+" elements")
#convert
if self.verbose : print "converting to arrays"
if self.verbose : print("converting to arrays")
for key in sortDict:
try:
table_summ[key] = array(table_summ[key])
except:
-print 'skip array conversion for '+ key
+print('skip array conversion for '+ key)
pass
file.close()
if table_summ is None:
-print 'Warning: '+ table +'table cannot be found in'+filename
+print('Warning: '+ table +'table cannot be found in'+filename)
return table_summ
......@@ -247,15 +247,15 @@ class ReadXMLFiles:
"""
# right now only one file can be read.
if len(self.files)>1:
print "Warning: More than 1 file match your --glob argument. use the last one. Fix me"
print("Warning: More than 1 file match your --glob argument. use the last one. Fix me")
if len(self.files)==0:
-print 'Error, no file to read. check the spelling'
+print('Error, no file to read. check the spelling')
sys.exit(1)
for file in self.files:
if self.verbose is True:
-print 'Reading ' + file
+print('Reading ' + file)
# first we need to read the process_params, and extract Fl, the lower cut-
# off frequency, which will be used later on
if name is None:
......@@ -267,9 +267,9 @@ class ReadXMLFiles:
except:
self.params = None
self.values = None
raise ValueError, """ The XMl file must contain a process_params table
raise ValueError(""" The XMl file must contain a process_params table
with the --fl option at least. It does not seem to be present in the file
provide."""
provide.""")
#try:
results = self.readXML(file, 'bankefficiency')
......@@ -306,9 +306,8 @@ class ReadXMLFiles:
pass
if self.bank is None:
-print \
-"""Warning: no sngl_inspiral table found. so,no information
-related to the template bank can be extracted"""
+print("""Warning: no sngl_inspiral table found. so,no information
+related to the template bank can be extracted""")
if name is not None:
try:
......@@ -339,7 +338,7 @@ class Plotting:
fig = None
if self.hold is False:
if self.verbose is True:
-print 'Plotting '+ str(self.figure_num) +' in progress...',
+print('Plotting '+ str(self.figure_num) +' in progress...', end=' ')
fig = figure(self.figure_num)
self.figure_num += 1
return fig
......@@ -357,7 +356,7 @@ class Plotting:
try:
n, bins, patches = hist(data, nbins, normed=1)
except:
print """Error inside histogram (hist function)"""
print("""Error inside histogram (hist function)""")
if fit is True:
try:
......@@ -369,7 +368,7 @@ class Plotting:
except:pass
except:
print """Error inside histogram (normpdf function)"""
print(