Commit 49d47074 authored by Karl Wette

Merge branch 'knope_automation' into 'master'

Known pulsar search automation script fixes

See merge request !756
parents 9288744c c4da381c
Pipeline #57761 passed with stages in 74 minutes and 57 seconds
@@ -23,7 +23,7 @@ import smtplib
 import stat
 import argparse
-from six.moves.configparser import ConfigParser
+from six.moves.configparser import RawConfigParser
 from lalapps import git_version
@@ -85,7 +85,8 @@ if __name__=='__main__':
 A configuration .ini file is required.
 """
-parser = argparse.ArgumentParser( description = description, version = __version__ )
+parser = argparse.ArgumentParser(description=description)
+parser.add_argument('--version', action='version', version=__version__)
 parser.add_argument("inifile", help="The configuration (.ini) file")
 # parse input options
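A note on the argparse change above: the version= keyword to ArgumentParser was a deprecated Python 2 holdover and no longer exists in Python 3, so a --version flag now has to be registered explicitly with action='version'. A minimal, self-contained sketch of that pattern (the version string is illustrative):

import argparse

__version__ = "0.1"  # illustrative version string

parser = argparse.ArgumentParser(description="Known pulsar search automation")
# ArgumentParser(version=...) is Python 2 only; register the flag explicitly instead
parser.add_argument('--version', action='version', version=__version__)
args = parser.parse_args()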
@@ -102,7 +103,7 @@ A configuration .ini file is required.
 cronid = 'knopeJob' # default ID for the crontab job
 # open and parse config file
-cp = ConfigParser()
+cp = RawConfigParser()
 try:
 cp.read(inifile)
 except:
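The swap from ConfigParser to RawConfigParser (here and in the remaining hunks) turns off '%' interpolation when values are read, so options containing literal percent signs, such as printf-style format strings, come back verbatim instead of raising an interpolation error. A small sketch of the difference, assuming a hypothetical example.ini:

from six.moves.configparser import ConfigParser, RawConfigParser

# example.ini (hypothetical):
#   [output]
#   nameformat = %s_%d.hdf

raw = RawConfigParser()
raw.read('example.ini')
print(raw.get('output', 'nameformat'))  # '%s_%d.hdf', returned as-is

cp = ConfigParser()
cp.read('example.ini')
cp.get('output', 'nameformat')  # raises an interpolation error: '%' must be followed by '%' or '('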
@@ -130,7 +131,12 @@ A configuration .ini file is required.
 if cp.has_option('configuration', 'cronid'):
 cronid = cp.get('configuration', 'cronid')
-cprun = ConfigParser()
+# check for kerberos certificate
+kerberos = None
+if cp.has_option('configuration', 'kerberos'):
+kerberos = cp.get('configuration', 'kerberos')
+cprun = RawConfigParser()
 try:
 cprun.read(runconfig)
 except:
@@ -481,6 +487,9 @@ A configuration .ini file is required.
 except:
 print("Error... if specifying a virtualenv the 'WORKON_HOME' environment must exist", file=sys.stderr)
 sys.exit(1)
+elif cp.has_option('configuration', 'conda'): # assumes using conda
+virtualenv = cp.get('configuration', 'conda')
+wov = 'conda activate {}'.format(virtualenv)
 # check for .bash_profile, or similar file, to invoke
 profile = None
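For reference, the new branches are driven by options in the [configuration] section of the automation .ini file: 'conda' names a conda environment to activate in the cron wrapper (an alternative to the virtualenvwrapper route above), and 'kerberos' gives a kerberos credential cache that the wrapper exports as KRB5CCNAME before calling ligo-proxy-init -k. A hypothetical snippet (the option names are those read in this diff; the values are placeholders):

[configuration]
cronid = knopeJob
# conda environment to activate in the cron wrapper
conda = knope-env
# kerberos credential cache, exported as KRB5CCNAME so the proxy can be created non-interactively
kerberos = /home/albert.einstein/.krb5_cache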
@@ -493,18 +502,27 @@ A configuration .ini file is required.
 print("Error... no profile file is given", file=sys.stderr)
 sys.exit(1)
+if kerberos is not None:
+krbcert = "export KRB5CCNAME={}".format(kerberos)
+ligoproxyinit = "/usr/bin/ligo-proxy-init -k"
+else:
+krbcert = ""
+ligoproxyinit = ""
 # output wrapper script
 try:
 # set the cron wrapper script (which will re-run this script)
 cronwrapperscript = os.path.splitext(inifile)[0] + '.sh'
 cronwrapper = """#!/bin/bash
 source {0} # source profile
-{1} # enable virtual environment (assumes you have virtualenvwrapper.sh)
-%s {2} # re-run this script
+{1} # enable virtual environment (assumes you have virtualenvwrapper.sh/conda)
+{2} # export kerberos certificate location (if required)
+{3} # create proxy (if required)
+%s {4} # re-run this script
 """ % sys.argv[0]
 fp = open(cronwrapperscript, 'w')
-fp.write(cronwrapper.format(profile, wov, inifile))
+fp.write(cronwrapper.format(profile, wov, krbcert, ligoproxyinit, inifile))
 fp.close()
 os.chmod(cronwrapperscript, stat.S_IRWXU | stat.S_IRWXG | stat.S_IXOTH) # make executable
 except:
@@ -22,7 +22,7 @@ import json
 import subprocess as sp
 import shutil
 import uuid
-from six.moves.configparser import ConfigParser
+from six.moves.configparser import RawConfigParser
 import six.moves.urllib.parse as urlparse
 from copy import deepcopy
 import numpy as np
@@ -608,7 +608,7 @@ class knopeDAG(pipeline.CondorDAG):
 collatejob = collateJob(self.collate_exec, univ=self.results_universe, accgroup=self.accounting_group, accuser=self.accounting_group_user, logdir=self.log_dir, rundir=self.run_dir)
 # create config file for collating results into a results table
-cpc = ConfigParser() # create config parser to output .ini file
+cpc = RawConfigParser() # create config parser to output .ini file
 # create configuration .ini file
 cinifile = os.path.join(self.results_basedir, 'collate.ini')
@@ -683,7 +683,7 @@ class knopeDAG(pipeline.CondorDAG):
 if os.path.isfile(jsonfile):
 # append starttime (which will be the end time of the previous results) timestamp to JSON file
 try:
-shutil.copyfile(jsonfile, jsonfile + '_%d' % self.starttime.values()[0])
+shutil.copyfile(jsonfile, jsonfile + '_%d' % list(self.starttime.values())[0])
 except:
 print("Warning... could not copy previous results JSON file '%s'. Previous results may get overwritten." % jsonfile, file=sys.stderr)
@@ -705,7 +705,7 @@ class knopeDAG(pipeline.CondorDAG):
 except:
 print("Warning... could not write out ATNF catalogue information to JSON file '%s'." % jsonfile, file=sys.stderr)
-cp = ConfigParser() # create config parser to output .ini file
+cp = RawConfigParser() # create config parser to output .ini file
 # create configuration .ini file
 inifile = os.path.join(self.results_pulsar_dir[pname], pname+'.ini')
@@ -809,7 +809,7 @@ class knopeDAG(pipeline.CondorDAG):
 if self.autonomous:
 if os.path.isfile(posteriorsfiles[det]):
 try: # copy to file with the start time (i.e. the end time of the previous analysis for which the posterior file belongs) appended
-shutil.copyfile(posteriorsfiles[det], posteriorsfiles[det].strip('.hdf') + '_%d.hdf' % self.starttime.values()[0])
+shutil.copyfile(posteriorsfiles[det], posteriorsfiles[det].strip('.hdf') + '_%d.hdf' % list(self.starttime.values())[0])
 except:
 print("Warning... could not create copy of current posterior samples file '%s'. This will get overwritten on next autonomous run." % posteriorsfiles[det], file=sys.stderr)
@@ -3080,12 +3080,12 @@ class knopeDAG(pipeline.CondorDAG):
 # check if a datafind job is needed for any of the detectors
 if len(self.cache_files) < len(self.ifos):
-self.datafind_job = pipeline.LSCDataFindJob(self.preprocessing_base_dir.values()[0], self.log_dir, self.config)
+self.datafind_job = pipeline.LSCDataFindJob(list(self.preprocessing_base_dir.values())[0], self.log_dir, self.config)
 else: # a data find exectable has been given
 datafind = self.get_config_option('condor', 'datafind')
 if os.path.isfile(datafind) and os.access(datafind, os.X_OK):
-self.datafind_job = pipeline.LSCDataFindJob(self.preprocessing_base_dir.values()[0], self.log_dir, self.config)
+self.datafind_job = pipeline.LSCDataFindJob(list(self.preprocessing_base_dir.values())[0], self.log_dir, self.config)
 else:
 print("Warning... data find executable '%s' does not exist, or is not executable, try using system gw_data_find instead" % datafind)
 datafindexec = self.find_exec_file('gw_data_find')
@@ -3095,7 +3095,7 @@ class knopeDAG(pipeline.CondorDAG):
 return
 else:
 self.config.set('condor', 'datafind', datafindexec) # set value in config file parser
-self.datafind_job = pipeline.LSCDataFindJob(self.preprocessing_base_dir.values()[0], self.log_dir, self.config)
+self.datafind_job = pipeline.LSCDataFindJob(list(self.preprocessing_base_dir.values())[0], self.log_dir, self.config)
 else:
 # if no data find is specified try using the system gw_data_find
 datafindexec = self.find_exec_file('gw_data_find')
@@ -3105,7 +3105,7 @@ class knopeDAG(pipeline.CondorDAG):
 return
 else:
 self.config.set('condor', 'datafind', datafindexec) # set value in config file parser
-self.datafind_job = pipeline.LSCDataFindJob(self.preprocessing_base_dir.values()[0], self.log_dir, self.config)
+self.datafind_job = pipeline.LSCDataFindJob(list(self.preprocessing_base_dir.values())[0], self.log_dir, self.config)
 # add additional options to data find job
 if self.datafind_job is not None: