There will be maintenance performed on git.ligo.org, chat.ligo.org, containers.ligo.org, and docs.ligo.org starting at 9am PDT on Tuesday 18th August 2020. There will be an extremely small period of downtime at the start of the maintenance window as various services are restarted. Please address any comments, questions, or concerns to computing-help@igwn.org.

Commit 861fa224 authored by Adam Mercer's avatar Adam Mercer

Merge branch 'trailing-whitespace-lalapps-inspiral' into 'master'

Remove trailing whitespace from lalapps inspiral code

See merge request lscsoft/lalsuite!1182
parents 656a0496 6518357d
This diff is collapsed.
......@@ -41,8 +41,6 @@ from lalinspiral.sbank.tau0tau3 import proposals
from lalinspiral.sbank.psds import (noise_models, read_psd)
from lalinspiral.sbank.waveforms import waveforms, SnglInspiralTable
import lal
class ContentHandler(ligolw.LIGOLWContentHandler):
pass
......
......@@ -92,7 +92,7 @@ class SBankJob(inspiral.InspiralAnalysisJob):
if cp.has_section("accounting"):
self.add_condor_cmd('accounting_group', cp.get("accounting", "accounting-group"))
if cp.has_option("accounting", "accounting-group-user"):
self.add_condor_cmd('accounting_group_user', cp.get("accounting", "accounting-group-user"))
self.add_condor_cmd('accounting_group_user', cp.get("accounting", "accounting-group-user"))
self.add_condor_cmd('getenv','True')
self.add_condor_cmd('request_memory', '3999')
if "OMP_NUM_THREADS" in os.environ:
......@@ -134,7 +134,7 @@ class SBankChooseMchirpBoundariesJob(inspiral.InspiralAnalysisJob):
if cp.has_section("accounting"):
self.add_condor_cmd('accounting_group', cp.get("accounting", "accounting-group"))
if cp.has_option("accounting", "accounting-group-user"):
self.add_condor_cmd('accounting_group_user', cp.get("accounting", "accounting-group-user"))
self.add_condor_cmd('accounting_group_user', cp.get("accounting", "accounting-group-user"))
self.add_condor_cmd('getenv','True')
......@@ -172,7 +172,7 @@ class LWAddJob(pipeline.CondorDAGJob):
if cp.has_section("accounting"):
self.add_condor_cmd('accounting_group', cp.get("accounting", "accounting-group"))
if cp.has_option("accounting", "accounting-group-user"):
self.add_condor_cmd('accounting_group_user', cp.get("accounting", "accounting-group-user"))
self.add_condor_cmd('accounting_group_user', cp.get("accounting", "accounting-group-user"))
self.tag_base = tag_base
self.add_condor_cmd('environment',"KMP_LIBRARY=serial;MKL_SERIAL=yes")
self.set_sub_file(tag_base+'.sub')
......
......@@ -69,7 +69,7 @@ for section in commonsections:
# Print a list of options missing from the config file
for opt in missingopts:
print("MISSING option in " + opts.config_file + " in [" + section + "]: " + opt + " = " + refdict[opt])
print("MISSING option in " + opts.config_file + " in [" + section + "]: " + opt + " = " + refdict[opt])
# Print a list of extra options in the config file that aren't in the reference
for opt in extraopts:
......
......@@ -196,7 +196,7 @@ for o, a in opts:
config_file = a
elif o in ("-l", "--log-path"):
log_path = a
elif o in ("-x", "--dax"):
elif o in ("-x", "--dax"):
dax = True
else:
print("Unknown option:", o, file=sys.stderr)
......@@ -336,7 +336,7 @@ for seg in data:
df1.set_start(seg.start() - pad)
df1.set_end(seg.end() + pad)
df1.set_observatory(ifo1[0])
if prev_df1:
if prev_df1:
df1.add_parent(prev_df1)
df2 = pipeline.LSCDataFindNode(df_job)
......@@ -345,7 +345,7 @@ for seg in data:
df2.set_start(seg.start() - pad)
df2.set_end(seg.end() + pad)
df2.set_observatory(ifo2[0])
if prev_df2:
if prev_df2:
df2.add_parent(prev_df2)
if do_datafind:
......@@ -367,9 +367,9 @@ for seg in data:
if not calibrated: bank.calibration()
bank.set_vds_group(group)
if do_datafind:
if do_datafind:
bank.add_parent(df1)
if do_tmpltbank:
if do_tmpltbank:
dag.add_node(bank)
insp1 = inspiral.InspiralNode(insp_job)
......@@ -430,7 +430,7 @@ for seg in data:
# add the inspiral jobs for this segment to the list
insp_nodes.append(seg_insp_nodes)
# now add the last df1 as a parent to the first df2 so we
# now add the last df1 as a parent to the first df2 so we
# don't have multiple datafinds running at the same time
if do_datafind:
first_df2.add_parent(df1)
......@@ -450,7 +450,7 @@ for i in range(len(data)):
inca.set_ifo_b(ifo2)
# if there is a chunk before this one, add it to the job
try:
try:
data[i][j-1]
inca.add_file_arg(insp_nodes[i][j-1][0].get_output())
inca.add_file_arg(insp_nodes[i][j-1][1].get_output())
......@@ -491,27 +491,27 @@ dag.write_sub_files()
dag.write_dag()
# write a message telling the user that the DAG has been written
if dax:
print("""\nCreated a DAX file which can be submitted to the Grid using
Pegasus. See the page:
http://www.lsc-group.phys.uwm.edu/lscdatagrid/griphynligo/vds_howto.html
if dax:
print("""\nCreated a DAX file which can be submitted to the Grid using
Pegasus. See the page:
http://www.lsc-group.phys.uwm.edu/lscdatagrid/griphynligo/vds_howto.html
for instructions.
""")
else:
print("\nCreated a DAG file which can be submitted by executing")
print("\n condor_submit_dag", dag.get_dag_file())
print("""\nfrom a condor submit machine (e.g. hydra.phys.uwm.edu)\n
If you are running LSCdataFind jobs, do not forget to initialize your grid
If you are running LSCdataFind jobs, do not forget to initialize your grid
proxy certificate on the condor submit machine by running the commands
unset X509_USER_PROXY
grid-proxy-init -hours 72
Enter your pass phrase when promted. The proxy will be valid for 72 hours.
Enter your pass phrase when promted. The proxy will be valid for 72 hours.
If you expect the LSCdataFind jobs to take longer to complete, increase the
time specified in the -hours option to grid-proxy-init. You can check that
time specified in the -hours option to grid-proxy-init. You can check that
the grid proxy has been sucessfully created by executing the command:
grid-cert-info -all -file /tmp/x509up_u`id -u`
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment