There will be maintenance performed on git.ligo.org, chat.ligo.org, containers.ligo.org, and docs.ligo.org starting at 9am PDT on Tuesday 18th August 2020. There will be an extremely small period of downtime at the start of the maintenance window as various services are restarted. Please address any comments, questions, or concerns to computing-help@igwn.org.

Commit 6ee1d0ff authored by Karl Wette

Merge branch 'trailing-whitespace-lalapps-pulsar' into 'master'

Remove trailing whitespace from lalapps pulsar code

See merge request lscsoft/lalsuite!1181
parents 206c4720 df3e9348
......@@ -171,7 +171,7 @@ if __name__=='__main__':
ctminus = curtime - dt/2.
_, mvel = get_body_barycentric_posvel(body, ctminus)
ctplus = curtime + dt/2.
_, pvel = get_body_barycentric_posvel(body, ctplus)
_, pvel = get_body_barycentric_posvel(body, ctplus)
acc.append(((pvel.xyz.to('m/s')-mvel.xyz.to('m/s'))/const.c)/dt.to('s'))
curtime += dt
......
......@@ -6,19 +6,19 @@ import sys
# Check if spec*.txt files exist under the directory given on the command line.
# If these files exist, then the SFT jobs have finished.
# Try again after 2 hr
# Try again after 2 hr
checkPath = sys.argv[1] + '/spec*.txt'
fileList = glob.glob(checkPath)
if len(fileList) < 1:
# Try again in 2 hr
# Try again in 2 hr
time.sleep(7200)
fileList = glob.glob(checkPath)
fileList = glob.glob(checkPath)
if len(fileList) < 1:
print('Timout: SFTs jobs under' + sys.argv[1] + 'did not finish.\n')
print('Timout: SFTs jobs under' + sys.argv[1] + 'did not finish.\n')
exit(1)
else:
exit(0)
......
......@@ -95,7 +95,7 @@ def parseSFT(SFTinput):
return tuple(sfts)
def coherenceFromSFTs( pathToSFTsChannA, pathToSFTsChannB, subBand=100): #The function that generates the coherence
def coherenceFromSFTs( pathToSFTsChannA, pathToSFTsChannB, subBand=100): #The function that generates the coherence
done = False
for i in range(11 , len(pathToSFTsChannA)-1):
......@@ -109,7 +109,7 @@ def coherenceFromSFTs( pathToSFTsChannA, pathToSFTsChannB, subBand=100): #The f
CB = pathToSFTsChannB[-(i-1):-10]
done = True
print('Computing the coherence between:')
print('Computing the coherence between:')
print(CA)
print(CB)
......@@ -160,7 +160,7 @@ def coherenceFromSFTs( pathToSFTsChannA, pathToSFTsChannB, subBand=100): #The f
else:
Fmax = FmaxB
#We calculate the beginning and ending bins
#We calculate the beginning and ending bins
KminA = int((Fmin-FminA)*TbaseA)
KminB = int((Fmin-FminB)*TbaseB)
......@@ -200,12 +200,12 @@ def coherenceFromSFTs( pathToSFTsChannA, pathToSFTsChannB, subBand=100): #The f
#print(len(numerator))
for sftA in ListA:
for sftA in ListA:
StartTimesA.append(float(sftA[-19:-10])) #This list might not even be necesarry
nAve = 0
#Let A be the channel that the thing is getting compared to.
for Aind in range(0,len(StartTimesA)):
#Let A be the channel that the thing is getting compared to.
for Aind in range(0,len(StartTimesA)):
for Bind in range(0,len(StartTimesB)):
if StartTimesA[Aind] == StartTimesB[Bind]:
......@@ -220,12 +220,12 @@ def coherenceFromSFTs( pathToSFTsChannA, pathToSFTsChannB, subBand=100): #The f
numerator = numerator * np.conj(numerator)
coh = numerator/(A*B)
coh = numerator/(A*B)
coh = np.real_if_close(coh, tol = 10)
nAve = str(nAve)
nAve = str(nAve)
print('Coherence Completed; nAve = %s' % nAve)
print('Coherence Completed; nAve = %s' % nAve)
print('Generating plots and files')
###
......@@ -239,7 +239,7 @@ def coherenceFromSFTs( pathToSFTsChannA, pathToSFTsChannB, subBand=100): #The f
#print(len(Freq))
#print(len(coh))
#Freq = np.linspace(0, N, len(coh))
#Freq = np.linspace(0, N, len(coh))
#print('value of Freq[180000]')
#print(Freq[180000]) #this is telling me that at each index is 1 Hz. No good.
......@@ -267,12 +267,12 @@ def coherenceFromSFTs( pathToSFTsChannA, pathToSFTsChannB, subBand=100): #The f
subBand = int(subBand) # Output plots and files for each subBand.
# All frequencies below, minFreq, maxFreq, and subBand, are coverted to integer indices in the Freq array.
i = int(0)
# All frequencies below, minFreq, maxFreq, and subBand, are coverted to integer indices in the Freq array.
i = int(0)
subBand = subBand * TbaseA
maxPossibleFreq = len(Freq);
minFreq = int(i * subBand) #minFreq is integer minimum frequency, this subBand
maxFreq = int((i + 1) * subBand) # maxFreq is integer maximum frequency, this subBand
maxPossibleFreq = len(Freq);
minFreq = int(i * subBand) #minFreq is integer minimum frequency, this subBand
maxFreq = int((i + 1) * subBand) # maxFreq is integer maximum frequency, this subBand
while maxFreq < maxPossibleFreq:
plot_num = str(i)
......@@ -295,9 +295,9 @@ def coherenceFromSFTs( pathToSFTsChannA, pathToSFTsChannB, subBand=100): #The f
clf()
close
# Setup to back to the top of the while loop.
# Setup to back to the top of the while loop.
i += 1
minFreq = int(i * subBand) #minFreq is integer minimum frequency, this subBand
minFreq = int(i * subBand) #minFreq is integer minimum frequency, this subBand
maxFreq = int((i + 1) * subBand) # maxFreq is integer maximum frequency, this subBand
print('Done')
......@@ -315,7 +315,7 @@ if len(sys.argv) < 3:
print(' ')
print('The optional subBand is the band in Hz to output in each plot. (Default is 100 Hz)')
print(' ')
exit(0)
exit(0)
#print sys.argv[1]
#print sys.argv[2]
......@@ -324,7 +324,7 @@ pathToSFTsChannA = sys.argv[1]
pathToSFTsChannB = sys.argv[2]
if pathToSFTsChannA[-1] != '/':
pathToSFTsChannA = pathToSFTsChannA + '/'
pathToSFTsChannA = pathToSFTsChannA + '/'
if pathToSFTsChannB[-1] != '/':
pathToSFTsChannB = pathToSFTsChannB + '/'
......
......@@ -9,7 +9,7 @@ import sys
# For teeth thicker than epsilon Hz, pick the tooth with the maximum snr.
def uniqueTeeth(epsilon,f,snr):
indList = [] # List of indices to keep
indList = [] # List of indices to keep
thisToothIndices = np.array([0],dtype=np.int) # List of indiced belong to a tooth.
thisToothSNRs = np.array([snr[0]],dtype=np.int) # List of snrs belong to a tooth.
for i in range(0, len(f) - 1):
......@@ -21,7 +21,7 @@ def uniqueTeeth(epsilon,f,snr):
else:
thisInd = thisToothIndices[np.argmax(thisToothSNRs)]
indList = np.append(indList,int(np.floor(thisInd)))
# Initialize for next tooth
# Initialize for next tooth
thisToothIndices = np.array([j],dtype=np.int) # List of indiced belong to a tooth.
thisToothSNRs = np.array([snr[j]],dtype=np.int) # List of snrs belong to a tooth.
# We are at the end of the array; add the index of the last tooth.
......
This diff is collapsed.
......@@ -60,7 +60,7 @@ def plotSpecAvgOutput(filename,outputFileName,chanName,effTBase,deltaFTicks,tave
tStart = lst.pop() # start time
ifo = lst.pop() # ifo
fEnd = lst.pop() # end frequency
fStart = lst.pop() # start frequency
fStart = lst.pop() # start frequency
y_temp1 = y # Create an array of the spectrogram data without the segments taken out when a segment file is used
yzeros=list(y.sum(axis=0))
......
......@@ -224,7 +224,7 @@ class knopeDAG(pipeline.CondorDAG):
self.error_code = KNOPE_ERROR_GENERAL
return
else:
self.ndatasets[ifo] = len(self.starttime[ifo])
self.ndatasets[ifo] = len(self.starttime[ifo])
# Get the pre-processing engine (heterodyne or SplInter - default to heterodyne)
if not self.postonly:
......
......@@ -2527,7 +2527,7 @@ def pulsar_nest_to_posterior(postfile, nestedsamples=False, removeuntrig=True):
pos.pop('i')
# convert C22 back into h0, and phi22 back into phi0 if required
posC21 = None
posC21 = None
if 'c21' in pos.names:
posC21 = pos['c21'].samples
......
......@@ -13,7 +13,7 @@ __date__ = '$Date$'
__version__ = '$Revision$'
# REVISIONS:
# 12/02/05 gam; generate datafind.sub and MakeSFTs.sub as well as dag file in PWD, with log files based subLogPath and dag filename.
# 12/02/05 gam; generate datafind.sub and MakeSFTs.sub as well as dag file in PWD, with log files based subLogPath and dag filename.
# 12/28/05 gam; Add option --make-gps-dirs, -D <num>, to make directory based on this many GPS digits.
# 12/28/05 gam; Add option --misc-desc, -X <string> giving misc. part of the SFT description field in the filename.
# 12/28/05 gam; Add options --start-freq -F and --band -B options to enter these.
......@@ -45,10 +45,10 @@ import math
def usage():
msg = """\
This script creates datafind.sub, MakeSFTs.sub, and a dag file that generates SFTs based on the options given.
This script creates datafind.sub, MakeSFTs.sub, and a dag file that generates SFTs based on the options given.
The script can be used to create dag files for stand-alone use with condor_submit_dag, or as a dag generator with onasys.
Usage: MakeSFTDAG [options]
-h, --help display this message
......@@ -84,7 +84,7 @@ Usage: MakeSFTDAG [options]
-L, --max-length-all-jobs maximum total amount of data to process, in seconds (optional and unused if a segment file is given)
-g, --segment-file (optional) alternative file with segments to use, rather than the input times.
-A, --accounting-group (optional) accounting group tag to be added to the condor submit files.
-U, --accounting-group-user (optional) accounting group albert.einstein username to be added to the condor submit files.
-U, --accounting-group-user (optional) accounting group albert.einstein username to be added to the condor submit files.
-q, --list-of-nodes (optional) file with list of nodes on which to output SFTs.
-Q, --node-path (optional) path to nodes to output SFTs; the node name is appended to this path, followed by path given by the -p option;
for example, if -q point to file with the list node1 node2 ... and the -Q /data/ -p /frames/S5/sfts/LHO options
......@@ -134,7 +134,7 @@ def writeToDag(dagFID, nodeCount, filterKneeFreq, timeBaseline, outputSFTPath, c
dagFID.write('PARENT %s CHILD %s\n'%(LSCdataFind,MakeSFTs))
#
# MAIN CODE START HERE
# MAIN CODE START HERE
#
# parse the command line options
......@@ -178,7 +178,7 @@ longop = [
"node-path=",
"output-jobs-per-node=",
"min-seg-length=",
"use-single=",
"use-single=",
"use-hot",
"make-tmp-file",
"datafind-path=",
......@@ -251,9 +251,9 @@ for o, a in opts:
elif o in ("-a", "--analysis-start-time"):
analysisStartTime = int(a)
elif o in ("-b", "--analysis-end-time"):
analysisEndTime = int(a)
analysisEndTime = int(a)
elif o in ("-f", "--dag-file"):
dagFileName = a
dagFileName = a
elif o in ("-t", "--aux-path"):
auxPath = a
elif o in ("-G", "--tag-string"):
......@@ -319,7 +319,7 @@ for o, a in opts:
elif o in ("-l", "--min-seg-length"):
minSegLength = int(a)
elif o in ("-S", "--use-single"):
useSingle = True
useSingle = True
elif o in ("-H", "--use-hot"):
useHoT = True
elif o in ("-Z", "--make-tmp-file"):
......@@ -492,9 +492,9 @@ if (segmentFile != None):
adjustSegExtraTime = True
try:
for line in open(segmentFile):
try:
try:
splitLine = line.split();
try:
try:
oneSeg = [];
oneSeg.append(int(splitLine[0]));
oneSeg.append(int(splitLine[1]));
......@@ -632,7 +632,7 @@ for seg in segList:
if analysisEndTime < segStartTime: analysisEndTime = segStartTime
else:
analysisStartTime = seg[0]
analysisEndTime = seg[1]
analysisEndTime = seg[1]
#print analysisStartTime, analysisEndTime
# Loop through the analysis time; make sure no more than maxNumPerNode SFTs are produced by any one node
startTimeThisNode = analysisStartTime
......
......@@ -19,7 +19,7 @@ parser.add_argument('Tcoh', type=int, help='Coherence time, that is, one integra
parser.add_argument('--fftf', action='store_true',help='Use to analyze frequency-frequency plane (default: time-frequency)')
args = parser.parse_args()
def tfplane(path, Tobs, Tcoh, fftf):
def tfplane(path, Tobs, Tcoh, fftf):
if fftf:
print('Printing data for frequency-frequency plane')
else:
......@@ -34,7 +34,7 @@ def tfplane(path, Tobs, Tcoh, fftf):
tfArray = np.array(tfList)
# Calculate expected number of SFTs
nsft = int(np.floor(2 * Tobs / Tcoh)) - 1
nsft = int(np.floor(2 * Tobs / Tcoh)) - 1
print('nsft = ' + str(nsft))
# Calculate expected number of second Fourier transforms
ntft = int(np.floor(nsft/2) + 1)
......@@ -65,14 +65,14 @@ def tfplane(path, Tobs, Tcoh, fftf):
ax.set_title(\
'Power in f-f plane ' + '\n' + 'Number of bins in data arrays (n f-prime, n f): ' +\
str(tfShaped.T.shape) + ' \n '\
)
)
else:
ax.set_xlabel('Time: SFT number (n)')
ax.set_ylabel('Frequency bin: f (Hz) * Tcoh (s)')
ax.set_title(\
'Power in t-f plane ' + '\n' + 'Number of bins in data arrays (n t, n f): ' +\
str(tfShaped.T.shape) + ' \n '\
)
)
if fftf:
plt.savefig('ffplane.png')
plt.savefig('ffplane.pdf')
......
......@@ -46,15 +46,15 @@ def summarizer(mdcVersion, observatory, pulsar, args):
if args.bypassSummary:
print('Bypassing summary file creation')
elif args.massiveSummary:
print('Taking alternate approach to a large output directory')
print('Taking alternate approach to a large output directory')
dagFileCore = headJobName
if args.closed:
dagFileCore = dagFileCore + '_closed'
if args.elsewhere:
jobDagFile = open(args.elsewhere + 'ScoX1_' + dagFileCore + '.dag', "r")
jobDagFile = open(args.elsewhere + 'ScoX1_' + dagFileCore + '.dag', "r")
else:
jobDagFile = open('ScoX1_' + dagFileCore + '.dag', "r")
outfilenameList = []
jobDagFile = open('ScoX1_' + dagFileCore + '.dag', "r")
outfilenameList = []
for jobDagLine in jobDagFile:
outfileLine = re.search("--outfilename=out_" + mdcVersion + \
"_" + observatory + "_pulsar-" + pulsar + "_" + \
......@@ -65,13 +65,13 @@ def summarizer(mdcVersion, observatory, pulsar, args):
"_" + observatory + "_pulsar-" + pulsar + "_" + \
str(outfileLine.group(1) + '.' + outfileLine.group(2)) + '_' + \
str(outfileLine.group(3) + '.' + outfileLine.group(4)) + '.dat'
outfilenameList.append(wholeOutfile)
outfilenameList.append(wholeOutfile)
jobDagFile.close
for ll, outfileEntry in enumerate(outfilenameList):
if ll < 1e6:
if ll < 1e6:
if ll % 1e3 == 0:
print(ll)
fileLocation = outdirectory + '/' + outfileEntry
fileLocation = outdirectory + '/' + outfileEntry
grepCommand = 'grep -i ' + fileLocation + ' -e h0'
os.system(grepCommand + ' >> ' + verboseSummaryFile)
print('Done')
......@@ -130,7 +130,7 @@ def summarizer(mdcVersion, observatory, pulsar, args):
+ "(?P<DELTAINT>\d+)\.(?P<DELTAFP>\d+)", \
verboseString)
declinationList.append(float(deltaLine.group(1) + deltaLine.group(2) + \
'.' + deltaLine.group(3)))
'.' + deltaLine.group(3)))
fLine = re.search("fsig = (?P<FSIGN>\-?)" \
+ "(?P<FINT>\d+)\.(?P<FFP>\d+)", \
verboseString)
......@@ -182,17 +182,17 @@ def summarizer(mdcVersion, observatory, pulsar, args):
# (0, 0), (0, 1), (0, 2), (0, 3),... (1, 0), (1, 1), (1, 2), (1, 3)...
# For convenience, we can reshape these arrays so we can use image plotters
# We want a map where RA increases left to right and dec from bottom to top
# i.e., where
# i.e., where
# (0, 3), (1, 3), (2, 3), (3,3)
# ...
# (0, 0), (1, 0), (2, 0), (3,0)
# To do this we first reshape by the length of right ascension,
# To do this we first reshape by the length of right ascension,
# to break up the arrays in a matrix,
# then transpose, to ensure RA increases the right way,
# then transpose, to ensure RA increases the right way,
raLen = len(np.unique(rightAscensionArray))
decLen = len(np.unique(declinationArray))
raShaped = np.reshape(rightAscensionArray, (raLen, decLen)).T
decShaped = np.reshape(declinationArray, (raLen, decLen)).T
decShaped = np.reshape(declinationArray, (raLen, decLen)).T
# Having checked that this plots correctly (verified by substituting
# raShaped or decShaped into the final plot), we then define the extents
# of the plot so we have plot axis labels
......@@ -409,7 +409,7 @@ def summarizer(mdcVersion, observatory, pulsar, args):
dfShaped = np.reshape(dfArray, (fLen, dfLen)).T
if (args.band or args.noiseTest) or \
(args.templateSearch or args.multiTemplateSearch) or \
args.J1751 or args.ScoX1S6:
args.J1751 or args.ScoX1S6:
print('Number of bins in data arrays: ' + str(fShaped.shape))
x, y = np.meshgrid(fShaped[0, :], dfShaped[:, 0])
extensions = [x[0, 0], x[-1, -1], y[0, 0], y[-1, -1]]
......@@ -500,7 +500,7 @@ def summarizer(mdcVersion, observatory, pulsar, args):
R vs parameters for pulsar ' + pulsar + ' at ' + observatory + ' \n \
' + centerString + str(RCenter) + ' at (df, f) = (' + centerRSpotDF +', ' + centerRSpotF + ') Hz \n \
Number of bins in data arrays (df, f): ' + str(fShaped.shape) + ' \n \
')
')
plt.savefig('DFvsFresultsR-' + observatory + '_pulsar-' + pulsar + '.png')
plt.savefig('DFvsFresultsR-' + observatory + '_pulsar-' + pulsar + '.pdf')
plt.close()
......
......@@ -40,7 +40,7 @@ parser.add_argument('--outfile', type=str, help='Filestring for output', default
parser.add_argument('--outdir', type=str, help='Output directory', default='output_')
parser.add_argument('--executable', type=str, help='Path to compiled binary executable', default='/home/grant.meadors/TwoSpect/dev1/bin/lalapps_TwoSpect')
args = parser.parse_args()
if args.singleBand or args.templateSearch:
if args.singleBand or args.templateSearch:
print('Looking only in a 5 Hz band from the MDCv6 data set at the following pulsar: ' + str(args.singleBand))
elif args.templateSearchOpen or args.templateSearchClosed:
print('Looking in 5 Hz bands from the MDCv6 data set.')
......@@ -59,7 +59,7 @@ def sftFileListParts():
287820, 291420, 295020, 309420, 316620, 334620, 388620, 395820, 399420,\
428220, 475020, 493020]
# Manually construct the sftFileList, because it is so idiosyncratic
# Manually construct the sftFileList, because it is so idiosyncratic
sftFileListPart1 = [66780, 83580, 91980, 100380, 108780, 117180, 133980,\
142380, 167580, 184380, 217980, 234780, 243180, 259980, 268380, 285180,\
293580]
......@@ -78,7 +78,7 @@ def sftFileBin(finj, fstart, fjobspan, fwingsize, tcoh):
bandCountFreq = np.floor( (finj - fstart)/fjobspan )
bandStartFreq = fstart + fjobspan*bandCountFreq
bandStartBin = bandStartFreq*tcoh
wingsBelowStartBin = (fwingsize - fjobspan)/2 * tcoh
wingsBelowStartBin = (fwingsize - fjobspan)/2 * tcoh
completeStartBin = bandStartBin - wingsBelowStartBin
return completeStartBin
print('Frequency start bins for SFTs in real S6 search')
......@@ -97,10 +97,10 @@ def sftNameMaker(observatory, tcoh, binname, args):
observatory + "/"
sftFilePart1 = "s_sfts/" + observatory[0] + "-" + observatory[1] + \
"_" + observatory + "_"
sftFilePart2 = "SFT_SCO_X1_S6_"
sftFilePart2 = "SFT_SCO_X1_S6_"
else:
sftFileRoot = "/home/egoetz/TwoSpect/scox1_mdc6/sfts/" + \
observatory + "/"
observatory + "/"
sftFilePart1 = "s_sfts/" + openFlag + "/" + observatory[0] + "-" + observatory[1] + \
"_" + observatory + "_"
sftFilePart2 = "SFT_SCO_X1_MDCv6_"
......@@ -115,7 +115,7 @@ def tablereader(tableName, observatory, args):
for k, tableLine in enumerate(tableData):
if k == 0:
if args.templateSearchClosed:
# Note this would need to be moved outside if the
# Note this would need to be moved outside if the
# first pulsar were not closed. In that case, just do
# a check to see if these lists exist
raInjList = np.asarray(4.275699238500, dtype=np.float64)
......@@ -161,7 +161,7 @@ def tablereader(tableName, observatory, args):
TcohLine = 840
else:
TcohLine = 360
TcohList = np.vstack([TcohList, np.asarray(TcohLine)]).astype(int)
TcohList = np.vstack([TcohList, np.asarray(TcohLine)]).astype(int)
PList.append(str(tableLine.split()[11]))
asiniList.append(str(tableLine.split()[9]))
......@@ -228,7 +228,7 @@ def categorizer(Tcoh, raInj, decInj, fInj, observatory, pulsarNo, sftFile, jobIn
#raInj = 3.38
#decInj = 0.19
# MDC v3
#raInj = 2.20
#raInj = 2.20
#decInj = -1.01
# MDC v4
#raInj = 3.42
......@@ -237,7 +237,7 @@ def categorizer(Tcoh, raInj, decInj, fInj, observatory, pulsarNo, sftFile, jobIn
#raInj = 3.14
#decInj = 0.00
# MDC v6
#raInj = 4.275699238500
#raInj = 4.275699238500
#decInj = -0.27
# MDC v6 should take alpha and dec from input arguments
# Using the argument parser for the all-sky test
......@@ -284,7 +284,7 @@ def categorizer(Tcoh, raInj, decInj, fInj, observatory, pulsarNo, sftFile, jobIn
if args.templateSearchClosed:
floorFlag = floorFlag + 2
fFloor = FloorBinFinder(Tcoh, fInj, sftFile, \
sftFileListPartList[floorFlag])
sftFileListPartList[floorFlag])
if args.noiseTest:
fInterval = args.noiseTest
elif args.singleBand or args.templateSearch or \
......@@ -346,7 +346,7 @@ def categorizer(Tcoh, raInj, decInj, fInj, observatory, pulsarNo, sftFile, jobIn
# Specify the range of frequency modulation to search
dfHypothesis = 2 * np.pi * fInj * float(asini) / float(Period)
dfInterval = 1 / (4 * float (Tcoh))
dfBound = (dfSteps - 1) * dfInterval
dfBound = (dfSteps - 1) * dfInterval
dfRange = [dfHypothesis -0.5*dfBound + dfInterval*y for y in range(0, dfSteps)]
# Choose a TwoSpect version
......@@ -461,7 +461,7 @@ def dagWriter(g, observatory, headJobName, jobNumber, rightAscension, declinatio
elif args.real:
fminString = str(np.math.floor(f*np.math.floor(1/args.fspan))/np.math.floor(1/args.fspan))
else:
fminString = str(np.math.floor(8*(f-0.125))/8)
fminString = str(np.math.floor(8*(f-0.125))/8)
if args.templateSearch or \
args.templateSearchOpen or args.templateSearchClosed:
#templateStringSet = ' --templateSearch' + \
......@@ -484,7 +484,7 @@ def dagWriter(g, observatory, headJobName, jobNumber, rightAscension, declinatio
if args.real:
configFileName = 'Atlas_config_file.txt'
else:
configFileName = 'config_file_mdcv6.txt'
configFileName = 'config_file_mdcv6.txt'
argumentList = \
'"' + \
' --config=' + configFileName + \
......@@ -499,7 +499,7 @@ def dagWriter(g, observatory, headJobName, jobNumber, rightAscension, declinatio
" --outfilename=out_" + headJobName + '_' + \
str(f) + '_' + \
str(df) + \
".dat" + " --outdirectory=output_" + headJobName + '"'
".dat" + " --outdirectory=output_" + headJobName + '"'
tagStringLine = "TwoSpect_" + str(jobNumber)
g("JOB " + tagStringLine + " ScoX1_" + headJobName + ".sub")
g("VARS " + tagStringLine + " argList=" + argumentList + " tagString=" + '"' + tagStringLine + '"')
......@@ -538,7 +538,7 @@ if args.real:
print(fStartList)
for m, BandNo in enumerate(fStartList):
jobsPerBand = int(args.jobspan/args.fspan)
jobsPerBand = int(args.jobspan/args.fspan)
jobInc = m * jobsPerBand
categorizer(args.Tcoh, args.ra, args.dec, fStartList[m], observatoryChoice, "band-" + str(int(BandNo)).zfill(4), args.sftDir, jobInc, args.P, args.Asini, fSteps, dfSteps, args)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment