diff --git a/BayesWaveUtils/bayeswave_pipe/bayeswave_pipe_utils.py b/BayesWaveUtils/bayeswave_pipe/bayeswave_pipe_utils.py index 87b8e452dd437b3e331901975d88abfffe6443ad..edc4d37169fa4a578d92d5ef6d0143437e272a11 100644 --- a/BayesWaveUtils/bayeswave_pipe/bayeswave_pipe_utils.py +++ b/BayesWaveUtils/bayeswave_pipe/bayeswave_pipe_utils.py @@ -55,7 +55,34 @@ class LIGOLWContentHandler(ligolw.LIGOLWContentHandler): pass lsctables.use_in(LIGOLWContentHandler) -def write_pre_cmd(workdir): + + + +# Given an executable returns the path to the executable +""" from http://pythonadventures.wordpress.com/2011/03/13/equivalent-of-the-which-command-in-python/""" +def is_exe(fpath): + return os.path.exists(fpath) and os.access(fpath, os.X_OK) +def which(program): + fpath, fname = os.path.split(program) + if fpath: + if is_exe(program): + return program + else: + for path in os.environ["PATH"].split(os.pathsep): + exe_file = os.path.join(path, program) + if is_exe(exe_file): return exe_file + + return None + +def create_envinfo(rundir): + envdat = os.environ.copy() + with open(os.path.join(rundir,"envinfo.txt"),'w') as f: + f.write(str(envdat)) + + + + +def write_pre_cmd(workdir, smart_restart = True): """ Returns a string with a script for job setup and and geolocaiton which runs as a PreCmd. Output is dumped to a file called _cehostname.txt @@ -68,6 +95,7 @@ def write_pre_cmd(workdir): script="""#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (C) 2018-2019 James Clark +# Copyright (C) 2021 Sophie Hourihane # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -94,9 +122,11 @@ Note that we could use many more env variables for similar purposes if needed. \"\"\" import re -import os,sys +import os, sys, subprocess import socket import tarfile +import shutil +import ast # # Preliminary setup @@ -105,6 +135,42 @@ outputDir=sys.argv[1] executable=sys.argv[2] if not os.path.exists(outputDir): os.makedirs(outputDir) geolocation=open("{0}/{1}_cehostname.txt".format(outputDir, executable), "w") +output_file = open('""" + str(workdir) + """' + '/logs/setupdir_' + str(outputDir) + '.out', 'a+') +output_file.write("outputdir is {0}".format(str(outputDir))) + +# Load in environment so we can use BayesWave Executables +try: + with open("envinfo.txt", 'r') as f: + line = f.readline() + envdat = ast.literal_eval(line) +except FileNotFoundError as e: + outputdir.write(e) + outputdir.write("envinfo.txt not created yet") +""" + """ +if {smart_restart}:\n""".format(smart_restart = smart_restart) + """ + # Decide if we want to use the data that's currently in the local trigdir or the one that is remote + p = subprocess.Popen('cp_files.py {0} {1}'.format(str(outputDir),'""" + str(workdir) + """/' + str(outputDir)), + shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE, env = envdat) + p.wait() + out, err = p.communicate() + with open('""" + str(workdir) + """' + '/logs/cp_files_' + str(outputDir) + '.out', 'a+') as f: + f.write(str(out)) + with open('""" + str(workdir) + """' + '/logs/cp_files_' + str(outputDir) + '.err', 'a+') as f: + f.write(str(err)) + + # if restarting from checkpoint, delete extra files + # WARNING, this has not been rigorously tested. 
Look here for bugs
+    p = subprocess.Popen('delete_corruption.py {0}'.format(str(outputDir)),
+        shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE, env = envdat)
+    out, err = p.communicate()
+    with open('""" + str(workdir) + """' + '/logs/delete_files_' + str(outputDir) + '.out', 'a+') as f:
+        f.write(str(out))
+    with open('""" + str(workdir) + """' + '/logs/delete_files_' + str(outputDir) + '.err', 'a+') as f:
+        f.write(str(err))
+
+    output_file.write("Done moving and deleting files")
+
+output_file.close()
 
 #
 # Extract MDC data if present
@@ -147,11 +213,11 @@ exit(0)
     prefile.close()
     os.chmod(os.path.join(workdir, "setupdirs.py"), 0o755)
 
+    # write environment to a file
+    create_envinfo(workdir)
     return
 
-
-#
 # Convenience Defs
 #
 def file_len(fname):
@@ -1091,7 +1157,7 @@ def condor_job_config(job_type, condor_job, config_parser):
 class bayeswaveJob(pipeline.CondorDAGJob, pipeline.AnalysisJob):
 
     def __init__(self, cp, cacheFiles, injfile=None, numrel_data=None,
-            dax=False):
+            dax=False, smart_restart = True):
 
         #
         # [condor]: Common workflow configuration
@@ -1140,12 +1206,15 @@ class bayeswaveJob(pipeline.CondorDAGJob, pipeline.AnalysisJob):
 
         if cp.getboolean('condor', 'transfer-files'):
             # Generate a script for PreCmd to setup directory structure
-            write_pre_cmd(workdir)
+            # get path to bin (which() returns None if cp_files.py is not on the PATH)
+            full_path = which("cp_files.py")
+            bin_path = full_path.replace("cp_files.py", "") if full_path else ""
+            print("bin_path", bin_path)
+            write_pre_cmd(workdir, smart_restart = smart_restart)
             self.add_condor_cmd("+PreCmd", '"setupdirs.py"')
             self.add_condor_cmd("+PreArgs", '"$(macrooutputDir) bayeswave"')
 
-            # Configure file transfers
-            transferstring='datafind,setupdirs.py'
+            transferstring='datafind,setupdirs.py,envinfo.txt'
 
             if cp.getboolean('condor','copy-frames'):
                 transferstring += ',$(macroframes)'
diff --git a/BayesWaveUtils/bayeswave_plot/BW_Flags.py b/BayesWaveUtils/bayeswave_plot/BW_Flags.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc62d71d4d9dfb6e4d89b4cf3b24ad9005538949
--- /dev/null
+++ b/BayesWaveUtils/bayeswave_plot/BW_Flags.py
@@ -0,0 +1,485 @@
+#!/usr/bin/env python
+from bayeswave_pipe import bayeswave_pipe_utils as pipe_utils
+import numpy as np
+import os
+import sys
+
+"""
+This script contains a class to display the results of a BayesWave run. 
+Written by Sophie Hourihane based on code written by Meg Millhouse and probably others +""" +""" + Class containing run information for a BayesWave run + The parameters are kept inside of trigdir / bayeswave.run + + Must use readbwb to actually initialize values + + Example use: + # Initialize runFlags + runFlags = Flags('/home/sophie.hourihane/public_html/glitch_events/noNoise/seeds_noClean/0noise_2048_5chains_heterodyne_distance600') + # Set runFlags + +""" +class Flags: + def __init__(self, trigdir): + # Parameters describing where things live + if trigdir[-1] == '/': + self.trigdir = trigdir[:-1] + else: + self.trigdir = trigdir + self.XMLfname = None + self.jobName='' + + # Run settings + self.trigtime = 0 # gps time of event + self.flow = 32 + self.srate = 2048 + self.segment_start = 0 + self.seglen = 0 + self.Nchain = 20 # 20 is default number of chains + self.Niter = 4000000 + self.Ncycle = 100 + self.chirplets = False + + # Windows, cleaning, and bayesLine + self.bayesLine = False + self.noClean = 0 + self.CBCwindow = -1 #window around cbc event + self.window = 0 + + # Parameters describing what run type was used + self.signalGlitch_flag = False + self.fullOnly_flag = False + self.GlitchCBC_flag = False + self.SignalCBC_flag = False + self.glitchOnly_flag = False + self.cbcOnly_flag = False + self.CBCType = False # ie, literally if CBC is in the modellist + self.CBCName = False # I think this is unused now + self.multi_type = False # true for full, cbcsignal, cbcglitch + self.noNoiseFlag = False + self.cleanOnly_flag = False + + self.modelList = [] # Gets filled in readbwb, theres probably a clearer way to to this + + # Injection settings + self.injFlag = False + self.event = 0 + self.mdc = False + self.inj_dat = None #If there is an injection, this holds an injection table + self.inj_fref = 0 + + # Likelihood calculation + self.heterodyneL = False + + # Printed output + self.verbose = 0 + self.checkpoint = False + + # Lists of IFOs + self.ifoList = [] + self.ifoNames = [] + self.snrList = [] + + self.cbc_keys = ['M1_s', 'M2_s', 'Mchirp_s', 'Mtot_s', + 's1', 's2', 'chi_eff', + 'phase', 't', + 'distance', 'redshift', + 'RA', 'sindec', 'psi', 'cosiota', + 'M1_d', 'M2_d', 'Mchirp_d', 'Mtot_d', + 'lambda1', 'lambda2', 'lambda3', 'lambda4' ] + + def run_dirname(self): + if self.fullOnly_flag: + return('full') + if self.SignalCBC_flag: + return('cbcsignal') + if self.GlitchCBC_flag: + return('cbcglitch') + if self.signalGlitch_flag: + # case where signal and glitch are run together + # but not on top of eachother + return('signal') + # if there is only one thing in the modellist, return that one + if len(self.modelList) == 1: + return(self.modelList[0]) + if 'clean' in self.modelList: + # if there is clean in the modelList and only one other thing, return the other thing + if len(self.modelList) - 1 == 1: + return(self.modelList[0]) + else: + print("uh oh! 
what's the dirname?") + return('') + + """ + Given an xml parameter name, will return that value + Accepted names are: + param_names = ['M1_s', 'M2_s', 'Mchirp_s', 'Mtot_s', + 's1', 's2', 'chi_eff', + 'phase', 't', + 'distance', 'redshift', + 'RA', 'sindec', 'psi', 'cosiota', + 'M1_d', 'M2_d', 'Mchirp_d', 'Mtot_d'] + """ + def get_xml_parameter(self, p): + if p[-2:] == '_d': + p = p[:-2] + # if we get a value in the source frame, divide detector frame by (1 + z) + if p[-2:] == '_s': + p = p[:-2] + z = redshift_from_distance(self.inj_dat.getColumnByName('distance')[self.event]) + return(self.get_xml_parameter(p) / (1.0 + z)) + if p == 'Mc' or p == 'Mchirp': + return(self.inj_dat.getColumnByName('mchirp')[self.event]) + elif p == 'Z' or p == 'redshift': + return(redshift_from_distance(self.inj_dat.getColumnByName('distance')[self.event])) + elif p == 'spin1' or p == 'spin2' or p == 's1' or p == 's2': + # dimensionless spin parameter a1 and a2 to be more precise + # TODO not sure what units the xml files are in + # but I will assume they are in units that match up with conventions here + # https://arxiv.org/abs/1409.7215 (pg 6, a_i = |s_i| / m_i^2, otherwise missing a factor of c / G) + if p[-1] == '1': + p = 'spin1' + else: + p = 'spin2' + spin = np.zeros(3) + mass = self.get_xml_parameter('mass{0}'.format(p[-1])) #detector frame + for i, k in enumerate(['x', 'y', 'z']): + spin[i] = self.inj_dat.getColumnByName(p + k)[self.event] + return(np.linalg.norm(spin) / mass ** 2) + elif (p == 'chi_eff'): + a1 = self.get_xml_parameter('spin1') + m1 = self.get_xml_parameter('mass1') + a2 = self.get_xml_parameter('spin2') + m2 = self.get_xml_parameter('mass2') + + # Note! We assume all spins are aligned with the orbital angular momentum + theta1 = 0 + theta2 = 0 + return((m1 * a1 * np.cos(theta1) + m2 * a2 * np.cos(theta2)) / (m1 + m2)) + elif p == 'Q': + return(self.inj_dat.getColumnByName('mass2')[self.event] / self.inj_dat.getColumnByName('mass1')[self.event]) + elif p == 'mtot' or p == 'Mt' or p == 'Mtot': + return(self.inj_dat.getColumnByName('mass2')[self.event] + self.inj_dat.getColumnByName('mass1')[self.event]) + elif p == 'coa_t' or p == 't': + return(self.inj_dat.getColumnByName('geocent_end_time')[self.event] + self.inj_dat.getColumnByName('geocent_end_time_ns')[self.event] * 1e-9) #- self.segment_start) + elif p == 't_segment': + return(self.inj_dat.getColumnByName('geocent_end_time')[self.event] + self.inj_dat.getColumnByName('geocent_end_time_ns')[self.event] * 1e-9 - self.segment_start) + elif p == 'psi': + return(self.inj_dat.getColumnByName('polarization')[self.event]) + elif p == 'ra' or p == 'RA': + return(self.inj_dat.getColumnByName('longitude')[self.event]) + elif p == 'sin_dec' or p == 'sindec': + return(np.sin(self.inj_dat.getColumnByName('latitude')[self.event])) + elif p == 'dec': + return(self.inj_dat.getColumnByName('latitude')[self.event]) + elif p == 'cos_iota' or p == 'cosiota': + return(np.cos(self.inj_dat.getColumnByName('inclination')[self.event])) + elif p == 'iota': + return(self.inj_dat.getColumnByName('inclination')[self.event]) + elif p == 'phase' or p == 'phi' : + return(self.inj_dat.getColumnByName('coa_phase')[self.event]) + elif p == 'M1': + return(self.inj_dat.getColumnByName('mass1')[self.event]) + elif p == 'M2': + return(self.inj_dat.getColumnByName('mass2')[self.event]) + elif 'lambda' in p: + print("TODO, ADD LAMBDA PARAMETERS") + return 0 + else: + return(self.inj_dat.getColumnByName(p)[self.event]) + + +# ---------------------------------- +# Function 
for initializing runFlags +# ---------------------------------- +def readbwb(runFlags, verbose = True): + # -- Initialize variables with info about the job + info = "" + jobName = '' + restrictModel = '' + lalsimFlag = False + found_model = False # Flag to see if we found what model we should be using + + # -- Open *bayeswave.run + bayeswaverunfile = runFlags.trigdir + '/bayeswave.run' + if verbose: + print("BWrun file is {0}".format(bayeswaverunfile)) + runFlags.bayeswaverunfile = bayeswaverunfile + bayeswave = open(bayeswaverunfile, 'r') + # -- Parse command line arguments + raw_lines = bayeswave.readlines() + lines = [l.lstrip() for l in raw_lines] + cmdline = '' + for l in lines: + if 'Command' in l: + cmdline = l.rstrip().split(' ') + + # Go through the different commands in bayeswave.run + for index, arg in enumerate(cmdline): + # Parameters describing where things live + if arg=='--runName': + jobName = cmdline[index+1] + if verbose: + print("The job name is: {0}".format(jobName)) + runFlags.jobName = jobName+'_' + + # Run settings + elif arg=='--trigtime': + gps = float(cmdline[index+1]) + runFlags.gps = gps + runFlags.trigtime = gps + info = info + "The trigger time is GPS: {0}\n".format(gps) + elif arg=='--srate': + runFlags.srate = float(cmdline[index + 1]) + elif arg=='--segment-start': + runFlags.segment_start = float(cmdline[index + 1]) + elif arg=='--seglen': + runFlags.seglen = float(cmdline[index + 1]) + elif arg == '--Nchain': + runFlags.Nchain = int(cmdline[index + 1]) + elif arg == '--Niter': + runFlags.Niter = int(cmdline[index + 1]) + elif arg == '--Ncycle': + runFlags.Ncycle = int(cmdline[index + 1]) + elif arg == '--chirplets': + runFlags.chirplets = True + + # IFOs + elif arg=='--ifo': + runFlags.ifoNames.append(cmdline[index+1]) + + # Windows, cleaning, and bayesLine + elif arg == '--bayesLine': + runFlags.bayesLine = True + elif arg == '--window': + runFlags.window = float(cmdline[index + 1]) + elif arg == '--CBCwindow': + runFlags.CBCwindow = float(cmdline[index + 1]) + elif arg == '--noClean': + if verbose: + print('\nThis run had no cleaning phase, --noClean\n') + runFlags.noClean = True + # Parameters describing what run type was + elif arg=='--glitchOnly': + if verbose: + print('\nThis run was executed with the --glitchOnly flag\n') + runFlags.modelList = ['glitch'] + runFlags.glitchOnly_flag = True + runFlags.noNoiseFlag = True + found_model = True + elif arg=='--signalOnly': + if verbose: + print('\nThis run was executed with the --signalOnly flag\n') + runFlags.modelList = ['signal'] + found_model = True + elif arg=='--cbcOnly': + if verbose: + print('\nThis run was executed with the --cbcOnly flag\n') + runFlags.modelList = ['cbc'] + runFlags.CBCType = True + runFlags.cbcOnly_flag = True + found_model = True + elif arg=='--noiseOnly': + if verbose: + print('\nThis run was executed with the --noiseOnly flag\n') + runFlags.modelList = ['noise'] + found_model = True + elif arg=='--skipNoise': + if verbose: + print('\nThis run was executed with the --skipNoise flag\n') + runFlags.noNoiseFlag = True + # Combined types + elif arg=='--fullOnly': + if verbose: + print('\nThis run was executed with the --fullOnly flag\n') + runFlags.fullOnly_flag = True + runFlags.modelList = ['signal', 'glitch'] + found_model = True + elif arg=='--GlitchCBC': + if verbose: + print('\nThis run was executed with the --GlitchCBC flag\n') + runFlags.GlitchCBC_flag = True + runFlags.CBCType = True + runFlags.modelList = ['glitch', 'cbc'] + runFlags.multi_type = True + found_model = 
True + elif arg=='--SignalCBC': + if verbose: + print('\nThis run was executed with the --SignalCBC flag\n') + runFlags.SignalCBC_flag = True + runFlags.CBCType = True + runFlags.multi_type = True + runFlags.modelList = ['signal', 'cbc'] + found_model = True + elif arg=='--cleanOnly': + if verbose: + print('\nThis run was executed with the --cleanOnly flag\n') + runFlags.cleanOnly_flag = True + runFlags.modelList = ['clean'] + found_model = True + # Injection settings + elif arg=='--inj': + runFlags.injFlag = True + runFlags.mdc = True + runFlags.XMLfname = cmdline[index+1] + xmldoc = pipe_utils.ligolw_utils.load_filename(runFlags.trigdir + '/' + runFlags.XMLfname, contenthandler = pipe_utils.LIGOLWContentHandler, verbose = False) + runFlags.inj_dat = pipe_utils.lsctables.SimInspiralTable.get_table(xmldoc) + if verbose: + print('xml filename is {0}'.format(runFlags.trigdir + '/' + runFlags.XMLfname)) + elif arg=='--MDC-cache': + runFlags.mdc = True + elif arg == '--MDC-channel': + runFlags.mdc = True + elif arg =='--event': + # if injection then this indexes the event in the .xml file + runFlags.event = int(cmdline[index + 1]) + elif arg=='--MDC-cache': + mdc = True + elif arg=='--lalinspiralinjection': + lalsimFlag = True + elif arg=='--inj-fref': + runFlags.inj_fref = float(cmdline[index + 1]) + # Likelihood calculation + elif arg=='--heterodyneL': + runFlags.heterodyneL = True + + # Printed output settings + elif arg == '--checkpoint': + runFlags.checkpoint = True + elif arg=='--verbose': + if verbose: + print('\nThis run was executed with the --verbose flag\n') + runFlags.verbose = True + + if lalsimFlag: + runFlags.injFlag = False + + if not found_model: + runFlags.signalGlitch_flag = True + runFlags.modelList = ['signal', 'glitch'] + if not runFlags.noClean and not runFlags.cleanOnly_flag: + runFlags.modelList.append('clean') + + # -- Determine number of IFOs + ifoNum = len(runFlags.ifoNames) + # -- Convert the IFO name list into a list of numbers useful for opening datafiles + runFlags.ifoList = [str(ii) for ii in range(0,len(runFlags.ifoNames))] + + # -- Getting flow + for index, arg in enumerate(cmdline): + for ifo in runFlags.ifoNames: + if arg == '--{0}-flow'.format(ifo): + #TODO THIS IS WRONG IF DIFFERENT IFOS HAVE DIFFERENT FLOW + runFlags.flow = float(cmdline[index+1]) + + info = info + "Detectors(s) found: {0}\n".format(len(runFlags.ifoList)) + info = info + "The detectors are: {0}\n".format(', '.join(runFlags.ifoNames)) + info = info + "BayesLine is: {0}\n".format(runFlags.bayesLine ) + info = info + "Models are: {0}\n".format(', '.join(runFlags.modelList)) + info = info + "\tflow = {0}\n".format(runFlags.flow) + info = info + "\tseglen = {0}\n".format(runFlags.seglen) + info = info + "\tNchains = {0}\n".format(runFlags.Nchain) + info = info + "\tCleaning phase = {0}\n".format(not runFlags.noClean) + + if 'cbc' in runFlags.modelList: + info = info + "\theterodyneL = {0}\n".format(runFlags.heterodyneL) + + # -- If MDC, read SNR info + if runFlags.mdc: + for ifo in runFlags.ifoList: + snrFile = 'post/injection_whitened_moments_%s.dat'%(runFlags.ifoNames[int(ifo)]) + if not os.path.exists(snrFile): + # post hasn't been run yet most likely + #sys.exit("\n {0} not found! 
\n".format(snrFile)) + break + snrdata = open(snrFile, 'r') + snrdata.readline() + snr = float(snrdata.readline().split(' ')[0]) + runFlags.snrList.append(snr) + snrdata.close() + info = info + 'Injected SNR in detector {0} = {1}\n'.format(runFlags.ifoNames[int(ifo)],runFlags.snrList[-1]) + bayeswave.close() + # -- Report to user + if verbose: + print("{0}".format(info)) + +""" + Functions for calculating redshift (used in Flags.get_xml_paramter) +""" +def redshift_from_distance(DL): # distance (MPC) + #double zlow, zhigh; + #double zmid; + #double Dlow, Dhigh, Dmid; + #double u, v, w; + #int i; + + zlow = 1.0; + zhigh = 1000.0; + + if(DL < 1.0): + zlow = 1.0e-10 + zhigh = 3.0e-4 + elif (DL < 100.0): + zlow = 2.0e-4 + zhigh = 5.0e-2 + elif (DL < 1000.0): + zlow = 1.0e-2 + zhigh = 3.0e-1 + elif (DL < 10000.0): + zlow = 1.0e-1 + zhigh = 2.0 + + zmid = (zhigh + zlow) / 2.0; + + Dlow = DL_fit(zlow); + Dhigh = DL_fit(zhigh); + Dmid = DL_fit(zmid); + zmid = (zhigh + zlow) / 2.0; + + Dlow = DL_fit(zlow); + Dhigh = DL_fit(zhigh); + Dmid = DL_fit(zmid); + + for i in range(30): + + u = Dlow-DL; + v = Dmid-DL; + + if(u*v < 0.0): + zhigh = zmid; + Dhigh = Dmid; + else: + zlow = zmid; + Dlow = Dmid; + + zmid = (zhigh+zlow)/2.0; + Dmid = DL_fit(zmid); + + return(zmid); + +# fits distance to redshift assuming flat cosmological constant +def DL_fit(z): + # Planck values https://arxiv.org/abs/1807.06209 + Om = 0.315; + H0 = 67.4; + #double RH, x1, x2, Om1, Om2; + + CLIGHT = 2.99792458e8 # m / s + MPC = 3.085677581e+22 # meters + + # See http://arxiv.org/pdf/1111.6396v1.pdf + + RH = CLIGHT / H0 * (1.0e-3 * MPC); + + x1 = (1.0-Om)/(Om); + + x2 = (1.0-Om)/(Om*pow((1.0+z),3.0)); + + Om1 = (1.0+1.32*x1+0.4415*x1*x1+0.02656*x1*x1*x1)/(1.0+1.392*x1+0.5121*x1*x1+0.03944*x1*x1*x1); + + Om2 = (1.0+1.32*x2+0.4415*x2*x2+0.02656*x2*x2*x2)/(1.0+1.392*x2+0.5121*x2*x2+0.03944*x2*x2*x2); + + D = 2.0*RH*(1.0+z) / np.sqrt(Om)*(Om1-Om2 / np.sqrt(1.0+z)); + + return(D / MPC); diff --git a/BayesWaveUtils/bayeswave_plot/__init__.py b/BayesWaveUtils/bayeswave_plot/__init__.py index 6d2ef4adb7efde593f94595e9afdfbcf273bc428..2650e9542d3f7dd86ad5df568b0de7672e7531d2 100755 --- a/BayesWaveUtils/bayeswave_plot/__init__.py +++ b/BayesWaveUtils/bayeswave_plot/__init__.py @@ -17,4 +17,4 @@ """ """ -__all__=['bayeswave_utils'] +__all__=['bayeswave_utils', "BW_Flags"] diff --git a/BayesWaveUtils/bayeswave_plot_data/navigate.js b/BayesWaveUtils/bayeswave_plot_data/navigate.js index e64ff2a814852b0c319a3f6a26860a92643ac188..551da8337bceff0d22b6ea0880113bf690eb3556 100644 --- a/BayesWaveUtils/bayeswave_plot_data/navigate.js +++ b/BayesWaveUtils/bayeswave_plot_data/navigate.js @@ -139,6 +139,21 @@ $(document).ready(function(){ $("#main").load("./html/snr.html"); }); }); +$(document).ready(function(){ + $("#clean").click(function(){ + $("#main").load("./html/clean.html"); + }); +}); +$(document).ready(function(){ + $("#cleanmoments").click(function(){ + $("#main").load("./html/cleanmoments.html"); + }); +}); +$(document).ready(function(){ + $("#cleaning").click(function(){ + $("#main").load("./html/cleaning.html"); + }); +}); function toggle(showHideDiv, switchTextDiv) { var ele = document.getElementById(showHideDiv); var text = document.getElementById(switchTextDiv); diff --git a/BayesWaveUtils/scripts/bayeswave_pipe b/BayesWaveUtils/scripts/bayeswave_pipe index bd31e07ef69b1ce4f26de93d28248942428aa8d1..c1ca4b1ad983a5da926bf0f3d31276764c4529f5 100755 --- a/BayesWaveUtils/scripts/bayeswave_pipe +++ b/BayesWaveUtils/scripts/bayeswave_pipe @@ 
-197,6 +197,7 @@ def parser(): parser.add_argument("--fpeak-analysis", default=False, action="store_true") parser.add_argument("--trigger-time-delta", type=float, default=0.0) parser.add_argument("--bayeswave-clean-frame", default=False, action="store_true") + parser.add_argument("--smart-restart", action="store_true") parser.add_argument("--condor-submit", default=False, action="store_true") # Advanced condor options @@ -863,7 +864,8 @@ if opts.bayesline_median_psd: bayesline_medianpsd_job = pipe_utils.bayeswaveJob(bayesline_cp, cache_files, injfile=injfile, - numrel_data=numrel_data) + numrel_data=numrel_data, + smart_restart = opts.smart_restart) bayesline_medianpsd_log = bayesline_medianpsd_job._CondorJob__log_file bayesline_medianpsd_job.set_sub_file(os.path.join(workdir, 'bayeswave_median_psd.sub')) @@ -893,7 +895,7 @@ if opts.bayesline_median_psd: cp.set('condor', 'extra-files', extra_files) bayeswave_job = pipe_utils.bayeswaveJob(cp, cache_files, injfile=injfile, - numrel_data=numrel_data) + numrel_data=numrel_data, smart_restart = opts.smart_restart) bayeswave_log = bayeswave_job._CondorJob__log_file bayeswave_post_job = pipe_utils.bayeswave_postJob(cp, cache_files, @@ -1427,8 +1429,3 @@ if opts.condor_submit: else: print('Unable to submit DAG file') -print('AT END, CBC_trigger_list is ', CBC_trigger_list) - - - - diff --git a/BayesWaveUtils/scripts/cp_files.py b/BayesWaveUtils/scripts/cp_files.py new file mode 100644 index 0000000000000000000000000000000000000000..bbab9bb48975617d7b624e0a5b5d61c7dac10c28 --- /dev/null +++ b/BayesWaveUtils/scripts/cp_files.py @@ -0,0 +1,150 @@ +#!/usr/bin/env python +import shutil, sys, os, subprocess +from bayeswave_plot import BW_Flags as bwf + +def keep_which_directory(temp, local): + # read in from each what model is currently being run, and decide from there which one to keep + # If one is in the cleaning stage, while the other is not, it will keep the one not being cleaned + # if the 2 are in the same state, it will keep the version with more iterations + + # returns winner, loser + + # Read in trigger directory name + trigdir = str(sys.argv[1]) + runFlag_local = bwf.Flags(local) + runFlag_temp = bwf.Flags(temp) + + runFlag_dict = {'temp':runFlag_temp, + 'local':runFlag_local} + dirs = { 'temp' : temp, + 'local' : local} + #dirs = [temp, local] + + # If temp bayeswave.run doesn't exist, then just keep the local version + # if local bayeswave.run doesn't exist (and temp does), then keep the temp version! 
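+    # bwf.readbwb() opens <trigdir>/bayeswave.run directly, so a directory whose run file
+    # is missing shows up here as a FileNotFoundError rather than an explicit os.path check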
+ for i, key in enumerate(runFlag_dict.keys()): + try: + bwf.readbwb(runFlag_dict[key]) + except FileNotFoundError as e: + print(e) + # if we did not find the bayeswave.run file, then choose the other key as the winner + print("WARNING: " + runFlag_dict[key].trigdir + '/bayeswave.run' + " has yet to be created") + if key == 'local': + # winner, loser + return dirs['temp'], dirs['local'] + return dirs['local'], dirs['temp'] + + # checkpointing isn't even on, so there is no reason to check checkpointing files, exiting this function + if not runFlag_dict['local'].checkpoint: + sys.exit("--checkpoint has not been turned on, returning") + + models = {'temp':None, 'local':None} + lengths = {} + + # now actually read in the model and iterations + for i, key in enumerate(runFlag_dict.keys()): + ### Read in model + try: + # read in the model name + f = open(runFlag_dict[key].trigdir + '/checkpoint/state.dat') + except FileNotFoundError: + print("FileNotFound in {0}/checkpoint/state.dat".format(runFlag_dict[key].trigdir)) + # if checkpointing file not found, then return the other directory + if key == 'local': + # winner, loser + return dirs['temp'], dirs['local'] + return dirs['local'], dirs['temp'] + # read in model from first word of first line in the state file + model = str(f.readlines()[0].split()[0]) + models[key] = model + f.close() + + + ### Read in iterations + try: + f = open(runFlag_dict[key].trigdir + '/checkpoint/temperature.dat') + except FileNotFoundError: + # if checkpointing file not found, then return the other directory + if key == 'local': + # winner, loser + return dirs['temp'], dirs['local'] + return dirs['local'], dirs['temp'] + + # Keep track of file length + file_length = int(f.readlines()[0]) // runFlag_dict[key].Ncycle + lengths[key] = file_length + f.close() + + # now we confirmed we have 2 directories that both have stuff in them, let's decide which one is more full. 
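+    # Decision rules implemented by the branches below:
+    #   1. both directories report the same model -> keep the one with more completed
+    #      checkpoint iterations (ties keep the local directory)
+    #   2. exactly one of the two is in the cleaning phase -> keep the other one
+    #   3. anything else -> keep the local directory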
+ if models['temp'] == models['local']: + # the 2 models are the same, return the directory corresponding to the larger checkpoint + if lengths['temp'] > lengths['local']: + return dirs['temp'], dirs['local'] + # if they are the same length, keep the local directory + return dirs['local'], dirs['temp'] + # the 2 models are different, prioritize whichever one is not currently in the cleaning phase + if models['local'] == 'clean': + return dirs['temp'], dirs['local'] + if models['temp'] == 'clean': + return dirs['local'], dirs['temp'] + + # when in doubt, just keep the local directory + # winner, loser + return dirs['local'], dirs['temp'] + + +def switch_and_delete_dirs(winner, loser, dirname): + # winner becomes dirname, loser gets deleted + + # To make sure we don't delete something important, we make sure that the loser directory + # is something that we actually do want to delete + # directories we are deleteing should be of the form: WHATEVER/trigtime_WHATEVER or temp + deletenames = ['trigtime', 'temp'] + delete_loser = False + for delete in deletenames: + if delete in loser: + delete_loser = True + if not delete_loser: + print("I AM NOT DELETING THE {loser} DIRECTORY!".format(loser = loser)) + print("ERROR switch_and_delete_dirs: name does not contain temp or trigtime, returning") + return + else: + # loser gets deleted + shutil.rmtree(loser) + # winner gets moved + shutil.move(winner, dirname) + return + +# Make sure that directory string does not have a trailing '/' +def clean_input(dir_string): + if dir_string[-1] == '/': + return dir_string[:-1] + else: + return(dir_string) + +# Call to cp_files is +# cp_files.py local temp + +workdir = os.getcwd() +print("cwd is {0}".format(workdir)) + +# read in the directory local directory, then we make a temp directory from that directory +# copy over the working directory to the remote machine +tempdir = clean_input(str(sys.argv[2])) +temp = './temp' +#shutil.copytree(tempdir, temp) +print("Copying over files from tempdir {0} to tempdir {1}".format(tempdir, temp)) +p = subprocess.Popen('cp -r {0} {1}'.format(tempdir, temp), shell = True,stdout=subprocess.PIPE,stderr=subprocess.PIPE) +p.wait() + +# localdir +outputdir = clean_input(str(sys.argv[1])) +print("localdir is {0}".format(outputdir)) +try: + process = subprocess.run('head {0}/checkpoint/temperature.dat'.format(outputdir), shell = True) +except: + pass + +winner, loser = keep_which_directory(temp, outputdir) +print("Keeping winner {0} \n Deleting loser {1}".format(winner, loser)) +switch_and_delete_dirs(winner, loser, outputdir) diff --git a/BayesWaveUtils/scripts/delete_corruption.py b/BayesWaveUtils/scripts/delete_corruption.py new file mode 100644 index 0000000000000000000000000000000000000000..25b37468d3358ccfc6570cf01a17fdaffe0936b3 --- /dev/null +++ b/BayesWaveUtils/scripts/delete_corruption.py @@ -0,0 +1,59 @@ +#!/usr/bin/env python +import subprocess, os, sys +from bayeswave_plot import BW_Flags as bwf + +def delete_lines(filename, file_length): + # delete all lines except the first `file_length` lines + + # get number of lines currently in the file + try: + N_lines = int(subprocess.check_output("wc -l <{filename}".format(filename = filename), shell=True)) + except subprocess.CalledProcessError: + # returns if file does not exist + return + if N_lines == file_length: + return + elif N_lines < file_length: + print("ERROR!! File {filename} too short! 
Should be {file_length} but is {N_lines}".format(filename = filename, file_length = file_length, N_lines = N_lines)) + else: + print("Deleting {0} lines from {filename} ".format(N_lines - file_length, filename = filename)) + # deletes from file_length (exclusive) to end of file + subprocess.run("sed -i '{file_length},$ d' {filename}".format(filename = filename, file_length = file_length+1), shell = True) + +# Read in trigger directory name +trigdir = str(sys.argv[1]) +runFlags = bwf.Flags(trigdir) +try: + bwf.readbwb(runFlags) +except FileNotFoundError: + sys.exit("WARNING: " + runFlags.trigdir + '/bayeswave.run' + " has yet to be created, returning") + +if not runFlags.checkpoint: + sys.exit("--checkpoint has not been turned on, returning") + +# read in the model flag to see what checkpoint iteration we should be at +f = open(runFlags.trigdir + '/checkpoint/temperature.dat') +file_length = int(f.readlines()[0]) // runFlags.Ncycle +f.close() + +# get name of model +try: + f = open(trigdir + '/checkpoint/state.dat', 'r') +except: + sys.exit(runFlags.trigdir + '/checkpoint/state.dat' + " has yet to be created, returning") +model = f.readlines()[0].split()[0] # first entry is what model we are on +f.close() +print("Model is {model}".format(model = model)) + +# Get files relating to model +chaindir = runFlags.trigdir + '/chains' +onlyfiles = [f for f in os.listdir(chaindir) if os.path.isfile(os.path.join(chaindir, f))] +model_files = [f for f in onlyfiles if (model in f)] + +# if model is cbc, cbc_skychain.dat should not be considered part of the cbc files +if model == 'cbc': + model_files.remove('cbc_skychain.dat') + +# delete extra lines from files +for f in model_files: + delete_lines(runFlags.trigdir + '/chains/' + f, file_length) diff --git a/BayesWaveUtils/scripts/megaplot.py b/BayesWaveUtils/scripts/megaplot.py index b04e5ca14b49b2bfb6a75cf4698031b7f35d2ffc..da32d27767e52ac5c7e7485e81ab892af195e53f 100755 --- a/BayesWaveUtils/scripts/megaplot.py +++ b/BayesWaveUtils/scripts/megaplot.py @@ -3,7 +3,7 @@ This script generates a webpage to display the results of a BayesWave run. This script was originally put together by Jonah Kanner and Francecso Pannarale. -Major and minor modifications have been made by Meg Millhouse, Sudarshan Ghonge, and probably others. 
+Major and minor modifications have been made by Meg Millhouse, Sudarshan Ghonge, Sophie Hourihane, and probably others This code is tested with python2.7 and python 3.9 """ @@ -32,7 +32,7 @@ import re import traceback from bayeswave_pipe import bayeswave_pipe_utils as pipe_utils import lalsimulation as lalsim - +from bayeswave_plot import BW_Flags @@ -49,7 +49,11 @@ except IndexError: # No work directory specified, workdir=./ workdir=os.getcwd() + os.chdir(workdir) +RUN_FLAGS = BW_Flags.Flags(os.getcwd()) +BW_Flags.readbwb(RUN_FLAGS) + print('Workdir: ') print(workdir) print('\n') @@ -119,9 +123,11 @@ cbc_dict['spin1'] = [4, 'spin1', r'$s_1$', 'Spin 1'] cbc_dict['spin2'] = [5, 'spin2', r'$s_2$', 'Spin 2'] cbc_dict['chi_eff'] = [6, 'chi_eff', '$\chi_{\mathrm{eff}}$', 'Effective spin parameter'] -# coalesence time measured from trigger time cbc_dict['coa_phase'] = [7, 'coa_phase', r'$\phi$', 'Phase at coalescence'] -cbc_dict['coa_t'] = [8, 'coa_t', 't [s]', 'Time since trigger'] +# coalesence time measured from trigger time +cbc_dict['t_segment'] = [8, 'coa_t', 't [s]', 'Time since trigger'] +# coalesence time measured in gps time +cbc_dict['coa_t'] = [8, 'coa_t', 't [s]', 'GPS time of event'] cbc_dict['distance'] = [9, 'distance', 'D [mpc]', 'Distance'] cbc_dict['Z'] = [10, 'Z', 'Z', 'Redshift'] @@ -143,56 +149,14 @@ cbc_dict['lambda2'] = [20, 'lambda2', r'$\lambda_2$', 'Lambda 2'] cbc_dict['lambda3'] = [21, 'lambda3', r'$\lambda_3$', 'Lambda 3'] cbc_dict['lambda4'] = [22, 'lambda4', r'$\lambda_4$', 'Lambda 4'] -class Flags: - def __init__(self): - self.fullOnly_flag = 0 - self.GlitchCBC_flag = 0 - self.SignalCBC_flag = 0 - self.glitchOnly_flag = 0 - self.CBCType = 0 - self.multi_type = 0 # true for full, cbcsignal, cbcglitch - self.modelList = () # Gets filled in readbwb, theres probably a clearer way to to this - self.bayesLine = False - self.XMLfname = None - self.injFlag = False - self.event = 0 - self.gps = 0 # gps time of event - self.CBCwindow = -1 #window around cbc event - self.segment_start = 0 - self.CBCName = 0 - self.multi_type = 0 # true for full, cbcsignal, cbcglitch - self.modelList = () # Gets filled in readbwb, theres probably a clearer way to to this - - self.verbose = 0 - self.noClean = 0 - self.Nchain = 20 # default value for number of chains - - self.checkpoint = False - self.noClean = False - - def run_dirname(self): - if self.fullOnly_flag: - return('full') - if self.SignalCBC_flag: - return('cbcsignal') - if self.GlitchCBC_flag: - return('cbcglitch') - if len(self.modelList) == 1: - return(self.modelList[0]) - #if self.glitchOnly_flag: - #return('glitch') - else: - print("uh oh! 
what's the dirname?") - return('') - modelList = ('signal', 'glitch', 'cbc') postDir = 'post/' plotsDir = 'plots/' htmlDir = 'html/' -#### --fullOnly ### -RUN_FLAGS = Flags() +CMAP_NAME = 'viridis' + # I think RUN_FLAGS gets set twice, I dont think this codeblock is necesssary if os.path.exists(postDir+'full'): @@ -213,6 +177,7 @@ scolor = 'darkorchid' cbccolor = '#d81159' # magenta #'#45d9a5' # teal #'#ffa756' # pale orange glitchcbccolor = '#ad3400' # fox brown signalcbccolor = '#dfa9fc' # lilac +injcolor = 'teal' def set_color(model): if model == 'glitch': @@ -227,157 +192,22 @@ def set_color(model): return(gcolor) #glitchcbccolor) elif model == 'cbcsignal': return(signalcbccolor) + elif model == 'clean': + return('mediumseagreen') + elif model == 'injection' or model == 'injected': + return(injcolor) return(ncolor) ifoColors = ['darkgoldenrod','darkkhaki','darkseagreen','olive','cadetblue','green','slategray','darkcyan'] signal_ifoColorList = ['darkorchid','fuchsia','indigo','orchid','slateblue','mediumvioletred','palevioletred'] -injcolor = 'teal' ###################################################################################################################### # # Read in run data and info # ###################################################################################################################### - -# ---------------------------------- -# Extract information about the run -# ---------------------------------- -# initialize RUN_FLAGS also -def readbwb(runFlags): - # -- Initialize variables with info about the job - info = "" - ifoNames = [] - ifoList = [] - jobName = '' - restrictModel = '' - injFlag = False - lalsimFlag = False - runFlags.noNoiseFlag = False - mdc = False - snrList = [] - # -- Open *bayeswave.run - bayeswaverunfile = glob.glob('*bayeswave.run') - if not len(bayeswaverunfile): - sys.exit("\n *bayeswave.run not found! 
\n") - bayeswaverunfile = bayeswaverunfile[0] - bayeswave = open(bayeswaverunfile, 'r') - # -- Parse command line arguments - raw_lines = bayeswave.readlines() - lines = [l.lstrip() for l in raw_lines] - cmdline = '' - for l in lines: - if 'Command' in l: - cmdline = l.rstrip().split(' ') - - for index, arg in enumerate(cmdline): - # Determine the IFOs involved - if arg=='--ifo': - ifoNames.append(cmdline[index+1]) - # Determine the trigger time - elif arg=='--trigtime': - gps = float(cmdline[index+1]) - runFlags.gps = gps - info = info + "The trigger time is GPS: {0}\n".format(gps) - # Determine the job name - elif arg=='--runName': - jobName = cmdline[index+1] - print("The job name is: {0}".format(jobName)) - jobName = jobName+'_' - elif arg=='--fullOnly': - print('\nThis run was executed with the --fullOnly flag\n') - runFlags.fullOnly_flag = True - runFlags.modelList = ('signal', 'glitch') - # Determine if the job was glitchOnly, noiseOnly, or signalOnly - elif arg=='--glitchOnly': - print('\nThis run was executed with the --glitchOnly flag\n') - restrictModel = 'glitch' - runFlags.glitchOnly_flag = True - runFlags.noNoiseFlag = True - elif arg=='--noiseOnly': - print('\nThis run was executed with the --noiseOnly flag\n') - restrictModel = 'noise' - elif arg=='--signalOnly': - print('\nThis run was executed with the --signalOnly flag\n') - restrictModel = 'signal' - elif arg=='--cbcOnly': - print('\nThis run was executed with the --cbcOnly flag\n') - restrictModel = 'cbc' - runFlags.modelList = ('cbc') - runFlags.CBCType = True - elif arg == '--noClean': - print('\nThis run had no cleaning phase, --noClean\n') - runFlags.noClean = True - elif arg=='--GlitchCBC': - print('\nThis run was executed with the --GlitchCBC flag\n') - runFlags.GlitchCBC_flag = True - runFlags.CBCType = True - runFlags.modelList = ('glitch', 'cbc') - runFlags.multi_type = True - elif arg == '--CBCwindow': - runFlags.CBCwindow = float(cmdline[index + 1]) - elif arg=='--SignalCBC': - print('\nThis run was executed with the --SignalCBC flag\n') - runFlags.SignalCBC_flag = True - runFlags.CBCType = True - runFlags.multi_type = True - runFlags.modelList = ('signal', 'cbc') - elif arg=='--skipNoise': - print('\nThis run was executed with the --skipNoise flag\n') - runFlags.noNoiseFlag = True - elif arg=='--verbose': - print('\nThis run was executed with the --verbose flag\n') - runFlags.verbose = True - elif arg == '--Nchain': - runFlags.Nchain = int(cmdline[index + 1]) - elif arg == '--checkpoint': - runFlags.checkpoint = True - elif arg=='--segment-start': - runFlags.segment_start = float(cmdline[index + 1]) - elif arg=='--inj': - injFlag = True - runFlags.injFlag = True - mdc = True - runFlags.XMLfname = cmdline[index+1] - print('xml filename is {0}'.format(runFlags.XMLfname)) - elif arg =='--event': - # if injection then this indexes the event in the .xml file - runFlags.event = int(cmdline[index + 1]) - elif arg=='--MDC-cache': - mdc = True - elif arg=='--lalinspiralinjection': - lalsimFlag = True - elif arg=='--bayesLine': - runFlags.bayesLine = True - - if lalsimFlag: - injFlag = False - self.injFlag = False - # -- Determine number of IFOs - ifoNum = len(ifoNames) - # -- Convert the IFO name list into a list of numbers useful for opening datafiles - ifoList = [str(ii) for ii in range(0,len(ifoNames))] - info = info + "Detectors(s) found: {0}\n".format(len(ifoList)) - info = info + "The detectors are: {0}\n".format(', '.join(ifoNames)) - # -- If MDC, read SNR info - if mdc: - for ifo in ifoList: - snrFile = 
jobName+'post/injection_whitened_moments_%s.dat'%(ifoNames[int(ifo)]) - if not os.path.exists(snrFile): - sys.exit("\n {0} not found! \n".format(snrFile)) - snrdata = open(snrFile, 'r') - snrdata.readline() - snr = float(snrdata.readline().split(' ')[0]) - snrList.append(snr) - snrdata.close() - info = info + 'Injected SNR in detector {0} = {1}\n'.format(ifoNames[int(ifo)],snrList[-1]) - bayeswave.close() - # -- Report to user - print("{0}".format(info)) - return(jobName, restrictModel, mdc, injFlag, bayeswaverunfile, ifoList, ifoNames, gps, snrList, info, runFlags) - - # --------------------------------------------- # Get Median waveform with CIs # --------------------------------------------- @@ -408,8 +238,8 @@ def get_wavelet_params(filename, model, chirpflag=False, O1version=False, **keyw NE = 6 # number of extrinsic parameters start = 1 - labels = ['t','f','Q','A','phi_int'] # parameters of individual wavelets - extlabels = ['alpha','sindelta','psi','ecc', 'phi_ext','scale'] # Common extrinsic parameters + labels = ['t','f','Q','logA','phi_int'] # parameters of individual wavelets + extlabels = ['alpha','sindelta','psi','elip', 'phi_ext','scale'] # Common extrinsic parameters if chirpflag: NW = 6 labels.append('beta') @@ -447,75 +277,13 @@ def get_wavelet_params(filename, model, chirpflag=False, O1version=False, **keyw data[extlabels[l]].append(float(spl[l+1])) for i in range(0,waveletnumber): for l in range(0,NW): - data[labels[l]].append(float(spl[start+i*NW+l])) + if labels[l] == 'logA': + data[labels[l]].append(np.log10(float(spl[start+i*NW+l]))) + else: + data[labels[l]].append(float(spl[start+i*NW+l])) return data - - -# -------------------------------------------- -# Read wavelet chain file (From Meg) -# --------------------------------------------- -# -def wt(wave_params,psdfile): - """ - Makes a waveform from a set of wavelets - - arguments - --------- - wave_params (dict): the wavelet parameters - - psdfile (str): data file of the PSD - - outputs - ------- - array of the waveform - """ - - psd = np.genfromtxt(psdfile) - - # Find what Nsamp should be: - l = len(psd) - l = 2*l - Nsamp = 2**(l-1).bit_length() - - hs = np.zeros(Nsamp) - - # Find Tobs (1/df) - Tobs = 1./(psd[1,0]-psd[0,0]) - - fmin = int(psd[0,0]*Tobs) - - - wavenumber = wave_params['D'][0] - - for j in range(0,wavenumber): - t0 = wave_params['t'][j] - f0 = wave_params['f'][j] - Q = wave_params['Q'][j] - A = wave_params['A'][j] - phi0 = wave_params['phi'][j] - - i = int(f0*Tobs) - fac = 1.0/math.sqrt(psd[i-fmin,1]) - - tau = Q/(2*np.pi*f0) - - tmax = t0 + 4.0*tau - tmin = t0 - 4.0*tau - - imin = int((tmin/Tobs)*(Nsamp)) - imax = int((tmax/Tobs)*(Nsamp)) - if imin < 0: imin = 0 - if imax > Nsamp: imax = Nsamp - for i in range(imin,imax): - t = float(i)/Nsamp*Tobs - sf = A*np.exp(-((t-t0)**2)/(tau**2)) - sf *= fac - hs[i] += sf*np.cos(2*np.pi*f0*(t-t0)+phi0) - return(hs) - - #Plotting functions ###################################################################################################################### @@ -547,10 +315,9 @@ def get_axes(jobName, postDir, ifoList, model, time, runFlags = RUN_FLAGS): axisList = [] for ifo in ifoList: # -- Read Signal model - if runFlags.multi_type: - filename = str(jobName)+postDir+'{2}/{1}_median_time_domain_waveform_{0}.dat'.format(ifoNames[int(ifo)], model, runFlags.run_dirname()) - else: - filename = str(jobName)+postDir+'{1}/{1}_median_time_domain_waveform_{0}.dat'.format(ifoNames[int(ifo)], model) + local_runType, local_model = runType_and_model(model, runFlags) + 
filename = str(jobName)+postDir+'{runType}/{model}_median_time_domain_waveform_{ifo}.dat'.format(ifo = ifoNames[int(ifo)], + model = local_model, runType = local_runType) try: timesamp, median_waveform, dummy1, dummy2, down_vec, up_vec = get_waveform(filename) @@ -687,11 +454,11 @@ def plot_evidence(jobName, plotsDir, runFlags): if runFlags.fullOnly_flag: print(' log( E_signal / E_glitch ) = N/A (--fullOnly mode)') - elif restrictModel == 'signal': + elif runFlags.modelList == ['signal']: sig_gl = 0.0 err_sig_gl = 1.0 print(' log( E_signal / E_glitch ) = N/A (no glitch model)') - elif restrictModel == 'glitch': + elif runFlags.modelList == ['glitch']: sig_gl = 0.0 err_sig_gl = 1.0 print(' log( E_signal / E_glitch ) = N/A (no signal model)') @@ -892,6 +659,8 @@ def plot_full_spectro(jobName, postDir, ifo, plotsDir, worc, mdc, models, axwinn # plot powerspec of models for i, model in enumerate(models): + if model == 'clean': + continue colour = set_color(model) powerspec_info = powerspec_infos[i] plt.fill_between(powerspec_info[0],powerspec_info[2],powerspec_info[3],color=colour,alpha=0.5) @@ -980,23 +749,25 @@ def plot_tf_tracks(jobName, postDir, ifo, plotsDir, worc, mdc, model, axwinner, # -------------------------------------- # Plot Q scans of waveforms # -------------------------------------- -def Q_scan(subDir,model, Q, t, f, ifo, axwinner, f_axwinner, climwinner=[1,1], runFlags = RUN_FLAGS): +def Q_scan(subDir,model, Q, t, f, ifo, axwinner, f_axwinner, climwinner=[1,1], runFlags = RUN_FLAGS, cmap_name = 'viridis'): if (model == 'data'): - filename = postDir+'{0}/{0}_spectrogram_{1}_{2}.dat'.format(model, Q, ifoNames[int(ifo)]) + filename = postDir+'{model}/{model}_spectrogram_{Q}_{ifo}.dat'.format(model = model, Q = Q, ifo = ifoNames[int(ifo)]) elif (model == 'injected'): - filename = postDir+'{0}_spectrogram_{1}_{2}.dat'.format(model, Q, ifoNames[int(ifo)]) + filename = postDir+'{model}_spectrogram_{Q}_{ifo}.dat'.format(model = model, Q = Q, ifo = ifoNames[int(ifo)]) + elif model == 'clean': + filename = postDir+'{runType}/{model}_spectrogram_{Q}_{ifo}.dat'.format(runType = 'clean', model = 'glitch', Q = Q, ifo = ifoNames[int(ifo)]) elif runFlags.multi_type: - filename = postDir+'{3}/{0}_spectrogram_{1}_{2}.dat'.format(model, Q, ifoNames[int(ifo)], runFlags.run_dirname()) + filename = postDir+'{runType}/{model}_spectrogram_{Q}_{ifo}.dat'.format(runType = runFlags.run_dirname(), model = model, Q = Q, ifo = ifoNames[int(ifo)]) else: - filename = postDir + subDir + '/' + '{0}_spectrogram_{1}_{2}.dat'.format(model, Q, ifoNames[int(ifo)]) + filename = postDir+'{runType}/{model}_spectrogram_{Q}_{ifo}.dat'.format(runType = subDir, model = model, Q = Q, ifo = ifoNames[int(ifo)]) data = np.genfromtxt(filename) fig, ax = plt.subplots() try: - qplot = ax.imshow(data,aspect='auto',origin='lower',extent=[t[0],t[-1],f[0],f[-1]], cmap='plasma') + qplot = ax.imshow(data,aspect='auto',origin='lower',extent=[t[0],t[-1],f[0],f[-1]], cmap = cmap_name) except: - print("Oops! you're probably using python 2, plasma doesn't exist yet!") + print("Oops! 
you're probably using python 2, {cmap} doesn't exist yet!".format(cmap = cmap_name)) qplot = ax.imshow(data,aspect='auto',origin='lower',extent=[t[0],t[-1],f[0],f[-1]], cmap='OrRd') if model == 'data': @@ -1022,16 +793,19 @@ def Q_scan(subDir,model, Q, t, f, ifo, axwinner, f_axwinner, climwinner=[1,1], r axis.set_major_formatter(ScalarFormatter()) # make a colorbar plt.colorbar(qplot) - plt.savefig(plotsDir+'{0}_spectrogram_Q{1}_{2}.png'.format(model,Q,ifoNames[int(ifo)])) + if subDir == 'clean' and model == 'glitch_residual': + plt.savefig(plotsDir+'{model}_spectrogram_Q{Q}_{ifo}.png'.format(model = 'clean_residual', Q = Q, ifo = ifoNames[int(ifo)])) + else: + plt.savefig(plotsDir+'{model}_spectrogram_Q{Q}_{ifo}.png'.format(model = model, Q = Q, ifo = ifoNames[int(ifo)])) plt.close() return [np.min(data),np.max(data)] # 2d Scatterplot colored by the chain temperature -def scatter_cbc_chains(param1, param2, chainmin = 0, chainmax = RUN_FLAGS.Nchain, runFlags = RUN_FLAGS, plotsDir = 'plots/'): +def scatter_cbc_chains(param1, param2, chainmin = 0, chainmax = RUN_FLAGS.Nchain, runFlags = RUN_FLAGS, plotsDir = 'plots/', cmap_name = 'viridis'): fig, ax = plt.subplots(figsize = (6,6)) - cmap = mpl.cm.get_cmap('plasma') + cmap = mpl.cm.get_cmap(cmap_name) norm = mpl.colors.Normalize(vmin=0, vmax=runFlags.Nchain) # Use the nice latex for the x and y axis ax.set_xlabel(cbc_dict[param1][2]) @@ -1043,16 +817,16 @@ def scatter_cbc_chains(param1, param2, chainmin = 0, chainmax = RUN_FLAGS.Nchain for chain_index in np.arange(chainmin, chainmax): param_chains = np.genfromtxt('./chains/cbc_params.dat.%i'%chain_index) - ax.scatter(param_chains[:, index1], param_chains[:, index2], + ax.plot(param_chains[:, index1], param_chains[:, index2], ',', color = cmap(norm(chain_index)), zorder = 19 - chain_index, alpha = 0.05) #alpha = 1/(chain_index + 1)) plt.savefig(plotsDir + '{0}_{1}_2dscatter.png'.format(param1, param2)) -def scatter_chains_wavelet(param1, param2, ifo, model, chainmin = 0, chainmax = RUN_FLAGS.Nchain, runFlags = RUN_FLAGS, plotsDir = 'plots/'): +def scatter_chains_wavelet(param1, param2, ifo, model, chainmin = 0, chainmax = RUN_FLAGS.Nchain, runFlags = RUN_FLAGS, plotsDir = 'plots/', cmap_name = 'viridis'): # Make sure model = cbc for a CBCType run fig, ax = plt.subplots(figsize = (6,6)) - cmap = mpl.cm.get_cmap('plasma') + cmap = mpl.cm.get_cmap(cmap_name) norm = mpl.colors.Normalize(vmin=0, vmax=runFlags.Nchain) ax.set_xlabel(param1) ax.set_ylabel(param2) @@ -1061,16 +835,20 @@ def scatter_chains_wavelet(param1, param2, ifo, model, chainmin = 0, chainmax = if runFlags.CBCType: # TODO make this work for signalCBC run # I don't know what the filename would be for a signalCBC run. 
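+        # filename patterns assumed by the branches below:
+        #   cbc    -> chains/cbc_params_<ifo>.dat.<chain_index>
+        #   signal -> chains/signal_params_h0.dat.<chain_index>
+        #   other  -> chains/<model>_params_<ifo>.dat.<chain_index>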
-        filename = 'chains/cbc_params_{1}.dat.{2}'.format(model, ifo, chain_index)
+        filename = 'chains/cbc_params_{ifo}.dat.{chain_index}'.format(ifo = ifo, chain_index = chain_index)
+      elif model == 'signal':
+        filename = 'chains/{model}_params_h0.dat.{chain_index}'.format(model = model, chain_index = chain_index)
       else:
-        filename = 'chains/{0}_params_{1}.dat.{2}'.format(model, ifo, chain_index)
-      param_chains = get_wavelet_params(filename, model)
+        filename = 'chains/{model}_params_{ifo}.dat.{chain_index}'.format(model = model, ifo = ifo, chain_index = chain_index)
+      param_chains = get_wavelet_params(filename, model, chirpflag = runFlags.chirplets)
 
-      ax.scatter(param_chains[param1], param_chains[param2],
+      ax.plot(param_chains[param1], param_chains[param2], ',',
              color = cmap(norm(chain_index)), zorder = 19 - chain_index,
              alpha = 0.05) #alpha = 1/(chain_index + 1))
-    plt.savefig(plotsDir + '{2}{5}_{0}_{1}_2dscatter_chains_{3}_{4}.png'.format(param1, param2, model, chainmin, chainmax, ifo))
+    plt.savefig(plotsDir + '{model}{ifo}_{param1}_{param2}_2dscatter_chains_{chainmin}_{chainmax}.png'.format(param1 = param1, param2 = param2,
+                                                                                                              model = model, chainmin = chainmin,
+                                                                                                              chainmax = chainmax, ifo = ifo))
     plt.close()
@@ -1079,7 +857,8 @@ def scatter_chains_wavelet(param1, param2, ifo, model, chainmin = 0, chainmax =
 # -------
 # Corner plot for CBC Parameters
-def make_2d_hist_cbc(ax, param_names, Params, cbc_dict, mdc, sim_table = None):
+# -------
+def make_2d_hist_cbc(ax, param_names, Params, cbc_dict, mdc, runFlags = RUN_FLAGS):
 
     NUM_LEVELS = 10
     index1, index2 = cbc_dict[param_names[0]][0], cbc_dict[param_names[1]][0]
@@ -1098,8 +877,8 @@ def make_2d_hist_cbc(ax, param_names, Params, cbc_dict, mdc, sim_table = None):
     #ax.plot(2,3,label=r'CBC+glitch+noise')
 
     if mdc:
-        xinj = get_xml_parameter(param_names[0], sim_table)
-        yinj = get_xml_parameter(param_names[1], sim_table)
+        xinj = runFlags.get_xml_parameter(param_names[0])
+        yinj = runFlags.get_xml_parameter(param_names[1])
         ax.plot(xinj, yinj,'x',color='k',markersize=10)
     #ax.set_xlim(-1,1)
     #ax.set_ylim(200,1500)
@@ -1111,7 +890,6 @@ def make_2d_hist(ax, Xd, Yd, N = 10):
 
     #ax.scatter(Xd, Yd, color = 'blue', s = 2, alpha = 0.05)
-    #ax.hexbin(Xd, Yd, gridsize = 100, bins = 'log')
 
     xmin=Xd.min()
     xmax=Xd.max()
@@ -1126,11 +904,11 @@ def make_2d_hist(ax, Xd, Yd, N = 10):
     ax.grid(True, alpha=0.5)
     return
 
-def corner_plot_hist(ax, param_name, cbc_dict, mdc, sim_table = None):
+def corner_plot_hist(ax, param_name, cbc_dict, mdc, runFlags = RUN_FLAGS):
     index = cbc_dict[param_name][0]
     n, bins, patches = ax.hist(Params[:, index], bins = int(np.sqrt(len(Params[:, index]))), color = '#D69E8D')
     if mdc:
-        injected = get_xml_parameter(param_name, sim_table)
+        injected = runFlags.get_xml_parameter(param_name)
         ax.vlines([injected], [0], [max(n)], color = injcolor)
     return
 
@@ -1138,7 +916,7 @@ def corner_plot_hist(ax, param_name, cbc_dict, mdc, sim_table = None):
 # Makes a corner plot from a dictionary
 # --------------------
 def make_corner_plot_dict(Dict, skip = [], plotsDir= 'plots/', name = 'glitch_cornerplot.png',
-                           chaincolor = '#736D6B', histcolor = '#D69E8D', names = None, scatter_color = 'blue'):
+                           chaincolor = '#736D6B', histcolor = '#D69E8D', names = None, scatter_color = 'blue', title = None):
     key_list = list(Dict.keys())
     keys = []
     for k in key_list:
@@ -1148,6 +926,7 @@ def make_corner_plot_dict(Dict, skip = [], plotsDir= 'plots/', name = 'glitch_co
             keys.append(k)
     print("making cornerplot 
with", keys) fig, axes = plt.subplots(len(keys), len(keys) + 1, figsize = (10,10)) + fig.suptitle(title) for i in range(len(keys)): # over rows for j in range(len(keys) + 1): # over columns # indexes the @@ -1157,7 +936,7 @@ def make_corner_plot_dict(Dict, skip = [], plotsDir= 'plots/', name = 'glitch_co ax.hist(Dict[keys[i]], bins = int(np.sqrt(len(Dict[keys[i]]))), color = histcolor) elif j == i + 1: # plot 1/10 of the chains - ax.scatter(np.arange(0, len(Dict[keys[i]]), 10), Dict[keys[i]][::10], color = chaincolor) + ax.plot(np.arange(0, len(Dict[keys[i]]), 10), Dict[keys[i]][::10], ',', color = chaincolor) elif j > i: ax.set_visible(False) continue @@ -1166,16 +945,17 @@ def make_corner_plot_dict(Dict, skip = [], plotsDir= 'plots/', name = 'glitch_co if len(Dict[keys[j]]) > 5e5: n = int(len(Dict[keys[j]]) / 5e5) print("Dict is long! Sampling only 5e5 of them", keys[j], len(Dict[keys[j]]), len(Dict[keys[j]]) / n) - ax.scatter(Dict[keys[j]][::n],Dict[keys[i]][::n], + ax.plot(Dict[keys[j]][::n],Dict[keys[i]][::n], ',', alpha=0.1, color = scatter_color) # makes sure that the kernel estimate isn't too huge! #make_2d_hist(ax, np.array(Dict[keys[j]])[::n], np.array(Dict[keys[i]])[::n]) else: - ax.scatter(Dict[keys[j]],Dict[keys[i]], + ax.plot(Dict[keys[j]],Dict[keys[i]], ',', alpha=0.1, color = scatter_color) #make_2d_hist(ax, np.array(Dict[keys[j]]), np.array(Dict[keys[i]])) - except ValueError: - print("making 2d hist, slice wasn't 2d, continuing") + except ValueError as e: + print("making 2d hist, slice on {key1} and {key2} wasn't 2d, continuing".format(key1 = keys[j], key2 = keys[i])) + print(e) # set labels if j == 0: @@ -1202,7 +982,7 @@ def make_corner_plot_dict(Dict, skip = [], plotsDir= 'plots/', name = 'glitch_co def make_cbc_corner_plot(param_names, Params, cbc_dict, plotsDir, mdc, - sim_table = None, chain_index = 0, chain_color = '#736D6B', scatter_color = 'blue'): + chain_index = 0, chain_color = '#736D6B', scatter_color = 'blue', runFlags = RUN_FLAGS): print('making corner_plot with cbc parameters') fig, axes = plt.subplots(len(param_names), len(param_names) + 1, figsize = (20,20)) for i in range(len(param_names)): # rows (y axis) @@ -1211,32 +991,40 @@ def make_cbc_corner_plot(param_names, Params, cbc_dict, plotsDir, mdc, ax = axes[i, j] if j == i: - corner_plot_hist(ax, param_names[i], cbc_dict, mdc, sim_table) + # Plot the histogram of this parameter + if mdc: + inj = runFlags.get_xml_parameter(param_names[i]) + corner_plot_hist(ax, param_names[i], cbc_dict, mdc) elif j == i + 1: if mdc: - inj = get_xml_parameter(param_names[i], sim_table) - #TODO plot injected on chain + # plot what injected value actuall was + inj = runFlags.get_xml_parameter(param_names[i]) + ax.plot([0,len(Params[:, cbc_dict[param_names[i]][0]])], + [inj, inj], color = set_color('injection')) # Plot the chains for this parameter, but only plotting 1/10th of the total samples - ax.scatter(np.arange(0,len(Params[:, cbc_dict[param_names[i]][0]]), 10), - Params[:, cbc_dict[param_names[i]][0]][::10], color = chain_color) + ax.plot(np.arange(0,len(Params[:, cbc_dict[param_names[i]][0]]), 10), + Params[:, cbc_dict[param_names[i]][0]][::10], ',', color = chain_color) + # plot the injected value + elif j > i: ax.set_visible(False) continue else: + # plot the 2d histogram of these parameters param_index_x = cbc_dict[param_names[j]][0] param_index_y = cbc_dict[param_names[i]][0] if len(param_names[i]) > 5e5: n = max([int(len(param_names[i]) / 5e5), 1]) - ax.scatter(Params[:, param_index_x][::n], Params[:, 
+                    ax.plot(Params[:, param_index_x][::n], Params[:, param_index_y][::n], ',',
                             alpha=0.1, color = scatter_color)
-                    #make_2d_hist_cbc(ax, [param_names[j], param_names[i]], Params, cbc_dict, mdc, sim_table)
+                    #make_2d_hist_cbc(ax, [param_names[j], param_names[i]], Params, cbc_dict, mdc)
                 else:
-                    ax.scatter(Params[:, param_index_x], Params[:, param_index_y],
+                    ax.plot(Params[:, param_index_x], Params[:, param_index_y], ',',
                             alpha=0.1, color = scatter_color)
-                    #make_2d_hist_cbc(ax, [param_names[j], param_names[i]], Params, cbc_dict, mdc, sim_table)
+                    #make_2d_hist_cbc(ax, [param_names[j], param_names[i]], Params, cbc_dict, mdc)
                 if mdc:
-                    xinj = get_xml_parameter(param_names[j], sim_table)
-                    yinj = get_xml_parameter(param_names[i], sim_table)
+                    xinj = runFlags.get_xml_parameter(param_names[j])
+                    yinj = runFlags.get_xml_parameter(param_names[i])
                     ax.plot(xinj, yinj,'x',color='k',markersize=10)
 
             # sets y axis names
@@ -1252,10 +1040,11 @@ def make_cbc_corner_plot(param_names, Params, cbc_dict, plotsDir, mdc,
             else:
                 ax.set_xticklabels([])
 
+    fig.suptitle("CBC Cornerplot Chain {0}".format(chain_index))
     plt.savefig(plotsDir + 'cbc_cornerplot_{0}.png'.format(chain_index))
    plt.close()
 
-def make_skymap(RA, sindec, injected = None, plotsDir = "plots/", chain_index = 0, scatter_color = 'blue'):
+def make_skymap(RA, sindec, injected = None, plotsDir = "plots/", chain_index = 0, scatter_color = 'blue', cmap_name = 'viridis'):
     index = np.arange(0, len(RA))
     fig, axs = plt.subplots(2,2, figsize = (8,8))
     #cmap = mpl.cm.get_cmap('plasma')
@@ -1265,7 +1054,7 @@ def make_skymap(RA, sindec, injected = None, plotsDir = "plots/", chain_index =
     ax = axs[1,0]
     if injected is not None:
         ax.scatter(injected[0], injected[1], color = 'black', zorder = 10)
-    ax.hist2d(RA,sindec, bins = int(np.sqrt(len(RA))), cmap = 'plasma')
+    ax.hist2d(RA,sindec, bins = int(np.sqrt(len(RA))), cmap = cmap_name)
     ax.set_xlabel('RA')
     ax.set_ylabel('sindec')
@@ -1273,7 +1062,7 @@ def make_skymap(RA, sindec, injected = None, plotsDir = "plots/", chain_index =
     ax = axs[0,1]
     if injected is not None:
         ax.scatter(injected[0], injected[1], color = 'black', zorder = 10)
-    ax.scatter(RA, sindec, alpha = 0.5, color = scatter_color)
+    ax.plot(RA, sindec, ',', alpha = 0.5, color = scatter_color)
 
     #ax.set_xlim(0, 2 * np.pi)
     #ax.set_ylim(-1, 1)
@@ -1282,12 +1071,12 @@
     # Plot chains for sindec
     if injected is not None:
         ax.plot([0, len(sindec)], [injected[1], injected[1]], color = 'teal')
-    ax.plot(index[::10], sindec[::10], color = '#736D6B')
+    ax.plot(index[::10], sindec[::10], ',', color = '#736D6B')
 
     ax = axs[0, 0]
     if injected is not None:
         ax.plot([0, len(RA)], [injected[0], injected[0]], color = 'teal')
-    ax.plot(index[::10], RA[::10], color = '#736D6B')
+    ax.plot(index[::10], RA[::10], ',', color = '#736D6B')
 
     plt.savefig(plotsDir + 'cbc_skymap_{0}.png'.format(chain_index))
     plt.close()
@@ -1301,7 +1090,7 @@ def make_histogram(moments, moment, model, ifo, plotsDir):
 
     histmoments = moments[moment]
 
-    if moment == 't0_rec': # Center recovered time at 0s # TODO I don't think this time offset is right if there is a CBC window parameter in post
+    if moment == 't0_rec':
         rn,rbins,patches = plt.hist(histmoments - 2, bins=50, label='Recovered', alpha=0.5, linewidth=0, color=colour)
     elif 'overlap' in moment: # For overlap want to make sure we get the right axes range
         rbins = np.arange(-1.02,1.02,0.01)
@@ -1465,7 +1254,7 @@ def plot_likelihood_2(modelList, plotsDir, runFlags = RUN_FLAGS):
 def plot_chains(param_chain, param_name, plotsDir, runFlags, color = 'mediumseagreen'):
     fig, ax = plt.subplots()
     index = np.arange(0, len(param_chain), 1)
-    ax.scatter(index, param_chain, color = color)
+    ax.plot(index, param_chain, ',', color = color)
     ax.set_xlabel('Chain index')
     ax.set_ylabel(param_name)
     plt.savefig(plotsDir + 'cbc_{0}_chain.png'.format(param_name))
@@ -1476,7 +1265,6 @@ def plot_hists(param_chain, param_name, plotsDir, runFlags, color = 'mediumseag
     N_bins = int(np.sqrt(len(param_chain)))
     n, bins, patches, = ax.hist(param_chain, bins = N_bins, color = color, alpha = 0.5, linewidth = 0, zorder = 0)
     if injected_value is not None:
-        print('plotting injected value')
         ax.vlines(injected_value, 0, max(n), color = injcolor, label = 'injected', zorder = 10)
     ax.set_xlabel(param_name)
     ax.set_ylabel('counts')
@@ -1527,7 +1315,8 @@ def plot_model_dims(modelList, ifoList, ifoNames, plotsDir, runFlags = RUN_FLAGS
         glitchOnly_flag == 1
 
     # -- Variation of the model dimension over MCMC iterations. 2 subplots for signal and glitch models
-    if len(modelList) == 1:
+    # if glitch and not signal or signal and not glitch
+    if ('glitch' in modelList) != ('signal' in modelList):
         fig = plt.figure()
         ax1 = plt.subplot(111)
         ax1.set_title('Model Dimension')
@@ -1549,7 +1338,7 @@ def plot_model_dims(modelList, ifoList, ifoNames, plotsDir, runFlags = RUN_FLAGS
         ax1.grid()
         plt.savefig(plotsDir+'model_dimensions.png')
         plt.close()
-    elif len(modelList) == 2:
+    elif ('glitch' in modelList) and ('signal' in modelList):
        fig, (ax1, ax2) = plt.subplots(2, sharex=True, sharey=True)
         ax1.set_title('Model Dimension')
         if len(signalChains) > 0:
@@ -1576,12 +1365,8 @@ def plot_model_dims(modelList, ifoList, ifoNames, plotsDir, runFlags = RUN_FLAGS
         plt.savefig(plotsDir+'model_dimensions.png')
         plt.close()
 
-    if len(modelList) == 2:
-        # -- Histogram of the model dimension.  2 subplots for signal and glitch models
-        if len(signalChains) > 0 and len(glitchChains) > 0:
-            fig, (ax1, ax2) = plt.subplots(2, sharex=True)
-        else:
-            fig, (ax1, ax2) = plt.subplots(2, sharex=True)
+    fig, (ax1, ax2) = plt.subplots(2, sharex=True)
+    ax1.set_title('Model Dimension Histogram')
     if len(signalChains) > 0:
         n,bins,patches = ax1.hist(signalChains, bins=np.arange(int(min(signalChains))-.5, int(max(signalChains))+1.5, 1), histtype='bar', color=scolor, log=False)
@@ -1663,23 +1448,111 @@ def plot_model_dims(modelList, ifoList, ifoNames, plotsDir, runFlags = RUN_FLAGS
 
     return sig_noise, sig_gl
 
+#--------------------
+# plot cleaning phase
+#--------------------
+def plot_cleaning_phase_time_domain(runFlags, plotsDir='plots/'):
+
+    if not runFlags.noClean:
+        print('making cleaning phase plot(s)')
+
+        #find window ends
+        window_start = runFlags.gps - runFlags.segment_start + runFlags.window / 2
+        window_end = runFlags.gps - runFlags.segment_start - runFlags.window / 2
+
+        color = set_color(runFlags.run_dirname())
+        cleancolor = set_color('clean')
+
+        for ifo in ifoNames:
+            name = 'cleaning_phase_time_domain_plot_{0}'.format(ifo)
+            f, ax = plt.subplots(dpi=200)
+            #get glitches found in cleaning phase & those found in run
+            clean = get_waveform('post/clean/glitch_median_time_domain_waveform_{0}.dat'.format(ifo))
+            if runFlags.cleanOnly_flag:
+                run = clean
+            else:
+                run = get_waveform('post/{1}/{1}_median_time_domain_waveform_{0}.dat'.format(ifo, runFlags.run_dirname()))
+
+
+            if runFlags.mdc:
+                # plot injected signal
+                injected = np.genfromtxt('post/injected_whitened_waveform_{ifo}.dat'.format(ifo = ifo))
+                ax.plot(clean[0], injected ,color = set_color('injected'), label = 'injected', alpha=0.7,zorder=2,linewidth=1)
+            #plot cleaning phase waveform
+            ax.plot(clean[0], clean[1],color = cleancolor, label = 'clean', alpha=0.7,zorder=10,linewidth=1)
+            ax.fill_between(clean[0], clean[4], clean[5],color = cleancolor, alpha=0.3,edgecolor=None,zorder=8)
+            #plot run waveform
+            ax.plot(run[0], run[1],color = color, label = 'run',linewidth=1)
+            ax.fill_between(run[0], run[4], run[5],color = color, alpha=0.3,edgecolor=None)
+
+            #plot window
+            ax.axvspan(window_start, window_end, color='gray',alpha=0.2,zorder=1,label='window')
+
+            #other plot elements
+            ax.legend(fontsize=6); ax.set_xlim(0, runFlags.seglen)
+            ax.set_xlabel('Time (s)'); ax.set_ylabel('Whitened Strain')
+            ax.set_title(runFlags.run_dirname()+', '+ifo)
+            plt.savefig(plotsDir+name); plt.close()
+
+def plot_cleaning_phase_frequency_domain(runFlags, plotsDir='plots/'):
+
+    if not runFlags.noClean:
+        color = set_color(runFlags.run_dirname())
+        for ifo in ifoNames:
+            name = 'cleaning_phase_frequency_domain_plot_{0}'.format(ifo)
+            f, ax = plt.subplots(dpi=200)
+
+            #get glitches found in cleaning phase & those found in run
+            clean = get_waveform('post/clean/glitch_median_frequency_domain_waveform_spectrum_{0}.dat'.format(ifo))
+            if not runFlags.cleanOnly_flag:
+                run = get_waveform('post/{1}/{1}_median_frequency_domain_waveform_spectrum_{0}.dat'.format(ifo, runFlags.run_dirname()))
+            else:
+                run = clean
+
+            #get psd in cleaning phase and full run
+            clean_psd = get_waveform('post/clean/glitch_median_PSD_{0}.dat'.format(ifo))
+            if not runFlags.cleanOnly_flag:
+                run_psd = get_waveform('post/{1}/{1}_median_PSD_{0}.dat'.format(ifo, runFlags.run_dirname()))
+            else:
+                run_psd = clean_psd
+
+            #plot cleaning phase waveform
+            ax.plot(clean[0], clean[1],color = set_color('clean'), label = 'clean', alpha=0.7,zorder=10,linewidth=1)
+            ax.fill_between(clean[0], clean[4], clean[5],color = set_color('clean'), alpha=0.3,edgecolor=None,zorder=8)
+
+            #plot run waveform
+            ax.plot(run[0], run[1],color = color, label = 'run',linewidth=1)
+            ax.fill_between(run[0], run[4], run[5],color = color, alpha=0.3,edgecolor=None)
+
+            #plot PSDs
+            ax.plot(clean_psd[0],clean_psd[1],color='darkred',linewidth=1,label='clean psd')
+            ax.plot(run_psd[0],run_psd[1],color='k',linewidth=1,label='run psd')
+
+            #other plot elements
+            ax.legend(fontsize=6,ncol=2)
+            ax.set_xlim(runFlags.flow, runFlags.srate/2); ax.set_ylim(1e-50,1e-42)
+            ax.set_xscale('log'); ax.set_yscale('log')
+            ax.set_xlabel('Frequency (Hz)'); ax.set_ylabel('Power')
+            ax.set_title(ifo)
+            plt.savefig(plotsDir+name); plt.close()
 
 ######################################################################################################################
-
 # BWB webpage production
 #
 ######################################################################################################################
 
 def whitened_residual_plots(model,ifoList,ifoNames,runFlags = RUN_FLAGS):
-    if model == 'glitch':
+    if model == 'glitch' or model == 'clean':
         lineColors = ifoColors
     elif ((mod == 'signal') or (mod == 'cbc')):
         lineColors = signal_ifoColorList
     elif model == 'noise':
         colour = ncolor
         lineColors = ifoColors
+    else:
+        lineColors = ifoColors
 
     hashlist = ['solid','dashed','dashdot','solid','dashed','dashdot']
@@ -1696,13 +1569,9 @@ def whitened_residual_plots(model,ifoList,ifoNames,runFlags = RUN_FLAGS):
     whitened = {}
     psd_info = {}
+    local_runType, local_model = runType_and_model(model, runFlags = RUN_FLAGS)
     for ifo in ifoList:
-
-        if runFlags.multi_type:
-            filename = 'post/{2}/{1}_median_PSD_{0}.dat'.format(ifoNames[int(ifo)],model, runFlags.run_dirname())
-        else:
-            filename = 'post/{1}/{1}_median_PSD_{0}.dat'.format(ifoNames[int(ifo)],model)
-
+        filename = 'post/{runType}/{model}_median_PSD_{ifo}.dat'.format(runType = local_runType, model = local_model, ifo = ifoNames[int(ifo)])
         print("whitened_residual_plots filename 1: " + filename)
 
         psd_info[ifo] = get_waveform(filename)
@@ -1710,11 +1579,7 @@ def whitened_residual_plots(model,ifoList,ifoNames,runFlags = RUN_FLAGS):
         while psd_info[ifo][1][imin] >= 1.0:
             imin += 1
-        if runFlags.multi_type:
-            filename = 'post/{2}/fourier_domain_{1}_median_residual_{0}.dat'.format(ifoNames[int(ifo)], model, runFlags.run_dirname())
-        else:
-            filename = 'post/{1}/fourier_domain_{1}_median_residual_{0}.dat'.format(ifoNames[int(ifo)],model)
-
+        filename = 'post/{runType}/fourier_domain_{model}_median_residual_{ifo}.dat'.format(runType = local_runType, model = local_model, ifo = ifoNames[int(ifo)])
         print("whitened_residual_plots filename 2: " + filename)
 
         residual[ifo] = np.genfromtxt(filename)
@@ -1776,11 +1641,13 @@ def make_verbose_page(htmlDir, plotsDir, runFlags = RUN_FLAGS):
 
     # put in tf plots for glitch
     for ifo in ifoNames:
-        plotsrc = './' + plotsDir + '{2}{5}_{0}_{1}_2dscatter_chains_{3}_{4}.png'.format('t', 'f', 'glitch', 0, runFlags.Nchain, ifo)
+        plotsrc = './' + plotsDir + '{model}{ifo}_{param1}_{param2}_2dscatter_chains_{chainmin}_{chainmax}.png'.format(param1 = 't',
+                param2 = 'f', model = 'glitch', chainmin = 0, chainmax = runFlags.Nchain, ifo = ifo)
         subpage.write(' '+plotsrc+'␍\n')
 
     # put in QA plots for glitch
     for ifo in ifoNames:
-        plotsrc = './' + plotsDir + '{2}{5}_{0}_{1}_2dscatter_chains_{3}_{4}.png'.format('Q', 'A', 'glitch', 0, runFlags.Nchain, ifo)
+        plotsrc = './' + plotsDir + '{model}{ifo}_{param1}_{param2}_2dscatter_chains_{chainmin}_{chainmax}.png'.format(param1 = 'Q',
+                param2 = 'logA', model = 'glitch', chainmin = 0, chainmax = runFlags.Nchain, ifo = ifo)
         subpage.write(' '+plotsrc+'␍\n')
 
     if 'cbc' in runFlags.modelList:
@@ -1834,8 +1701,10 @@ def make_skymap_page(htmlDir, plotsDir, runFlags = RUN_FLAGS):
 
     if runFlags.CBCType:
         for chain in range(runFlags.Nchain):
-            plotsrc = './' + plotsDir + 'cbc_skymap_{0}.png'.format(chain)
-            subpage.write(' '+plotsrc+'␍\n')
+            # Only make the 0th skymap unless verbose is on
+            if runFlags.verbose or chain == 0:
+                plotsrc = './' + plotsDir + 'cbc_skymap_{0}.png'.format(chain)
+                subpage.write(' '+plotsrc+'␍\n')
 
     # Display all the megasky results: remember to run megasky first, then megaplot
     if os.path.exists(str(jobName)+'megasky_results.dat'):
@@ -1879,12 +1748,12 @@ def splash_page(plotsDir, ifoNames, snrList, bayeswaverunfile, sig_gl, sig_noise
         html_string += 'Off␍\n'
     html_string += '    Signal model: '
-    if not restrictModel == 'glitch':
+    if 'signal' in runFlags.modelList:
         html_string += 'On␍\n'
     else:
         html_string += 'Off␍\n'
     html_string += '    Glitch model: '
-    if not restrictModel == 'signal':
+    if 'glitch' in runFlags.modelList:
         html_string += 'On␍\n'
     else:
         html_string += 'Off␍\n'
@@ -1899,13 +1768,9 @@ def splash_page(plotsDir, ifoNames, snrList, bayeswaverunfile, sig_gl, sig_noise
         html_string += '    {0} injected with SNR {1:.1f}␍\n'.format(ifo, snr)
     html_string += '␍ Log Info ␍\n'
     html_string += '    See full log file\n'
-    if not restrictModel:
-        html_string +='␍ This is a {0} only run. ␍\n'.format(restrictModel)
+    if len(runFlags.modelList) == 1 and runFlags.modelList != ['clean']:
+        html_string +='␍ This is a {0} only run. ␍\n'.format(runFlags.modelList[0])
         html_string +='    There are no Bayes factors for this run since it\'s only a single model.\n'
-    if not (runFlags.fullOnly_flag or runFlags.GlitchCBC_flag):
-        html_string +='␍ Evidence for Signal ␍\n'
-        html_string +='    log(Evidence_signal / Evidence_glitch) = {0:.1f} ± {1:.1f}␍\n'.format(sig_gl,err_sig_gl)
-        html_string +='    log(Evidence_signal / Evidence_noise) = {0:.1f} ± {1:.1f}␍\n'.format(sig_noise,err_sig_noise)
     elif runFlags.fullOnly_flag:
         html_string +='␍ Bayes factor for signal+glitch vs signal only: ␍\n'
         html_string +='    {0}␍\n'.format(sig_noise)
@@ -1926,56 +1791,20 @@ def splash_page(plotsDir, ifoNames, snrList, bayeswaverunfile, sig_gl, sig_noise
         html_string +='␍ Bayes factor for cbc+signal vs signal only: ␍\n'
         html_string +='    {0}␍\n'.format(sig_gl)
 
-    # put anderson darling statistic on 1st page
-    # only exists if running with bayesLine flag
-    if runFlags.bayesLine:
-        if runFlags.multi_type:
-            models = [runFlags.run_dirname()]
-            for m in runFlags.modelList:
-                models.append(m)
-        else:
-            models = runFlags.modelList
-        for m in models:
-            html_string += '\n'
-            html_string += '\t{0} Model\n'.format(m)
-            html_string += '\t\n'
-            html_string += '\t\tIFO\n'
-            html_string += '\t\tSample Rate [Hz]\n'
-            html_string += '\t\tAD statistic\n'
-            html_string += '\t\tP value\n'
-            html_string += '\t\n'
-
-            for ifo in ifoNames:
-
-                AD_file = postDir + runFlags.run_dirname() + '/' + m + '_anderson_darling_' + ifo + '.dat'
-
-                sample_rate, AD, p_value = np.loadtxt(AD_file, unpack = True)
-                for j in range(len(AD)):
-                    html_string += '\t\n'
-                    if j == 0:
-                        html_string += '\t\t{0}\n'.format(ifo, len(AD))
-                    html_string += '\t\t{0}\n'.format(sample_rate[j])
-                    html_string += '\t\t{0}\n'.format(AD[j])
-                    html_string += '\t\t{0}\n'.format(p_value[j])
-                    html_string += '\t\n'
-            html_string += '\n'
-
-
-
-
     html_string +='␍ Evidence for Signal ␍\n'
     if not runFlags.fullOnly_flag:
-        if restrictModel == 'signal':
+        if 'glitch' not in runFlags.modelList:
            html_string += '    log(Evidence_signal / Evidence_glitch) = N/A (no glitch model)␍\n'
-        elif restrictModel == 'glitch':
+        elif 'signal' not in runFlags.modelList:
            html_string += '    log(Evidence_signal / Evidence_glitch) = N/A (no signal model)␍\n'
         else:
            html_string +='    log(Evidence_signal / Evidence_glitch) = {0:.1f} ± {1:.1f}␍\n'.format(sig_gl,err_sig_gl)
+        # the noise model was included in this run
         if not runFlags.noNoiseFlag:
            html_string +='    log(Evidence_signal / Evidence_noise) = {0:.1f} ± {1:.1f}␍\n'.format(sig_noise,err_sig_noise)
         else:
-           html_string += '    log(Evidence_signal / Evidence_noise) = N/A (no noise model)\n'
+           html_string +='    log(Evidence_signal / Evidence_noise) = N/A (no noise model)\n'
     else:
        html_string +='    log(Bayes factor for signal+glitch vs signal only):\n'
        html_string +='    {0}␍\n'.format(np.log(sig_noise))
@@ -2031,43 +1860,24 @@ def make_index(htmlDir, plotsDir, model, gps, ifoList, ifoNames, snrList, bayesw
 
     # make links to model webpages with timeseries and the like
     for mod in runFlags.modelList:
+        if mod == 'clean':
+            continue
         index.write('