Commit 730b12e7 authored by Richard O'Shaughnessy's avatar Richard O'Shaughnessy

Merge branch 'temp-RIT-Tides-port_master' into temp-RIT-Tides-port_master-GPUIntegration

Incomplete port of ...NoLoop to cupy. Not yet verified to still work correctly with numpy (as it did previously), nor tested with cupy
parents 526c1478 39093415
#! /usr/bin/env python
#
#
# GOAL
# - takes two sets of samples, and some parameter(s)
# - should be able to interchange samples provided with ILE *.xml.gz, *.composite, or posterior samples (preferred). FLEXIBILITY NOT YET IMPLEMENTED.
# Postfix determines behavior
# - performs specified test, with specified tolerance, to see if they are 'similar enough'
# - returns FAILURE if test is a success (!), so a condor DAG will terminate
#
# EXAMPLES
# convergence_test_samples.py --samples GW170823_pure_NR_and_NRSur7dq2_lmax3_fmin20_C02_cleaned_alignedspin_zprior.dat --samples GW170823_pure_NR_and_NRSur7dq2_lmax3_fmin20_C02_cleaned_alignedspin_zprior.dat --parameter m1 --parameter m2 # test samples against themselves, must return 0!
#
# RESOURCES
# Based on code in util_DriverIterateILEFitPosterior*.py
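# Illustrative usage sketch (added for clarity; file names are hypothetical). Because the exit code is
# inverted -- a nonzero exit means the two sample sets already agree, so the DAG should stop iterating --
# a shell wrapper can key off the failure branch:
#   convergence_test_samples.py --samples iter3_posterior.dat --samples iter4_posterior.dat \
#       --parameter mc --parameter eta --method lame || echo "converged; stop iterating"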
import numpy as np
import argparse
import scipy.stats
import numpy.linalg as la
import sys
parser = argparse.ArgumentParser()
parser.add_argument("--samples", action='append', help="Samples used in convergence test")
parser.add_argument("--parameter", action='append', help="Parameters used in convergence test")
parser.add_argument("--parameter-range", action='append', help="Parameter ranges used in convergence test (used if KDEs or similar knowledge of the PDF is needed). If used, must specify for ALL variables, in order")
parser.add_argument("--method", default='lame', help="Test to perform: lame|ks1d|...")
parser.add_argument("--threshold",default=None, help="Manual threshold for the test being performed. (If not specified, the success condition is determined by default for that diagnostic, based on the samples size and properties")
parser.add_argument("--test-output", help="Filename to return output. Result is a scalar >=0 and ideally <=1. Closer to 0 should be good. Second column is the diagnostic, first column is 0 or 1 (success or failure)")
parser.add_argument("--always-succeed",action='store_true',help="Test output is always success. Use for plotting convergence diagnostics so jobs insured to run for many iterations.")
opts= parser.parse_args()
if opts.samples is None or len(opts.samples)<2:
print " Need at least two sets of samples"
sys.exit(0)
# Test options
#
# (a) lame: Compute a multivariate gaussian estimate (sample mean and variance), and then use KL divergence between them !
# (b) KS_1d: One-dimensional KS test on cumulative distribution
# (c) KL_1d: One-dimensional KL divergence, using KDE estimate. Requires bounded domain; parameter bounds can be passed
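# For reference (standard closed-form result, not specific to this code): for two multivariate Gaussians
# N(mu_1, Sigma_1) and N(mu_2, Sigma_2) in k dimensions, the divergence evaluated by calc_kl below is
#   D_KL( N_1 || N_2 ) = 0.5 * [ tr(Sigma_2^{-1} Sigma_1) + (mu_2-mu_1)^T Sigma_2^{-1} (mu_2-mu_1) - k + ln( det Sigma_2 / det Sigma_1 ) ]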
def calc_kl(mu_1, mu_2, sigma_1, sigma_2, sigma_1_inv, sigma_2_inv):
"""
calc_kl : KL divergence between two multivariate Gaussians. sigma_1 and sigma_2 are the covariance matrices.
"""
return 0.5*(np.trace(np.dot(sigma_2_inv,sigma_1))+np.dot(np.dot((mu_2-mu_1).T, sigma_2_inv), (mu_2-mu_1))-len(mu_1)+np.log(la.det(sigma_2)/la.det(sigma_1)))
def calc_kl_scalar(mu_1, mu_2, sigma_1, sigma_2):
"""
calc_kl_scalar : KL divergence between two 1d Gaussians. sigma_1 and sigma_2 are the *variances* (as returned by np.cov for 1d data), not standard deviations.
"""
return 0.5*np.log(sigma_2/sigma_1) - 0.5 + ((mu_1-mu_2)**2 + sigma_1)/(2*sigma_2)
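# Illustrative sanity checks (added; not part of the original script), with variances as inputs:
#   calc_kl_scalar(0., 0., 1., 1.)   # identical Gaussians -> 0.0
#   calc_kl_scalar(0., 0., 1., 4.)   # 0.5*ln(4) - 0.5 + 1/8 ~ 0.318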
def test_lame(dat1,dat2):
"""
Compute a multivariate gaussian estimate (sample mean and variance), and then use KL divergence between them !
"""
mu_1 = np.mean(dat1,axis=0)
mu_2 = np.mean(dat2,axis=0)
sigma_1 = np.cov(dat1.T)
sigma_2 = np.cov(dat2.T)
if np.isscalar(mu_1) or len(mu_1)==1:
return np.asscalar(calc_kl_scalar(mu_1, mu_2, sigma_1, sigma_2))
else:
sigma_1_inv = np.linalg.inv(sigma_1)
sigma_2_inv = np.linalg.inv(sigma_2)
return calc_kl(mu_1,mu_2, sigma_1, sigma_2, sigma_1_inv, sigma_2_inv)
def test_ks1d(dat1_1d, dat2_1d):
"""
KS test based on two sample sets. Returns the KS D statistic, to be compared against a threshold.
https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.stats.ks_2samp.html
"""
return scipy.stats.ks_2samp(dat1_1d,dat2_1d)[0] # return KS statistic
def test_KL1d(dat1_1d,dat2_1d,range1=None, range2=None):
return None  # KDE-based 1d KL divergence not yet implemented; see the sketch below
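# Hedged sketch (not the author's implementation): one plausible way to realize the KDE-based 1d KL
# divergence described above, evaluating scipy.stats.gaussian_kde estimates on a bounded grid.
# The function name and grid size are illustrative only.
def _sketch_kl1d_kde(dat1_1d, dat2_1d, xmin, xmax, npts=512):
    xvals = np.linspace(xmin, xmax, npts)            # bounded domain, e.g. from --parameter-range
    p = scipy.stats.gaussian_kde(dat1_1d)(xvals)     # KDE of first sample set
    q = scipy.stats.gaussian_kde(dat2_1d)(xvals)     # KDE of second sample set
    p /= np.sum(p); q /= np.sum(q)                   # normalize on the grid
    ok = (p > 0) & (q > 0)                           # guard against log(0)
    return np.sum(p[ok] * np.log(p[ok] / q[ok]))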
# Procedure
samples1 = np.genfromtxt(opts.samples[0], names=True)
samples2 = np.genfromtxt(opts.samples[1], names=True)
param_names1 = samples1.dtype.names; param_names2 = samples2.dtype.names
npts1 = len(samples1[param_names1[0]])
npts2 = len(samples2[param_names2[0]])
# Read in data into array. For now, assume the specific parameters requested are provided.
dat1 = np.empty( (npts1,len(opts.parameter)))
dat2 = np.empty( (npts2,len(opts.parameter)))
indx=0
for param in opts.parameter:
dat1[:,indx] = samples1[param]
dat2[:,indx] = samples2[param]
indx+=1
# Perform test
val_test = np.inf
if opts.method == 'lame':
val_test = test_lame(dat1,dat2)
elif opts.method == 'KS_1d':
val_test = test_ks1d(dat1[:,0],dat2[:,0])
elif opts.method == 'KL_1d':
val_test = test_KL1d(dat1[:,0],dat2[:,0])
else:
print " No known method ", opts.method
print val_test
if opts.always_succeed:
sys.exit(0)
test_threshold = 0.01
if opts.threshold is not None:
test_threshold = float(opts.threshold)
if (val_test < test_threshold):
sys.exit(1)
else:
sys.exit(0)
@@ -35,6 +35,7 @@ optp.add_option("--fref",default=20,type=float,help="Reference frequency. Depend
optp.add_option("--export-extra-spins",action='store_true',help="Reference frequency. Depending on approximant and age of implementation, may be ignored")
optp.add_option("--export-tides",action='store_true',help="Include tidal parameters")
optp.add_option("--export-cosmology",action='store_true',help="Include source frame masses and redshift")
optp.add_option("--export-weights",action='store_true',help="Include a field 'weights' equal to L p/ps")
optp.add_option("--with-cosmology",default="Planck15",help="Specific cosmology to use")
optp.add_option("--convention",default="RIFT",help="RIFT|LI")
opts, args = optp.parse_args()
@@ -58,6 +59,8 @@ if opts.convention == 'LI':
print "lambda1 lambda2 lam_tilde",
if opts.export_cosmology:
print " m1_source m2_source mc_source mtotal_source redshift ",
if opts.export_weights:
print " weights ",
print
for fname in args:
points = table.get_table(utils.load_filename(fname,contenthandler=ligolw.LIGOLWContentHandler), lsctables.SimInspiralTable.tableName)
@@ -126,6 +129,8 @@ if opts.convention == 'LI':
m1_source = pt.mass1/(1+z)
m2_source = pt.mass2/(1+z)
print m1_source, m2_source, mc_here/(1+z), mtot_here/(1+z), z,
if opts.export_weights:
print wt[indx],
print
@@ -140,6 +145,8 @@ if opts.export_tides:
print "lambda1 lambda2",
if opts.export_cosmology:
print " m1_source m2_source redshift ",
if opts.export_weights:
print " weights ",
print
for fname in args:
if ".hdf5" in fname:
@@ -176,6 +183,7 @@ for fname in args:
Nmax = np.max([int(row.simulation_id) for row in points])+1
sim_id = np.array([int(row.simulation_id) for row in points])+1
wt = np.exp(like)*p/ps
for indx in np.arange(len(points)):
pt = points[indx]
@@ -210,5 +218,7 @@ for fname in args:
m1_source = pt.mass1/(1+z)
m2_source = pt.mass2/(1+z)
print m1_source, m2_source, z,
if opts.export_weights:
print wt[indx],
print
# print pt.geocent_end_time + 1e-9* pt.geocent_end_time_ns, pt.coa_phase, pt.inclination, pt.polarization, pt.longitude,pt.latitude, pt.distance, ind like[indx]
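# Clarifying note (added): the exported 'weights' column is the standard importance weight
# wt = exp(lnL) * p / ps (likelihood times physical prior over sampling prior), so weighted
# averages over the exported rows approximate posterior expectation values.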
@@ -707,5 +707,167 @@ def write_unify_sub_simple(tag='unify', exe=None, base=None,target=None,arg_str=
return ile_job, ile_sub_name
def write_convert_sub(tag='convert', exe=None, file_input=None,file_output=None,arg_str='',log_dir=None, use_eos=False,ncopies=1, **kwargs):
"""
Write a submit file for launching a 'convert' job
convert_output_format_ile2inference
"""
exe = exe or which("convert_output_format_ile2inference") # like cat, but properly accounts for *independent* duplicates. (Danger if identical). Also strips large errors
ile_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
if not(arg_str is None or len(arg_str)<2):
ile_job.add_opt(arg_str[2:],'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
ile_job.add_arg(file_input)
#
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file(file_output)
ile_job.add_condor_cmd('getenv', 'True')
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print " LIGO accounting information not available. You must add this manually to integrate.sub !"
return ile_job, ile_sub_name
def write_test_sub(tag='converge', exe=None,samples_files=None, base=None,target=None,arg_str=None,log_dir=None, use_eos=False,ncopies=1, **kwargs):
"""
Write a submit file for launching a convergence test job
"""
exe = exe or which("convergence_test_samples.py")
ile_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
ile_job.add_opt(arg_str[2:],'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# Add options for two parameter files
for name in samples_files:
# ile_job.add_opt("samples",name) # do not add in usual fashion, because otherwise the key's value is overwritten
ile_job.add_opt("samples " + name,'')
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file(target)
ile_job.add_condor_cmd('getenv', 'True')
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print " LIGO accounting information not available. You must add this manually to integrate.sub !"
return ile_job, ile_sub_name
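# Hedged usage sketch (added; file names and the 'dag' object are hypothetical). Note that arg_str is
# passed with its first two characters stripped, hence the leading padding:
#   test_job, test_sub = write_test_sub(tag='converge',
#       samples_files=['posterior_samples-3.dat', 'posterior_samples-4.dat'],
#       arg_str='  --parameter mc --parameter eta --method lame',
#       target='convergence_report.txt', log_dir='logs/')
#   test_node = pipeline.CondorDAGNode(test_job)
#   dag.add_node(test_node)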
def write_plot_sub(tag='converge', exe=None,samples_files=None, base=None,target=None,arg_str=None,log_dir=None, use_eos=False,ncopies=1, **kwargs):
"""
Write a submit file for launching a final plot. Note the user can in principle specify several samples (e.g., several iterations, if we want to diagnose them)
"""
exe = exe or which("plot_posterior_corner.py")
ile_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
ile_job.add_opt(arg_str[2:],'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# Add options for two parameter files
for name in samples_files:
# ile_job.add_opt("samples",name) # do not add in usual fashion, because otherwise the key's value is overwritten
ile_job.add_opt("posterior-file " + name,'')
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file(target)
ile_job.add_condor_cmd('getenv', 'True')
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print " LIGO accounting information not available. You must add this manually to integrate.sub !"
return ile_job, ile_sub_name
def write_init_sub(tag='gridinit', exe=None,arg_str=None,log_dir=None, use_eos=False,ncopies=1, **kwargs):
"""
Write a submit file for launching a grid initialization job.
Note this routine MUST create whatever files are needed by the ILE iteration
"""
exe = exe or which("util_ManualOverlapGrid.py")
ile_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
ile_job.add_opt(arg_str[2:],'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
ile_job.add_condor_cmd('getenv', 'True')
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print " LIGO accounting information not available. You must add this manually to integrate.sub !"
return ile_job, ile_sub_name
@@ -305,6 +305,89 @@ def TestLogLikelihoodInfrastructure(TestDictionary,theEpochFiducial, data_dict,
if bSavePlots:
plt.savefig("FLT-lnL."+fExtensionLowDensity)
# lnLdata (plot), using vectorized packing
if TestDictionary["lnLDataPlotAlt"] and not bNoMatplotlib:
lookupNKDict = {}
lookupKNDict={}
lookupKNconjDict={}
ctUArrayDict = {}
ctVArrayDict={}
rholmArrayDict={}
rholms_intpArrayDict={}
epochDict={}
for det in rholms_intp.keys():
lookupNKDict[det],lookupKNDict[det], lookupKNconjDict[det], ctUArrayDict[det], ctVArrayDict[det], rholmArrayDict[det], rholms_intpArrayDict[det], epochDict[det] = factored_likelihood.PackLikelihoodDataStructuresAsArrays( rholms[det].keys(), rholms_intp[det], rholms[det], crossTerms[det],crossTermsV[det])
# Plot the interpolated lnLData versus *time*
print " ======= lnLdata timeseries at the injection parameters =========="
tvals = np.linspace(tWindowExplore[0],tWindowExplore[1],fSample*(tWindowExplore[1]-tWindowExplore[0]))
for det in detectors:
lnLData = map( lambda x: factored_likelihood.SingleDetectorLogLikelihoodData(theEpochFiducial,rholms_intp, theEpochFiducial+x, Psig.phi, Psig.theta, Psig.incl, Psig.phiref,Psig.psi, Psig.dist, Lmax, det), tvals)
lnLDataEstimate = np.ones(len(tvals))*rhoExpected[det]*rhoExpected[det]
plt.figure(1)
plt.xlabel('t(s) [geocentered] relative to '+lalsimutils.stringGPSNice(theEpochFiducial))
plt.ylabel('lnLdata')
plt.title("lnLdata (interpolated) vs narrow time interval")
indx = [k for k,value in enumerate((tvals>tWindowExplore[0]) * (tvals<tWindowExplore[1])) if value] # gets results if true
lnLfac = 4*np.max([np.abs(lnLData[k]) for k in indx]) # Max in window
if lnLfac < 100:
lnLfac = 100
plt.ylim(-lnLfac,lnLfac) # sometimes we get yanked off the edges. Larger than this isn't likely
tvalsPlot = tvals
plt.plot(tvalsPlot, lnLData,label='Ldata(t)+'+det)
plt.plot(tvalsPlot, lnLDataEstimate,label="$rho^2("+det+")$")
nBinsDiscrete = len(data_dict[det].data.data)#int(fSample*1) # plot all of data, straight up!
tStartOffsetDiscrete = 0 #tWindowExplore[0]-0.5 # timeshift correction *should* already performed by DiscreteSingleDetectorLogLikelihood
tvalsDiscrete = tStartOffsetDiscrete +np.arange(nBinsDiscrete) *1.0/fSample
lnLDataDiscrete = factored_likelihood.DiscreteSingleDetectorLogLikelihoodDataViaArray(tvalsPlot,Psig, lookupNKDict, rholmArrayDict, Lmax=Lmax,det=det)
tvalsDiscrete = tvalsDiscrete[:len(lnLDataDiscrete)]
plt.figure(2)
plt.xlabel('t(s) [not geocentered] relative to '+lalsimutils.stringGPSNice(theEpochFiducial))
plt.ylabel('lnLdata')
nSkip = 1 # len(tvalsDiscrete)/4096 # Go to fixed number of points
lnLDataEstimate = np.ones(len(tvalsDiscrete))*rhoExpected[det]*rhoExpected[det]
plt.plot(tvalsDiscrete, lnLDataEstimate,label="$rho^2("+det+")$")
plt.plot(tvalsDiscrete[::nSkip], lnLDataDiscrete[::nSkip],label='Ldata(t):discrete+'+det)
plt.title('lnLdata(t) discrete, NO TIME SHIFTS')
plt.legend()
tEventRelative =float( Psig.tref - theEpochFiducial)
print " Real time (relative to fiducial start time) ", tEventFiducial, " and our triggering time is ", tEventRelative
plt.figure(1)
plt.plot([tEventFiducial,tEventFiducial],[0,rho2Net], color='k',linestyle='--')
plt.title("lnLdata (interpolated) vs narrow time interval")
plt.xlim(-0.05,0.05)
if bSavePlots:
plt.savefig("FLT-lnLaltData."+fExtensionLowDensity)
print " ======= rholm test: Plot the lnL timeseries at the injection parameters =========="
tvals = np.linspace(tWindowExplore[0],tWindowExplore[1],fSample*(tWindowExplore[1]-tWindowExplore[0]))
print " ... plotting over range ", [min(tvals), max(tvals)], " with npts = ", len(tvals)
P = Psig.copy()
lnL = np.zeros(len(tvals))
for indx in np.arange(len(tvals)):
P.tref = theEpochFiducial+tvals[indx]
lnL[indx] = factored_likelihood.FactoredLogLikelihood(P, rholms, rholms_intp, crossTerms,crossTermsV, Lmax)
lnLEstimate = np.ones(len(tvals))*rho2Net/2
plt.figure(1)
tvalsPlot = tvals
plt.plot(tvalsPlot, lnL,label='lnL(t)')
plt.plot(tvalsPlot, lnLEstimate,label="$rho^2/2(net)$")
indx = [k for k,value in enumerate((tvals>tWindowExplore[0]) * (tvals<tWindowExplore[1])) if value] # gets results if true
lnLfac = 4*np.max([np.abs(lnL[k]) for k in indx]) # Max in window
if lnLfac < 100:
lnLfac = 100
plt.ylim(-lnLfac,lnLfac) # sometimes we get yanked off the edges. Larger than this isn't likely
tEventRelative =float( Psig.tref - theEpochFiducial)
print " Real time (relative to fiducial start time) ", tEventFiducial, " and our triggering time is the same ", tEventRelative
plt.plot([tEventFiducial,tEventFiducial],[0,rho2Net], color='k',linestyle='--')
plt.title("lnL (interpolated) vs narrow time interval")
plt.xlim(-0.05,0.05)
plt.legend()
if bSavePlots:
plt.savefig("FLT-lnL_alt."+fExtensionLowDensity)
# lnLdata (plot)
if TestDictionary["lnLDataPlotVersusPsi"] and not bNoMatplotlib:
print " ======= Code test: Plot the lnL versus psi, at the injection parameters =========="
@@ -90,11 +90,13 @@ optp.add_option("-t", "--event-time", type=float, help="GPS time of the event --
optp.add_option("-i", "--inv-spec-trunc-time", type=float, default=8., help="Timescale of inverse spectrum truncation in seconds (Default is 8 - give 0 for no truncation)")
optp.add_option("-w", "--window-shape", type=float, default=0, help="Shape of Tukey window to apply to data (default is no windowing)")
optp.add_option("-m", "--time-marginalization", action="store_true", help="Perform marginalization over time via direct numerical integration. Default is false.")
optp.add_option("--vectorized", action="store_true", help="Perform manipulations of lm and timeseries using numpy arrays, not LAL data structures. (Combine with --gpu to enable GPU use, where available)")
optp.add_option("-o", "--output-file", help="Save result to this file.")
optp.add_option("-O", "--output-format", default='xml', help="[xml|hdf5]")
optp.add_option("-S", "--save-samples", action="store_true", help="Save sample points to output-file. Requires --output-file to be defined.")
optp.add_option("-L", "--save-deltalnL", type=float, default=float("Inf"), help="Threshold on deltalnL for points preserved in output file. Requires --output-file to be defined")
optp.add_option("-P", "--save-P", type=float,default=0, help="Threshold on cumulative probability for points preserved in output file. Requires --output-file to be defined")
optp.add_option("--verbose",action='store_true')
#
# Add the integration options
@@ -124,6 +126,7 @@ optp.add_option_group(integration_params)
#
intrinsic_params = OptionGroup(optp, "Intrinsic Parameters", "Intrinsic parameters (e.g component mass) to use.")
intrinsic_params.add_option("--pin-to-sim", help="Pin values to sim_inspiral table entry.")
intrinsic_params.add_option("--pin-distance-to-sim",action='store_true', help="Pin *distance* value to sim entry. Used to enable source frame reconstruction with NR.")
intrinsic_params.add_option("--mass1", type=float, help="Value of first component mass, in solar masses. Required if not providing coinc tables.")
intrinsic_params.add_option("--mass2", type=float, help="Value of second component mass, in solar masses. Required if not providing coinc tables.")
intrinsic_params.add_option("--eff-lambda", type=float, help="Value of effective tidal parameter. Optional, ignored if not given.")
@@ -306,7 +309,10 @@ if opts.sim_xml:
m1 = P.m1/lal.MSUN_SI
m2 =P.m2/lal.MSUN_SI
lambda1, lambda2 = P.lambda1,P.lambda2
P.dist = factored_likelihood.distMpcRef * 1.e6 * lal.PC_SI
if opts.pin_distance_to_sim:
dist_in =P.dist
opts.distance = dist_in/lal.PC_SI/1e6 # create de facto pinnable parameter. Use Mpc units
P.dist = factored_likelihood.distMpcRef * 1.e6 * lal.PC_SI # use *nonstandard* distance
P.phi=0.0
P.psi=0.0
P.incl = 0.0 # only works for aligned spins. Be careful.
@@ -384,8 +390,9 @@ for inst, chan in map(lambda c: c.split("="), opts.channel_name):
print "Reading channel %s from cache %s" % (inst+":"+chan, opts.cache_file)
data_dict[inst] = lalsimutils.frame_data_to_non_herm_hoff(opts.cache_file,
inst+":"+chan, start=start_time, stop=end_time,
window_shape=opts.window_shape)
print "Frequency binning: %f, length %d" % (data_dict[inst].deltaF,
window_shape=opts.window_shape,verbose=opts.verbose)
if opts.verbose:
print "Frequency binning: %f, length %d" % (data_dict[inst].deltaF,
data_dict[inst].data.length)
flow_ifo_dict = {}
@@ -873,7 +880,7 @@ if not opts.time_marginalization:
res, var, neff, dict_return = sampler.integrate(likelihood_function, *unpinned_params, **pinned_params)
else: # Sum over time for every point in other extrinsic params
if not opts.rom_integrate_intrinsic:
if not (opts.rom_integrate_intrinsic or opts.vectorized) :
def likelihood_function(right_ascension, declination, phi_orb, inclination,
psi, distance):
dec = numpy.copy(declination).astype(numpy.float64) # get rid of 'object', and allocate space
@@ -906,6 +913,42 @@ else: # Sum over time for every point in other extrinsic params
return numpy.exp(lnL -manual_avoid_overflow_logarithm)
args = likelihood_function.func_code.co_varnames[:likelihood_function.func_code.co_argcount]
print " --------> Arguments ", args
res, var, neff, dict_return = sampler.integrate(likelihood_function, *unpinned_params, **pinned_params)
elif opts.vectorized: # use array-based multiplications, fewer for loops
lookupNKDict = {}
lookupKNDict={}
lookupKNconjDict={}
ctUArrayDict = {}
ctVArrayDict={}
rholmArrayDict={}
rholms_intpArrayDict={}
epochDict={}
nEvals=0
for det in rholms_intp.keys():
print " Packing ", det
lookupNKDict[det],lookupKNDict[det], lookupKNconjDict[det], ctUArrayDict[det], ctVArrayDict[det], rholmArrayDict[det], rholms_intpArrayDict[det], epochDict[det] = factored_likelihood.PackLikelihoodDataStructuresAsArrays( rholms[det].keys(), rholms_intp[det], rholms[det], cross_terms[det],cross_terms_V[det])
def likelihood_function(right_ascension, declination, phi_orb, inclination,
psi, distance):
global nEvals
tvals = numpy.linspace(-t_ref_wind,t_ref_wind,int((t_ref_wind)*2/P.deltaT)) # choose an array at the target sampling rate. P is inherited globally
# use EXTREMELY many bits
lnL = numpy.zeros(right_ascension.shape,dtype=numpy.float128)
P.phi = right_ascension.astype(float) # cast to float
P.theta = declination.astype(float)
P.tref = float(fiducial_epoch)
P.phiref = phi_orb.astype(float)
P.incl = inclination.astype(float)
P.psi = psi.astype(float)
P.dist = (distance* 1.e6 * lalsimutils.lsu_PC).astype(float) # luminosity distance
lnL = factored_likelihood.DiscreteFactoredLogLikelihoodViaArrayVector(tvals,
P, lookupNKDict, rholmArrayDict, ctUArrayDict, ctVArrayDict,epochDict,Lmax=opts.l_max)
nEvals +=len(right_ascension)
return numpy.exp(lnL-manual_avoid_overflow_logarithm)
args = likelihood_function.func_code.co_varnames[:likelihood_function.func_code.co_argcount]
print " --------> Arguments ", args
res, var, neff, dict_return = sampler.integrate(likelihood_function, *unpinned_params, **pinned_params)
@@ -1100,7 +1143,11 @@ if opts.output_file:
if opts.event == None:
event_id = -1
if not (P.lambda1>0 or P.lambda2>0):
if not opts.pin_distance_to_sim:
numpy.savetxt(fname_output_txt, numpy.array([[event_id, m1, m2, P.s1x, P.s1y, P.s1z, P.s2x, P.s2y, P.s2z, numpy.log(res)+manual_avoid_overflow_logarithm, numpy.sqrt(var)/res,sampler.ntotal, neff ]])) #dict_return["convergence_test_results"]["normal_integral]"
else:
# Use case for this scenario is NR, where lambda is not present
numpy.savetxt(fname_output_txt, numpy.array([[event_id, m1, m2, P.s1x, P.s1y, P.s1z, P.s2x, P.s2y, P.s2z, pinned_params["distance"], numpy.log(res)+manual_avoid_overflow_logarithm, numpy.sqrt(var)/res,sampler.ntotal, neff ]])) #dict_return["convergence_test_results"]["normal_integral]"
else:
# Alternative output format if lambda is active
numpy.savetxt(fname_output_txt, numpy.array([[event_id, m1, m2, P.s1x, P.s1y, P.s1z, P.s2x, P.s2y, P.s2z, P.lambda1, P.lambda2, numpy.log(res)+manual_avoid_overflow_logarithm, numpy.sqrt(var)/res,sampler.ntotal, neff ]])) #dict_return["convergence_test_results"]["normal_integral]"
@@ -1185,5 +1232,8 @@ if opts.output_file:
# FIXME: likelihood or loglikehood
# FIXME: How to encode variance?
if opts.output_format == "xml":
xmlutils.append_likelihood_result_to_xmldoc(xmldoc, numpy.log(res)+manual_avoid_overflow_logarithm, neff=neff, converged=dict_return["convergence_test_results"]["normal_integral"], **{"mass1": m1, "mass2": m2, "alpha5":P.lambda1, "alpha6":P.lambda2, "event_duration": numpy.sqrt(var)/res, "ttotal": sampler.ntotal})
dict_out={"mass1": m1, "mass2": m2, "alpha5":P.lambda1, "alpha6":P.lambda2, "event_duration": numpy.sqrt(var)/res, "ttotal": sampler.ntotal}
if 'distance' in pinned_params:
dict_out['distance'] = pinned_params["distance"]
xmlutils.append_likelihood_result_to_xmldoc(xmldoc, numpy.log(res)+manual_avoid_overflow_logarithm, neff=neff, converged=dict_return["convergence_test_results"]["normal_integral"], **dict_out)
utils.write_filename(xmldoc, opts.output_file, gz=opts.output_file.endswith(".gz"))
@@ -338,8 +338,17 @@ if opts.posterior_file:
samples = add_field(samples, [('eta', float)]); samples['eta'] = eta_here
samples = add_field(samples, [('m1', float)]); samples['m1'] = m1_here
samples = add_field(samples, [('m2', float)]); samples['m2'] = mtot_here * q_here/(1+q_here)
if (not 'theta1' in samples.dtype.names) and ('a1x' in samples.dtype.names): # probably does not have polar coordinates
chiperp_here = np.sqrt( samples['a1x']**2+ samples['a1y']**2)
chi1_here = np.sqrt( samples['a1z']**2 + chiperp_here**2)
theta1_here = np.arccos( samples['a1z']/chi1_here)  # polar angle measured from the z axis, consistent with the inverse relations below
phi1_here = np.angle(samples['a1x']+1j*samples['a1y'])
samples = add_field(samples, [('chi1', float)]); samples['chi1'] = chi1_here
samples = add_field(samples, [('theta1', float)]); samples['theta1'] = theta1_here
samples = add_field(samples, [('phi1', float)]); samples['phi1'] = phi1_here
if "theta1" in samples.dtype.names:
elif "theta1" in samples.dtype.names:
a1x_dat = samples["a1"]*np.sin(samples["theta1"])*np.cos(samples["phi1"])
a1y_dat = samples["a1"]*np.sin(samples["theta1"])*np.sin(samples["phi1"])
chi1_perp = samples["a1"]*np.sin(samples["theta1"])
@@ -22,6 +22,7 @@ data_at_intrinsic = {}
my_digits=5 # safety for high-SNR BNS
tides_on = False
distance_on = False
col_intrinsic = 9
@@ -31,8 +32,12 @@ for fname in sys.argv[1:]:
for line in data:
line = np.around(line, decimals=my_digits)
lambda1=lambda2=0
if len(line) == 13 and not tides_on: # strip lines with the wrong length
if len(line) == 13 and (not tides_on) and (not distance_on): # strip lines with the wrong length
indx, m1,m2, s1x,s1y,s1z,s2x,s2y,s2z,lnL, sigmaOverL, ntot, neff = line
elif len(line) == 14:
distance_on=True
col_intrinsic=10
indx, m1,m2, s1x,s1y,s1z,s2x,s2y,s2z,dist, lnL, sigmaOverL, ntot, neff = line
elif len(line)==15:
tides_on = True
col_intrinsic =11
@@ -57,5 +62,7 @@ for key in data_at_intrinsic:
if tides_on:
print -1, key[0],key[1], key[2], key[3],key[4], key[5],key[6], key[7], key[8],key[9], lnLmeanMinusLmax+lnLmax, sigmaNetOverL, np.sum(ntot), -1
elif distance_on:
print -1, key[0],key[1], key[2], key[3],key[4], key[5],key[6], key[7], key[8], lnLmeanMinusLmax+lnLmax, sigmaNetOverL, np.sum(ntot), -1
else:
print -1, key[0],key[1], key[2], key[3],key[4], key[5],key[6], key[7], lnLmeanMinusLmax+lnLmax, sigmaNetOverL, np.sum(ntot), -1
@@ -175,7 +175,8 @@ def add_field(a, descr):
parser = argparse.ArgumentParser()
parser.add_argument("--fname",help="filename of *.dat file [standard ILE output]")
parser.add_argument("--input-tides",action='store_true',help="Use input format with tidal fields included.")
parser.add_argument("--input-tides",action='store_true',help="Use input format with tidal fields included.")
parser.add_argument("--input-distance",action='store_true',help="Use input format with distance fields (but not tidal fields?) enabled.")
parser.add_argument("--fname-lalinference",help="filename of posterior_samples.dat file [standard LI output], to overlay on corner plots")
parser.add_argument("--fname-output-samples",default="output-ILE-samples",help="output posterior samples (default output-ILE-samples -> output-ILE)")
parser.add_argument("--fname-output-integral",default="integral_result",help="output filename for integral result. Postfixes appended")
@@ -199,7 +200,12 @@ parser.add_argument("--downselect-parameter-range",action='append',type=str)
parser.add_argument("--no-downselect",action='store_true',help='Prevent using downselection on output points' )
parser.add_argument("--no-downselect-grid",action='store_true',help='Prevent using downselection on input points. Applied only to mc range' )
parser.add_argument("--aligned-prior", default="uniform",help="Options are 'uniform', 'volumetric', and 'alignedspin-zprior'")
parser.add_argument("--spin-prior-chizplusminus-alternate-sampling",default='alignedspin_zprior',help="Use gaussian sampling when using chizplus, chizminus, to make reweighting more efficient.")
parser.add_argument("--spin-prior-chizplusminus-alternate-sampling",default='alignedspin_zprior',help="Use gaussian sampling when using chizplus, chizminus, to make reweighting more efficient.")
parser.add_argument("--prior-gaussian-mass-ratio",action='store_true',help="Applies a gaussian mass ratio prior (mean=0.5, width=0.2 by default). Only viable in mtot, q coordinates. Not properly normalized, so will break bayes factors by about 2%")
parser.add_argument("--prior-tapered-mass-ratio",action='store_true',help="Applies a tapered mass ratio prior (transition 0.8, kappa=20). Only viable in mtot, q coordinates. Not properly normalized, a tapering factor instread")
parser.add_argument("--prior-gaussian-spin1-magnitude",action='store_true',help="Applies a gaussian spin magnitude prior (mean=0.7, width=0.1 by default) for FIRST spin. Only viable in polar spin coordinates. Not properly normalized, so will break bayes factors by a small amount (depending on chi_max). Used for 2g+1g merger arguments")
parser.add_argument("--prior-tapered-spin1-magnitude",action='store_true',help="Applies a tapered prior to spin1 magnitude")
parser.add_argument("--prior-tapered-spin1z",action='store_true',help="Applies a tapered prior to spin1's z component")
parser.add_argument("--pseudo-uniform-magnitude-prior", action='store_true',help="Applies volumetric prior internally, and then reweights at end step to get uniform spin magnitude prior")
parser.add_argument("--pseudo-uniform-magnitude-prior-alternate-sampling", action='store_true',help="Changes the internal sampling to be gaussian, not volumetric")
parser.add_argument("--pseudo-gaussian-mass-prior",action='store_true', help="Applies a gaussian mass prior in postprocessing. Done via reweighting so we can use arbitrary mass sampling coordinates.")
@@ -550,9 +556,32 @@ def lambda_tilde_prior(x):
def delta_lambda_tilde_prior(x):
return np.ones(x.shape)/1000. # -500,500
def gaussian_mass_prior(x,mu=0,sigma=