Commit 96430399 authored by Richard O'Shaughnessy's avatar Richard O'Shaughnessy

ILE_batchmode: fix (after committing the wrong version)

parent 5c320929
@@ -57,6 +57,7 @@ optp.add_option("-k", "--skymap-file", help="Use skymap stored in given FITS fil
optp.add_option("-x", "--coinc-xml", help="gstlal_inspiral XML file containing coincidence information.")
optp.add_option("-I", "--sim-xml", help="XML file containing mass grid to be evaluated")
optp.add_option("-E", "--event", default=0,type=int, help="Event number used for this run")
optp.add_option("--n-events-to-analyze", default=1,type=int, help="Number of events to analyze from this XML")
optp.add_option("--soft-fail-event-range",action='store_true',help='Soft failure (exit 0) if event ID is out of range. This happens in pipelines, if we have pre-built a DAG attempting to analyze more points than we really have')
optp.add_option("-f", "--reference-freq", type=float, default=100.0, help="Waveform reference frequency. Required, default is 100 Hz.")
optp.add_option("--fmin-template", dest='fmin_template', type=float, default=40, help="Waveform starting frequency. Default is 40 Hz. Also equal to starting frequency for integration")
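# Illustrative invocation (hypothetical filenames), showing how --event and
# --n-events-to-analyze select a contiguous block of grid points from one sim XML:
#   ILE_batchmode --sim-xml overlap-grid.xml.gz --event 40 --n-events-to-analyze 20 \
#       --reference-freq 100 --fmin-template 20 --soft-fail-event-range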
@@ -124,8 +125,7 @@ optp.add_option_group(integration_params)
# Add the intrinsic parameters
#
intrinsic_params = OptionGroup(optp, "Intrinsic Parameters", "Intrinsic parameters (e.g component mass) to use.")
intrinsic_params.add_option("--pin-to-sim", help="Pin values to sim_inspiral table entry.")
intrinsic_params.add_option("--pin-distance-to-sim",action='store_true', help="Pin *distance* value to sim entry. Used to enable source frame reconstruction with NR.")
#intrinsic_params.add_option("--pin-distance-to-sim",action='store_true', help="Pin *distance* value to sim entry. Used to enable source frame reconstruction with NR.")
intrinsic_params.add_option("--mass1", type=float, help="Value of first component mass, in solar masses. Required if not providing coinc tables.")
intrinsic_params.add_option("--mass2", type=float, help="Value of second component mass, in solar masses. Required if not providing coinc tables.")
intrinsic_params.add_option("--eff-lambda", type=float, help="Value of effective tidal parameter. Optional, ignored if not given.")
@@ -204,16 +204,6 @@ if opts.nr_group and opts.nr_param:
NR_template_param = eval(opts.nr_param)
else:
NR_template_param = opts.nr_param
# if opts.nr_params and opts.nr_index>-1:
# import re
# datNR=[]
# with open(opts.nr_params, "r") as f:
# for line in f:
# line=line.strip()
# line = line.partition('#')[0]
# line=re.split('\s+', line)
# if len(line)>=3:
# datNR.append(line[0:3]) # take first 3 - required.
#
@@ -247,50 +237,13 @@ n_eff = opts.n_eff # Effective number of points evaluated
if opts.seed is not None:
numpy.random.seed(opts.seed)
#
# Gather information about an injection placed in the data
#
if opts.pin_to_sim is not None:
xmldoc = utils.load_filename(opts.pin_to_sim)
sim_table = table.get_table(xmldoc, lsctables.SimInspiralTable.tableName)
assert len(sim_table) == 1
sim_row = sim_table[0]
#
# Gather information from the detection pipeline
#
if opts.coinc_xml is not None:
xmldoc = utils.load_filename(opts.coinc_xml)
coinc_table = table.get_table(xmldoc, lsctables.CoincInspiralTable.tableName)
assert len(coinc_table) == 1
coinc_row = coinc_table[0]
event_time = coinc_row.get_end()
print "Coinc XML loaded, event time: %s" % str(coinc_row.get_end())
elif opts.event_time is not None:
event_time = glue.lal.LIGOTimeGPS(opts.event_time)
print "Event time from command line: %s" % str(event_time)
else:
raise ValueError("Either --coinc-xml or --event-time must be provided to parse event time.")
#
# Set masses
#
if opts.mass1 is not None and opts.mass2 is not None:
m1, m2 = opts.mass1, opts.mass2
elif opts.coinc_xml is not None:
sngl_inspiral_table = table.get_table(xmldoc, lsctables.SnglInspiralTable.tableName)
assert len(sngl_inspiral_table) == len(coinc_row.ifos.split(","))
m1, m2 = None, None
for sngl_row in sngl_inspiral_table:
# NOTE: gstlal is exact match, but other pipelines may not be
assert m1 is None or (sngl_row.mass1 == m1 and sngl_row.mass2 == m2)
m1, m2 = sngl_row.mass1, sngl_row.mass2
elif opts.sim_xml:
pass # masses are read from the sim XML below
else:
print " Error! "
raise ValueError("Need either --mass1 --mass2, --coinc-xml, or --sim-xml to retrieve masses.")
#
# Template descriptors
@@ -300,61 +253,33 @@ fiducial_epoch = lal.LIGOTimeGPS()
fiducial_epoch = event_time.seconds + 1e-9*event_time.nanoseconds # no more direct access to gpsSeconds
# Struct to hold template parameters
P_list = None
if opts.sim_xml:
print "====Loading injection XML:", opts.sim_xml, opts.event, " ======="
P_list = lalsimutils.xml_to_ChooseWaveformParams_array(str(opts.sim_xml))
if opts.soft_fail_event_range and len(P_list) <= opts.event:
print " Event out of range; soft exit"
sys.exit(0)
if len(P_list) < opts.event+opts.n_events_to_analyze:
print " Event list out of range; soft exit"
sys.exit(0)
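# Sketch of the range check above: the events analyzed form the half-open window
# [opts.event, opts.event + opts.n_events_to_analyze). With, say, 100 grid points,
# --event 95 --n-events-to-analyze 10 fails the check (95 + 10 > 100) and soft-exits.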
P = P_list[opts.event] # Load in the physical parameters of the injection.
P.radec = False # do NOT propagate the epoch later
P.fref = opts.reference_freq
P.fmin = template_min_freq
P.tref = fiducial_epoch # the XML table
m1 = P.m1/lal.MSUN_SI
m2 = P.m2/lal.MSUN_SI
lambda1, lambda2 = P.lambda1, P.lambda2
if opts.pin_distance_to_sim:
dist_in = P.dist
opts.distance = dist_in/lal.PC_SI/1e6 # create de facto pinnable parameter. Use Mpc units
P.dist = factored_likelihood.distMpcRef * 1.e6 * lal.PC_SI # use *nonstandard* distance
P.phi = 0.0
P.psi = 0.0
P.incl = 0.0 # only works for aligned spins. Be careful.
if opts.approximant != "TaylorT4": # not default setting
P.approx = lalsimutils.lalsim.GetApproximantFromString(opts.approximant) # allow user to override the approx setting. Important for NR followup, where no approx set in sim_xml!
else:
lambda1, lambda2 = 0, 0
if opts.eff_lambda is not None:
lambda1, lambda2 = lalsimutils.tidal_lambda_from_tilde(m1, m2, opts.eff_lambda, opts.deff_lambda or 0)
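# For orientation (standard leading-order convention, e.g. Favata 2014; the exact
# definition lives in lalsimutils.tidal_lambda_from_tilde): with eta = m1*m2/(m1+m2)^2,
#   tilde(Lambda) = (8/13)*[ (1 + 7*eta - 31*eta^2)*(lambda1 + lambda2)
#                            + sqrt(1 - 4*eta)*(1 + 9*eta - 11*eta^2)*(lambda1 - lambda2) ]
# and the call above inverts (tilde(Lambda), delta tilde(Lambda)) back to (lambda1, lambda2).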
P = lalsimutils.ChooseWaveformParams(
approx = lalsimutils.lalsim.GetApproximantFromString(opts.approximant),
fmin = template_min_freq,
radec = False, # do NOT propagate the epoch later
incl = 0.0, # only works for aligned spins. Be careful.
phiref = 0.0,
theta = 0.0,
phi = 0.0,
psi = 0.0,
m1 = m1 * lal.MSUN_SI,
m2 = m2 * lal.MSUN_SI,
lambda1 = lambda1,
lambda2 = lambda2,
ampO = opts.amp_order,
fref = opts.reference_freq,
tref = fiducial_epoch,
dist = factored_likelihood.distMpcRef * 1.e6 * lal.PC_SI
)
#
if 200./((m1+m2)/20) < template_min_freq:
print " WARNING: Minimum frequency is above the estimated ISCO frequency, usually causing template generation to fail"
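# Background for the check above (not part of this commit's logic): the Schwarzschild
# ISCO GW frequency is f_ISCO = c^3/(6^{3/2} pi G M) ~= 4397 Hz * (Msun/M); the proxy
# 200./((m1+m2)/20) = 4000 Hz/(M/Msun) used here deliberately fires slightly early.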
P_list = P_list[opts.event:(opts.event+opts.n_events_to_analyze)]
for P in P_list:
P.radec = False # do NOT propagate the epoch later
P.fref = opts.reference_freq
P.fmin = template_min_freq
P.tref = fiducial_epoch # the XML table
m1 = P.m1/lal.MSUN_SI
m2 = P.m2/lal.MSUN_SI
lambda1, lambda2 = P.lambda1, P.lambda2
P.dist = factored_likelihood.distMpcRef * 1.e6 * lal.PC_SI # use *nonstandard* distance
P.phi = 0.0
P.psi = 0.0
P.incl = 0.0 # only works for aligned spins. Be careful.
if opts.approximant != "TaylorT4": # not default setting
P.approx = lalsimutils.lalsim.GetApproximantFromString(opts.approximant) # allow user to override the approx setting. Important for NR followup, where no approx set in sim_xml!
P = P_list[0] # use the first analyzed event for the printout below (and as default template downstream)
print " --- Template for intrinsic parameters ---- "
P.print_params()
# User requested bounds for data segment
if opts.data_start_time is not None and opts.data_end_time is not None:
start_time = opts.data_start_time
@@ -455,98 +380,6 @@ P.deltaT = 1./ (data_dict[data_dict.keys()[0]].data.length * deltaF)
P.deltaF = deltaF
#
# Perform the Precompute stage
#
# N.B. There is an implicit assumption all detectors use the same
# upper frequency limit for their inner product integrals
# N.B. P.fmin is being used to set the lower freq. limit of the IP integrals
# while in principle we may want to set it separately
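# For reference, the standard noise-weighted inner product behind these integrals:
#   <a|b> = 4 Re int_{fmin}^{fmax} df a*(f) b(f) / S_n(f)
# so fmax here and P.fmin above bound the common integration band for every detector.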
if opts.nr_perturbative_extraction:
print " ------------ USING PERTURBATIVE EXTRACTION ------ "
t_window = 0.15
rholms_intp, cross_terms, cross_terms_V, rholms, rest = factored_likelihood.PrecomputeLikelihoodTerms(
fiducial_epoch, t_window, P, data_dict, psd_dict, opts.l_max, fmax,
False, inv_spec_trunc_Q, T_spec,
NR_group=NR_template_group, NR_param=NR_template_param,
use_external_EOB=opts.use_external_EOB, nr_lookup=opts.nr_lookup, nr_lookup_valid_groups=opts.nr_lookup_group,
perturbative_extraction=opts.nr_perturbative_extraction, use_provided_strain=opts.nr_use_provided_strain,
hybrid_use=opts.nr_hybrid_use, hybrid_method=opts.nr_hybrid_method,
ROM_group=opts.rom_group, ROM_param=opts.rom_param, ROM_use_basis=opts.rom_use_basis,
verbose=not opts.rom_integrate_intrinsic, ROM_limit_basis_size=opts.rom_limit_basis_size_to,
no_memory=opts.no_memory, restricted_mode_list=restricted_mode_list)
### DEBUGGING CHECK: what keys are used (for rom)
if opts.rom_use_basis:
detector_list = data_dict.keys()
print " Keys for each instrument (ROM sanity check) ", rholms_intp[detector_list[0]].keys()
if opts.rom_use_basis and not opts.rom_integrate_intrinsic:
print " ROM : Trivial use (fixed extrinsic): Reconstruct standard expressions "
# If ROM basis, transform to standard form, and proceed normally.
# Note this is NOT allowing us to change intrinsic parameters
# It is included to enable a code test: running with hlm directly provided by ROM, vs running with coefficient reconstruction
rholms_intp, cross_terms, cross_terms_V, rholms, rest = factored_likelihood.ReconstructPrecomputedLikelihoodTermsROM(P, rest, rholms_intp, cross_terms, cross_terms_V, rholms,verbose=False)
if opts.dump_lnL_time_series and opts.sim_xml:
# LOAD IN PARAMETERS
P = lalsimutils.xml_to_ChooseWaveformParams_array(str(opts.sim_xml))[opts.event] # Load in the physical parameters of the injection.
print " ---- Compare at intrinsic parameters (masses irrelevant) ---- "
P.print_params()
tvals = numpy.linspace(-2*t_ref_wind,2*t_ref_wind,int(numpy.sqrt(3)*4*t_ref_wind/P.deltaT)) # make sure I have enough points to oversample. Use irrational factor, and oversample
lnLvals = numpy.ones(len(tvals))
for indx in numpy.arange(len(tvals)):
P.tref = fiducial_epoch + tvals[indx]
if opts.rom_integrate_intrinsic:
rholms_intp_A, cross_terms_A, cross_terms_V_A, rholms_A, rest_A = factored_likelihood.ReconstructPrecomputedLikelihoodTermsROM(P, rest, rholms_intp, cross_terms, cross_terms_V, rholms,verbose=False)
# No time-domain interpolation stored for ROMs, must use raw data
lnLvals[indx] = factored_likelihood.FactoredLogLikelihood(P, rholms_A, rholms_intp_A, cross_terms_A, cross_terms_V_A, opts.l_max,interpolate=False)
else:
lnLvals[indx] = factored_likelihood.FactoredLogLikelihood(P, rholms, rholms_intp, cross_terms, cross_terms_V, opts.l_max,interpolate=not opts.rom_use_basis)
numpy.savetxt("lnL_time_dump"+opts.output_file+".dat", numpy.array([tvals+fiducial_epoch,lnLvals]).T)
print " Peak lnL value : ", numpy.max(lnLvals)
sys.exit(0)
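# The dump written above is two-column ASCII (GPS time, lnL). A minimal offline check,
# assuming the same "lnL_time_dump<output_file>.dat" naming as the savetxt call:
#   dat = numpy.loadtxt("lnL_time_dump" + opts.output_file + ".dat")
#   print " peak lnL %g at t = %.6f" % (dat[:,1].max(), dat[dat[:,1].argmax(),0])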
if opts.pin_to_sim:
P.copy_lsctables_sim_inspiral(sim_row)
print "Pinned parameters from sim_inspiral"
print "\tRA", P.phi, sim_row.longitude
print "\tdec", P.theta, sim_row.latitude
print "\tt ref %d.%d" % (P.tref.gpsSeconds, P.tref.gpsNanoSeconds), sim_row.get_time_geocent()
print "\torb phase", P.phiref, sim_row.coa_phase # ref. orbital phase
print "\tinclination", P.incl, sim_row.inclination # inclination
print "\tpsi", P.psi, sim_row.polarization # polarization angle
print "\tdistance", P.dist/(1e6 * lalsimutils.lsu_PC), sim_row.distance # luminosity distance
logL = factored_likelihood.FactoredLogLikelihood(P, rholms_intp, cross_terms, cross_terms_V, opts.l_max)
print "Pinned log likelihood: %g, (%g in \"SNR\")" % (logL, numpy.sqrt(2*logL))
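# The quoted "SNR" uses the matched-filter identity lnL ~= rho^2/2 for a well-matched
# template, so rho = sqrt(2*lnL); it is a convenient diagnostic, not a detection statistic.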
tref = float(P.tref)
tvals = numpy.arange(tref-0.01, tref+0.01, 0.00001)
logLs = []
for t in tvals:
P.tref = lal.LIGOTimeGPS(t)
logLs.append(factored_likelihood.FactoredLogLikelihood(P, rholms_intp, cross_terms, cross_terms_V, opts.l_max))
import matplotlib
matplotlib.use("Agg")
from matplotlib import pyplot
print "Maximum logL is %g, (%g in \"SNR\")" % (max(logLs), numpy.sqrt(2*max(logLs)))
print "Which occurs at sample", numpy.argmax(logLs)
print "This corresponds to time %.20g" % tvals[numpy.argmax(logLs)]
print "The data event time is: %.20g" % sim_row.get_time_geocent()
print "Difference from geocenter t_ref is %.20g" %\
(tvals[numpy.argmax(logLs)] - sim_row.get_time_geocent())
print "This difference in discrete samples: %.20g" %\
((tvals[numpy.argmax(logLs)]-sim_row.get_time_geocent())/P.deltaT)
pyplot.plot(tvals-tref, logLs)
pyplot.ylabel("log Likelihood")
pyplot.xlabel("time (relative to %10.5f)" % tref)
pyplot.axvline(0, color="k")
pyplot.title("lnL(t),\n value at event time: %f" % logL)
pyplot.grid()
pyplot.savefig("logL.png")
integral = numpy.sum( numpy.exp(logLs) ) * (tvals[1]-tvals[0]) # Riemann sum over the tvals grid; use its spacing, not P.deltaT
print "Integral over t of likelihood is:", integral
print "The log of the integral is:", numpy.log(integral)
exit()
#
# Set up parameters and bounds
@@ -558,28 +391,11 @@ dmax = opts.d_max # max distance FOR ANY SOURCE EVER. EUCLIDEAN
if not opts.rom_integrate_intrinsic: #not opts.rom_use_basis: # Does not work for ROM
crossTermNet = 0
for key in cross_terms.keys():
crossTermNet += float(numpy.abs(cross_terms[key][(2,2),(2,2)]))
# first estimate tends to have problems for NR
dmax_sampling_guess = numpy.min([factored_likelihood.distMpcRef*(numpy.sqrt(crossTermNet)/8), dmax])
distBoundGuess = factored_likelihood.estimateUpperDistanceBoundInMpc(rholms, cross_terms)
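# Rationale (a sketch, not new logic): crossTermNet sums |U_{(2,2),(2,2)}| over detectors,
# which scales like the squared network SNR of a (2,2)-dominated source at distMpcRef;
# since SNR falls off as 1/d, distMpcRef*sqrt(crossTermNet)/8 estimates the distance at
# which the network SNR drops to 8.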
else:
dmax_sampling_guess = dmax
distBoundGuess = dmax
dmax_sampling_guess = dmax
distBoundGuess = dmax
print "Recommended distance for sampling ", dmax_sampling_guess, " and probably near ", distBoundGuess, " smaller than ", dmax
print " (recommendation not yet used) "
if distBoundGuess/dmax > 1:
print " ******* WARNING ******** "
print " Your distance cutoff for integration may not include the source distance "
if distBoundGuess/dmax < 0.05 and not opts.no_adapt:
print " ******* WARNING ******** "
print " Adaptive integrator in distance can fail if the dynamic range of distance is too great (bins?) "
print " Please set --d-max appropriately for your problem "
param_limits = { "psi": (0, 2*numpy.pi),
"phi_orb": (0, 2*numpy.pi),
"distance": (dmin, dmax), # CAN LEAD TO CATASTROPHIC FAILURE if dmax is too large (adaptive fails - too few bins)
@@ -756,6 +572,25 @@ else:
right_limit = 1,
prior_pdf = dec_sampler,
adaptive_sampling = not opts.no_adapt)
if not opts.time_marginalization:
#
# tref - GPS time of geocentric end time
# sampler: uniform in +/-2 ms window around estimated end time
#
tref_sampler = functools.partial(mcsampler.uniform_samp_vector,
param_limits["t_ref"][0], param_limits["t_ref"][1])
tref_sampler_cdf_inv = functools.partial(mcsampler.uniform_samp_cdf_inv_vector,
param_limits["t_ref"][0], param_limits["t_ref"][1])
sampler.add_parameter("t_ref",
pdf = tref_sampler,
cdf_inv = None,
left_limit = param_limits["t_ref"][0],
right_limit = param_limits["t_ref"][1],
prior_pdf = functools.partial(mcsampler.uniform_samp_vector, param_limits["t_ref"][0], param_limits["t_ref"][1]))
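# For a uniform density on [a, b] the inverse CDF is a + u*(b - a). A minimal standalone
# check of the window used above (pure numpy; assumes param_limits["t_ref"] is the
# (min, max) pair referenced throughout):
#   a, b = param_limits["t_ref"]
#   u = numpy.random.uniform(size=5)
#   print a + u*(b - a) # five draws inside the +/-2 ms window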
#
# Determine pinned and non-pinned parameters
#
@@ -787,9 +622,6 @@ if pinned_params.has_key("t_ref"):
# FIXME: Currently using hardcoded thresholds, poorly hand-tuned
#
test_converged = {}
if opts.convergence_tests_on:
test_converged['neff'] = functools.partial(mcsampler.convergence_test_MostSignificantPoint,1./opts.n_eff) # most significant point less than 1/neff of probability. Exactly equivalent to usual neff threshold.
test_converged["normal_integral"] = functools.partial(mcsampler.convergence_test_NormalSubIntegrals, 25, 0.01, 0.1) # 25 sub-integrals are gaussian distributed [weakly; mainly to rule out outliers] *and* relative error < 10%, based on sub-integrals. Should use # of intervals << neff target from above. Note this sets our target error tolerance on lnLmarg. Note the specific test requires >= 20 sub-intervals, which demands *very many* samples (each subintegral needs to be converged).
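# For reference: the usual effective-sample-size estimate behind the first test is
#   n_eff = (sum_i w_i)^2 / (sum_i w_i^2),
# and requiring that no single weighted point carry more than 1/n_eff of the total
# probability is that same threshold, stated per-point.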
#
# Merge options into one big ol' kwargs dict
@@ -821,384 +653,156 @@ pinned_params.update({
"igrand_threshold_deltalnL": opts.save_deltalnL, # Threshold on distance from max L to save sample
"igrand_threshold_p": opts.save_P # Threshold on cumulative probability contribution to cache sample
})
#
# Call the likelihood function for various extrinsic parameter values
#
if not opts.time_marginalization:
#
# tref - GPS time of geocentric end time
# sampler: uniform in +/-2 ms window around estimated end time
#
tref_sampler = functools.partial(mcsampler.uniform_samp_vector,
param_limits["t_ref"][0], param_limits["t_ref"][1])
tref_sampler_cdf_inv = functools.partial(mcsampler.uniform_samp_cdf_inv_vector,
param_limits["t_ref"][0], param_limits["t_ref"][1])
sampler.add_parameter("t_ref",
pdf = tref_sampler,
cdf_inv = None,
left_limit = param_limits["t_ref"][0],
right_limit = param_limits["t_ref"][1],
prior_pdf = functools.partial(mcsampler.uniform_samp_vector, param_limits["t_ref"][0], param_limits["t_ref"][1]))
#
# A note of caution:
# In order to make the pinning interface work consistently, the names of
# parameters given to the sampler must match the argument names in the
# called function. This is because the sampler has to reconstruct the
# argument order to pass the right values, and it can only do that by
# comparing the parameter names it knows to the arguments that are passed
# to it.
#
def likelihood_function(right_ascension, declination, t_ref, phi_orb,
inclination, psi, distance):
dec = numpy.copy(declination).astype(numpy.float64)
if opts.declination_cosine_sampler:
dec = numpy.pi/2 - numpy.arccos(dec)
incl = numpy.copy(inclination).astype(numpy.float64)
if opts.inclination_cosine_sampler:
incl = numpy.arccos(incl)
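# Change of variables, for orientation: when the sampler draws z = cos(angle) uniformly
# on [-1, 1], dec = pi/2 - arccos(z) and incl = arccos(z) are isotropically distributed;
# the Jacobian is absorbed into the sampler's pdf, so the likelihood below sees ordinary angles.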
# use EXTREMELY many bits
lnL = numpy.zeros(right_ascension.shape,dtype=numpy.float128)
i = 0
for ph, th, tr, phr, ic, ps, di in zip(right_ascension, dec,
t_ref, phi_orb, incl, psi, distance):
P.phi = ph # right ascension
P.theta = th # declination
P.tref = fiducial_epoch + tr # ref. time (rel to epoch for data taking)
P.phiref = phr # ref. orbital phase
P.incl = ic # inclination
P.psi = ps # polarization angle
P.dist = di* 1.e6 * lalsimutils.lsu_PC # luminosity distance
lnL[i] = factored_likelihood.FactoredLogLikelihood(
P, rholms_intp, cross_terms, cross_terms_V, opts.l_max)
i+=1
return numpy.exp(lnL - manual_avoid_overflow_logarithm)
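# Note on the shift: the integrand is exp(lnL - manual_avoid_overflow_logarithm) purely
# to stay inside floating-point range; the constant is added back when lnLmarg is
# reported at the end of this script.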
res, var, neff, dict_return = sampler.integrate(likelihood_function, *unpinned_params, **pinned_params)
else: # Sum over time for every point in other extrinsic params
if not opts.rom_integrate_intrinsic:
def likelihood_function(right_ascension, declination, phi_orb, inclination,
psi, distance):
dec = numpy.copy(declination).astype(numpy.float64) # get rid of 'object', and allocate space
if opts.declination_cosine_sampler:
dec = numpy.pi/2 - numpy.arccos(dec)
incl = numpy.copy(inclination).astype(numpy.float64)
if opts.inclination_cosine_sampler:
incl = numpy.arccos(incl)
# use EXTREMELY many bits
lnL = numpy.zeros(right_ascension.shape,dtype=numpy.float128)
i = 0
tvals = numpy.linspace(-t_ref_wind,t_ref_wind,int((t_ref_wind)*2/P.deltaT)) # choose an array at the target sampling rate. P is inherited globally
for ph, th, phr, ic, ps, di in zip(right_ascension, dec,
phi_orb, incl, psi, distance):
P.phi = ph # right ascension
P.theta = th # declination
P.tref = fiducial_epoch # see 'tvals', above
P.phiref = phr # ref. orbital phase
P.incl = ic # inclination
P.psi = ps # polarization angle
P.dist = di* 1.e6 * lalsimutils.lsu_PC # luminosity distance
lnL[i] = factored_likelihood.FactoredLogLikelihoodTimeMarginalized(tvals,
P, rholms_intp, rholms, cross_terms, cross_terms_V,
opts.l_max,interpolate=opts.interpolate_time)
i+=1
return numpy.exp(lnL - manual_avoid_overflow_logarithm)
def analyze_event(P_list, indx_event, data_dict, psd_dict, fmax, opts,inv_spec_trunc_Q=inv_spec_trunc_Q, T_spec=T_spec):
P = P_list[indx_event]
# Precompute
t_window = 0.15
rholms_intp, cross_terms, cross_terms_V, rholms, rest = factored_likelihood.PrecomputeLikelihoodTerms(
fiducial_epoch, t_window, P, data_dict, psd_dict, opts.l_max, fmax,
False, inv_spec_trunc_Q, T_spec,
NR_group=NR_template_group, NR_param=NR_template_param,
use_external_EOB=opts.use_external_EOB, nr_lookup=opts.nr_lookup, nr_lookup_valid_groups=opts.nr_lookup_group,
perturbative_extraction=opts.nr_perturbative_extraction, use_provided_strain=opts.nr_use_provided_strain,
hybrid_use=opts.nr_hybrid_use, hybrid_method=opts.nr_hybrid_method,
ROM_group=opts.rom_group, ROM_param=opts.rom_param, ROM_use_basis=opts.rom_use_basis,
verbose=not opts.rom_integrate_intrinsic, ROM_limit_basis_size=opts.rom_limit_basis_size_to,
no_memory=opts.no_memory, restricted_mode_list=restricted_mode_list)
# Likelihood
if not opts.time_marginalization:
def likelihood_function(right_ascension, declination, t_ref, phi_orb,
inclination, psi, distance):
dec = numpy.copy(declination).astype(numpy.float64)
if opts.declination_cosine_sampler:
dec = numpy.pi/2 - numpy.arccos(dec)
incl = numpy.copy(inclination).astype(numpy.float64)
if opts.inclination_cosine_sampler:
incl = numpy.arccos(incl)
# use EXTREMELY many bits
lnL = numpy.zeros(right_ascension.shape,dtype=numpy.float128)
i = 0
for ph, th, tr, phr, ic, ps, di in zip(right_ascension, dec,
t_ref, phi_orb, incl, psi, distance):
P.phi = ph # right ascension
P.theta = th # declination
P.tref = fiducial_epoch + tr # ref. time (rel to epoch for data taking)
P.phiref = phr # ref. orbital phase
P.incl = ic # inclination
P.psi = ps # polarization angle
P.dist = di* 1.e6 * lalsimutils.lsu_PC # luminosity distance
lnL[i] = factored_likelihood.FactoredLogLikelihood(
P, rholms_intp, cross_terms, cross_terms_V, opts.l_max)
i+=1
return numpy.exp(lnL - manual_avoid_overflow_logarithm)
else: # Sum over time for every point in other extrinsic params
if not opts.rom_integrate_intrinsic:
def likelihood_function(right_ascension, declination, phi_orb, inclination,
psi, distance):
dec = numpy.copy(declination).astype(numpy.float64) # get rid of 'object', and allocate space
if opts.declination_cosine_sampler:
dec = numpy.pi/2 - numpy.arccos(dec)
incl = numpy.copy(inclination).astype(numpy.float64)
if opts.inclination_cosine_sampler:
incl = numpy.arccos(incl)
# use EXTREMELY many bits
lnL = numpy.zeros(right_ascension.shape,dtype=numpy.float128)
i = 0
tvals = numpy.linspace(-t_ref_wind,t_ref_wind,int((t_ref_wind)*2/P.deltaT)) # choose an array at the target sampling rate. P is inherited globally
for ph, th, phr, ic, ps, di in zip(right_ascension, dec,
phi_orb, incl, psi, distance):
P.phi = ph # right ascension
P.theta = th # declination
P.tref = fiducial_epoch # see 'tvals', above
P.phiref = phr # ref. orbital phase
P.incl = ic # inclination
P.psi = ps # polarization angle
P.dist = di* 1.e6 * lalsimutils.lsu_PC # luminosity distance
lnL[i] = factored_likelihood.FactoredLogLikelihoodTimeMarginalized(tvals,
P, rholms_intp, rholms, cross_terms, cross_terms_V,
opts.l_max,interpolate=opts.interpolate_time)
i+=1
return numpy.exp(lnL - manual_avoid_overflow_logarithm)
else: # integrate over intrinsic variables. Right now those variables have HARDCODED NAMES, alas
def likelihood_function(right_ascension, declination, phi_orb, inclination,
psi, distance,q):
dec = numpy.copy(declination).astype(numpy.float64)
if opts.declination_cosine_sampler:
dec = numpy.pi/2 - numpy.arccos(dec)
incl = numpy.copy(inclination).astype(numpy.float64)
if opts.inclination_cosine_sampler:
incl = numpy.arccos(incl)
lnL = numpy.zeros(len(right_ascension),dtype=numpy.float128)
i = 0
tvals = numpy.linspace(-t_ref_wind,t_ref_wind,int((t_ref_wind)*2/P.deltaT)) # choose an array at the target sampling rate. P is inherited globally
t_start =lal.GPSTimeNow()
for ph, th, phr, ic, ps, di, qi in zip(right_ascension, dec,
phi_orb, incl, psi, distance, q):
# Reconstruct U,V using ROM fits. PROBABLY should do this once for every q, rather than deep in the loop
P.assign_param('q',qi) # mass ratio
rholms_intp_A, cross_terms_A, cross_terms_V_A, rholms_A, rest_A = factored_likelihood.ReconstructPrecomputedLikelihoodTermsROM(P, rest, rholms_intp, cross_terms, cross_terms_V, rholms,verbose=False)
# proceed for rest
P.phi = ph # right ascension
P.theta = th # declination
P.tref = fiducial_epoch # see 'tvals', above
P.phiref = phr # ref. orbital phase
P.incl = ic # inclination
P.psi = ps # polarization angle
P.dist = di* 1.e6 * lalsimutils.lsu_PC # luminosity distance
lnL[i] = factored_likelihood.FactoredLogLikelihoodTimeMarginalized(tvals,
P, rholms_intp_A, rholms_A, cross_terms_A, cross_terms_V_A,
opts.l_max,interpolate=opts.interpolate_time)
if numpy.isnan(lnL[i]) or lnL[i]<-200:
lnL[i] = -200 # regularize : a hack, for now, to deal with rare ROM problems. Only on the ROM logic fork
i+=1
t_end =lal.GPSTimeNow()
print " Cost per evaluation ", (t_end - t_start)/len(q)
print " Max lnL for this iteration ", numpy.max(lnL)
return numpy.exp(lnL - manual_avoid_overflow_logarithm)
# Integrate
args = likelihood_function.func_code.co_varnames[:likelihood_function.func_code.co_argcount]
print " --------> Arguments ", args
res, var, neff, dict_return = sampler.integrate(likelihood_function, *unpinned_params, **pinned_params)
else: # integrate over intrinsic variables. Right now those variables have HARDCODED NAMES, alas
def likelihood_function(right_ascension, declination, phi_orb, inclination,
psi, distance,q):
dec = numpy.copy(declination).astype(numpy.float64)
if opts.declination_cosine_sampler:
dec = numpy.pi/2 - numpy.arccos(dec)
incl = numpy.copy(inclination).astype(numpy.float64)
if opts.inclination_cosine_sampler:
incl = numpy.arccos(incl)
lnL = numpy.zeros(len(right_ascension),dtype=numpy.float128)
i = 0
tvals = numpy.linspace(-t_ref_wind,t_ref_wind,int((t_ref_wind)*2/P.deltaT)) # choose an array at the target sampling rate. P is inherited globally
t_start =lal.GPSTimeNow()
# print " ILE with intrinsic (ROM) ", rholms_intp["H1"].keys()
# for qi in q:
# P.assign_param('q',qi) # mass ratio
# print " Re-constructing U,V, rho for ", qi, " time cost ", t_end - t_start
for ph, th, phr, ic, ps, di, qi in zip(right_ascension, dec,
phi_orb, incl, psi, distance, q):
# Reconstruct U,V using ROM fits. PROBABLY should do this once for every q, rather than deep in the loop
P.assign_param('q',qi) # mass ratio
rholms_intp_A, cross_terms_A, cross_terms_V_A, rholms_A, rest_A = factored_likelihood.ReconstructPrecomputedLikelihoodTermsROM(P, rest, rholms_intp, cross_terms, cross_terms_V, rholms,verbose=False)
# proceed for rest
P.phi = ph # right ascension
P.theta = th # declination
P.tref = fiducial_epoch # see 'tvals', above
P.phiref = phr # ref. orbital phase
P.incl = ic # inclination
P.psi = ps # polarization angle
P.dist = di* 1.e6 * lalsimutils.lsu_PC # luminosity distance
lnL[i] = factored_likelihood.FactoredLogLikelihoodTimeMarginalized(tvals,
P, rholms_intp_A, rholms_A, cross_terms_A, cross_terms_V_A,
opts.l_max,interpolate=opts.interpolate_time)
if numpy.isnan(lnL[i]) or lnL[i]<-200:
lnL[i] = -200 # regularize : a hack, for now, to deal with rare ROM problems. Only on the ROM logic fork
i+=1
t_end =lal.GPSTimeNow()
print " Cost per evaluation ", (t_end - t_start)/len(q)
print " Max lnL for this iteration ", numpy.max(lnL)
return numpy.exp(lnL - manual_avoid_overflow_logarithm)
args = likelihood_function.func_code.co_varnames[:likelihood_function.func_code.co_argcount]
print " --------> Arguments ", args
res, var, neff, dict_return = sampler.integrate(likelihood_function, *unpinned_params, **pinned_params)
print " lnLmarg is ", numpy.log(res)+manual_avoid_overflow_logarithm, " with expected relative error ", numpy.sqrt(var)/res