diff --git a/bin/gwcosmo_compute_pdet b/bin/gwcosmo_compute_pdet index 9340354daf4accdf7d209dc9a450f3288be44244..837b66b388fbf09d13bc4cc65b0d05c5c9b1eb30 100755 --- a/bin/gwcosmo_compute_pdet +++ b/bin/gwcosmo_compute_pdet @@ -62,7 +62,9 @@ parser = OptionParser( Option("--combine", default=None, help="Directory of constant_H0 Pdets to combine into single Pdet pickle."), Option("--outputfile", default=None, - help="Name of output pdet file.") + help="Name of output pdet file."), + Option("--snr", default='12.0', type=float, + help="Network SNR threshold.") ]) opts, args = parser.parse_args() print(opts) @@ -90,7 +92,7 @@ linear = str2bool(opts.linear_cosmology) basic = str2bool(opts.basic_pdet) full_waveform = str2bool(opts.full_waveform) Nsamps = int(opts.Nsamps) - +network_snr_threshold = float(opts.snr) constant_H0 = str2bool(opts.constant_H0) pdet_path = str(opts.combine) @@ -103,7 +105,7 @@ if full_waveform is True: kind = 'full_waveform' else: kind = 'inspiral' - + if opts.combine is None: if mass_distribution == 'BBH-powerlaw': print("Calculating Pdet with a " + mass_distribution + " mass distribution with alpha = " + str(alpha) @@ -114,7 +116,7 @@ if opts.combine is None: pdet = gwcosmo.detection_probability.DetectionProbability(mass_distribution=mass_distribution, asd=psd, basic=basic, linear=linear, alpha=alpha, Mmin=Mmin, Mmax=Mmax, full_waveform=full_waveform, Nsamps=Nsamps, - constant_H0=constant_H0, H0=H0) + constant_H0=constant_H0, H0=H0, network_snr_threshold=network_snr_threshold) if opts.outputfile is None: if mass_distribution == 'BBH-powerlaw': @@ -128,6 +130,16 @@ if opts.combine is None: else: probs = {} + for file in os.listdir(pdet_path): + if file.endswith(".p"): + pdets = pickle.load(open(os.path.join(pdet_path,str(file)), 'rb')) + psd = pdets.asd + alpha = pdets.alpha + Mmin = pdets.Mmin + Mmax = pdets.Mmax + mass_distribution = pdets.mass_distribution + break + for h0 in H0: try: pdets = 
pickle.load(open(pdet_path+'/pdet_'+psd+'_'+str(alpha)+'_'+str(int(h0))+'.p', 'rb')) @@ -142,13 +154,53 @@ else: pdet = gwcosmo.likelihood.detection_probability.DetectionProbability( mass_distribution=mass_distribution, alpha=alpha, asd=psd, detectors=['H1', 'L1'], Nsamps=2, - network_snr_threshold=12, Omega_m=0.308, + network_snr_threshold=12.0, Omega_m=0.308, linear=False, basic=False, M1=50., M2=50., constant_H0=False, H0=H0vec, full_waveform=True) + Nsamps = pdets.Nsamps + RAs = pdets.RAs + Decs = pdets.Decs + incs = pdets.incs + psis = pdets.psis + phis = pdets.phis + mass_distribution = pdets.mass_distribution + dl_array = pdets.dl_array + m1 = pdets.m1/1.988e30 + m2 = pdets.m2/1.988e30 + alpha = pdets.alpha + Mmin = pdets.Mmin + Mmax = pdets.Mmax + psd = pdets.asd + full_waveform = pdets.full_waveform + network_snr_threshold = pdets.snr_threshold + Omega_m = pdets.Omega_m + linear = pdets.linear + seed = pdets.seed + detectors = pdets.detectors + + if full_waveform is True: + kind = 'full_waveform' + else: + kind = 'inspiral' + pdet.H0vec = H0vec pdet.Nsamps = Nsamps pdet.prob = prob + pdet.RAs = RAs + pdet.Decs = Decs + pdet.incs = incs + pdet.psis = psis + pdet.phis = phis + pdet.mass_distribution = mass_distribution + pdet.dl_array = dl_array + pdet.m1 = m1 + pdet.m2 = m2 + pdet.Omega_m = Omega_m + pdet.linear = linear + pdet.seed = seed + pdet.detectors = detectors + pdet.network_snr_threshold = network_snr_threshold logit_prob=logit(prob) for i in range (len(logit_prob)): @@ -157,11 +209,11 @@ else: pdet.interp_average = interp_average if mass_distribution == 'BBH-powerlaw': - pdet_path = '{}PSD_{}_alpha_{}_Mmin_{}_Mmax_{}_Nsamps{}_{}.p'.format(psd, mass_distribution, + pdet_path = '{}PSD_{}_alpha_{}_Mmin_{}_Mmax_{}_Nsamps{}_{}_snr_{}.p'.format(psd, mass_distribution, str(alpha), str(Mmin), str(Mmax), - str(Nsamps), kind) + str(Nsamps), kind,network_snr_threshold) else: - pdet_path = '{}PSD_{}_Nsamps{}_{}.p'.format(psd, mass_distribution, str(Nsamps), 
kind) + pdet_path = '{}PSD_{}_Nsamps{}_{}_snr_{}.p'.format(psd, mass_distribution, str(Nsamps), kind,network_snr_threshold) pickle.dump( pdet, open( pdet_path, "wb" ) ) diff --git a/bin/gwcosmo_single_posterior b/bin/gwcosmo_single_posterior index 2d0fe23aa4bd05bf4be3fde1bd43b5717e27d3f0..5fd155be55aa07fd6cec13eac7963d05205e912b 100755 --- a/bin/gwcosmo_single_posterior +++ b/bin/gwcosmo_single_posterior @@ -12,7 +12,7 @@ import pickle import multiprocessing as mp #Global Imports -import matplotlib +import matplotlib matplotlib.use('agg') import matplotlib.pyplot as plt matplotlib.rcParams['font.family']= 'Times New Roman' @@ -71,7 +71,7 @@ parser = OptionParser( Option("--galaxy_catalog", default=None, help="Load galaxy catalog in pickle format"), Option("--psd", default=None, - help="Select between 'O1' and 'O2' PSDs, by default we use aLIGO at design sensitivity (default=None)."), + help="Select between 'O1' and 'O2' PSDs, by default we use aLIGO at design sensitivity (default=None)."), Option("--galaxy_weighting", default='False', help="Set galaxy catalog weighting"), Option("--catalog_band", default='B', type=str, @@ -103,10 +103,16 @@ parser = OptionParser( Option("--full_waveform", default='True', help="Use detection probability calculated using full waveforms (default=True) otherwise just use the inspiral part \ (only use full_waveform = False for O2-H0 backwards compatibility"), + Option("--Kcorrections", default='False', + help="Apply K-corrections."), + Option("--reweight", default='True', + help="Reweight samples to pop model."), Option("--outputfile", default='Posterior', help="Name of output file"), Option("--plot", default=None, - help="Plot .npz file") + help="Plot .npz file"), + Option("--snr", default='12.0', type=float, + help="Network SNR threshold.") ]) opts, args = parser.parse_args() print(opts) @@ -134,7 +140,7 @@ if opts.plot is not None: posterior_uniform_norm = data[2] posterior_log_norm = data[3] outputfile = filename[:-4] - + else: if 
(opts.posterior_samples is None and opts.skymap is None): parser.error('Provide either posterior samples or skymap.') @@ -144,7 +150,7 @@ else: if (opts.galaxy_catalog is None and opts.method == 'statistical'): parser.error('The statistical method requires a galaxy catalog. Please provide one.') - + if opts.psd is None: parser.error('Please provide a PSD.') @@ -167,7 +173,7 @@ else: if opts.method == 'counterpart': galaxy_weighting = False completion = True - if (opts.counterpart is not None and opts.counterpart[-2:] == '.p'): + if (opts.counterpart is not None and opts.counterpart[-5:] == '.hdf5'): counterpart = gwcosmo.prior.catalog.galaxyCatalog(catalog_file=opts.counterpart) else: if opts.counterpart_ra is not None: @@ -183,7 +189,7 @@ else: min_H0 = float(opts.min_H0) max_H0 = float(opts.max_H0) bins_H0 = float(opts.bins_H0) - + uncertainty = str2bool(opts.uncertainty) linear = str2bool(opts.linear_cosmology) basic = str2bool(opts.basic_pdet) @@ -191,12 +197,16 @@ else: Lambda = float(opts.Lambda) psd = str(opts.psd) alpha = float(opts.powerlaw_slope) - Mmin = float(opts.minimum_mass) - Mmax = float(opts.maximum_mass) + mmin = float(opts.minimum_mass) + mmax = float(opts.maximum_mass) Nsamps = 20000 new_skypatch=str2bool(opts.new_skypatch) band = str(opts.catalog_band) full_waveform=str2bool(opts.full_waveform) + Kcorr = str2bool(opts.Kcorrections) + reweight = str2bool(opts.reweight) + network_snr_threshold = float(opts.snr) + population_params = dict(mass_distribution=mass_distribution,alpha=alpha,mmin=mmin,mmax=mmax,Lambda=Lambda) options_string = opts.method outputfile = str(opts.outputfile) @@ -204,73 +214,61 @@ else: "Compute P(H0)" H0 = np.linspace(min_H0, max_H0, bins_H0) dH0 = H0[1] - H0[0] - + if opts.posterior_samples is not None: print("Loading posterior samples.") samples = gwcosmo.likelihood.posterior_samples.posterior_samples(posterior_samples) - + if opts.skymap is not None: print("Loading 2D skymap.") skymap = 
gwcosmo.likelihood.skymap.skymap(skymap) - + if opts.posterior_samples is None: print("Loading 3D skymap.") skymap = gwcosmo.likelihood.skymap.skymap(skymap) samples = None - + if opts.method == 'statistical': - if galaxy_catalog[-2:] == '.p': - catalog = gwcosmo.prior.catalog.galaxyCatalog(catalog_file=galaxy_catalog) + if galaxy_catalog[-5:] == '.hdf5': + catalog = gwcosmo.prior.catalog.galaxyCatalog(catalog_file=galaxy_catalog,band=band,Kcorr=Kcorr) + print("Using "+str(band)+"-band maggies.") else: print('Not a compatible catalog.') - - if catalog.skypatch['allsky'] is not None: - allsky = False - radeclims = catalog.skypatch['allsky'] - else: - allsky = True - radeclims = None counterpart = None population = False - + if opts.method == 'population': population = True new_skypatch = False galaxy_weighting = False completion = False - catalog = None - allsky = True - radeclims = None + catalog = None counterpart = None if (opts.method == "counterpart" and opts.counterpart is None): catalog = None - counterpart = gwcosmo.prior.catalog.galaxyCatalog() + counterpart = gwcosmo.prior.catalog.galaxy() if (opts.counterpart_ra is None or opts.counterpart_dec is None or opts.counterpart_z is None): parser.error('The counterpart method requires the ra, dec, and z of the galaxy.') else: - counterpart.get_galaxy(0).ra = counterpart_ra*np.pi/180. - counterpart.get_galaxy(0).dec = counterpart_dec*np.pi/180. + counterpart.ra = counterpart_ra*np.pi/180. + counterpart.dec = counterpart_dec*np.pi/180. 
if (counterpart_z > 0) & (counterpart_z < 3): - counterpart.get_galaxy(0).z = zhelio_to_zcmb(counterpart.ra, counterpart.dec, counterpart_z) + counterpart.z = counterpart_z else: - counterpart.get_galaxy(0).z = zhelio_to_zcmb(counterpart.ra, counterpart.dec, counterpart_z/speed_of_light) + counterpart.z = counterpart_z/speed_of_light if (counterpart_v > 0) & (counterpart_v < 1): - counterpart.get_galaxy(0).sigmaz = counterpart_v + counterpart.sigmaz = counterpart_v else: - counterpart.get_galaxy(0).sigmaz = counterpart_v/speed_of_light - allsky = True - radeclims = None + counterpart.sigmaz = counterpart_v/speed_of_light population = False elif (opts.method == "counterpart" and opts.counterpart is not None): catalog = None - allsky = True - radeclims = None population = False if full_waveform is True: @@ -279,28 +277,29 @@ else: kind = 'inspiral' if mass_distribution == 'BBH-powerlaw': - - pdet_path = data_path + '{}PSD_{}_alpha_{}_Mmin_{}_Mmax_{}_Nsamps{}_{}.p'.format(psd, mass_distribution, alpha, Mmin, Mmax, Nsamps, kind) + + pdet_path = data_path + '{}PSD_{}_alpha_{}_Mmin_{}_Mmax_{}_Nsamps{}_{}_snr_{}.p'.format(psd, mass_distribution, alpha, mmin, mmax, Nsamps, kind,network_snr_threshold) + print(pdet_path) if os.path.isfile(pdet_path) is True: pdet = pickle.load(open(pdet_path, 'rb')) print('Loading precomputed pdet with a {} mass distribution (alpha = {}) at {} sensitivity using the {}'.format(mass_distribution, alpha, psd, kind)) else: print('Calculating detection probability from scratch. This will take a while... 
Consider using a precomputed one for speed.') - pdet = gwcosmo.detection_probability.DetectionProbability(mass_distribution=mass_distribution, asd=psd, - basic=basic, full_waveform=full_waveform) + pdet = gwcosmo.detection_probability.DetectionProbability(mass_distribution=mass_distribution,asd=psd, + basic=basic,full_waveform=full_waveform,H0=H0,network_snr_threshold=network_snr_threshold) if (mass_distribution == 'BNS' or mass_distribution == 'NSBH'): - pdet_path = data_path + '{}PSD_{}_Nsamps{}_{}.p'.format(psd, mass_distribution, Nsamps, kind) + pdet_path = data_path + '{}PSD_{}_Nsamps{}_{}_snr_{}.p'.format(psd, mass_distribution, Nsamps, kind,network_snr_threshold) if os.path.isfile(pdet_path) is True: pdet = pickle.load(open(pdet_path, 'rb')) print('Loading precomputed pdet with a {} mass distribution at {} sensitivity.'.format(mass_distribution, psd)) - - me = gwcosmo.gwcosmo.gwcosmoLikelihood(samples,skymap,catalog,pdet,EM_counterpart=counterpart, - linear=linear,weighted=galaxy_weighting,whole_cat=allsky, - radec_lim=radeclims,uncertainty=uncertainty,basic=basic, - rate=rate_evolution,Lambda=Lambda,band=band) - likelihood_original,pxG,pDG,pGD,catalog_fraction, pxnG,pDnG,pnGD, pxnG_rest_of_sky,pDnG_rest_of_sky,rest_fraction = me.likelihood(H0,complete=completion,counterpart_case='direct',new_skypatch=new_skypatch,population=population) + me = gwcosmo.gwcosmo.gwcosmoLikelihood( + H0,samples,skymap,catalog,pdet,reweight=reweight,EM_counterpart=counterpart, + linear=linear,weighted=galaxy_weighting,uncertainty=uncertainty,basic=basic, + rate=rate_evolution,population_params=population_params,Kcorr=Kcorr) + + likelihood_original,pxG,pDG,pGD,catalog_fraction,pxnG,pDnG,pnGD,pxnG_rest_of_sky,pDnG_rest_of_sky,rest_fraction = me.likelihood(H0,complete=completion,counterpart_case='direct',new_skypatch=new_skypatch,population=population) likelihood = np.array(likelihood_original)/np.sum(np.array(likelihood_original)*dH0) @@ -308,12 +307,12 @@ else: posterior_uniform = 
prior_uniform*likelihood prior_log = gwcosmo.prior.priors.pH0(H0,prior='log') posterior_log= prior_log*likelihood - + prior_uniform_norm = prior_uniform/np.sum(prior_uniform*dH0) posterior_uniform_norm = posterior_uniform/np.sum(posterior_uniform*dH0) prior_log_norm = prior_log/np.sum(prior_log*dH0) posterior_log_norm = posterior_log/np.sum(posterior_log*dH0) - + np.savez(outputfile+'.npz',[H0,likelihood,posterior_uniform_norm,posterior_log_norm,opts]) np.savez(outputfile+'_likelihood_breakdown.npz',[H0,likelihood_original, pxG,pDG,pGD,catalog_fraction, pxnG,pDnG,pnGD,pxnG_rest_of_sky,pDnG_rest_of_sky,rest_fraction]) @@ -323,7 +322,7 @@ MAP_uniform = confidence_uniform.map a_uniform = confidence_uniform.lower_level b_uniform = confidence_uniform.upper_level print('H0 = %.0f + %.0f - %.0f (MAP and 68.3 percent HDI)' %(MAP_uniform,b_uniform-MAP_uniform,MAP_uniform-a_uniform)) - + print("Log Prior") confidence_log = confidence_interval(posterior_log_norm,H0,level=0.683) MAP_log = confidence_log.map diff --git a/gwcosmo/data/O1PSD_BBH-powerlaw_alpha_1.0_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform_snr_12.0.p b/gwcosmo/data/O1PSD_BBH-powerlaw_alpha_1.0_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform_snr_12.0.p new file mode 100644 index 0000000000000000000000000000000000000000..367ad36fd1ae99c3f1d7ad5a16ab02cb095fdbd2 Binary files /dev/null and b/gwcosmo/data/O1PSD_BBH-powerlaw_alpha_1.0_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform_snr_12.0.p differ diff --git a/gwcosmo/data/O1PSD_BBH-powerlaw_alpha_1.6_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform_snr_10.0.p b/gwcosmo/data/O1PSD_BBH-powerlaw_alpha_1.6_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform_snr_10.0.p new file mode 100644 index 0000000000000000000000000000000000000000..9414a76d5de43715db03dd7fbe8eade7c8f0db05 Binary files /dev/null and b/gwcosmo/data/O1PSD_BBH-powerlaw_alpha_1.6_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform_snr_10.0.p differ diff --git 
a/gwcosmo/data/O1PSD_BBH-powerlaw_alpha_1.6_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform.p b/gwcosmo/data/O1PSD_BBH-powerlaw_alpha_1.6_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform_snr_11.0.p similarity index 54% rename from gwcosmo/data/O1PSD_BBH-powerlaw_alpha_1.6_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform.p rename to gwcosmo/data/O1PSD_BBH-powerlaw_alpha_1.6_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform_snr_11.0.p index e8d0266e623628cd033a790aa1635fe68d3df11f..94127ff44688c6ea085f6a405eb2686321bbf6fb 100644 Binary files a/gwcosmo/data/O1PSD_BBH-powerlaw_alpha_1.6_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform.p and b/gwcosmo/data/O1PSD_BBH-powerlaw_alpha_1.6_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform_snr_11.0.p differ diff --git a/gwcosmo/data/O1PSD_BBH-powerlaw_alpha_1.6_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform_snr_12.0.p b/gwcosmo/data/O1PSD_BBH-powerlaw_alpha_1.6_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform_snr_12.0.p new file mode 100644 index 0000000000000000000000000000000000000000..3dccbd12f53687296d797bc13c509577ae070fde Binary files /dev/null and b/gwcosmo/data/O1PSD_BBH-powerlaw_alpha_1.6_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform_snr_12.0.p differ diff --git a/gwcosmo/data/O1PSD_BBH-powerlaw_alpha_1.6_Mmin_5.0_Mmax_50.0_Nsamps20000_full_waveform_snr_12.0.p b/gwcosmo/data/O1PSD_BBH-powerlaw_alpha_1.6_Mmin_5.0_Mmax_50.0_Nsamps20000_full_waveform_snr_12.0.p new file mode 100644 index 0000000000000000000000000000000000000000..efcacd5b878a1c545d754c41a95db1fa24845562 Binary files /dev/null and b/gwcosmo/data/O1PSD_BBH-powerlaw_alpha_1.6_Mmin_5.0_Mmax_50.0_Nsamps20000_full_waveform_snr_12.0.p differ diff --git a/gwcosmo/data/O1PSD_BBH-powerlaw_alpha_2.3_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform_snr_12.0.p b/gwcosmo/data/O1PSD_BBH-powerlaw_alpha_2.3_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform_snr_12.0.p new file mode 100644 index 0000000000000000000000000000000000000000..a489034b00665ad7979902b753598997b61862ae Binary files /dev/null 
and b/gwcosmo/data/O1PSD_BBH-powerlaw_alpha_2.3_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform_snr_12.0.p differ diff --git a/gwcosmo/data/O2PSD_BBH-powerlaw_alpha_1.0_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform_snr_12.0.p b/gwcosmo/data/O2PSD_BBH-powerlaw_alpha_1.0_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform_snr_12.0.p new file mode 100644 index 0000000000000000000000000000000000000000..93096b0208f2dbfd82c85dba7aee81109088f15c Binary files /dev/null and b/gwcosmo/data/O2PSD_BBH-powerlaw_alpha_1.0_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform_snr_12.0.p differ diff --git a/gwcosmo/data/O2PSD_BBH-powerlaw_alpha_1.6_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform_snr_10.0.p b/gwcosmo/data/O2PSD_BBH-powerlaw_alpha_1.6_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform_snr_10.0.p new file mode 100644 index 0000000000000000000000000000000000000000..6b0a2c1df9781437a6b594f99edeeb602a30b415 Binary files /dev/null and b/gwcosmo/data/O2PSD_BBH-powerlaw_alpha_1.6_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform_snr_10.0.p differ diff --git a/gwcosmo/data/O2PSD_BBH-powerlaw_alpha_1.6_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform_snr_11.0.p b/gwcosmo/data/O2PSD_BBH-powerlaw_alpha_1.6_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform_snr_11.0.p new file mode 100644 index 0000000000000000000000000000000000000000..561286db1cfaa8ea31b0e1484e3ab48bfbb58e2c Binary files /dev/null and b/gwcosmo/data/O2PSD_BBH-powerlaw_alpha_1.6_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform_snr_11.0.p differ diff --git a/gwcosmo/data/O2PSD_BBH-powerlaw_alpha_1.6_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform.p b/gwcosmo/data/O2PSD_BBH-powerlaw_alpha_1.6_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform_snr_12.0.p similarity index 50% rename from gwcosmo/data/O2PSD_BBH-powerlaw_alpha_1.6_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform.p rename to gwcosmo/data/O2PSD_BBH-powerlaw_alpha_1.6_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform_snr_12.0.p index 
7b53c08b4fdfce27c367f8bcc3809e877eaea2a2..71e168a1ec67b3885c0ee1e6e839e2f6245646a5 100644 Binary files a/gwcosmo/data/O2PSD_BBH-powerlaw_alpha_1.6_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform.p and b/gwcosmo/data/O2PSD_BBH-powerlaw_alpha_1.6_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform_snr_12.0.p differ diff --git a/gwcosmo/data/O2PSD_BNS_Nsamps20000_full_waveform.p b/gwcosmo/data/O2PSD_BBH-powerlaw_alpha_1.6_Mmin_5.0_Mmax_50.0_Nsamps20000_full_waveform_snr_12.0.p similarity index 54% rename from gwcosmo/data/O2PSD_BNS_Nsamps20000_full_waveform.p rename to gwcosmo/data/O2PSD_BBH-powerlaw_alpha_1.6_Mmin_5.0_Mmax_50.0_Nsamps20000_full_waveform_snr_12.0.p index c0352ca09186f11baf6c9a0537944439f4fcdcd2..170886bba59292046155a016643a4b8630ff6d91 100644 Binary files a/gwcosmo/data/O2PSD_BNS_Nsamps20000_full_waveform.p and b/gwcosmo/data/O2PSD_BBH-powerlaw_alpha_1.6_Mmin_5.0_Mmax_50.0_Nsamps20000_full_waveform_snr_12.0.p differ diff --git a/gwcosmo/data/O2PSD_BBH-powerlaw_alpha_2.3_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform_snr_12.0.p b/gwcosmo/data/O2PSD_BBH-powerlaw_alpha_2.3_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform_snr_12.0.p new file mode 100644 index 0000000000000000000000000000000000000000..f7d38d9520552340a177ea9d30a11e5db5d87320 Binary files /dev/null and b/gwcosmo/data/O2PSD_BBH-powerlaw_alpha_2.3_Mmin_5.0_Mmax_100.0_Nsamps20000_full_waveform_snr_12.0.p differ diff --git a/gwcosmo/data/O2PSD_BNS_Nsamps20000_full_waveform_snr_12.0.p b/gwcosmo/data/O2PSD_BNS_Nsamps20000_full_waveform_snr_12.0.p new file mode 100644 index 0000000000000000000000000000000000000000..9acc4a37ee82a55e64244d2616d6d54f87de1fd7 Binary files /dev/null and b/gwcosmo/data/O2PSD_BNS_Nsamps20000_full_waveform_snr_12.0.p differ diff --git a/gwcosmo/gwcosmo.py b/gwcosmo/gwcosmo.py index 2e8bfd26a9e10fd5298c6d6f59c0ac758cbd9a49..95c8010e916589dd9e7933b6b84a808702b373ce 100644 --- a/gwcosmo/gwcosmo.py +++ b/gwcosmo/gwcosmo.py @@ -4,7 +4,7 @@ Rachel Gray, Archisman Ghosh, Ignacio Magana, 
John Veitch, Ankan Sur In general: p(x|z,H0,\Omega) is written as p(x|dl(z,H0))*p(x|\Omega) -p(x|dL(z,H0)): self.px_dl(dl(z,H0)) +p(x|dL(z,H0)): self.norms[H0]*self.px_dl(dl(z,H0)) p(x|\Omega): self.skymap.skyprob(ra,dec) or self.skymap.prob[idx] p(D|z,H0): pdet.pD_zH0_eval(z,H0) p(s|M(H0)): L_M(M) or L_mdl(m,dl(z,H0)) @@ -26,16 +26,22 @@ warnings.filterwarnings("ignore") from scipy.integrate import quad, dblquad from scipy.stats import ncx2, norm, truncnorm -from scipy.interpolate import splev, splrep +from scipy.interpolate import splev, splrep, interp1d from astropy import constants as const from astropy import units as u from ligo.skymap.moc import rasterize from ligo.skymap.core import uniq2ang +import astropy.constants as constants + + + import gwcosmo from .utilities.standard_cosmology import * from .utilities.schechter_function import * +from .utilities.schechter_params import * +from .utilities.calc_kcor import * import time import progressbar @@ -48,9 +54,7 @@ class gwcosmoLikelihood(object): Parameters ---------- - event_type : str - Type of gravitational wave event (either 'BNS', 'BNS-uniform' or 'BBH') - GW_data : gwcosmo.likelihood.posterior_samples.posterior_samples object + GW_data : gwcosmo.likelihood.posterior_samples.posterior_samples object Gravitational wave event samples skymap : gwcosmo.likelihood.skymap.skymap object Gravitational wave event skymap @@ -58,22 +62,15 @@ class gwcosmoLikelihood(object): The relevant galaxy catalog EM_counterpart : gwcosmo.prior.catalog.galaxyCatalog object, optional EM_counterpart data (default=None) - If not None, will default to using this over the galaxy_catalog + If not None, will default to using this over the galaxy_catalog Omega_m : float, optional The matter fraction of the universe (default=0.3) linear : bool, optional Use linear cosmology (default=False) - weighted : bool, optional - Use luminosity weighting (default=False) weights : str, optional Specifies type of luminosity weighting to use: 
'schechter' or 'trivial' (default='schechter') 'trivial' is only for testing purposes and should not be used in analysis - whole_cat : bool, optional - Does the galaxy catalog provided cover the whole sky? (default=True) - radec_lim : array_like, optional - Needed if whole_cat=False. RA and Dec limits on the catalog in the - format np.array([ramin,ramax,decmin,decmax]) in radians basic : bool, optional If True, uses pdet suitable for MDC analysis (default=False) uncertainty : bool, optional @@ -81,106 +78,146 @@ class gwcosmoLikelihood(object): for (default=False) rate : str, optional specifies rate evolution model, 'const' or 'evolving' - band : str, optional - specify B or K band catalog (and hence Schechter function parameters) (default='B') + Kcorr : bool, optional + If true, will attempt to apply K corrections (default=False) """ - def __init__(self, GW_data, skymap, galaxy_catalog, pdet, EM_counterpart=None, - Omega_m=0.308, linear=False, weighted=False, whole_cat=True, radec_lim=None, - basic=False, uncertainty=False, rate='constant', Lambda=3.0, area=0.999, band='B'): + def __init__(self, H0, GW_data, skymap, galaxy_catalog, pdet, reweight=False, EM_counterpart=None, + Omega_m=0.308, linear=False, weighted=False, basic=False, uncertainty=False, + rate='constant', population_params=None, area=0.999, Kcorr=False): + + self.H0 = H0 self.pdet = pdet self.Omega_m = Omega_m self.linear = linear self.weighted = weighted - self.whole_cat = whole_cat - self.radec_lim = radec_lim self.basic = basic self.uncertainty = uncertainty self.skymap = skymap self.area = area - self.band = band - - if self.band == 'B': - self.alpha = -1.07 - self.Mstar_obs = -20.457 - self.Mobs_min = -22.96 - self.Mobs_max = -12.96 - elif self.band == 'K': - self.alpha = -1.02 - self.Mstar_obs = -23.55 - self.Mobs_min = -27. 
- self.Mobs_max = -12.96 + self.Kcorr = Kcorr + self.reweight = reweight + + if population_params is None: + self.mass_distribution = pdet.mass_distribution + self.alpha = 1.6 + self.mmin = 5 + self.mmax = 100 + self.Lambda = 0 else: - raise Exception("Expected 'B' or 'K' band argument") - + self.mass_distribution = population_params['mass_distribution'] + self.alpha = population_params['alpha'] + self.mmin = population_params['mmin'] + self.mmax = population_params['mmax'] + self.Lambda = population_params['Lambda'] + + try: + self.band = galaxy_catalog.band + except: + self.band = 'B' # hack so that population analysis works from command line. + + sp = SchechterParams(self.band) + self.alpha_sp = sp.alpha + self.Mstar_obs = sp.Mstar + self.Mobs_min = sp.Mmin + self.Mobs_max = sp.Mmax + if galaxy_catalog == None: self.galaxy_catalog = None self.mth = None - self.EM_counterpart = None + self.EM_counterpart = EM_counterpart self.whole_cat = True else: - if self.uncertainty == False: - self.galaxy_catalog = galaxy_catalog - self.mth = galaxy_catalog.mth() - self.EM_counterpart = None - if EM_counterpart is not None: - self.EM_counterpart = EM_counterpart - else: - if galaxy_catalog is not None: - self.galaxy_catalog = galaxy_catalog - self.mth = galaxy_catalog.mth() - - self.EM_counterpart = None - if EM_counterpart is not None: - self.EM_counterpart = EM_counterpart.redshiftUncertainty(peculiarVelocityCorr=True) - + self.galaxy_catalog = galaxy_catalog + self.mth = galaxy_catalog.mth() + self.EM_counterpart = None + + if GW_data is not None: - distkernel = GW_data.marginalized_distance() - distmin = 0.5*np.amin(GW_data.distance) - distmax = 2.0*np.amax(GW_data.distance) - dl_array = np.linspace(distmin, distmax, 500) - vals = distkernel(dl_array) - + temps = [] + norms = [] + if reweight == True: + print("Reweighting samples") + seed = np.random.randint(10000) + bar = progressbar.ProgressBar() + z_max = [] + for H0 in bar(self.H0): + if reweight == True: + zkernel, 
norm = GW_data.marginalized_redshift_reweight(H0, self.mass_distribution, self.alpha, self.mmin, self.mmax) + else: + zkernel, norm = GW_data.marginalized_redshift(H0) + + zmin = np.min(zkernel.dataset) + zmax = np.max(zkernel.dataset) + z_max.append(3*zmax) + z_array = np.linspace(zmin, zmax, 500) + vals = zkernel(z_array) + temps.append(interp1d(z_array, vals,bounds_error=False,fill_value=0)) + norms.append(norm) + self.zmax_GW=z_max + self.temps = np.array(temps) + self.norms = np.array(norms) + if (GW_data is None and self.EM_counterpart is None): dl_array, vals = self.skymap.marginalized_distance() - + self.temp = splrep(dl_array,vals) + if (GW_data is None and self.EM_counterpart is not None): - counterpart = self.EM_counterpart.get_galaxy(0) + counterpart = self.EM_counterpart dl_array, vals = self.skymap.lineofsight_distance(counterpart.ra, counterpart.dec) + self.temp = splrep(dl_array,vals) - self.temp = splrep(dl_array,vals) # TODO: calculate mth for the patch of catalog being used, if whole_cat=False - if self.whole_cat == False: - if all(radec_lim) == None: - print('must include ra and dec limits for a catalog which only covers part of the sky') - else: - self.ra_min = radec_lim[0] - self.ra_max = radec_lim[1] - self.dec_min = radec_lim[2] - self.dec_max = radec_lim[3] - - def skynorm(dec,ra): - return np.cos(dec) - self.catalog_fraction = dblquad(skynorm,self.ra_min,self.ra_max,lambda x: self.dec_min,lambda x: self.dec_max,epsabs=0,epsrel=1.49e-4)[0]/(4.*np.pi) - self.rest_fraction = 1-self.catalog_fraction - print('This catalog covers {}% of the full sky'.format(self.catalog_fraction*100)) - else: - self.ra_min = 0.0 - self.ra_max = np.pi*2.0 - self.dec_min = -np.pi/2.0 - self.dec_max = np.pi/2.0 if (self.EM_counterpart is None and self.galaxy_catalog is not None): + self.radec_lim = self.galaxy_catalog.radec_lim[0] + if self.radec_lim == 0: + self.whole_cat = True + else: + self.whole_cat = False + self.ra_min = self.galaxy_catalog.radec_lim[1] + 
self.ra_max = self.galaxy_catalog.radec_lim[2] + self.dec_min = self.galaxy_catalog.radec_lim[3] + self.dec_max = self.galaxy_catalog.radec_lim[4] + + if self.Kcorr == True: + self.zcut = 0.5 + self.color_name = self.galaxy_catalog.color_name + self.color_limit = self.galaxy_catalog.color_limit + else: + self.zcut = 10. + self.color_limit = [-np.inf,np.inf] + + if self.whole_cat == False: + def skynorm(dec,ra): + return np.cos(dec) + self.catalog_fraction = dblquad(skynorm,self.ra_min,self.ra_max, + lambda x: self.dec_min, + lambda x: self.dec_max, + epsabs=0,epsrel=1.49e-4)[0]/(4.*np.pi) + self.rest_fraction = 1-self.catalog_fraction + print('This catalog covers {}% of the full sky'.format(self.catalog_fraction*100)) + + #find galaxies within the bounds of the galaxy catalog - ind = np.argwhere((self.ra_min <= galaxy_catalog.ra) & (galaxy_catalog.ra <= self.ra_max) & (self.dec_min <= galaxy_catalog.dec) & (galaxy_catalog.dec <= self.dec_max)) - self.allz = galaxy_catalog.z[ind].flatten() - self.allra = galaxy_catalog.ra[ind].flatten() - self.alldec = galaxy_catalog.dec[ind].flatten() - self.allm = galaxy_catalog.m[ind].flatten() - self.allsigmaz = galaxy_catalog.sigmaz[ind].flatten() + sel = np.argwhere((self.ra_min <= self.galaxy_catalog.ra) & \ + (self.galaxy_catalog.ra <= self.ra_max) & \ + (self.dec_min <= self.galaxy_catalog.dec) & \ + (self.galaxy_catalog.dec <= self.dec_max) & \ + ((self.galaxy_catalog.z-3*self.galaxy_catalog.sigmaz) <= self.zcut) & \ + (self.color_limit[0] <= galaxy_catalog.color) & \ + (galaxy_catalog.color <= self.color_limit[1])) + + self.allz = self.galaxy_catalog.z[sel].flatten() + self.allra = self.galaxy_catalog.ra[sel].flatten() + self.alldec = self.galaxy_catalog.dec[sel].flatten() + self.allm = self.galaxy_catalog.m[sel].flatten() + self.allsigmaz = self.galaxy_catalog.sigmaz[sel].flatten() + self.allcolor = self.galaxy_catalog.color[sel].flatten() + self.mth = self.galaxy_catalog.mth() self.nGal = len(self.allz) - + if 
self.uncertainty == False: self.nsmear_fine = 1 self.nsmear_coarse = 1 @@ -188,7 +225,7 @@ class gwcosmoLikelihood(object): else: self.nsmear_fine = 10000 self.nsmear_coarse = 20 - + self.pDG = None self.pGD = None self.pnGD = None @@ -198,11 +235,10 @@ class gwcosmoLikelihood(object): # should be well above any redshift value that could # impact the results for the considered H0 values. self.zmax = 10. - + self.zprior = redshift_prior(Omega_m=self.Omega_m, linear=self.linear) self.cosmo = fast_cosmology(Omega_m=self.Omega_m, linear=self.linear) self.rate = rate - self.Lambda = Lambda def ps_z(self, z): if self.rate == 'constant': @@ -210,12 +246,21 @@ class gwcosmoLikelihood(object): if self.rate == 'evolving': return (1.0+z)**self.Lambda - def px_dl(self, dl): + def px_dl(self, dl, temp): """ Returns a probability for a given distance dl from the interpolated function. """ - return splev(dl, self.temp, ext=3) + if self.reweight==True: + return splev(dl, temp, ext=3) + else: + return splev(dl, temp, ext=3)/dl**2 + + def pz_xH0(self,z,temp): + """ + Returns p(z|x,H0) + """ + return temp(z) def px_H0G(self, H0): """ @@ -251,7 +296,9 @@ class gwcosmoLikelihood(object): for i in range(nGalEM): counterpart = self.EM_counterpart.get_galaxy(i) tempsky = self.skymap.skyprob(counterpart.ra, counterpart.dec)*self.skymap.npix - tempdist = self.px_dl(self.cosmo.dl_zH0(counterpart.z, H0))/self.cosmo.dl_zH0(counterpart.z, H0)**2 # remove dl^2 prior from samples + tempdist = np.zeros(len(H0)) + for k in range(len(H0)): + tempdist[k] = self.norms[k]*self.pz_xH0(z,self.temps[k]) numnorm += tempdist*tempsky else: @@ -264,12 +311,13 @@ class gwcosmoLikelihood(object): decs = self.alldec[ind].flatten() ms = self.allm[ind].flatten() sigzs = self.allsigmaz[ind].flatten() - + colors = self.allcolor[ind].flatten() + if self.weighted: mlim = np.percentile(np.sort(ms),0.01) # more draws for galaxies in brightest 0.01 percent else: mlim = 1.0 - + bar = progressbar.ProgressBar() 
print("Calculating p(x|H0,G)") # loop over galaxies @@ -281,20 +329,28 @@ class gwcosmoLikelihood(object): numinner=np.zeros(len(H0)) a = (0.0 - zs[i]) / sigzs[i] zsmear = truncnorm.rvs(a, 5, loc=zs[i], scale=sigzs[i], size=nsmear) - # loop over random draws from galaxies - for n in range(nsmear): - if self.weighted: - weight = L_mdl(ms[i], self.cosmo.dl_zH0(zsmear[n], H0)) - else: - weight = 1.0 - tempdist = self.px_dl(self.cosmo.dl_zH0(zsmear[n], H0))/self.cosmo.dl_zH0(zsmear[n], H0)**2 # remove dl^2 prior from samples - numinner += tempdist*tempsky[i]*weight*self.ps_z(zsmear[n]) + zsmear = zsmear[np.argwhere(zsmear0: + for k in range(len(H0)): + tempdist[k,:] = self.norms[k]*self.pz_xH0(zsmear,self.temps[k])*self.ps_z(zsmear) + for n in range(len(zsmear)): + if self.weighted: + if self.Kcorr == True: + Kcorr = calc_kcor(self.band,zsmear[n],self.color_name,colour_value=colors[i]) + else: + Kcorr = 0. + weight = L_mdl(ms[i], self.cosmo.dl_zH0(zsmear[n], H0), Kcorr=Kcorr) + else: + weight = 1.0 + numinner += tempdist[:,n]*tempsky[i]*weight + normnuminner = numinner/nsmear num += normnuminner print("{} galaxies from this catalog lie in the event's {}% confidence interval".format(len(zs),self.area*100)) numnorm = num/self.nGal - + return numnorm @@ -303,24 +359,24 @@ class gwcosmoLikelihood(object): Returns p(D|H0,G) (the normalising factor for px_H0G). This corresponds to the denominator of Eq 12 in the methods doc. 
The probability of detection as a function of H0, conditioned on the source being inside the galaxy catalog - + Parameters ---------- H0 : float or array_like Hubble constant value(s) in kms-1Mpc-1 - + Returns ------- float or array_like - p(D|H0,G) - """ + p(D|H0,G) + """ den = np.zeros(len(H0)) if self.weighted: mlim = np.percentile(np.sort(self.allm),0.01) # more draws for galaxies in brightest 0.01 percent else: mlim = 1.0 - + bar = progressbar.ProgressBar() print("Calculating p(D|H0,G)") # loop over galaxies @@ -332,17 +388,23 @@ class gwcosmoLikelihood(object): deninner=np.zeros(len(H0)) a = (0.0 - self.allz[i]) / self.allsigmaz[i] zsmear = truncnorm.rvs(a, 5, loc=self.allz[i], scale=self.allsigmaz[i], size=nsmear) + zsmear = zsmear[np.argwhere(zsmear0: # loop over random draws from galaxies - for n in range(nsmear): - if self.weighted: - weight = L_mdl(self.allm[i], self.cosmo.dl_zH0(zsmear[n], H0)) - else: - weight = 1.0 - if self.basic: - prob = self.pdet.pD_dl_eval_basic(self.cosmo.dl_zH0(zsmear[n],H0)).flatten() - else: - prob = self.pdet.pD_zH0_eval(zsmear[n],H0).flatten() - deninner += prob*weight*self.ps_z(zsmear[n]) + for n in range(len(zsmear)): + if self.weighted: + if self.Kcorr == True: + Kcorr = calc_kcor(self.band,zsmear[n],self.color_name,colour_value=self.allcolor[i]) + else: + Kcorr = 0. + weight = L_mdl(self.allm[i], self.cosmo.dl_zH0(zsmear[n], H0), Kcorr=Kcorr) + else: + weight = 1.0 + if self.basic: + prob = self.pdet.pD_dl_eval_basic(self.cosmo.dl_zH0(zsmear[n],H0)).flatten() + else: + prob = self.pdet.pD_zH0_eval(zsmear[n],H0).flatten() + deninner += prob*weight*self.ps_z(zsmear[n]) normdeninner = deninner/nsmear den += normdeninner @@ -356,46 +418,46 @@ class gwcosmoLikelihood(object): Returns p(G|H0,D) This corresponds to Eq 16 in the doc. The probability that the host galaxy is in the catalogue given detection and H0. 
- + Parameters ---------- H0 : float or array_like Hubble constant value(s) in kms-1Mpc-1 - + Returns ------- float or array_like - p(G|H0,D) - """ + p(G|H0,D) + """ # Warning - this integral misbehaves for small values of H0 (<25 kms-1Mpc-1). TODO: fix this. - num = np.zeros(len(H0)) + num = np.zeros(len(H0)) den = np.zeros(len(H0)) - + # TODO: vectorize this if possible bar = progressbar.ProgressBar() print("Calculating p(G|H0,D)") for i in bar(range(len(H0))): - def I(z,M): + def I(M,z): if self.basic: - temp = SchechterMagFunction(H0=H0[i],Mstar_obs=self.Mstar_obs,alpha=self.alpha)(M)*self.pdet.pD_dl_eval_basic(self.cosmo.dl_zH0(z,H0[i]))*self.zprior(z)*self.ps_z(z) + temp = SchechterMagFunction(H0=H0[i],Mstar_obs=self.Mstar_obs,alpha=self.alpha_sp)(M)*self.pdet.pD_dl_eval_basic(self.cosmo.dl_zH0(z,H0[i]))*self.zprior(z)*self.ps_z(z) else: - temp = SchechterMagFunction(H0=H0[i],Mstar_obs=self.Mstar_obs,alpha=self.alpha)(M)*self.pdet.pD_zH0_eval(z,H0[i])*self.zprior(z)*self.ps_z(z) + temp = SchechterMagFunction(H0=H0[i],Mstar_obs=self.Mstar_obs,alpha=self.alpha_sp)(M)*self.pdet.pD_zH0_eval(z,H0[i])*self.zprior(z)*self.ps_z(z) if self.weighted: return temp*L_M(M) else: return temp - + # Mmin and Mmax currently corresponding to 10L* and 0.001L* respectively, to correspond with MDC # Will want to change in future. # TODO: test how sensitive this result is to changing Mmin and Mmax. 
Mmin = M_Mobs(H0[i],self.Mobs_min) Mmax = M_Mobs(H0[i],self.Mobs_max) - - num[i] = dblquad(I,Mmin,Mmax,lambda x: 0,lambda x: z_dlH0(dl_mM(self.mth,x),H0[i],linear=self.linear),epsabs=0,epsrel=1.49e-4)[0] - den[i] = dblquad(I,Mmin,Mmax,lambda x: 0,lambda x: self.zmax,epsabs=0,epsrel=1.49e-4)[0] + + num[i] = dblquad(I,0,self.zcut,lambda x: Mmin,lambda x: min(max(M_mdl(self.mth,self.cosmo.dl_zH0(x,H0[i])),Mmin),Mmax),epsabs=0,epsrel=1.49e-4)[0] + den[i] = dblquad(I,0,self.zmax,lambda x: Mmin,lambda x: Mmax,epsabs=0,epsrel=1.49e-4)[0] self.pGD = num/den - return self.pGD + return self.pGD def pnG_H0D(self,H0): @@ -403,34 +465,34 @@ class gwcosmoLikelihood(object): Returns 1.0 - pG_H0D(H0). This corresponds to Eq 17 in the doc. The probability that a galaxy is not in the catalogue given detection and H0 - + Parameters ---------- H0 : float or array_like Hubble constant value(s) in kms-1Mpc-1 - + Returns ------- float or array_like - p(bar{G}|H0,D) + p(bar{G}|H0,D) """ if all(self.pGD)==None: self.pGD = self.pG_H0D(H0) self.pnGD = 1.0 - self.pGD return self.pnGD - - + + def px_H0nG(self,H0,allsky=True): """ Returns p(x|H0,bar{G}). This corresponds to the numerator of Eq 19 in the doc The likelihood of the GW data given H0, conditioned on the source being outside the galaxy catalog for an - all sky or patchy galaxy catalog. + all sky or patchy galaxy catalog. 
Parameters ---------- H0 : float or array_like Hubble constant value(s) in kms-1Mpc-1 - + Returns ------- float or array_like @@ -442,9 +504,10 @@ class gwcosmoLikelihood(object): print("Calculating p(x|H0,bar{G})") for i in bar(range(len(H0))): - def Inum(z,M): - temp = self.px_dl(self.cosmo.dl_zH0(z,H0[i]))*self.zprior(z) \ - *SchechterMagFunction(H0=H0[i],Mstar_obs=self.Mstar_obs,alpha=self.alpha)(M)*self.ps_z(z)/self.cosmo.dl_zH0(z,H0[i])**2 # remove dl^2 prior from samples + def Inum(M,z): + temp = self.norms[i]*self.pz_xH0(z,self.temps[i])*self.zprior(z) \ + *SchechterMagFunction(H0=H0[i],Mstar_obs=self.Mstar_obs,alpha=self.alpha_sp)(M)*self.ps_z(z) + if self.weighted: return temp*L_M(M) else: @@ -453,18 +516,19 @@ class gwcosmoLikelihood(object): Mmin = M_Mobs(H0[i],self.Mobs_min) Mmax = M_Mobs(H0[i],self.Mobs_max) if allsky == True: - distnum[i] = dblquad(Inum,Mmin,Mmax,lambda x: z_dlH0(dl_mM(self.mth,x),H0[i],linear=self.linear),lambda x: self.zmax,epsabs=0,epsrel=1.49e-4)[0] + distnum[i] = dblquad(Inum,0.0,self.zcut, lambda x: min(max(M_mdl(self.mth,self.cosmo.dl_zH0(x,H0[i])),Mmin),Mmax), lambda x: Mmax,epsabs=0,epsrel=1.49e-4)[0] \ + + dblquad(Inum,self.zcut,self.zmax_GW[i], lambda x: Mmin, lambda x: Mmax,epsabs=0,epsrel=1.49e-4)[0] else: - distnum[i] = dblquad(Inum,Mmin,Mmax,lambda x: 0.0,lambda x: self.zmax,epsabs=0,epsrel=1.49e-4)[0] + distnum[i] = dblquad(Inum,0.0,self.zmax_GW[i],lambda x: Mmin,lambda x: Mmax,epsabs=0,epsrel=1.49e-4)[0] - # TODO: expand this case to look at a skypatch around the counterpart ('pencilbeam') + # TODO: expand this case to look at a skypatch around the counterpart ('pencilbeam') if self.EM_counterpart != None: nGalEM = self.EM_counterpart.nGal() for i in range(nGalEM): counterpart = self.EM_counterpart.get_galaxy(i) tempsky = self.skymap.skyprob(counterpart.ra,counterpart.dec)*self.skymap.npix num += distnum*tempsky - + else: pixind = range(self.skymap.npix) theta,rapix = hp.pix2ang(self.skymap.nside,pixind,nest=True) @@ 
-472,7 +536,7 @@ class gwcosmoLikelihood(object): idx = (self.ra_min <= rapix) & (rapix <= self.ra_max) & (self.dec_min <= decpix) & (decpix <= self.dec_max) if allsky == True: skynum = self.skymap.prob[idx].sum() - else: + else: skynum = 1.0 - self.skymap.prob[idx].sum() print("{}% of the event's sky probability is contained within the patch covered by the catalog".format(skynum*100)) num = distnum*skynum @@ -485,34 +549,34 @@ class gwcosmoLikelihood(object): This corresponds to the denominator of Eq 19 in the doc. The probability of detection as a function of H0, conditioned on the source being outside the galaxy catalog for an all sky or patchy galaxy catalog. - + Parameters ---------- H0 : float or array_like Hubble constant value(s) in kms-1Mpc-1 - + Returns ------- float or array_like - p(D|H0,bar{G}) - """ - # TODO: same fixes as for pG_H0D + p(D|H0,bar{G}) + """ + # TODO: same fixes as for pG_H0D den = np.zeros(len(H0)) - + def skynorm(dec,ra): return np.cos(dec) - + norm = dblquad(skynorm,self.ra_min,self.ra_max,lambda x: self.dec_min,lambda x: self.dec_max,epsabs=0,epsrel=1.49e-4)[0]/(4.*np.pi) - + bar = progressbar.ProgressBar() print("Calculating p(D|H0,bar{G})") for i in bar(range(len(H0))): - def I(z,M): + def I(M,z): if self.basic: - temp = SchechterMagFunction(H0=H0[i],Mstar_obs=self.Mstar_obs,alpha=self.alpha)(M)*self.pdet.pD_dl_eval_basic(self.cosmo.dl_zH0(z,H0[i]))*self.zprior(z)*self.ps_z(z) + temp = SchechterMagFunction(H0=H0[i],Mstar_obs=self.Mstar_obs,alpha=self.alpha_sp)(M)*self.pdet.pD_dl_eval_basic(self.cosmo.dl_zH0(z,H0[i]))*self.zprior(z)*self.ps_z(z) else: - temp = SchechterMagFunction(H0=H0[i],Mstar_obs=self.Mstar_obs,alpha=self.alpha)(M)*self.pdet.pD_zH0_eval(z,H0[i])*self.zprior(z)*self.ps_z(z) + temp = SchechterMagFunction(H0=H0[i],Mstar_obs=self.Mstar_obs,alpha=self.alpha_sp)(M)*self.pdet.pD_zH0_eval(z,H0[i])*self.zprior(z)*self.ps_z(z) if self.weighted: return temp*L_M(M) else: @@ -521,14 +585,15 @@ class 
gwcosmoLikelihood(object): Mmin = M_Mobs(H0[i],self.Mobs_min) Mmax = M_Mobs(H0[i],self.Mobs_max) if allsky == True: - den[i] = dblquad(I,Mmin,Mmax,lambda x: z_dlH0(dl_mM(self.mth,x),H0[i],linear=self.linear),lambda x: self.zmax,epsabs=0,epsrel=1.49e-4)[0] + den[i] = dblquad(I,0.0,self.zcut, lambda x: min(max(M_mdl(self.mth,self.cosmo.dl_zH0(x,H0[i])),Mmin),Mmax), lambda x: Mmax,epsabs=0,epsrel=1.49e-4)[0] \ + + dblquad(I,self.zcut,self.zmax, lambda x: Mmin, lambda x: Mmax,epsabs=0,epsrel=1.49e-4)[0] else: - den[i] = dblquad(I,Mmin,Mmax,lambda x: 0.0,lambda x: self.zmax,epsabs=0,epsrel=1.49e-4)[0] + den[i] = dblquad(I,0.0,self.zmax,lambda x: Mmin,lambda x: Mmax,epsabs=0,epsrel=1.49e-4)[0] if allsky == True: pDnG = den*norm else: pDnG = den*(1.-norm) - + return pDnG def px_H0_counterpart(self,H0): @@ -536,25 +601,27 @@ class gwcosmoLikelihood(object): Returns p(x|H0,counterpart) This corresponds to the numerator or Eq 6 in the doc. The likelihood of the GW data given H0 and direct counterpart. - + Parameters ---------- H0 : float or array_like Hubble constant value(s) in kms-1Mpc-1 - + Returns ------- float or array_like p(x|H0,counterpart) """ - numnorm = np.zeros(len(H0)) - nGalEM = self.EM_counterpart.nGal() - for i in range(nGalEM): - counterpart = self.EM_counterpart.get_galaxy(i) - tempsky = self.skymap.skyprob(counterpart.ra,counterpart.dec)*self.skymap.npix - tempdist = self.px_dl(self.cosmo.dl_zH0(counterpart.z,H0))/self.cosmo.dl_zH0(counterpart.z,H0)**2 # remove dl^2 prior from samples - numnorm += tempdist*tempsky - return numnorm + z = self.EM_counterpart.z + sigma = self.EM_counterpart.sigmaz + a = (0.0 - z) / sigma # boundary so samples don't go below 0 + zsmear = truncnorm.rvs(a, 5, loc=z, scale=sigma, size=10000) + + num = np.zeros(len(H0)) + for k in range(len(H0)): + num[k] = np.sum(self.norms[k]*self.pz_xH0(zsmear,self.temps[k])) + + return num def pD_H0(self,H0): @@ -562,28 +629,28 @@ class gwcosmoLikelihood(object): Returns p(D|H0). 
This corresponds to the denominator of Eq 6 in the doc. The probability of detection as a function of H0, marginalised over redshift, and absolute magnitude - + Parameters ---------- H0 : float or array_like Hubble constant value(s) in kms-1Mpc-1 - + Returns ------- float or array_like - p(D|H0) + p(D|H0) """ den = np.zeros(len(H0)) - + bar = progressbar.ProgressBar() print("Calculating p(D|H0)") for i in bar(range(len(H0))): def I(z,M): if self.basic: - temp = SchechterMagFunction(H0=H0[i],Mstar_obs=self.Mstar_obs,alpha=self.alpha)(M)*self.pdet.pD_dl_eval_basic(self.cosmo.dl_zH0(z,H0[i]))*self.zprior(z) + temp = SchechterMagFunction(H0=H0[i],Mstar_obs=self.Mstar_obs,alpha=self.alpha_sp)(M)*self.pdet.pD_dl_eval_basic(self.cosmo.dl_zH0(z,H0[i]))*self.zprior(z) else: - temp = SchechterMagFunction(H0=H0[i],Mstar_obs=self.Mstar_obs,alpha=self.alpha)(M)*self.pdet.pD_zH0_eval(z,H0[i])*self.zprior(z) + temp = SchechterMagFunction(H0=H0[i],Mstar_obs=self.Mstar_obs,alpha=self.alpha_sp)(M)*self.pdet.pD_zH0_eval(z,H0[i])*self.zprior(z) if self.weighted: return temp*L_M(M) else: @@ -594,13 +661,13 @@ class gwcosmoLikelihood(object): den[i] = dblquad(I,Mmin,Mmax,lambda x: 0.0,lambda x: self.zmax,epsabs=0,epsrel=1.49e-4)[0] - self.pDnG = den + self.pDnG = den return self.pDnG def px_H0_empty(self,H0): """ - Returns the numerator of the empty catalog case + Returns the numerator of the empty catalog case Parameters ---------- H0 : float or array_like @@ -614,9 +681,9 @@ class gwcosmoLikelihood(object): distnum = np.zeros(len(H0)) for i in range(len(H0)): def Inum(z): - temp = self.px_dl(self.cosmo.dl_zH0(z,H0[i]))*self.zprior(z)*self.ps_z(z)/self.cosmo.dl_zH0(z,H0[i])**2 # remove dl^2 prior from samples + temp = self.norms[i]*self.pz_xH0(z,self.temps[i])*self.zprior(z)*self.ps_z(z) return temp - distnum[i] = quad(Inum,0.0,self.zmax,epsabs=0,epsrel=1.49e-4)[0] + distnum[i] = quad(Inum,0.0,self.zmax_GW[i],epsabs=0,epsrel=1.49e-4)[0] skynum = 1. 
num = distnum*skynum return num @@ -634,8 +701,8 @@ class gwcosmoLikelihood(object): Returns ------- float or array_like - p(D|H0,bar{G}) - """ + p(D|H0,bar{G}) + """ den = np.zeros(len(H0)) for i in range(len(H0)): def I(z): @@ -649,7 +716,7 @@ class gwcosmoLikelihood(object): """ The likelihood for a single event This corresponds to Eq 3 (statistical) or Eq 6 (counterpart) in the doc, depending on parameter choices. - + Parameters ---------- H0 : float or array_like @@ -660,19 +727,19 @@ class gwcosmoLikelihood(object): Choice of counterpart analysis (default='direct') if 'direct', will assume the counterpart is correct with certainty if 'pencilbeam', will assume the host galaxy is along the counterpart's line of sight, but may be beyond it - + Returns ------- float or array_like p(x|H0,D) - """ + """ if self.EM_counterpart != None: - + if counterpart_case == 'direct': pxG = self.px_H0_counterpart(H0) self.pDG = self.pD_H0(H0) likelihood = pxG/self.pDG # Eq 6 - + # The pencilbeam case is currently coded up along the line of sight of the counterpart # For GW170817 the likelihood produced is identical to the 'direct' counterpart case # TODO: allow this to cover a small patch of sky @@ -687,8 +754,8 @@ class gwcosmoLikelihood(object): if all(self.pDnG)==None: self.pDnG = self.pD_H0nG(H0) pxnG = self.px_H0nG(H0) - - likelihood = self.pGD*(pxG/self.pDG) + self.pnGD*(pxnG/self.pDnG) # Eq 3 along a single line of sight + + likelihood = self.pGD*(pxG/self.pDG) + self.pnGD*(pxnG/self.pDnG) # Eq 3 along a single line of sight else: print("Please specify counterpart_case ('direct' or 'pencilbeam').") @@ -704,10 +771,9 @@ class gwcosmoLikelihood(object): pxG = self.px_H0G(H0) if all(self.pDG)==None: self.pDG = self.pD_H0G(H0) - + if complete==True: likelihood = pxG/self.pDG # Eq 3 with p(G|H0,D)=1 and p(bar{G}|H0,D)=0 - else: if all(self.pGD)==None: @@ -716,9 +782,9 @@ class gwcosmoLikelihood(object): self.pnGD = self.pnG_H0D(H0) if all(self.pDnG)==None: self.pDnG = 
self.pD_H0nG(H0) - + pxnG = self.px_H0nG(H0) - + likelihood = self.pGD*(pxG/self.pDG) + self.pnGD*(pxnG/self.pDnG) # Eq 3 @@ -734,28 +800,28 @@ class gwcosmoLikelihood(object): self.pnGD = np.zeros(len(H0)) pxnG = np.zeros(len(H0)) self.pDnG = np.ones(len(H0)) - + if (self.whole_cat==True) or (self.EM_counterpart != None) or (population==True): pDnG_rest_of_sky = np.ones(len(H0)) pxnG_rest_of_sky = np.zeros(len(H0)) self.rest_fraction = 0 self.catalog_fraction = 1 - + return likelihood,pxG,self.pDG,self.pGD,self.catalog_fraction, pxnG,self.pDnG,self.pnGD, pxnG_rest_of_sky,pDnG_rest_of_sky,self.rest_fraction def px_DGH0_skypatch(self,H0): """ - The "in catalog" part of the new skypatch method + The "in catalog" part of the new skypatch method using a catalog which follows the GW event's sky patch contour p(x|D,G,H0) - + Parameters ---------- H0 : float or array_like Hubble constant value(s) in kms-1Mpc-1 - + Returns ------- arrays @@ -773,70 +839,80 @@ class gwcosmoLikelihood(object): decs = self.alldec[ind].flatten() ms = self.allm[ind].flatten() sigzs = self.allsigmaz[ind].flatten() - + colors = self.allcolor[ind].flatten() + max_mth = np.amax(ms) N = len(zs) - + if self.weighted: mlim = np.percentile(np.sort(ms),0.01) # more draws for galaxies in brightest 0.01 percent else: - mlim = 1.0 - + mlim = 1.0 + bar = progressbar.ProgressBar() print("Calculating p(x|D,H0,G) for this event's skyarea") # loop over galaxies for i in bar(range(N)): numinner=np.zeros(len(H0)) deninner=np.zeros(len(H0)) - + if ms[i] <= mlim: #do more loops over brightest galaxies nsmear = self.nsmear_fine else: nsmear = self.nsmear_coarse - + a = (0.0 - zs[i]) / sigzs[i] zsmear = truncnorm.rvs(a, 5, loc=zs[i], scale=sigzs[i], size=nsmear) + zsmear = zsmear[np.argwhere(zsmear0: + for k in range(len(H0)): + tempdist[k,:] = self.norms[k]*self.pz_xH0(zsmear,self.temps[k])*self.ps_z(zsmear) # loop over random draws from galaxies - for n in range(nsmear): - if self.weighted: - weight = L_mdl(ms[i], 
self.cosmo.dl_zH0(zsmear[n], H0)) - else: - weight = 1.0 - tempdist = self.px_dl(self.cosmo.dl_zH0(zsmear[n], H0))/self.cosmo.dl_zH0(zsmear[n], H0)**2 # remove dl^2 prior from samples - numinner += tempdist*tempsky[i]*weight*self.ps_z(zsmear[n]) + for n in range(len(zsmear)): + if self.weighted: + if self.Kcorr == True: + Kcorr = calc_kcor(self.band,zsmear[n],self.color_name,colour_value=colors[i]) + else: + Kcorr = 0. + weight = L_mdl(ms[i], self.cosmo.dl_zH0(zsmear[n], H0), Kcorr=Kcorr) + else: + weight = 1.0 - if self.basic: - prob = self.pdet.pD_dl_eval_basic(self.cosmo.dl_zH0(zsmear[n],H0)).flatten() - else: - prob = self.pdet.pD_zH0_eval(zsmear[n],H0).flatten() - deninner += prob*weight*self.ps_z(zsmear[n]) + numinner += tempdist[:,n]*tempsky[i]*weight + + if self.basic: + prob = self.pdet.pD_dl_eval_basic(self.cosmo.dl_zH0(zsmear[n],H0)).flatten() + else: + prob = self.pdet.pD_zH0_eval(zsmear[n],H0).flatten() + deninner += prob*weight*self.ps_z(zsmear[n]) normnuminner = numinner/nsmear num += normnuminner normdeninner = deninner/nsmear den += normdeninner print("{} galaxies from this catalog lie in the event's {}% confidence interval".format(len(zs),self.area*100)) numnorm = num/self.nGal - + if N >= 500: self.mth = np.median(ms) else: self.mth = max_mth #update mth to reflect the area within the event's sky localisation (max m within patch) - + print('event patch apparent magnitude threshold: {}'.format(self.mth)) print("{} galaxies (out of a total possible {}) are supported by this event's skymap".format(N,self.nGal)) return num,den - + def px_DnGH0_skypatch(self,H0): """ - The "beyond catalog" part of the new skypatch method + The "beyond catalog" part of the new skypatch method using a catalog which follows the GW event's sky patch contour p(x|D,Gbar,H0) - + Parameters ---------- H0 : float or array_like Hubble constant value(s) in kms-1Mpc-1 - + Returns ------- arrays @@ -844,76 +920,75 @@ class gwcosmoLikelihood(object): """ distnum = np.zeros(len(H0)) 
distden = np.zeros(len(H0)) - + bar = progressbar.ProgressBar() print("Calculating p(x|D,H0,bar{G}) for this event's skyarea") for i in bar(range(len(H0))): - + Mmin = M_Mobs(H0[i],self.Mobs_min) Mmax = M_Mobs(H0[i],self.Mobs_max) - + def Inum(z,M): - temp = self.px_dl(self.cosmo.dl_zH0(z,H0[i]))*self.zprior(z) \ - *SchechterMagFunction(H0=H0[i],Mstar_obs=self.Mstar_obs,alpha=self.alpha)(M)*self.ps_z(z)/self.cosmo.dl_zH0(z,H0[i])**2 # remove dl^2 prior from samples + temp = self.norms[i]*self.pz_xH0(z,self.temps[i])*self.zprior(z) \ + *SchechterMagFunction(H0=H0[i],Mstar_obs=self.Mstar_obs,alpha=self.alpha_sp)(M)*self.ps_z(z) if self.weighted: return temp*L_M(M) else: return temp - distnum[i] = dblquad(Inum,Mmin,Mmax,lambda x: z_dlH0(dl_mM(self.mth,x),H0[i],linear=self.linear),lambda x: self.zmax,epsabs=0,epsrel=1.49e-4)[0] - + distnum[i] = dblquad(Inum,Mmin,Mmax,lambda x: z_dlH0(dl_mM(self.mth,x),H0[i],linear=self.linear),lambda x: self.zmax_GW[i],epsabs=0,epsrel=1.49e-4)[0] + def Iden(z,M): if self.basic: - temp = SchechterMagFunction(H0=H0[i],Mstar_obs=self.Mstar_obs,alpha=self.alpha)(M)*self.pdet.pD_dl_eval_basic(self.cosmo.dl_zH0(z,H0[i]))*self.zprior(z)*self.ps_z(z) + temp = SchechterMagFunction(H0=H0[i],Mstar_obs=self.Mstar_obs,alpha=self.alpha_sp)(M)*self.pdet.pD_dl_eval_basic(self.cosmo.dl_zH0(z,H0[i]))*self.zprior(z)*self.ps_z(z) else: - temp = SchechterMagFunction(H0=H0[i],Mstar_obs=self.Mstar_obs,alpha=self.alpha)(M)*self.pdet.pD_zH0_eval(z,H0[i])*self.zprior(z)*self.ps_z(z) + temp = SchechterMagFunction(H0=H0[i],Mstar_obs=self.Mstar_obs,alpha=self.alpha_sp)(M)*self.pdet.pD_zH0_eval(z,H0[i])*self.zprior(z)*self.ps_z(z) if self.weighted: return temp*L_M(M) else: return temp - distden[i] = dblquad(Iden,Mmin,Mmax,lambda x: z_dlH0(dl_mM(self.mth,x),H0[i],linear=self.linear),lambda x: self.zmax,epsabs=0,epsrel=1.49e-4)[0] - + distden[i] = dblquad(Iden,Mmin,Mmax,lambda x: z_dlH0(dl_mM(self.mth,x),H0[i],linear=self.linear),lambda x: 
self.zmax,epsabs=0,epsrel=1.49e-4)[0] + skynum = 1.0 num = distnum*skynum a = len(np.asarray(np.where(self.skymap.prob!=0)).flatten()) # find number of pixels with any GW event support skyden = a/self.skymap.npix den = distden*skyden - + return num,den - + def likelihood_skypatch(self,H0,complete=False): """ - The event likelihood using the new skypatch method + The event likelihood using the new skypatch method p(x|D,H0) - + Parameters ---------- H0 : float or array_like Hubble constant value(s) in kms-1Mpc-1 - + Returns ------- array the unnormalised likelihood - """ + """ pxDG_num,pxDG_den = self.px_DGH0_skypatch(H0) - + if complete==True: likelihood = pxDG_num/pxDG_den pGD = np.ones(len(H0)) pnGD = np.zeros(len(H0)) pxDnG_num = np.zeros(len(H0)) - pxDnG_den = np.ones(len(H0)) + pxDnG_den = np.ones(len(H0)) else: pGD = self.pG_H0D(H0) pnGD = self.pnG_H0D(H0) - + pxDnG_num,pxDnG_den = self.px_DnGH0_skypatch(H0) pxDnG = pxDnG_num/pxDnG_den - likelihood = pGD*(pxDG_num/pxDG_den) + pnGD*pxDnG + likelihood = pGD*(pxDG_num/pxDG_den) + pnGD*pxDnG return likelihood,pxDG_num,pxDG_den,pGD,pnGD,pxDnG_num,pxDnG_den - diff --git a/gwcosmo/likelihood/detection_probability.py b/gwcosmo/likelihood/detection_probability.py index 0ff9163b6eca0b19f734d3ef43bb246bb2b9255b..15773a84a2a9d00f66f858d02183f56c3dc02942 100644 --- a/gwcosmo/likelihood/detection_probability.py +++ b/gwcosmo/likelihood/detection_probability.py @@ -12,7 +12,8 @@ from scipy.stats import ncx2 from scipy.special import logit, expit import healpy as hp from gwcosmo.utilities.standard_cosmology import * -from gwcosmo.prior.priors import * +from gwcosmo.prior.priors import mass_sampling as mass_prior + import pickle import time import progressbar @@ -82,7 +83,7 @@ class DetectionProbability(object): self.full_waveform = full_waveform self.constant_H0 = constant_H0 self.seed = seed - + np.random.seed(seed) ASD_data = {} @@ -114,18 +115,18 @@ class DetectionProbability(object): self.psis = 
np.random.rand(N)*2.0*np.pi self.phis = np.random.rand(N)*2.0*np.pi if self.mass_distribution == 'BNS': - m1, m2 = BNS_uniform_distribution(N, mmin=1.0, mmax=3.0) + mass_priors = mass_prior(self.mass_distribution) self.dl_array = np.linspace(1.0e-100, 1000.0, 500) if self.mass_distribution == 'NSBH': - m1, _ = BBH_mass_distribution(N, mmin=self.Mmin, mmax=self.Mmax, alpha=self.alpha) - _, m2 = BNS_uniform_distribution(N, mmin=1.0, mmax=3.0) + mass_priors = mass_prior(self.mass_distribution, self.alpha, self.Mmin, self.Mmax) self.dl_array = np.linspace(1.0e-100, 1000.0, 500) if self.mass_distribution == 'BBH-powerlaw': - m1, m2 = BBH_mass_distribution(N, mmin=self.Mmin, mmax=self.Mmax, alpha=self.alpha) + mass_priors = mass_prior(self.mass_distribution, self.alpha, self.Mmin, self.Mmax) self.dl_array = np.linspace(1.0e-100, 15000.0, 500) if self.mass_distribution == 'BBH-constant': - m1, m2 = BBH_constant_mass(N, M1=self.M1, M2=self.M2) + mass_priors = mass_prior(self.mass_distribution, self.M1, self.M2) self.dl_array = np.linspace(1.0e-100, 15000.0, 500) + m1, m2 = mass_priors.sample(N) self.m1 = m1*1.988e30 self.m2 = m2*1.988e30 diff --git a/gwcosmo/likelihood/posterior_samples.py b/gwcosmo/likelihood/posterior_samples.py index 1a43112f956c1b32d1628bf64641c413b9470b49..3cac76385f6a3cfa51334eb9a309273e4c7a8258 100644 --- a/gwcosmo/likelihood/posterior_samples.py +++ b/gwcosmo/likelihood/posterior_samples.py @@ -10,6 +10,26 @@ from astropy import units as u from astropy import constants as const from astropy.table import Table import h5py +from bilby.core.prior import Uniform, PowerLaw, PriorDict, Constraint +from bilby import gw +from ..utilities.standard_cosmology import z_dlH0, fast_cosmology +from ..prior.priors import * + +from astropy.cosmology import FlatLambdaCDM, z_at_value +import astropy.units as u +import astropy.constants as constants +from scipy.interpolate import interp1d + +Om0 = 0.308 +zmin = 0.0001 +zmax = 10 +zs = np.linspace(zmin, zmax, 10000) 
+cosmo = fast_cosmology(Omega_m=Om0) + +def constrain_m1m2(parameters): + converted_parameters = parameters.copy() + converted_parameters['m1m2'] = parameters['mass_1'] - parameters['mass_2'] + return converted_parameters class posterior_samples(object): @@ -27,10 +47,26 @@ class posterior_samples(object): except: print("No posterior samples were specified") + + def E(self,z,Om): + return np.sqrt(Om*(1+z)**3 + (1.0-Om)) + + def dL_by_z_H0(self,z,H0,Om0): + speed_of_light = constants.c.to('km/s').value + cosmo = fast_cosmology(Omega_m=Om0) + return cosmo.dl_zH0(z, H0)/(1+z) + speed_of_light*(1+z)/(H0*self.E(z,Om0)) + + def jacobian_times_prior(self,z,H0,Om0=0.308): + + cosmo = fast_cosmology(Omega_m=Om0) + jacobian = np.power(1+z,2)*self.dL_by_z_H0(z,H0,Om0) + dl = cosmo.dl_zH0(z, H0) + return jacobian*(dl**2) + def load_posterior_samples(self): """ Method to handle different types of posterior samples file formats. - Currently it supports .dat (LALinference), .hdf5 (GWTC-1), + Currently it supports .dat (LALinference), .hdf5 (GWTC-1), .h5 (PESummary) and .hdf (pycbcinference) formats. """ if self.posterior_samples[-3:] == 'dat': @@ -95,14 +131,79 @@ class posterior_samples(object): self.nsamples = len(self.distance) file.close() - def marginalized_distance(self): + def marginalized_sky(self): + """ + Computes the marginalized sky localization posterior KDE. 
+ """ + return gaussian_kde(np.vstack((self.ra, self.dec))) + + def compute_source_frame_samples(self, H0): + cosmo = FlatLambdaCDM(H0=H0, Om0=Om0) + dLs = cosmo.luminosity_distance(zs).to(u.Mpc).value + z_at_dL = interp1d(dLs,zs) + redshift = z_at_dL(self.distance) + mass_1_source = self.mass_1/(1+redshift) + mass_2_source = self.mass_2/(1+redshift) + return redshift, mass_1_source, mass_2_source + + def reweight_samples(self, H0, name, alpha=1.6, mmin=5, mmax=100, seed=1): + # Prior distribution used in the LVC analysis + prior = distance_distribution(name=name) + # Prior distribution used in this work + new_prior = mass_distribution(name=name, alpha=alpha, mmin=mmin, mmax=mmax) + + # Get source frame masses + redshift, mass_1_source, mass_2_source = self.compute_source_frame_samples(H0) + + # Re-weight + weights = new_prior.joint_prob(mass_1_source,mass_2_source)/ prior.prob(self.distance) + np.random.seed(seed) + draws = np.random.uniform(0, max(weights), weights.shape) + keep = weights > draws + m1det = self.mass_1[keep] + m2det = self.mass_2[keep] + dl = self.distance[keep] + return dl, weights + + + def marginalized_redshift_reweight(self, H0, name, alpha=1.6, mmin=5, mmax=100): """ Computes the marginalized distance posterior KDE. """ - return gaussian_kde(self.distance) + # Prior distribution used in this work + new_prior = mass_distribution(name=name, alpha=alpha, mmin=mmin, mmax=mmax) - def marginalized_sky(self): + # Get source frame masses + redshift, mass_1_source, mass_2_source = self.compute_source_frame_samples(H0) + + # Re-weight + weights = new_prior.joint_prob(mass_1_source,mass_2_source)/self.jacobian_times_prior(redshift,H0) + norm = np.sum(weights) + return gaussian_kde(redshift,weights=weights), norm + + def marginalized_redshift(self, H0): """ - Computes the marginalized sky localization posterior KDE. + Computes the marginalized distance posterior KDE. 
""" - return gaussian_kde(np.vstack((self.ra, self.dec))) + # Get source frame masses + redshift, mass_1_source, mass_2_source = self.compute_source_frame_samples(H0) + + # remove dl^2 prior and include dz/ddL jacobian + weights = 1/(self.dL_by_z_H0(redshift,H0,Om0)*cosmo.dl_zH0(redshift,H0)**2) + norm = np.sum(weights) + return gaussian_kde(redshift,weights=weights), norm + + def marginalized_distance_reweight(self, H0, name, alpha=1.6, mmin=5, mmax=100, seed=1): + """ + Computes the marginalized distance posterior KDE. + """ + dl, weights = self.reweight_samples(H0, name, alpha=alpha, mmin=mmin, mmax=mmax, seed=seed) + norm = np.sum(weights)/len(weights) + return gaussian_kde(dl), norm + + def marginalized_distance(self, H0): + """ + Computes the marginalized distance posterior KDE. + """ + norm = 1 + return gaussian_kde(self.distance), norm diff --git a/gwcosmo/prior/catalog.py b/gwcosmo/prior/catalog.py index 2cb15c4a4f797707864e5ebcd13883938908ad77..737954ad75289593ef9f8638b24986e11e4d1e96 100644 --- a/gwcosmo/prior/catalog.py +++ b/gwcosmo/prior/catalog.py @@ -7,17 +7,61 @@ import numpy as np import healpy as hp import pandas as pd -from ligo.skymap.moc import nest2uniq, uniq2nest - -import pickle +import h5py import pkg_resources import time import progressbar +from ..utilities.standard_cosmology import * +from ..utilities.schechter_function import * # Global catalog_data_path = pkg_resources.resource_filename('gwcosmo', 'data/catalog_data/') +Kcorr_bands = {'B':'B', 'K':'K', 'u':'r', 'g':'r', 'r':'g', 'i':'g', 'z':'r'} +Kcorr_signs = {'B':1, 'K':1, 'u':1, 'g':1, 'r':-1, 'i':-1, 'z':-1} +color_names = {'B':None, 'K':None, 'u':'u - r', 'g':'g - r', 'r':'g - r', 'i':'g - i', 'z':'r - z'} +color_limits = {'u - r':[-0.1,2.9], 'g - r':[-0.1,1.9], 'g - i':[0,3], 'r - z':[0,1.5]} + +def redshiftUncertainty(ra, dec, z, sigmaz, m, tempsky, luminosity_weights): + """ + A function which "smears" out galaxies in the catalog, therefore + incorporating redshift 
uncetainties. + """ + sort = np.argsort(m) + ra, dec, z, sigmaz, m, tempsky = ra[sort], dec[sort], z[sort], sigmaz[sort], m[sort], tempsky[sort] + + if luminosity_weights is not 'constant': + mlim = np.percentile(m,0.01) # more draws for galaxies in brightest 0.01 percent + else: + mlim = 1.0 + + z_uncert = [] + ra_uncert = [] + dec_uncert = [] + m_uncert = [] + tempsky_uncert = [] + nsmear_uncert = [] + for k in range(len(m)): + if m[k]<=mlim: + nsmear = 10000 + else: + nsmear = 20 + z_uncert.append(np.random.normal(z[k],sigmaz[k],nsmear)) + ra_uncert.append(np.repeat(ra[k],nsmear)) + dec_uncert.append(np.repeat(dec[k],nsmear)) + m_uncert.append(np.repeat(m[k],nsmear)) + tempsky_uncert.append(np.repeat(tempsky[k],nsmear)) + nsmear_uncert.append(np.repeat(nsmear,nsmear)) + + z_uncert = np.concatenate(z_uncert).ravel() + ra_uncert = np.concatenate(ra_uncert).ravel() + dec_uncert = np.concatenate(dec_uncert).ravel() + m_uncert = np.concatenate(m_uncert).ravel() + tempsky_uncert = np.concatenate(tempsky_uncert).ravel() + nsmear_uncert = np.concatenate(nsmear_uncert).ravel() + + return ra_uncert, dec_uncert, z_uncert, m_uncert, tempsky_uncert, nsmear_uncert class galaxy(object): """ @@ -28,18 +72,43 @@ class galaxy(object): index : galaxy index ra : Right ascension in radians dec : Declination in radians - z : redshift - m : Apparent blue magnitude - sigmaz : redshift uncertainty + z : Galaxy redshift + m : Apparent magnitude dictonary + sigmaz : Galaxy redshift uncertainty """ - def __init__(self, index=0, ra=0, dec=0, z=0, m=0, sigmaz=0): + def __init__(self, index=0, ra=0, dec=0, z=1, m={'band':20}, sigmaz=0, Kcorr=0): self.index = index self.ra = ra self.dec = dec self.z = z self.m = m + self.Kcorr = Kcorr self.sigmaz = sigmaz - + self.dl = self.luminosity_distance() + self.M = self.absolute_magnitude() + self.L = self.luminosity() + + def luminosity_distance(self, H0=70.): + return dl_zH0(self.z, H0) + + def absolute_magnitude(self, H0=70., band=None): + if band 
is not None: + return M_mdl(self.m[band], self.dl) + else: + M = {} + for band in self.m: + M[band] = M_mdl(self.m[band], self.dl) + return M + + def luminosity(self, band=None): + if band is not None: + return L_M(self.M[band]) + else: + L = {} + for band in self.M: + L[band] = L_M(self.M[band]) + return L + class galaxyCatalog(object): """ @@ -47,98 +116,99 @@ class galaxyCatalog(object): Parameters ---------- - catalog_file : Path to catalog.p file - catalog_name : Name of stored catalog to be loaded + catalog_file : Path to catalog.hdf5 file + skymap_filename : Path to skymap.fits(.gz) file + thresh: Probablity contained within sky map region + band: key of band """ - def __init__(self, catalog_file=None, skypatch=None): + def __init__(self, catalog_file=None, skymap_filename=None, thresh=.9, band='B', Kcorr=False): if catalog_file is not None: self.catalog_file = catalog_file - self.dictionary, self.skypatch = self.__load_catalog() + self.dictionary = self.__load_catalog() + self.extract_galaxies(skymap_filename=skymap_filename, thresh=thresh, band=band, Kcorr=Kcorr) + if catalog_file is None: self.catalog_name = "" - self.dictionary = {'0': galaxy()} - self.skypatch = None - - self.ra, self.dec, self.z, self.m, self.sigmaz = self.extract_galaxies() + self.dictionary = {'ra': [], 'dec': [], 'z': [], 'sigmaz': [], + 'skymap_indices': [], 'radec_lim': [], 'm_{0}'.format(band): []} def __load_catalog(self): - cat = pickle.load(open(self.catalog_file, "rb")) - dictionary = cat[0] - skypatch = cat[1] - return dictionary, skypatch + return h5py.File(self.catalog_file,'r') def nGal(self): - return len(self.dictionary) + return len(self.dictionary['z']) def get_galaxy(self, index): - return self.dictionary[str(int(index))] + return galaxy(index, self.ra[index], self.dec[index], + self.z[index], {self.band: self.m[index]}, + self.sigmaz[index]) def mth(self): - ngal = self.nGal() - m = np.zeros(ngal) - for i in range(ngal): - m[i] = self.get_galaxy(i).m + m = self.m if 
sum(m) == 0: mth = 25.0 else: mth = np.median(m) return mth - def extract_galaxies(self): - nGal = self.nGal() - ra = np.zeros(nGal) - dec = np.zeros(nGal) - z = np.zeros(nGal) - m = np.zeros(nGal) - sigmaz = np.zeros(nGal) - for i in range(nGal): - gal = self.get_galaxy(i) - ra[i] = gal.ra - dec[i] = gal.dec - z[i] = gal.z - m[i] = gal.m - sigmaz[i] = gal.sigmaz - if all(m) == 0: # for mdc1 and mdc2 - m = np.ones(nGal) + def extract_galaxies(self, skymap_filename=None, thresh=.9, band='B', Kcorr=False): + band_key = 'm_{0}'.format(band) + if Kcorr is True: + band_Kcorr = Kcorr_bands[band] + band_Kcorr_key = 'm_{0}'.format(band_Kcorr) + self.color_name = color_names[band] + self.color_limit = color_limits[self.color_name] + if skymap_filename: + skymap = hp.read_map(skymap_filename, verbose=False) + gal_ind = self.dictionary['skymap_indices'][:] + if skymap_filename is None: + ra = self.dictionary['ra'][:] + dec = self.dictionary['dec'][:] + z = self.dictionary['z'][:] + sigmaz = self.dictionary['sigmaz'][:] + m = self.dictionary[band_key][:] + if Kcorr is True: + m_K = self.dictionary[band_Kcorr_key][:] + radec_lim = self.dictionary['radec_lim'][:] + else: + ind = self.galaxies_within_region(skymap, gal_ind, thresh) + ra = self.dictionary['ra'][ind] + dec = self.dictionary['dec'][ind] + z = self.dictionary['z'][ind] + sigmaz = self.dictionary['sigmaz'][ind] + m = self.dictionary[band_key][ind] + if Kcorr is True: + m_K = self.dictionary[band_Kcorr_key][ind] + radec_lim = self.dictionary['radec_lim'][:] + self.band = band + self.radec_lim = radec_lim + if Kcorr is True: + mask = (~np.isnan(m))&(~np.isnan(m_K)) + m_K = m_K[mask] + else: + mask = ~np.isnan(m) + ra, dec, z, m, sigmaz = ra[mask], dec[mask], z[mask], m[mask], sigmaz[mask] + self.ra, self.dec, self.z, self.sigmaz, self.m = ra, dec, z, sigmaz, m + if Kcorr is True: + self.color = Kcorr_signs[band]*(m - m_K) + else: + self.color = np.zeros(len(m)) return ra, dec, z, m, sigmaz + + def above_percentile(self, 
skymap, thresh): + """Returns indices of array within the given threshold + credible region.""" + # Sort indicies of sky map + ind_sorted = np.argsort(-skymap) + # Cumulatively sum the sky map + cumsum = np.cumsum(skymap[ind_sorted]) + # Find indicies contained within threshold area + lim_ind = np.where(cumsum > thresh)[0][0] + return ind_sorted[:lim_ind] + + def galaxies_within_region(self, skymap, gal_ind, thresh): + """Returns boolean array of whether galaxies are within + the sky map's credible region above the given threshold""" + skymap_ind = self.above_percentile(skymap, thresh) + return np.in1d(gal_ind, skymap_ind) - def redshiftUncertainty(self, peculiarVelocityCorr=False, nsmear=10): - """ - A function which "smears" out galaxies in the catalog, therefore - incorporating redshift uncetainties. - """ - z_uncert = [] - ralist, declist, zlist, mlist, sigmaz = self.extract_galaxies() - if peculiarVelocityCorr == True: - nsmear = 10000 - if peculiarVelocityCorr == False: - nsmear = nsmear - zmaxmax = 1.0 - ra_uncert = np.repeat(ralist, nsmear) - dec_uncert = np.repeat(declist, nsmear) - m_uncert = np.repeat(mlist, nsmear) - for i, z in enumerate(zlist): - z_uncert.append(z+sigmaz[i]*np.random.randn(nsmear)) - z_uncert = np.array(z_uncert).flatten() - sel = (z_uncert > 0.) & (z_uncert < zmaxmax) - z_uncert = z_uncert[sel] - ra_uncert = ra_uncert[sel] - dec_uncert = dec_uncert[sel] - m_uncert = m_uncert[sel] - - galaxies = {} - index = np.arange(len(z_uncert)) - - bar = progressbar.ProgressBar() - print("Loading galaxy catalog.") - for i in bar(index): - gal = gwcosmo.prior.catalog.galaxy() - gal.ra = ra_uncert[i] - gal.dec = dec_uncert[i] - gal.z = z_uncert[i] - gal.m = m_uncert[i] - gal.sigmaz = 0. 
- galaxies[str(i)] = gal - catalog = gwcosmo.prior.catalog.galaxyCatalog() - catalog.dictionary = galaxies - return catalog diff --git a/gwcosmo/prior/priors.py b/gwcosmo/prior/priors.py index b8b4879c267ddb7f710c8f1fbcc01c469ea855c3..dd912d91470947813ce02e46810fb155922b7375 100644 --- a/gwcosmo/prior/priors.py +++ b/gwcosmo/prior/priors.py @@ -12,10 +12,13 @@ from scipy.stats import ncx2, norm from scipy.interpolate import splev, splrep from astropy import constants as const from astropy import units as u +from bilby.core.prior import Uniform, PowerLaw, PriorDict, Constraint, DeltaFunction +from bilby import gw import gwcosmo + def pH0(H0, prior='log'): """ Returns p(H0) @@ -40,134 +43,150 @@ def pH0(H0, prior='log'): if prior == 'log': return 1./H0 - -def BBH_mass_distribution(N, mmin=5., mmax=40., alpha=1.6): - """ - Returns p(m1,m2) - The prior on the mass distribution that follows a power - law for BBHs. +class mass_sampling(object): + def __init__(self, name, alpha=1.6, mmin=5, mmax=50, m1=50, m2=50): + self.name = name + self.alpha = alpha + self.mmin = mmin + self.mmax = mmax + self.m1 = m1 + self.m2 = m2 + + def sample(self, N_samples): + if self.name == 'BBH-powerlaw': + m1, m2 = self.Binary_mass_distribution(name='BBH-powerlaw', N=N_samples, mmin=self.mmin, mmax=self.mmax, alpha=self.alpha) + elif self.name == 'BNS': + m1, m2 = self.Binary_mass_distribution(name='BNS', N=N_samples, mmin=1.0, mmax=3.0, alpha=0.0) + elif self.name == 'NSBH': + m1, m2 = self.Binary_mass_distribution(name='NSBH', N=N_samples, mmin=self.mmin, mmax=self.mmax, alpha=self.alpha) + return m1, m2 + + def Binary_mass_distribution(self, name, N, mmin=5., mmax=40., alpha=1.6): + """ + Returns p(m1,m2) + The prior on the mass distribution that follows a power + law for BBHs. 
+ + Parameters + ---------- + N : integer + Number of masses sampled + mmin : float + minimum mass + mmax : float + maximum mass + alpha : float + slope of the power law p(m) = m^-\alpha where alpha > 0 + + Returns + ------- + float or array_like + m1, m2 + """ + alpha_ = -1*alpha + u = np.random.rand(N) + if alpha_ != -1: + m1 = (u*(mmax**(alpha_+1)-mmin**(alpha_+1)) + + mmin**(alpha_+1))**(1.0/(alpha_+1)) + print('Powerlaw mass distribution with alpha = ' + str(alpha)) + else: + m1 = np.exp(u*(np.log(mmax)-np.log(mmin))+np.log(mmin)) + print('Flat in log mass distribution') + if name== 'NSBH': + m2 = np.random.uniform(low=1.0, high=3.0, size=N) + else: + m2 = np.random.uniform(low=mmin, high=m1) + return m1, m2 - Parameters - ---------- - N : integer - Number of masses sampled - mmin : float - minimum mass - mmax : float - maximum mass - alpha : float - slope of the power law p(m) = m^-\alpha where alpha > 0 - Returns - ------- - float or array_like - m1, m2 - """ - alpha_ = -1*alpha - u = np.random.rand(N) - if alpha_ != -1: - m1 = (u*(mmax**(alpha_+1)-mmin**(alpha_+1)) + - mmin**(alpha_+1))**(1.0/(alpha_+1)) - print('Powerlaw mass distribution with alpha = ' + str(alpha)) - else: - m1 = np.exp(u*(np.log(mmax)-np.log(mmin))+np.log(mmin)) - print('Flat in log mass distribution') - m2 = np.random.uniform(low=mmin, high=m1) - return m1, m2 +class mass_distribution(object): + def __init__(self, name, alpha=1.6, mmin=5, mmax=50, m1=50, m2=50): + self.name = name + self.alpha = alpha + self.mmin = mmin + self.mmax = mmax + self.m1 = m1 + self.m2 = m2 -def BBH_constant_mass(N, M1=50., M2=50.): - """ - Returns p(M1,M2) for given masses. 
+ dist = {} - Parameters - ---------- - N : integer - Number of masses sampled - M1 : float - source mass - M2 : float - source mass - ------- - float or array_like - m1, m2 - """ - m1=[] - m2=[] - while len(m1) < N: - m1.append(M1) - m2.append(M2) - m1 = np.array(m1) - m2 = np.array(m2) - print('BBH source masses M1 = ' + str(M1) + ' M2 = '+ str(M2)) - return m1, m2 - - -def BNS_gaussian_distribution(N, mean=1.35, sigma=0.15): - """ - Returns p(m1,m2) - The prior on the mass distribution that follows gaussian for BNSs. + if self.name == 'BBH-powerlaw': - Parameters - ---------- - N : integer - Number of masses sampled - mean : float - mean of gaussian dist - sigma : float - std of gaussian dist + if self.alpha != 1: + dist['mass_1'] = lambda m1s: (np.power(m1s,-self.alpha)/(1-self.alpha))*(np.power(self.mmax,1-self.alpha)-np.power(self.mmin,1-self.alpha)) + else: + dist['mass_1'] = lambda m1s: np.power(m1s,-self.alpha)/(np.log(self.mmax)-np.log(self.mmin)) - Returns - ------- - float or array_like - mass1, mass2 - """ - mass1 = [] - mass2 = [] - while len(mass1) < N: - m1 = np.random.normal(mean, sigma) - m2 = np.random.normal(mean, sigma) - if m2 > m1: - m3 = m2 - m2 = m1 - m1 = m3 - mass1.append(m1) - mass2.append(m2) - mass1 = np.array(mass1) - mass2 = np.array(mass2) - return mass1, mass2 - - -def BNS_uniform_distribution(N, mmin=1., mmax=3.): - """ - Returns p(m1,m2) - The prior on the mass distribution that follows gaussian for BNSs. 
+ dist['mass_2'] = lambda m1s: 1/(m1s-self.mmin) - Parameters - ---------- - N : integer - Number of masses sampled - mmin : float - minimum mass - mmax : float - maximum mass + if self.name == 'BNS': + # We assume p(m1,m2)=p(m1)p(m2) + dist['mass_1'] = lambda m1s: 1/(3-1) + dist['mass_2'] = lambda m2s: 1/(3-1) - Returns - ------- - float or array_like - mass1, mass2 - """ - mass1 = [] - mass2 = [] - while len(mass1) < N: - m1 = np.random.uniform(mmin, mmax) - m2 = np.random.uniform(mmin, mmax) - if m2 > m1: - m3 = m2 - m2 = m1 - m1 = m3 - mass1.append(m1) - mass2.append(m2) - mass1 = np.array(mass1) - mass2 = np.array(mass2) - return mass1, mass2 + if self.name == 'NSBH': + if self.alpha != 1: + dist['mass_1'] = lambda m1s: (np.power(m1s,-self.alpha)/(1-self.alpha))*(np.power(self.mmax,1-self.alpha)-np.power(self.mmin,1-self.alpha)) + else: + dist['mass_1'] = lambda m1s: np.power(m1s,-self.alpha)/(np.log(self.mmax)-np.log(self.mmin)) + + dist['mass_2'] = lambda m2s: 1/(3-1) + + if self.name == 'BBH-constant': + dist['mass_1'] = DeltaFunction(self.m1) + dist['mass_2'] = DeltaFunction(self.m2) + + self.dist = dist + + def joint_prob(self, ms1, ms2): + + if self.name == 'BBH-powerlaw': + # ms1 is not a bug in mass_2. 
That depends only on that var
+
+            arr_result = self.dist['mass_1'](ms1)*self.dist['mass_2'](ms1)
+            arr_result[(ms1>self.mmax) | (ms1<self.mmin) | (ms2<self.mmin) | (ms2>ms1)]=0
+
+        if self.name == 'BNS':
+            arr_result = self.dist['mass_1'](ms1)*self.dist['mass_2'](ms2)
+            arr_result[(ms1>3) | (ms1<1) | (ms2>3) | (ms2<1)]=0
+
+        if self.name == 'NSBH':
+            arr_result = self.dist['mass_1'](ms1)*self.dist['mass_2'](ms2)
+            arr_result[(ms1>self.mmax) | (ms1<self.mmin) | (ms2>3) | (ms2<1)]=0
+
+        if self.name == 'BBH-constant':
+            arr_result = self.dist['mass_1'].prob(ms1)*self.dist['mass_2'].prob(ms2)
+
+        return arr_result
+
+
+class distance_distribution(object):
+    def __init__(self, name):
+        self.name = name
+
+        if self.name == 'BBH-powerlaw':
+            dist = PriorDict(conversion_function=constrain_m1m2)
+            dist['luminosity_distance'] = PowerLaw(alpha=2, minimum=1, maximum=15000)
+
+        if self.name == 'BNS':
+            dist = PriorDict(conversion_function=constrain_m1m2)
+            dist['luminosity_distance'] = PowerLaw(alpha=2, minimum=1, maximum=1000)
+
+        if self.name == 'NSBH':
+            dist = PriorDict(conversion_function=constrain_m1m2)
+            dist['luminosity_distance'] = PowerLaw(alpha=2, minimum=1, maximum=1000)
+
+        if self.name == 'BBH-constant':
+            dist = PriorDict()
+            dist['luminosity_distance'] = PowerLaw(alpha=2, minimum=1, maximum=15000)
+
+        self.dist = dist
+
+    def sample(self, N_samples):
+        samples = self.dist.sample(N_samples)
+        return samples['luminosity_distance']
+
+    def prob(self, samples):
+        return self.dist['luminosity_distance'].prob(samples)
diff --git a/gwcosmo/utilities/calc_kcor.py b/gwcosmo/utilities/calc_kcor.py
new file mode 100644
index 0000000000000000000000000000000000000000..8beea522ee6b941e7dc874c859d77f2708cac312
--- /dev/null
+++ b/gwcosmo/utilities/calc_kcor.py
@@ -0,0 +1,417 @@
+def calc_kcor(filter_name, redshift, colour_name, colour_value):
+    """
+    K-corrections calculator in Python. See http://kcor.sai.msu.ru for the
+    reference. Available filter-colour combinations must be present in the
+    `coeff` dictionary keys.
+
+    @type filter_name: string
+    @param filter_name: Name of the filter to calculate K-correction for, e.g.
+ 'u', 'g', 'r' for some of the SDSS filters, or 'J2', + 'H2', 'Ks2' for 2MASS filters (must be present in + `coeff` dictionary) + @type redshift: float + @param redshift: Redshift of a galaxy, should be between 0.0 and 0.5 (no + check is made, however) + @type colour_name: string + @param colour_name: Human name of the colour, e.g. 'u - g', 'g - r', + 'V - Rc', 'J2 - Ks2' (must be present in `coeff` dictionary) + @type colour_value: float + @param colour_value: Value of the galaxy's colour, specified in colour_name + @rtype: float + @return: K-correction in specified filter for given redshift and + colour + @version: 2012 + @author: Chilingarian, I., Melchior. A.-L., and Zolotukhin, I. + @license: Simplified BSD license, see http://kcor.sai.msu.ru/license.txt + + Usage example: + + >>> calc_kcor('g', 0.2, 'g - r', 1.1) + 0.5209713975999992 + >>> calc_kcor('Ic', 0.4, 'V - Ic', 2.0) + 0.310069919999993 + >>> calc_kcor('H', 0.5, 'H - K', 0.1) + -0.14983142499999502 + + """ + coeff = { + + 'B_BRc': [ + [0,0,0,0], + [-1.99412,3.45377,0.818214,-0.630543], + [15.9592,-3.99873,6.44175,0.828667], + [-101.876,-44.4243,-12.6224,0], + [299.29,86.789,0,0], + [-304.526,0,0,0], + ], + + 'B_BIc': [ + [0,0,0,0], + [2.11655,-5.28948,4.5095,-0.8891], + [24.0499,-4.76477,-1.55617,1.85361], + [-121.96,7.73146,-17.1605,0], + [236.222,76.5863,0,0], + [-281.824,0,0,0], + ], + + 'H2_H2Ks2': [ + [0,0,0,0], + [-1.88351,1.19742,10.0062,-18.0133], + [11.1068,20.6816,-16.6483,139.907], + [-79.1256,-406.065,-48.6619,-430.432], + [551.385,1453.82,354.176,473.859], + [-1728.49,-1785.33,-705.044,0], + [2027.48,950.465,0,0], + [-741.198,0,0,0], + ], + + 'H2_J2H2': [ + [0,0,0,0], + [-4.99539,5.79815,4.19097,-7.36237], + [70.4664,-202.698,244.798,-65.7179], + [-142.831,553.379,-1247.8,574.124], + [-414.164,1206.23,467.602,-799.626], + [763.857,-2270.69,1845.38,0], + [-563.812,-1227.82,0,0], + [1392.67,0,0,0], + ], + + 'Ic_VIc': [ + [0,0,0,0], + [-7.92467,17.6389,-15.2414,5.12562], + 
[15.7555,-1.99263,10.663,-10.8329], + [-88.0145,-42.9575,46.7401,0], + [266.377,-67.5785,0,0], + [-164.217,0,0,0], + ], + + 'J2_J2Ks2': [ + [0,0,0,0], + [-2.85079,1.7402,0.754404,-0.41967], + [24.1679,-34.9114,11.6095,0.691538], + [-32.3501,59.9733,-29.6886,0], + [-30.2249,43.3261,0,0], + [-36.8587,0,0,0], + ], + + 'J2_J2H2': [ + [0,0,0,0], + [-0.905709,-4.17058,11.5452,-7.7345], + [5.38206,-6.73039,-5.94359,20.5753], + [-5.99575,32.9624,-72.08,0], + [-19.9099,92.1681,0,0], + [-45.7148,0,0,0], + ], + + 'Ks2_J2Ks2': [ + [0,0,0,0], + [-5.08065,-0.15919,4.15442,-0.794224], + [62.8862,-61.9293,-2.11406,1.56637], + [-191.117,212.626,-15.1137,0], + [116.797,-151.833,0,0], + [41.4071,0,0,0], + ], + + 'Ks2_H2Ks2': [ + [0,0,0,0], + [-3.90879,5.05938,10.5434,-10.9614], + [23.6036,-97.0952,14.0686,28.994], + [-44.4514,266.242,-108.639,0], + [-15.8337,-117.61,0,0], + [28.3737,0,0,0], + ], + + 'Rc_BRc': [ + [0,0,0,0], + [-2.83216,4.64989,-2.86494,0.90422], + [4.97464,5.34587,0.408024,-2.47204], + [-57.3361,-30.3302,18.4741,0], + [224.219,-19.3575,0,0], + [-194.829,0,0,0], + ], + + 'Rc_VRc': [ + [0,0,0,0], + [-3.39312,16.7423,-29.0396,25.7662], + [5.88415,6.02901,-5.07557,-66.1624], + [-50.654,-13.1229,188.091,0], + [131.682,-191.427,0,0], + [-36.9821,0,0,0], + ], + + 'U_URc': [ + [0,0,0,0], + [2.84791,2.31564,-0.411492,-0.0362256], + [-18.8238,13.2852,6.74212,-2.16222], + [-307.885,-124.303,-9.92117,12.7453], + [3040.57,428.811,-124.492,-14.3232], + [-10677.7,-39.2842,197.445,0], + [16022.4,-641.309,0,0], + [-8586.18,0,0,0], + ], + + 'V_VIc': [ + [0,0,0,0], + [-1.37734,-1.3982,4.76093,-1.59598], + [19.0533,-17.9194,8.32856,0.622176], + [-86.9899,-13.6809,-9.25747,0], + [305.09,39.4246,0,0], + [-324.357,0,0,0], + ], + + 'V_VRc': [ + [0,0,0,0], + [-2.21628,8.32648,-7.8023,9.53426], + [13.136,-1.18745,3.66083,-41.3694], + [-117.152,-28.1502,116.992,0], + [365.049,-93.68,0,0], + [-298.582,0,0,0], + ], + + 'FUV_FUVNUV': [ + [0,0,0,0], + [-0.866758,0.2405,0.155007,0.0807314], + 
[-1.17598,6.90712,3.72288,-4.25468], + [135.006,-56.4344,-1.19312,25.8617], + [-1294.67,245.759,-84.6163,-40.8712], + [4992.29,-477.139,174.281,0], + [-8606.6,316.571,0,0], + [5504.2,0,0,0], + ], + + 'FUV_FUVu': [ + [0,0,0,0], + [-1.67589,0.447786,0.369919,-0.0954247], + [2.10419,6.49129,-2.54751,0.177888], + [15.6521,-32.2339,4.4459,0], + [-48.3912,37.1325,0,0], + [37.0269,0,0,0], + ], + + 'g_gi': [ + [0,0,0,0], + [1.59269,-2.97991,7.31089,-3.46913], + [-27.5631,-9.89034,15.4693,6.53131], + [161.969,-76.171,-56.1923,0], + [-204.457,217.977,0,0], + [-50.6269,0,0,0], + ], + + 'g_gz': [ + [0,0,0,0], + [2.37454,-4.39943,7.29383,-2.90691], + [-28.7217,-20.7783,18.3055,5.04468], + [220.097,-81.883,-55.8349,0], + [-290.86,253.677,0,0], + [-73.5316,0,0,0], + ], + + 'g_gr': [ + [0,0,0,0], + [-2.45204,4.10188,10.5258,-13.5889], + [56.7969,-140.913,144.572,57.2155], + [-466.949,222.789,-917.46,-78.0591], + [2906.77,1500.8,1689.97,30.889], + [-10453.7,-4419.56,-1011.01,0], + [17568,3236.68,0,0], + [-10820.7,0,0,0], + ], + + 'H_JH': [ + [0,0,0,0], + [-1.6196,3.55254,1.01414,-1.88023], + [38.4753,-8.9772,-139.021,15.4588], + [-417.861,89.1454,808.928,-18.9682], + [2127.81,-405.755,-1710.95,-14.4226], + [-5719,731.135,1284.35,0], + [7813.57,-500.95,0,0], + [-4248.19,0,0,0], + ], + + 'H_HK': [ + [0,0,0,0], + [0.812404,7.74956,1.43107,-10.3853], + [-23.6812,-235.584,-147.582,188.064], + [283.702,2065.89,721.859,-713.536], + [-1697.78,-7454.39,-1100.02,753.04], + [5076.66,11997.5,460.328,0], + [-7352.86,-7166.83,0,0], + [4125.88,0,0,0], + ], + + 'i_gi': [ + [0,0,0,0], + [-2.21853,3.94007,0.678402,-1.24751], + [-15.7929,-19.3587,15.0137,2.27779], + [118.791,-40.0709,-30.6727,0], + [-134.571,125.799,0,0], + [-55.4483,0,0,0], + ], + + 'i_ui': [ + [0,0,0,0], + [-3.91949,3.20431,-0.431124,-0.000912813], + [-14.776,-6.56405,1.15975,0.0429679], + [135.273,-1.30583,-1.81687,0], + [-264.69,15.2846,0,0], + [142.624,0,0,0], + ], + + 'J_JH': [ + [0,0,0,0], + 
[0.129195,1.57243,-2.79362,-0.177462], + [-15.9071,-2.22557,-12.3799,-2.14159], + [89.1236,65.4377,36.9197,0], + [-209.27,-123.252,0,0], + [180.138,0,0,0], + ], + + 'J_JK': [ + [0,0,0,0], + [0.0772766,2.17962,-4.23473,-0.175053], + [-13.9606,-19.998,22.5939,-3.99985], + [97.1195,90.4465,-21.6729,0], + [-283.153,-106.138,0,0], + [272.291,0,0,0], + ], + + 'K_HK': [ + [0,0,0,0], + [-2.83918,-2.60467,-8.80285,-1.62272], + [14.0271,17.5133,42.3171,4.8453], + [-77.5591,-28.7242,-54.0153,0], + [186.489,10.6493,0,0], + [-146.186,0,0,0], + ], + + 'K_JK': [ + [0,0,0,0], + [-2.58706,1.27843,-5.17966,2.08137], + [9.63191,-4.8383,19.1588,-5.97411], + [-55.0642,13.0179,-14.3262,0], + [131.866,-13.6557,0,0], + [-101.445,0,0,0], + ], + + 'NUV_NUVr': [ + [0,0,0,0], + [2.2112,-1.2776,0.219084,0.0181984], + [-25.0673,5.02341,-0.759049,-0.0652431], + [115.613,-5.18613,1.78492,0], + [-278.442,-5.48893,0,0], + [261.478,0,0,0], + ], + + 'NUV_NUVg': [ + [0,0,0,0], + [2.60443,-2.04106,0.52215,0.00028771], + [-24.6891,5.70907,-0.552946,-0.131456], + [95.908,-0.524918,1.28406,0], + [-208.296,-10.2545,0,0], + [186.442,0,0,0], + ], + + 'r_gr': [ + [0,0,0,0], + [1.83285,-2.71446,4.97336,-3.66864], + [-19.7595,10.5033,18.8196,6.07785], + [33.6059,-120.713,-49.299,0], + [144.371,216.453,0,0], + [-295.39,0,0,0], + ], + + 'r_ur': [ + [0,0,0,0], + [3.03458,-1.50775,0.576228,-0.0754155], + [-47.8362,19.0053,-3.15116,0.286009], + [154.986,-35.6633,1.09562,0], + [-188.094,28.1876,0,0], + [68.9867,0,0,0], + ], + + 'u_ur': [ + [0,0,0,0], + [10.3686,-6.12658,2.58748,-0.299322], + [-138.069,45.0511,-10.8074,0.95854], + [540.494,-43.7644,3.84259,0], + [-1005.28,10.9763,0,0], + [710.482,0,0,0], + ], + + 'u_ui': [ + [0,0,0,0], + [11.0679,-6.43368,2.4874,-0.276358], + [-134.36,36.0764,-8.06881,0.788515], + [528.447,-26.7358,0.324884,0], + [-1023.1,13.8118,0,0], + [721.096,0,0,0], + ], + + 'u_uz': [ + [0,0,0,0], + [11.9853,-6.71644,2.31366,-0.234388], + [-137.024,35.7475,-7.48653,0.655665], + 
[519.365,-20.9797,0.670477,0], + [-1028.36,2.79717,0,0], + [767.552,0,0,0], + ], + + 'Y_YH': [ + [0,0,0,0], + [-2.81404,10.7397,-0.869515,-11.7591], + [10.0424,-58.4924,49.2106,23.6013], + [-0.311944,84.2151,-100.625,0], + [-45.306,3.77161,0,0], + [41.1134,0,0,0], + ], + + 'Y_YK': [ + [0,0,0,0], + [-0.516651,6.86141,-9.80894,-0.410825], + [-3.90566,-4.42593,51.4649,-2.86695], + [-5.38413,-68.218,-50.5315,0], + [57.4445,97.2834,0,0], + [-64.6172,0,0,0], + ], + + 'z_gz': [ + [0,0,0,0], + [0.30146,-0.623614,1.40008,-0.534053], + [-10.9584,-4.515,2.17456,0.913877], + [66.0541,4.18323,-8.42098,0], + [-169.494,14.5628,0,0], + [144.021,0,0,0], + ], + + 'z_rz': [ + [0,0,0,0], + [0.669031,-3.08016,9.87081,-7.07135], + [-18.6165,8.24314,-14.2716,13.8663], + [94.1113,11.2971,-11.9588,0], + [-225.428,-17.8509,0,0], + [197.505,0,0,0], + ], + + 'z_uz': [ + [0,0,0,0], + [0.623441,-0.293199,0.16293,-0.0134639], + [-21.567,5.93194,-1.41235,0.0714143], + [82.8481,-0.245694,0.849976,0], + [-185.812,-7.9729,0,0], + [168.691,0,0,0], + ], + + } + + c = coeff[filter_name + '_' + colour_name.replace(' - ', '')] + kcor = 0.0 + + for x, a in enumerate(c): + for y, b in enumerate(c[x]): + kcor += c[x][y] * redshift**x * colour_value**y + + return kcor + +if __name__ == "__main__": + import doctest + doctest.testmod() diff --git a/gwcosmo/utilities/schechter_function.py b/gwcosmo/utilities/schechter_function.py index cc46617806e3472686070e2e2f6634d150c66df8..07ef9d57e2ed26d0e3bb5364f0585b83e10dc75f 100644 --- a/gwcosmo/utilities/schechter_function.py +++ b/gwcosmo/utilities/schechter_function.py @@ -27,7 +27,7 @@ class SchechterMagFunctionInternal(object): return self.evaluate(m)/self.norm -def SchechterMagFunction(H0=70., Mstar_obs=-20.457, alpha=-1.07, phistar=1.): +def SchechterMagFunction(H0=70., Mstar_obs=-19.70, alpha=-1.07, phistar=1.): """ Returns a Schechter magnitude function fort a given set of parameters diff --git a/gwcosmo/utilities/schechter_params.py 
b/gwcosmo/utilities/schechter_params.py new file mode 100644 index 0000000000000000000000000000000000000000..24c2c58414120e7ba93399349a5168011e576958 --- /dev/null +++ b/gwcosmo/utilities/schechter_params.py @@ -0,0 +1,41 @@ +""" +Rachel Gray 2020 +""" + +class SchechterParams(): + """ + Returns the source frame Schechter function parameters for a given band. + ugriz parameters are from https://arxiv.org/pdf/astro-ph/0210215.pdf + (tables 1 and 2) + + Parameters + ---------- + band : observation band (B,K,u,g,r,i,z) + """ + def __init__(self, band): + self.Mstar = None + self.alpha = None + self.Mmin = None + self.Mmax = None + + self.alpha, self.Mstar, self.Mmin, self.Mmax = self.values(band) + + def values(self, band): + if band == 'B': + return -1.07, -19.70, -22.96, -12.96 + elif band == 'K': + return -1.02, -22.77, -27.0, -12.96 + elif band == 'u': + return -0.92, -17.93, -21.93, -15.54 #TODO check Mmin and Mmax + elif band == 'g': + return -0.89, -19.39, -23.38, -16.10 #TODO check Mmin and Mmax + elif band == 'r': + return -1.05, -20.44, -24.26, -16.11 #TODO check Mmin and Mmax + elif band == 'i': + return -1.00, -20.82, -23.84, -17.07 #TODO check Mmin and Mmax + elif band == 'z': + return -1.08, -21.18, -24.08, -17.34 #TODO check Mmin and Mmax + else: + raise Exception("Expected 'B', 'K', 'u', 'g', 'r', 'i' or 'z' band argument") + + diff --git a/gwcosmo/utilities/standard_cosmology.py b/gwcosmo/utilities/standard_cosmology.py index ad10877605f565871c5a42d14dafaca9c668d16c..4b18b713da3474b3c5a3820a67aa757976ff29b1 100644 --- a/gwcosmo/utilities/standard_cosmology.py +++ b/gwcosmo/utilities/standard_cosmology.py @@ -176,7 +176,7 @@ def redshift(dHoc, Omega_m=Omega_m, tolerance_z=1e-06): # Distance modulus given luminosity distance -def mu(dL): +def DistanceModulus(dL): """ Returns distance modulus given luminosity distance @@ -186,40 +186,89 @@ def mu(dL): Returns ------- - distance modulus, mu = 5*np.log10(dL)+25 + distance modulus = 5*np.log10(dL)+25 """ 
return 5*np.log10(dL)+25 # dL has to be in Mpc -def dl_mM(m, M): +def dl_mM(m, M, Kcorr=0.): """ returns luminosity distance in Mpc given apparent magnitude and absolute magnitude + + Parameters + ---------- + m : apparent magnitude + M : absolute magnitude in the source frame + Kcorr : (optional) K correction, to convert absolute magnitude from the + observed band to the source frame band (default=0). If fluxes are + bolometric, this should be left as 0. If not, a K correction of 0 is + only valid at low redshifts. + + Returns + ------- + Luminosity distance in Mpc """ - return 10**(0.2*(m-M-25)) + return 10**(0.2*(m-M-Kcorr-25)) def L_M(M): """ Returns luminosity when given an absolute magnitude + + Parameters + ---------- + M : absolute magnitude in the source frame + + Returns + ------- + Luminosity in Watts """ # TODO: double check use of L0=3.0128e28 return 3.0128e28*10**(-0.4*M) -def M_mdl(m, dl): +def M_mdl(m, dl, Kcorr=0.): """ Returns a source's absolute magnitude given apparent magnitude and luminosity distance + If a K correction is supplied it will be applied + + Parameters + ---------- + m : apparent magnitude + dl : luminosity distance in Mpc + Kcorr : (optional) K correction, to convert absolute magnitude from the + observed band to the source frame band (default=0). If fluxes are + bolometric, this should be left as 0. If not, a K correction of 0 is + only valid at low redshifts. + + Returns + ------- + Absolute magnitude in the source frame """ - return m - mu(dl) + return m - DistanceModulus(dl) - Kcorr -def L_mdl(m, dl): +def L_mdl(m, dl, Kcorr=0.): """ Returns luminosity when given apparent magnitude and luminosity distance + If a K correction is supplied it will be applied + + Parameters + ---------- + m : apparent magnitude + dl : luminosity distance in Mpc + Kcorr : (optional) K correction, to convert absolute magnitude from the + observed band to the source frame band (default=0). If fluxes are + bolometric, this should be left as 0. 
If not, a K correction of 0 is + only valid at low redshifts. + + Returns + ------- + Luminosity in the source frame """ - return L_M(M_mdl(m, dl)) + return L_M(M_mdl(m, dl, Kcorr=Kcorr)) # Rachel: I've put dl_zH0 and z_dlH0 in as place holders. @@ -336,5 +385,5 @@ class fast_cosmology(object): # Local cosmology return z*c/H0 else: - # Standard cosmology + # Standard cosmology return splev(z, self.interp, ext=3)*c/H0