Commit c3f7a1dc authored by Leo Pound Singer

bayestar_plot_found_injections: fix some subtle Python 3 issues

Original: 9f1a117b19020efd744e1343cddc8c851da6a666
parent 1d796473
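The "subtle Python 3 issues" are not spelled out in the commit message; from the diff below they appear to be two things. First, argparse.FileType('r') is switched to FileType('rb'), presumably because the code that parses the input files expects a byte stream under Python 3. Second, module-level checks such as `if 'offset' in dataset.dtype.names:` relied on the list-comprehension variable `dataset` leaking into the enclosing scope, which Python 2 allows but Python 3 does not, and are replaced by explicit `have_*` flags computed over all datasets. A minimal, self-contained sketch of the scoping pitfall (toy data, not part of the commit):

    import numpy as np

    # Two toy record arrays standing in for the parsed input files
    # (hypothetical data, just to make the sketch self-contained).
    dtype = [('searched_area', float), ('offset', float)]
    datasets_ = [np.array([(1.0, 2.0)], dtype=dtype),
                 np.array([(3.0, 4.0)], dtype=dtype)]

    combined = np.concatenate([dataset['searched_area'] for dataset in datasets_])

    # Python 2: `dataset` leaks out of the comprehension above and stays bound
    # to the last element, so `'offset' in dataset.dtype.names` appeared to work.
    # Python 3: the comprehension has its own scope, so that test raises
    # NameError. Checking every dataset explicitly works on both:
    have_offset = all('offset' in dataset.dtype.names for dataset in datasets_)
    print(have_offset)  # True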
@@ -37,7 +37,7 @@ parser.add_argument('--pp-confidence-interval', type=float, metavar='PCT',
'samples, overlay binomial confidence bands for this percentage on '
'the P--P plot [default: %(default)s]')
parser.add_argument(
'input', type=argparse.FileType('r'), nargs='+',
'input', type=argparse.FileType('rb'), nargs='+',
help='Name of input file generated by bayestar_aggregate_found_injections')
opts = parser.parse_args()
@@ -65,11 +65,15 @@ dataset_names = [os.path.splitext(file.name)[0] for file in opts.input]
combined = np.concatenate([dataset['searched_area'] for dataset in datasets_])
min_searched_area = np.min(combined)
max_searched_area = np.max(combined)
if 'offset' in dataset.dtype.names:
have_offset = all('offset' in dataset.dtype.names for dataset in datasets_)
have_runtime = all('runtime' in dataset.dtype.names for dataset in datasets_)
have_searched_prob_distance = all('searched_prob_distance' in dataset.dtype.names for dataset in datasets_)
if have_offset:
have_offset = True
combined = np.concatenate([dataset['offset'] for dataset in datasets_])
min_offset = np.min(combined)
max_offset = np.max(combined)
if 'runtime' in dataset.dtype.names:
if have_runtime:
combined = np.concatenate([dataset['runtime'] for dataset in datasets_])
if np.any(np.isfinite(combined)):
min_runtime = np.nanmin(combined)
@@ -157,7 +161,7 @@ for i, (bin_edge, subdir, title) in enumerate(zip(bin_edges, bin_names, bin_titl
ax2.set_title(title)
# Set up figure 3.
if 'offset' in dataset.dtype.names:
if have_offset:
fig3 = plt.figure(figsize=(6, 4.5))
ax3 = fig3.add_subplot(111)
ax3.set_xscale('log')
@@ -167,7 +171,7 @@ for i, (bin_edge, subdir, title) in enumerate(zip(bin_edges, bin_names, bin_titl
ax3.set_title(title)
# Set up figure 4.
if 'runtime' in dataset.dtype.names:
if have_runtime:
fig4 = plt.figure(figsize=(6, 4.5))
ax4 = fig4.add_subplot(111)
ax4.set_xscale('log')
@@ -176,7 +180,7 @@ for i, (bin_edge, subdir, title) in enumerate(zip(bin_edges, bin_names, bin_titl
ax4.set_ylabel(histlabel)
# Set up figure 5.
if 'searched_prob_distance' in dataset.dtype.names:
if have_searched_prob_distance:
fig5 = plt.figure(figsize=(6, 6))
ax5 = fig5.add_subplot(111, projection='pp_plot')
fig5.subplots_adjust(bottom=0.15)
@@ -192,12 +196,12 @@ for i, (bin_edge, subdir, title) in enumerate(zip(bin_edges, bin_names, bin_titl
searched_prob = data['p_value']
ax1.add_series(searched_prob, label=label)
ax2.hist(data['searched_area'], histtype='step', label=label, bins=np.logspace(np.log10(min_searched_area), np.log10(max_searched_area), 20), cumulative=opts.cumulative, normed=opts.normed)
if 'offset' in dataset.dtype.names:
if have_offset:
ax3.hist(data['offset'], histtype='step', label=label, bins=np.logspace(np.log10(min_offset), np.log10(max_offset), 20), cumulative=opts.cumulative, normed=opts.normed)
if 'runtime' in dataset.dtype.names:
if have_runtime:
if np.any(np.isfinite(data['runtime'])):
ax4.hist(data['runtime'], histtype='step', bins=np.logspace(np.log10(min_runtime), np.log10(max_runtime), 20), cumulative=opts.cumulative, normed=opts.normed)
if 'searched_prob_distance' in dataset.dtype.names:
if have_searched_prob_distance:
ax5.add_series(data['searched_prob_distance'], label=label)
# Finish and save plot 1.
@@ -219,20 +223,20 @@ for i, (bin_edge, subdir, title) in enumerate(zip(bin_edges, bin_names, bin_titl
fig2.savefig('searched_area_hist.pdf')
# Finish and save plot 3.
if 'offset' in dataset.dtype.names:
if have_offset:
pb.update(i * 4 + 2)
ax3.grid()
fig3.savefig('offset_hist.pdf')
# Finish and save plot 4.
if 'runtime' in dataset.dtype.names:
if have_runtime:
pb.update(i * 4 + 3)
ax4.grid()
fig4.savefig('runtime_hist.pdf')
plt.close()
# Finish and save plot 5.
if 'runtime' in dataset.dtype.names:
if have_searched_prob_distance:
pb.update(i * 4 + 4)
# Only plot target confidence band if all datasets have the same number
# of samples, because the confidence band depends on the number of samples.
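The truncated hunk above ends with the comment about drawing the target confidence band only when every dataset has the same number of samples. A short sketch of that guard, assuming the `add_confidence_band` method provided by the 'pp_plot' axes projection; the helper name below is hypothetical and this is an illustration, not the commit's verbatim code:

    # Hypothetical helper illustrating the guard described above.
    def maybe_add_confidence_band(ax, datasets, pct):
        """Overlay the target confidence band on a 'pp_plot' axes only when
        every dataset has the same number of samples, because the width of
        the binomial band depends on that number."""
        sample_counts = {len(dataset) for dataset in datasets}
        if len(sample_counts) == 1:
            # alpha is the target coverage as a fraction, e.g. 0.95 when the
            # script is run with --pp-confidence-interval 95.
            ax.add_confidence_band(sample_counts.pop(), alpha=0.01 * pct)

    # Usage inside the plotting loop would look roughly like:
    # maybe_add_confidence_band(ax5, datasets_, opts.pp_confidence_interval)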