From 569c02c5d5e520539821b91f684c5aa81f96127b Mon Sep 17 00:00:00 2001
From: Gregory Ashton <gregory.ashton@ligo.org>
Date: Tue, 19 Jun 2018 20:35:12 +1000
Subject: [PATCH] Clean up of documentation

- Adds in comments from Denyz, and general tidy up.
- Closes #83
---
 docs/basics-of-parameter-estimation.txt       | 20 ++++++++--------
 ...inary-coalescence-parameter-estimation.txt |  7 +++---
 docs/installation.txt                         |  2 +-
 docs/likelihood.txt                           | 18 +++++++--------
 docs/prior.txt                                | 23 +++++++++++++------
 docs/tupak-output.txt                         |  6 ++---
 docs/writing-documentation.txt                |  2 +-
 examples/other_examples/linear_regression.py  |  3 ++-
 .../linear_regression_unknown_noise.py        |  3 ++-
 tupak/__init__.py                             |  8 ++++---
 tupak/core/__init__.py                        |  2 +-
 tupak/core/likelihood.py                      |  2 +-
 tupak/core/prior.py                           |  2 +-
 tupak/core/sampler.py                         |  2 +-
 14 files changed, 57 insertions(+), 43 deletions(-)

diff --git a/docs/basics-of-parameter-estimation.txt b/docs/basics-of-parameter-estimation.txt
index e3be9d6a0..102c4c023 100644
--- a/docs/basics-of-parameter-estimation.txt
+++ b/docs/basics-of-parameter-estimation.txt
@@ -3,7 +3,7 @@ Basics of parameter estimation
 ==============================
 
 In this example, we'll go into some of the basics of parameter estimation and
-how they are implemented in `tupak`.
+how they are implemented in :code:`tupak`.
 
 Firstly, consider a situation where you have discrete data :math:`\{y_0,
 y_1,\ldots, y_n\}` taken at a set of times :math:`\{t_0, t_1, \ldots, y_n\}`.
@@ -20,7 +20,7 @@ least squares estimator for example).
 Here, we will describe a Bayesian approach using `nested sampling
 <https://en.wikipedia.org/wiki/Nested_sampling_algorithm>`_ which might feel
 like overkill for this problem. However, it is a useful way to introduce some
-of the basic features of `tupak` before seeing them in more complicated
+of the basic features of :code:`tupak` before seeing them in more complicated
 settings.
 
 The maths
@@ -31,7 +31,7 @@ by
 
 .. math::
 
-   P(m, c| \{y_i, t_i\}, H) \propto P(\{y_i, t_i\}| m, c, H) \times P(m, c| H)
+   P(m, c| \{y_i, t_i\}, H) \propto P(\{y_i, t_i\}| m, c, H) \times P(m, c| H)\,.
 
 where the first term on the right-hand-side is the *likelihood* while the
 second is the *prior*. In the model :math:`H`, the likelihood of the data point
@@ -41,37 +41,37 @@ Gaussian distributed as such:
 .. math::
 
    P(y_i, t_i| m, c, H) = \frac{1}{\sqrt{2\pi\sigma^2}}
-                         \mathrm{exp}\left(\frac{-(y_i - (t_i x + c))^2}{2\sigma^2}\right)
+                         \mathrm{exp}\left(\frac{-(y_i - (t_i x + c))^2}{2\sigma^2}\right) \,.
 
 Next, we assume that all data points are independent. As such,
 
 .. math::
 
-   P(\{y_i, t_i\}| m, c, H) = \prod_{i=1}^n P(y_i, t_i| m, c, H)
+   P(\{y_i, t_i\}| m, c, H) = \prod_{i=1}^n P(y_i, t_i| m, c, H) \,.
 
 When solving problems on a computer, it is often convienient to work with
-the log-likelihood. Indeed, a `tupak` *likelihood* must have a `log_likelihood()`
+the log-likelihood. Indeed, a :code:`tupak` *likelihood* must have a `log_likelihood()`
 method. For the normal distribution, the log-likelihood for *n* data points is
 
 .. math::
 
    \log(P(\{y_i, t_i\}| m, c, H)) = -\frac{1}{2}\left[
        \sum_{i=1}^n \left(\frac{(y_i - (t_i x + c))}{\sigma}\right)^2
-       + n\log\left(2\pi\sigma^2\right)\right]
+       + n\log\left(2\pi\sigma^2\right)\right] \,.
 
 Finally, we need to specify a *prior*. In this case we will use uncorrelated
 uniform priors
 
 .. math::
 
-  P(m, c| H) = P(m| H) \times P(c| H) = \textrm{Unif}(0, 5) \times \textrm{Unif}(-2, 2)
+  P(m, c| H) = P(m| H) \times P(c| H) = \textrm{Unif}(0, 5) \times \textrm{Unif}(-2, 2)\,.
 
-the choice of prior in general should be guided by physical knowledge about the
+The choice of prior in general should be guided by physical knowledge about the
 system and not the data in question.
 
 The key point to take away here is that the **likelihood** and **prior** are
 the inputs to figuring out the **posterior**. There are many ways to go about
-this, we will now show how to do so in `tupak`. In this case, we explicitly
+this, we will now show how to do so in :code:`tupak`. In this case, we explicitly
 show how to write the `GaussianLikelihood` so that one can see how the
 maths above gets implemented. For the prior, this is done implicitly by the
 naming of the priors.
diff --git a/docs/compact-binary-coalescence-parameter-estimation.txt b/docs/compact-binary-coalescence-parameter-estimation.txt
index 54f6215f2..232684a74 100644
--- a/docs/compact-binary-coalescence-parameter-estimation.txt
+++ b/docs/compact-binary-coalescence-parameter-estimation.txt
@@ -2,9 +2,10 @@
 Compact binary coalescence parameter estimation
 ===============================================
 
-In this example, we demonstrate how to generate simulated data for a binary
-black hold coalescence observed by the two LGIO interferometers at Hanford,
-Livingston and the Virgo detector.
+In this example, which can be found `here
+<https://git.ligo.org/Monash/tupak/blob/master/examples/injection_examples/basic_tutorial.py>`_,
+we demonstrate how to generate simulated data for a binary black hole
+coalescence observed by the two LIGO interferometers at Hanford and Livingston.
 
 .. literalinclude:: /../examples/injection_examples/basic_tutorial.py
    :language: python
diff --git a/docs/installation.txt b/docs/installation.txt
index 9c2255f39..771b58c3f 100644
--- a/docs/installation.txt
+++ b/docs/installation.txt
@@ -29,7 +29,7 @@ Clone the repository, install the requirements, and then install the software:
    $ pip install -r requirements.txt
    $ python setup.py install
 
-Once you have run these steps, you have `tupak` installed. You can now try to
+Once you have run these steps, you have :code:`tupak` installed. You can now try to
 run the examples.
 
 .. note::
diff --git a/docs/likelihood.txt b/docs/likelihood.txt
index 2158aa923..a504a62d8 100644
--- a/docs/likelihood.txt
+++ b/docs/likelihood.txt
@@ -7,9 +7,9 @@ Likelihood
 `tupak` likelihood objects are used in calculating the likelihood of the data
 for some specific set of parameters. In mathematical notation, the likelihood
 can be generically written as :math:`\mathcal{L}(d| \theta)`. How this is
-coded up will depend on the problem, but `tupak` expects all likelihood
+coded up will depend on the problem, but :code:`tupak` expects all likelihood
 objects to have a `parameters` attribute (a dictionary of key-value pairs) and
-a `log_likelihood()` method. In thie page, we'll discuss how to write your own
+a `log_likelihood()` method. In this page, we'll discuss how to write your own
 Likelihood, and the standard likelihoods in :code:`tupak`.
 
 The simplest likelihood
@@ -80,9 +80,9 @@ General likelihood for fitting a function :math:`y(x)` to some data with known n
 ------------------------------------------------------------------------------------
 
 The previous example was rather simplistic, Let's now consider that we have some
-dependent data :math:`\vec{y}=y_1, y_2, \ldots y_N$` measured at
+dependent data :math:`\vec{y}=y_1, y_2, \ldots y_N` measured at
 :math:`\vec{x}=x_1, x_2, \ldots, x_N`. We believe that the data is generated
-by additive Gaussian noise with a known variance :math:`sigma^2` and a function
+by additive Gaussian noise with a known variance :math:`\sigma^2` and a function
 :math:`y(x; \theta)` where :math:`\theta`  are some unknown parameters; that is
 
 .. math::
@@ -90,7 +90,7 @@ by additive Gaussian noise with a known variance :math:`sigma^2` and a function
    y_i = y(x_i; \theta) + n_i
 
 where :math:`n_i` is drawn from a normal distribution with zero mean and
-standard deviation :math:`sigma`. As such, :math:`y_i - y(x_i; \theta)`
+standard deviation :math:`\sigma`. As such, :math:`y_i - y(x_i; \theta)`
 itself will have a likelihood
 
 .. math::
@@ -100,7 +100,7 @@ itself will have a likelihood
    \mathrm{exp}\left(\frac{-(y_i - y(x_i; \theta))^2}{2\sigma^2}\right)
 
 
-As with the previous case, the likelihood for all the data is the produce over
+As with the previous case, the likelihood for all the data is the product over
 the likelihood for each data point.
 
 In :code:`tupak`, we can code this up as a likelihood in the following way::
@@ -143,7 +143,7 @@ In :code:`tupak`, we can code this up as a likelihood in the following way::
 This likelihood can be given any python function, the data (in the form of
 :code:`x` and :code:`y`) and the standard deviation of the noise. The
 parameters are inferred from the arguments to the :code:`function` argument,
-for example if, when instatiating the likelihood you passed in a the following
+for example if, when instantiating the likelihood you passed in the following
 function::
 
    def f(x, a, b):
@@ -167,7 +167,7 @@ In the last example, we considered only cases with known noise (e.g., a
 prespecified standard deviation. We now present a general function which can
 handle unknown noise (in which case you need to specify a prior for
 :math:`\sigma`, or known noise (in which case you pass the known noise in when
-instatiating the likelihood::
+instantiating the likelihood::
 
   class GaussianLikelihood(tupak.Likelihood):
       def __init__(self, x, y, function, sigma=None):
@@ -181,7 +181,7 @@ instatiating the likelihood::
               The data to analyse
           function:
               The python function to fit to the data. Note, this must take the
-              dependent variable as its first argument. The other arguments are
+              dependent variable as its first argument. The other arguments
               will require a prior and will be sampled over (unless a fixed
               value is given).
           sigma: None, float, array_like
diff --git a/docs/prior.txt b/docs/prior.txt
index f0c85df55..bd485ccf4 100644
--- a/docs/prior.txt
+++ b/docs/prior.txt
@@ -12,7 +12,7 @@ Typically, these are passed into :ref:`run_sampler <run_sampler>` as a regular
 `python dictionary
 <https://docs.python.org/2/tutorial/datastructures.html#dictionaries>`_.
 
-The keys of the priors objects should reference the model parameters (in
+The keys of the prior objects should reference the model parameters, in
 particular, the :code:`parameters` attribute of the :ref:`likelihood`. Each key
 can be either
 
@@ -20,7 +20,7 @@ can be either
   this is a Delta-function prior,
 - or a :code:`tupak.prior.Prior` instance.
 
-If the later, it will be sampled during the parameter estimation. Here is a
+If the latter, it will be sampled during the parameter estimation. Here is a
 simple example that sets a uniform prior for :code:`a`, and a fixed value for
 :code:`b`::
 
@@ -44,14 +44,23 @@ hole and defined in a file like this
 You can define your own default prior and pass a string pointing to that file
 to :ref:`run_sampler <run_sampler>`.
 
-Complete list of available prior classes
+Available prior classes
 ----------------------------------------
 
-We have provided a number of standard priors. You can define your own by
-subclassing the :code:`tupak.prior.Prior` class. Here is the complete list of
-those implemented:
+We have provided a number of standard priors. Here we document a few of them;
+note that this list is incomplete.
 
-.. automodule:: tupak.core.prior
+.. autoclass:: tupak.core.prior.Uniform
    :members:
+   :special-members:
 
+.. autoclass:: tupak.core.prior.Gaussian
+   :members:
+   :special-members:
+
+You can also define your own by
+subclassing the :code:`tupak.prior.Prior` class.
 
+.. autoclass:: tupak.core.prior.Prior
+   :members:
+   :special-members:
diff --git a/docs/tupak-output.txt b/docs/tupak-output.txt
index 6afb9ce86..df2ff6d12 100644
--- a/docs/tupak-output.txt
+++ b/docs/tupak-output.txt
@@ -2,7 +2,7 @@
 Tupak output
 ============
 
-In this document, we will describe what :code::code:`tupak` outputs, where it is stored,
+In this document, we will describe what :code:`tupak` outputs, where it is stored,
 and how you can access it.
 
 When you call :code:`run_sampler`, there are two arguments :code:`outdir` and :code:`label` which
@@ -83,7 +83,7 @@ This shows the different data that is stored in the :code:`h5` file. You can thi
 the file like a python dictionary - its a bag with lots of different kind of
 data which can be accessed via a :code:`key` (a string). We use `deepdish
 <http://deepdish.io/>`_ to handle the saving of :code:`h5` files in :code:`tupak`. In python,
-can load any :code:`h5` file and access its contents like a dictionary::
+you can load any :code:`h5` file and access its contents like a dictionary::
 
    >>> import deepdish
    >>> output = deepdish.io.load('outdir/label_result.h5')
@@ -98,7 +98,7 @@ Reading in a result file
 ------------------------
 Rather than reading in the raw :code:`h5` file, may find it more convienient to
 instead load a :code:`*result.h5` as a :code:`tupak` :code:`result` object (i.e., like the output
-of :code:`run_sampler`. To do this::
+of :code:`run_sampler`). To do this::
 
    >>> import tupak
    >>> result = tupak.result.read_in_result(outdir=outdir, label=label)
diff --git a/docs/writing-documentation.txt b/docs/writing-documentation.txt
index dd971c663..71fe2ee65 100644
--- a/docs/writing-documentation.txt
+++ b/docs/writing-documentation.txt
@@ -59,7 +59,7 @@ all of the new/changed files::
    $ git commit -m "Adding my documentation for the feature"
    $ git push origin adding-my-new-documentation
 
-Then, on the web interface create an merge request.
+Then, on the web interface create a merge request.
 
 Using reStructured text
 -----------------------
diff --git a/examples/other_examples/linear_regression.py b/examples/other_examples/linear_regression.py
index 6f8121221..f3a6b9980 100644
--- a/examples/other_examples/linear_regression.py
+++ b/examples/other_examples/linear_regression.py
@@ -14,6 +14,7 @@ import inspect
 # A few simple setup steps
 label = 'linear_regression'
 outdir = 'outdir'
+tupak.utils.check_directory_exists_and_if_not_mkdir(outdir)
 
 
 # First, we define our "signal model", in this case a simple linear function
@@ -21,7 +22,7 @@ def model(time, m, c):
     return time * m + c
 
 
-# New we define the injection parameters which we make simulated data with
+# Now we define the injection parameters which we make simulated data with
 injection_parameters = dict(m=0.5, c=0.2)
 
 # For this example, we'll use standard Gaussian noise
diff --git a/examples/other_examples/linear_regression_unknown_noise.py b/examples/other_examples/linear_regression_unknown_noise.py
index 680349355..835674bce 100644
--- a/examples/other_examples/linear_regression_unknown_noise.py
+++ b/examples/other_examples/linear_regression_unknown_noise.py
@@ -13,6 +13,7 @@ import matplotlib.pyplot as plt
 # A few simple setup steps
 label = 'linear_regression_unknown_noise'
 outdir = 'outdir'
+tupak.utils.check_directory_exists_and_if_not_mkdir(outdir)
 
 
 # First, we define our "signal model", in this case a simple linear function
@@ -20,7 +21,7 @@ def model(time, m, c):
     return time * m + c
 
 
-# New we define the injection parameters which we make simulated data with
+# Now we define the injection parameters which we make simulated data with
 injection_parameters = dict(m=0.5, c=0.2)
 
 # For this example, we'll inject standard Gaussian noise
diff --git a/tupak/__init__.py b/tupak/__init__.py
index 7e25711f1..f370d6b79 100644
--- a/tupak/__init__.py
+++ b/tupak/__init__.py
@@ -2,14 +2,16 @@
 tupak
 =====
 
-Tupak is The User friendly Parameter estimAtion Kode
+Tupak is The User friendly Parameter estimAtion Kode.
 
 The aim of tupak is to provide user friendly interface to perform parameter
 estimation. It is primarily designed and built for inference of compact
 binary coalescence events in interferometric data, but it can also be used for
 more general problems.
 
-For installation instructions see https://git.ligo.org/Monash/tupak
+The code and many examples are hosted at https://git.ligo.org/Monash/tupak.
+For installation instructions see
+https://monash.docs.ligo.org/tupak/installation.html.
 
 """
 
@@ -17,7 +19,7 @@ For installation instructions see https://git.ligo.org/Monash/tupak
 from __future__ import print_function, division, absolute_import
 
 # import local files, core utils should be imported first
-from tupak.core import utils, likelihood, prior, result, sampler
+from tupak.core import utils, likelihood, result, sampler
 from tupak.gw import detector, conversion, source, waveform_generator
 
 # import a few often-used functions and classes to simplify scripts
diff --git a/tupak/core/__init__.py b/tupak/core/__init__.py
index 1329ed8e8..4a967f19c 100644
--- a/tupak/core/__init__.py
+++ b/tupak/core/__init__.py
@@ -1,6 +1,6 @@
 from __future__ import absolute_import
 import tupak.core.likelihood
-import tupak.core.prior
+#import tupak.core.prior
 import tupak.core.result
 import tupak.core.sampler
 import tupak.core.utils
diff --git a/tupak/core/likelihood.py b/tupak/core/likelihood.py
index 479193fe8..e635ae54d 100644
--- a/tupak/core/likelihood.py
+++ b/tupak/core/likelihood.py
@@ -61,7 +61,7 @@ class GaussianLikelihood(Likelihood):
             The data to analyse
         function:
             The python function to fit to the data. Note, this must take the
-            dependent variable as its first argument. The other arguments are
+            dependent variable as its first argument. The other arguments
             will require a prior and will be sampled over (unless a fixed
             value is given).
         sigma: None, float, array_like
diff --git a/tupak/core/prior.py b/tupak/core/prior.py
index 5072fc365..9b9ec4029 100644
--- a/tupak/core/prior.py
+++ b/tupak/core/prior.py
@@ -347,7 +347,7 @@ class Prior(object):
         """Test if 0 < val < 1
 
         Parameters
-        -------
+        ----------
         val: float
 
         Raises
diff --git a/tupak/core/sampler.py b/tupak/core/sampler.py
index d90d37919..0c079a54e 100644
--- a/tupak/core/sampler.py
+++ b/tupak/core/sampler.py
@@ -667,7 +667,7 @@ def run_sampler(likelihood, priors=None, label='label', outdir='outdir',
         All kwargs are passed directly to the samplers `run` function
 
     Returns
-    ------
+    -------
     result
         An object containing the results
     """
-- 
GitLab