diff --git a/bilby/core/likelihood.py b/bilby/core/likelihood.py
index 5abe4c2d8bac279d627a5bfc93dc7d646a28eaf1..df30f3e4914bcb2a38c36ff7651e0397bb289872 100644
--- a/bilby/core/likelihood.py
+++ b/bilby/core/likelihood.py
@@ -407,7 +407,7 @@ class Multinomial(Likelihood):
     Likelihood for system with N discrete possibilities.
     """
 
-    def __init__(self, data, n_dimensions, label="parameter_"):
+    def __init__(self, data, n_dimensions, base="parameter_"):
         """
 
         Parameters
@@ -416,19 +416,21 @@ class Multinomial(Likelihood):
             The number of objects in each class
         n_dimensions: int
             The number of classes
+        base: str
+            The prefix string used to construct the parameter labels
         """
         self.data = np.array(data)
         self._total = np.sum(self.data)
         super(Multinomial, self).__init__(dict())
         self.n = n_dimensions
-        self.label = label
+        self.base = base
         self._nll = None
 
     def log_likelihood(self):
         """
         Since n - 1 parameters are sampled, the last parameter is 1 - the rest
         """
-        probs = [self.parameters[self.label + str(ii)]
+        probs = [self.parameters[self.base + str(ii)]
                  for ii in range(self.n - 1)]
         probs.append(1 - sum(probs))
         return self._multinomial_ln_pdf(probs=probs)
diff --git a/examples/core_examples/alternative_samplers/linear_regression_pymc3_custom_likelihood.py b/examples/core_examples/alternative_samplers/linear_regression_pymc3_custom_likelihood.py
index 864c9afe3fdcc0870c27cf6ce9230056529412ee..d2074304f83064167facf1166e7ec582f8056fb7 100644
--- a/examples/core_examples/alternative_samplers/linear_regression_pymc3_custom_likelihood.py
+++ b/examples/core_examples/alternative_samplers/linear_regression_pymc3_custom_likelihood.py
@@ -8,8 +8,6 @@ would give equivalent results as using the pre-defined 'Gaussian Likelihood'
 
 """
 
-import inspect
-
 import bilby
 import matplotlib.pyplot as plt
 import numpy as np
@@ -52,8 +50,8 @@ fig.savefig("{}/{}_data.png".format(outdir, label))
 
 # Parameter estimation: we now define a Gaussian Likelihood class relevant for
 # our model.
-class GaussianLikelihoodPyMC3(bilby.Likelihood):
-    def __init__(self, x, y, sigma, function):
+class GaussianLikelihoodPyMC3(bilby.core.likelihood.GaussianLikelihood):
+    def __init__(self, x, y, sigma, func):
         """
         A general Gaussian likelihood - the parameters are inferred from the
         arguments of function
@@ -64,22 +62,13 @@ class GaussianLikelihoodPyMC3(bilby.Likelihood):
             The data to analyse
         sigma: float
             The standard deviation of the noise
-        function:
+        func:
             The python function to fit to the data. Note, this must take the
-            dependent variable as its first argument. The other arguments are
+            dependent variable as its first argument. The other arguments
             will require a prior and will be sampled over (unless a fixed
             value is given).
         """
-        self.x = x
-        self.y = y
-        self.sigma = sigma
-        self.N = len(x)
-        self.function = function
-
-        # These lines of code infer the parameters from the provided function
-        parameters = inspect.getargspec(function).args
-        parameters.pop(0)
-        self.parameters = dict.fromkeys(parameters)
+        super(GaussianLikelihoodPyMC3, self).__init__(x=x, y=y, func=func, sigma=sigma)
 
     def log_likelihood(self, sampler=None):
         """
@@ -88,12 +77,15 @@ class GaussianLikelihoodPyMC3(bilby.Likelihood):
         sampler: :class:`bilby.core.sampler.Pymc3`
             A Sampler object must be passed containing the prior distributions
             and PyMC3 :class:`~pymc3.Model` to use as a context manager.
+            If this is not passed, the super class is called and the regular
+            likelihood is evaluated.
         """
 
         from bilby.core.sampler import Pymc3
 
         if not isinstance(sampler, Pymc3):
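+            # No Pymc3 sampler was supplied, so evaluate the standard Gaussian
+            # likelihood provided by the parent class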
-            raise ValueError("Sampler is not a bilby Pymc3 sampler object")
+            print(sampler, type(sampler))
+            return super(GaussianLikelihoodPyMC3, self).log_likelihood()
 
         if not hasattr(sampler, "pymc3_model"):
             raise AttributeError("Sampler has not PyMC3 model attribute")
@@ -114,12 +106,11 @@ likelihood = GaussianLikelihoodPyMC3(time, data, sigma, model)
 
 
 # Define a custom prior for one of the parameter for use with PyMC3
-class PriorPyMC3(bilby.core.prior.Prior):
+class PyMC3UniformPrior(bilby.core.prior.Uniform):
     def __init__(self, minimum, maximum, name=None, latex_label=None):
         """
         Uniform prior with bounds (should be equivalent to bilby.prior.Uniform)
         """
-
         bilby.core.prior.Prior.__init__(
             self, name, latex_label, minimum=minimum, maximum=maximum
         )
@@ -128,12 +119,15 @@ class PriorPyMC3(bilby.core.prior.Prior):
         """
         Change ln_prob method to take in a Sampler and return a PyMC3
         distribution.
+
+        If the passed argument is not a `Pymc3` sampler, assume that it is a
+        float or array to be passed to the superclass.
         """
 
         from bilby.core.sampler import Pymc3
 
         if not isinstance(sampler, Pymc3):
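+            # A float or array was passed rather than a Pymc3 sampler, so
+            # evaluate the standard Uniform log prior from the parent class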
-            raise ValueError("Sampler is not a bilby Pymc3 sampler object")
+            return super(PyMC3UniformPrior, self).ln_prob(sampler)
 
         return pm.Uniform(self.name, lower=self.minimum, upper=self.maximum)
 
@@ -142,7 +136,7 @@ class PriorPyMC3(bilby.core.prior.Prior):
 # We make a prior
 priors = dict()
 priors["m"] = bilby.core.prior.Uniform(0, 5, "m")
-priors["c"] = PriorPyMC3(-2, 2, "c")
+priors["c"] = PyMC3UniformPrior(-2, 2, "c")
 
 # And run sampler
 result = bilby.run_sampler(
diff --git a/examples/core_examples/conditional_prior.py b/examples/core_examples/conditional_prior.py
index 5da96c90ab633c28a60dae2a13c07cb9bb918d01..aef3d91cf955f2dd34bfb6b2c644cb8ed94c59e1 100644
--- a/examples/core_examples/conditional_prior.py
+++ b/examples/core_examples/conditional_prior.py
@@ -1,12 +1,16 @@
+"""
+This tutorial demonstrates how we can sample a prior in the shape of a ball.
+Note that this will not end up sampling uniformly in that space; constraint
+priors are more suitable for that.
+This implementation will draw a value for the x-coordinate from p(x), then,
+given that draw, a value for the y-coordinate from p(y|x), and finally,
+given both, a value for the z-coordinate from p(z|x, y).
+Only the x-coordinate will end up being uniformly distributed.
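+The joint distribution this induces factorises as
+p(x, y, z) = p(x) p(y|x) p(z|x, y).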
+"""
 import bilby
 import numpy as np
 
-# This tutorial demonstrates how we can sample a prior in the shape of a ball
-# Note that this will not end up sampling uniformly in that space, constraint priors are more suitable for that.
-# This implementation will draw a value for the x-coordinate from p(x), and given that draw a value for the
-# y-coordinate from p(y|x), and given that draw a value for the z-coordinate from p(z|x,y).
-# Only the x-coordinate will end up being uniform for this
-
 
 class ZeroLikelihood(bilby.core.likelihood.Likelihood):
     """Flat likelihood. This always returns 0.
diff --git a/examples/core_examples/dirichlet.py b/examples/core_examples/dirichlet.py
index 71d29ee5ef71bd3a9ef8230339ad19dcc9ec9425..6cd8ceff4d2661b974fd57b58c136119348d7abd 100644
--- a/examples/core_examples/dirichlet.py
+++ b/examples/core_examples/dirichlet.py
@@ -15,7 +15,7 @@ injection_parameters = dict(
 )
 data = [injection_parameters[label + str(ii)] * 1000 for ii in range(n_dim)]
 
-likelihood = Multinomial(data=data, n_dimensions=n_dim, label=label)
+likelihood = Multinomial(data=data, n_dimensions=n_dim, base=label)
 
 result = run_sampler(
     likelihood=likelihood,
diff --git a/examples/core_examples/hyper_parameter_example.py b/examples/core_examples/hyper_parameter_example.py
index 4ce066e938ef2e8d9ff571d10ee154e0af9e871f..44c8825cabe36ebd51022cd0dbdce0e00225b8c8 100644
--- a/examples/core_examples/hyper_parameter_example.py
+++ b/examples/core_examples/hyper_parameter_example.py
@@ -83,16 +83,13 @@ def hyper_prior(dataset, mu, sigma):
     )
 
 
-def run_prior(dataset):
-    return 1 / 20
-
-
 samples = [result.posterior for result in results]
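+# Attach the density of the prior used for the original sampling runs as a
+# "prior" column of each posterior (uniform, so a constant 1 / 20); this
+# replaces the sampling_prior argument removed below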
+for sample in samples:
+    sample["prior"] = 1 / 20
 evidences = [result.log_evidence for result in results]
 hp_likelihood = HyperparameterLikelihood(
     posteriors=samples,
     hyper_prior=hyper_prior,
-    sampling_prior=run_prior,
     log_evidences=evidences,
     max_samples=500,
 )
@@ -107,6 +104,8 @@ result = run_sampler(
     likelihood=hp_likelihood,
     priors=hp_priors,
     sampler="dynesty",
+    walks=10,
+    nact=3,
     nlive=1000,
     use_ratio=False,
     outdir=outdir,
diff --git a/examples/core_examples/linear_regression_grid.py b/examples/core_examples/linear_regression_grid.py
index 76fd49630ae606e2eb47e30889424b0c11d3d8a8..8138c213665c306e444ee9aec80ee147a9980a6a 100644
--- a/examples/core_examples/linear_regression_grid.py
+++ b/examples/core_examples/linear_regression_grid.py
@@ -75,12 +75,12 @@ grid_evidence = grid.log_evidence
 axes = fig.get_axes()
 axes[0].plot(
     grid.sample_points["c"],
-    np.exp(grid.marginalize_ln_posterior(not_parameter="c") - grid_evidence),
+    np.exp(grid.marginalize_ln_posterior(not_parameters="c") - grid_evidence),
     "k--",
 )
 axes[3].plot(
     grid.sample_points["m"],
-    np.exp(grid.marginalize_ln_posterior(not_parameter="m") - grid_evidence),
+    np.exp(grid.marginalize_ln_posterior(not_parameters="m") - grid_evidence),
     "k--",
 )
 axes[2].contour(
diff --git a/examples/core_examples/occam_factor_example.py b/examples/core_examples/occam_factor_example.py
index 5ff7d47453d69d1371e2125bd7a4345477a95dce..e7ebb38a5f3e6ca34b0e7af3ef289b8602adbc12 100644
--- a/examples/core_examples/occam_factor_example.py
+++ b/examples/core_examples/occam_factor_example.py
@@ -99,7 +99,7 @@ def fit(n):
     result = bilby.run_sampler(
         likelihood=likelihood,
         priors=priors,
-        nlive=1000,
+        nlive=100,
         outdir=outdir,
         label=label,
         sampler="nestle",
diff --git a/examples/core_examples/radioactive_decay.py b/examples/core_examples/radioactive_decay.py
index 9e2070d5d8c7b699848aac202f014afd09fba384..44682bd699ae81961dc890073d17482e6091b9af 100644
--- a/examples/core_examples/radioactive_decay.py
+++ b/examples/core_examples/radioactive_decay.py
@@ -42,15 +42,9 @@ def decay_rate(delta_t, halflife, n_init):
     times = np.insert(times, 0, 0.0)
 
     n_atoms = n_init * atto * n_avogadro
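+    # Fraction of the initial atoms remaining at each time boundary; the mean
+    # rate in a bin is the number decaying within it divided by its duration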
+    counts = np.exp(-np.log(2) * times / halflife)
 
-    rates = (
-        (
-            np.exp(-np.log(2) * (times[:-1] / halflife))
-            - np.exp(-np.log(2) * (times[1:] / halflife))
-        )
-        * n_atoms
-        / delta_t
-    )
+    rates = (counts[:-1] - counts[1:]) * n_atoms / delta_t
 
     return rates
 
@@ -95,6 +89,7 @@ result = bilby.run_sampler(
     likelihood=likelihood,
     priors=priors,
     sampler="dynesty",
+    sample="unif",
     nlive=1000,
     injection_parameters=injection_parameters,
     outdir=outdir,
diff --git a/examples/core_examples/slabspike_example.py b/examples/core_examples/slabspike_example.py
index 30521fa28c4220694f09b378f90bf59b28edb476..cca1a880e3e132d5dea2212a0f649d8a348c4bee 100644
--- a/examples/core_examples/slabspike_example.py
+++ b/examples/core_examples/slabspike_example.py
@@ -5,6 +5,12 @@ In this example we look at a simple example with the sum
 of two Gaussian distributions, and we try to fit with
 up to three Gaussians.
 
+We will use the `PyMultiNest` sampler, which is fast but can be unreliable
+when significant correlations exist in the likelihood.
+
+To install `PyMultiNest`, run
+
+$ conda install -c conda-forge pymultinest
 """
 
 import matplotlib.pyplot as plt
@@ -130,7 +136,7 @@ result = bilby.run_sampler(
     priors=priors,
     outdir=outdir,
     label=label,
-    sampler="dynesty",
+    sampler="pymultinest",
     nlive=400,
 )
 
diff --git a/test/integration/example_test.py b/test/integration/example_test.py
index c24ebfdebef920a68cdae32282967adb599cc8f7..efd65bf83f4f94c94b7caebea4de4b4a0db8603e 100644
--- a/test/integration/example_test.py
+++ b/test/integration/example_test.py
@@ -1,21 +1,35 @@
 import matplotlib
 
-matplotlib.use("Agg")
+matplotlib.use("Agg")  # noqa
 
+import glob
+import importlib.util
 import unittest
 import os
+import parameterized
+import pytest
 import shutil
 import logging
 
-# Required to run the tests
-from past.builtins import execfile
 import bilby.core.utils
 
-# Imported to ensure the examples run
-import numpy as np  # noqa: F401
-import inspect  # noqa: F401
+bilby.core.utils.command_line_args.clean = True
+core_examples = glob.glob("examples/core_examples/*.py")
+core_examples += glob.glob("examples/core_examples/*/*.py")
+core_args = [(fname.split("/")[-1][:-3], fname) for fname in core_examples]
 
-bilby.core.utils.command_line_args.bilby_test_mode = True
+gw_examples = [
+    "examples/gw_examples/injection_examples/fast_tutorial.py",
+    "examples/gw_examples/data_examples/GW150914.py",
+]
+gw_args = [(fname.split("/")[-1][:-3], fname) for fname in gw_examples]
+
+
+def _execute_file(name, fname):
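+    """ Import and execute the example file as a module """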
+    print("Running {}".format(fname))
+    spec = importlib.util.spec_from_file_location(name, fname)
+    module = importlib.util.module_from_spec(spec)
+    spec.loader.exec_module(module)
 
 
 class ExampleTest(unittest.TestCase):
@@ -39,25 +53,20 @@ class ExampleTest(unittest.TestCase):
             except OSError:
                 logging.warning("{} not removed prior to tests".format(cls.outdir))
 
-    def test_examples(self):
+    @parameterized.parameterized.expand(core_args)
+    def test_core_examples(self, name, fname):
         """ Loop over examples to check they run """
-        examples = [
-            "examples/core_examples/linear_regression.py",
-            "examples/core_examples/linear_regression_unknown_noise.py",
-        ]
-        for filename in examples:
-            print("Testing {}".format(filename))
-            execfile(filename)
+        bilby.core.utils.command_line_args.bilby_test_mode = False
+        ignore = ["15d_gaussian"]
+        if any([item in fname for item in ignore]):
+            pytest.skip()
+        _execute_file(name, fname)
 
-    def test_gw_examples(self):
+    @parameterized.parameterized.expand(gw_args)
+    def test_gw_examples(self, name, fname):
         """ Loop over examples to check they run """
-        examples = [
-            "examples/gw_examples/injection_examples/fast_tutorial.py",
-            "examples/gw_examples/data_examples/GW150914.py",
-        ]
-        for filename in examples:
-            print("Testing {}".format(filename))
-            execfile(filename)
+        bilby.core.utils.command_line_args.bilby_test_mode = True
+        _execute_file(name, fname)
 
 
 if __name__ == "__main__":