diff --git a/gracedb/events/management/commands/clean_up_performance_log.py b/gracedb/events/management/commands/clean_up_performance_log.py
new file mode 100644
index 0000000000000000000000000000000000000000..e0960e6a7fb950436a1d0992bd58090fe14b237a
--- /dev/null
+++ b/gracedb/events/management/commands/clean_up_performance_log.py
@@ -0,0 +1,44 @@
+import datetime
+import re
+
+from django.conf import settings
+from django.core.management.base import BaseCommand
+
+# Parameters
+LOOKBACK_TIME = 5  # days
+LOG_FILE_PATH = settings.LOGGING['handlers']['performance_file']['filename']
+
+
+class Command(BaseCommand):
+    help = "Remove performance log entries older than LOOKBACK_TIME days"
+
+    def handle(self, *args, **kwargs):
+
+        # Read log
+        logfile = open(LOG_FILE_PATH, "r")
+        lines = logfile.readlines()
+        logfile.close()
+
+        # Compute the cutoff time, LOOKBACK_TIME days in the past
+        dt_now = datetime.datetime.now()
+        dt_min = dt_now - datetime.timedelta(days=LOOKBACK_TIME)
+
+        # Get "fresh" enough log messages from logfile
+        dateformat = settings.LOG_DATEFMT
+        logfile_str = ""
+        for line in lines:
+
+            # Check the date to see whether it's fresh enough; skip
+            # lines that don't carry a parsable leading timestamp
+            match = re.search(r'^(.*) \| .*', line)
+            if match is None:
+                continue
+            try:
+                dt = datetime.datetime.strptime(match.group(1), dateformat)
+            except ValueError:
+                continue
+            if dt > dt_min:
+                logfile_str += line
+
+        # Overwrite file
+        with open(LOG_FILE_PATH, "w") as logfile:
+            logfile.write(logfile_str)
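
Review note (example, not part of the patch): the pruning assumes every
retained line starts with a settings.LOG_DATEFMT timestamp followed by
" | ". A minimal standalone sketch of the same filter; the date format
and sample lines below are illustrative, not taken from GraceDB settings:

    import datetime
    import re

    DATEFMT = "%Y-%m-%d %H:%M:%S"  # assumed; the command uses LOG_DATEFMT
    cutoff = datetime.datetime(2015, 1, 3)

    lines = [
        "2015-01-01 00:00:00 | stale performance record",
        "2015-01-05 12:00:00 | fresh performance record",
        "no timestamp here",
    ]

    kept = []
    for line in lines:
        match = re.search(r'^(.*) \| .*', line)
        if match is None:
            continue  # line lacks the "timestamp | message" structure
        try:
            stamp = datetime.datetime.strptime(match.group(1), DATEFMT)
        except ValueError:
            continue  # leading field is not a timestamp
        if stamp > cutoff:
            kept.append(line)

    print(kept)  # only the fresh record survives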
diff --git a/gracedb/events/management/commands/make_latency_histograms.py b/gracedb/events/management/commands/make_latency_histograms.py
new file mode 100644
index 0000000000000000000000000000000000000000..1f7e2aa3b993fc71f6a68f7f4e4eef3ca62c81e4
--- /dev/null
+++ b/gracedb/events/management/commands/make_latency_histograms.py
@@ -0,0 +1,148 @@
+from datetime import timedelta
+import os
+
+import matplotlib
+# The backend must be selected before pyplot is imported
+matplotlib.use("Agg")
+import matplotlib.pyplot as pyplot
+import numpy
+
+from django.conf import settings
+from django.core.management.base import BaseCommand
+from django.utils import timezone
+
+from events.models import Event, Pipeline
+
+DEST_DIR = settings.LATENCY_REPORT_DEST_DIR
+MAX_X = settings.LATENCY_MAXIMUM_CHARTED
+WEB_PAGE_FILE_PATH = settings.LATENCY_REPORT_WEB_PAGE_FILE_PATH
+URL_PREFIX = settings.REPORT_INFO_URL_PREFIX
+# Pipelines excluded from the latency report. Introduced by Branson
+# during ER6 to reduce clutter.
+PIPELINE_EXCLUDE_LIST = ['HardwareInjection', 'X', 'Q', 'Omega', 'Ringdown',
+    'LIB', 'SNEWS', 'pycbc', 'CWB2G']
+
+
+def writeIndex(notes, fname):
+
+    createdDate = str(timezone.now())
+    maxx = MAX_X
+
+    table = '<table border="1" bgcolor="white">'
+    table += """<caption>Tables generated: %s<br/>
+                      Maximum charted latency: %s seconds</caption>""" \
+             % (createdDate, maxx)
+    table += "<tr><th>&nbsp;</th>"
+    for time_range in ['day', 'week', 'month']:
+        table += "<th>last %s</th>" % time_range
+    table += "</tr>"
+    for pipeline in Pipeline.objects.all():
+        if pipeline.name in PIPELINE_EXCLUDE_LIST:
+            continue
+        pname = pipeline.name
+        table += "<tr>"
+        table += "<td>%s</td>" % pname
+        for time_range in ['day', 'week', 'month']:
+            table += '<td align="center" bgcolor="white">'
+            n = notes[pname][time_range]
+            extra = ""
+            if n['fname'] is not None:
+                table += '<img width="400" height="300" src="%s"/>' % \
+                           (URL_PREFIX + os.path.basename(n['fname']))
+                extra = "%d total events" % n['count']
+            else:
+                extra = "No Applicable Events"
+            if n['over'] != 0:
+                extra += "<br/>%d events over maximum latency of %s seconds" % (n['over'], MAX_X)
+            table += "<br/>%s" % extra
+            table += "</td>"
+        table += "</tr>"
+    table += "</table>"
+
+    f = open(fname, "w")
+    f.write(table)
+    f.close()
+
+
+def makePlot(data, title, maxx=1800, facecolor='green'):
+    # convert data to float (might be Decimal type)
+    data = [float(d) for d in data]
+
+    # make sure plot is clear!
+    pyplot.close()
+
+    # 50 log-spaced bin edges from ~20 s up to the maximum charted latency
+    nbins = numpy.logspace(1.3, numpy.log10(maxx), 50)
+
+    fig = pyplot.figure()
+
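+    # Axes rectangle: (left, bottom, width, height) as figure fractions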
+    ax = fig.add_axes((.1, .1, .8, .8))
+
+    n, bins, patches = ax.hist(data, nbins, facecolor=facecolor)
+
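+    # Pad the y-axis ceiling up to a multiple of 10 (minimum 10)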
+    vmax = max(n)
+    if vmax <= 10:
+        vmax = 10
+    elif (vmax%10) == 0:
+        vmax += 10
+    else:
+        vmax += 10 - (vmax % 10)
+
+    ax.set_title(title, fontsize=20)
+    ax.set_xlabel('Seconds', fontsize=20)
+    ax.set_ylabel('Number of Events', fontsize=20)
+    ax.set_xscale('log')
+    ax.axis([20, maxx, 0, vmax])
+    ax.grid(True)
+
+    return fig
+
+
+class Command(BaseCommand):
+    help="Create latency histograms"
+
+    def add_arguments(self, parser):
+        pass
+
+    def handle(self, *args, **options):
+        now = timezone.now()
+
+        start_day = now - timedelta(1)
+        start_week = now - timedelta(7)
+        start_month = now - timedelta(30)
+
+        time_ranges =  [(start_day, "day"), (start_week, "week"), (start_month,
+            "month")]
+
+        annotations = {}
+
+        # Make the histograms, save as png's.
+        for pipeline in Pipeline.objects.all():
+            if pipeline.name in PIPELINE_EXCLUDE_LIST:
+                continue
+            pname = pipeline.name
+            annotations[pname] = {}
+            for start_time, time_range in time_ranges:
+                note = {}
+                fname = os.path.join(DEST_DIR, "%s-%s.png" % (pname, time_range))
+                note['fname'] = fname
+                data = Event.objects.filter(pipeline=pipeline,
+                                            created__range=[start_time, now],
+                                            gpstime__gt=0) \
+                                    .exclude(group__name="Test")
+                note['count'] = data.count()
+                data = [e.reportingLatency() for e in data]
+                data = [d for d in data if d <= MAX_X and d > 0]
+                note['npoints'] = len(data)
+                note['over'] = note['count'] - note['npoints']
+                if note['npoints'] <= 0:
+                    try:
+                        note['fname'] = None
+                        os.unlink(fname)
+                    except OSError:
+                        pass
+                else:
+                    makePlot(data, pname, maxx=MAX_X).savefig(fname)
+                annotations[pname][time_range] = note
+
+        writeIndex(annotations, WEB_PAGE_FILE_PATH)
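
Review note (example, not part of the patch): the histogram uses
logarithmically spaced bin edges, which is why the x-axis floor is
hard-coded to 20 seconds (10**1.3 is roughly 19.95). A quick standalone
check of the edge computation:

    import numpy

    maxx = 1800  # default maximum charted latency, in seconds
    edges = numpy.logspace(1.3, numpy.log10(maxx), 50)

    print(round(edges[0], 2))   # ~19.95 s, matching the axis floor of 20
    print(round(edges[-1], 2))  # 1800.0 s, the maximum charted latency
    print(len(edges))           # 50 edges, i.e. 49 bins for ax.hist()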
diff --git a/gracedb/events/management/commands/write_binned_counts.py b/gracedb/events/management/commands/write_binned_counts.py
new file mode 100644
index 0000000000000000000000000000000000000000..84cb6e85d7f49eeacdc9e5802dba7480d10a5114
--- /dev/null
+++ b/gracedb/events/management/commands/write_binned_counts.py
@@ -0,0 +1,165 @@
+from datetime import timedelta, datetime
+from dateutil import parser
+import json
+import pytz
+
+from django.conf import settings
+from django.core.management.base import BaseCommand
+
+from events.models import Event, Pipeline, Search, Group
+
+# Default settings
+LOOKBACK_HOURS = 720
+BIN_WIDTH = 24
+
+# Set up time range
+end_time = datetime.utcnow()
+end_time = end_time.replace(hour=0, minute=0, second=0, microsecond=0)
+end_time = pytz.utc.localize(end_time)
+start_time = end_time - timedelta(hours=LOOKBACK_HOURS)
+# Convert to ISO format strings (parsed back into datetimes in handle())
+start_time = start_time.isoformat()
+end_time = end_time.isoformat()
+
+# Objects used to exclude MDC and Test events from the counts; None if
+# the corresponding Search/Group is absent from this database
+try:
+    MDC = Search.objects.get(name='MDC')
+except Search.DoesNotExist:
+    MDC = None
+try:
+    Test = Group.objects.get(name='Test')
+except Group.DoesNotExist:
+    Test = None
+
+# make a list of pipeline objects
+PIPELINES = []
+for n in settings.BINNED_COUNT_PIPELINES:
+    try:
+        PIPELINES.append(Pipeline.objects.get(name=n))
+    except Pipeline.DoesNotExist:
+        pass
+OTHER_PIPELINES = []
+for p in Pipeline.objects.all():
+    if p not in PIPELINES:
+        OTHER_PIPELINES.append(p)
+
+
+def get_counts_for_bin(lbb, bin_width, pipeline):
+    """Count the events in one bin, excluding MDC and Test events.
+
+    lbb is the lower bin boundary (a timezone-aware UTC datetime) and
+    bin_width is the bin width in hours.
+    """
+    ubb = lbb + timedelta(hours=bin_width)
+    events = Event.objects.filter(pipeline=pipeline, created__range=(lbb, ubb))
+    if MDC:
+        events = events.exclude(search=MDC)
+    if Test:
+        events = events.exclude(group=Test)
+    return events.count()
+
+
+# given a date string, parse it and localize to UTC if necessary
+def parse_and_localize(date_string):
+    if not date_string:
+        return None
+    dt = parser.parse(date_string)
+    if not dt.tzinfo:
+        dt = pytz.utc.localize(dt)
+    return dt
+
+
+def get_record(lbb, bin_width):
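+    # Label the record with its bin-center time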
+    bc = lbb + timedelta(hours=bin_width/2)
+    r = { 'time': bc, 'delta_t': bin_width }
+    total = 0
+    for p in PIPELINES:
+        count = get_counts_for_bin(lbb, bin_width, p)
+        total += count
+        r[p.name] = count
+    other = 0
+    for p in OTHER_PIPELINES:
+        other += get_counts_for_bin(lbb, bin_width, p)
+    r['Other'] = other
+    total += other
+    r['Total'] = total
+    return r
+
+
+def dt_record(r):
+    r['time'] = parse_and_localize(r['time'])
+    return r
+
+
+def strtime_record(r):
+    r['time'] = r['time'].replace(tzinfo=None).isoformat()
+    return r
+
+
+class Command(BaseCommand):
+    help = 'Manage the binned counts file used for plotting rates.'
+
+    def handle(self, *args, **options):
+        # First of all, that bin width had better be an even number of hours.
+        bin_width = BIN_WIDTH
+        if bin_width % 2 != 0:
+            raise ValueError("Bin width must be divisible by 2. Sorry.")
+
+        # Let's take our desired range and turn it into UTC datetime objects
+        start = parse_and_localize(start_time)
+        end = parse_and_localize(end_time)
+
+        duration = end - start
+        # Verify that the range spans an integer number of hours, i.e.
+        # that the total seconds are divisible by 3600.
+        hours, r_seconds = divmod(duration.total_seconds(), 3600)
+        if r_seconds != 0.0:
+            msg = "The start and end times must be separated by an integer number of hours."
+            raise ValueError(msg)
+
+        # Now we need to verify that the number of hours is divisible by our
+        # bin width
+        bins, r_hours = divmod(hours, bin_width)
+        bins = int(bins)
+
+        if r_hours != 0.0:
+            msg = "The start and end times must correspond to an integer number of bins."
+            raise ValueError(msg)
+
+        # Read in the existing counts file, if any, and interpret it
+        # as JSON
+        records = []
+        try:
+            with open(settings.BINNED_COUNT_FILE, 'r') as f:
+                records = json.loads(f.read())
+        except (IOError, ValueError):
+            # Missing or malformed file; start from scratch
+            pass
+
+        # process the records so that the time is a datetime for all of them
+        # Note that the times here are at the bin centers
+        records = [dt_record(r) for r in records]
+
+        # accumulate the necessary records
+        new_records = []
+        for i in range(bins):
+            lbb = start + timedelta(hours=i*bin_width)
+            bc = lbb + timedelta(hours=bin_width/2)
+            # look for an existing record with the desired bin center
+            # and bin width
+            found = False
+            for r in records:
+                if bc == r['time'] and bin_width == r['delta_t']:
+                    found = True
+                    new_records.append(r)
+                    break
+            if not found:
+                new_records.append(get_record(lbb, bin_width))
+
+        new_records = [strtime_record(r) for r in new_records]
+
+        # write out the file
+        with open(settings.BINNED_COUNT_FILE, 'w') as f:
+            f.write(json.dumps(new_records))
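
Review note (example, not part of the patch): each record is keyed by
its bin-center time and bin width, so counts from earlier runs are
reused rather than recomputed. A sketch of the on-disk shape, assuming
BINNED_COUNT_PIPELINES lists 'gstlal' and 'MBTAOnline' (illustrative
values only):

    import json

    record = {
        "time": "2015-01-01T12:00:00",  # naive ISO string, bin center (UTC)
        "delta_t": 24,                  # bin width in hours
        "gstlal": 10,                   # per-pipeline counts (illustrative)
        "MBTAOnline": 5,
        "Other": 2,                     # pipelines not listed in settings
        "Total": 17,
    }
    print(json.dumps([record]))  # the file holds a JSON list of records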
diff --git a/gracedb/ligoauth/management/__init__.py b/gracedb/ligoauth/management/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/gracedb/ligoauth/management/commands/__init__.py b/gracedb/ligoauth/management/commands/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/gracedb/ligoauth/management/commands/update_user_accounts_from_ligo_ldap.py b/gracedb/ligoauth/management/commands/update_user_accounts_from_ligo_ldap.py
new file mode 100644
index 0000000000000000000000000000000000000000..64dfe8c4404edf0702b73ac0bb1f6b71ca76e19c
--- /dev/null
+++ b/gracedb/ligoauth/management/commands/update_user_accounts_from_ligo_ldap.py
@@ -0,0 +1,198 @@
+import datetime
+
+import ldap
+
+from django.conf import settings
+from django.contrib.auth.models import User, Group
+from django.core.management.base import BaseCommand
+
+from ligoauth.models import LigoLdapUser, X509Cert, AlternateEmail
+
+# Variables for LDAP search
+BASE_DN = "ou=people,dc=ligo,dc=org"
+SEARCH_SCOPE = ldap.SCOPE_SUBTREE
+SEARCH_FILTER = "(employeeNumber=*)"
+RETRIEVE_ATTRIBUTES = [
+    "krbPrincipalName",
+    "gridX509subject",
+    "givenName",
+    "sn",
+    "mail",
+    "isMemberOf",
+    "mailAlternateAddress",
+    "mailForwardingAddress"
+]
+LDAP_ADDRESS = "ldap.ligo.org"
+LDAP_PORT = 636
+
+class Command(BaseCommand):
+    help="Get updated user data from LIGO LDAP"
+
+    def add_arguments(self, parser):
+        parser.add_argument('-q', '--quiet', action='store_true',
+            default=False, help='Suppress output')
+
+    def handle(self, *args, **options):
+        verbose = not options['quiet']
+        if verbose:
+            self.stdout.write('Refreshing users from LIGO LDAP at {0}' \
+                .format(datetime.datetime.utcnow()))
+
+        # Get LVC group
+        lvc_group = Group.objects.get(name=settings.LVC_GROUP)
+
+        # Open connection to LDAP and run a search
+        l = ldap.initialize("ldaps://{address}:{port}".format(
+            address=LDAP_ADDRESS, port=LDAP_PORT))
+        l.protocol_version = ldap.VERSION3
+        ldap_result_id = l.search(BASE_DN, SEARCH_SCOPE, SEARCH_FILTER,
+            RETRIEVE_ATTRIBUTES)
+
+        # Fetch results one message at a time; the loop ends when
+        # conn.result() returns an empty result_data list
+        result_data = True
+        while result_data:
+            result_type, result_data = conn.result(ldap_result_id, 0)
+
+            if result_type == ldap.RES_SEARCH_ENTRY:
+                for (ldap_dn, ldap_result) in result_data:
+                    first_name = unicode(ldap_result['givenName'][0], 'utf-8')
+                    last_name = unicode(ldap_result['sn'][0], 'utf-8')
+                    email = ldap_result['mail'][0]
+                    new_dns = set(ldap_result.get('gridX509subject', []))
+                    memberships = ldap_result.get('isMemberOf', [])
+                    is_active = lvc_group.name in memberships
+                    principal = ldap_result['krbPrincipalName'][0]
+
+                    # Update/Create LigoLdapUser entry
+                    defaults = {
+                        'first_name': first_name,
+                        'last_name': last_name,
+                        'email': email,
+                        'username': principal,
+                        'is_active': is_active,
+                    }
+
+                    # Determine if base user and ligoldapuser objects exist
+                    user_exists = User.objects.filter(username=
+                        defaults['username']).exists()
+                    l_user_exists = LigoLdapUser.objects.filter(
+                        ldap_dn=ldap_dn).exists()
+
+                    # Handle different cases
+                    created = False
+                    if l_user_exists:
+                        l_user = LigoLdapUser.objects.get(ldap_dn=ldap_dn)
+                        user = l_user.user_ptr
+                    else:
+                        if user_exists:
+                            user = User.objects.get(username=
+                                defaults['username'])
+                            l_user = LigoLdapUser.objects.create(
+                                ldap_dn=ldap_dn, user_ptr=user)
+                            l_user.__dict__.update(user.__dict__)
+                            if verbose:
+                                self.stdout.write(("Created ligoldapuser "
+                                    "for {0}").format(user.username))
+                        else:
+                            l_user = LigoLdapUser.objects.create(
+                                ldap_dn=ldap_dn, **defaults)
+                            user = l_user.user_ptr
+                            if verbose:
+                                self.stdout.write(("Created user and "
+                                    "ligoldapuser for {0}").format(
+                                    l_user.username))
+                            created = True
+
+                    # Typically a case where the person's username was changed
+                    # and there are now two user accounts in GraceDB
+                    if user.username != defaults['username'] and user_exists:
+                        self.stdout.write(('ERROR: requires manual '
+                            'investigation. LDAP username: {0}, '
+                            'ligoldapuser.user_ptr.username: {1}').format(
+                            defaults['username'], l_user.user_ptr.username))
+                        continue
+
+                    # Update user attributes from LDAP
+                    changed = False
+                    if not created:
+                        for k in defaults:
+                            if (defaults[k] != getattr(user, k)):
+                                setattr(l_user, k, defaults[k])
+                                changed = True
+                    if changed and verbose:
+                        self.stdout.write("User {0} updated".format(
+                            l_user.username))
+
+                    # Revoke staff/superuser if not active.
+                    if ((l_user.is_staff or l_user.is_superuser)
+                        and not is_active):
+                        l_user.is_staff = l_user.is_superuser = False
+                        changed = True
+
+                    # Try to save user.
+                    if created or changed:
+                        try:
+                            l_user.save()
+                        except Exception as e:
+                            self.stdout.write(("Failed to save user '{0}': "
+                                "{1}.").format(l_user.username, e))
+                            continue
+
+                    # Update X509 certs for user
+                    current_dns = set([c.subject for c in
+                        user.x509cert_set.all()])
+                    if current_dns != new_dns:
+                        for dn in new_dns - current_dns:
+                            cert, _ = X509Cert.objects.get_or_create(
+                                subject=dn)
+                            cert.users.add(l_user)
+
+                    # Update group information - we do this only for groups
+                    # that already exist in GraceDB
+                    for g in Group.objects.all():
+                        # Add the user to the group if they aren't a member
+                        if (g.name in memberships and g not in
+                            l_user.groups.all()):
+
+                            g.user_set.add(l_user)
+                            if verbose:
+                                self.stdout.write("Adding {0} to {1}".format(
+                                    l_user.username, g.name))
+
+                    # Remove the user from the LVC group if they are no longer
+                    # a member. This is the only group in GraceDB which is
+                    # populated from the LIGO LDAP.
+                    if (lvc_group.name not in memberships and
+                        lvc_group in l_user.groups.all()):
+
+                        l_user.groups.remove(lvc_group)
+                        if verbose:
+                            self.stdout.write("Removing {user} from {group}" \
+                                .format(user=l_user.username,
+                                group=lvc_group.name))
+
+                    # Collect alternate email addresses from LDAP, if any
+                    try:
+                        mailForwardingAddress = unicode(
+                            ldap_result['mailForwardingAddress'][0], 'utf-8')
+                    except (KeyError, IndexError):
+                        mailForwardingAddress = None
+                    mailAlternateAddresses = ldap_result.get(
+                        'mailAlternateAddress', [])
+
+                    # Finally, deal with alternate emails. Failures here
+                    # (e.g. duplicate entries) are non-fatal.
+                    if mailForwardingAddress:
+                        try:
+                            AlternateEmail.objects.get_or_create(l_user=user,
+                                email=mailForwardingAddress)
+                        except Exception:
+                            pass
+
+                    for email in mailAlternateAddresses:
+                        try:
+                            AlternateEmail.objects.get_or_create(
+                                l_user=user, email=email)
+                        except Exception:
+                            pass
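
Review note (example, not part of the patch): the command uses
python-ldap's asynchronous search API, polling result() until the final
message arrives with an empty data list. A minimal sketch of that
pattern; the server and base DN below are illustrative:

    import ldap

    conn = ldap.initialize("ldaps://ldap.example.org:636")
    conn.protocol_version = ldap.VERSION3

    msg_id = conn.search("ou=people,dc=example,dc=org", ldap.SCOPE_SUBTREE,
                         "(employeeNumber=*)", ["mail", "sn"])

    result_data = True
    while result_data:
        # all=0 returns entries one message at a time instead of
        # buffering the whole result set
        result_type, result_data = conn.result(msg_id, 0)
        if result_type == ldap.RES_SEARCH_ENTRY:
            for dn, attrs in result_data:
                print(dn, attrs.get("mail", []))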
diff --git a/gracedb/userprofile/management/__init__.py b/gracedb/userprofile/management/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/gracedb/userprofile/management/commands/__init__.py b/gracedb/userprofile/management/commands/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/gracedb/userprofile/management/commands/remove_inactive_alerts.py b/gracedb/userprofile/management/commands/remove_inactive_alerts.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d48c50ab4615ecf629f1de1d776731f6e34b5a3
--- /dev/null
+++ b/gracedb/userprofile/management/commands/remove_inactive_alerts.py
@@ -0,0 +1,42 @@
+import datetime
+
+from django.conf import settings
+from django.contrib.auth.models import Group
+from django.core.management.base import BaseCommand
+
+from userprofile.models import Contact, Trigger
+
+
+class Command(BaseCommand):
+    help="Delete Contacts and Notifications for inactive users"
+
+    def add_arguments(self, parser):
+        parser.add_argument('-q', '--quiet', action='store_true',
+            default=False, help='Suppress output')
+
+    def handle(self, *args, **options):
+        verbose = not options['quiet']
+
+        if verbose:
+            self.stdout.write(('Checking inactive users\' triggers and '
+                'contacts at {0}').format(datetime.datetime.utcnow()))
+
+        # Get contacts and triggers whose user is no longer in the LVC
+        lvc = Group.objects.get(name=settings.LVC_GROUP)
+        triggers = Trigger.objects.exclude(user__groups=lvc)
+        contacts = Contact.objects.exclude(user__groups=lvc)
+
+        # Generate log message
+        if verbose:
+            if triggers.exists():
+                t_log_msg = "Deleting {0} triggers: ".format(triggers.count())\
+                    + " | ".join([t.__str__() for t in triggers])
+                self.stdout.write(t_log_msg)
+            if contacts.exists():
+                c_log_msg = "Deleting {0} contacts: ".format(contacts.count())\
+                     + " | ".join([c.__str__() for c in contacts])
+                self.stdout.write(c_log_msg)
+
+        # Delete
+        triggers.delete()
+        contacts.delete()
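
Review note (example, not part of the patch): exclude(user__groups=lvc)
selects rows whose owner is not a member of the LVC group, and delete()
removes them in bulk. A sketch of the same selection on the Contact
model:

    from django.conf import settings
    from django.contrib.auth.models import Group
    from userprofile.models import Contact

    lvc = Group.objects.get(name=settings.LVC_GROUP)

    # Contacts whose owner lacks LVC membership; users in other groups
    # (or in no group at all) still match the exclusion.
    stale = Contact.objects.exclude(user__groups=lvc)
    print(stale.count())
    stale.delete()  # one bulk queryset deletion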