# Basic Apache configuration
<IfModule mod_alias.c>
  <Location /shibboleth-ds>
    Allow from all
    <IfModule mod_shib.c>
      AuthType shibboleth
      ShibRequestSetting requireSession false
      require shibboleth
    </IfModule>
  </Location>
  Alias /shibboleth-ds/idpselect_config.js /etc/shibboleth-ds/idpselect_config.js
  Alias /shibboleth-ds/idpselect.js /etc/shibboleth-ds/idpselect.js
  Alias /shibboleth-ds/idpselect.css /etc/shibboleth-ds/idpselect.css
  Alias /shibboleth-ds/index.html /etc/shibboleth-ds/index.html
  Alias /shibboleth-ds/blank.gif /etc/shibboleth-ds/blank.gif
</IfModule>
Name: shibboleth-embedded-ds
Version: 1.2.0
Release: 1
Summary: Client-side federation discovery service for SAML-based SSO
Group: Productivity/Networking/Security
Vendor: Shibboleth Consortium
License: Apache-2.0
URL: http://shibboleth.net/
Source: %{name}-%{version}.tar.gz
BuildArch: noarch
BuildRoot: %{_tmppath}/%{name}-%{version}-root
%if "%{_vendor}" == "redhat"
BuildRequires: redhat-rpm-config
%{!?_without_builtinapache:BuildRequires: httpd}
%endif
%if "%{_vendor}" == "suse"
%{!?_without_builtinapache:BuildRequires: apache2}
%endif
%description
The Embedded Discovery Service is a JS/CSS/HTML-based tool for
identity provider selection in conjunction with SAML-based web
single sign-on implementations such as Shibboleth.
%prep
%setup -q
%build
%install
%{__make} install DESTDIR=$RPM_BUILD_ROOT
# Plug the DS into the built-in Apache on a recognized system.
touch rpm.filelist
APACHE_CONFIG="shibboleth-ds.conf"
%{?_without_builtinapache:APACHE_CONFIG="no"}
if [ "$APACHE_CONFIG" != "no" ] ; then
APACHE_CONFD="no"
if [ -d %{_sysconfdir}/httpd/conf.d ] ; then
APACHE_CONFD="%{_sysconfdir}/httpd/conf.d"
fi
if [ -d %{_sysconfdir}/apache2/conf.d ] ; then
APACHE_CONFD="%{_sysconfdir}/apache2/conf.d"
fi
if [ "$APACHE_CONFD" != "no" ] ; then
%{__mkdir} -p $RPM_BUILD_ROOT$APACHE_CONFD
%{__cp} -p $RPM_BUILD_ROOT%{_sysconfdir}/shibboleth-ds/$APACHE_CONFIG $RPM_BUILD_ROOT$APACHE_CONFD/$APACHE_CONFIG
echo "%config(noreplace) $APACHE_CONFD/$APACHE_CONFIG" > rpm.filelist
fi
fi
%clean
[ "$RPM_BUILD_ROOT" != "/" ] && %{__rm} -rf $RPM_BUILD_ROOT
%post
%if "%{_vendor}" == "redhat"
# On upgrade, restart components if they're already running.
if [ "$1" -gt "1" ] ; then
%{!?_without_builtinapache:/sbin/service httpd status 1>/dev/null && /sbin/service httpd restart 1>/dev/null}
exit 0
fi
%endif
%preun
%if "%{_vendor}" == "redhat"
if [ "$1" = 0 ] ; then
%{!?_without_builtinapache:/sbin/service httpd status 1>/dev/null && /sbin/service httpd restart 1>/dev/null}
fi
%endif
%if "%{_vendor}" == "suse"
if [ "$1" = 0 ] ; then
%{!?_without_builtinapache:/sbin/service apache2 status 1>/dev/null && /sbin/service apache2 restart 1>/dev/null}
fi
%endif
exit 0
%postun
%if "%{_vendor}" == "suse"
cd /
%{!?_without_builtinapache:%restart_on_update apache2}
%endif
%files -f rpm.filelist
%defattr(-,root,root,-)
%dir %{_sysconfdir}/shibboleth-ds
%{_sysconfdir}/shibboleth-ds/*.txt
%{_sysconfdir}/shibboleth-ds/*.gif
%config(noreplace) %{_sysconfdir}/shibboleth-ds/index.html
%config(noreplace) %{_sysconfdir}/shibboleth-ds/idpselect.css
%config(noreplace) %{_sysconfdir}/shibboleth-ds/idpselect_config.js
%config %{_sysconfdir}/shibboleth-ds/idpselect.js
%config %{_sysconfdir}/shibboleth-ds/shibboleth-ds.conf
%changelog
* Mon Jun 6 2016 Scott Cantor <cantor.2@osu.edu> - 1.2.0-1
- Update version
- Fix license name
* Wed Apr 29 2015 Scott Cantor <cantor.2@osu.edu> - 1.1.0-1
- Update version
- Stop marking text files as configs
- Add gif to package
* Mon Apr 11 2011 Scott Cantor <cantor.2@osu.edu> - 1.0-1
- First version.
[program:apache2]
command=/usr/sbin/apache2ctl -DFOREGROUND
stdout_logfile=/dev/fd/1
stdout_logfile_maxbytes=0
redirect_stderr=true
priority=3
[program:gracedb]
command=/usr/local/bin/gunicorn config.wsgi:application --limit-request-field_size 16384 --forwarder-headers 'SCRIPT_NAME,PATH_INFO,REMOTE_USER,ISMEMBEROF,SSL_CLIENT_S_DN,SSL_CLIENT_I_DN,X_FORWARDED_TLS_CLIENT_CERT,X_FORWARDED_TLS_CLIENT_CERT_INFOS' --reload --config /app/gracedb_project/config/gunicorn_config.py --error-logfile='-' --access-logfile='-'
directory=/app/gracedb_project
user=gracedb
group=www-data
stdout_logfile=/dev/fd/1
stdout_logfile_maxbytes=0
redirect_stderr=true
priority=2
autorestart=unexpected
[program:aws_xray]
autostart=%(ENV_ENABLE_AWS_XRAY)s
command=/usr/bin/xray -f /var/log/xray/xray.log -l info
user=xray
group=xray
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
redirect_stderr=true
priority=2
autorestart=true
startretries=100
[program:igwn_alert_overseer]
autostart=%(ENV_ENABLE_IGWN_OVERSEER)s
command=igwn_alert_overseer -a %(ENV_IGWN_ALERT_USER)s -b %(ENV_IGWN_ALERT_PASSWORD)s
-s %(ENV_IGWN_ALERT_SERVER)s -p %(ENV_IGWN_ALERT_OVERSEER_PORT)s
-g %(ENV_IGWN_ALERT_GROUP)s
-l - -e - -q - -c -f -i %(ENV_IGWN_ALERT_FLUSH_INTERVAL)s
user=gracedb
group=www-data
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
redirect_stderr=true
priority=2
autorestart=true
startretries=100
[program:redis_server]
autostart=%(ENV_DJANGO_ENABLE_LOCAL_REDIS)s
command=/usr/bin/redis-server /etc/redis/redis.conf --daemonize no
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
redirect_stderr=true
priority=3
autorestart=true
startretries=100
[program:redis_qcluster]
autostart=%(ENV_DJANGO_ENABLE_REDIS_QUEUE)s
command=/usr/bin/python3 /app/gracedb_project/manage.py qcluster --settings %(ENV_DJANGO_SETTINGS_MODULE)s
user=gracedb
group=www-data
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
redirect_stderr=true
priority=2
autorestart=true
startretries=100
[program:shibd]
autostart=%(ENV_ENABLE_SHIBD)s
command=/usr/sbin/shibd -F
user=_shibd
group=_shibd
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
redirect_stderr=true
priority=2
; supervisor config file
[unix_http_server]
file=/var/run/supervisor.sock ; (the path to the socket file)
chmod=0700 ; socket file mode (default 0700)
username=k7zsaqyt9vQZByiAXTpG4iyKUIKQxDQh
password=k7zsaqyt9vQZByiAXTpG4iyKUIKQxDQh
[supervisord]
nodaemon=true
user=root
pidfile=/var/run/supervisord.pid ; (supervisord pidfile;default supervisord.pid)
logfile=/dev/null
logfile_maxbytes=0
; the below section must remain in the config file for RPC
; (supervisorctl/web interface) to work, additional interfaces may be
; added by defining them in separate rpcinterface: sections
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
[supervisorctl]
serverurl=unix:///var/run/supervisor.sock ; use a unix:// URL for a unix socket
username=k7zsaqyt9vQZByiAXTpG4iyKUIKQxDQh
password=k7zsaqyt9vQZByiAXTpG4iyKUIKQxDQh
; The [include] section can just contain the "files" setting. This
; setting can list multiple files (separated by whitespace or
; newlines). It can also contain wildcards. The filenames are
; interpreted as relative to this file. Included files *cannot*
; include files themselves.
[include]
files = /etc/supervisor/conf.d/*.conf
html, body {
color: black;
background-color: white;
margin: 0;
padding: 0;
}
a.link, a, a.active {
color: #369;
}
h1,h2,h3,h4,h5,h6,#getting_started_steps {
font-family: "Century Schoolbook L", Georgia, serif;
font-weight: bold;
}
h1.docnav {
font-size: 25px;
}
#getting_started_steps li {
font-size: 80%;
margin-bottom: 0.5em;
}
#gracedb-nav-header {
color: black;
font-size: 127%;
background-color: white;
font: x-small "Lucida Grande", "Lucida Sans Unicode", geneva, verdana, sans-serif;
/* width: 757px; */
width: 95%;
margin: 0 auto 0 auto;
border: none;
/* border-left: 1px solid #aaa; */
/* border-right: 1px solid #aaa; */
padding: 10px 10px 0px 10px;
}
/* Nav */
#nav {
margin:0;
padding:0;
background:#eee; /* Nav base color */
float: left;
width: 100%;
font-size: 13px;
border:1px solid #42432d;
border-width:1px 1px;
}
#nav #nav-user
{
color:#000;
background:#eee; /* Nav base color */
padding:4px 20px 4px 20px;
float: right;
width: auto;
text-decoration:none;
font:bold 1em/1em Arial, Helvetica, sans-serif;
text-transform:uppercase;
/* text-shadow: 2px 2px 2px #555; */
}
#nav #nav-login, #nav #nav-logout {
float: right;
border-left: 1px solid black;
}
#nav li {
display:inline;
padding:0;
margin:0;
}
/*
#nav li:first-child a {
border-left:1px solid #42432d;
}
*/
#nav a:link,
#nav a:visited {
color:#000;
background:#eee; /* Nav base color */
/* padding:20px 40px 4px 10px; */
padding:4px 20px 4px 20px;
float: left;
width: auto;
border-right:1px solid #42432d;
text-decoration:none;
font:bold 1em/1em Arial, Helvetica, sans-serif;
text-transform:uppercase;
/* text-shadow: 2px 2px 2px #555; */
}
#nav a:hover {
/* color:#fff; / * Use if bg is dark */
background: #dce2ed; /* Nav hover color */
}
#home #nav-home a,
#public #nav-public a,
#search #nav-search a,
#pipelines #nav-pipelines a,
#alerts #nav-alerts a,
#password #nav-password a,
#doc #nav-doc a,
#other #nav-other a,
#about #nav-about a,
#archive #nav-archive a,
#lab #nav-lab a,
#reviews #nav-reviews a,
#latest #nav-latest a,
#contact #nav-contact a {
background: #a9b0ba; /* Nav selected color */
/* color:#fff; / * Use if bg is dark */
/* text-shadow:none; */
}
#home #nav-home a:hover,
#public #nav-public a,
#search #nav-search a,
#pipelines #nav-pipelines a,
#alerts #nav-alerts a,
#password #nav-password a,
#doc #nav-doc a,
#other #nav-other a,
#about #nav-about a:hover,
#archive #nav-archive a:hover,
#lab #nav-lab a:hover,
#reviews #nav-reviews a:hover,
#latest #nav-latest a:hover,
#contact #nav-contact a:hover {
/* background:#e35a00; */
background: #a9b0ba; /* Nav selected color */
}
#nav a:active {
/* background:#e35a00; */
background: #a9b0ba; /* Nav selected color */
color:#fff;
}
/* The Following is for the subclasses table. */
table.subclasses_main {
width: 100%;
}
td.subclasses_row {
vertical-align: top;
}
{% extends "!layout.html" %}
{% block extrahead %}
<link rel="stylesheet" href="_static/gracedb-nav-style.css" />
<script src="/static/dojo/dojo.js" data-dojo-config="async: true"></script>
<script>
var getKeys = function(obj){
    var keys = [];
    for(var key in obj){
        keys.push(key);
    }
    return keys;
};
require([
    'dojo/_base/declare',
    'dojo/query',
    'dojo/parser',
    'put-selector/put',
    'dojo/dom',
    'dojo/dom-construct',
    'dojo/dom-style',
    'dojo/request',
    'dojo/NodeList-dom',
    'dojo/NodeList-traverse',
    'dojo/domReady!',
], function(declare, query, parser, put, dom, domConstruct, domStyle, request) {
    parser.parse();
    // The url will look like: base + /admin_docs/...
    var loc = window.location.href;
    var ind = loc.indexOf('admin_docs');
    var url = loc.substring(0, ind);
    url += 'navbar_only';
    var header_div = dom.byId("gracedb-nav-header");
    request.get(url).then(
        function(text) {
            var node = domConstruct.toDom(text);
            var nl = query('*', node);
            var header_content = "";
            // XXX this should not be necessary. Why can't I just query
            // directly for the node with id == 'content'?
            nl.forEach(function(n) {
                if (n.tagName == 'DIV' && n.id == 'content') {
                    header_content = n.innerHTML;
                }
            });
            header_div.innerHTML = header_content;
        },
        function(error) {
            console.log("failed to get navbar content.");
        }
    );
    // All the rest of this is to get the silly subclass information table in place.
    {% if pagename == 'models' %}
    var tableNode = dom.byId("subclasses_table");
    var SubclassInfo = new Object();
    // You know, there is probably a better way of getting at this information.
    SubclassInfo['CoincInspiralEvent'] = ["ifos","end_time","mass","mchirp","minimum_duration","snr","false_alarm_rate","combined_far"];
    SubclassInfo['MultiBurstEvent'] = ["ifos","start_time","duration","peak_time","central_freq","bandwidth","amplitude","snr","confidence","false_alarm_rate","ligo_axis_ra","ligo_axis_dec","ligo_angle","ligo_angle_sig"];
    SubclassInfo['SimInspiralEvent'] = ["mass1","mass2","eta","mchirp","spin1z","spin2z","amp_order","coa_phase","geocent_end_time","f_lower","f_final","distance","latitude","longitude","polarization","inclination","theta0","phi0","waveform","source_channel","destination_channel"];
    SubclassInfo['GrbEvent'] = ["ivorn","author_ivorn","author_shortname","observatory_location_id","coord_system","ra","dec","error_radius","how_description","how_reference_url","trigger_duration","t90"];
    var mainTable = put(tableNode, 'table.subclasses_main');
    var headerRow = put(mainTable, 'tr');
    for (var key in SubclassInfo) {
        put(headerRow, 'th', key);
    }
    var contentsRow = put(mainTable, 'tr');
    for (var key in SubclassInfo) {
        var subTable = put(contentsRow, 'td.subclasses_row table');
        for (var ind in SubclassInfo[key]) {
            put(subTable, 'tr', SubclassInfo[key][ind]);
        }
    }
    {% endif %}
});
</script>
{% endblock %}
.. Information on GraceDB and LVAlert client packages
Client packages
===============
Contents:
.. toctree::
   :maxdepth: 2

   client_release
   shibbolized_client
.. _client_release:
================================
Preparing a new client release
================================
*Last updated 26 June 2017*
This section describes how to prepare new releases of ``gracedb-client`` and ``lvalert-client``.
We use ``gracedb-client`` as an example throughout and provide specifics when a particular step is different for each package.
.. NOTE::
   The steps here are only suggestions. You will undoubtedly discover better
   and/or different ways to go about this.
Development
===========
Implement the features and bug fixes you wish to include in the new
client release. It's easiest to do this within a virtual environment on
your workstation. That way you can make changes to the code and then::
    cd gracedb-client
    python setup.py develop
which will install the code into your virtual environment and update it with any changes you make.
When you are satisfied with the changes, commit and push.
Testing
=======
It's a good idea to test your new version on Scientific Linux (ldas-pcdev4 at CIT) and Debian (atlas9 on ATLAS) before proceeding.
The versions of Python there may be a bit behind the one on your workstation, and that can cause complications.
I've been burned by this before.
You can do it by cloning the package's git repository on a cluster head node and building in a virtual environment::

    mkdir gracedb_testing
    cd gracedb_testing
    git clone https://git.ligo.org/lscsoft/gracedb-client.git

    # Run the tests directly against the checkout
    cd gracedb-client
    PYTHONPATH=. python ligo/gracedb/test/test.py
    PYTHONPATH=. GRACEDB='python bin/gracedb' ./ligo/gracedb/test/test.sh

    # Or install into a virtual environment and test from there
    cd ..
    virtualenv gracedb_virtualenv --system-site-packages
    source gracedb_virtualenv/bin/activate
    cd gracedb-client
    python setup.py develop
    cd ligo/gracedb/test
    python test.py
For ``gracedb-client``, you should run the unit tests; any other tests you may want to run should be added to the unit tests if not already present.
For ``lvalert-client``, test basic functions like subscribing/unsubscribing from a node, sending and receiving messages, etc., as well as anything specific that you may have modified when adding new features.
Changes for packaging
=====================
Update the source code for the new version number, and update the changelog.
Here are the files you will need to change:
gracedb-client
----------------
* ``debian/changelog``: list your changes in the prescribed format
* ``ligo-gracedb.spec``: check version, unmangled version, and release number
* ``ligo/gracedb/version.py``: update version
lvalert-client
----------------
* ``setup.py``: bump the version number
* ``debian/changelog``: list your changes in the prescribed format
* ``ligo-lvalert.spec``: check version, unmangled version, and release number
.. NOTE::
   Updating ``debian/changelog``: ``DEBEMAIL="Albert Einstein <albert.einstein@ligo.org>" dch -v 1.24-1``.
   Make sure to mimic the formatting exactly!
Final steps
-----------
After editing these files, make sure to commit and push.
For ``gracedb-client``, make sure the client still passes the unit tests.
Go to the root of the repository and see ``ligo/gracedb/test/README`` for
more instructions on running the unit tests.
Tag this version of the repo and push the tag::
    git tag --list
    git tag -a "gracedb-1.24-1" -m "notes on your changes"
    git push --tags
.. NOTE::
   Git tags look like ``gracedb-1.24-1``: here 1 is the major version, 24 is
   the minor version, and the final 1 is the build number. Sometimes the
   format goes as 1.24.0-1, where 0 is typically referred to as the patch
   number.
Uploading to PyPI
=====================
Configure your machine
----------------------
The simplest way to upload to PyPI is with the Python package ``twine``.
First, get an account on both `PyPI <https://pypi.python.org/pypi>`__ and `test PyPI <https://testpypi.python.org/pypi>`__.
Then, create a ``$HOME/.pypirc`` file that looks like::
    [distutils]
    index-servers =
        pypi
        testpypi

    [pypi]
    username = username1
    password = userpassword1

    [testpypi]
    repository = https://test.pypi.org/legacy/
    username = username2
    password = userpassword2
This will be used below when uploading the packages.
.. NOTE::
   No repository is needed for the main PyPI if you're using twine 1.8.0+.
   If you aren't, set ``repository = https://upload.pypi.org/legacy/``.
Preparing the release
---------------------
To build the package and upload it to the test PyPI::

    # Check out the new tag
    git checkout gracedb-1.24-1

    # Clean up your repository
    git clean -dxf

    # Build the source tarball
    python setup.py sdist bdist_wheel

    # Upload to test PyPI
    twine upload dist/* -r testpypi
Testing
-------
Make sure that you can install and use the package from the test PyPI.
Log in to one of the LIGO clusters and do the following:

.. code-block:: bash

    # Set up virtual environment with install from test PyPI
    mkdir gracedb_testing
    cd gracedb_testing
    virtualenv --system-site-packages test
    source test/bin/activate
    pip install -i https://test.pypi.org/simple/ ligo-gracedb --upgrade

    # Clone the git repository (needed for git tag unittest to work)
    git clone https://git@git.ligo.org/lscsoft/gracedb-client.git

    # Check out tag
    cd gracedb-client
    git checkout gracedb-1.24-1

    # Run tests
    cd ligo/gracedb/test
    python test.py
    ./test.sh

    # Cleanup
    deactivate
    cd ../../../../..
    rm -rf gracedb_testing
Final upload
------------
This step should only be done **after** the release has gone through the entire
LIGO packaging and SCCB approval process (see below).
Upload to the real PyPI::
    twine upload dist/* -r pypi
Lastly, make sure you can pip install the package::
    deactivate
    cd ~/my_virtual_envs
    virtualenv --system-site-packages test
    source test/bin/activate
    pip install ligo-gracedb
    deactivate
    rm -f -r test
Steps for LIGO packaging
========================
Uploading the source
--------------------
Move the source tarball to ``software.ligo.org``. I do this with a script
I obtained from Adam Mercer, ``lscsrc_new_file.sh``.
I have added a version of this to the GraceDB ``admin-tools`` repo::
    cd /path/to/gracedb-client/dist
    cp /path/to/admin-tools/releases/lscsrc_new_file.sh .
    ./lscsrc_new_file.sh ligo-gracedb-*gz
.. NOTE::
   You must run the script in the same directory where the tarball lives.
   Otherwise it will put it onto the server in a weird subdirectory rather
   than just uploading the file directly.
Make sure that the file is accessible in the expected location, something
like ``http://software.ligo.org/lscsoft/source/ligo-gracedb-1.24.tar.gz``.
SCCB packaging and approval
---------------------------
Create a new issue on the `SCCB project page <https://bugs.ligo.org/redmine/projects/sccb>`__.
The title of the issue should be the name of the tag (ex: ``ligo-gracedb-1.24-1``).
The description should include an overview of the new features or modifications along with a list of the tests you have performed.
An example is shown below::

    Requesting deb and rpm packaging for ligo-gracedb-1.24-1.

    A source tarball has been uploaded to the usual location.

    A diff of the code changes is here: https://git.ligo.org/lscsoft/gracedb-client/compare/gracedb-1.23-1...gracedb-1.24-1

    This release includes:

    * Improved method for checking .netrc file permissions.
    * Added capability of creating events with labels initially attached, rather than having to add them as a separate step and generate multiple LVAlerts.
    * Added "offline" boolean parameter when creating an event. This parameter signifies whether the event was identified by an offline search (True) or online/low-latency search (False). Default: offline=False, which is identical to the current behavior.

    I've tested this release extensively, including:

    * Attempting to use several combinations of "bad" inputs for both labels and the "offline" parameter and ensuring that it fails appropriately (without contacting the server)
    * Running the unit tests (some of which were added in this patch)
Leave the issue status as 'New' to begin with.
The package builders will create packages for Scientific Linux and Debian; after it's deployed to the test machines (atlas9 on ATLAS and ldas-pcdev4 at CIT), someone will set the status to 'Testing'.
Run your tests on these machines if you didn't do it already; then update the issue's status to 'Requested'.
The SCCB members will vote and set the status to 'Approved' after it's approved.
After approval, the package will be deployed during the next maintenance period and the admins will set the category to 'Production' and the status to 'Closed'.
.. NOTE::
   You should submit a package for building and approval by Thursday at the
   very latest if you want it to be moved into production during maintenance
   on the following Tuesday.
# -*- coding: utf-8 -*-
#
# GraceDB Administration and Development documentation build configuration file, created by
# sphinx-quickstart on Thu Feb 25 16:37:58 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'GraceDB Administration and Development'
copyright = u'2017, Tanner Prestegard, Alexander Pace, Branson Stephens'
author = u'Tanner Prestegard, Alexander Pace, Branson Stephens'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.20'
# The full version, including alpha/beta/rc tags.
release = '1.20'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'GraceDBAdministrationandDevelopmentdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'GraceDBAdministrationandDevelopment.tex', u'GraceDB Administration and Development Documentation',
u'Branson Stephens', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'gracedbadministrationanddevelopment', u'GraceDB Administration and Development Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'GraceDBAdministrationandDevelopment', u'GraceDB Administration and Development Documentation',
author, 'GraceDBAdministrationandDevelopment', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
.. GraceDB developer's guide
Developer's Guide
=================
Contents:
.. toctree::
   :maxdepth: 2

   new_server_feature
   new_gracedb_instance
   new_event_subclass
   public_gracedb
.. GraceDB Administration and Development documentation master file, created by
   sphinx-quickstart on Thu Feb 25 16:37:58 2016.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.
GraceDB Admin and Dev Info
===========================
Contents:
.. toctree::
   :maxdepth: 2

   introduction
   ops
   dev
   client
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
.. _introduction:
============
Introduction
============
*Last updated 13 Feb 2018*
GraceDB is a service for aggregating and disseminating information about candidate gravitational-wave events.
It is a key component of the effort to offer low-latency GW notifications to astronomer partners.
We provide a web interface and a RESTful API, along with a Python client for easily interacting with the API.
GraceDB currently runs on Debian Stretch, although upcoming developments may allow the service to be run on any operating system.
Components of the service
=========================
We can divide the GraceDB service into five main components:
- Django application
- Database backend (MariaDB)
- Backend webserver (Gunicorn)
- Frontend webserver (Apache)
- Primary authentication (Shibboleth)
Django
------
GraceDB is written in Python and is constructed around the `Django <https://www.djangoproject.com/>`__ web framework.
We are currently using Python 2 and Django 1.11.
Note that this is the last version of Django to support Python 2, so a migration to Python 3 will be necessary in the future.
Gunicorn
--------
`Gunicorn <http://gunicorn.org/>`__ is a lightweight Python webserver which interfaces directly with the Django service via the WSGI protocol.
The settings are managed with a config file and the service is started via systemd.
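
For orientation, a gunicorn config file is plain Python with module-level
settings. A minimal sketch (the values here are illustrative placeholders,
not the production configuration)::

    # Minimal sketch of a gunicorn config file; values are placeholders.
    import multiprocessing

    bind = '127.0.0.1:8000'   # address the frontend webserver proxies to (assumed)
    workers = multiprocessing.cpu_count() * 2 + 1
    timeout = 30              # seconds before an unresponsive worker is killed
    accesslog = '-'           # '-' logs to stdout
    errorlog = '-'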
Apache
------
`Apache <https://httpd.apache.org/>`__ is one of the longest-running open source webservers.
We still use Apache in concert with Gunicorn because it's necessary for Shibboleth.
It is configured as a reverse proxy which gets authentication information from Shibboleth, sets that information in the headers, and then passes it on to Gunicorn.
Shibboleth
----------
`Shibboleth <https://www.shibboleth.net/>`__ is a software package for managing federated identities and providing a single sign-on portal.
It uses metadata providers to collect user attributes from an attribute authority and put them into the user's session.
These attributes are then available to the relevant service providers which are accessed by the user.
Metadata providers used by GraceDB:
- LIGO metadata provider
- InCommon - provides access via institutional accounts registered on gw-astronomy.org
- Cirrus Gateway - provides access via Google accounts registered on gw-astronomy.org
MariaDB
-------
Currently, we use MariaDB 10.1 with the MyISAM table engine.
Note that the table engine is set within the Django settings, not directly in the database.
We may want to look into other table engines in the future.
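
As a rough sketch of how setting the engine from Django looks (illustrative
only, not the actual settings module)::

    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.mysql',
            'NAME': 'gracedb',
            'OPTIONS': {
                # Run on each new connection so that tables created by
                # migrations default to MyISAM.
                'init_command': 'SET default_storage_engine=MYISAM',
            },
        },
    }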
Description of current servers
==============================
Here's a short overview of the currently available GraceDB servers:
- Production:

  - gracedb.ligo.org

- Test: (almost) identical to production, to be used for final testing of development work.

  - gracedb-test.ligo.org

- Development: for raw code development; feel free to break these servers as needed. Note that these servers are not registered with any non-LIGO metadata providers, so testing of external authentication needs to happen on gracedb-test.

  - gracedb-dev1.ligo.org
  - gracedb-dev2.ligo.org

- Other:

  - gracedb-playground.ligo.org: for pipeline and follow-up testing. Uses the gracedb-playground branch of the server code repository. Maintained very closely to production.
  - simdb.ligo.org: for gstlal testing. May be retired in the near future.
See :ref:`new_gracedb_instance` for information on setting up new servers.
Available tools
===============
The `scripts <https://git.ligo.org/gracedb/scripts>`__ repository contains a set of scripts for running cron jobs and performing useful tasks on a GraceDB server.
Examples include:
- Pulling LIGO users from the LIGO LDAP
- Adding users to executives and advocates groups
- Parsing Apache logs and database logs
- Starting LVAlert Overseer
- Managing LVAlert nodes
The `admin tools <https://git.ligo.org/gracedb/admin-tools>`__ repository is a collection of tools to be used by GraceDB administrators.
It also includes notes on past work and debugging efforts, as well as planning for future development.
.. _lvalert_management:
==================
LVAlert management
==================
*Last updated 28 June 2017*
Server configuration
====================
You can access some of the Openfire server settings through the `web interface <http://lvalert.cgca.uwm.edu:9090>`__.
Use your LVAlert credentials on that server to log in.
If you don't have an account, you'll need another GraceDB/LVAlert developer to create an account for you and make you an admin.
There is also an admin account; ask Patrick Brady if you don't know the password.
Note that this password can also be used to log in to the database via the MySQL interface.
There are a few things you can do from within this web interface, but the main useful function is to create/edit/delete user or pipeline accounts.
Managing nodes
==============
In an effort to tightly control production LVAlert nodes, we've developed a `script <https://git.ligo.org/gracedb/scripts/blob/master/add_lvalert_nodes.py>`__ which is used to create them and add publishers.
There are more instructions in the script, but the main principles are:
* The LVAlert account used by the production GraceDB server (username ``gracedb``) should be the owner of **ALL** GW event nodes, even those on the test LVAlert server.
* LVAlert accounts for test/development GraceDB servers should be added as publishers to these nodes on the test LVAlert server **only**.
* The production and test LVAlert servers should contain the same GW event nodes.
.. _miscellaneous:
================================
Miscellaneous
================================
*Last updated 17 October 2017*
Replacing the database on the test instance
===========================================
Sometimes it's nice to update the database on the test instance so that
it matches the one on the production instance--which is being constantly
updated with new events and annotations.
.. NOTE::
   There is no automated database replication between the test and
   production instances. This would get in the way of development,
   especially when one is working on database schema migrations to
   try out on the test box.
There are no longer SSH keys tied to the gracedb user account, so here is what I consider to be the simplest strategy for copying the database to a new server::

    # On production
    sudo -i
    cp /opt/gracedb/sql_backups/gracedb.sql.gz /home/albert.einstein
    chown albert.einstein:uwmlsc /home/albert.einstein/gracedb.sql.gz
    logout
    scp $HOME/gracedb.sql.gz albert.einstein@gracedb-test.ligo.org:~

    # Log into gracedb-test
    gunzip gracedb.sql.gz
    mysql -u gracedb -p gracedb < gracedb.sql
The latter step requires entering the MySQL password for the ``gracedb``
testing user. This can be found in ``config/settings/secret.py``.
.. _copying_event_data:
Getting data for particular events onto the test instance
=========================================================
The above procedure, for better or worse, doesn't move all of the data
onto the test server: there are still backing files to move (i.e., the files
associated with annotations). Normally, you'll only want to move files over
for a specific event or set of events. This is because the test server is
likely to have a much smaller disk than the production machine, so you can't
just rsync all of the data over. I cobbled together an extremely hacky
way of moving only the data for selected events.
First, on the machine with the data, somehow create a list of the GraceIDs
of the events that you want to move data for. I would do this in the Django
console (i.e., ``./manage.py shell``). Suppose I want to move the data
for all gstlal events during O1::
    from events.models import Event
    from search.forms import SimpleSearchForm

    f = SimpleSearchForm({'query': 'gstlal O1'})
    outfile = open('/home/gracedb/query_graceids.txt', 'w')
    if f.is_valid():
        # The cleaned 'query' field evaluates to the matching event objects.
        objects = f.cleaned_data['query']
        for obj in objects:
            outfile.write('%s\n' % obj.graceid)
    outfile.close()
Now, go to the data directory root ``/opt/gracedb/data`` and temporarily
copy the tarring script there. Then run the script::
    cd /opt/gracedb/data
    cp /home/gracedb/bin/tar_up_data_dirs.py .
    ./tar_up_data_dirs.py
Now, you should have a new tar file ``/home/gracedb/tmp.tar``. Simply take
this to the new machine, ``cd`` into the GraceDB data directory, and
un-tar the file.
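
For reference, the script boils down to something like the following
hypothetical sketch (the real ``tar_up_data_dirs.py`` may differ; in
particular, the assumption that each event's files live in a directory
named after its GraceID is mine)::

    # Hypothetical sketch of tar_up_data_dirs.py; the real script may differ.
    import os
    import tarfile

    GRACEID_LIST = '/home/gracedb/query_graceids.txt'
    OUTPUT_TAR = '/home/gracedb/tmp.tar'

    with open(GRACEID_LIST) as f:
        graceids = [line.strip() for line in f if line.strip()]

    # Run from /opt/gracedb/data: add each event's directory to the tarball.
    with tarfile.open(OUTPUT_TAR, 'w') as tar:
        for graceid in graceids:
            if os.path.isdir(graceid):
                tar.add(graceid)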
Adding a parameter to the VOEvent (and other "mini" development tasks)
======================================================================
We send information about events to GCN in the
`VOEvent <http://www.ivoa.net/documents/VOEvent/>`__ format. It's basically
just a big XML file. Sometimes,
the consumers of this information will ask you to add an additional parameter,
or make some other small modification. This is an example of what might be
called a "mini" development task: It doesn't involve any major code changes,
but you still have to go through the same sequence of steps that you would
for a true developement task. I recommend the workflow described in :ref:`new_server_feature`.
In this particular case, the only necessary code change is to edit the
file ``gracedb/annotations/voevent_utils.py`` and add something like::
    p_new = vp.Param(
        "MyParam",
        value=getMyParamForEvent(event),
        dataType="float"
    )
    p_new.Description = "My lovely new parameter"
    v.What.append(p_new)
working by analogy with the other parameters present. I only wanted to give
this example here because it seems likely that such a task will be considered
"operational" even though it is really mini-development. The line is pretty
blurry.
Adding an interferometer
========================
Note that these directions may change in the near future since we plan to add an instruments table to the database.
A good starting point is to search the GraceDB server code for "L1" to see where interferometers directly come into play.
Specifics (assume X1 is the IFO code):
1. Add X1OPS, X1OK, X1NO labels, update ``gracedb/templates/gracedb/event_detail_script.js`` with description, and update ``gracedb/templates/search/query_help_frag.html``
2. Add to instruments in ``gracedb/annotations/voevent_utils.py``
3. Update ifoList in ``gracedb/events/query.py``
4. Add entry to ``CONTROL_ROOM_IPS`` in ``gracedb/config/settings/base.py``
5. Add signoff option for X1 in ``gracedb/templates/gracedb/event_detail.html``
6. Update INSTRUMENTS and time zones in ``gracedb/events/models.py``.
7. Update any event objects which need it (currently only LIB events)
8. Update lots of things in ``gracedb/events/serialize.py``
9. Add a permission like 'do_X1_signoff' to the superevent (and probably event) signoff model
10. Handle that case in the corresponding permissions in the API
See an example (Virgo) `here <https://git.ligo.org/lscsoft/gracedb/commit/65a4c08e25d7a472e1f995072d166b4c8dc611df>`__, but note that a lot of the Virgo-related stuff was already in the code.
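
To give a flavor of what these edits look like, here is a hypothetical
sketch of steps 4 and 6 (the existing entries and the exact data structures
are assumptions, not the real code)::

    # gracedb/config/settings/base.py (hypothetical sketch, step 4)
    CONTROL_ROOM_IPS = {
        # ... existing interferometers ...
        'X1': 'xxx.xxx.xxx.xxx',  # placeholder control room IP for X1
    }

    # gracedb/events/models.py (hypothetical sketch, step 6)
    INSTRUMENTS = [
        # ... existing entries ...
        ('X1', 'X1'),
    ]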
Leap seconds
============
GraceDB does its own conversion between UTC and GPS time, but unfortunately, we have to track leap seconds.
This is done in ``gracedb/core/time_utils.py``.
You'll have to update this whenever a new leap second is announced (preferably in advance of its implementation).
There is probably a better way to do this.
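
To make the maintenance task concrete, the conversion amounts to something
like the following sketch (illustrative only, not the actual contents of
``time_utils.py``; the leap-second table is truncated)::

    from datetime import datetime

    GPS_EPOCH = datetime(1980, 1, 6)  # GPS time zero: 1980-01-06 00:00:00 UTC

    # UTC dates on which leap seconds took effect since the GPS epoch; this
    # is the table to extend whenever a new leap second is announced.
    LEAP_SECOND_DATES = [
        datetime(1981, 7, 1), datetime(1982, 7, 1), datetime(1983, 7, 1),
        # ...
        datetime(2015, 7, 1), datetime(2017, 1, 1),
    ]

    def utc_to_gps(utc):
        """Convert a UTC datetime to integer GPS seconds."""
        leap = sum(1 for d in LEAP_SECOND_DATES if d <= utc)
        return int((utc - GPS_EPOCH).total_seconds()) + leap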
On backups
==========
Backups for GraceDB are controlled by the file
``/root/backup-scripts/gracedb.ligo.uwm.edu-filesystems``
on ``backup01.nemo.uwm.edu``. This file simply contains::

    /etc
    /opt/gracedb
which means that everything under these directories on ``gracedb.ligo.uwm.edu``
will be backed up on ``backup01``. You can see the files under the location
``/backup/gracedb.ligo.uwm.edu/``. This is occasionally useful for recovering
a config file that got blown away by puppet. Notice, though, that nothing
under ``/home/gracedb`` is backed up. That's because the core server code and
accompanying scripts are under version control, and thus are backed up elsewhere.
I believe that everything backed up on ``backup01`` is also backed up off-site at CIT.
.. _new_event_subclass:
==================================
Creating a new event subclass
==================================
Why?
==========
Most events in GraceDB have attributes that go beyond those in the base
event class. If a new pipeline is developed, and the data analysts wish
to upload events to GraceDB, these events will often have attributes
that do not correspond to any of the existing event subclasses. In this
case, you will need to create a new event subclass. In addition, you'll
have to tailor the representation of the event, both in the web browser
and REST interfaces, to account for the presence of the new attributes.
This section of the documentation is meant to point out the places where
changes in the code will be required and to suggest a workflow.
The workflow aspect, however, is idiosyncratic, and you should feel
free to adapt as you see fit.
The pipeline
============
Most often, a new event subclass is necessitated by the desire to support
events from a new pipeline. Thus we will need a new pipeline object and
appropriate permissions to populate it. Instructions for these steps are
given in :ref:`new_pipeline`. As in those instructions, we will assume
that our new pipeline is named ``newpipeline``.
The model and migration
=======================
You will need to understand the attributes of the events the pipeline uploads.
Importantly, you should ask the pipeline developers for an example of the
type of file they plan to upload. Then you will be able to decide which
attributes to add to the event subclass.
In creating the new model, it will likely be helpful to compare with the
existing event subclasses: ``GrbEvent``, ``CoincInspiralEvent``,
``MultiBurstEvent``, ``LalInferenceBurstEvent``, and ``SimInspiralEvent``.
Three of these (``CoincInspiralEvent``, ``MultiBurstEvent``, and
``SimInspiralEvent``) were named according to the names of the
``ligolw`` tables from which the event information is drawn. The
``LalInferenceBurstEvent`` class is named for the pipeline from which the
events come (Omicron-LIB). Finally, the ``GrbEvent`` class is named based
on the astrophysical transient being represented (as these events come
through multiple pipelines). For your new subclass, you may wish to name
it after the pipeline (e.g., ``NewPipelineEvent``), or you may decide to
go with something more physical if you suspect that more than one pipeline
will eventually be producing events with this same set of attributes.
For the sake of argument, we'll assume the former in what follows.
You'll want to begin by going to the GraceDB test instance and checking out
a new branch, such as ``my_new_pipeline_branch`` or something.
Creating the new event subclass is as simple as adding a new model in
``gracedb/gracedb/models.py``::
    class NewPipelineEvent(Event):
        attribute1 = models.FloatField(null=True)
        attribute2 = models.IntegerField(null=True)
        attribute3 = models.CharField(max_length=50, blank=True, default="")
        attribute4 = ...
This new model will correspond to a new database table: ``gracedb_newpipelineevent``.
To make the necessary changes to the database, we'll use a migration.
As the ``gracedb`` user::
    cd
    source djangoenv/bin/activate
    cd gracedb
    ./manage.py makemigrations --name added_newpipelineevent gracedb
Check that the new migration is present and has yet to be applied, and then apply it::
    ./manage.py showmigrations gracedb
    ./manage.py migrate gracedb
Finally, commit the ``models.py`` file and the migration file on our new branch::

    git add gracedb/models.py
    git add gracedb/migrations/00XX_added_newpipelineevent.py
    git commit -m "New pipeline model and migration."
View logic for event creation
=============================
Now that we've got our new pipeline and event subclass, we can start putting in the
logic to create the events. The first place to look is the utility function
``_createEventFromForm`` in ``gracedb/view_logic.py``. There is an unwieldy ``if``
statement here that creates a new event object instance according to the
pipeline. We'll need to add our new one::
    # Create Event
    if pipeline.name in ['gstlal', 'spiir', 'MBTAOnline', 'pycbc',]:
        event = CoincInspiralEvent()
    elif pipeline.name in ['Fermi', 'Swift', 'SNEWS']:
        event = GrbEvent()
    elif pipeline.name in ['CWB', 'CWB2G']:
        event = MultiBurstEvent()
    elif pipeline.name in ['HardwareInjection',]:
        event = SimInspiralEvent()
    elif pipeline.name in ['oLIB',]:
        event = LalInferenceBurstEvent()
    ### BEHOLD, a new case:
    elif pipeline.name in ['newpipeline',]:
        event = NewPipelineEvent()
    else:
        event = Event()
Now when we go to assign values to the pipeline-specific fields, they will
actually exist. (If we had used the base ``Event`` class, of course, they
would not.)
Next, edit the function ``handle_uploaded_data`` in ``gracedb/translator.py``.
This function has a large ``if``-statement based on the pipeline name. It is
the pipeline, after all, that determines how the data file will be parsed. Hopefully
you were able to convince the pipeline developers to send you something that's
simple to parse, like JSON. If so, you can add something like this to the large
if statement::
    elif pipeline == 'newpipeline':
        event_file = open(datafilename, 'r')
        event_file_contents = event_file.read()
        event_file.close()
        event_dict = json.loads(event_file_contents)

        # Extract relevant data from dictionary to put into event record.
        event.attribute1 = event_dict['attribute1']
        event.attribute2 = event_dict['attribute2']
        event.attribute3 = event_dict['attribute3']

        # Save the event
        event.save()
REST API changes
================
The representation of events in the REST API is controlled by the event serializer,
``eventToDict``. The various serializers are found in ``gracedb/view_utils.py``.
The event dictionary constructed there has an ``extra_attributes`` key, which is meant
to hold the attributes which are not present in the base event class. The value
for this key is populated by duck-typing the event. So we'll need an additional
try/except block::
    try:
        # NewPipelineEvent
        rv['extra_attributes']['NewPipeline'] = {
            "attribute1": event.attribute1,
            "attribute2": event.attribute2,
            "attribute3": event.attribute3,
        }
    except:
        pass
And ... yeah, I think that's basically all you have to do for the REST API.
Web interface changes
=====================
When a user looks at this event in the web interface, they should see the
pipeline-specific attributes there as well. This will require a little bit of
customization of the event view. In the main event view, ``gracedb.views.view``,
we'll need to add something to the control structure that chooses the template::
    if event.pipeline.name in settings.COINC_PIPELINES:
        templates.insert(0, 'gracedb/event_detail_coinc.html')
    elif event.pipeline.name in settings.GRB_PIPELINES:
        templates.insert(0, 'gracedb/event_detail_GRB.html')
    elif event.pipeline.name.startswith('CWB'):
        templates.insert(0, 'gracedb/event_detail_CWB.html')
    elif event.pipeline.name in ['HardwareInjection',]:
        templates.insert(0, 'gracedb/event_detail_injection.html')
    elif event.pipeline.name in ['oLIB',]:
        templates.insert(0, 'gracedb/event_detail_oLIB.html')
    elif event.pipeline.name in ['newpipeline',]:
        templates.insert(0, 'gracedb/event_detail_newpipeline.html')
There you see our new template, ``event_detail_newpipeline.html``, right at the
end. This inserts the pipeline-specific event page template at the beginning of
the list of templates, which works nicely because Django will just use the
first template in the list that it is able to find.
You'll find the other templates at ``gracedb/templates/gracedb``. Most of the time,
the pipeline-specific templates just override the ``analysis_specific`` block, and inherit
the rest of the sections from the base ``event_detail.html`` template. Usually, the
special section for pipeline specific attributes just consists of a table of key-value
pairs, but it could be just about anything--big block of text, images, whatever.
It winds up right underneath the "Basic Info" section, toward the top.
This is where it really comes in handy to have an example data file or two from your
pipeline developer with some realistic values in it. That way you can design the
template to look nice when populated with real data.
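
A minimal version of ``event_detail_newpipeline.html`` might look like the
sketch below (the context variable name and table markup are guesses based on
the pattern described above, not the actual template code)::

    {% extends "gracedb/event_detail.html" %}

    {% block analysis_specific %}
    <h3>NewPipeline-specific attributes</h3>
    <table>
        <tr><th>attribute1</th><td>{{ object.attribute1 }}</td></tr>
        <tr><th>attribute2</th><td>{{ object.attribute2 }}</td></tr>
        <tr><th>attribute3</th><td>{{ object.attribute3 }}</td></tr>
    </table>
    {% endblock %}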
.. _new_gracedb_instance:
==================================
Standing up a new GraceDB instance
==================================
*Last updated 14 December 2017*
Disclaimer
==========
Certain parts of these instructions may not work.
Please edit when you find something that fails.
Also note that setup of a GraceDB server relies heavily on Puppet.
You may attempt a Puppet-less setup at your own risk!
Initial steps
========================
The first step is to pick the FQDN for your new server.
As of spring 2017, it's preferred to use the ``.ligo.uwm.edu`` domain.
For this exercise, we'll assume a server name of ``gracedb-new.ligo.uwm.edu``.
You should also decide whether you will need a LIGO.ORG domain name (i.e., ``gracedb-new.ligo.org``).
This is not absolutely necessary for test instances, but is recommended in order to simulate the production environment as closely as possible.
Virtual machine setup
---------------------
You'll need to either have one of the VMWare tools (VMWare Workstation for Linux or VMWare Fusion for OS X) installed on your machine, access to ``headroom.cgca.uwm.edu`` (a Windows machine that has VMWare vSphere), or access to the `web interface <http://vc5.ad.uwm.edu>`__.
Currently, the web interface is the preferred method for setting up a new VM, so the following instructions will be for this method.
Find the VM template you want to use (click on a VM host (left panel), then "VMs" (middle frame), then "VM Templates in Folders").
Currently, the Debian templates are on ``vmhost05``, but you may have to check all of the VM hosts if you don't find it there.
We are currently using Debian 8, but in the process of moving to Debian 9.
Left-click on the template and then choose "New VM from This Template" (above the list of templates).
Enter ``gracedb-new`` for the virtual machine's name and hit "Next".
Then, choose the VM host you want to put the VM on and hit "Next".
You can probably skip the next two steps and hit "Finish".
At this point, you can modify the VM's settings:
- CPU: 2 is fine for testing, use only 1 socket total (not 1 per core).
- RAM: something like 2 GB should be fine for testing.
- Storage space: add a second hard drive of about 100 GB (for testing). You may want a larger disk if this is a production server or if you intend to copy the entire production database for testing purposes.
- Network adapter: use public VLAN 61.
The instructions on the CGCA computing `wiki <https://www.lsc-group.phys.uwm.edu/wiki/Computing/ManagingVirtualMachines>`__ provide more detailed information that may be helpful.
Getting certificates
--------------------
It's best to submit your requests for any certificates as soon as possible, as waiting for these will most likely be the biggest bottleneck in this process.
- In all cases, you'll need an InCommon SSL certificate for your ligo.uwm.edu domain name. Follow the instructions on the CGCA computing wiki `here <https://www.lsc-group.phys.uwm.edu/wiki/CertificateRequestUWM>`__. Note that the "short hostname" for our server is ``gracedb-new``.
- If you decided that you want a LIGO.ORG domain name, you'll need an InCommon SSL certificate for this, as well. Follow the instructions `here <https://wiki.ligo.org/AuthProject/ComodoInCommonCert>`__.
- Finally, you may want an IGTF certificate to provide gsissh access. It depends whether you want non-UWM people to potentially have access via the command line without SSH keys. You can do this for either the UWM or LIGO domain names; Tom prefers that we use the UWM one. The instructions for the UWM SSL certificate also contain information about obtaining an IGTF certificate.
In all cases, you'll generate a key and a certificate request, and will send the certificate request to the proper authorities for it to be signed.
Once your certificate is ready, you'll receive an e-mail with instructions for downloading your certificate.
You will usually want the certificate labeled as "X509 Certificate only, Base64 encoded".
DNS configuration
-----------------
UWM
___
In the web interface, you should be able to find the MAC address of the network adapter under the adapter's settings.
If you need to generate a new MAC address, I'm not sure how to do that through the web interface.
However, you can do this with VMWare Workstation by right-clicking on your VM to access "Settings", then "Network adapter", and then "Advanced."
Follow the `instructions <https://www.lsc-group.phys.uwm.edu/wiki/Computing/ManagingVirtualMachines#Create_a_DNS_entry_for_the_guest>`__ on the CGCA wiki for setting up a DNS entry through ``dns.uwm.edu``.
Note that you will have to click on the "Data Management" tab in the top middle to get to the "Network" settings specified in these instructions.
After this is complete, you can boot up the VM.
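Once the record has propagated, you can verify that the name resolves (a quick check, assuming ``dig`` is available on your workstation)::

    dig +short gracedb-new.ligo.uwm.edu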
LIGO DNS
________
This section is only relevant if you are using a LIGO.ORG domain name.
Email Larry Wallace (larry.wallace@ligo.org) and ask him to configure ``gracedb-new.ligo.org`` as a CNAME that points to ``gracedb-new.ligo.uwm.edu``.
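Once this is set up, you can verify the CNAME the same way (again assuming ``dig`` is available)::

    dig +short gracedb-new.ligo.org CNAME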
VM configuration
================
Standard CGCA server configuration
----------------------------------
Log on to your server through VMWare Workstation, using the standard root password (note that the hostname is initially set to ``server``).
Download and run the Debian setup script (as shown on the CGCA wiki)::
curl -s http://omen.phys.uwm.edu/setup_debian.sh | bash -s -- gracedb-new.ligo.uwm.edu
Reboot the VM.
The hostname should now be ``gracedb-new.ligo.uwm.edu``.
Change the root password to match the new hostname using the standard root password formula (use the ``passwd`` command).
Note: the root password formula may change/be removed in late 2017.
The setup script has generated and sent a Puppet certificate request to the puppetmaster server.
Log in to ``puppet.cgca.uwm.edu`` and sign the certificate (see instructions `here <https://www.lsc-group.phys.uwm.edu/wiki/Computing/AddingPuppet>`__).
Running Puppet
--------------
GraceDB servers use the standard CGCA configuration for a webserver, with several customizations implemented by a gracedb module.
More information about how to use this module is in its README file.
You can find the module `here <https://git.ligo.org/cgca-computing-team/cgca-config/tree/production/localmodules/gracedb>`__ for now; it may move to its own repository in the near future.
First, you'll need to generate hiera files for this server for use with Puppet.
In the cgca-config repository, create ``data/nodes/gracedb-new.ligo.uwm.edu.yaml`` and ``data/nodes/gracedb-new.ligo.uwm.edu.eyaml``.
I suggest copying another GraceDB server's files and customizing them as needed.
Things you will likely need to change include the following (a minimal sketch of the node data follows this list):
- The database password: ``gracedb::mysql::database::password``
- The root MySQL password: ``mysql::server::root_password``
- Accounts for LVAlert servers (if this is a test server, use only ``lvalert-test.cgca.uwm.edu``): create the new account on the LVAlert server (currently, the best method is the online Openfire interface), then add an entry for this account to ``gracedb::config::netrc``.
- Set ``shibboleth::certificate::useHiera`` to false; this will cause a new Shibboleth key and certificate to be generated on the first Puppet run. After that run, copy the generated certificate and key into your server's .eyaml file, set this variable to true, and re-run Puppet.
- If you have SSL certificates already, add them to the .eyaml file. If not, remove these lines for now and add them back in once you have the certificates.
- Add this server to the gracedb hostgroup (contains base setup for all GraceDB servers) in the `puppet_node_classifier <https://git.ligo.org/cgca-computing-team/cgca-config/blob/production/site/profile/files/puppetmaster/puppet_node_classifier>`__.
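As a rough sketch, the node data ends up containing entries like these (the key names are the ones from the list above, but the values are placeholders, and real secrets belong in the encrypted .eyaml file, so use an existing server's files as the actual template)::

    # data/nodes/gracedb-new.ligo.uwm.edu.yaml (placeholder values)
    gracedb::mysql::database::password: 'CHANGEME-db-password'
    mysql::server::root_password: 'CHANGEME-root-password'
    shibboleth::certificate::useHiera: false  # set to true after the first run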
Push your changes to the repository (use a branch and ``r10k`` if you want to be cautious).
Then, run Puppet on your new server.
Note that it may take a few minutes for the changes to propagate to the puppetmaster machine, so you may have to wait before running Puppet.
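Running the agent by hand usually looks like this (a standard one-shot invocation; your site may wrap it differently)::

    # Fetch and apply the catalog once, in the foreground, showing changes
    puppet agent --test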
Shibboleth SP registration
--------------------------
Once you have your Shibboleth key and certificate set up in the Puppet configuration, with ``shibboleth::certificate::useHiera`` set to true, you need to register your SP.
Send an email to ``rt-auth@ligo.org`` and ask that a service provider with your FQDN be added to the LIGO shibboleth metadata (generally, use the LIGO.org FQDN, if available).
You will need to attach the cert you find at ``/etc/shibboleth/sp-cert.pem``.
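If you'd like to sanity-check the certificate before sending it, something like this works (assuming ``openssl`` is installed, which it should be on this server)::

    # Print the subject and validity window of the Shibboleth SP certificate
    openssl x509 -in /etc/shibboleth/sp-cert.pem -noout -subject -dates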
Shibboleth discovery service
----------------------------
Next, set up the embedded discovery service for Shibboleth.
Go to the "latest" Shibboleth downloads `page <http://shibboleth.net/downloads/embedded-discovery-service/latest/>`__ and determine the version.
Then you can do::
wget http://shibboleth.net/downloads/embedded-discovery-service/latest/shibboleth-embedded-ds-1.2.0.tar.gz
Unpack the archive into ``/etc/shibboleth-ds`` (create the directory if it doesn't exist).
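One way to do this (a sketch assuming the 1.2.0 tarball downloaded above, with the usual top-level directory inside it)::

    mkdir -p /etc/shibboleth-ds
    tar -xzf shibboleth-embedded-ds-1.2.0.tar.gz \
        --strip-components=1 -C /etc/shibboleth-ds

Then edit ``idpselect_config.js``.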
Change the line starting with ``this.preferredIdP`` to::
this.preferredIdP = ['https://login.ligo.org/idp/shibboleth', 'https://login.guest.ligo.org/idp/shibboleth', 'https://google.cirrusidentity.com/gateway'];
This determines the identity providers which will be shown on the discovery service login page.
For test deployments, you may not need to include the Google IdP (it depends on whether your server is set up to use the Cirrus Google gateway), but including it doesn't hurt anything.
You may need to increase the width of the ``idpSelectIdpSelector`` element in
``idpselect.css`` (set to ~512 px for 3 IdPs).
You may need to edit ``this.maxPreferredIdPs`` if you have more than the default number (3).
Check whether the link provided in ``this.helpURL`` is functional; it has not worked for me in the past several versions of ``shibboleth-ds``.
I suggest using this `link <https://wiki.shibboleth.net/confluence/display/SHIB2/DiscoveryService>`__ instead (if functional).
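For example, after editing, the relevant lines in ``idpselect_config.js`` might look like this (illustrative values only)::

    this.maxPreferredIdPs = 3;
    this.helpURL = 'https://wiki.shibboleth.net/confluence/display/SHIB2/DiscoveryService';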
Finally, if you are confused about parts (or all) of this section, I suggest looking at other GraceDB servers and emulating their configuration.
Populating the database
=======================
"Fresh" database
----------------
To construct a "fresh" database from migrations, just run::
cd $HOME/gracedb_project
python manage.py migrate
Copying production database
---------------------------
First, as yourself on the source server (production, or another existing server whose database you want to replicate), copy the latest database dump to your home directory and then to your new server::
sudo cp /opt/gracedb/sql_backups/gracedb.sql.gz $HOME
scp gracedb.sql.gz $(whoami)@gracedb-new.ligo.uwm.edu:~
On the new server, as yourself, import the database using the ``gracedb`` user's credentials::
gunzip gracedb.sql.gz
mysql -u gracedb -p gracedb < gracedb.sql
Note that files related to the events aren't part of the database and won't exist on the new server unless you copy them over, too (see :ref:`copying_event_data` for more information).
Next, become the ``gracedb`` user, enter the Django management shell (``python manage.py shell``), and delete all Contacts and Notifications so that people don't get phone or email alerts from this instance without signing up for them::

    from alerts.models import Contact, Notification

    # Delete one object at a time so that any overridden delete() logic
    # runs; a bulk queryset delete() would bypass such overrides.
    for c in Contact.objects.iterator():
        c.delete()
    for n in Notification.objects.iterator():
        n.delete()
You might want to delete the Events, too, especially if you copy the production database.
Extra steps
===========
As root
-------
- Add time zone information to the database::
mysql_tzinfo_to_sql /usr/share/zoneinfo/ | mysql -u root mysql
systemctl restart mariadb
- Upgrade ``nodejs`` version (also installs ``npm``)::
curl -sL https://deb.nodesource.com/setup_8.x | bash -
apt-get install nodejs
- Note: you may want to check for a newer version than 8.x.
- Install ``bower`` for managing JavaScript packages: ``npm install -g bower``
- Reconfigure the ``exim4`` package for sending e-mail: ``dpkg-reconfigure exim4-config``. Accept the defaults, except for:
- Set the host to be an "internet site"; mail is sent and received directly using SMTP.
- Remove ``::`` from the list of listening addresses; it seems to cause the server to hang.
- Set "system mail name" to ``gracedb-new.ligo.uwm.edu``.
- Set the IP address to listen to for incoming connections to be ``127.0.0.1``.
- Set "other destinations for which mail is accepted" to ``gracedb-new.ligo.uwm.edu``; can optionally add ``gracedb-new.ligo.org`` if desired.
- Once you're done, restart the ``exim4`` process: ``systemctl restart exim4``
- Build and mount the secondary file system for holding data files:
- Build the filesystem: ``mkfs.ext4 /dev/sdb``
- Add the following line to ``/etc/fstab``: ``/dev/sdb /opt/gracedb ext4 errors=remount-ro 0 1``
- A safer option is to find the UUID for your drive (``ls -lh /dev/disk/by-uuid``) and use that in place of ``/dev/sdb``; see other entries in the file, and the sketch after this list, for examples.
- If there are subdirectories currently in ``/opt/gracedb``, move them somewhere else temporarily.
- Mount the filesystem: ``mount -a``
- Move back any subdirectories that you may have temporarily moved.
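Putting those pieces together, the disk setup looks roughly like this (the UUID is a placeholder; substitute the one reported for your disk)::

    mkfs.ext4 /dev/sdb
    ls -lh /dev/disk/by-uuid    # note the UUID of the new filesystem
    # /etc/fstab entry (one line), using the UUID instead of /dev/sdb:
    #   UUID=<your-uuid> /opt/gracedb ext4 errors=remount-ro 0 1
    mount -a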
As the ``gracedb`` user
-----------------------
- Activate the virtualenv: ``source $HOME/djangoenv/bin/activate``
- Build the GraceDB documentation::
cd $HOME/gracedb_project/docs/user_docs
sphinx-build -b html source build
cd ../admin_docs
sphinx-build -b html source build
- Clone the GraceDB admin scripts repo into the gracedb user's ``$HOME``::
git clone https://git.ligo.org/gracedb/scripts.git $HOME/bin
- Note that the server code repo has already been cloned by Puppet, since it's publicly available. We clone this repo by hand since it's private and dealing with deploy keys is too annoying.
- You can call the directory whatever you want (instead of ``bin``), but then you should change the corresponding parameter (``gracedb::config::script_dir``) in the server's Puppet configuration file.
- Run the setup script in this repository (``initial_server_setup.py``) to pull user accounts from the LIGO LDAP, set up admin/superuser accounts, add users to the executives group, and add users to the EM advocates group (a sketch of the invocation follows this list).
- Collect static files::
cd $HOME/gracedb_project
python manage.py collectstatic
- Use bower to install JavaScript and CSS packages (the ``bower.json`` file contains all of the package details)::
cd $HOME/gracedb_project
bower install
- Instantiate the database backups (``logrotate`` will fail if there isn't an initial file)::
touch /opt/gracedb/sql_backups/gracedb.sql.gz
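For the ``initial_server_setup.py`` step mentioned above, the invocation is roughly the following (the script lives in the admin-scripts repo cloned earlier; check its help or source for any required arguments, which may differ)::

    cd $HOME/bin
    python initial_server_setup.py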
Allowing access
===============
Outside networks
----------------
As configured, your new VM is only accessible from the UWM campus network (or from outside if you are on the VPN).
If you'd like to allow access from the outside world, email ``noc@uwm.edu``, specify the FQDN and IP address of your new server, and ask them to add openings to the entire world for SSH, HTTP, and HTTPS.
In either case, you'll need to update the firewall policy document, which is used to track the accessibility of all of the CGCA servers.
It's hosted in the CGCA Computing SharePoint, accessible through your UWM Microsoft Online account.
The file is called ``cgca-firewall-policy.xlsx``; add a new entry and follow the syntax of the other GraceDB servers.
Non-LVC users
-------------
For non-internal users to be able to access this server, you'll need to register the server with InCommon.
This provides access via federated identity login (through their university or organization).
Talk to Scott K. about how to set this up.
If you want to allow Google account access, you'll need to set it up through the Cirrus gateway in addition to registering with InCommon.
Go `here <https://apps.cirrusidentity.com/console/auth/index>`__ to login, look at the other GraceDB servers to see how they are configured, and follow the directions.
Make sure to set the Google service up with your LIGO.ORG credentials rather than a personal Gmail account.
Note that you'll need to be an admin in the Cirrus console to make these changes; talk to Warren A. about setting that up.
If you use either of these services, users will need to register through gw-astronomy in order to get the proper attributes added to their session.
Ask Mike Manske to "add the server to the attribute filter for the attribute authority IdP" (his words).
This is necessary so that gw-astronomy will send information about LV-EM group memberships.
Why isn't everything managed by Puppet?
=======================================
Ideally, the entire process of standing up a GraceDB instance should be automated.
This would be very useful (perhaps necessary?) for moving GraceDB to the cloud, and also for disaster recovery.
However, suitable Puppet modules do not exist for certain portions of the configuration (i.e., the parts that you just did manually in `Extra steps`_).
As new modules become available (or you develop them yourself), it may be possible to Puppetize more (or all) of this process.