Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • alexander.pace/server
  • geoffrey.mo/gracedb-server
  • deep.chatterjee/gracedb-server
  • cody.messick/server
  • sushant.sharma-chaudhary/server
  • michael-coughlin/server
  • daniel.wysocki/gracedb-server
  • roberto.depietri/gracedb
  • philippe.grassia/gracedb
  • tri.nguyen/gracedb
  • jonah-kanner/gracedb
  • brandon.piotrzkowski/gracedb
  • joseph-areeda/gracedb
  • duncanmmacleod/gracedb
  • thomas.downes/gracedb
  • tanner.prestegard/gracedb
  • leo-singer/gracedb
  • computing/gracedb/server
18 results
Show changes
Commits on Source (1820)
Showing
with 2212 additions and 4635 deletions
{
"directory" : "../bower_components"
}
.git
*.swo
*.swp
*~
*.pyc
django-*.wsgi
static-collected
static/admin/
static/rest_framework/
static/debug_toolbar/
static/guardian/
doc/build/*
doc/build/.buildinfo
admin_doc/build/*
admin_doc/build/.buildinfo
settings/settings_secret.py
config/settings/secret.py
config/settings/local.py
docs/user_docs/build/*
docs/admin_docs/build/*
static_root/*
.pytest_cache
junit.xml
.coverage
---
# GitLab CI for the GraceDB server: run the test suite, build and scan
# the branch Docker image, then retag it as latest.
# NOTE(review): the original file's indentation was lost in extraction;
# structure below is reconstructed per the GitLab CI schema.
image: docker:latest

variables:
  APT_CACHE_DIR: "${CI_PROJECT_DIR}/.cache/apt"
  DOCKER_DRIVER: overlay
  DOCKER_BRANCH: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_NAME
  DOCKER_LATEST: $CI_REGISTRY_IMAGE:latest
  PIP_CACHE_DIR: "${CI_PROJECT_DIR}/.cache/pip"

stages:
  - test
  - branch
  - latest

before_script:
  - docker login -u gitlab-ci-token -p $CI_JOB_TOKEN $CI_REGISTRY

include:
  # Container scanning
  - component: $CI_SERVER_FQDN/computing/gitlab/components/container-scanning/container-scanning@~latest
    inputs:
      job_name: branch_scan
  # Software scanning
  - component: $CI_SERVER_FQDN/computing/gitlab/components/sast/sast@~latest
    inputs:
      run_advanced_sast: true
  - component: $CI_SERVER_FQDN/computing/gitlab/components/secret-detection/secret-detection@~latest
  - component: $CI_SERVER_FQDN/computing/gitlab/components/python/dependency-scanning@~latest

# -- software scanning
# overwrite some settings for the scanning jobs
dependency_scanning:
  stage: test
  needs: []
  variables:
    DEBIAN_FRONTEND: "noninteractive"
  before_script:
    # install some underlying utilities using `apt` so that the dependency
    # scanner can use pip to install everything else
    - apt-get update -yqq
    - apt-get install -yqq
      libkrb5-dev
      libldap-dev
      libsasl2-dev

.sast-analyzer:
  stage: test
  needs: []
  before_script: []

secret_detection:
  stage: test
  needs: []
  before_script: []

# -- testing
.test: &test
  image: igwn/base:bookworm
  services:
    - postgres:15.6
    - memcached
  variables:
    AWS_SES_ACCESS_KEY_ID: "fake_aws_id"
    AWS_SES_SECRET_ACCESS_KEY: "fake_aws_key"
    DJANGO_ALERT_EMAIL_FROM: "fake_email"
    DJANGO_DB_HOST: "postgres"
    DJANGO_DB_PORT: "5432"
    DJANGO_DB_NAME: "fake_name"
    DJANGO_DB_USER: "runner"
    DJANGO_DB_PASSWORD: ""
    DJANGO_PRIMARY_FQDN: "fake_fqdn"
    DJANGO_SECRET_KEY: "fake_key"
    DJANGO_SETTINGS_MODULE: "config.settings.container.dev"
    DJANGO_TWILIO_ACCOUNT_SID: "fake_sid"
    DJANGO_TWILIO_AUTH_TOKEN: "fake_token"
    DJANGO_DOCKER_MEMCACHED_ADDR: "memcached:11211"
    EGAD_URL: "fake_url"
    EGAD_API_KEY: "fake_key"
    ENABLE_LVALERT_OVERSEER: "false"
    # duplicate ENABLE_IGWN_OVERSEER key removed: YAML duplicate keys are
    # invalid and most parsers silently keep only the last value
    ENABLE_IGWN_OVERSEER: "false"
    LVALERT_OVERSEER_PORT: "2"
    LVALERT_SERVER: "fake_server"
    LVALERT_USER: "fake_user"
    LVALERT_PASSWORD: "fake_password"
    IGWN_ALERT_OVERSEER_PORT: "2"
    IGWN_ALERT_SERVER: "fake_server"
    IGWN_ALERT_USER: "fake_user"
    IGWN_ALERT_PASSWORD: "fake_password"
    POSTGRES_DB: "${DJANGO_DB_NAME}"
    POSTGRES_USER: "${DJANGO_DB_USER}"
    POSTGRES_PASSWORD: "${DJANGO_DB_PASSWORD}"
    POSTGRES_HOST_AUTH_METHOD: trust
  before_script:
    # create apt cache directory
    - mkdir -pv ${APT_CACHE_DIR}
    # set python version
    - PYTHON_VERSION="${CI_JOB_NAME##*:}"
    - PYTHON_MAJOR="${PYTHON_VERSION:0:1}"
    - PYTHON="python3"
    # install build requirements
    - apt-get -y install gnupg
    - sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
    - wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add -
    - apt-get -yqq update
    - apt-get -o dir::cache::archives="${APT_CACHE_DIR}" install -yqq
      git
      gnupg
      libldap2-dev
      libsasl2-dev
      libssl-dev
      libxml2-dev
      krb5-user
      libkrb5-dev
      libsasl2-modules-gssapi-mit
      swig
      pkg-config
      libpng-dev
      libfreetype6-dev
      libxslt-dev
      ${PYTHON}-pip
      postgresql-15
      postgresql-client-15
      libpq-dev
    # upgrade pip (requirement for lalsuite)
    - ${PYTHON} -m pip install --upgrade pip --break-system-packages
    # install everything else from pip
    - ${PYTHON} -m pip install -r requirements.txt --break-system-packages
    # create logs path required for tests
    - mkdir -pv ../logs/
    # list packages
    - ${PYTHON} -m pip list installed
  script:
    - PYTHONPATH=${PYTHONPATH}:${PWD}/gracedb ${PYTHON} -m pytest --cov-report term-missing --cov ./gracedb --junitxml=${CI_PROJECT_DIR}/junit.xml
  after_script:
    - rm -fvr ${PIP_CACHE_DIR}/log
  retry:
    max: 2
    when:
      - runner_system_failure
      - stuck_or_timeout_failure
  artifacts:
    reports:
      junit: junit.xml
  cache:
    key: "${CI_JOB_NAME}"
    paths:
      - .cache/pip
      - .cache/apt
  coverage: '/^TOTAL\s+.*\s+(\d+\.?\d*)%/'
  tags:
    - executor-docker

test:3.11:
  <<: *test

# -- docker
branch_image:
  stage: branch
  script:
    - docker build --pull -t $DOCKER_BRANCH .
    - docker push $DOCKER_BRANCH
  retry:
    max: 2
    when:
      - runner_system_failure
      - stuck_or_timeout_failure
  tags:
    - executor-docker

branch_scan:
  stage: branch
  needs: [branch_image]
  # default rules spawn a merge request pipeline, we don't want that
  rules:
    - if: $CI_COMMIT_BRANCH
  variables:
    GIT_STRATEGY: fetch
    # image to scan
    CS_IMAGE: "$DOCKER_BRANCH"
    # image to compare to
    CS_DEFAULT_BRANCH_IMAGE: "$CI_REGISTRY/computing/gitlab/server:latest"
    # path to Dockerfile for remediation
    CS_DOCKERFILE_PATH: "Dockerfile"
  before_script: []

latest_image:
  stage: latest
  dependencies:
    - branch_image
  script:
    - docker pull $DOCKER_BRANCH
    - docker tag $DOCKER_BRANCH $DOCKER_LATEST
    - docker push $DOCKER_LATEST
  retry:
    max: 2
    when:
      - runner_system_failure
      - stuck_or_timeout_failure
  tags:
    - executor-docker
## Description of problem
<!--
Describe in detail what you are trying to do and what the result is.
Exact timestamps, error tracebacks, and screenshots (if applicable) are very helpful.
-->
## Expected behavior
<!-- What do you expect to happen instead? -->
## Steps to reproduce
<!-- Step-by-step procedure for reproducing the issue -->
## Context/environment
<!--
Describe the environment you are working in:
* If using the ligo-gracedb client package, which version?
* Your operating system
* Your browser (web interface issues only)
* If you are experiencing this problem while working on a LIGO or Virgo computing cluster, which cluster are you using?
-->
## Suggested solutions
<!-- Any ideas for how to resolve this problem? -->
## Description of feature request
<!--
Describe your feature request!
Is it a web interface change? Some underlying feature? An API resource?
The more detail you can provide, the better.
-->
## Use cases
<!-- List some specific cases where this feature will be useful -->
## Benefits
<!-- Describe the benefits of adding this feature -->
## Drawbacks
<!--
Are there any drawbacks to adding this feature?
Can you think of any ways in which this will negatively affect the service for any set of users?
-->
## Suggested solutions
<!-- Do you have any ideas for how to implement this feature? -->
# Change Log
# Changelog
All notable changes to this project will be documented in this file.
## 2017-07-05 (gracedb-1.0.10) <tanner.prestegard@ligo.org>
### Added
- global setting for turning off LVAlerts (SEND_XMPP_ALERTS)
- instructions to the web page for creating notifications
### Fixed
- logging and display of performance information
- failover to lvalert_send when lvalert_overseer is not working
## 2017-05-30 (gracedb-1.0.9) <tanner.prestegard@ligo.org>
### Added
- list of labels for an event is now included in all LVAlert messages
- available labels now exposed via the REST API
- ability to create events with labels attached
- 'offline' parameter
### Fixed
- cleanup of label and event creation code, use of proper HTTP response codes
- LVAlert messages now sent when a label is removed and when a log entry is added via the web interface
## 2017-05-08 (gracedb-1.0.8) <tanner.prestegard@ligo.org>
### Added
- case-insensitive search queries
### Changed
- removed gracedb/pyparsing.py in favor of the pip-installed version
- overall reorganization and cleanup of Django settings
## 2017-04-25 (gracedb-1.0.7) <tanner.prestegard@ligo.org>
### Added
- V1OPS label and access to signoff pages from Virgo control room
- EM_SENT label
- new save method for EventLog, EMBBEventLog, EMObservation, EMFootprint to generate log number and save in a single SQL query
### Fixed
- typo in an MOU group's name (CTA)
## 2017-03-21 (gracedb-1.0.6) <tanner.prestegard@ligo.org>
### Changed
- modify handling of Fermi GCNs so as to not overwrite trigger durations
- change settings so gracedb servers use 'test' settings by default
## 2017-03-07 (gracedb-1.0.5) <tanner.prestegard@ligo.org>
### Added
- extraction of single IFO times from CWB event files, saving them in the database, and exposing them to MOU partners
### Changed
- increased size of 'debug' Django logs
## 2017-02-28 (gracedb-1.0.4) <tanner.prestegard@ligo.org>
### Changed
- added new LIB robot certificate
### Fixed
- issue where non-LVC members could remove the lv-em tag on log messages
## 2017-01-24 (gracedb-1.0.3) <tanner.prestegard@ligo.org>
### Added
- several MOU groups
- human-readable FAR to event pages
- leap second from 31 Dec 2016
- test button for contacts
### Changed
- separated phone alerts into voice and text options
## 2017-01-10 (gracedb-1.0.2) <tanner.prestegard@ligo.org>
### Changed
- increased Django logging verbosity and clarity
- modernized Django template structure to Django 1.8 standard
- reorganized settings
## 2016-12-20 (gracedb-1.0.1) <tanner.prestegard@ligo.org>
### Added
- capability for removing labels via gracedb-client
### Changed
- expose singleInspiral times and IFOs for EM partners
## 2016-11-22 <tanner.prestegard@ligo.org>
### Added
- capability for sending phone/SMS alerts via Twilio
### Changed
- updated admin documentation
## 2016-11-11 <tanner.prestegard@ligo.org>
### Added
- AllSkyLong search and updated LVAlert nodes
### Changed
- event file structure for LIB events
## 2016-10-20 <tanner.prestegard@ligo.org>
### Added
- README.md file
### Changed
- Repository moved from versions.ligo.org to git.ligo.org
## 2016-01-07
### Added
- 'less than' sign in event display when FAR is an upper limit (#3105)
......
# Container image for the LIGO GraceDB Django application:
# Debian bookworm base with Apache + Shibboleth in front of the Django
# app, processes managed at runtime by supervisord.
FROM debian:bookworm
LABEL name="LIGO GraceDB Django application" \
maintainer="alexander.pace@ligo.org" \
date="20240306"
# Django settings module used for the build-time collectstatic step below.
ARG SETTINGS_MODULE="config.settings.container.dev"
COPY docker/SWITCHaai-swdistrib.gpg /etc/apt/trusted.gpg.d
COPY docker/backports.pref /etc/apt/preferences.d
RUN apt-get update && \
apt-get -y install gnupg curl
# Register external apt repositories: Debian backports, PostgreSQL (PGDG),
# and the LSCSoft package archive.
RUN echo 'deb http://deb.debian.org/debian bookworm-backports main' > /etc/apt/sources.list.d/backports.list
RUN echo 'deb http://apt.postgresql.org/pub/repos/apt bookworm-pgdg main' > /etc/apt/sources.list.d/pgdg.list
RUN echo 'deb [trusted=yes] https://hypatia.aei.mpg.de/lsc-amd64-bookworm ./' > /etc/apt/sources.list.d/lscsoft.list
# NOTE(review): apt-key is deprecated on bookworm -- consider installing
# the PGDG key into /etc/apt/trusted.gpg.d instead.
RUN curl https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add -
# System packages: Apache + Shibboleth SSO stack, build dependencies for
# the Python requirements, PostgreSQL client, PHP, and the yarn JS
# package manager (key added inline before the final install).
RUN apt-get update && \
apt-get --assume-yes upgrade && \
apt-get install --install-recommends --assume-yes \
apache2 \
emacs-nox \
gcc \
git \
krb5-user \
libkrb5-dev \
libapache2-mod-shib \
libapache2-mod-xsendfile \
libldap2-dev \
libldap-2.5-0 \
libsasl2-dev \
libsasl2-modules-gssapi-mit \
libxml2-dev \
pkg-config \
libpng-dev \
libpq-dev \
libfreetype6-dev \
libxslt-dev \
libsqlite3-dev \
ligo-ca-certs \
osg-ca-certs \
php \
php8.2-pgsql \
php8.2-mbstring \
postgresql-client-15 \
python3 \
python3-dev \
python3-libxml2 \
python3-pip \
procps \
redis \
shibboleth-sp-common \
shibboleth-sp-utils \
libssl-dev \
swig \
htop \
telnet \
vim && \
apt-get clean && \
curl -sL https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - && \
apt-get update && apt-get install --assume-yes yarn
# Install AWS X-ray daemon
RUN curl -O https://s3.us-east-2.amazonaws.com/aws-xray-assets.us-east-2/xray-daemon/aws-xray-daemon-3.x.deb
RUN dpkg -i aws-xray-daemon-3.x.deb
RUN rm aws-xray-daemon-3.x.deb
# Docker scripts:
COPY docker/entrypoint /usr/local/bin/entrypoint
COPY docker/cleanup /usr/local/bin/cleanup
# Supervisord configs:
COPY docker/supervisord.conf /etc/supervisor/supervisord.conf
COPY docker/supervisord-apache2.conf /etc/supervisor/conf.d/apache2.conf
COPY docker/supervisord-igwn-alert-overseer.conf /etc/supervisor/conf.d/igwn-overseer.conf
COPY docker/supervisord-shibd.conf /etc/supervisor/conf.d/shibd.conf
COPY docker/supervisord-aws-xray.conf /etc/supervisor/conf.d/aws-xray.conf
COPY docker/supervisord-qcluster.conf /etc/supervisor/conf.d/qcluster.conf
# Apache configs:
COPY docker/apache-config /etc/apache2/sites-available/gracedb.conf
COPY docker/mpm_prefork.conf /etc/apache2/mods-enabled/mpm_prefork.conf
# Enable mpm_event module:
RUN rm /etc/apache2/mods-enabled/mpm_prefork.*
RUN rm /etc/apache2/mods-enabled/php8.2.*
RUN cp /etc/apache2/mods-available/mpm_event.* /etc/apache2/mods-enabled/
# Shibboleth configs and certs:
COPY docker/shibboleth-ds /etc/shibboleth-ds
COPY docker/login.ligo.org.cert.LIGOCA.pem /etc/shibboleth/login.ligo.org.cert.LIGOCA.pem
COPY docker/inc-md-cert.pem /etc/shibboleth/inc-md-cert.pem
COPY docker/check_shibboleth_status /usr/local/bin/check_shibboleth_status
# Activate the gracedb Apache site and required modules.
RUN a2dissite 000-default.conf && \
a2ensite gracedb.conf && \
a2enmod headers proxy proxy_http rewrite xsendfile
# this line is unfortunate because "." updates for nearly any change to the
# repository and therefore docker build rarely caches the steps below
ADD . /app/gracedb_project
# install gracedb application itself
WORKDIR /app/gracedb_project
RUN pip3 install --upgrade pip --break-system-packages
RUN pip3 install -r requirements.txt --break-system-packages
# install supervisor from pip
RUN pip3 install supervisor --break-system-packages
# Give pip-installed packages priority over distribution packages
ENV PYTHONPATH /usr/local/lib/python3.11/dist-packages:$PYTHONPATH
ENV ENABLE_SHIBD false
ENV ENABLE_OVERSEER true
ENV VIRTUAL_ENV /dummy/
# Expose port and run Gunicorn
EXPOSE 8000
# Generate documentation
WORKDIR /app/gracedb_project/docs/user_docs
RUN sphinx-build -b html source build
WORKDIR /app/gracedb_project/docs/admin_docs
RUN sphinx-build -b html source build
RUN mkdir /app/logs /app/project_data
WORKDIR /app/gracedb_project
# collectstatic needs a full (fake) settings environment; none of these
# values are used at runtime.
RUN DJANGO_SETTINGS_MODULE=${SETTINGS_MODULE} \
DJANGO_DB_NAME=fake_name \
DJANGO_DB_USER=fake_user \
DJANGO_DB_PASSWORD=fake_password \
DJANGO_SECRET_KEY=fake_key \
DJANGO_PRIMARY_FQDN=fake_fqdn \
DJANGO_ALERT_EMAIL_FROM=fake_email \
EGAD_URL=fake_url \
EGAD_API_KEY=fake_key \
LVALERT_USER=fake_user \
LVALERT_PASSWORD=fake_password \
LVALERT_SERVER=fake_server \
LVALERT_OVERSEER_PORT=2 \
IGWN_ALERT_USER=fake_user \
IGWN_ALERT_PASSWORD=fake_password \
IGWN_ALERT_SERVER=fake_server \
IGWN_ALERT_OVERSEER_PORT=2 \
IGWN_ALERT_GROUP=fake_group \
DJANGO_TWILIO_ACCOUNT_SID=fake_sid \
DJANGO_TWILIO_AUTH_TOKEN=fake_token \
DJANGO_AWS_ELASTICACHE_ADDR=fake_address:11211 \
AWS_SES_ACCESS_KEY_ID=fake_aws_id \
AWS_SES_SECRET_ACCESS_KEY=fake_aws_key \
python3 manage.py collectstatic --noinput
RUN rm -rf /app/logs/* /app/project_data/*
# Unprivileged user the app runs as.
RUN useradd -M -u 50001 -g www-data -s /bin/false gracedb
#RUN groupadd -r xray
#RUN useradd -M -u 50002 -g xray -s /bin/false xray
# set secure file/directory permissions. In particular, ADD command at
# beginning of recipe inherits umask of user running the build
RUN chmod 0755 /usr/local/bin/entrypoint && \
chmod 0755 /usr/local/bin/cleanup && \
chown gracedb:www-data /app/logs /app/project_data && \
chmod 0750 /app/logs /app/project_data && \
find /app/gracedb_project -type d -exec chmod 0755 {} + && \
find /app/gracedb_project -type f -exec chmod 0644 {} +
# create and set scitoken key cache directory
RUN mkdir /app/scitokens_cache && \
chown gracedb:www-data /app/scitokens_cache && \
chmod 0750 /app/scitokens_cache
ENV XDG_CACHE_HOME /app/scitokens_cache
# patch voeventparse for python3.10+:
RUN sed -i 's/collections.Iterable/collections.abc.Iterable/g' /usr/local/lib/python3.11/dist-packages/voeventparse/voevent.py
# Remove packages that expose security vulnerabilities and close out.
# Edit: zlib1g* can't be removed because of a PrePend error
RUN apt-get --assume-yes --purge autoremove wget libaom3 node-ip
RUN apt-get clean
ENTRYPOINT [ "/usr/local/bin/entrypoint" ]
CMD ["/usr/local/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"]
This diff is collapsed.
This diff is collapsed.
import sys
import VOEvent
class VOEventExportClass(VOEvent.VOEvent):
    """VOEvent wrapper whose export() writes the root element with the
    xsi/voe XML namespace declarations and the xsi:schemaLocation
    attribute injected into the opening tag."""

    def __init__(self, event, schemaURL):
        # The wrapped VOEvent instance and the schema URL advertised in
        # xsi:schemaLocation.
        self.event = event
        self.schemaURL = schemaURL

    def export(self, outfile, level, namespace_='', name_='VOEvent', namespacedef_=''):
        """Write the wrapped event to `outfile` as XML at indent `level`.

        Differs from the generated export in that the root tag carries
        the xsi and voe namespace declarations plus schemaLocation.
        """
        VOEvent.showIndent(outfile, level)
        # Namespace/schema attributes spliced into the opening tag.
        added_stuff = 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n'
        added_stuff += 'xmlns:voe="http://www.ivoa.net/xml/VOEvent/v2.0"\n'
        added_stuff += 'xsi:schemaLocation="http://www.ivoa.net/xml/VOEvent/v2.0 %s"\n' % self.schemaURL
        outfile.write('<%s%s%s %s' % (namespace_, name_,
            namespacedef_ and ' ' + namespacedef_ or '',
            added_stuff,
            ))
        # self.event.exportAttributes(outfile, level, [], namespace_)
        self.event.exportAttributes(outfile, level, [])
        if self.event.hasContent_():
            outfile.write('>\n')
            # self.event.exportChildren(outfile, level + 1, namespace_='', name_)
            self.event.exportChildren(outfile, level + 1, '', name_)
            VOEvent.showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            # Empty element: close with a self-closing tag.
            outfile.write('/>\n')
# StringIO import that works on Python 2 (cStringIO preferred for speed,
# plain StringIO as fallback) and on Python 3, where both of those
# modules were removed in favor of io.StringIO.
try:
    from cStringIO import StringIO
except ImportError:
    try:
        from StringIO import StringIO
    except ImportError:
        # Python 3
        from io import StringIO
def stringVOEvent(event, schemaURL = "http://www.ivoa.net/xml/VOEvent/VOEvent-v2.0.xsd"):
    '''
    Serializes a VOEvent object into an XML string, including the XML
    declaration and the namespace/schemaLocation attributes on the root.
    '''
    buffer = StringIO()
    buffer.write('<?xml version="1.0" ?>\n')
    exporter = VOEventExportClass(event, schemaURL)
    exporter.export(buffer, 0, namespace_='voe:')
    buffer.write('\n')
    return buffer.getvalue()
def paramValue(p):
    '''
    Returns the better-populated of a Param's two value accessors
    (get_value / get_Value): if one is falsy the other wins; otherwise
    the longer string wins, with ties going to get_Value().
    '''
    lower = p.get_value()
    upper = p.get_Value()
    if not upper:
        return lower
    if not lower:
        return upper
    return lower if len(lower) > len(upper) else upper
def htmlList(list):
    '''
    Converts a list of strings to an HTML <ul><li> structure.
    '''
    # Note: the parameter name shadows the builtin `list`; kept for
    # backward compatibility with keyword callers.
    items = ''.join('<li>' + str(entry) + '</li>' for entry in list)
    return '<ul>' + items + '</ul>'
def htmlParam(g, p):
    '''
    Builds an HTML table row from a Param and its enclosing Group (or None)
    '''
    cells = []
    # An ungrouped Param gets an empty first cell.
    if g == None:
        cells.append('<td/>')
    else:
        cells.append('<td>' + g.get_name() + '</td>')
    cells.append('<td>' + str(p.get_name()) + '</td>')
    description = ''.join(str(d) for d in p.get_Description())
    cells.append('<td>' + description + '</td>')
    cells.append('<td><b>' + str(paramValue(p)) + '</b></td>')
    cells.append('<td>' + str(p.get_ucd()) + '</td>')
    cells.append('<td>' + str(p.get_unit()) + '</td>')
    cells.append('<td>' + str(p.get_dataType()) + '</td>')
    return ''.join(cells)
def parse(file):
    '''
    Parses a file and builds the VOEvent DOM.

    `file` is handed straight to VOEvent.parsexml_; returns the root
    VOEvent object produced by the generated class factory.
    '''
    doc = VOEvent.parsexml_(file)
    rootNode = doc.getroot()
    # Determine which generated class corresponds to the root element.
    rootTag, rootClass = VOEvent.get_root_tag(rootNode)
    v = rootClass.factory()
    v.build(rootNode)
    return v
def parseString(inString):
    '''
    Parses a string and builds the VOEvent DOM.

    Returns the root object produced by the generated class factory.
    '''
    # Local import with a Python 2/3 fallback: the StringIO module was
    # removed in Python 3, where io.StringIO is the replacement.
    try:
        from StringIO import StringIO
    except ImportError:
        from io import StringIO
    doc = VOEvent.parsexml_(StringIO(inString))
    rootNode = doc.getroot()
    rootTag, rootClass = VOEvent.get_root_tag(rootNode)
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    return rootObj
def getWhereWhen(v):
    '''
    Builds a dictionary of the information in the WhereWhen section:
    observatory: location of observatory (string);
    coord_system: coordinate system ID, for example UTC-FK5-GEO;
    time: ISO8601 representation of time, for example 1918-11-11T11:11:11;
    timeError: in seconds;
    longitude: in degrees, usually right ascension;
    latitude: in degrees, usually declination;
    positionalError: positional error in degrees.

    The dictionary is filled top-down and returned early (with fewer
    keys) as soon as a required sub-object is absent.
    '''
    wwd = {}
    ww = v.get_WhereWhen()
    if not ww:
        return wwd
    w = ww.get_ObsDataLocation()
    if not w:
        return wwd
    ol = w.get_ObservatoryLocation()
    if ol:
        wwd['observatory'] = ol.get_id()
    # Variable is reused here for the ObservationLocation.
    ol = w.get_ObservationLocation()
    if not ol:
        return wwd
    observation = ol.get_AstroCoords()
    if not observation:
        return wwd
    wwd['coord_system'] = observation.get_coord_system_id()
    time = observation.get_Time()
    # NOTE(review): assumes Time and TimeInstant are always present when
    # AstroCoords exists -- confirm; a missing Time would raise here.
    wwd['time'] = time.get_TimeInstant().get_ISOTime()
    wwd['timeError'] = time.get_Error()
    pos = observation.get_Position2D()
    if not pos:
        return wwd
    wwd['positionalError'] = pos.get_Error2Radius()
    v2 = pos.get_Value2()
    if not v2:
        return wwd
    wwd['longitude'] = v2.get_C1()
    wwd['latitude'] = v2.get_C2()
    return wwd
def makeWhereWhen(wwd):
    '''
    Expects a dictionary of the information in the WhereWhen section, and makes a
    VOEvent.WhereWhen object suitable for set_WhereWhen().
    observatory: location of observatory (string);
    coord_system: coordinate system ID, for example UTC-FK5-GEO;
    time: ISO8601 representation of time, for example 1918-11-11T11:11:11;
    timeError: in seconds;
    longitude: in degrees, usually right ascension;
    latitude: in degrees, usually declination;
    positionalError: positional error in degrees.

    Optional keys are defaulted *in the caller's dictionary*; the
    required keys (time, longitude, latitude), when missing, cause a
    diagnostic print and a None return.
    '''
    # dict.has_key() was removed in Python 3; the `in` operator is the
    # equivalent on both Python 2 and 3.  Likewise the py2 print
    # statements below are written as single-argument print calls,
    # which parse identically on both versions.
    if 'observatory' not in wwd: wwd['observatory'] = 'unknown'
    if 'coord_system' not in wwd: wwd['coord_system'] = 'UTC-FK5-GEO'
    if 'timeError' not in wwd: wwd['timeError'] = 0.0
    if 'positionalError' not in wwd: wwd['positionalError'] = 0.0
    if 'time' not in wwd:
        print("Cannot make WhereWhen without time")
        return None
    if 'longitude' not in wwd:
        print("Cannot make WhereWhen without longitude")
        return None
    if 'latitude' not in wwd:
        print("Cannot make WhereWhen without latitude")
        return None
    ac = VOEvent.AstroCoords(coord_system_id=wwd['coord_system'])
    ac.set_Time(
        VOEvent.Time(
            TimeInstant = VOEvent.TimeInstant(wwd['time'])))
    ac.set_Position2D(
        VOEvent.Position2D(
            Value2 = VOEvent.Value2(wwd['longitude'], wwd['latitude']),
            Error2Radius = wwd['positionalError']))
    acs = VOEvent.AstroCoordSystem(id=wwd['coord_system'])
    onl = VOEvent.ObservationLocation(acs, ac)
    oyl = VOEvent.ObservatoryLocation(id=wwd['observatory'])
    odl = VOEvent.ObsDataLocation(oyl, onl)
    ww = VOEvent.WhereWhen()
    ww.set_ObsDataLocation(odl)
    return ww
def getParamNames(v):
    '''
    Takes a VOEvent and produces a list of pairs of group name and param name.
    For a bare param, the group name is the empty string.
    '''
    names = []
    w = v.get_What()
    if not w: return names
    for p in w.get_Param():
        names.append(('', p.get_name()))
    for g in w.get_Group():
        # Bug fix: iterate the *group's* params (g.get_Param()), not the
        # bare top-level params again, so grouped params are reported
        # under their group name.
        for p in g.get_Param():
            names.append((g.get_name(), p.get_name()))
    return names
def findParam(event, groupName, paramName):
    '''
    Finds a Param in a given VOEvent that has the specified groupName
    and paramName. If it is a bare param, the group name is the empty string.
    Prints a diagnostic and returns None when no match is found.
    '''
    w = event.get_What()
    if not w:
        # py2 print statements replaced with single-argument print
        # calls, which parse identically on Python 2 and 3.
        print("No <What> section in the event!")
        return None
    if groupName == '':
        for p in w.get_Param():
            if p.get_name() == paramName:
                return p
    else:
        for g in w.get_Group():
            if g.get_name() == groupName:
                # Bug fix: search the *group's* params (g.get_Param()),
                # not the bare top-level params.
                for p in g.get_Param():
                    if p.get_name() == paramName:
                        return p
    print('Cannot find param named %s/%s' % (groupName, paramName))
    return None
######## utilityTable ########################
class utilityTable(VOEvent.Table):
    '''
    Class to represent a simple Table from VOEvent.

    Wraps a VOEvent.Table and adds helpers to allocate an empty Data
    section, read the data column-wise, set individual cells, and
    render a crude text dump.
    '''
    def __init__(self, table):
        self.table = table
        self.colNames = []  # Field names, in declaration order
        self.default = []   # default cell value per column, from dataType
        col = 0
        for f in table.get_Field():
            # Only named Fields become columns; the default is appended
            # in the same branch so colNames and default stay aligned.
            if f.get_name():
                self.colNames.append(f.get_name())
                type = f.get_dataType()
                if type == 'float': self.default.append(0.0)
                elif type == 'int': self.default.append(0)
                else: self.default.append('')

    def getTable(self):
        '''Returns the wrapped VOEvent.Table.'''
        return self.table

    def blankTable(self, nrows):
        '''
        From a table template, replaces the Data section with nrows of empty TR and TD
        '''
        data = VOEvent.Data()
        ncol = len(self.colNames)
        for i in range(nrows):
            tr = VOEvent.TR()
            for col in range(ncol):
                # Each cell starts as that column's type-appropriate default.
                tr.add_TD(self.default[col])
            data.add_TR(tr)
        self.table.set_Data(data)

    def getByCols(self):
        '''
        Returns a dictionary of column vectors that represent the table.
        The key for the dict is the Field name for that column.
        '''
        d = self.table.get_Data()
        nrow = len(d.get_TR())
        ncol = len(self.colNames)
        # we will build a matrix nrow*ncol and fill in the values as they
        # come in, with col varying fastest. The return is a dictionary,
        # arranged by column name, each with a vector of
        # properly typed values.
        data = []
        for col in range(ncol):
            data.append([self.default[col]]*nrow)
        row = 0
        for tr in d.get_TR():
            col = 0
            for td in tr.get_TD():
                data[col][row] = td
                col += 1
            row += 1
        dict = {}
        col = 0
        for colName in self.colNames:
            dict[colName] = data[col]
            col += 1
        return dict

    def setValue(self, name, irow, value, out=sys.stdout):
        '''
        Copies a single value into a cell of the table.
        The column is identified by its name, and the row by an index 0,1,2...
        Writes a diagnostic to `out` and returns False when the column
        name is unknown or the row index is out of range.
        '''
        if name in self.colNames:
            icol = self.colNames.index(name)
        else:
            # "print>>out" (Python 2 only syntax) replaced with an
            # explicit write so this also runs on Python 3; the trailing
            # newline preserves the original print behavior.
            out.write("setTable: Unknown column name %s. Known list is %s\n" % (name, str(self.colNames)))
            return False
        d = self.table.get_Data()
        ncols = len(self.colNames)
        nrows = len(d.get_TR())
        if nrows <= irow:
            out.write("setTable: not enough rows -- you want %d, table has %d. Use blankTable to allocate the table.\n" % (irow+1, nrows))
            return False
        tr = d.get_TR()[irow]
        row = tr.get_TD()
        row[icol] = value
        tr.set_TD(row)

    def toString(self):
        '''
        Makes a crude string representation of a utilityTable
        '''
        s = ' '
        for name in self.colNames:
            s += '%9s|' % name[:9]
        s += '\n\n'
        d = self.table.get_Data()
        for tr in d.get_TR():
            for td in tr.get_TD():
                s += '%10s' % str(td)[:10]
            s += '\n'
        return s
================================
Preparing a new client release
================================
.. NOTE::
The steps here are only suggestions. You will undoubtedly discover better
and/or different ways to go about this.
Develop
=======
Implement the features and bug fixes you wish to include in the new
client release. It's easiest to do this within a virtual environment on
your workstation. That way you can make changes to the code and then::
cd gracedb-client
python setup.py install
which will install into your virtual environment. When you are satisfied
with the changes, commit and push.
.. NOTE::
It's a good idea to test this version on Scientific Linux and Debian
at the clusters before proceeding.
The versions of Python there may be a bit behind the one on your workstation,
and that can cause complications. I've been burned by this before.
You can do it by cloning the ``gracedb-client`` package on a cluster
headnode and building in a virtual environment as shown above.
Changes for packaging
=====================
Update the source code for the new version number, and update the changelog.
Here are the files you will need to change:
* ``setup.py``: bump the version number
* ``debian/changelog``: list your changes in the prescribed format
* ``ligo-gracedb.spec``: check version, unmangled version, and release number
* ``ligo/gracedb/__init__.py``: update ``GIT_TAG``
* ``ligo/gracedb/cli.py``: update ``GIT_TAG``
* ``ligo/gracedb/test/test.py``: update the version number in the ``GIT_TAG`` test
After editing these files, make sure to commit and push. Also make sure the
client still passes the unit tests::
python setup.py install
cd gracedb-client/ligo/gracedb/test
unset TEST_SERVICE
python test.py
Tag this version of the repo and push the tag::
git tag --list
git tag -a YOUR_GIT_TAG
git push --tags
.. NOTE::
Git tags look like this: ``gracedb-1.20-1``, where 1.20 is the version and
the last number corresponds to the build number (here, 1).
Prepare to upload to PyPI
=========================
Clear everything out of the directory ``gracedb-client/dist`` and then
build the source tarball::
python setup.py sdist
Log into ``testpypi.python.org`` and change the version number. Click on the package
name in the right-hand menu, then click 'edit' near the top. Bump the version
number as appropriate, and then click 'Add Information' at the bottom.
Upload the package to the test PyPI instance. This is easier if you install
the python package ``twine``. I tend to do this in a special virtual environment
used only for this purpose::
deactivate
cd
cd my_virtual_envs
virtualenv --system-site-packages pypi-upload
source pypi-upload/bin/activate
pip install twine
cd /path/to/gracedb-client
twine upload dist/*.gz -r test
Make sure that you can install and use the package from the test PyPI::
deactivate
cd
cd my_virtual_envs
virtualenv --system-site-packages test
source test/bin/activate
pip install -i https://testpypi.python.org/pypi ligo-gracedb --upgrade
cd /path/to/gracedb-client
git pull
cd ligo/gracedb/test
python test.py
cd ~/my_virtual_envs
deactivate
rm -f -r test
Log into ``pypi.python.org`` (the non-test instance) and update the version number
as you did above for the test instance. Next, upload the package to the
regular, non-test PyPI::
deactivate
cd ~/my_virtual_envs
source pypi-upload/bin/activate
cd /path/to/gracedb-client
twine upload dist/*.gz
Lastly, make sure you can pip install the package::
deactivate
cd ~/my_virtual_envs
virtualenv --system-site-packages test
source test/bin/activate
pip install ligo-gracedb
deactivate
rm -f -r test
Steps for LIGO packaging
========================
Move the source tarball to ``software.ligo.org``. I do this with a script
I obtained from Adam Mercer, ``lscsrc_new_file.sh``. I have added a version
of this to the GraceDB ``admin-tools`` repo::
cd /path/to/gracedb-client/dist
cp /path/to/admin-tools/releases/lscsrc_new_file.sh .
./lscsrc_new_file.sh ligo-gracedb-*gz
.. NOTE::
You must run the script in the same directory where the tarball lives.
Otherwise it will put it onto the server in a weird subdirectory rather
than just the file.
Make sure that the file is accessible in the expected location, something
like ``http://software.ligo.org/lscsoft/source/ligo-gracedb-1.20.tar.gz``.
Send an email to the packagers notifying them of the new package. You will
probably want to include the information that you put into the changelog.
Here's an example of one that I sent::
to daswg+announce@ligo.org
There is a new release of the GraceDB client tools.
New features are:
Improved error handling for expired or missing credentials
Improved error handling when server returns non-JSON response
Added --use-basic-auth option to command-line client
The release tag is: ligo-lvalert-1.20-1
The source is available at:
http://software.ligo.org/lscsoft/source/ligo-gracedb-1.20.tar.gz
thanks!
Branson
After the package is in the testing repo, look for the corresponding row in the
`SCCB wiki <https://wiki.ligo.org/SCCB/WebHome>`__.
One of the packagers will hopefully have added it.
Once the new package is installed at the system level on the bleeding-edge
head nodes, test it on different OSes (probably ``ldas-pcdev4`` at CIT for
Scientific Linux and ``atlas9`` at AEI for Debian).
Update the SCCB wiki entry stating that the package has been tested on SL
and Debian and request that it be moved into production.
Forward the package announcement email to the SCCB with some additional text
notifying them that the package is waiting for their approval. Here is an
example of one that I sent::
to daswg+SCCB@ligo.org
dear SCCB,
I have tested this release on ldas-pcdev4 @ CIT and atlas9 @ AEI. The
release passes the unit tests, so I am requesting that it be moved
into production.
best,
Branson
==================================
Standing up a new GraceDB instance
==================================
Disclaimer
==========
These instructions will almost certainly not work. Please edit when you find
something that fails.
Recipe
======
Machine and certificates
------------------------
I'll assume that the new instance will have the FQDN ``gracedb-new.cgca.uwm.edu``.
Follow the
`instructions <https://www.lsc-group.phys.uwm.edu/wiki/Computing/ManagingVirtualMachines>`__
for setting up a new Debian stock VM managed by puppet.
You are going to need an InCommon SSL certificate for Apache, so I recommend
requesting this first. Instructions are found
`here <https://www.lsc-group.phys.uwm.edu/wiki/CertificateRequestUWM>`__. Store the
cert and key with correct file permissions somewhere for safe keeping.
.. NOTE::
If this new instance will have a FQDN ending in ``.ligo.org``, you will
need to get the cert from Caltech instead. Some instructions are found
`here <https://wiki.ligo.org/AuthProject/ComodoInCommonCert>`__.
Puppet configuration
--------------------
On your workstation, clone the ``cgca-hiera`` git repository::
git clone git@git.ligo.org:cgca-computing-team/cgca-hiera.git
Create the necessary YAML files by copying from one of the existing
instances. This will get you pretty far::
cd cgca-hiera
cp gracedb-test.cgca.uwm.edu.yaml gracedb-new.cgca.uwm.edu.yaml
cp gracedb-test.cgca.uwm.edu.eyaml gracedb-new.cgca.uwm.edu.eyaml
Edit the latter file until you are satisfied. Here are some things you
will definitely want to change
- instances of the FQDN
- SSH key for the gracedb@gracedb-new.cgca.uwm.edu user
- user entry for yourself, to map your InCommon cert DN to the gracedb user account
You may also need to add the ``webserver3`` and ``gracedb`` modules to the
list, as these handle much of the work, but are sometimes left off of the
list in order to prevent changes being made to the server without the
maintainer's knowledge.
Next, edit the EYAML file, which has the secret information in it.
At the time of writing, the best way of editing an EYAML file has not
been settled upon. (My favorite way to
do this is to use ``eyaml edit``. But at the time of writing, that is only
available as root on the ``puppet.cgca.uwm.edu`` machine, and you have to
explicitly provide paths to the PKCS7 public and private keys. In the
intervening time, it is likely that a better way to edit eyaml files will
have been devised.) Change the mysql root and gracedb
user passwords, noting that these occur in multiple locations. Add in the
naturally occurring shib cert and key, as well
as the apache cert and key. Importantly, you should comment out the
lines associated with the file ``settings_secret``. We don't want Puppet
to try to create this file yet, since our server code directories that
contain it don't exist yet.
Commit the new files and push. Then log into the new machine as root and
run the puppet agent::
puppet agent -t
This may initially produce errors, so some iteration is to be expected.
Shibboleth SP registration
--------------------------
At this point, the ``shibboleth`` package should be installed, along with its
self-signed certificates. Send email to ``rt-auth`` and ask that a service provider
with your FQDN be added to the LIGO shibboleth metadata. You will need to
attach the cert you find at ``/etc/shibboleth/sp-cert.pem``. The rest of the
Shibboleth SP configuration should already have been taken care of by Puppet,
so it should "just work" once it is added to the LIGO metadata. If it doesn't,
there is more detail about setting up a new Shibboleth SP
`here <https://wiki.ligo.org/AuthProject/DeployLIGOShibbolethDebianSqueeze>`__.
Application code
----------------
Next, we'll pull down the repo containing the source code. Log in to the
new machine as the ``gracedb`` user, and clone the
server code using your LIGO credentials::
cd
ecp-cookie-init LIGO.ORG https://versions.ligo.org/git albert.einstein
git config --global http.cookiefile /tmp/ecpcookie.u`id -u`
git clone https://versions.ligo.org/git/gracedb.git
Create a new settings file by copying from one of the existing ones::
cd gracedb/settings
cp test.py new.py
or some other appropriate name. (Copy from ``default.py`` if you'd rather
have a production-like instead of testing-like instance.) Edit this new
settings module as desired. You will at least want to change the
``CONFIG_NAME`` and all instances of the FQDN. Now edit
``settings/__init__.py`` to make sure this new settings module will
be invoked::
from default import *
config = configs.get(ROOT_PATH, "production")
if socket.gethostname() == 'gracedb-test':
config = 'test'
elif socket.gethostname() == 'gracedb-new':
config = 'new'
settings_module = __import__('%s' % config, globals(), locals(), 'gracedb')
Note that the behavior here is that we first import everything from default.
Then we'll overwrite those settings with the module specified by ``config``.
Also uncomment the ``settings_secret`` file in the EYAML for this machine,
and run the puppet agent again. This will install our secret settings file
that is pulled in by the default settings.
Required packages
-----------------
GraceDB relies on several packages that are best installed in a virtual environment
rather than at the system level. This is important, because we don't want
our regular package updates to surprise us with, say, a new version of Django
that our code hasn't yet been ported to.
Create the virtual environment for the ``gracedb`` user in that user's
home directory::
cd
virtualenv djangoenv --system-site-packages
source djangoenv/bin/activate
pip install mysql-python
pip install python-ldap
pip install html5lib
pip install requests
pip install Sphinx
pip install python-memcached
pip install django-model-utils
pip install djangorestframework==3.3.2
pip install django-guardian==1.4.1
pip install django-debug-toolbar
pip install django-debug-panel
pip install Django==1.8.11
pip install ligo-lvalert --pre
pip install ligo-lvalert-overseer
You may find that you need to install additional packages during the testing
process. Note that the ``--system-site-packages`` is necessary in order for the
system install of ``python-glue`` to be available inside the virtual environment.
Also note that we ask for specific version numbers of some packages. Also, the
ordering of these commands matters, since packages such as ``django-guardian``
will try to pull in the very latest version of Django. So if we really want
Django 1.8, we have to ask for that one *after* installing the third-party
packages. I decided to stick with Django 1.8 for the time being, since it is
one of the designated LTS releases. Version 1.9, by contrast, is not and will
be supported for a shorter period of time. Successive releases of Django often
contain breaking API changes, so be prepared if you decide to update.
Run ``collectstatic`` so that all of the static files from the various Python
sources are collected under ``gracedb/static``, where Apache will expect to
find them::
cd
cd gracedb
./manage.py collectstatic
Next, install the JavaScript components GraceDB uses to render web pages.
As root::
update-alternatives --install /usr/bin/node nodejs /usr/bin/nodejs 100
which node
curl https://www.npmjs.com/install.sh | sh
which npm
npm install -g bower
Then, as the ``gracedb`` user::
cd
bower install dgrid#0.4.0
bower install dijit#1.10.4
bower install dojox#1.10.4
bower install moment#2.11.1
bower install moment-timezone#0.5.0
These particular versions may be required in order for the web pages to render
correctly.
Miscellaneous
-------------
GraceDB relies on the ability to send email--both for alerts to users who
request them, and to the maintainer/developer in case of unhandled exceptions.
Reconfigure ``exim4`` as root by executing::
dpkg-reconfigure exim4-config
You'll want to accept the defaults, except for two: 1) set this host to be an
"internet site; mail is sent and received directly using SMTP." and 2) remove
``::1`` from the list of listening addresses. (The latter seems to be necessary,
as I've observed that the exim4 server hangs if it tries to listen on ``::1``.)
Also check that the system FQDN appears correctly.
Next, set up the embedded discovery service. Download from::
http://shibboleth.net/downloads/embedded-discovery-service/latest/shibboleth-embedded-ds-1.1.0.tar.gz
Unpack the archive into /etc/shibboleth-ds, and edit ``idpselect_config.js``::
this.preferredIdP = ['https://login.ligo.org/idp/shibboleth', 'https://login.guest.ligo.org/idp/shibboleth', 'https://google.cirrusidentity.com/gateway']; // Array of entityIds to always show
You may need to increase the width of the ``idpSelectIdpSelector`` element in
``idpselect.css``. I set this to 512.
As the ``gracedb`` user obtain the random bin scripts used by GraceDB for various purposes::
cd
git clone git@git.ligo.org:gracedb/scripts.git bin
If this raises an error regarding access rights, simply copy over your ssh keypair
that you use to access ``git.ligo.org``, and add the key to your ssh-agent.
Final steps
-----------
As the ``gracedb`` user, fill up the database::
cd
scp gracedb@gracedb.cgca.uwm.edu:/opt/gracedb/sql_backups/gracedb.sql.gz .
gunzip gracedb.sql.gz
mysql -u gracedb -p gracedb < gracedb.sql
From your workstation, test the web interface of your new instance to make
sure it's working, and run the unit tests::
cd gracedb-client/ligo/gracedb/test
export TEST_SERVICE='https://gracedb-new.cgca.uwm.edu/api/'
python test.py
I found it necessary to do this as the ``gracedb`` user::
cd
chmod g+w -R logs
Also build the docs::
cd
cd gracedb/docs
mkdir build
sphinx-build -b html source build
cd ../admin_docs
mkdir build
sphinx-build -b html source build
Explanation of the hiera files
==============================
The ``hiera`` YAML and EYAML files attempt to describe the GraceDB server
as it *should* be. They contain the build of the configuration necessary for
setting up a GraceDB instance, though there are some stray bits that have
to be done by hand.
.. NOTE::
You may find yourself in the situation of needing to stand up an instance
that is *not* managed by puppet--for example if you are setting up an
instance at a different data center. In that case, you will need to take
care of the above tasks by hand. I recommend copying the Apache virtual
host configuration and ``shibboleth2.xml`` from a working GraceDB
instance and modifying as needed.
Why isn't everything managed by Puppet?
=======================================
Ideally, the entire process of standing up a GraceDB instance should be
automated. This would be very useful (perhaps necessary?) for moving GraceDB
to the cloud, and also for disaster recovery. There are gaps in the puppet
config for ``gracedb`` and ``gracedb-test`` however, as I could not find
suitable existing puppet modules. For example, there is a `python module
<https://forge.puppetlabs.com/stankevich/python>`__ in the Puppet forge that
manages virtual environments, but it does not handle dependencies well. You
would have to engineer a ``requirements.txt`` file that lists exact packages
and versions in a strict dependency order in order for that module to work. I
experimented with creating my own process based on a file resource for the
``requirements.txt`` and exec resources to create and update the virtual
environment based on changes to the file. However, this seemed fragile, and I
decided that it would be better to manage the virtual environment by hand.
That being said, I would recommend gradually finding ways to Puppet-ize the
rest of the install process, especially if improved modules become available.
.. GraceDB operation (admin) tasks
Operational Tasks
=================
Contents:
.. toctree::
:maxdepth: 2
new_pipeline
user_permissions
robot_certificate
phone_alerts
miscellaneous
{
"name": "gracedb",
"dependencies": {
"dijit": "1.10.4",
"dojox": "1.10.4"
}
}
File moved
# To run this manually (not via systemd):
# gunicorn --config config/gunicorn_config.py config.wsgi:application
# (assuming that you are in the base directory of the GraceDB server code repo)
import os
from os.path import abspath, dirname, join
import sys
import multiprocessing
# Useful function for getting environment variables
class ImproperlyConfigured(Exception):
    """Raised when a required environment variable is not set."""


def get_from_env(envvar, default_value=None, fail_if_not_found=True):
    """Read an environment variable, falling back to ``default_value``.

    Parameters:
        envvar (str): name of the environment variable to read.
        default_value: value returned when ``envvar`` is not set and
            ``fail_if_not_found`` is False.
        fail_if_not_found (bool): when True, raise ImproperlyConfigured
            if ``envvar`` is absent from the environment.

    Returns:
        The variable's value (always a string when it comes from the
        environment), or ``default_value``.

    Raises:
        ImproperlyConfigured: ``envvar`` is missing from the environment
            and ``fail_if_not_found`` is True.
    """
    # Check actual presence in the environment rather than comparing the
    # fetched value to the default: the old comparison also raised when
    # the variable happened to be set to a value equal to the default.
    # (The exception class is defined above; previously the module raised
    # an undefined name, which would have surfaced as a NameError.)
    if envvar not in os.environ:
        if fail_if_not_found:
            raise ImproperlyConfigured(
                'Could not get environment variable {0}'.format(envvar))
        return default_value
    return os.environ[envvar]
# Parameters
GUNICORN_PORT = 8080
# Log directory: "logs/" two levels above this config file.
LOG_DIR = abspath(join(dirname(__file__), "..", "..", "logs"))

# Gunicorn configuration ------------------------------------------------------
# Gunicorn discovers these module-level names by introspection, so the
# variable names below must match gunicorn's setting names exactly.

# Bind to localhost on specified port
bind = "127.0.0.1:{port}".format(port=GUNICORN_PORT)

# Number of workers -----------------------------------------------------------
# Gunicorn's documentation recommends 2*CPU + 1; after testing this was
# bumped, and the code below uses 3*CPU + 1. Maybe increase this number in
# the cloud deployment?
workers = int(get_from_env('GUNICORN_WORKERS',
    default_value=multiprocessing.cpu_count()*3 + 1,
    fail_if_not_found=False))

# NOTE: it was found in extensive testing that threads > 1 are prone
# to connection lockups. Leave this at 1 for safety until there are
# fixes in gunicorn.
# Why not sync? The sync worker is prone to timeout for long requests,
# like big queries. But gthread sends a heartbeat back to the main worker
# to keep it alive. We could just set the timeout to a really large number
# which would keep the long requests stable, but if there is a stuck worker,
# then they would be subject to that really long timeout. It's a tradeoff.
# All this goes away with async workers, but as of 3.2, django's ORM does
# not support async, and testing failed pretty catastrophically and
# unreliably.
threads = int(get_from_env('GUNICORN_THREADS',
    default_value=1,
    fail_if_not_found=False))

# Worker connections. Limit the number of connections between apache<-->gunicorn
worker_connections = workers * threads

# Worker class ----------------------------------------------------------------
# This deployment defaults to 'gthread' (threaded workers) — see the
# rationale above. Plain 'sync' is gunicorn's own default, generally safe
# and low-resource:
# https://docs.gunicorn.org/en/stable/design.html#sync-workers
worker_class = get_from_env('GUNICORN_WORKER_CLASS',
    default_value='gthread',
    fail_if_not_found=False)

# Timeout ---------------------------------------------------------------------
# If not specified, the timeout default is 30 seconds:
# https://gunicorn-docs.readthedocs.io/en/stable/settings.html#worker-processes
# NOTE(review): when GUNICORN_TIMEOUT is set in the environment this is a
# string, not an int; gunicorn's setting validation is presumed to coerce
# it — confirm.
timeout = get_from_env('GUNICORN_TIMEOUT',
    default_value=30,
    fail_if_not_found=False)
graceful_timeout = timeout

# max_requests settings -------------------------------------------------------
# The maximum number of requests a worker will process before restarting.
# May be useful if we have memory leak problems.
# The jitter is drawn from a uniform distribution:
# randint(0, max_requests_jitter)
max_requests = get_from_env('GUNICORN_MAX_REQUESTS',
    default_value=5000,
    fail_if_not_found=False)
max_requests_jitter = get_from_env('GUNICORN_MAX_REQUESTS_JITTER',
    default_value=250,
    fail_if_not_found=False)

# keepalive -------------------------------------------------------------------
# The number of seconds to wait for requests on a Keep-Alive connection.
# Generally set in the 1-5 seconds range for servers with direct connection
# to the client (e.g. when you don’t have separate load balancer).
# When Gunicorn is deployed behind a load balancer, it often makes sense to set
# this to a higher value.
# NOTE: force gunicorn to close its connection to apache after each request
# (keepalive=0). This has been the source of so many 502's. Basically in
# periods of high activity, gunicorn would hold on to open sockets with
# apache, and just deadlock itself:
# https://github.com/benoitc/gunicorn/issues/2917
keepalive = get_from_env('GUNICORN_KEEPALIVE',
    default_value=0,
    fail_if_not_found=False)

# preload_app -----------------------------------------------------------------
# Load application code before the worker processes are forked.
# By preloading an application you can save some RAM resources as well as speed
# up server boot times. Although, if you defer application loading to each
# worker process, you can reload your application code easily by restarting
# workers.
# If you aren't going to make use of on-the-fly reloading, consider preloading
# your application code to reduce its memory footprint. So, turn this on in
# production. This is default set to False for development, but
# **TURN THIS TO TRUE FOR AWS DEPLOYMENT **
preload_app = get_from_env('GUNICORN_PRELOAD_APP',
    default_value=True,
    fail_if_not_found=False)

# Logging ---------------------------------------------------------------------
# Access log
accesslog = join(LOG_DIR, "gunicorn_access.log")
access_log_format = ('GUNICORN | %(h)s %(l)s %(u)s %(t)s '
    '"%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"')

# Error log
errorlog = join(LOG_DIR, "gunicorn_error.log")
# debug logging doesn't provide actual information. And this will
# eliminate the "Connection closed." messages while still giving info
# about worker restarts.
loglevel = 'info'
capture_output = True

# using /dev/shm/ instead of /tmp for the temporary worker directory. See:
# https://pythonspeed.com/articles/gunicorn-in-docker/
# “in AWS an EBS root instance volume may sometimes hang for half a minute
# and during this time Gunicorn workers may completely block.”
worker_tmp_dir='/dev/shm'
# Override logger class to modify error format
from gunicorn.glogging import Logger

class CustomLogger(Logger):
    """Gunicorn logger whose error-log lines are prefixed with 'GUNICORN | '.

    This matches the prefix used in ``access_log_format`` above so that all
    gunicorn output is easy to grep in the shared log directory.
    """
    error_fmt = 'GUNICORN | ' + Logger.error_fmt

logger_class = CustomLogger
def post_fork(server, worker):
    """Gunicorn ``post_fork`` hook: log the pid of each newly forked worker."""
    server.log.info("Worker spawned (pid: %s)", worker.pid)
def pre_fork(server, worker):
    """Gunicorn ``pre_fork`` hook: intentionally a no-op."""
    pass
def pre_exec(server):
    """Gunicorn ``pre_exec`` hook: log that the master is re-executing itself."""
    server.log.info("Forked child, re-executing.")
def when_ready(server):
    """Gunicorn ``when_ready`` hook: log that the server is about to spawn workers."""
    server.log.info("Server is ready. Spawning workers")
def worker_int(worker):
    """Gunicorn ``worker_int`` hook: log receipt of INT/QUIT in a worker."""
    worker.log.info("worker received INT or QUIT signal")
def worker_abort(worker):
    """Gunicorn ``worker_abort`` hook: log receipt of SIGABRT in a worker."""
    worker.log.info("worker received SIGABRT signal")
"""
Django settings for gracedb project.
Environment variable DJANGO_SETTINGS_MODULE should be set for a
given instance to determine the settings to run.
Description of settings:
BASE SETTINGS - not to be used as a full settings configuration
---------------------------------------------------------------
base.py - contains the basic settings for running a GraceDB
server.
secret.py - generated by Puppet, contains secret settings like the
database password, API keys, etc. For use with VM-based
deployments. DO NOT EDIT.
Virtual machine deployments
---------------------------
vm/production.py - settings for a VM-based production instance deployed
with Puppet.
vm/dev.py - settings for a VM-based development instance deployed
with Puppet.
Container-based deployments
---------------------------
NOTE: many settings are imported from environment variables for
this deployment type!
container/production.py - settings for a container-based deployment
of a production instance.
container/dev.py - settings for a container-based deployment of a
development instance.
"""
This diff is collapsed.
File moved