{
"directory" : "../bower_components"
}
.git
*.swo
*.swp
*~
*.pyc
django-*.wsgi
static-collected
static/admin/
static/rest_framework/
config/settings/secret.py
config/settings/local.py
docs/user_docs/build/*
docs/admin_docs/build/*
static_root/*
.pytest_cache
junit.xml
.coverage
---
image: docker:latest

variables:
  APT_CACHE_DIR: "${CI_PROJECT_DIR}/.cache/apt"
  DOCKER_DRIVER: overlay
  DOCKER_BRANCH: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_NAME
  DOCKER_LATEST: $CI_REGISTRY_IMAGE:latest
  PIP_CACHE_DIR: "${CI_PROJECT_DIR}/.cache/pip"

stages:
  - test
  - branch
  - latest

before_script:
  - docker login -u gitlab-ci-token -p $CI_JOB_TOKEN $CI_REGISTRY

include:
  # Container scanning
  - component: $CI_SERVER_FQDN/computing/gitlab/components/container-scanning/container-scanning@~latest
    inputs:
      job_name: branch_scan
  # Software scanning
  - component: $CI_SERVER_FQDN/computing/gitlab/components/sast/sast@~latest
    inputs:
      run_advanced_sast: true
  - component: $CI_SERVER_FQDN/computing/gitlab/components/secret-detection/secret-detection@~latest
  - component: $CI_SERVER_FQDN/computing/gitlab/components/python/dependency-scanning@~latest

# -- software scanning
# overwrite some settings for the scanning jobs
dependency_scanning:
  stage: test
  needs: []
  variables:
    DEBIAN_FRONTEND: "noninteractive"
  before_script:
    # install some underlying utilities using `apt` so that the dependency
    # scanner can use pip to install everything else
    - apt-get update -yqq
    - apt-get install -yqq
        libkrb5-dev
        libldap-dev
        libsasl2-dev

.sast-analyzer:
  stage: test
  needs: []
  before_script: []

secret_detection:
  stage: test
  needs: []
  before_script: []

# -- testing
.test: &test
  image: igwn/base:bookworm
  services:
    - postgres:15.6
    - memcached
  variables:
    AWS_SES_ACCESS_KEY_ID: "fake_aws_id"
    AWS_SES_SECRET_ACCESS_KEY: "fake_aws_key"
    DJANGO_ALERT_EMAIL_FROM: "fake_email"
    DJANGO_DB_HOST: "postgres"
    DJANGO_DB_PORT: "5432"
    DJANGO_DB_NAME: "fake_name"
    DJANGO_DB_USER: "runner"
    DJANGO_DB_PASSWORD: ""
    DJANGO_PRIMARY_FQDN: "fake_fqdn"
    DJANGO_SECRET_KEY: "fake_key"
    DJANGO_SETTINGS_MODULE: "config.settings.container.dev"
    DJANGO_TWILIO_ACCOUNT_SID: "fake_sid"
    DJANGO_TWILIO_AUTH_TOKEN: "fake_token"
    DJANGO_DOCKER_MEMCACHED_ADDR: "memcached:11211"
    EGAD_URL: "fake_url"
    EGAD_API_KEY: "fake_key"
    ENABLE_LVALERT_OVERSEER: "false"
    ENABLE_IGWN_OVERSEER: "false"
    LVALERT_OVERSEER_PORT: "2"
    LVALERT_SERVER: "fake_server"
    LVALERT_USER: "fake_user"
    LVALERT_PASSWORD: "fake_password"
    IGWN_ALERT_OVERSEER_PORT: "2"
    IGWN_ALERT_SERVER: "fake_server"
    IGWN_ALERT_USER: "fake_user"
    IGWN_ALERT_PASSWORD: "fake_password"
    POSTGRES_DB: "${DJANGO_DB_NAME}"
    POSTGRES_USER: "${DJANGO_DB_USER}"
    POSTGRES_PASSWORD: "${DJANGO_DB_PASSWORD}"
    POSTGRES_HOST_AUTH_METHOD: trust
  before_script:
    # create apt cache directory
    - mkdir -pv ${APT_CACHE_DIR}
    # set python version
    - PYTHON_VERSION="${CI_JOB_NAME##*:}"
    - PYTHON_MAJOR="${PYTHON_VERSION:0:1}"
    - PYTHON="python3"
    # install build requirements
    - apt-get -y install gnupg
    - sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
    - wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add -
    - apt-get -yqq update
    - apt-get -o dir::cache::archives="${APT_CACHE_DIR}" install -yqq
        git
        gnupg
        libldap2-dev
        libsasl2-dev
        libssl-dev
        libxml2-dev
        krb5-user
        libkrb5-dev
        libsasl2-modules-gssapi-mit
        swig
        pkg-config
        libpng-dev
        libfreetype6-dev
        libxslt-dev
        ${PYTHON}-pip
        postgresql-15
        postgresql-client-15
        libpq-dev
    # upgrade pip (requirement for lalsuite)
    - ${PYTHON} -m pip install --upgrade pip --break-system-packages
    # install everything else from pip
    - ${PYTHON} -m pip install -r requirements.txt --break-system-packages
    # create logs path required for tests
    - mkdir -pv ../logs/
    # list packages
    - ${PYTHON} -m pip list installed
  script:
    - PYTHONPATH=${PYTHONPATH}:${PWD}/gracedb ${PYTHON} -m pytest --cov-report term-missing --cov ./gracedb --junitxml=${CI_PROJECT_DIR}/junit.xml
  after_script:
    - rm -fvr ${PIP_CACHE_DIR}/log
  retry:
    max: 2
    when:
      - runner_system_failure
      - stuck_or_timeout_failure
  artifacts:
    reports:
      junit: junit.xml
  cache:
    key: "${CI_JOB_NAME}"
    paths:
      - .cache/pip
      - .cache/apt
  coverage: '/^TOTAL\s+.*\s+(\d+\.?\d*)%/'
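  # example of a pytest-cov summary line this regex would match (illustrative):
  #   TOTAL    12345   678    95%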
  tags:
    - executor-docker

test:3.11:
  <<: *test

# -- docker
branch_image:
  stage: branch
  script:
    - docker build --pull -t $DOCKER_BRANCH .
    - docker push $DOCKER_BRANCH
  retry:
    max: 2
    when:
      - runner_system_failure
      - stuck_or_timeout_failure
  tags:
    - executor-docker

branch_scan:
  stage: branch
  needs: [branch_image]
  # default rules spawn a merge request pipeline, we don't want that
  rules:
    - if: $CI_COMMIT_BRANCH
  variables:
    GIT_STRATEGY: fetch
    # image to scan
    CS_IMAGE: "$DOCKER_BRANCH"
    # image to compare to
    CS_DEFAULT_BRANCH_IMAGE: "$CI_REGISTRY/computing/gitlab/server:latest"
    # path to Dockerfile for remediation
    CS_DOCKERFILE_PATH: "Dockerfile"
  before_script: []

latest_image:
  stage: latest
  dependencies:
    - branch_image
  script:
    - docker pull $DOCKER_BRANCH
    - docker tag $DOCKER_BRANCH $DOCKER_LATEST
    - docker push $DOCKER_LATEST
  retry:
    max: 2
    when:
      - runner_system_failure
      - stuck_or_timeout_failure
  tags:
    - executor-docker
## Description of problem
<!--
Describe in detail what you are trying to do and what the result is.
Exact timestamps, error tracebacks, and screenshots (if applicable) are very helpful.
-->
## Expected behavior
<!-- What do you expect to happen instead? -->
## Steps to reproduce
<!-- Step-by-step procedure for reproducing the issue -->
## Context/environment
<!--
Describe the environment you are working in:
* If using the ligo-gracedb client package, which version?
* Your operating system
* Your browser (web interface issues only)
* If you are experiencing this problem while working on a LIGO or Virgo computing cluster, which cluster are you using?
-->
## Suggested solutions
<!-- Any ideas for how to resolve this problem? -->
## Description of feature request
<!--
Describe your feature request!
Is it a web interface change? Some underlying feature? An API resource?
The more detail you can provide, the better.
-->
## Use cases
<!-- List some specific cases where this feature will be useful -->
## Benefits
<!-- Describe the benefits of adding this feature -->
## Drawbacks
<!--
Are there any drawbacks to adding this feature?
Can you think of any ways in which this will negatively affect the service for any set of users?
-->
## Suggested solutions
<!-- Do you have any ideas for how to implement this feature? -->
# Changelog
All notable changes to this project will be documented in this file.
## 2017-07-05 (gracedb-1.0.10) <tanner.prestegard@ligo.org>
### Added
- global setting for turning off LVAlerts (SEND_XMPP_ALERTS)
- instructions to the web page for creating notifications
### Fixed
- logging and display of performance information
- failover to lvalert_send when lvalert_overseer is not working
## 2017-05-30 (gracedb-1.0.9) <tanner.prestegard@ligo.org>
### Added
- list of labels for an event is now included in all LVAlert messages
- available labels now exposed via the REST API
- ability to create events with labels attached
- 'offline' parameter
### Fixed
- cleanup of label and event creation code, use of proper HTTP response codes
- LVAlert messages now sent when a label is removed and when a log entry is added via the web interface
## 2017-05-08 (gracedb-1.0.8) <tanner.prestegard@ligo.org>
### Added
- case-insensitive search queries
### Changed
- removed gracedb/pyparsing.py in favor of the pip-installed version
- overall reorganization and cleanup of Django settings
## 2017-04-25 (gracedb-1.0.7) <tanner.prestegard@ligo.org>
### Added
- V1OPS label and access to signoff pages from Virgo control room
- EM_SENT label
- new save method for EventLog, EMBBEventLog, EMObservation, EMFootprint to generate log number and save in a single SQL query
### Fixed
- typo in an MOU group's name (CTA)
## 2017-03-21 (gracedb-1.0.6) <tanner.prestegard@ligo.org>
### Changed
- modify handling of Fermi GCNs so as to not overwrite trigger durations
- change settings so gracedb servers use 'test' settings by default
## 2017-03-07 (gracedb-1.0.5) <tanner.prestegard@ligo.org>
### Added
- extraction of single IFO times from CWB event files, saving them in the database, and exposing them to MOU partners
### Changed
- increased size of 'debug' Django logs
## 2017-02-28 (gracedb-1.0.4) <tanner.prestegard@ligo.org>
### Changed
- added new LIB robot certificate
### Fixed
- issue where non-LVC members could remove the lv-em tag on log messages
## 2017-01-24 (gracedb-1.0.3) <tanner.prestegard@ligo.org>
### Added
- several MOU groups
- human-readable FAR to event pages
- leap second from 31 Dec 2016
- test button for contacts
### Changed
- separated phone alerts into voice and text options
## 2017-01-10 (gracedb-1.0.2) <tanner.prestegard@ligo.org>
### Changed
- increased Django logging verbosity and clarity
- modernized Django template structure to Django 1.8 standard
- reorganized settings
## 2016-12-20 (gracedb-1.0.1) <tanner.prestegard@ligo.org>
### Added
- capability for removing labels via gracedb-client
### Changed
- expose singleInspiral times and IFOs for EM partners
## 2016-11-22 <tanner.prestegard@ligo.org>
### Added
- capability for sending phone/SMS alerts via Twilio
### Changed
- updated admin documentation
## 2016-11-11 <tanner.prestegard@ligo.org>
### Added
- AllSkyLong search and updated LVAlert nodes
### Changed
- event file structure for LIB events
## 2016-10-20 <tanner.prestegard@ligo.org>
### Added
- README.md file
### Changed
- Repository moved from versions.ligo.org to git.ligo.org
## 2016-01-07
### Added
- 'less than' sign in event display when FAR is an upper limit (#3105)
- deprecation warning header for old client url endpoints (#2420)
- event subclass for oLIB events (#3093)
### Changed
- now including WhereWhen section in retraction VOEvents (#3092)
### Fixed
- only add SkymapViewer button for json files corresponding to skymaps (#3004)
## 2015-11-17
### Added
- support for tagnames with spaces in REST URL patterns (#2730)
- documentation about requesting changes to EM Observation entries (#2591)
- support for more complex label queries for searches and email
notifications (#2672, #2569)
- support for file uploads through the web interface (#2543, #1367)
### Changed
- create LigoLdapUser object (instead of django User) for unknown
Shib users with a valid session (#2629)
- time conversion functions handle None and empty string input (#2664)
### Fixed
- internal_user_required decorator no longer assumes HTTP request as
first arg (#2524)
- removed spurious factor of 1000 from fluence calculation (#2625)
## 2015-10-06
### Added
- banner warning if the user is looking at an lvem_view page (#2600)
- this changelog file (#2599)
- added event page link to labelling email alerts (#2575)
- add value from coinc_event.likelihood column to "Coinc Tables" for gstlal
events (#2513)
### Changed
- interpretation of values for cWB events (see #2484)
- list of labels for query now generated by DB query instead of static list
(#2523)
- allow more than one EMObservation subrow to be expanded at one time (#2605)
- Changed name of "Duration" field in EMObservation form to "On source
exposure" (#2603)
- Removed customized wait method from throttles so that we can send an
x-throttle-wait-seconds header to the user (#2457)
- Allow the 'internal' parameter of VOEvents to be controlled by the requestor.
  This is now taken from POST data and defaults to 1, i.e. internal only (#2608).
### Fixed
- remove user from groups that are not present in IdP shibboleth assertion
(#2600)
- description of the 'internal' parameter in buildVOEvent.py (#2600)
FROM debian:bookworm
LABEL name="LIGO GraceDB Django application" \
maintainer="alexander.pace@ligo.org" \
date="20240306"
ARG SETTINGS_MODULE="config.settings.container.dev"
COPY docker/SWITCHaai-swdistrib.gpg /etc/apt/trusted.gpg.d
COPY docker/backports.pref /etc/apt/preferences.d
RUN apt-get update && \
apt-get -y install gnupg curl
RUN echo 'deb http://deb.debian.org/debian bookworm-backports main' > /etc/apt/sources.list.d/backports.list
RUN echo 'deb http://apt.postgresql.org/pub/repos/apt bookworm-pgdg main' > /etc/apt/sources.list.d/pgdg.list
RUN curl https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add -
RUN apt-get update && \
apt-get --assume-yes upgrade && \
apt-get install --install-recommends --assume-yes \
apache2 \
emacs-nox \
gcc \
git \
krb5-user \
libkrb5-dev \
libapache2-mod-shib \
libapache2-mod-xsendfile \
libldap2-dev \
libldap-2.5-0 \
libsasl2-dev \
libsasl2-modules-gssapi-mit \
libxml2-dev \
pkg-config \
libpng-dev \
libpq-dev \
libfreetype6-dev \
libxslt-dev \
libsqlite3-dev \
php \
php8.2-pgsql \
php8.2-mbstring \
postgresql-client-15 \
python3 \
python3-dev \
python3-libxml2 \
python3-pip \
procps \
redis \
shibboleth-sp-common \
shibboleth-sp-utils \
libssl-dev \
swig \
htop \
telnet \
vim && \
apt-get clean && \
curl -sL https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - && \
apt-get update && apt-get install --assume-yes yarn
# Install AWS X-ray daemon
RUN curl -O https://s3.us-east-2.amazonaws.com/aws-xray-assets.us-east-2/xray-daemon/aws-xray-daemon-3.x.deb
RUN dpkg -i aws-xray-daemon-3.x.deb
RUN rm aws-xray-daemon-3.x.deb
# Install osg-ca-certs:
RUN curl -O https://hypatia.aei.mpg.de/lsc-amd64-bookworm/osg-ca-certs_1.132NEW-1+deb12u0_all.deb
RUN dpkg -i osg-ca-certs_1.132NEW-1+deb12u0_all.deb
RUN rm osg-ca-certs_1.132NEW-1+deb12u0_all.deb
# Install ligo-ca-certs:
RUN curl -O https://hypatia.aei.mpg.de/lsc-amd64-bookworm/ligo-ca-certs_1.0.2-0+deb12u0_all.deb
RUN dpkg -i ligo-ca-certs_1.0.2-0+deb12u0_all.deb
RUN rm ligo-ca-certs_1.0.2-0+deb12u0_all.deb
# Docker scripts:
COPY docker/entrypoint /usr/local/bin/entrypoint
COPY docker/cleanup /usr/local/bin/cleanup
# Supervisord configs:
COPY docker/supervisord.conf /etc/supervisor/supervisord.conf
COPY docker/supervisord-apache2.conf /etc/supervisor/conf.d/apache2.conf
COPY docker/supervisord-igwn-alert-overseer.conf /etc/supervisor/conf.d/igwn-overseer.conf
COPY docker/supervisord-shibd.conf /etc/supervisor/conf.d/shibd.conf
COPY docker/supervisord-aws-xray.conf /etc/supervisor/conf.d/aws-xray.conf
COPY docker/supervisord-qcluster.conf /etc/supervisor/conf.d/qcluster.conf
# Apache configs:
COPY docker/apache-config /etc/apache2/sites-available/gracedb.conf
COPY docker/mpm_prefork.conf /etc/apache2/mods-enabled/mpm_prefork.conf
# Enable mpm_event module:
RUN rm /etc/apache2/mods-enabled/mpm_prefork.*
RUN rm /etc/apache2/mods-enabled/php8.2.*
RUN cp /etc/apache2/mods-available/mpm_event.* /etc/apache2/mods-enabled/
# Shibboleth configs and certs:
COPY docker/shibboleth-ds /etc/shibboleth-ds
COPY docker/login.ligo.org.cert.LIGOCA.pem /etc/shibboleth/login.ligo.org.cert.LIGOCA.pem
COPY docker/inc-md-cert.pem /etc/shibboleth/inc-md-cert.pem
COPY docker/check_shibboleth_status /usr/local/bin/check_shibboleth_status
RUN a2dissite 000-default.conf && \
a2ensite gracedb.conf && \
a2enmod headers proxy proxy_http rewrite xsendfile
# this line is unfortunate because "." updates for nearly any change to the
# repository and therefore docker build rarely caches the steps below
ADD . /app/gracedb_project
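# A common mitigation (sketch only, not applied here) is to install the Python
# requirements in their own layer first, so that layer is only rebuilt when
# requirements.txt changes:
#   COPY requirements.txt /app/requirements.txt
#   RUN pip3 install -r /app/requirements.txt --break-system-packages
#   ADD . /app/gracedb_project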
# install gracedb application itself
WORKDIR /app/gracedb_project
RUN pip3 install --upgrade pip --break-system-packages
RUN pip3 install -r requirements.txt --break-system-packages
# install supervisor from pip
RUN pip3 install supervisor --break-system-packages
# Give pip-installed packages priority over distribution packages
ENV PYTHONPATH /usr/local/lib/python3.11/dist-packages:$PYTHONPATH
ENV ENABLE_SHIBD false
ENV ENABLE_OVERSEER true
ENV VIRTUAL_ENV /dummy/
# Expose port and run Gunicorn
EXPOSE 8000
# Generate documentation
WORKDIR /app/gracedb_project/docs/user_docs
RUN sphinx-build -b html source build
WORKDIR /app/gracedb_project/docs/admin_docs
RUN sphinx-build -b html source build
RUN mkdir /app/logs /app/project_data
WORKDIR /app/gracedb_project
RUN DJANGO_SETTINGS_MODULE=${SETTINGS_MODULE} \
DJANGO_DB_NAME=fake_name \
DJANGO_DB_USER=fake_user \
DJANGO_DB_PASSWORD=fake_password \
DJANGO_SECRET_KEY=fake_key \
DJANGO_PRIMARY_FQDN=fake_fqdn \
DJANGO_ALERT_EMAIL_FROM=fake_email \
EGAD_URL=fake_url \
EGAD_API_KEY=fake_key \
LVALERT_USER=fake_user \
LVALERT_PASSWORD=fake_password \
LVALERT_SERVER=fake_server \
LVALERT_OVERSEER_PORT=2 \
IGWN_ALERT_USER=fake_user \
IGWN_ALERT_PASSWORD=fake_password \
IGWN_ALERT_SERVER=fake_server \
IGWN_ALERT_OVERSEER_PORT=2 \
IGWN_ALERT_GROUP=fake_group \
DJANGO_TWILIO_ACCOUNT_SID=fake_sid \
DJANGO_TWILIO_AUTH_TOKEN=fake_token \
DJANGO_AWS_ELASTICACHE_ADDR=fake_address:11211 \
AWS_SES_ACCESS_KEY_ID=fake_aws_id \
AWS_SES_SECRET_ACCESS_KEY=fake_aws_key \
python3 manage.py collectstatic --noinput
RUN rm -rf /app/logs/* /app/project_data/*
RUN useradd -M -u 50001 -g www-data -s /bin/false gracedb
#RUN groupadd -r xray
#RUN useradd -M -u 50002 -g xray -s /bin/false xray
# set secure file/directory permissions. In particular, ADD command at
# beginning of recipe inherits umask of user running the build
RUN chmod 0755 /usr/local/bin/entrypoint && \
chmod 0755 /usr/local/bin/cleanup && \
chown gracedb:www-data /app/logs /app/project_data && \
chmod 0750 /app/logs /app/project_data && \
find /app/gracedb_project -type d -exec chmod 0755 {} + && \
find /app/gracedb_project -type f -exec chmod 0644 {} +
# create and set scitoken key cache directory
RUN mkdir /app/scitokens_cache && \
chown gracedb:www-data /app/scitokens_cache && \
chmod 0750 /app/scitokens_cache
ENV XDG_CACHE_HOME /app/scitokens_cache
# patch voeventparse for python3.10+:
RUN sed -i 's/collections.Iterable/collections.abc.Iterable/g' /usr/local/lib/python3.11/dist-packages/voeventparse/voevent.py
# Remove packages that expose security vulnerabilities and close out.
# Edit: zlib1g* can't be removed because of a dpkg Pre-Depends error
RUN apt-get --assume-yes --purge autoremove wget libaom3 node-ip
RUN apt-get clean
ENTRYPOINT [ "/usr/local/bin/entrypoint" ]
CMD ["/usr/local/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"]
# GraceDB server code
Server code for the GRAvitational-wave Candidate Event Database.
You will need to fork this repository and submit a merge request
in order to make changes to the code.
import sys
import VOEvent
class VOEventExportClass(VOEvent.VOEvent):
    def __init__(self, event, schemaURL):
        self.event = event
        self.schemaURL = schemaURL

    def export(self, outfile, level, namespace_='', name_='VOEvent', namespacedef_=''):
        VOEvent.showIndent(outfile, level)
        added_stuff = 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n'
        added_stuff += 'xmlns:voe="http://www.ivoa.net/xml/VOEvent/v2.0"\n'
        added_stuff += 'xsi:schemaLocation="http://www.ivoa.net/xml/VOEvent/v2.0 %s"\n' % self.schemaURL
        outfile.write('<%s%s%s %s' % (namespace_, name_,
            namespacedef_ and ' ' + namespacedef_ or '',
            added_stuff,
            ))
        # self.event.exportAttributes(outfile, level, [], namespace_)
        self.event.exportAttributes(outfile, level, [])
        if self.event.hasContent_():
            outfile.write('>\n')
            # self.event.exportChildren(outfile, level + 1, namespace_='', name_)
            self.event.exportChildren(outfile, level + 1, '', name_)
            VOEvent.showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
try:
    from cStringIO import StringIO
except ImportError:
    from StringIO import StringIO

def stringVOEvent(event, schemaURL = "http://www.ivoa.net/xml/VOEvent/VOEvent-v2.0.xsd"):
    '''
    Converts a VOEvent to a string suitable for output
    '''
    v = VOEventExportClass(event, schemaURL)
    out = StringIO()
    out.write('<?xml version="1.0" ?>\n')
    v.export(out, 0, namespace_='voe:')
    out.write('\n')
    return out.getvalue()

def paramValue(p):
    s1 = p.get_value()
    s2 = p.get_Value()
    if not s2: return s1
    if not s1: return s2
    if len(s1) > len(s2): return s1
    else: return s2

def htmlList(list):
    '''
    Converts a list of strings to an HTML <ul><li> structure.
    '''
    s = '<ul>'
    for x in list:
        s += '<li>' + str(x) + '</li>'
    s += '</ul>'
    return s

def htmlParam(g, p):
    '''
    Builds an HTML table row from a Param and its enclosing Group (or None)
    '''
    s = ''
    if g == None:
        s += '<td/>'
    else:
        s += '<td>' + g.get_name() + '</td>'
    s += '<td>' + str(p.get_name()) + '</td>'
    s += '<td>'
    for d in p.get_Description(): s += str(d)
    s += '</td>'
    s += '<td><b>' + str(paramValue(p)) + '</b></td>'
    s += '<td>' + str(p.get_ucd()) + '</td>'
    s += '<td>' + str(p.get_unit()) + '</td>'
    s += '<td>' + str(p.get_dataType()) + '</td>'
    return s

def parse(file):
    '''
    Parses a file and builds the VOEvent DOM.
    '''
    doc = VOEvent.parsexml_(file)
    rootNode = doc.getroot()
    rootTag, rootClass = VOEvent.get_root_tag(rootNode)
    v = rootClass.factory()
    v.build(rootNode)
    return v

def parseString(inString):
    '''
    Parses a string and builds the VOEvent DOM.
    '''
    from StringIO import StringIO
    doc = VOEvent.parsexml_(StringIO(inString))
    rootNode = doc.getroot()
    rootTag, rootClass = VOEvent.get_root_tag(rootNode)
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    return rootObj
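
# Illustrative round-trip sketch (assumes "event.xml" holds a VOEvent 2.0
# document); not part of the original module:
#   v = parse(open("event.xml"))
#   print stringVOEvent(v)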
def getWhereWhen(v):
    '''
    Builds a dictionary of the information in the WhereWhen section:
    observatory: location of observatory (string);
    coord_system: coordinate system ID, for example UTC-FK5-GEO;
    time: ISO8601 representation of time, for example 1918-11-11T11:11:11;
    timeError: in seconds;
    longitude: in degrees, usually right ascension;
    latitude: in degrees, usually declination;
    positionalError: positional error in degrees.
    '''
    wwd = {}
    ww = v.get_WhereWhen()
    if not ww:
        return wwd
    w = ww.get_ObsDataLocation()
    if not w:
        return wwd
    ol = w.get_ObservatoryLocation()
    if ol:
        wwd['observatory'] = ol.get_id()
    ol = w.get_ObservationLocation()
    if not ol:
        return wwd
    observation = ol.get_AstroCoords()
    if not observation:
        return wwd
    wwd['coord_system'] = observation.get_coord_system_id()
    time = observation.get_Time()
    wwd['time'] = time.get_TimeInstant().get_ISOTime()
    wwd['timeError'] = time.get_Error()
    pos = observation.get_Position2D()
    if not pos:
        return wwd
    wwd['positionalError'] = pos.get_Error2Radius()
    v2 = pos.get_Value2()
    if not v2:
        return wwd
    wwd['longitude'] = v2.get_C1()
    wwd['latitude'] = v2.get_C2()
    return wwd
def makeWhereWhen(wwd):
    '''
    Expects a dictionary of the information in the WhereWhen section, and makes a
    VOEvent.WhereWhen object suitable for set_WhereWhen().
    observatory: location of observatory (string);
    coord_system: coordinate system ID, for example UTC-FK5-GEO;
    time: ISO8601 representation of time, for example 1918-11-11T11:11:11;
    timeError: in seconds;
    longitude: in degrees, usually right ascension;
    latitude: in degrees, usually declination;
    positionalError: positional error in degrees.
    '''
    if not wwd.has_key('observatory'): wwd['observatory'] = 'unknown'
    if not wwd.has_key('coord_system'): wwd['coord_system'] = 'UTC-FK5-GEO'
    if not wwd.has_key('timeError'): wwd['timeError'] = 0.0
    if not wwd.has_key('positionalError'): wwd['positionalError'] = 0.0
    if not wwd.has_key('time'):
        print "Cannot make WhereWhen without time"
        return None
    if not wwd.has_key('longitude'):
        print "Cannot make WhereWhen without longitude"
        return None
    if not wwd.has_key('latitude'):
        print "Cannot make WhereWhen without latitude"
        return None
    ac = VOEvent.AstroCoords(coord_system_id=wwd['coord_system'])
    ac.set_Time(
        VOEvent.Time(
            TimeInstant = VOEvent.TimeInstant(wwd['time'])))
    ac.set_Position2D(
        VOEvent.Position2D(
            Value2 = VOEvent.Value2(wwd['longitude'], wwd['latitude']),
            Error2Radius = wwd['positionalError']))
    acs = VOEvent.AstroCoordSystem(id=wwd['coord_system'])
    onl = VOEvent.ObservationLocation(acs, ac)
    oyl = VOEvent.ObservatoryLocation(id=wwd['observatory'])
    odl = VOEvent.ObsDataLocation(oyl, onl)
    ww = VOEvent.WhereWhen()
    ww.set_ObsDataLocation(odl)
    return ww
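
# Illustrative sketch (hypothetical values; v is a VOEvent object such as one
# returned by parse/parseString above): build a WhereWhen and read it back:
#   wwd = {'time': '2015-09-14T09:50:45', 'longitude': 197.45, 'latitude': -1.37}
#   v.set_WhereWhen(makeWhereWhen(wwd))
#   print getWhereWhen(v)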
def getParamNames(v):
    '''
    Takes a VOEvent and produces a list of pairs of group name and param name.
    For a bare param, the group name is the empty string.
    '''
    list = []
    w = v.get_What()
    if not w: return list
    for p in v.get_What().get_Param():
        list.append(('', p.get_name()))
    for g in v.get_What().get_Group():
        for p in g.get_Param():
            list.append((g.get_name(), p.get_name()))
    return list

def findParam(event, groupName, paramName):
    '''
    Finds a Param in a given VOEvent that has the specified groupName
    and paramName. If it is a bare param, the group name is the empty string.
    '''
    w = event.get_What()
    if not w:
        print "No <What> section in the event!"
        return None
    if groupName == '':
        for p in event.get_What().get_Param():
            if p.get_name() == paramName:
                return p
    else:
        for g in event.get_What().get_Group():
            if g.get_name() == groupName:
                for p in g.get_Param():
                    if p.get_name() == paramName:
                        return p
    print 'Cannot find param named %s/%s' % (groupName, paramName)
    return None
######## utilityTable ########################
class utilityTable(VOEvent.Table):
    '''
    Class to represent a simple Table from VOEvent
    '''
    def __init__(self, table):
        self.table = table
        self.colNames = []
        self.default = []
        col = 0
        for f in table.get_Field():
            if f.get_name():
                self.colNames.append(f.get_name())
                type = f.get_dataType()
                if type == 'float': self.default.append(0.0)
                elif type == 'int': self.default.append(0)
                else: self.default.append('')

    def getTable(self):
        return self.table

    def blankTable(self, nrows):
        '''
        From a table template, replaces the Data section with nrows of empty TR and TD
        '''
        data = VOEvent.Data()
        ncol = len(self.colNames)
        for i in range(nrows):
            tr = VOEvent.TR()
            for col in range(ncol):
                tr.add_TD(self.default[col])
            data.add_TR(tr)
        self.table.set_Data(data)

    def getByCols(self):
        '''
        Returns a dictionary of column vectors that represent the table.
        The key for the dict is the Field name for that column.
        '''
        d = self.table.get_Data()
        nrow = len(d.get_TR())
        ncol = len(self.colNames)
        # we will build a matrix nrow*ncol and fill in the values as they
        # come in, with col varying fastest. The return is a dictionary,
        # arranged by column name, each with a vector of
        # properly typed values.
        data = []
        for col in range(ncol):
            data.append([self.default[col]]*nrow)
        row = 0
        for tr in d.get_TR():
            col = 0
            for td in tr.get_TD():
                data[col][row] = td
                col += 1
            row += 1
        dict = {}
        col = 0
        for colName in self.colNames:
            dict[colName] = data[col]
            col += 1
        return dict

    def setValue(self, name, irow, value, out=sys.stdout):
        '''
        Copies a single value into a cell of the table.
        The column is identified by its name, and the row by an index 0,1,2...
        '''
        if name in self.colNames:
            icol = self.colNames.index(name)
        else:
            print>>out, "setTable: Unknown column name %s. Known list is %s" % (name, str(self.colNames))
            return False
        d = self.table.get_Data()
        ncols = len(self.colNames)
        nrows = len(d.get_TR())
        if nrows <= irow:
            print>>out, "setTable: not enough rows -- you want %d, table has %d. Use blankTable to allocate the table." % (irow+1, nrows)
            return False
        tr = d.get_TR()[irow]
        row = tr.get_TD()
        row[icol] = value
        tr.set_TD(row)

    def toString(self):
        '''
        Makes a crude string representation of a utilityTable
        '''
        s = ' '
        for name in self.colNames:
            s += '%9s|' % name[:9]
        s += '\n\n'
        d = self.table.get_Data()
        for tr in d.get_TR():
            for td in tr.get_TD():
                s += '%10s' % str(td)[:10]
            s += '\n'
        return s
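
# Illustrative sketch (someTable and the column name 'ra' are hypothetical):
# wrap a VOEvent Table, allocate rows, fill a cell, and print the result:
#   t = utilityTable(someTable)
#   t.blankTable(3)              # three rows of typed default values
#   t.setValue('ra', 0, 197.45)
#   print t.toString()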
{
    "name": "gracedb",
    "dependencies": {
        "dijit": "1.10.4",
        "dojox": "1.10.4"
    }
}
# To run this manually (not via systemd):
# gunicorn --config config/gunicorn_config.py config.wsgi:application
# (assuming that you are in the base directory of the GraceDB server code repo)
import os
from os.path import abspath, dirname, join
import sys
import multiprocessing
# needed for the exception raised in get_from_env below
from django.core.exceptions import ImproperlyConfigured
# Useful function for getting environment variables
def get_from_env(envvar, default_value=None, fail_if_not_found=True):
    value = os.environ.get(envvar, default_value)
    if (value == default_value and fail_if_not_found):
        raise ImproperlyConfigured(
            'Could not get environment variable {0}'.format(envvar))
    return value
# Parameters
GUNICORN_PORT = 8080
LOG_DIR = abspath(join(dirname(__file__), "..", "..", "logs"))
# Gunicorn configuration ------------------------------------------------------
# Bind to localhost on specified port
bind = "127.0.0.1:{port}".format(port=GUNICORN_PORT)
# Number of workers -----------------------------------------------------------
# 2*CPU + 1 (recommendation from Gunicorn documentation);
# bumped to 3*CPU + 1 after testing. Maybe increase this number in the cloud
# deployment?
workers = int(get_from_env('GUNICORN_WORKERS',
    default_value=multiprocessing.cpu_count()*3 + 1,
    fail_if_not_found=False))
# NOTE: it was found in extensive testing that threads > 1 are prone
# to connection lockups. Leave this at 1 for safety until there are
# fixes in gunicorn.
# Why not sync? The sync worker is prone to timeouts for long requests,
# like big queries. But gthread sends a heartbeat back to the master process
# to keep the worker alive. We could just set the timeout to a really large
# number, which would keep long requests stable, but then a genuinely stuck
# worker would also be subject to that really long timeout. It's a tradeoff.
# All this goes away with async workers, but as of Django 3.2 the ORM does not
# support async, and testing failed pretty catastrophically and unreliably.
threads = int(get_from_env('GUNICORN_THREADS',
    default_value=1,
    fail_if_not_found=False))
# Worker connections. Limit the number of connections between apache<-->gunicorn
worker_connections = workers * threads
# Worker class ----------------------------------------------------------------
# Gunicorn's own default is sync (generally safe and low-resource), but we
# default to gthread here for the heartbeat behavior described above:
# https://docs.gunicorn.org/en/stable/design.html#sync-workers
worker_class = get_from_env('GUNICORN_WORKER_CLASS',
    default_value='gthread',
    fail_if_not_found=False)
# Timeout ---------------------------------------------------------------------
# If not specified, the timeout default is 30 seconds:
# https://gunicorn-docs.readthedocs.io/en/stable/settings.html#worker-processes
timeout = get_from_env('GUNICORN_TIMEOUT',
    default_value=30,
    fail_if_not_found=False)
graceful_timeout = timeout
# max_requests settings -------------------------------------------------------
# The maximum number of requests a worker will process before restarting.
# May be useful if we have memory leak problems.
# The jitter is drawn from a uniform distribution:
# randint(0, max_requests_jitter)
max_requests = get_from_env('GUNICORN_MAX_REQUESTS',
    default_value=5000,
    fail_if_not_found=False)
max_requests_jitter = get_from_env('GUNICORN_MAX_REQUESTS_JITTER',
    default_value=250,
    fail_if_not_found=False)
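# With the defaults above, each worker is recycled after roughly 5000-5250
# requests (the jitter is drawn uniformly from [0, max_requests_jitter]).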
# keepalive -------------------------------------------------------------------
# The number of seconds to wait for requests on a Keep-Alive connection.
# Generally set in the 1-5 seconds range for servers with direct connection
# to the client (e.g. when you don’t have separate load balancer).
# When Gunicorn is deployed behind a load balancer, it often makes sense to set
# this to a higher value.
# NOTE: force gunicorn to close its connection to apache after each request.
# This has been the source of so many 502's. Basically in periods of high activity,
# gunicorn would hold on to open sockets with apache, and just deadlock itself:
# https://github.com/benoitc/gunicorn/issues/2917
keepalive = get_from_env('GUNICORN_KEEPALIVE',
    default_value=0,
    fail_if_not_found=False)
# preload_app -----------------------------------------------------------------
# Load application code before the worker processes are forked.
# By preloading an application you can save some RAM resources as well as speed
# up server boot times. Although, if you defer application loading to each
# worker process, you can reload your application code easily by restarting
# workers.
# If you aren't going to make use of on-the-fly reloading, consider preloading
# your application code to reduce its memory footprint. So, turn this on in
# production. This is default set to False for development, but
# **TURN THIS TO TRUE FOR AWS DEPLOYMENT **
preload_app = get_from_env('GUNICORN_PRELOAD_APP',
    default_value=True,
    fail_if_not_found=False)
# Logging ---------------------------------------------------------------------
# Access log
accesslog = join(LOG_DIR, "gunicorn_access.log")
access_log_format = ('GUNICORN | %(h)s %(l)s %(u)s %(t)s '
                     '"%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"')
# Error log
errorlog = join(LOG_DIR, "gunicorn_error.log")
# debug logging doesn't provide actual information. And this will
# eliminate the "Connection closed." messages while still giving info
# about worker restarts.
loglevel = 'info'
capture_output = True
# using /dev/shm/ instead of /tmp for the temporary worker directory. See:
# https://pythonspeed.com/articles/gunicorn-in-docker/
# “in AWS an EBS root instance volume may sometimes hang for half a minute
# and during this time Gunicorn workers may completely block.”
worker_tmp_dir='/dev/shm'
# Override logger class to modify error format
from gunicorn.glogging import Logger
class CustomLogger(Logger):
    error_fmt = 'GUNICORN | ' + Logger.error_fmt
logger_class = CustomLogger
def post_fork(server, worker):
    server.log.info("Worker spawned (pid: %s)", worker.pid)

def pre_fork(server, worker):
    pass

def pre_exec(server):
    server.log.info("Forked child, re-executing.")

def when_ready(server):
    server.log.info("Server is ready. Spawning workers")

def worker_int(worker):
    worker.log.info("worker received INT or QUIT signal")

def worker_abort(worker):
    worker.log.info("worker received SIGABRT signal")
"""
Django settings for gracedb project.
Environment variable DJANGO_SETTINGS_MODULE should be set for a
given instance to determine the settings to run.
Description of settings:
BASE SETTINGS - not to be used as a full settings configuration
---------------------------------------------------------------
base.py - contains the basic settings for running a GraceDB
server.
secret.py - generated by Puppet, contains secret settings like the
database password, API keys, etc. For use with VM-based
deployments. DO NOT EDIT.
Virtual machine deployments
---------------------------
vm/production.py - settings for a VM-based production instance deployed
with Puppet.
vm/dev.py - settings for a VM-based development instance deployed
with Puppet.
Container-based deployments
---------------------------
NOTE: many settings are imported from environment variables for
this deployment type!
container/production.py - settings for a container-based deployment
of a production instance.
container/dev.py - settings for a container-based deployment of a
development instance.
"""
# For running a containerized version of the service that gets secrets
# from environment variables. Builds on base.py settings.
import os
from django.core.exceptions import ImproperlyConfigured
from ..base import *
# Get required variables from environment variables ---------------------------
# Get database user from environment and check
db_user = os.environ.get('DJANGO_DB_USER', None)
if db_user is None:
    raise ImproperlyConfigured('Could not get database user from envvars.')
# Get database password from environment and check
db_password = os.environ.get('DJANGO_DB_PASSWORD', None)
if db_password is None:
    raise ImproperlyConfigured('Could not get database password from envvars.')
# Get database name from environment and check
db_name = os.environ.get('DJANGO_DB_NAME', None)
if db_name is None:
    raise ImproperlyConfigured('Could not get database name from envvars.')
# Secret key for a Django installation
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', None)
if SECRET_KEY is None:
    raise ImproperlyConfigured('Could not get secret key from envvars.')
# Get primary FQDN
SERVER_FQDN = os.environ.get('DJANGO_PRIMARY_FQDN', None)
if SERVER_FQDN is None:
    raise ImproperlyConfigured('Could not get FQDN from envvars.')
LIGO_FQDN = SERVER_FQDN
## EGAD (External GraceDB Alert Dispatcher) configuration
ENABLE_EGAD_EMAIL = parse_envvar_bool(
    get_from_env('ENABLE_EGAD_EMAIL',
                 fail_if_not_found=False, default_value="false")
)
ENABLE_EGAD_KAFKA = parse_envvar_bool(
    get_from_env('ENABLE_EGAD_KAFKA',
                 fail_if_not_found=False, default_value="false")
)
ENABLE_EGAD_MATTERMOST = parse_envvar_bool(
    get_from_env('ENABLE_EGAD_MATTERMOST',
                 fail_if_not_found=False, default_value="false")
)
ENABLE_EGAD_PHONE = parse_envvar_bool(
    get_from_env('ENABLE_EGAD_PHONE',
                 fail_if_not_found=False, default_value="false")
)
ENABLE_EGAD = (
    ENABLE_EGAD_EMAIL or ENABLE_EGAD_KAFKA
    or ENABLE_EGAD_MATTERMOST or ENABLE_EGAD_PHONE
)
EGAD_URL = get_from_env('EGAD_URL',
    fail_if_not_found=ENABLE_EGAD, default_value=None)
EGAD_API_KEY = get_from_env('EGAD_API_KEY',
    fail_if_not_found=ENABLE_EGAD, default_value=None)
# Turn LVAlert on/off from the environment. Adding this
# to turn lvalerts on/off from docker compose/update instead
# of having to rebuild containers. If the environment variable
# isn't set, then revert to the hardwired behavior:
xmpp_env_var = get_from_env('SEND_LVALERT_XMPP_ALERTS',
    default_value=SEND_XMPP_ALERTS,
    fail_if_not_found=False)
# Fix for other boolean values:
if (isinstance(xmpp_env_var, str) and
        xmpp_env_var.lower() in ['true', 't', '1']):
    SEND_XMPP_ALERTS = True
elif (isinstance(xmpp_env_var, str) and
        xmpp_env_var.lower() in ['false', 'f', '0']):
    SEND_XMPP_ALERTS = False
else:
    SEND_XMPP_ALERTS = True
# Get igwn_alert_overseer status:
igwn_alert_on = get_from_env(
    'ENABLE_IGWN_OVERSEER',
    default_value=False,
    fail_if_not_found=False
)
if (isinstance(igwn_alert_on, str) and
        igwn_alert_on.lower() in ['true', 't', '1']):
    igwn_alert_overseer_on = True
else:
    igwn_alert_overseer_on = False
# Get igwn-alert server
igwn_alert_server = os.environ.get('IGWN_ALERT_SERVER', None)
if igwn_alert_server is None:
    raise ImproperlyConfigured('Could not get igwn-alert server from envvars.')
# Get igwn-alert Overseer listen port
igwn_alert_overseer_port = os.environ.get('IGWN_ALERT_OVERSEER_PORT', None)
if igwn_alert_overseer_port is None:
    raise ImproperlyConfigured('Could not get igwn-alert overseer port '
                               'from envvars.')
# Get igwn-alert group from environment:
igwn_alert_group = os.environ.get('IGWN_ALERT_GROUP', DEFAULT_IGWN_ALERT_GROUP)
# Get igwn-alert username
igwn_alert_user = os.environ.get('IGWN_ALERT_USER', None)
if igwn_alert_user is None:
    raise ImproperlyConfigured('Could not get igwn-alert username from envvars.')
# Get igwn-alert password
igwn_alert_password = os.environ.get('IGWN_ALERT_PASSWORD', None)
if igwn_alert_password is None:
    raise ImproperlyConfigured('Could not get igwn-alert password from envvars.')
# Get Twilio account information from environment
TWILIO_ACCOUNT_SID = os.environ.get('DJANGO_TWILIO_ACCOUNT_SID', None)
if TWILIO_ACCOUNT_SID is None:
    raise ImproperlyConfigured('Could not get Twilio acct SID from envvars.')
TWILIO_AUTH_TOKEN = os.environ.get('DJANGO_TWILIO_AUTH_TOKEN', None)
if TWILIO_AUTH_TOKEN is None:
    raise ImproperlyConfigured('Could not get Twilio auth token from envvars.')
# Get maintenance mode settings from environment
maintenance_mode = get_from_env(
    'DJANGO_MAINTENANCE_MODE_ACTIVE',
    default_value=False,
    fail_if_not_found=False
)
# DB "cool-down" factor for when a db conflict is detected. This
# factor scales a random number of seconds between zero and one.
DB_SLEEP_FACTOR = get_from_env(
    'DJANGO_DB_SLEEP_FACTOR',
    default_value=1.0,
    fail_if_not_found=False
)
# Fix the factor (str to float)
try:
    DB_SLEEP_FACTOR = float(DB_SLEEP_FACTOR)
except (TypeError, ValueError):
    DB_SLEEP_FACTOR = 1.0
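# Illustrative only (not executed here): the factor is meant to scale a
# uniform random back-off before retrying a conflicting DB operation, e.g.
#   import random, time
#   time.sleep(DB_SLEEP_FACTOR * random.random())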
if (isinstance(maintenance_mode, str) and
        maintenance_mode.lower() in ['true', 't', '1']):
    MAINTENANCE_MODE = True
MAINTENANCE_MODE_MESSAGE = \
    get_from_env('DJANGO_MAINTENANCE_MODE_MESSAGE', fail_if_not_found=False)
# Get info banner settings from environment
info_banner_enabled = get_from_env(
    'DJANGO_INFO_BANNER_ENABLED',
    default_value=False,
    fail_if_not_found=False
)
# fix for other booleans:
if (isinstance(info_banner_enabled, str) and
        info_banner_enabled.lower() in ['true', 't', '1']):
    INFO_BANNER_ENABLED = True
INFO_BANNER_MESSAGE = \
    get_from_env('DJANGO_INFO_BANNER_MESSAGE', fail_if_not_found=False)
# Get reports page boolean:
beta_reports_link = get_from_env(
    'DJANGO_BETA_REPORTS_LINK',
    default_value=False,
    fail_if_not_found=False
)
# fix for other booleans:
if (isinstance(beta_reports_link, str) and
        beta_reports_link.lower() in ['true', 't', '1']):
    BETA_REPORTS_LINK = True
# Get email settings from environment
EMAIL_BACKEND = 'django_ses.SESBackend'
AWS_SES_ACCESS_KEY_ID = get_from_env('AWS_SES_ACCESS_KEY_ID')
AWS_SES_SECRET_ACCESS_KEY = get_from_env('AWS_SES_SECRET_ACCESS_KEY')
AWS_SES_REGION_NAME = get_from_env('AWS_SES_REGION_NAME',
    default_value='us-west-2', fail_if_not_found=False)
AWS_SES_REGION_ENDPOINT = get_from_env('AWS_SES_REGION_ENDPOINT',
    default_value='email.us-west-2.amazonaws.com', fail_if_not_found=False)
AWS_SES_AUTO_THROTTLE = 0.25
ALERT_EMAIL_FROM = get_from_env('DJANGO_ALERT_EMAIL_FROM')
# memcached settings. this variable should be set in the deployment to the
# same name as the service name in the docker deployment.
DOCKER_MEMCACHED_ADDR = get_from_env('DJANGO_DOCKER_MEMCACHED_ADDR',
    default_value="memcached:11211",
    fail_if_not_found=False)
DOCKER_MEMCACHED_SECONDS = get_from_env('DJANGO_DOCKER_MEMCACHED_SECONDS',
    default_value="15",
    fail_if_not_found=False)
try:
    CACHE_MIDDLEWARE_SECONDS = int(DOCKER_MEMCACHED_SECONDS)
except (TypeError, ValueError):
    CACHE_MIDDLEWARE_SECONDS = 15
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.PyMemcacheCache',
        'LOCATION': DOCKER_MEMCACHED_ADDR,
        'OPTIONS': {
            'ignore_exc': True,
        }
    },
    # For API throttles
    'throttles': {
        'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
        'LOCATION': 'api_throttle_cache',  # Table name
    },
}
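# Illustrative usage sketch (not executed here): named caches are reached
# through django.core.cache.caches, e.g.
#   from django.core.cache import caches
#   caches['throttles'].set('some-key', 1, timeout=60)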
if ENABLE_REDIS_QUEUE:
    # For async alert follow-up:
    CACHES.update({"async_followup": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": f"redis://{REDIS_QUEUE_ADDRESS}:{REDIS_QUEUE_PORT}/{REDIS_QUEUE_DATABASE}",
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient",
        }
    }})
# Set queue backend for async django tasks:
# example django-redis connection
Q_CLUSTER = {
    'name': Q_CLUSTER_NAME,
    'label': Q_CLUSTER_LABEL,
    'retry': REDIS_QUEUE_RETRY,
    'timeout': REDIS_QUEUE_TIMEOUT,
    'workers': REDIS_QUEUE_WORKERS,
    'recycle': REDIS_QUEUE_RECYCLE,
    'django_redis': 'async_followup'
}
MIDDLEWARE = [
    'django.middleware.cache.UpdateCacheMiddleware',
    'core.middleware.maintenance.MaintenanceModeMiddleware',
    'events.middleware.PerformanceMiddleware',
    'core.middleware.accept.AcceptMiddleware',
    'core.middleware.api.ClientVersionMiddleware',
    'core.middleware.api.CliExceptionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'core.middleware.proxy.XForwardedForMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'user_sessions.middleware.SessionMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'ligoauth.middleware.ShibbolethWebAuthMiddleware',
    'ligoauth.middleware.ControlRoomMiddleware',
    'django.middleware.cache.FetchFromCacheMiddleware',
]
# Set up AWS X-ray patching if enabled
ENABLE_AWS_XRAY = (
    get_from_env("ENABLE_AWS_XRAY",
                 default_value="false", fail_if_not_found=False).lower()
    in ['true', 't', '1']
)
if ENABLE_AWS_XRAY:
    # AWS X-ray middleware must be first in the list to measure timing
    # accurately
    MIDDLEWARE.insert(0, 'aws_xray_sdk.ext.django.middleware.XRayMiddleware')
    # Include X-ray as an installed app in order to allow configuration beyond
    # the default
    INSTALLED_APPS.append('aws_xray_sdk.ext.django')
    # Settings for AWS X-ray
    XRAY_RECORDER = {
        'AWS_XRAY_DAEMON_ADDRESS': '127.0.0.1:2000',
        'AUTO_INSTRUMENT': True,
        'AWS_XRAY_CONTEXT_MISSING': 'LOG_ERROR',
        'PLUGINS': (),
        'SAMPLING': True,
        'SAMPLING_RULES': None,
        'AWS_XRAY_TRACING_NAME': 'GraceDB',
        'DYNAMIC_NAMING': None,
        'STREAMING_THRESHOLD': None,
    }
# Priority server settings ----------------------------------------------------
PRIORITY_SERVER = False
is_priority_server = get_from_env('DJANGO_PRIORITY_SERVER', None,
    fail_if_not_found=False)
if (isinstance(is_priority_server, str) and
        is_priority_server.lower() in ['true', 't']):
    PRIORITY_SERVER = True
# If priority server, only allow priority users to the API
if PRIORITY_SERVER:
    # Add custom permissions for the API
    default_perms = list(REST_FRAMEWORK['DEFAULT_PERMISSION_CLASSES'])
    default_perms = ['api.permissions.IsPriorityUser'] + default_perms
    REST_FRAMEWORK['DEFAULT_PERMISSION_CLASSES'] = tuple(default_perms)
# Database settings -----------------------------------------------------------
# New postgresql database
# Configured for the CI pipeline:
# https://docs.gitlab.com/ee/ci/services/postgres.html
DATABASES = {
    'default': {
        'NAME': db_name,
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'USER': db_user,
        'PASSWORD': db_password,
        'HOST': os.environ.get('DJANGO_DB_HOST', ''),
        'PORT': os.environ.get('DJANGO_DB_PORT', ''),
        'CONN_MAX_AGE': 3600,
        'TEST': {
            'NAME': 'gracedb_test_db',
        },
    },
}
# Main server "hostname" - a little hacky but OK
SERVER_HOSTNAME = SERVER_FQDN.split('.')[0]
# igwn_alert Overseer settings - get from environment
LVALERT_OVERSEER_INSTANCES = []
LVALERT_OVERSEER_INSTANCES.append(
    {
        "lvalert_server": igwn_alert_server,
        "listen_port": int(igwn_alert_overseer_port),
        "igwn_alert_group": igwn_alert_group,
        "username": igwn_alert_user,
        "password": igwn_alert_password,
    }
)
# Pull in remaining (phone/email) alert variables from
# the environment. Default to false.
SEND_PHONE_ALERTS = parse_envvar_bool(get_from_env(
    'SEND_PHONE_ALERTS',
    default_value='False',
    fail_if_not_found=False
))
SEND_EMAIL_ALERTS = parse_envvar_bool(get_from_env(
    'SEND_EMAIL_ALERTS',
    default_value='False',
    fail_if_not_found=False
))
SEND_MATTERMOST_ALERTS = parse_envvar_bool(get_from_env(
    'SEND_MATTERMOST_ALERTS',
    default_value='False',
    fail_if_not_found=False
))
INSTANCE_STUB = """
<li>Phone alerts (calls/SMS) are {0}</li>
<li>Email alerts are {1}</li>
<li><span class="text-monospace">igwn-alert</span> messages to <span class="text-monospace">{2}</span> are {3}</li>
"""
INSTANCE_LIST = INSTANCE_STUB.format(ENABLED[SEND_PHONE_ALERTS],
    ENABLED[SEND_EMAIL_ALERTS],
    LVALERT_OVERSEER_INSTANCES[0]['lvalert_server'],
    ENABLED[SEND_XMPP_ALERTS])
# Use full client certificate to authenticate
REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES'] = (
    'api.backends.GraceDbAuthenticatedAuthentication',
    'api.backends.GraceDbSciTokenAuthentication',
    'api.backends.GraceDbX509FullCertAuthentication',
    'api.backends.GraceDbBasicAuthentication',
)
# Update allowed hosts from environment variables -----------------------------
hosts_from_env = os.environ.get('DJANGO_ALLOWED_HOSTS', None)
if hosts_from_env is not None:
    ALLOWED_HOSTS += hosts_from_env.split(',')
ALLOWED_HOSTS += [SERVER_FQDN]
# Email settings - dependent on server hostname and FQDN ----------------------
SERVER_EMAIL = ALERT_EMAIL_FROM
ALERT_EMAIL_TO = []
ALERT_EMAIL_BCC = []
ALERT_TEST_EMAIL_FROM = ALERT_EMAIL_FROM
ALERT_TEST_EMAIL_TO = []
# EMBB email settings
EMBB_MAIL_ADDRESS = 'embb@{fqdn}.ligo.org'.format(fqdn=SERVER_FQDN)
EMBB_SMTP_SERVER = 'localhost'
EMBB_MAIL_ADMINS = [admin[1] for admin in ADMINS]
EMBB_IGNORE_ADDRESSES = ['Mailer-Daemon@{fqdn}'.format(fqdn=SERVER_FQDN)]
# Set up logging to stdout only
for key in LOGGING['loggers']:
    LOGGING['loggers'][key]['handlers'] = ['console']
LOGGING['loggers']['django.request']['handlers'].append('mail_admins')
# Turn off debug/error emails when in maintenance mode.
if MAINTENANCE_MODE:
    LOGGING['loggers']['django.request']['handlers'].remove('mail_admins')
# Set SciToken accepted audience to server FQDN
SCITOKEN_AUDIENCE = ["https://" + SERVER_FQDN, "https://" + LIGO_FQDN]
# Settings for a test/dev GraceDB instance running in a container
from .base import *
TIER = "dev"
CONFIG_NAME = "DEV"
# Debug settings
DEBUG = True
# Override EMBB email address
# TP (8 Aug 2017): not sure why?
EMBB_MAIL_ADDRESS = 'gracedb@{fqdn}'.format(fqdn=SERVER_FQDN)
# Add middleware
debug_middleware = 'debug_toolbar.middleware.DebugToolbarMiddleware'
MIDDLEWARE += [
    debug_middleware,
    #'silk.middleware.SilkyMiddleware',
    #'core.middleware.profiling.ProfileMiddleware',
    #'core.middleware.admin.AdminsOnlyMiddleware',
]
# Add to installed apps
INSTALLED_APPS += [
    'debug_toolbar',
    #'silk'
]
# Add testserver to ALLOWED_HOSTS
ALLOWED_HOSTS += ['testserver']
# Phone, email, and Mattermost alerts are set by deployment variables now;
# the old hard-coded overrides are kept below for reference:
#SEND_PHONE_ALERTS = False
#SEND_EMAIL_ALERTS = False
#SEND_MATTERMOST_ALERTS = True
# Settings for django-silk profiler
SILKY_AUTHENTICATION = True
SILKY_AUTHORISATION = True
if 'silk' in INSTALLED_APPS:
    # Needed to prevent RequestDataTooBig for files > 2.5 MB
    # when silk is being used. This setting is typically used to
    # prevent DOS attacks, so should not be changed in production.
    DATA_UPLOAD_MAX_MEMORY_SIZE = 20*(1024**2)
# Tuple of IPs which are marked as internal, useful for debugging.
# Tanner (5 Dec. 2017): DON'T CHANGE THIS! Django Debug Toolbar exposes
# some headers which we want to keep hidden. So to be safe, we only allow
# it to be used through this server. You need to configure a SOCKS proxy
# on your local machine to use DJDT (see admin docs).
INTERNAL_IPS = [
    INTERNAL_IP_ADDRESS,
]
# Set up Sentry for error logging
sentry_dsn = get_from_env('DJANGO_SENTRY_DSN', fail_if_not_found=False)
if sentry_dsn is not None:
    USE_SENTRY = True
    # Set up Sentry
    import sentry_sdk
    from sentry_sdk.integrations.django import DjangoIntegration
    sentry_sdk.init(
        environment='dev',
        dsn=sentry_dsn,
        integrations=[DjangoIntegration()],
        before_send=before_send,
    )
    # Turn off default admin error emails
    LOGGING['loggers']['django.request']['handlers'] = []
# Home page stuff
# Add sub-bullet with igwn-alert group:
group_sub_bullet = """<ul>
<li> Messages are sent to group: <span class="text-monospace"> {0} </span></li>
</ul>""".format(LVALERT_OVERSEER_INSTANCES[0]['igwn_alert_group'])
INSTANCE_LIST = INSTANCE_LIST + group_sub_bullet
INSTANCE_TITLE = 'GraceDB Development Server'
INSTANCE_INFO = """
<h5>Development Instance</h5>
<hr>
<p>
This GraceDB instance is designed for GraceDB maintainers to develop and
test in the AWS cloud architecture. There is <b>no guarantee</b> that the
behavior of this instance will mimic the production system at any time.
Events and associated data may change or be removed at any time.
</p>
<ul>
{}
<li>Only LIGO logins are provided (no login via InCommon or Google).</li>
</ul>
""".format(INSTANCE_LIST)