Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (1633)
Showing 2612 additions and 203 deletions
File: .gitignore

.git
*.swo
*.swp
*~
*.pyc
@@ -6,3 +7,6 @@ config/settings/local.py
docs/user_docs/build/*
docs/admin_docs/build/*
static_root/*
.pytest_cache
junit.xml
.coverage
File: .gitlab-ci.yml

---
image: docker:latest

variables:
  APT_CACHE_DIR: "${CI_PROJECT_DIR}/.cache/apt"
  DOCKER_DRIVER: overlay
  DOCKER_BRANCH: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_NAME
  DOCKER_LATEST: $CI_REGISTRY_IMAGE:latest
  PIP_CACHE_DIR: "${CI_PROJECT_DIR}/.cache/pip"

stages:
  - test
  - branch
  - latest

before_script:
  - docker login -u gitlab-ci-token -p $CI_JOB_TOKEN $CI_REGISTRY

include:
  # Container scanning
  - component: $CI_SERVER_FQDN/computing/gitlab/components/container-scanning/container-scanning@~latest
    inputs:
      job_name: branch_scan
  # Software scanning
  - component: $CI_SERVER_FQDN/computing/gitlab/components/sast/sast@~latest
    inputs:
      run_advanced_sast: true
  - component: $CI_SERVER_FQDN/computing/gitlab/components/secret-detection/secret-detection@~latest
  - component: $CI_SERVER_FQDN/computing/gitlab/components/python/dependency-scanning@~latest

# -- software scanning
# overwrite some settings for the scanning jobs
dependency_scanning:
  stage: test
  needs: []
  variables:
    DEBIAN_FRONTEND: "noninteractive"
  before_script:
    # install some underlying utilities using `apt` so that the dependency
    # scanner can use pip to install everything else
    - apt-get update -yqq
    - apt-get install -yqq
        libkrb5-dev
        libldap-dev
        libsasl2-dev

.sast-analyzer:
  stage: test
  needs: []
  before_script: []

secret_detection:
  stage: test
  needs: []
  before_script: []

# -- testing
.test: &test
  image: igwn/base:bookworm
  services:
    - postgres:15.6
    - memcached
  variables:
    AWS_SES_ACCESS_KEY_ID: "fake_aws_id"
    AWS_SES_SECRET_ACCESS_KEY: "fake_aws_key"
    DJANGO_ALERT_EMAIL_FROM: "fake_email"
    DJANGO_DB_HOST: "postgres"
    DJANGO_DB_PORT: "5432"
    DJANGO_DB_NAME: "fake_name"
    DJANGO_DB_USER: "runner"
    DJANGO_DB_PASSWORD: ""
    DJANGO_PRIMARY_FQDN: "fake_fqdn"
    DJANGO_SECRET_KEY: "fake_key"
    DJANGO_SETTINGS_MODULE: "config.settings.container.dev"
    DJANGO_TWILIO_ACCOUNT_SID: "fake_sid"
    DJANGO_TWILIO_AUTH_TOKEN: "fake_token"
    DJANGO_DOCKER_MEMCACHED_ADDR: "memcached:11211"
    EGAD_URL: "fake_url"
    EGAD_API_KEY: "fake_key"
    ENABLE_LVALERT_OVERSEER: "false"
    ENABLE_IGWN_OVERSEER: "false"
    LVALERT_OVERSEER_PORT: "2"
    LVALERT_SERVER: "fake_server"
    LVALERT_USER: "fake_user"
    LVALERT_PASSWORD: "fake_password"
    IGWN_ALERT_OVERSEER_PORT: "2"
    IGWN_ALERT_SERVER: "fake_server"
    IGWN_ALERT_USER: "fake_user"
    IGWN_ALERT_PASSWORD: "fake_password"
    POSTGRES_DB: "${DJANGO_DB_NAME}"
    POSTGRES_USER: "${DJANGO_DB_USER}"
    POSTGRES_PASSWORD: "${DJANGO_DB_PASSWORD}"
    POSTGRES_HOST_AUTH_METHOD: trust
  before_script:
    # create apt cache directory
    - mkdir -pv ${APT_CACHE_DIR}
    # set python version
    - PYTHON_VERSION="${CI_JOB_NAME##*:}"
    - PYTHON_MAJOR="${PYTHON_VERSION:0:1}"
    - PYTHON="python3"
    # install build requirements
    - apt-get -y install gnupg
    - sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
    - wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add -
    - apt-get -yqq update
    - apt-get -o dir::cache::archives="${APT_CACHE_DIR}" install -yqq
        git
        gnupg
        libldap2-dev
        libsasl2-dev
        libssl-dev
        libxml2-dev
        krb5-user
        libkrb5-dev
        libsasl2-modules-gssapi-mit
        swig
        pkg-config
        libpng-dev
        libfreetype6-dev
        libxslt-dev
        ${PYTHON}-pip
        postgresql-15
        postgresql-client-15
        libpq-dev
    # upgrade pip (requirement for lalsuite)
    - ${PYTHON} -m pip install --upgrade pip --break-system-packages
    # install everything else from pip
    - ${PYTHON} -m pip install -r requirements.txt --break-system-packages
    # create logs path required for tests
    - mkdir -pv ../logs/
    # list packages
    - ${PYTHON} -m pip list installed
  script:
    - PYTHONPATH=${PYTHONPATH}:${PWD}/gracedb ${PYTHON} -m pytest --cov-report term-missing --cov ./gracedb --junitxml=${CI_PROJECT_DIR}/junit.xml
  after_script:
    - rm -fvr ${PIP_CACHE_DIR}/log
  retry:
    max: 2
    when:
      - runner_system_failure
      - stuck_or_timeout_failure
  artifacts:
    reports:
      junit: junit.xml
  cache:
    key: "${CI_JOB_NAME}"
    paths:
      - .cache/pip
      - .cache/apt
  coverage: '/^TOTAL\s+.*\s+(\d+\.?\d*)%/'
  tags:
    - executor-docker

test:3.11:
  <<: *test

# -- docker
branch_image:
  stage: branch
  script:
    - docker build --pull -t $DOCKER_BRANCH .
    - docker push $DOCKER_BRANCH
  retry:
    max: 2
    when:
      - runner_system_failure
      - stuck_or_timeout_failure
  tags:
    - executor-docker

branch_scan:
  stage: branch
  needs: [branch_image]
  # default rules spawn a merge request pipeline, we don't want that
  rules:
    - if: $CI_COMMIT_BRANCH
  variables:
    GIT_STRATEGY: fetch
    # image to scan
    CS_IMAGE: "$DOCKER_BRANCH"
    # image to compare to
    CS_DEFAULT_BRANCH_IMAGE: "$CI_REGISTRY/computing/gitlab/server:latest"
    # path to Dockerfile for remediation
    CS_DOCKERFILE_PATH: "Dockerfile"
  before_script: []

latest_image:
  stage: latest
  dependencies:
    - branch_image
  script:
    - docker pull $DOCKER_BRANCH
    - docker tag $DOCKER_BRANCH $DOCKER_LATEST
    - docker push $DOCKER_LATEST
  retry:
    max: 2
    when:
      - runner_system_failure
      - stuck_or_timeout_failure
  tags:
    - executor-docker
## Description of problem
<!--
Describe in detail what you are trying to do and what the result is.
Exact timestamps, error tracebacks, and screenshots (if applicable) are very helpful.
-->
## Expected behavior
<!-- What do you expect to happen instead? -->
## Steps to reproduce
<!-- Step-by-step procedure for reproducing the issue -->
## Context/environment
<!--
Describe the environment you are working in:
* If using the ligo-gracedb client package, which version?
* Your operating system
* Your browser (web interface issues only)
* If you are experiencing this problem while working on a LIGO or Virgo computing cluster, which cluster are you using?
-->
## Suggested solutions
<!-- Any ideas for how to resolve this problem? -->
## Description of feature request
<!--
Describe your feature request!
Is it a web interface change? Some underlying feature? An API resource?
The more detail you can provide, the better.
-->
## Use cases
<!-- List some specific cases where this feature will be useful -->
## Benefits
<!-- Describe the benefits of adding this feature -->
## Drawbacks
<!--
Are there any drawbacks to adding this feature?
Can you think of any ways in which this will negatively affect the service for any set of users?
-->
## Suggested solutions
<!-- Do you have any ideas for how to implement this feature? -->
File: Dockerfile

FROM debian:bookworm
LABEL name="LIGO GraceDB Django application" \
maintainer="alexander.pace@ligo.org" \
date="20240306"
ARG SETTINGS_MODULE="config.settings.container.dev"
COPY docker/SWITCHaai-swdistrib.gpg /etc/apt/trusted.gpg.d
COPY docker/backports.pref /etc/apt/preferences.d
RUN apt-get update && \
apt-get -y install gnupg curl
RUN echo 'deb http://deb.debian.org/debian bookworm-backports main' > /etc/apt/sources.list.d/backports.list
RUN echo 'deb http://apt.postgresql.org/pub/repos/apt bookworm-pgdg main' > /etc/apt/sources.list.d/pgdg.list
RUN echo 'deb [trusted=yes] https://hypatia.aei.mpg.de/lsc-amd64-bookworm ./' > /etc/apt/sources.list.d/lscsoft.list
RUN curl https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add -
RUN apt-get update && \
apt-get --assume-yes upgrade && \
apt-get install --install-recommends --assume-yes \
apache2 \
emacs-nox \
gcc \
git \
krb5-user \
libkrb5-dev \
libapache2-mod-shib \
libapache2-mod-xsendfile \
libldap2-dev \
libldap-2.5-0 \
libsasl2-dev \
libsasl2-modules-gssapi-mit \
libxml2-dev \
pkg-config \
libpng-dev \
libpq-dev \
libfreetype6-dev \
libxslt-dev \
libsqlite3-dev \
ligo-ca-certs \
osg-ca-certs \
php \
php8.2-pgsql \
php8.2-mbstring \
postgresql-client-15 \
python3 \
python3-dev \
python3-libxml2 \
python3-pip \
procps \
redis \
shibboleth-sp-common \
shibboleth-sp-utils \
libssl-dev \
swig \
htop \
telnet \
vim && \
apt-get clean && \
curl -sL https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - && \
apt-get update && apt-get install --assume-yes yarn
# Install AWS X-ray daemon
RUN curl -O https://s3.us-east-2.amazonaws.com/aws-xray-assets.us-east-2/xray-daemon/aws-xray-daemon-3.x.deb
RUN dpkg -i aws-xray-daemon-3.x.deb
RUN rm aws-xray-daemon-3.x.deb
# Docker scripts:
COPY docker/entrypoint /usr/local/bin/entrypoint
COPY docker/cleanup /usr/local/bin/cleanup
# Supervisord configs:
COPY docker/supervisord.conf /etc/supervisor/supervisord.conf
COPY docker/supervisord-apache2.conf /etc/supervisor/conf.d/apache2.conf
COPY docker/supervisord-igwn-alert-overseer.conf /etc/supervisor/conf.d/igwn-overseer.conf
COPY docker/supervisord-shibd.conf /etc/supervisor/conf.d/shibd.conf
COPY docker/supervisord-aws-xray.conf /etc/supervisor/conf.d/aws-xray.conf
COPY docker/supervisord-qcluster.conf /etc/supervisor/conf.d/qcluster.conf
# Apache configs:
COPY docker/apache-config /etc/apache2/sites-available/gracedb.conf
# Enable mpm_event module:
RUN rm /etc/apache2/mods-enabled/mpm_prefork.*
RUN rm /etc/apache2/mods-enabled/php8.2.*
RUN cp /etc/apache2/mods-available/mpm_event.* /etc/apache2/mods-enabled/
# Shibboleth configs and certs:
COPY docker/shibboleth-ds /etc/shibboleth-ds
COPY docker/login.ligo.org.cert.LIGOCA.pem /etc/shibboleth/login.ligo.org.cert.LIGOCA.pem
COPY docker/inc-md-cert.pem /etc/shibboleth/inc-md-cert.pem
COPY docker/check_shibboleth_status /usr/local/bin/check_shibboleth_status
RUN a2dissite 000-default.conf && \
a2ensite gracedb.conf && \
a2enmod headers proxy proxy_http rewrite xsendfile
# this line is unfortunate because "." updates for nearly any change to the
# repository and therefore docker build rarely caches the steps below
ADD . /app/gracedb_project
# install gracedb application itself
WORKDIR /app/gracedb_project
RUN pip3 install --upgrade pip --break-system-packages
RUN pip3 install -r requirements.txt --break-system-packages
# install supervisor from pip
RUN pip3 install supervisor --break-system-packages
# Give pip-installed packages priority over distribution packages
ENV PYTHONPATH /usr/local/lib/python3.11/dist-packages:$PYTHONPATH
ENV ENABLE_SHIBD false
ENV ENABLE_OVERSEER true
ENV VIRTUAL_ENV /dummy/
# Expose port and run Gunicorn
EXPOSE 8000
# Generate documentation
WORKDIR /app/gracedb_project/docs/user_docs
RUN sphinx-build -b html source build
WORKDIR /app/gracedb_project/docs/admin_docs
RUN sphinx-build -b html source build
RUN mkdir /app/logs /app/project_data
WORKDIR /app/gracedb_project
RUN DJANGO_SETTINGS_MODULE=${SETTINGS_MODULE} \
DJANGO_DB_NAME=fake_name \
DJANGO_DB_USER=fake_user \
DJANGO_DB_PASSWORD=fake_password \
DJANGO_SECRET_KEY=fake_key \
DJANGO_PRIMARY_FQDN=fake_fqdn \
DJANGO_ALERT_EMAIL_FROM=fake_email \
EGAD_URL=fake_url \
EGAD_API_KEY=fake_key \
LVALERT_USER=fake_user \
LVALERT_PASSWORD=fake_password \
LVALERT_SERVER=fake_server \
LVALERT_OVERSEER_PORT=2 \
IGWN_ALERT_USER=fake_user \
IGWN_ALERT_PASSWORD=fake_password \
IGWN_ALERT_SERVER=fake_server \
IGWN_ALERT_OVERSEER_PORT=2 \
IGWN_ALERT_GROUP=fake_group \
DJANGO_TWILIO_ACCOUNT_SID=fake_sid \
DJANGO_TWILIO_AUTH_TOKEN=fake_token \
DJANGO_AWS_ELASTICACHE_ADDR=fake_address:11211 \
AWS_SES_ACCESS_KEY_ID=fake_aws_id \
AWS_SES_SECRET_ACCESS_KEY=fake_aws_key \
python3 manage.py collectstatic --noinput
RUN rm -rf /app/logs/* /app/project_data/*
RUN useradd -M -u 50001 -g www-data -s /bin/false gracedb
#RUN groupadd -r xray
#RUN useradd -M -u 50002 -g xray -s /bin/false xray
# set secure file/directory permissions. In particular, ADD command at
# beginning of recipe inherits umask of user running the build
RUN chmod 0755 /usr/local/bin/entrypoint && \
chmod 0755 /usr/local/bin/cleanup && \
chown gracedb:www-data /app/logs /app/project_data && \
chmod 0750 /app/logs /app/project_data && \
find /app/gracedb_project -type d -exec chmod 0755 {} + && \
find /app/gracedb_project -type f -exec chmod 0644 {} +
# create and set scitoken key cache directory
RUN mkdir /app/scitokens_cache && \
chown gracedb:www-data /app/scitokens_cache && \
chmod 0750 /app/scitokens_cache
ENV XDG_CACHE_HOME /app/scitokens_cache
# patch voeventparse for python3.10+:
RUN sed -i 's/collections.Iterable/collections.abc.Iterable/g' /usr/local/lib/python3.11/dist-packages/voeventparse/voevent.py
# Remove packages that expose security vulnerabilities and close out.
# Edit: zlib1g* can't be removed because of a Pre-Depends error
RUN apt-get --assume-yes --purge autoremove wget libaom3 node-ip
RUN apt-get clean
ENTRYPOINT [ "/usr/local/bin/entrypoint" ]
CMD ["/usr/local/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"]
[Collapsed file diff not shown.]
File: package.json

{
"name": "gracedb",
"dependencies": {
"dgrid": "0.4.0",
"dijit": "1.10.4",
"dojox": "1.10.4",
"jquery": "3.2.1",
"moment-timezone": "0.5.0",
"moment": "2.11.1"
"dojox": "1.10.4"
}
}
@@ -6,6 +6,14 @@ from os.path import abspath, dirname, join
import os
import sys
import multiprocessing

# needed by get_from_env below
from django.core.exceptions import ImproperlyConfigured
# Useful function for getting environment variables
def get_from_env(envvar, default_value=None, fail_if_not_found=True):
    value = os.environ.get(envvar, default_value)
    if (value == default_value and fail_if_not_found):
        raise ImproperlyConfigured(
            'Could not get environment variable {0}'.format(envvar))
    return value
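# Illustration of the helper's semantics (hypothetical variable names): with
# fail_if_not_found=False a missing variable silently yields the default,
# while the one-argument form raises ImproperlyConfigured when unset. Note
# the quirk that it also raises when the variable is explicitly set to a
# value equal to default_value while fail_if_not_found is True.
#
#   example_port = get_from_env('EXAMPLE_PORT', default_value='8080',
#                               fail_if_not_found=False)  # -> '8080' if unset
#   example_key = get_from_env('EXAMPLE_SECRET')          # raises if unset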
# Parameters
GUNICORN_PORT = 8080
LOG_DIR = abspath(join(dirname(__file__), "..", "..", "logs"))
@@ -14,28 +22,143 @@ LOG_DIR = abspath(join(dirname(__file__), "..", "..", "logs"))
# Bind to localhost on specified port
bind = "127.0.0.1:{port}".format(port=GUNICORN_PORT)
# Number of workers -----------------------------------------------------------
# 2*CPU + 1 (recommendation from Gunicorn documentation),
# bumped to 3*CPU + 1 after testing. Maybe increase this number in the cloud
# deployment?
workers = int(get_from_env('GUNICORN_WORKERS',
                           default_value=multiprocessing.cpu_count()*3 + 1,
                           fail_if_not_found=False))
# NOTE: it was found in extensive testing that threads > 1 are prone
# to connection lockups. Leave this at 1 for safety until there are
# fixes in gunicorn.
# Why not sync? The sync worker is prone to timeout for long requests,
# like big queries. But gthread sends a heartbeat back to the main worker
# to keep it alive. We could just set the timeout to a really large number
# which would keep the long requests stable, but if there is a stuck worker,
# then they would be subject to that really long timeout. It's a tradeoff.
# All this goes away with async workers, but as of 3.2, django's ORM does not
# support async, and testing failed pretty catastrophically and unreliably.
threads = int(get_from_env('GUNICORN_THREADS',
default_value=1,
fail_if_not_found=False))
# Worker connections. Limit the number of connections between apache<-->gunicorn
worker_connections = workers * threads
# Worker class ----------------------------------------------------------------
# sync by default, generally safe and low-resource:
# https://docs.gunicorn.org/en/stable/design.html#sync-workers
worker_class = get_from_env('GUNICORN_WORKER_CLASS',
                            default_value='gthread',
                            fail_if_not_found=False)
# Timeout ---------------------------------------------------------------------
# If not specified, the timeout default is 30 seconds:
# https://gunicorn-docs.readthedocs.io/en/stable/settings.html#worker-processes
timeout = get_from_env('GUNICORN_TIMEOUT',
default_value=30,
fail_if_not_found=False)
graceful_timeout = timeout
# max_requests settings -------------------------------------------------------
# The maximum number of requests a worker will process before restarting.
# May be useful if we have memory leak problems.
# The jitter is drawn from a uniform distribution:
# randint(0, max_requests_jitter)
#max_requests = 0
#max_requests_jitter = 0
max_requests = get_from_env('GUNICORN_MAX_REQUESTS',
default_value=5000,
fail_if_not_found=False)
max_requests_jitter = get_from_env('GUNICORN_MAX_REQUESTS_JITTER',
default_value=250,
fail_if_not_found=False)
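# Net effect (illustrative): each worker restarts after serving
# max_requests + randint(0, max_requests_jitter) requests, i.e. somewhere in
# [5000, 5250] with the defaults above, so the workers don't all recycle at
# the same moment.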
# keepalive -------------------------------------------------------------------
# The number of seconds to wait for requests on a Keep-Alive connection.
# Generally set in the 1-5 seconds range for servers with direct connection
# to the client (e.g. when you don’t have separate load balancer).
# When Gunicorn is deployed behind a load balancer, it often makes sense to set
# this to a higher value.
# NOTE: force gunicorn to close its connection to apache after each request.
# This has been the source of so many 502's. Basically in periods of high activity,
# gunicorn would hold on to open sockets with apache, and just deadlock itself:
# https://github.com/benoitc/gunicorn/issues/2917
keepalive = get_from_env('GUNICORN_KEEPALIVE',
default_value=0,
fail_if_not_found=False)
# preload_app -----------------------------------------------------------------
# Load application code before the worker processes are forked.
# By preloading an application you can save some RAM resources as well as speed
# up server boot times. Although, if you defer application loading to each
# worker process, you can reload your application code easily by restarting
# workers.
# If you aren't going to make use of on-the-fly reloading, consider preloading
# your application code to reduce its memory footprint. So, turn this on in
# production. The default here is True; override GUNICORN_PRELOAD_APP for
# development, but **KEEP THIS TRUE FOR AWS DEPLOYMENT**
preload_app = get_from_env('GUNICORN_PRELOAD_APP',
default_value=True,
fail_if_not_found=False)
# Logging ---------------------------------------------------------------------
# Access log
accesslog = join(LOG_DIR, "gunicorn_access.log")
access_log_format = ('GUNICORN | %(h)s %(l)s %(u)s %(t)s '
                     '"%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"')
# Error log
errorlog = join(LOG_DIR, "gunicorn_error.log")
# debug logging doesn't provide actual information, and 'info' will
# eliminate the "Connection closed." messages while still giving info
# about worker restarts.
loglevel = 'info'
capture_output = True
#forwarded_allow_ips = '127.0.0.1'
#proxy_allow_ips = '127.0.0.1'
# using /dev/shm/ instead of /tmp for the temporary worker directory. See:
# https://pythonspeed.com/articles/gunicorn-in-docker/
# “in AWS an EBS root instance volume may sometimes hang for half a minute
# and during this time Gunicorn workers may completely block.”
worker_tmp_dir='/dev/shm'
# Override logger class to modify error format
from gunicorn.glogging import Logger
class CustomLogger(Logger):
    error_fmt = 'GUNICORN | ' + Logger.error_fmt

logger_class = CustomLogger

def post_fork(server, worker):
    server.log.info("Worker spawned (pid: %s)", worker.pid)

def pre_fork(server, worker):
    pass

def pre_exec(server):
    server.log.info("Forked child, re-executing.")

def when_ready(server):
    server.log.info("Server is ready. Spawning workers")

def worker_int(worker):
    worker.log.info("worker received INT or QUIT signal")

def worker_abort(worker):
    worker.log.info("worker received SIGABRT signal")
"""
Django settings for gracedb project.
Environment variable DJANGO_SETTINGS_MODULE should be set for a
given instance to determine the settings to run.
Description of settings files:
__init__.py - this file; determines whether to use production or test
settings for this GraceDB instance.
local.py - generated by Puppet, contains paths to useful directories,
virtualenv, and other settings.
secret.py - generated by Puppet, contains secret settings like the
database password, API keys, etc.
base.py - contains the basic settings for running a GraceDB server.
These can be overridden in production.py or test.py. Uses
settings in local.py and secret.py.
production.py - defines settings for a production GraceDB instance. Imports
base.py settings and overrides/adds settings.
test.py - defines settings for a test/development GraceDB instance.
Imports base.py settings and overrides/adds settings.
"""
Description of settings:
BASE SETTINGS - not to be used as a full settings configuration
---------------------------------------------------------------
base.py - contains the basic settings for running a GraceDB
server.
secret.py - generated by Puppet, contains secret settings like the
database password, API keys, etc. For use with VM-based
deployments. DO NOT EDIT.
Virtual machine deployments
---------------------------
vm/production.py - settings for a VM-based production instance deployed
with Puppet.
vm/dev.py - settings for Va M-based production instance deployed
with Puppet.
# Import either production or test settings
try:
from .local import IS_PRODUCTION_SERVER
if IS_PRODUCTION_SERVER:
settings_file = 'production'
else:
# If ./local.py is not found or IS_PRODUCTION_SERVER
# is not defined, use test settings
settings_file = 'test'
except NameError:
settings_file = 'test'
settings_module = __import__(settings_file, globals(), locals())
Container-based deployments
---------------------------
NOTE: many settings are imported from environment variables for
this deployment type!
# Put these settings into the local scope
for setting in dir(settings_module):
# Only add uppercase variables. We use lowercase as
# temporary variables in the settings files.
if setting == setting.upper():
locals()[setting] = getattr(settings_module, setting)
container/production.py - settings for a container-based deployment
of a production instance.
container/dev.py - settings for a container-based deployment of a
development instance.
"""
[Collapsed file diff not shown.]
# For running a containerized version of the service that gets secrets
# from environment variables. Builds on base.py settings.
import os
from django.core.exceptions import ImproperlyConfigured
from ..base import *
# Get required variables from environment variables ---------------------------
# Get database user from environment and check
db_user = os.environ.get('DJANGO_DB_USER', None)
if db_user is None:
    raise ImproperlyConfigured('Could not get database user from envvars.')
# Get database password from environment and check
db_password = os.environ.get('DJANGO_DB_PASSWORD', None)
if db_password is None:
    raise ImproperlyConfigured('Could not get database password from envvars.')
# Get database name from environment and check
db_name = os.environ.get('DJANGO_DB_NAME', None)
if db_name is None:
    raise ImproperlyConfigured('Could not get database name from envvars.')
# Secret key for a Django installation
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', None)
if SECRET_KEY is None:
    raise ImproperlyConfigured('Could not get secret key from envvars.')
# Get primary FQDN
SERVER_FQDN = os.environ.get('DJANGO_PRIMARY_FQDN', None)
if SERVER_FQDN is None:
    raise ImproperlyConfigured('Could not get FQDN from envvars.')
LIGO_FQDN = SERVER_FQDN
## EGAD (External GraceDB Alert Dispatcher) configuration
ENABLE_EGAD_EMAIL = parse_envvar_bool(
get_from_env('ENABLE_EGAD_EMAIL',
fail_if_not_found=False, default_value="false")
)
ENABLE_EGAD_KAFKA = parse_envvar_bool(
get_from_env('ENABLE_EGAD_KAFKA',
fail_if_not_found=False, default_value="false")
)
ENABLE_EGAD_MATTERMOST = parse_envvar_bool(
get_from_env('ENABLE_EGAD_MATTERMOST',
fail_if_not_found=False, default_value="false")
)
ENABLE_EGAD_PHONE = parse_envvar_bool(
get_from_env('ENABLE_EGAD_PHONE',
fail_if_not_found=False, default_value="false")
)
ENABLE_EGAD = (
ENABLE_EGAD_EMAIL or ENABLE_EGAD_KAFKA
or ENABLE_EGAD_MATTERMOST or ENABLE_EGAD_PHONE
)
EGAD_URL = get_from_env('EGAD_URL',
fail_if_not_found=ENABLE_EGAD, default_value=None)
EGAD_API_KEY = get_from_env('EGAD_API_KEY',
fail_if_not_found=ENABLE_EGAD, default_value=None)
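# parse_envvar_bool comes in through the star-import from ..base; its
# implementation is not part of this diff. A minimal sketch of what such a
# helper presumably looks like (assumption, not the actual base.py code):
#
#   def parse_envvar_bool(value):
#       # map common truthy strings to True, everything else to False
#       return str(value).strip().lower() in ('true', 't', '1')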
# Turn LVAlert on/off from the environment. Adding this
# to turn lvalerts on/off from docker compose/update instead
# of having to rebuild containers. If the environment variable
# isn't set, then revert to the hardwired behavior:
xmpp_env_var = get_from_env('SEND_LVALERT_XMPP_ALERTS',
default_value=SEND_XMPP_ALERTS,
fail_if_not_found=False)
# Fix for other boolean values:
if (isinstance(xmpp_env_var, str) and
        xmpp_env_var.lower() in ['true', 't', '1']):
    SEND_XMPP_ALERTS = True
elif (isinstance(xmpp_env_var, str) and
        xmpp_env_var.lower() in ['false', 'f', '0']):
    SEND_XMPP_ALERTS = False
else:
    SEND_XMPP_ALERTS = True
# Get igwn_alert_overseer status:
igwn_alert_on = get_from_env(
'ENABLE_IGWN_OVERSEER',
default_value=False,
fail_if_not_found=False
)
if (isinstance(igwn_alert_on, str) and
        igwn_alert_on.lower() in ['true', 't', '1']):
    igwn_alert_overseer_on = True
else:
    igwn_alert_overseer_on = False
# Get igwn-alert server
igwn_alert_server = os.environ.get('IGWN_ALERT_SERVER', None)
if igwn_alert_server is None:
    raise ImproperlyConfigured('Could not get igwn-alert server from envvars.')
# Get igwn-alert Overseer listen port
igwn_alert_overseer_port = os.environ.get('IGWN_ALERT_OVERSEER_PORT', None)
if igwn_alert_overseer_port is None:
    raise ImproperlyConfigured('Could not get igwn-alert overseer port '
                               'from envvars.')
# Get igwn-alert group from environment:
igwn_alert_group = os.environ.get('IGWN_ALERT_GROUP', DEFAULT_IGWN_ALERT_GROUP)
# Get igwn-alert username
igwn_alert_user = os.environ.get('IGWN_ALERT_USER', None)
if igwn_alert_user is None:
    raise ImproperlyConfigured('Could not get igwn-alert username from envvars.')
# Get igwn-alert password
igwn_alert_password = os.environ.get('IGWN_ALERT_PASSWORD', None)
if igwn_alert_password is None:
    raise ImproperlyConfigured('Could not get igwn-alert password from envvars.')
# Get Twilio account information from environment
TWILIO_ACCOUNT_SID = os.environ.get('DJANGO_TWILIO_ACCOUNT_SID', None)
if TWILIO_ACCOUNT_SID is None:
    raise ImproperlyConfigured('Could not get Twilio acct SID from envvars.')
TWILIO_AUTH_TOKEN = os.environ.get('DJANGO_TWILIO_AUTH_TOKEN', None)
if TWILIO_AUTH_TOKEN is None:
    raise ImproperlyConfigured('Could not get Twilio auth token from envvars.')
# Get maintenance mode settings from environment
maintenance_mode = get_from_env(
'DJANGO_MAINTENANCE_MODE_ACTIVE',
default_value=False,
fail_if_not_found=False
)
# DB "cool-down" factor for when a db conflict is detected. This
# factor scales a random number of seconds between zero and one.
DB_SLEEP_FACTOR = get_from_env(
'DJANGO_DB_SLEEP_FACTOR',
default_value=1.0,
fail_if_not_found=False
)
# Fix the factor (str to float)
try:
    DB_SLEEP_FACTOR = float(DB_SLEEP_FACTOR)
except (TypeError, ValueError):
    DB_SLEEP_FACTOR = 1.0
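# How the factor is presumably consumed elsewhere in the codebase (sketch;
# the consuming code is not part of this diff): scale a uniform random
# back-off before retrying a conflicting database write.
#
#   import random, time
#   time.sleep(random.random() * DB_SLEEP_FACTOR)  # 0..DB_SLEEP_FACTOR seconds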
if (isinstance(maintenance_mode, str) and
        maintenance_mode.lower() in ['true', 't', '1']):
    MAINTENANCE_MODE = True
MAINTENANCE_MODE_MESSAGE = \
    get_from_env('DJANGO_MAINTENANCE_MODE_MESSAGE', fail_if_not_found=False)
# Get info banner settings from environment
info_banner_enabled = get_from_env(
'DJANGO_INFO_BANNER_ENABLED',
default_value=False,
fail_if_not_found=False
)
# fix for other booleans:
if (isinstance(info_banner_enabled, str) and
        info_banner_enabled.lower() in ['true', 't', '1']):
    INFO_BANNER_ENABLED = True
INFO_BANNER_MESSAGE = \
    get_from_env('DJANGO_INFO_BANNER_MESSAGE', fail_if_not_found=False)
# Get reports page boolean:
beta_reports_link = get_from_env(
'DJANGO_BETA_REPORTS_LINK',
default_value=False,
fail_if_not_found=False
)
# fix for other booleans:
if (isinstance(beta_reports_link, str) and
        beta_reports_link.lower() in ['true', 't', '1']):
    BETA_REPORTS_LINK = True
# Get email settings from environment
EMAIL_BACKEND = 'django_ses.SESBackend'
AWS_SES_ACCESS_KEY_ID = get_from_env('AWS_SES_ACCESS_KEY_ID')
AWS_SES_SECRET_ACCESS_KEY = get_from_env('AWS_SES_SECRET_ACCESS_KEY')
AWS_SES_REGION_NAME = get_from_env('AWS_SES_REGION_NAME',
default_value='us-west-2', fail_if_not_found=False)
AWS_SES_REGION_ENDPOINT = get_from_env('AWS_SES_REGION_ENDPOINT',
default_value='email.us-west-2.amazonaws.com', fail_if_not_found=False)
AWS_SES_AUTO_THROTTLE = 0.25
ALERT_EMAIL_FROM = get_from_env('DJANGO_ALERT_EMAIL_FROM')
# memcached settings. this variable should be set in the deployment to the
# same name as the service name in the docker deployment.
DOCKER_MEMCACHED_ADDR = get_from_env('DJANGO_DOCKER_MEMCACHED_ADDR',
default_value="memcached:11211",
fail_if_not_found=False)
DOCKER_MEMCACHED_SECONDS = get_from_env('DJANGO_DOCKER_MEMCACHED_SECONDS',
default_value="15",
fail_if_not_found=False)
try:
    CACHE_MIDDLEWARE_SECONDS = int(DOCKER_MEMCACHED_SECONDS)
except (TypeError, ValueError):
    CACHE_MIDDLEWARE_SECONDS = 15
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.PyMemcacheCache',
'LOCATION': DOCKER_MEMCACHED_ADDR,
'OPTIONS': {
'ignore_exc': True,
}
},
# For API throttles
'throttles': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'api_throttle_cache', # Table name
},
}
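# Both named caches are reachable through Django's standard cache API,
# e.g. (illustrative keys/values):
#
#   from django.core.cache import caches
#   caches['throttles'].set('example-key', 1, timeout=60)  # DB-backed table
#   caches['default'].get('some-key')  # memcached-backed, ignore_exc on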
if ENABLE_REDIS_QUEUE:
    # For async alert follow-up:
    CACHES.update({"async_followup": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": f"redis://{REDIS_QUEUE_ADDRESS}:{REDIS_QUEUE_PORT}/{REDIS_QUEUE_DATABASE}",
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient",
        }
    }})
# Set queue backend for async django tasks:
# example django-redis connection
Q_CLUSTER = {
'name': Q_CLUSTER_NAME,
'label': Q_CLUSTER_LABEL,
'retry': REDIS_QUEUE_RETRY,
'timeout': REDIS_QUEUE_TIMEOUT,
'workers': REDIS_QUEUE_WORKERS,
'recycle': REDIS_QUEUE_RECYCLE,
'django_redis': 'async_followup'
}
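# With this Q_CLUSTER configuration, asynchronous work is enqueued through
# django-q's task API, e.g. (illustrative; the dotted task path is
# hypothetical):
#
#   from django_q.tasks import async_task
#   async_task('events.tasks.example_followup', 'G123456')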
MIDDLEWARE = [
'django.middleware.cache.UpdateCacheMiddleware',
'core.middleware.maintenance.MaintenanceModeMiddleware',
'events.middleware.PerformanceMiddleware',
'core.middleware.accept.AcceptMiddleware',
'core.middleware.api.ClientVersionMiddleware',
'core.middleware.api.CliExceptionMiddleware',
'django.middleware.common.CommonMiddleware',
'core.middleware.proxy.XForwardedForMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'user_sessions.middleware.SessionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'ligoauth.middleware.ShibbolethWebAuthMiddleware',
'ligoauth.middleware.ControlRoomMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
]
# Set up AWS X-ray patching if enabled
ENABLE_AWS_XRAY = (
get_from_env("ENABLE_AWS_XRAY",
default_value="false", fail_if_not_found=False).lower()
in ['true', 't', '1']
)
if ENABLE_AWS_XRAY:
    # AWS X-ray middleware must be first in the list to measure timing
    # accurately
    MIDDLEWARE.insert(0, 'aws_xray_sdk.ext.django.middleware.XRayMiddleware')
    # Include X-ray as an installed app in order to allow configuration beyond
    # the default
    INSTALLED_APPS.append('aws_xray_sdk.ext.django')
    # Settings for AWS X-ray
    XRAY_RECORDER = {
        'AWS_XRAY_DAEMON_ADDRESS': '127.0.0.1:2000',
        'AUTO_INSTRUMENT': True,
        'AWS_XRAY_CONTEXT_MISSING': 'LOG_ERROR',
        'PLUGINS': (),
        'SAMPLING': True,
        'SAMPLING_RULES': None,
        'AWS_XRAY_TRACING_NAME': 'GraceDB',
        'DYNAMIC_NAMING': None,
        'STREAMING_THRESHOLD': None,
    }
# Priority server settings ----------------------------------------------------
PRIORITY_SERVER = False
is_priority_server = get_from_env('DJANGO_PRIORITY_SERVER', None,
                                  fail_if_not_found=False)
if (isinstance(is_priority_server, str) and
        is_priority_server.lower() in ['true', 't']):
    PRIORITY_SERVER = True
# If priority server, only allow priority users to the API
if PRIORITY_SERVER:
    # Add custom permissions for the API
    default_perms = list(REST_FRAMEWORK['DEFAULT_PERMISSION_CLASSES'])
    default_perms = ['api.permissions.IsPriorityUser'] + default_perms
    REST_FRAMEWORK['DEFAULT_PERMISSION_CLASSES'] = tuple(default_perms)
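# 'api.permissions.IsPriorityUser' lives in the application code and is not
# part of this diff. A rough sketch of the shape such a DRF permission class
# presumably has (assumption; the group name is hypothetical):
#
#   from rest_framework.permissions import BasePermission
#
#   class IsPriorityUser(BasePermission):
#       def has_permission(self, request, view):
#           return request.user.groups.filter(name='priority_users').exists()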
# Database settings -----------------------------------------------------------
# New postgresql database
# Configured for the CI pipeline:
# https://docs.gitlab.com/ee/ci/services/postgres.html
DATABASES = {
'default' : {
'NAME': db_name,
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'USER': db_user,
'PASSWORD': db_password,
'HOST': os.environ.get('DJANGO_DB_HOST', ''),
'PORT': os.environ.get('DJANGO_DB_PORT', ''),
'CONN_MAX_AGE': 3600,
'TEST' : {
'NAME': 'gracedb_test_db',
},
},
}
# Main server "hostname" - a little hacky but OK
SERVER_HOSTNAME = SERVER_FQDN.split('.')[0]
# igwn_alert Overseer settings - get from environment
LVALERT_OVERSEER_INSTANCES = []
LVALERT_OVERSEER_INSTANCES.append(
{
"lvalert_server": igwn_alert_server,
"listen_port": int(igwn_alert_overseer_port),
"igwn_alert_group": igwn_alert_group,
"username": igwn_alert_user,
"password": igwn_alert_password,
}
)
# Pull in remaining (phone/email) alert variables from
# the environment. Default to false.
SEND_PHONE_ALERTS = parse_envvar_bool(get_from_env(
'SEND_PHONE_ALERTS',
default_value='False',
fail_if_not_found=False
))
SEND_EMAIL_ALERTS = parse_envvar_bool(get_from_env(
'SEND_EMAIL_ALERTS',
default_value='False',
fail_if_not_found=False
))
SEND_MATTERMOST_ALERTS = parse_envvar_bool(get_from_env(
'SEND_MATTERMOST_ALERTS',
default_value='False',
fail_if_not_found=False
))
INSTANCE_STUB = """
<li>Phone alerts (calls/SMS) are {0}</li>
<li>Email alerts are {1}</li>
<li><span class="text-monospace">igwn-alert</span> messages to <span class="text-monospace">{2}</span> are {3}</li>
"""
INSTANCE_LIST = INSTANCE_STUB.format(ENABLED[SEND_PHONE_ALERTS],
ENABLED[SEND_EMAIL_ALERTS],
LVALERT_OVERSEER_INSTANCES[0]['lvalert_server'],
ENABLED[SEND_XMPP_ALERTS])
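# ENABLED also comes from base.py via the star-import; presumably a simple
# bool -> display-string map along the lines of (assumption):
#
#   ENABLED = {True: 'enabled', False: 'disabled'}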
# Use full client certificate to authenticate
REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES'] = (
'api.backends.GraceDbAuthenticatedAuthentication',
'api.backends.GraceDbSciTokenAuthentication',
'api.backends.GraceDbX509FullCertAuthentication',
'api.backends.GraceDbBasicAuthentication',
)
# Update allowed hosts from environment variables -----------------------------
hosts_from_env = os.environ.get('DJANGO_ALLOWED_HOSTS', None)
if hosts_from_env is not None:
    ALLOWED_HOSTS += hosts_from_env.split(',')
ALLOWED_HOSTS += [SERVER_FQDN]
# Email settings - dependent on server hostname and FQDN ----------------------
SERVER_EMAIL = ALERT_EMAIL_FROM
ALERT_EMAIL_TO = []
ALERT_EMAIL_BCC = []
ALERT_TEST_EMAIL_FROM = ALERT_EMAIL_FROM
ALERT_TEST_EMAIL_TO = []
# EMBB email settings
EMBB_MAIL_ADDRESS = 'embb@{fqdn}.ligo.org'.format(fqdn=SERVER_FQDN)
EMBB_SMTP_SERVER = 'localhost'
EMBB_MAIL_ADMINS = [admin[1] for admin in ADMINS]
EMBB_IGNORE_ADDRESSES = ['Mailer-Daemon@{fqdn}'.format(fqdn=SERVER_FQDN)]
# Set up logging to stdout only
for key in LOGGING['loggers']:
    LOGGING['loggers'][key]['handlers'] = ['console']
LOGGING['loggers']['django.request']['handlers'].append('mail_admins')
# Turn off debug/error emails when in maintenance mode.
if MAINTENANCE_MODE:
    LOGGING['loggers']['django.request']['handlers'].remove('mail_admins')
# Set SciToken accepted audience to server FQDN
SCITOKEN_AUDIENCE = ["https://" + SERVER_FQDN, "https://" + LIGO_FQDN]
# Settings for a test/dev GraceDB instance running in a container
from .base import *
TIER = "dev"
CONFIG_NAME = "DEV"
# Debug settings
DEBUG = True
# Override EMBB email address
# TP (8 Aug 2017): not sure why?
EMBB_MAIL_ADDRESS = 'gracedb@{fqdn}'.format(fqdn=SERVER_FQDN)
# Add middleware
debug_middleware = 'debug_toolbar.middleware.DebugToolbarMiddleware'
MIDDLEWARE += [
debug_middleware,
#'silk.middleware.SilkyMiddleware',
#'core.middleware.profiling.ProfileMiddleware',
#'core.middleware.admin.AdminsOnlyMiddleware',
]
# Add to installed apps
INSTALLED_APPS += [
'debug_toolbar',
#'silk'
]
# Add testserver to ALLOWED_HOSTS
ALLOWED_HOSTS += ['testserver']
# Enforce that phone and email alerts are off XXX: Set by deployment variables!
#SEND_PHONE_ALERTS = False
#SEND_EMAIL_ALERTS = False
#SEND_MATTERMOST_ALERTS = True
# Settings for django-silk profiler
SILKY_AUTHENTICATION = True
SILKY_AUTHORISATION = True
if 'silk' in INSTALLED_APPS:
    # Needed to prevent RequestDataTooBig for files > 2.5 MB
    # when silk is being used. This setting is typically used to
    # prevent DOS attacks, so should not be changed in production.
    DATA_UPLOAD_MAX_MEMORY_SIZE = 20*(1024**2)
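    # i.e. 20 MiB, up from Django's stock default of 2.5 MB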
# Tuple of IPs which are marked as internal, useful for debugging.
# Tanner (5 Dec. 2017): DON'T CHANGE THIS! Django Debug Toolbar exposes
# some headers which we want to keep hidden. So to be safe, we only allow
# it to be used through this server. You need to configure a SOCKS proxy
# on your local machine to use DJDT (see admin docs).
INTERNAL_IPS = [
INTERNAL_IP_ADDRESS,
]
# Set up Sentry for error logging
sentry_dsn = get_from_env('DJANGO_SENTRY_DSN', fail_if_not_found=False)
if sentry_dsn is not None:
    USE_SENTRY = True

    # Set up Sentry
    import sentry_sdk
    from sentry_sdk.integrations.django import DjangoIntegration
    sentry_sdk.init(
        environment='dev',
        dsn=sentry_dsn,
        integrations=[DjangoIntegration()],
        before_send=before_send,
    )

    # Turn off default admin error emails
    LOGGING['loggers']['django.request']['handlers'] = []
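    # before_send is imported from base.py via the star-import. A typical
    # Sentry before_send hook has this shape (sketch; the filtered logger
    # name is a hypothetical example of dropping unwanted events):
    #
    #   def before_send(event, hint):
    #       if event.get('logger') == 'django.security.DisallowedHost':
    #           return None  # drop the event
    #       return event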
# Home page stuff
# Add sub-bullet with igwn-alert group:
group_sub_bullet = """<ul>
<li> Messages are sent to group: <span class="text-monospace"> {0} </span></li>
</ul>""".format(LVALERT_OVERSEER_INSTANCES[0]['igwn_alert_group'])
INSTANCE_LIST = INSTANCE_LIST + group_sub_bullet
INSTANCE_TITLE = 'GraceDB Development Server'
INSTANCE_INFO = """
<h5>Development Instance</h5>
<hr>
<p>
This GraceDB instance is designed for GraceDB maintainers to develop and
test in the AWS cloud architecture. There is <b>no guarantee</b> that the
behavior of this instance will mimic the production system at any time.
Events and associated data may change or be removed at any time.
</p>
<ul>
{}
<li>Only LIGO logins are provided (no login via InCommon or Google).</li>
</ul>
""".format(INSTANCE_LIST)
# Settings for a playground GraceDB instance (for user testing) running
# in a container on AWS. These settings inherit from base.py and
# override or add to them.
from .base import *
TIER = "playground"
CONFIG_NAME = "USER TESTING"
# Debug settings
DEBUG = False
# Override EMBB email address
# TP (8 Aug 2017): not sure why?
EMBB_MAIL_ADDRESS = 'gracedb@{fqdn}'.format(fqdn=SERVER_FQDN)
# Enforce that phone and email alerts are off XXX: Set by deployment variables!
#SEND_PHONE_ALERTS = False
#SEND_EMAIL_ALERTS = False
# Enable Mattermost alerts
SEND_MATTERMOST_ALERTS = True
# Add testserver to ALLOWED_HOSTS
ALLOWED_HOSTS += ['testserver']
# Set up Sentry for error logging
sentry_dsn = get_from_env('DJANGO_SENTRY_DSN', fail_if_not_found=False)
if sentry_dsn is not None:
    USE_SENTRY = True

    # Set up Sentry
    import sentry_sdk
    from sentry_sdk.integrations.django import DjangoIntegration
    sentry_sdk.init(
        environment='playground',
        dsn=sentry_dsn,
        integrations=[DjangoIntegration()],
        before_send=before_send,
    )

    # Turn off default admin error emails
    LOGGING['loggers']['django.request']['handlers'] = []
# Home page stuff
INSTANCE_TITLE = 'GraceDB Playground'
# Add sub-bullet with igwn-alert group:
group_sub_bullet = """<ul>
<li> Messages are sent to group: <span class="text-monospace"> {0} </span></li>
</ul>""".format(LVALERT_OVERSEER_INSTANCES[0]['igwn_alert_group'])
INSTANCE_LIST = INSTANCE_LIST + group_sub_bullet
INSTANCE_INFO = """
<h5>Playground instance</h5>
<hr>
<p>
This GraceDB instance is designed for users to develop and test their own
applications. It mimics the production instance in all but the following ways:
</p>
<ul>
{}
<li>Only LIGO logins are provided (no login via InCommon or Google).</li>
<li>Events and associated data will <b>not</b> be preserved indefinitely.
A nightly cron job removes events older than 21 days.</li>
</ul>
""".format(INSTANCE_LIST)
# Safety check on debug mode for playground
if DEBUG:
    raise RuntimeError("Turn off debug mode for playground")
# Settings for a production GraceDB instance running in a container
from .base import *
TIER = "production"
DEBUG = False
# Turn on alerts: XXX: Set by deployment variables!
#SEND_PHONE_ALERTS = True
#SEND_EMAIL_ALERTS = True
#SEND_MATTERMOST_ALERTS = True
# TP, March 2019: for now, it looks infeasible to use multiple databases
# since there are many operations which normal LVC users can do that
# do a write and then a read very soon after. And we can't rely on
# the read replica being updated quickly enough for that to work.
# So there are several workflows that need to be redone in order for
# this to be possible, but it's not obvious that they even can be
# reworked properly. I.e. this is a much bigger project than expected
# so we're going to have to revisit it at some point. We'll leave the
# config here for now.
# if not PRIORITY_SERVER:
# # If not a priority server, we use the read-only replica database
# # for reads and master for writes.
# # The username, password, and database name are all replicated
# # from the production database
#
# # Set up dict and add to DATABASES setting
# read_replica = {
# 'NAME': DATABASES['default']['NAME'],
# 'ENGINE': 'django.db.backends.mysql',
# 'USER': DATABASES['default']['USER'],
# 'PASSWORD': DATABASES['default']['PASSWORD'],
# 'HOST': os.environ.get('DJANGO_REPLICA_DB_HOST', ''),
# 'PORT': os.environ.get('DJANGO_REPLICA_DB_PORT', ''),
# 'OPTIONS': {
# 'init_command': 'SET storage_engine=MyISAM',
# },
# }
# DATABASES['read_replica'] = read_replica
#
# # Set up database router
# DATABASE_ROUTERS = ['core.db.routers.NonPriorityRouter',]
# Set up Sentry for error logging
sentry_dsn = get_from_env('DJANGO_SENTRY_DSN', fail_if_not_found=False)
if sentry_dsn is not None:
    USE_SENTRY = True

    # Set up Sentry
    import sentry_sdk
    from sentry_sdk.integrations.django import DjangoIntegration
    sentry_sdk.init(
        environment='production',
        dsn=sentry_dsn,
        integrations=[DjangoIntegration()],
        before_send=before_send,
    )

    # Turn off default admin error emails
    LOGGING['loggers']['django.request']['handlers'] = []
# Home page stuff
INSTANCE_TITLE = 'GraceDB'
# Add sub-bullet with igwn-alert group:
group_sub_bullet = """<ul>
<li> Messages are sent to group: <span class="text-monospace"> {0} </span></li>
</ul>""".format(LVALERT_OVERSEER_INSTANCES[0]['igwn_alert_group'])
INSTANCE_LIST = INSTANCE_LIST + group_sub_bullet
INSTANCE_INFO = """
<h5>GraceDB Notifications</h5>
<hr>
<p>
GraceDB notifies registered users of Gravitational-Wave candidate detections
in real-time during LIGO/Virgo/KAGRA observation periods. Current notifications
mechanisms are:
</p>
<ul>
{}
</ul>
""".format(INSTANCE_LIST)
# Safety check on debug mode for production
if DEBUG:
    raise RuntimeError("Turn off debug mode for production")
# Hardcode pipelines not approved for production:
UNAPPROVED_PIPELINES += ['aframe', 'GWAK']
# Settings for a test/dev GraceDB instance running in a container
from .base import *
TIER = "test"
CONFIG_NAME = "TEST"
# Debug settings
DEBUG = True
# Override EMBB email address
# TP (8 Aug 2017): not sure why?
EMBB_MAIL_ADDRESS = 'gracedb@{fqdn}'.format(fqdn=SERVER_FQDN)
# Add middleware
debug_middleware = 'debug_toolbar.middleware.DebugToolbarMiddleware'
MIDDLEWARE += [
debug_middleware,
#'silk.middleware.SilkyMiddleware',
#'core.middleware.profiling.ProfileMiddleware',
#'core.middleware.admin.AdminsOnlyMiddleware',
]
# Add to installed apps
INSTALLED_APPS += [
'debug_toolbar',
#'silk'
]
# Add testserver to ALLOWED_HOSTS
ALLOWED_HOSTS += ['testserver']
# Settings for django-silk profiler
SILKY_AUTHENTICATION = True
SILKY_AUTHORISATION = True
if 'silk' in INSTALLED_APPS:
    # Needed to prevent RequestDataTooBig for files > 2.5 MB
    # when silk is being used. This setting is typically used to
    # prevent DOS attacks, so should not be changed in production.
    DATA_UPLOAD_MAX_MEMORY_SIZE = 20*(1024**2)
# Tuple of IPs which are marked as internal, useful for debugging.
# Tanner (5 Dec. 2017): DON'T CHANGE THIS! Django Debug Toolbar exposes
# some headers which we want to keep hidden. So to be safe, we only allow
# it to be used through this server. You need to configure a SOCKS proxy
# on your local machine to use DJDT (see admin docs).
INTERNAL_IPS = [
INTERNAL_IP_ADDRESS,
]
# Enforce that phone and email alerts are off XXX: Set by deployment variables!
#SEND_PHONE_ALERTS = False
#SEND_EMAIL_ALERTS = False
#SEND_MATTERMOST_ALERTS = True
# Set up Sentry for error logging
sentry_dsn = get_from_env('DJANGO_SENTRY_DSN', fail_if_not_found=False)
if sentry_dsn is not None:
    USE_SENTRY = True

    # Set up Sentry
    import sentry_sdk
    from sentry_sdk.integrations.django import DjangoIntegration
    sentry_sdk.init(
        environment='test',
        dsn=sentry_dsn,
        integrations=[DjangoIntegration()],
        before_send=before_send,
    )

    # Turn off default admin error emails
    LOGGING['loggers']['django.request']['handlers'] = []
# Home page stuff
INSTANCE_TITLE = 'GraceDB Testing Server'
# Add sub-bullet with igwn-alert group:
group_sub_bullet = """<ul>
<li> Messages are sent to group: <span class="text-monospace"> {0} </span></li>
</ul>""".format(LVALERT_OVERSEER_INSTANCES[0]['igwn_alert_group'])
INSTANCE_LIST = INSTANCE_LIST + group_sub_bullet
INSTANCE_INFO = """
<h5>Testing Instance</h5>
<hr>
<p>
This GraceDB instance is designed for Quality Assurance (QA) testing and
validation for GraceDB and electromagnetic follow-up (EMFollow) developers.
Software should meet QA milestones on the test instance before being moved
to Playground or Production. Note, on this GraceDB instance:
</p>
<ul>
{}
<li>Only LIGO logins are provided (no login via InCommon or Google).</li>
</ul>
""".format(INSTANCE_LIST)
# Settings for a production GraceDB instance.
# Starts with base.py settings and overrides or adds to them.
from .base import *
# TP 12/22/2016: I don't think we need this anymore.
#SHIB_AUTHENTICATION_SESSION_INITIATOR = 'https://archie.phys.uwm.edu/Shibboleth.sso/Login'
CONFIG_NAME = "PRODUCTION"
# TP 12/22/2016: Doesn't seem to be used anywhere.
SITE_ID = 3
# LVAlert Overseer settings
ALERT_XMPP_SERVERS = ["lvalert.cgca.uwm.edu"]
LVALERT_OVERSEER_PORTS = {
"lvalert.cgca.uwm.edu": 8000,
}
# Turn on alerts
SEND_XMPP_ALERTS = True
SEND_PHONE_ALERTS = True
SEND_EMAIL_ALERTS = True
# For running a VM that is provisioned by Puppet with a secret.py file
# for secret settings
from ..base import *
# Get secret settings:
# DB_PASSWORD, SECRET_KEY, TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN
from ..secret import *
import socket
# Nested dict of settings for all databases
DATABASES = {
'default' : {
'NAME': 'gracedb',
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'USER': 'gracedb',
'PASSWORD': DB_PASSWORD,
'HOST':'127.0.0.1',
'PORT':'5432',
'CONN_MAX_AGE': 3600,
},
}
# Set up allowed hosts
SERVER_FQDN = socket.getfqdn()
SERVER_HOSTNAME = INTERNAL_HOSTNAME
LIGO_FQDN = '{hostname}.ligo.org'.format(hostname=SERVER_HOSTNAME)
ALLOWED_HOSTS += [SERVER_FQDN, LIGO_FQDN]
# Email settings - dependent on server hostname and FQDN ----------------------
EMAIL_HOST = 'localhost'
SERVER_EMAIL = 'GraceDB <gracedb@{fqdn}>'.format(fqdn=SERVER_FQDN)
ALERT_EMAIL_FROM = SERVER_EMAIL
ALERT_EMAIL_TO = []
ALERT_EMAIL_BCC = []
ALERT_TEST_EMAIL_FROM = SERVER_EMAIL
ALERT_TEST_EMAIL_TO = []
# EMBB email settings
EMBB_MAIL_ADDRESS = 'embb@{fqdn}.ligo.org'.format(fqdn=SERVER_FQDN)
EMBB_SMTP_SERVER = 'localhost'
EMBB_MAIL_ADMINS = [admin[1] for admin in ADMINS]
EMBB_IGNORE_ADDRESSES = ['Mailer-Daemon@{fqdn}'.format(fqdn=SERVER_FQDN)]
# Load modified caching middleware:
# https://docs.djangoproject.com/en/2.2/ref/middleware/#middleware-ordering
MIDDLEWARE = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.gzip.GZipMiddleware',
'events.middleware.PerformanceMiddleware',
'core.middleware.accept.AcceptMiddleware',
'core.middleware.api.ClientVersionMiddleware',
'core.middleware.api.CliExceptionMiddleware',
'core.middleware.proxy.XForwardedForMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'user_sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'core.middleware.maintenance.MaintenanceModeMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'ligoauth.middleware.ShibbolethWebAuthMiddleware',
'ligoauth.middleware.ControlRoomMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
]
# Set caches:
CACHE_MIDDLEWARE_SECONDS = 5
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.PyMemcacheCache',
'LOCATION': 'localhost:11211',
'TIMEOUT': 60,
'KEY_PREFIX': 'NULL',
'OPTIONS': {
'ignore_exc': True,
}
},
# For API throttles
'throttles': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'api_throttle_cache', # Table name
},
}
# FIXME: hardwire this for now in the VMs for testing
ENABLE_REDIS_QUEUE = True
if ENABLE_REDIS_QUEUE:
    # For async alert follow-up:
    CACHES.update({"async_followup": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": f"redis://{REDIS_QUEUE_ADDRESS}:{REDIS_QUEUE_PORT}/{REDIS_QUEUE_DATABASE}",
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient",
        }
    }})
# Set queue backend for async django tasks:
# example django-redis connection
Q_CLUSTER = {
'name': Q_CLUSTER_NAME,
'label': Q_CLUSTER_LABEL,
'retry': REDIS_QUEUE_RETRY,
'timeout': REDIS_QUEUE_TIMEOUT,
'workers': REDIS_QUEUE_WORKERS,
'recycle': REDIS_QUEUE_RECYCLE,
'django_redis': 'async_followup'
}
# DB "cool-down" factor for when a db conflict is detected. This
# factor scales a random number of seconds between zero and one.
DB_SLEEP_FACTOR = get_from_env(
'DJANGO_DB_SLEEP_FACTOR',
default_value=1.0,
fail_if_not_found=False
)
# Fix the factor (str to float)
try:
    DB_SLEEP_FACTOR = float(DB_SLEEP_FACTOR)
except (TypeError, ValueError):
    DB_SLEEP_FACTOR = 1.0
BETA_REPORTS_LINK = True
## EGAD (External GraceDB Alert Dispatcher) configuration
ENABLE_EGAD_EMAIL = parse_envvar_bool(
get_from_env('ENABLE_EGAD_EMAIL',
fail_if_not_found=False, default_value="false")
)
ENABLE_EGAD_KAFKA = parse_envvar_bool(
get_from_env('ENABLE_EGAD_KAFKA',
fail_if_not_found=False, default_value="false")
)
ENABLE_EGAD_MATTERMOST = parse_envvar_bool(
get_from_env('ENABLE_EGAD_MATTERMOST',
fail_if_not_found=False, default_value="false")
)
ENABLE_EGAD_PHONE = parse_envvar_bool(
get_from_env('ENABLE_EGAD_PHONE',
fail_if_not_found=False, default_value="false")
)
ENABLE_EGAD = (
ENABLE_EGAD_EMAIL or ENABLE_EGAD_KAFKA
or ENABLE_EGAD_MATTERMOST or ENABLE_EGAD_PHONE
)
# Pull in remaining (phone/email) alert variables from
# the environment. Default to false.
SEND_PHONE_ALERTS = parse_envvar_bool(get_from_env(
'SEND_PHONE_ALERTS',
default_value='False',
fail_if_not_found=False
))
SEND_EMAIL_ALERTS = parse_envvar_bool(get_from_env(
'SEND_EMAIL_ALERTS',
default_value='False',
fail_if_not_found=False
))
SEND_MATTERMOST_ALERTS = parse_envvar_bool(get_from_env(
'SEND_MATTERMOST_ALERTS',
default_value='False',
fail_if_not_found=False
))
INSTANCE_STUB = """
<li>Phone alerts (calls/SMS) are {0}</li>
<li>Email alerts are {1}</li>
<li><span class="text-monospace">igwn-alert</span> messages to <span class="text-monospace">{2}</span> are {3}</li>
"""
INSTANCE_LIST = INSTANCE_STUB.format(ENABLED[SEND_PHONE_ALERTS],
ENABLED[SEND_EMAIL_ALERTS],
LVALERT_OVERSEER_INSTANCES[0]['lvalert_server'],
ENABLED[SEND_XMPP_ALERTS])
if len(LVALERT_OVERSEER_INSTANCES) == 2:
    IGWN_STUB = '<li><span class="text-monospace">igwn-alert</span> messages to <span class="text-monospace">{0}</span> are {1}</li>'
    IGWN_LIST = IGWN_STUB.format(LVALERT_OVERSEER_INSTANCES[1]['lvalert_server'],
                                 ENABLED[SEND_XMPP_ALERTS])
    INSTANCE_LIST = INSTANCE_LIST + IGWN_LIST
# Set SciToken accepted audience to server FQDN
SCITOKEN_AUDIENCE = ["https://" + SERVER_FQDN, "https://" + LIGO_FQDN]