Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • alexander.pace/server
  • geoffrey.mo/gracedb-server
  • deep.chatterjee/gracedb-server
  • cody.messick/server
  • sushant.sharma-chaudhary/server
  • michael-coughlin/server
  • daniel.wysocki/gracedb-server
  • roberto.depietri/gracedb
  • philippe.grassia/gracedb
  • tri.nguyen/gracedb
  • jonah-kanner/gracedb
  • brandon.piotrzkowski/gracedb
  • joseph-areeda/gracedb
  • duncanmmacleod/gracedb
  • thomas.downes/gracedb
  • tanner.prestegard/gracedb
  • leo-singer/gracedb
  • computing/gracedb/server
18 results
Show changes
Commits on Source (785)
Showing
with 1375 additions and 163 deletions
...@@ -8,3 +8,5 @@ docs/user_docs/build/* ...@@ -8,3 +8,5 @@ docs/user_docs/build/*
docs/admin_docs/build/* docs/admin_docs/build/*
static_root/* static_root/*
.pytest_cache .pytest_cache
junit.xml
.coverage
...@@ -16,30 +16,85 @@ stages: ...@@ -16,30 +16,85 @@ stages:
before_script: before_script:
- docker login -u gitlab-ci-token -p $CI_JOB_TOKEN $CI_REGISTRY - docker login -u gitlab-ci-token -p $CI_JOB_TOKEN $CI_REGISTRY
include:
# Container scanning
- component: $CI_SERVER_FQDN/computing/gitlab/components/container-scanning/container-scanning@~latest
inputs:
job_name: branch_scan
# Software scanning
- component: $CI_SERVER_FQDN/computing/gitlab/components/sast/sast@~latest
inputs:
run_advanced_sast: true
- component: $CI_SERVER_FQDN/computing/gitlab/components/secret-detection/secret-detection@~latest
- component: $CI_SERVER_FQDN/computing/gitlab/components/python/dependency-scanning@~latest
# -- software scanning
# overwrite some settings for the scanning jobs
dependency_scanning:
stage: test
needs: []
variables:
DEBIAN_FRONTEND: "noninteractive"
before_script:
# install some underlying utilities using `apt` so that the dependency
# scanner can use pip to install everything else
- apt-get update -yqq
- apt-get install -yqq
libkrb5-dev
libldap-dev
libsasl2-dev
.sast-analyzer:
stage: test
needs: []
before_script: []
secret_detection:
stage: test
needs: []
before_script: []
# -- testing
.test: &test .test: &test
image: ligo/base:stretch image: igwn/base:bookworm
services: services:
- mysql:5.5 - postgres:15.6
- memcached
variables: variables:
AWS_SES_ACCESS_KEY_ID: "fake_aws_id" AWS_SES_ACCESS_KEY_ID: "fake_aws_id"
AWS_SES_SECRET_ACCESS_KEY: "fake_aws_key" AWS_SES_SECRET_ACCESS_KEY: "fake_aws_key"
DJANGO_ALERT_EMAIL_FROM: "fake_email" DJANGO_ALERT_EMAIL_FROM: "fake_email"
DJANGO_DB_HOST: "mysql" DJANGO_DB_HOST: "postgres"
DJANGO_DB_PORT: "3306" DJANGO_DB_PORT: "5432"
DJANGO_DB_NAME: "fake_name" DJANGO_DB_NAME: "fake_name"
DJANGO_DB_USER: "root" DJANGO_DB_USER: "runner"
DJANGO_DB_PASSWORD: "fake_password" DJANGO_DB_PASSWORD: ""
DJANGO_PRIMARY_FQDN: "fake_fqdn" DJANGO_PRIMARY_FQDN: "fake_fqdn"
DJANGO_SECRET_KEY: "fake_key" DJANGO_SECRET_KEY: "fake_key"
DJANGO_SETTINGS_MODULE: "config.settings.container.dev" DJANGO_SETTINGS_MODULE: "config.settings.container.dev"
DJANGO_TWILIO_ACCOUNT_SID: "fake_sid" DJANGO_TWILIO_ACCOUNT_SID: "fake_sid"
DJANGO_TWILIO_AUTH_TOKEN: "fake_token" DJANGO_TWILIO_AUTH_TOKEN: "fake_token"
DJANGO_DOCKER_MEMCACHED_ADDR: "memcached:11211"
EGAD_URL: "fake_url"
EGAD_API_KEY: "fake_key"
ENABLE_LVALERT_OVERSEER: "false"
ENABLE_IGWN_OVERSEER: "false"
LVALERT_OVERSEER_PORT: "2" LVALERT_OVERSEER_PORT: "2"
LVALERT_SERVER: "fake_server" LVALERT_SERVER: "fake_server"
LVALERT_USER: "fake_user" LVALERT_USER: "fake_user"
LVALERT_PASSWORD: "fake_password" LVALERT_PASSWORD: "fake_password"
MYSQL_DB: "${DJANGO_DB_NAME}" ENABLE_IGWN_OVERSEER: "false"
MYSQL_ROOT_PASSWORD: "${DJANGO_DB_PASSWORD}" IGWN_ALERT_OVERSEER_PORT: "2"
IGWN_ALERT_SERVER: "fake_server"
IGWN_ALERT_USER: "fake_user"
IGWN_ALERT_PASSWORD: "fake_password"
POSTGRES_DB: "${DJANGO_DB_NAME}"
POSTGRES_USER: "${DJANGO_DB_USER}"
POSTGRES_PASSWORD: "${DJANGO_DB_PASSWORD}"
POSTGRES_HOST_AUTH_METHOD: trust
before_script: before_script:
# create apt cache directory # create apt cache directory
- mkdir -pv ${APT_CACHE_DIR} - mkdir -pv ${APT_CACHE_DIR}
...@@ -48,26 +103,46 @@ before_script: ...@@ -48,26 +103,46 @@ before_script:
- PYTHON_MAJOR="${PYTHON_VERSION:0:1}" - PYTHON_MAJOR="${PYTHON_VERSION:0:1}"
- PYTHON="python3" - PYTHON="python3"
# install build requirements # install build requirements
- apt-get -y install gnupg
- sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
- wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add -
- apt-get -yqq update - apt-get -yqq update
- apt-get -o dir::cache::archives="${APT_CACHE_DIR}" install -yqq - apt-get -o dir::cache::archives="${APT_CACHE_DIR}" install -yqq
git git
libmariadbclient-dev gnupg
libldap2-dev libldap2-dev
libsasl2-dev libsasl2-dev
libssl-dev libssl-dev
libxml2-dev libxml2-dev
krb5-user
libkrb5-dev
libsasl2-modules-gssapi-mit
swig swig
pkg-config
libpng-dev
libfreetype6-dev
libxslt-dev
${PYTHON}-pip ${PYTHON}-pip
postgresql-15
postgresql-client-15
libpq-dev
# upgrade pip (requirement for lalsuite)
- ${PYTHON} -m pip install --upgrade pip --break-system-packages
# install everything else from pip # install everything else from pip
- ${PYTHON} -m pip install -r requirements.txt - ${PYTHON} -m pip install -r requirements.txt --break-system-packages
# create logs path required for tests # create logs path required for tests
- mkdir -pv ../logs/ - mkdir -pv ../logs/
# list packages # list packages
- ${PYTHON} -m pip list installed - ${PYTHON} -m pip list installed
script: script:
- PYTHONPATH=${PYTHONPATH}:${PWD}/gracedb ${PYTHON} -m pytest --cov ./gracedb --junitxml=${CI_PROJECT_DIR}/junit.xml - PYTHONPATH=${PYTHONPATH}:${PWD}/gracedb ${PYTHON} -m pytest --cov-report term-missing --cov ./gracedb --junitxml=${CI_PROJECT_DIR}/junit.xml
after_script: after_script:
- rm -fvr ${PIP_CACHE_DIR}/log - rm -fvr ${PIP_CACHE_DIR}/log
retry:
max: 2
when:
- runner_system_failure
- stuck_or_timeout_failure
artifacts: artifacts:
reports: reports:
junit: junit.xml junit: junit.xml
...@@ -76,23 +151,56 @@ before_script: ...@@ -76,23 +151,56 @@ before_script:
paths: paths:
- .cache/pip - .cache/pip
- .cache/apt - .cache/apt
coverage: '/^TOTAL\s+.*\s+(\d+\.?\d*)%/'
tags:
- executor-docker
test:3.5: test:3.11:
<<: *test <<: *test
# -- docker
branch_image: branch_image:
stage: branch stage: branch
script: script:
- docker build --pull -t $DOCKER_BRANCH . - docker build --pull -t $DOCKER_BRANCH .
- docker push $DOCKER_BRANCH - docker push $DOCKER_BRANCH
retry:
max: 2
when:
- runner_system_failure
- stuck_or_timeout_failure
tags:
- executor-docker
branch_scan:
stage: branch
needs: [branch_image]
# default rules spawn a merge request pipeline, we don't want that
rules:
- if: $CI_COMMIT_BRANCH
variables:
GIT_STRATEGY: fetch
# image to scan
CS_IMAGE: "$DOCKER_BRANCH"
# image to compare to
CS_DEFAULT_BRANCH_IMAGE: "$CI_REGISTRY/computing/gitlab/server:latest"
# path to Dockerfile for remediation
CS_DOCKERFILE_PATH: "Dockerfile"
before_script: []
latest_image: latest_image:
stage: latest stage: latest
dependencies: dependencies:
- branch_image - branch_image
only:
refs:
- master
script: script:
- docker pull $DOCKER_BRANCH
- docker tag $DOCKER_BRANCH $DOCKER_LATEST - docker tag $DOCKER_BRANCH $DOCKER_LATEST
- docker push $DOCKER_LATEST - docker push $DOCKER_LATEST
retry:
max: 2
when:
- runner_system_failure
- stuck_or_timeout_failure
tags:
- executor-docker
FROM ligo/base:stretch FROM debian:bookworm
LABEL name="LIGO GraceDB Django application" \ LABEL name="LIGO GraceDB Django application" \
maintainer="tanner.prestegard@ligo.org" \ maintainer="alexander.pace@ligo.org" \
date="20190920" date="20240306"
ARG SETTINGS_MODULE="config.settings.container.dev" ARG SETTINGS_MODULE="config.settings.container.dev"
COPY docker/SWITCHaai-swdistrib.gpg /etc/apt/trusted.gpg.d COPY docker/SWITCHaai-swdistrib.gpg /etc/apt/trusted.gpg.d
COPY docker/backports.pref /etc/apt/preferences.d COPY docker/backports.pref /etc/apt/preferences.d
RUN echo 'deb http://pkg.switch.ch/switchaai/debian stretch main' > /etc/apt/sources.list.d/shibboleth.list
RUN echo 'deb http://deb.debian.org/debian stretch-backports main' > /etc/apt/sources.list.d/backports.list
RUN curl -sL https://deb.nodesource.com/setup_8.x | bash -
RUN apt-get update && \ RUN apt-get update && \
apt-get -y install gnupg curl
RUN echo 'deb http://deb.debian.org/debian bookworm-backports main' > /etc/apt/sources.list.d/backports.list
RUN echo 'deb http://apt.postgresql.org/pub/repos/apt bookworm-pgdg main' > /etc/apt/sources.list.d/pgdg.list
RUN curl https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add -
RUN apt-get update && \
apt-get --assume-yes upgrade && \
apt-get install --install-recommends --assume-yes \ apt-get install --install-recommends --assume-yes \
apache2 \ apache2 \
emacs-nox \
gcc \ gcc \
git \ git \
krb5-user \
libkrb5-dev \
libapache2-mod-shib \
libapache2-mod-xsendfile \ libapache2-mod-xsendfile \
libmariadbclient-dev \
libldap2-dev \ libldap2-dev \
libldap-2.5-0 \
libsasl2-dev \ libsasl2-dev \
libsasl2-modules-gssapi-mit \
libxml2-dev \ libxml2-dev \
pkg-config \
libpng-dev \
libpq-dev \
libfreetype6-dev \
libxslt-dev \
libsqlite3-dev \ libsqlite3-dev \
ligo-ca-certs \ php \
mariadb-client \ php8.2-pgsql \
nodejs \ php8.2-mbstring \
osg-ca-certs \ postgresql-client-15 \
python3.5 \ python3 \
python3.5-dev \ python3-dev \
python3-libxml2 \ python3-libxml2 \
python3-pip \ python3-pip \
procps \ procps \
shibboleth \ redis \
supervisor \ shibboleth-sp-common \
shibboleth-sp-utils \
libssl-dev \ libssl-dev \
swig \ swig \
htop \
telnet \
vim && \ vim && \
apt-get clean && \ apt-get clean && \
npm install -g bower curl -sL https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - && \
apt-get update && apt-get install --assume-yes yarn
# Install AWS X-ray daemon
RUN curl -O https://s3.us-east-2.amazonaws.com/aws-xray-assets.us-east-2/xray-daemon/aws-xray-daemon-3.x.deb
RUN dpkg -i aws-xray-daemon-3.x.deb
RUN rm aws-xray-daemon-3.x.deb
# Install osg-ca-certs:
RUN curl -O https://hypatia.aei.mpg.de/lsc-amd64-bookworm/osg-ca-certs_1.132NEW-1+deb12u0_all.deb
RUN dpkg -i osg-ca-certs_1.132NEW-1+deb12u0_all.deb
RUN rm osg-ca-certs_1.132NEW-1+deb12u0_all.deb
# Install ligo-ca-certs:
RUN curl -O https://hypatia.aei.mpg.de/lsc-amd64-bookworm/ligo-ca-certs_1.0.2-0+deb12u0_all.deb
RUN dpkg -i ligo-ca-certs_1.0.2-0+deb12u0_all.deb
RUN rm ligo-ca-certs_1.0.2-0+deb12u0_all.deb
# Docker scripts:
COPY docker/entrypoint /usr/local/bin/entrypoint COPY docker/entrypoint /usr/local/bin/entrypoint
COPY docker/cleanup /usr/local/bin/cleanup COPY docker/cleanup /usr/local/bin/cleanup
# Supervisord configs:
COPY docker/supervisord.conf /etc/supervisor/supervisord.conf COPY docker/supervisord.conf /etc/supervisor/supervisord.conf
COPY docker/supervisord-apache2.conf /etc/supervisor/conf.d/apache2.conf COPY docker/supervisord-apache2.conf /etc/supervisor/conf.d/apache2.conf
COPY docker/supervisord-lvalert-overseer.conf /etc/supervisor/conf.d/overseer.conf COPY docker/supervisord-igwn-alert-overseer.conf /etc/supervisor/conf.d/igwn-overseer.conf
COPY docker/supervisord-shibd.conf /etc/supervisor/conf.d/shibd.conf COPY docker/supervisord-shibd.conf /etc/supervisor/conf.d/shibd.conf
COPY docker/shibboleth-ds /etc/shibboleth-ds COPY docker/supervisord-aws-xray.conf /etc/supervisor/conf.d/aws-xray.conf
COPY docker/supervisord-qcluster.conf /etc/supervisor/conf.d/qcluster.conf
# Apache configs:
COPY docker/apache-config /etc/apache2/sites-available/gracedb.conf COPY docker/apache-config /etc/apache2/sites-available/gracedb.conf
COPY docker/mpm_prefork.conf /etc/apache2/mods-enabled/mpm_prefork.conf
# Enable mpm_event module:
RUN rm /etc/apache2/mods-enabled/mpm_prefork.*
RUN rm /etc/apache2/mods-enabled/php8.2.*
RUN cp /etc/apache2/mods-available/mpm_event.* /etc/apache2/mods-enabled/
# Shibboleth configs and certs:
COPY docker/shibboleth-ds /etc/shibboleth-ds
COPY docker/login.ligo.org.cert.LIGOCA.pem /etc/shibboleth/login.ligo.org.cert.LIGOCA.pem COPY docker/login.ligo.org.cert.LIGOCA.pem /etc/shibboleth/login.ligo.org.cert.LIGOCA.pem
COPY docker/inc-md-cert.pem /etc/shibboleth/inc-md-cert.pem COPY docker/inc-md-cert.pem /etc/shibboleth/inc-md-cert.pem
COPY docker/check_shibboleth_status /usr/local/bin/check_shibboleth_status COPY docker/check_shibboleth_status /usr/local/bin/check_shibboleth_status
...@@ -59,16 +109,17 @@ ADD . /app/gracedb_project ...@@ -59,16 +109,17 @@ ADD . /app/gracedb_project
# install gracedb application itself # install gracedb application itself
WORKDIR /app/gracedb_project WORKDIR /app/gracedb_project
RUN bower install --allow-root RUN pip3 install --upgrade pip --break-system-packages
RUN pip3 install --upgrade pip RUN pip3 install -r requirements.txt --break-system-packages
RUN pip3 install --upgrade setuptools wheel && \
pip3 install -r requirements.txt # install supervisor from pip
RUN pip3 install supervisor --break-system-packages
# Give pip-installed packages priority over distribution packages # Give pip-installed packages priority over distribution packages
ENV PYTHONPATH /usr/local/lib/python3.5/dist-packages:$PYTHONPATH ENV PYTHONPATH /usr/local/lib/python3.11/dist-packages:$PYTHONPATH
ENV ENABLE_SHIBD false ENV ENABLE_SHIBD false
ENV ENABLE_OVERSEER true ENV ENABLE_OVERSEER true
ENV VIRTUAL_ENV dummy ENV VIRTUAL_ENV /dummy/
# Expose port and run Gunicorn # Expose port and run Gunicorn
EXPOSE 8000 EXPOSE 8000
...@@ -89,12 +140,20 @@ RUN DJANGO_SETTINGS_MODULE=${SETTINGS_MODULE} \ ...@@ -89,12 +140,20 @@ RUN DJANGO_SETTINGS_MODULE=${SETTINGS_MODULE} \
DJANGO_SECRET_KEY=fake_key \ DJANGO_SECRET_KEY=fake_key \
DJANGO_PRIMARY_FQDN=fake_fqdn \ DJANGO_PRIMARY_FQDN=fake_fqdn \
DJANGO_ALERT_EMAIL_FROM=fake_email \ DJANGO_ALERT_EMAIL_FROM=fake_email \
EGAD_URL=fake_url \
EGAD_API_KEY=fake_key \
LVALERT_USER=fake_user \ LVALERT_USER=fake_user \
LVALERT_PASSWORD=fake_password \ LVALERT_PASSWORD=fake_password \
LVALERT_SERVER=fake_server \ LVALERT_SERVER=fake_server \
LVALERT_OVERSEER_PORT=2 \ LVALERT_OVERSEER_PORT=2 \
IGWN_ALERT_USER=fake_user \
IGWN_ALERT_PASSWORD=fake_password \
IGWN_ALERT_SERVER=fake_server \
IGWN_ALERT_OVERSEER_PORT=2 \
IGWN_ALERT_GROUP=fake_group \
DJANGO_TWILIO_ACCOUNT_SID=fake_sid \ DJANGO_TWILIO_ACCOUNT_SID=fake_sid \
DJANGO_TWILIO_AUTH_TOKEN=fake_token \ DJANGO_TWILIO_AUTH_TOKEN=fake_token \
DJANGO_AWS_ELASTICACHE_ADDR=fake_address:11211 \
AWS_SES_ACCESS_KEY_ID=fake_aws_id \ AWS_SES_ACCESS_KEY_ID=fake_aws_id \
AWS_SES_SECRET_ACCESS_KEY=fake_aws_key \ AWS_SES_SECRET_ACCESS_KEY=fake_aws_key \
python3 manage.py collectstatic --noinput python3 manage.py collectstatic --noinput
...@@ -103,6 +162,9 @@ RUN rm -rf /app/logs/* /app/project_data/* ...@@ -103,6 +162,9 @@ RUN rm -rf /app/logs/* /app/project_data/*
RUN useradd -M -u 50001 -g www-data -s /bin/false gracedb RUN useradd -M -u 50001 -g www-data -s /bin/false gracedb
#RUN groupadd -r xray
#RUN useradd -M -u 50002 -g xray -s /bin/false xray
# set secure file/directory permissions. In particular, ADD command at # set secure file/directory permissions. In particular, ADD command at
# beginning of recipe inherits umask of user running the build # beginning of recipe inherits umask of user running the build
RUN chmod 0755 /usr/local/bin/entrypoint && \ RUN chmod 0755 /usr/local/bin/entrypoint && \
...@@ -112,5 +174,19 @@ RUN chmod 0755 /usr/local/bin/entrypoint && \ ...@@ -112,5 +174,19 @@ RUN chmod 0755 /usr/local/bin/entrypoint && \
find /app/gracedb_project -type d -exec chmod 0755 {} + && \ find /app/gracedb_project -type d -exec chmod 0755 {} + && \
find /app/gracedb_project -type f -exec chmod 0644 {} + find /app/gracedb_project -type f -exec chmod 0644 {} +
# create and set scitoken key cache directory
RUN mkdir /app/scitokens_cache && \
chown gracedb:www-data /app/scitokens_cache && \
chmod 0750 /app/scitokens_cache
ENV XDG_CACHE_HOME /app/scitokens_cache
# patch voeventparse for python3.10+:
RUN sed -i 's/collections.Iterable/collections.abc.Iterable/g' /usr/local/lib/python3.11/dist-packages/voeventparse/voevent.py
# Remove packages that expose security vulnerabilities and close out.
# Edit: zlib1g* can't be removed because of a PrePend error
RUN apt-get --assume-yes --purge autoremove wget libaom3 node-ip
RUN apt-get clean
ENTRYPOINT [ "/usr/local/bin/entrypoint" ] ENTRYPOINT [ "/usr/local/bin/entrypoint" ]
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"] CMD ["/usr/local/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"]
{ {
"name": "gracedb", "name": "gracedb",
"dependencies": { "dependencies": {
"dgrid": "0.4.0",
"dijit": "1.10.4", "dijit": "1.10.4",
"dojox": "1.10.4", "dojox": "1.10.4"
"jquery": "3.2.1",
"jquery-ui": "1.12.1",
"moment-timezone": "0.5.0",
"moment": "2.11.1",
"tablesaw": "3.1.2"
},
"resolutions": {
"webcomponentsjs": "^0.6.0"
} }
} }
...@@ -6,6 +6,14 @@ from os.path import abspath, dirname, join ...@@ -6,6 +6,14 @@ from os.path import abspath, dirname, join
import sys import sys
import multiprocessing import multiprocessing
# Useful function for getting environment variables
def get_from_env(envvar, default_value=None, fail_if_not_found=True):
value = os.environ.get(envvar, default_value)
if (value == default_value and fail_if_not_found):
raise ImproperlyConfigured(
'Could not get environment variable {0}'.format(envvar))
return value
# Parameters # Parameters
GUNICORN_PORT = 8080 GUNICORN_PORT = 8080
LOG_DIR = abspath(join(dirname(__file__), "..", "..", "logs")) LOG_DIR = abspath(join(dirname(__file__), "..", "..", "logs"))
...@@ -14,27 +22,100 @@ LOG_DIR = abspath(join(dirname(__file__), "..", "..", "logs")) ...@@ -14,27 +22,100 @@ LOG_DIR = abspath(join(dirname(__file__), "..", "..", "logs"))
# Bind to localhost on specified port # Bind to localhost on specified port
bind = "127.0.0.1:{port}".format(port=GUNICORN_PORT) bind = "127.0.0.1:{port}".format(port=GUNICORN_PORT)
# Number of workers = 2*CPU + 1 (recommendation from Gunicorn documentation) # Number of workers -----------------------------------------------------------
workers = multiprocessing.cpu_count()*2 + 1 # 2*CPU + 1 (recommendation from Gunicorn documentation)
# bumped to 4*CPU + 1 after testing. Maybe increase this number in the cloud
# deployment?
workers = int(get_from_env('GUNICORN_WORKERS',
default_value=multiprocessing.cpu_count()*3 + 1,
fail_if_not_found=False))
# NOTE: it was found in extensive testing that threads > 1 are prone
# to connection lockups. Leave this at 1 for safety until there are
# fixes in gunicorn.
# Why not sync? The sync worker is prone to timeout for long requests,
# like big queries. But gthread sends a heartbeat back to the main worker
# to keep it alive. We could just set the timeout to a really large number
# which would keep the long requests stable, but if there is a stuck worker,
# then they would be subject to that really long timeout. It's a tradeoff.
# All this goes away with async workers, but as of 3.2, django's ORM does support
# async, and testing failed pretty catastrophically and unreliably.
threads = int(get_from_env('GUNICORN_THREADS',
default_value=1,
fail_if_not_found=False))
# Worker connections. Limit the number of connections between apache<-->gunicorn
worker_connections = workers * threads
# Worker class. # Worker class ----------------------------------------------------------------
# # sync by default, generally safe and low-resource:
worker_class = 'gthread' # https://docs.gunicorn.org/en/stable/design.html#sync-workers
threads = 2
# Adding options for timeout. Not specified, the timeout default worker_class = get_from_env('GUNICORN_WORKER_CLASS',
# is 30 seconds. Source: default_value='gthread',
# fail_if_not_found=False)
# Timeout ---------------------------------------------------------------------
# If not specified, the timeout default is 30 seconds:
# https://gunicorn-docs.readthedocs.io/en/stable/settings.html#worker-processes # https://gunicorn-docs.readthedocs.io/en/stable/settings.html#worker-processes
#
timeout = 120
# Max requests settings - a worker restarts after handling this many timeout = get_from_env('GUNICORN_TIMEOUT',
# requests. May be useful if we have memory leak problems. default_value=30,
fail_if_not_found=False)
graceful_timeout = timeout
# max_requests settings -------------------------------------------------------
# The maximum number of requests a worker will process before restarting.
# May be useful if we have memory leak problems.
# The jitter is drawn from a uniform distribution: # The jitter is drawn from a uniform distribution:
# randint(0, max_requests_jitter) # randint(0, max_requests_jitter)
#max_requests = 0
#max_requests_jitter = 0 max_requests = get_from_env('GUNICORN_MAX_REQUESTS',
default_value=5000,
fail_if_not_found=False)
max_requests_jitter = get_from_env('GUNICORN_MAX_REQUESTS_JITTER',
default_value=250,
fail_if_not_found=False)
# keepalive -------------------------------------------------------------------
# The number of seconds to wait for requests on a Keep-Alive connection.
# Generally set in the 1-5 seconds range for servers with direct connection
# to the client (e.g. when you don’t have separate load balancer).
# When Gunicorn is deployed behind a load balancer, it often makes sense to set
# this to a higher value.
# NOTE: force gunicorn to close its connection to apache after each request.
# This has been the source of so many 502's. Basically in periods of high activity,
# gunicorn would hold on to open sockets with apache, and just deadlock itself:
# https://github.com/benoitc/gunicorn/issues/2917
keepalive = get_from_env('GUNICORN_KEEPALIVE',
default_value=0,
fail_if_not_found=False)
# preload_app -----------------------------------------------------------------
# Load application code before the worker processes are forked.
# By preloading an application you can save some RAM resources as well as speed
# up server boot times. Although, if you defer application loading to each
# worker process, you can reload your application code easily by restarting
# workers.
# If you aren't going to make use of on-the-fly reloading, consider preloading
# your application code to reduce its memory footprint. So, turn this on in
# production. This is default set to False for development, but
# **TURN THIS TO TRUE FOR AWS DEPLOYMENT **
preload_app = get_from_env('GUNICORN_PRELOAD_APP',
default_value=True,
fail_if_not_found=False)
# Logging --------------------------------------------------------------------- # Logging ---------------------------------------------------------------------
# Access log # Access log
...@@ -44,11 +125,40 @@ access_log_format = ('GUNICORN | %(h)s %(l)s %(u)s %(t)s ' ...@@ -44,11 +125,40 @@ access_log_format = ('GUNICORN | %(h)s %(l)s %(u)s %(t)s '
# Error log # Error log
errorlog = join(LOG_DIR, "gunicorn_error.log") errorlog = join(LOG_DIR, "gunicorn_error.log")
loglevel = 'debug'
# debug logging doesn't provide actual information. And this will
# eliminate the "Connection closed." messages while still giving info
# about worker restarts.
loglevel = 'info'
capture_output = True capture_output = True
# using /dev/shm/ instead of /tmp for the temporary worker directory. See:
# https://pythonspeed.com/articles/gunicorn-in-docker/
# “in AWS an EBS root instance volume may sometimes hang for half a minute
# and during this time Gunicorn workers may completely block.”
worker_tmp_dir='/dev/shm'
# Override logger class to modify error format # Override logger class to modify error format
from gunicorn.glogging import Logger from gunicorn.glogging import Logger
class CustomLogger(Logger): class CustomLogger(Logger):
error_fmt = 'GUNICORN | ' + Logger.error_fmt error_fmt = 'GUNICORN | ' + Logger.error_fmt
logger_class = CustomLogger logger_class = CustomLogger
def post_fork(server, worker):
server.log.info("Worker spawned (pid: %s)", worker.pid)
def pre_fork(server, worker):
pass
def pre_exec(server):
server.log.info("Forked child, re-executing.")
def when_ready(server):
server.log.info("Server is ready. Spawning workers")
def worker_int(worker):
worker.log.info("worker received INT or QUIT signal")
def worker_abort(worker):
worker.log.info("worker received SIGABRT signal")
from cloghandler import ConcurrentRotatingFileHandler from concurrent_log_handler import ConcurrentRotatingFileHandler
from datetime import datetime, timedelta from datetime import datetime, timedelta
import os, time, logging import os, time, logging, multiprocessing
from os.path import abspath, dirname, join from os.path import abspath, dirname, join
import socket import socket
from django.core.exceptions import ImproperlyConfigured from django.core.exceptions import ImproperlyConfigured
from aws_xray_sdk.core.exceptions.exceptions import SegmentNotFoundException
# Set up path to root of project # Set up path to root of project
BASE_DIR = abspath(join(dirname(__file__), "..", "..")) BASE_DIR = abspath(join(dirname(__file__), "..", ".."))
...@@ -21,25 +22,61 @@ def get_from_env(envvar, default_value=None, fail_if_not_found=True): ...@@ -21,25 +22,61 @@ def get_from_env(envvar, default_value=None, fail_if_not_found=True):
'Could not get environment variable {0}'.format(envvar)) 'Could not get environment variable {0}'.format(envvar))
return value return value
def parse_envvar_bool(x):
return x.lower() in ['t', 'true', '1']
# a sentry before_send function that filters aws SegmentNotFoundException's.
# these exceptions are harmless and occur when performing management tasks
# outside of the core gracedb app. but sentry picks it up and reports it as
# an error which is ANNOYING.
def before_send(event, hint):
if "exc_info" in hint:
exc_type, exc_value, tb = hint["exc_info"]
if isinstance(exc_value, (SegmentNotFoundException,)):
return None
return event
# Maintenance mode # Maintenance mode
MAINTENANCE_MODE = False MAINTENANCE_MODE = False
MAINTENANCE_MODE_MESSAGE = None MAINTENANCE_MODE_MESSAGE = None
# Enable/Disable Information Banner:
INFO_BANNER_ENABLED = False
INFO_BANNER_MESSAGE = "TEST MESSAGE"
# Beta reports page:
BETA_REPORTS_LINK = False
# Version --------------------------------------------------------------------- # Version ---------------------------------------------------------------------
PROJECT_VERSION = '2.7.1' PROJECT_VERSION = '2.31.0'
# Unauthenticated access ------------------------------------------------------ # Unauthenticated access ------------------------------------------------------
# This variable should eventually control whether unauthenticated access is # This variable should eventually control whether unauthenticated access is
# allowed *ANYWHERE* on this service, except the home page, which is always # allowed *ANYWHERE* on this service, except the home page, which is always
# public. For now, it just controls the API and the public alerts page. # public. For now, it just controls the API and the public alerts page.
UNAUTHENTICATED_ACCESS = True # Update: make this updatable from the environment:
UNAUTHENTICATED_ACCESS = parse_envvar_bool(
get_from_env('ENABLE_UNAUTHENTICATED_ACCESS',
fail_if_not_found=False, default_value="true")
)
# Miscellaneous settings ------------------------------------------------------ # Miscellaneous settings ------------------------------------------------------
# Debug mode is off by default # Debug mode is off by default
DEBUG = False DEBUG = False
# When debug mode is enabled, use custom reporter
DEFAULT_EXCEPTION_REPORTER = 'core.utils.CustomExceptionReporter'
# Number of results to show on latest page # Number of results to show on latest page
LATEST_RESULTS_NUMBER = 50 LATEST_RESULTS_NUMBER = 25
# Maximum number of log messages to display before throwing
# a warning. NOTE: There should be a better way of doing this,
# but just put in the hard cutoff for right now
# Set to cover this:
# https://gracedb.ligo.org/events/G184098
TOO_MANY_LOG_ENTRIES = int(get_from_env('DJANGO_TOO_MANY_LOG_ENTRIES',
fail_if_not_found=False, default_value=2000))
# Path to root URLconf # Path to root URLconf
ROOT_URLCONF = '{module}.urls'.format(module=os.path.basename(CONFIG_ROOT)) ROOT_URLCONF = '{module}.urls'.format(module=os.path.basename(CONFIG_ROOT))
...@@ -51,9 +88,9 @@ TEST_RUNNER = 'django.test.runner.DiscoverRunner' ...@@ -51,9 +88,9 @@ TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# MANAGERS defines who gets broken link notifications when # MANAGERS defines who gets broken link notifications when
# BrokenLinkEmailsMiddleware is enabled # BrokenLinkEmailsMiddleware is enabled
ADMINS = [ ADMINS = [
("Tanner Prestegard", "tanner.prestegard@ligo.org"),
("Alexander Pace", "alexander.pace@ligo.org"), ("Alexander Pace", "alexander.pace@ligo.org"),
("Duncan Meacher", "duncan.meacher@ligo.org"), ("Duncan Meacher", "duncan.meacher@ligo.org"),
("Daniel Wysocki", "daniel.wysocki@ligo.org"),
] ]
MANAGERS = ADMINS MANAGERS = ADMINS
...@@ -118,6 +155,24 @@ LOGOUT_REDIRECT_URL = 'home' ...@@ -118,6 +155,24 @@ LOGOUT_REDIRECT_URL = 'home'
SEND_XMPP_ALERTS = False SEND_XMPP_ALERTS = False
SEND_PHONE_ALERTS = False SEND_PHONE_ALERTS = False
SEND_EMAIL_ALERTS = False SEND_EMAIL_ALERTS = False
SEND_MATTERMOST_ALERTS = False
# igwn-alert group settings. the default development group is 'lvalert-dev'
# for the container deployments, the variable will be overwriten by the
# IGWN_ALERT_GROUP environment variable.
DEFAULT_IGWN_ALERT_GROUP = 'lvalert-dev'
# enable/disable sending alerts to topics that have the search tag
# for g/e-events. default to false, so only send to {group}_{pipeline}
SEND_TO_SEARCH_TOPICS = parse_envvar_bool(
get_from_env('IGWN_ALERT_SEARCH_TOPICS',
fail_if_not_found=False, default_value="false")
)
# overseer timeout:
OVERSEER_TIMEOUT = float(get_from_env('IGWN_ALERT_OVERSEER_TIMEOUT',
fail_if_not_found=False, default_value=0.1))
# Use LVAlert Overseer? # Use LVAlert Overseer?
USE_LVALERT_OVERSEER = True USE_LVALERT_OVERSEER = True
# For each LVAlert server, a separate instance of LVAlert Overseer # For each LVAlert server, a separate instance of LVAlert Overseer
...@@ -126,8 +181,9 @@ USE_LVALERT_OVERSEER = True ...@@ -126,8 +181,9 @@ USE_LVALERT_OVERSEER = True
# listen_port: port which that instance of overseer is listening on # listen_port: port which that instance of overseer is listening on
LVALERT_OVERSEER_INSTANCES = [ LVALERT_OVERSEER_INSTANCES = [
{ {
"lvalert_server": "lvalert-test.cgca.uwm.edu", "lvalert_server": "kafka://kafka.scimma.org/",
"listen_port": 8001, "listen_port": 8002,
"igwn_alert_group": DEFAULT_IGWN_ALERT_GROUP,
}, },
] ]
...@@ -149,6 +205,8 @@ ACCESS_MANAGERS_GROUP = 'access_managers' ...@@ -149,6 +205,8 @@ ACCESS_MANAGERS_GROUP = 'access_managers'
EM_ADVOCATE_GROUP = 'em_advocates' EM_ADVOCATE_GROUP = 'em_advocates'
# Superevent managers # Superevent managers
SUPEREVENT_MANAGERS_GROUP = 'superevent_managers' SUPEREVENT_MANAGERS_GROUP = 'superevent_managers'
# RRT group name:
RRT_MEMBERS_GROUP = 'rrt_members'
# Analysis groups # Analysis groups
# Analysis group name for non-GW events # Analysis group name for non-GW events
...@@ -195,12 +253,27 @@ COINC_PIPELINES = [ ...@@ -195,12 +253,27 @@ COINC_PIPELINES = [
'spiir', 'spiir',
'MBTAOnline', 'MBTAOnline',
'pycbc', 'pycbc',
'MBTA',
'PyGRB',
] ]
GRB_PIPELINES = [ GRB_PIPELINES = [
'Fermi', 'Fermi',
'Swift', 'Swift',
'INTEGRAL',
'AGILE',
'CHIME',
'SVOM',
] ]
# List of pipelines that have been depreciated:
DEPRECIATED_PIPELINES = [
'X',
'Q',
'Omega',
]
UNAPPROVED_PIPELINES = []
# VOEvent stream -------------------------------------------------------------- # VOEvent stream --------------------------------------------------------------
VOEVENT_STREAM = 'gwnet/LVC' VOEVENT_STREAM = 'gwnet/LVC'
...@@ -222,7 +295,7 @@ REPORT_INFO_URL_PREFIX = "/report_info/" ...@@ -222,7 +295,7 @@ REPORT_INFO_URL_PREFIX = "/report_info/"
REPORT_IFAR_IMAGE_DIR = PROJECT_DATA_DIR REPORT_IFAR_IMAGE_DIR = PROJECT_DATA_DIR
# Stuff for the new rates plot # Stuff for the new rates plot
BINNED_COUNT_PIPELINES = ['gstlal', 'MBTAOnline', 'CWB', 'oLIB', 'spiir'] BINNED_COUNT_PIPELINES = ['gstlal', 'MBTAOnline', 'MBTA', 'CWB', 'oLIB', 'spiir']
BINNED_COUNT_FILE = join(PROJECT_DATA_DIR, "binned_counts.json") BINNED_COUNT_FILE = join(PROJECT_DATA_DIR, "binned_counts.json")
# Defaults for RSS feed # Defaults for RSS feed
...@@ -299,6 +372,11 @@ X509_INFOS_HEADER = 'HTTP_X_FORWARDED_TLS_CLIENT_CERT_INFOS' ...@@ -299,6 +372,11 @@ X509_INFOS_HEADER = 'HTTP_X_FORWARDED_TLS_CLIENT_CERT_INFOS'
# Path to CA store for X509 certificate verification # Path to CA store for X509 certificate verification
CAPATH = '/etc/grid-security/certificates' CAPATH = '/etc/grid-security/certificates'
# SciTokens claims settings
SCITOKEN_ISSUER = ['https://cilogon.org/igwn', 'https://test.cilogon.org/igwn', 'https://osdf.igwn.org/cit']
SCITOKEN_AUDIENCE = ["ANY"]
SCITOKEN_SCOPE = "gracedb.read"
# List of authentication backends to use when attempting to authenticate # List of authentication backends to use when attempting to authenticate
# a user. Will be used in this order. Authentication for the API is # a user. Will be used in this order. Authentication for the API is
# handled by the REST_FRAMEWORK dictionary. # handled by the REST_FRAMEWORK dictionary.
...@@ -318,6 +396,7 @@ MIDDLEWARE = [ ...@@ -318,6 +396,7 @@ MIDDLEWARE = [
'django.middleware.common.CommonMiddleware', 'django.middleware.common.CommonMiddleware',
'core.middleware.proxy.XForwardedForMiddleware', 'core.middleware.proxy.XForwardedForMiddleware',
'user_sessions.middleware.SessionMiddleware', 'user_sessions.middleware.SessionMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware', 'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware',
'ligoauth.middleware.ShibbolethWebAuthMiddleware', 'ligoauth.middleware.ShibbolethWebAuthMiddleware',
...@@ -348,11 +427,15 @@ INSTALLED_APPS = [ ...@@ -348,11 +427,15 @@ INSTALLED_APPS = [
'ligoauth', 'ligoauth',
'search', 'search',
'superevents', 'superevents',
'gwtc',
'rest_framework', 'rest_framework',
'guardian', 'guardian',
'django_twilio', 'django_twilio',
'django_extensions', 'django_extensions',
'django.contrib.sessions', 'django.contrib.sessions',
'computedfields',
'django_postgres_vacuum',
'django_q',
] ]
# Aliases for django-extensions shell_plus # Aliases for django-extensions shell_plus
...@@ -387,11 +470,12 @@ REST_FRAMEWORK = { ...@@ -387,11 +470,12 @@ REST_FRAMEWORK = {
), ),
'DEFAULT_THROTTLE_RATES': { 'DEFAULT_THROTTLE_RATES': {
'anon_burst': '300/minute', 'anon_burst': '300/minute',
'event_creation': '10/second', 'event_creation': '50/second',
'annotation' : '10/second', 'annotation' : '50/second',
}, },
'DEFAULT_AUTHENTICATION_CLASSES': ( 'DEFAULT_AUTHENTICATION_CLASSES': (
'api.backends.GraceDbAuthenticatedAuthentication', 'api.backends.GraceDbAuthenticatedAuthentication',
'api.backends.GraceDbSciTokenAuthentication',
'api.backends.GraceDbX509Authentication', 'api.backends.GraceDbX509Authentication',
'api.backends.GraceDbBasicAuthentication', 'api.backends.GraceDbBasicAuthentication',
), ),
...@@ -407,9 +491,6 @@ if UNAUTHENTICATED_ACCESS is True: ...@@ -407,9 +491,6 @@ if UNAUTHENTICATED_ACCESS is True:
REST_FRAMEWORK['DEFAULT_PERMISSION_CLASSES'] = \ REST_FRAMEWORK['DEFAULT_PERMISSION_CLASSES'] = \
('rest_framework.permissions.IsAuthenticatedOrReadOnly',) ('rest_framework.permissions.IsAuthenticatedOrReadOnly',)
# Location of packages installed by bower
BOWER_DIR = join(BASE_DIR, "..", "bower_components")
# Location of static components, CSS, JS, etc. # Location of static components, CSS, JS, etc.
STATIC_ROOT = join(BASE_DIR, "static_root") STATIC_ROOT = join(BASE_DIR, "static_root")
STATIC_URL = "/static/" STATIC_URL = "/static/"
...@@ -420,7 +501,6 @@ STATICFILES_FINDERS = [ ...@@ -420,7 +501,6 @@ STATICFILES_FINDERS = [
] ]
STATICFILES_DIRS = [ STATICFILES_DIRS = [
join(PROJECT_ROOT, "static"), join(PROJECT_ROOT, "static"),
BOWER_DIR,
] ]
# Added in order to perform data migrations on Django apps # Added in order to perform data migrations on Django apps
...@@ -610,3 +690,101 @@ LOGGING = { ...@@ -610,3 +690,101 @@ LOGGING = {
}, },
}, },
} }
# Turn off debug/error emails when in maintenance mode.
if MAINTENANCE_MODE:
LOGGING['loggers']['django.request']['handlers'].remove('mail_admins')
# Turn off logging emails of django requests:
# FIXME: figure out more reliable logging solution
LOGGING['loggers']['django.request']['handlers'].remove('mail_admins')
# Define some words for the instance stub:
ENABLED = {True: "enabled", False: "disabled"}
# Upgrading to django 3.2 produces warning: "Auto-created primary key used
# when not defining a primary key type, by default 'django.db.models.AutoField'.
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
# Define window for neighbouring s events of a given g event.
EVENT_SUPEREVENT_WINDOW_BEFORE = 100
EVENT_SUPEREVENT_WINDOW_AFTER = 100
# Define which observation periods to show on the public events page:
# TODO: Group O4b and O4a under O4, once implemented.
PUBLIC_PAGE_RUNS = ['O4', 'O4c', 'O4b', 'O4a', 'ER16', 'ER15', 'O3']
# Define how long to cache the public page:
PUBLIC_PAGE_CACHING = int(get_from_env('DJANGO_PUBLIC_PAGE_CACHING',
fail_if_not_found=False, default_value=300))
# Define the number of results per page on the public page:
PUBLIC_PAGE_RESULTS = int(get_from_env('DJANGO_PUBLIC_PAGE_RESULTS',
fail_if_not_found=False, default_value=15))
# Define DATA_UPLOAD_MAX_MEMORY_SIZE for larger uploads:
DATA_UPLOAD_MAX_MEMORY_SIZE = int(get_from_env('DJANGO_DATA_UPLOAD_MAX_MEMORY_SIZE',
fail_if_not_found=False, default_value=20*1024*1024))
# Choose whether to use to Julian or Civil definition of year
# when displaying far's in /year:
DISPLAY_CIVIL_YEAR_FAR = parse_envvar_bool(
get_from_env('DJANGO_DISPLAY_CIVIL_YEAR_FAR',
fail_if_not_found=False, default_value="false")
)
if DISPLAY_CIVIL_YEAR_FAR:
DAYS_PER_YEAR = 365.0
else:
DAYS_PER_YEAR = 365.25
# Put in some setting for the redis queue backend
ENABLE_REDIS_QUEUE = parse_envvar_bool(
get_from_env('DJANGO_ENABLE_REDIS_QUEUE',
fail_if_not_found=False, default_value="false")
)
REDIS_QUEUE_ADDRESS = get_from_env('DJANGO_REDIS_QUEUE_ADDRESS',
fail_if_not_found=False, default_value="127.0.0.1")
REDIS_QUEUE_PORT = int(get_from_env('DJANGO_REDIS_QUEUE_PORT',
fail_if_not_found=False, default_value="6379")
)
REDIS_QUEUE_DATABASE = int(get_from_env('DJANGO_REDIS_QUEUE_DATABASE',
fail_if_not_found=False, default_value=0)
)
REDIS_QUEUE_WORKERS = int(get_from_env('DJANGO_REDIS_QUEUE_WORKERS',
default_value=multiprocessing.cpu_count(),
fail_if_not_found=False))
REDIS_QUEUE_RETRY = int(get_from_env('DJANGO_REDIS_QUEUE_RETRY',
default_value=40,
fail_if_not_found=False))
REDIS_QUEUE_TIMEOUT = int(get_from_env('DJANGO_REDIS_QUEUE_TIMEOUT',
default_value=30,
fail_if_not_found=False))
REDIS_QUEUE_RECYCLE = int(get_from_env('DJANGO_REDIS_QUEUE_RECYCLE',
default_value=500,
fail_if_not_found=False))
ENABLE_REDIS_CLUSTERED = parse_envvar_bool(
get_from_env('DJANGO_ENABLE_REDIS_CLUSTERED',
fail_if_not_found=False, default_value="false")
)
# Define some defaults for the q-cluster parameters:
Q_CLUSTER_NAME = 'gracedb-async-queue'
Q_CLUSTER_LABEL = 'gracedb q cluster'
if not ENABLE_REDIS_CLUSTERED:
Q_CLUSTER_NAME+=f'-{INTERNAL_HOSTNAME}'
Q_CLUSTER_LABEL+=f', {INTERNAL_HOSTNAME}'
# Define MAX_DATATABLES_RESULTS to limit memory usage for web queries:
MAX_DATATABLES_RESULTS = int(get_from_env('DJANGO_MAX_DATATABLES_RESULTS',
fail_if_not_found=False, default_value=1000))
...@@ -31,26 +31,87 @@ if SERVER_FQDN is None: ...@@ -31,26 +31,87 @@ if SERVER_FQDN is None:
raise ImproperlyConfigured('Could not get FQDN from envvars.') raise ImproperlyConfigured('Could not get FQDN from envvars.')
LIGO_FQDN = SERVER_FQDN LIGO_FQDN = SERVER_FQDN
# Get LVAlert server
lvalert_server = os.environ.get('LVALERT_SERVER', None) ## EGAD (External GraceDB Alert Dispatcher) configuration
if lvalert_server is None: ENABLE_EGAD_EMAIL = parse_envvar_bool(
raise ImproperlyConfigured('Could not get LVAlert server from envvars.') get_from_env('ENABLE_EGAD_EMAIL',
fail_if_not_found=False, default_value="false")
# Get LVAlert Overseer listen port )
lvalert_overseer_port = os.environ.get('LVALERT_OVERSEER_PORT', None) ENABLE_EGAD_KAFKA = parse_envvar_bool(
if lvalert_overseer_port is None: get_from_env('ENABLE_EGAD_KAFKA',
raise ImproperlyConfigured('Could not get LVAlert overseer port ' fail_if_not_found=False, default_value="false")
)
ENABLE_EGAD_MATTERMOST = parse_envvar_bool(
get_from_env('ENABLE_EGAD_MATTERMOST',
fail_if_not_found=False, default_value="false")
)
ENABLE_EGAD_PHONE = parse_envvar_bool(
get_from_env('ENABLE_EGAD_PHONE',
fail_if_not_found=False, default_value="false")
)
ENABLE_EGAD = (
ENABLE_EGAD_EMAIL or ENABLE_EGAD_KAFKA
or ENABLE_EGAD_MATTERMOST or ENABLE_EGAD_PHONE
)
EGAD_URL = get_from_env('EGAD_URL',
fail_if_not_found=ENABLE_EGAD, default_value=None)
EGAD_API_KEY = get_from_env('EGAD_API_KEY',
fail_if_not_found=ENABLE_EGAD, default_value=None)
# Turn LVAlert on/off from the environment. Adding this
# to turn lvalerts on/off from docker compose/update instead
# of having to rebuild containers. If the environment variable
# isn't set, then revert to the hardwired behavior:
xmpp_env_var = get_from_env('SEND_LVALERT_XMPP_ALERTS',
default_value=SEND_XMPP_ALERTS,
fail_if_not_found=False)
# Fix for other boolean values:
if (isinstance(xmpp_env_var, str) and
xmpp_env_var.lower() in ['true','t','1']):
SEND_XMPP_ALERTS=True
elif (isinstance(xmpp_env_var, str) and
xmpp_env_var.lower() in ['false','f','0']):
SEND_XMPP_ALERTS=False
else:
SEND_XMPP_ALERTS = True
# Get igwn_alert_overseer status:
igwn_alert_on = get_from_env(
'ENABLE_IGWN_OVERSEER',
default_value=False,
fail_if_not_found=False
)
if (isinstance(igwn_alert_on, str) and
igwn_alert_on.lower() in ['true', 't', '1']):
igwn_alert_overseer_on = True
else:
igwn_alert_overseer_on = False
# Get igwn-alert server
igwn_alert_server = os.environ.get('IGWN_ALERT_SERVER', None)
if igwn_alert_server is None:
raise ImproperlyConfigured('Could not get igwn-alert server from envvars.')
# Get igwn-alert Overseer listen port
igwn_alert_overseer_port = os.environ.get('IGWN_ALERT_OVERSEER_PORT', None)
if igwn_alert_overseer_port is None:
raise ImproperlyConfigured('Could not get igwn-alert overseer port '
'from envvars.') 'from envvars.')
# Get LVAlert username # Get igwn-alert group from envirnment:
lvalert_user = os.environ.get('LVALERT_USER', None) igwn_alert_group = os.environ.get('IGWN_ALERT_GROUP', DEFAULT_IGWN_ALERT_GROUP)
if lvalert_user is None:
raise ImproperlyConfigured('Could not get LVAlert username from envvars.')
# Get LVAlert password # Get igwn-alert username
lvalert_password = os.environ.get('LVALERT_PASSWORD', None) igwn_alert_user = os.environ.get('IGWN_ALERT_USER', None)
if lvalert_password is None: if igwn_alert_user is None:
raise ImproperlyConfigured('Could not get LVAlert password from envvars.') raise ImproperlyConfigured('Could not get igwn-alert username from envvars.')
# Get igwn-alert password
igwn_alert_password = os.environ.get('IGWN_ALERT_PASSWORD', None)
if igwn_alert_password is None:
raise ImproperlyConfigured('Could not get igwn-alert password from envvars.')
# Get Twilio account information from environment # Get Twilio account information from environment
TWILIO_ACCOUNT_SID = os.environ.get('DJANGO_TWILIO_ACCOUNT_SID', None) TWILIO_ACCOUNT_SID = os.environ.get('DJANGO_TWILIO_ACCOUNT_SID', None)
...@@ -67,12 +128,50 @@ maintenance_mode = get_from_env( ...@@ -67,12 +128,50 @@ maintenance_mode = get_from_env(
default_value=False, default_value=False,
fail_if_not_found=False fail_if_not_found=False
) )
# DB "cool-down" factor for when a db conflict is detected. This
# factor scales a random number of seconds between zero and one.
DB_SLEEP_FACTOR = get_from_env(
'DJANGO_DB_SLEEP_FACTOR',
default_value=1.0,
fail_if_not_found=False
)
# Fix the factor (str to float)
try:
DB_SLEEP_FACTOR = float(DB_SLEEP_FACTOR)
except:
DB_SLEEP_FACTOR = 1.0
if (isinstance(maintenance_mode, str) and if (isinstance(maintenance_mode, str) and
maintenance_mode.lower() in ['true', 't', '1']): maintenance_mode.lower() in ['true', 't', '1']):
MAINTENANCE_MODE = True MAINTENANCE_MODE = True
MAINTENANCE_MODE_MESSAGE = \ MAINTENANCE_MODE_MESSAGE = \
get_from_env('DJANGO_MAINTENANCE_MODE_MESSAGE', fail_if_not_found=False) get_from_env('DJANGO_MAINTENANCE_MODE_MESSAGE', fail_if_not_found=False)
# Get info banner settings from environment
info_banner_enabled = get_from_env(
'DJANGO_INFO_BANNER_ENABLED',
default_value=False,
fail_if_not_found=False
)
# fix for other booleans:
if (isinstance(info_banner_enabled, str) and
info_banner_enabled.lower() in ['true','t','1']):
INFO_BANNER_ENABLED = True
INFO_BANNER_MESSAGE = \
get_from_env('DJANGO_INFO_BANNER_MESSAGE', fail_if_not_found=False)
# Get reports page boolean:
beta_reports_link = get_from_env(
'DJANGO_BETA_REPORTS_LINK',
default_value=False,
fail_if_not_found=False
)
# fix for other booleans:
if (isinstance(beta_reports_link, str) and
beta_reports_link.lower() in ['true','t','1']):
BETA_REPORTS_LINK = True
# Get email settings from environment # Get email settings from environment
EMAIL_BACKEND = 'django_ses.SESBackend' EMAIL_BACKEND = 'django_ses.SESBackend'
AWS_SES_ACCESS_KEY_ID = get_from_env('AWS_SES_ACCESS_KEY_ID') AWS_SES_ACCESS_KEY_ID = get_from_env('AWS_SES_ACCESS_KEY_ID')
...@@ -84,6 +183,102 @@ AWS_SES_REGION_ENDPOINT = get_from_env('AWS_SES_REGION_ENDPOINT', ...@@ -84,6 +183,102 @@ AWS_SES_REGION_ENDPOINT = get_from_env('AWS_SES_REGION_ENDPOINT',
AWS_SES_AUTO_THROTTLE = 0.25 AWS_SES_AUTO_THROTTLE = 0.25
ALERT_EMAIL_FROM = get_from_env('DJANGO_ALERT_EMAIL_FROM') ALERT_EMAIL_FROM = get_from_env('DJANGO_ALERT_EMAIL_FROM')
# memcached settings. this variable should be set in the deployment to the
# same name as the service name in the docker deployment.
DOCKER_MEMCACHED_ADDR = get_from_env('DJANGO_DOCKER_MEMCACHED_ADDR',
default_value="memcached:11211",
fail_if_not_found=False)
DOCKER_MEMCACHED_SECONDS = get_from_env('DJANGO_DOCKER_MEMCACHED_SECONDS',
default_value="15",
fail_if_not_found=False)
try:
CACHE_MIDDLEWARE_SECONDS = int(DOCKER_MEMCACHED_SECONDS)
except:
CACHE_MIDDLEWARE_SECONDS = 15
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.PyMemcacheCache',
'LOCATION': DOCKER_MEMCACHED_ADDR,
'OPTIONS': {
'ignore_exc': True,
}
},
# For API throttles
'throttles': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'api_throttle_cache', # Table name
},
}
if ENABLE_REDIS_QUEUE:
# For async alert follow-up:
CACHES.update({"async_followup": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": f"redis://{REDIS_QUEUE_ADDRESS}:{REDIS_QUEUE_PORT}/{REDIS_QUEUE_DATABASE}",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
}})
# Set queue backend for async django tasks:
# example django-redis connection
Q_CLUSTER = {
'name': Q_CLUSTER_NAME,
'label': Q_CLUSTER_LABEL,
'retry': REDIS_QUEUE_RETRY,
'timeout': REDIS_QUEUE_TIMEOUT,
'workers': REDIS_QUEUE_WORKERS,
'recycle': REDIS_QUEUE_RECYCLE,
'django_redis': 'async_followup'
}
MIDDLEWARE = [
'django.middleware.cache.UpdateCacheMiddleware',
'core.middleware.maintenance.MaintenanceModeMiddleware',
'events.middleware.PerformanceMiddleware',
'core.middleware.accept.AcceptMiddleware',
'core.middleware.api.ClientVersionMiddleware',
'core.middleware.api.CliExceptionMiddleware',
'django.middleware.common.CommonMiddleware',
'core.middleware.proxy.XForwardedForMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'user_sessions.middleware.SessionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'ligoauth.middleware.ShibbolethWebAuthMiddleware',
'ligoauth.middleware.ControlRoomMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
]
# Set up AWS X-ray patching if enabled
ENABLE_AWS_XRAY = (
get_from_env("ENABLE_AWS_XRAY",
default_value="false", fail_if_not_found=False).lower()
in ['true', 't', '1']
)
if ENABLE_AWS_XRAY:
# AWS X-ray middleware must be first in the list to measure timing
# accurately
MIDDLEWARE.insert(0, 'aws_xray_sdk.ext.django.middleware.XRayMiddleware')
# Include X-ray as an installed app in order to allow configuration beyond
# the default
INSTALLED_APPS.append('aws_xray_sdk.ext.django')
# Settings for AWS X-ray
XRAY_RECORDER = {
'AWS_XRAY_DAEMON_ADDRESS': '127.0.0.1:2000',
'AUTO_INSTRUMENT': True,
'AWS_XRAY_CONTEXT_MISSING': 'LOG_ERROR',
'PLUGINS': (),
'SAMPLING': True,
'SAMPLING_RULES': None,
'AWS_XRAY_TRACING_NAME': 'GraceDB',
'DYNAMIC_NAMING': None,
'STREAMING_THRESHOLD': None,
}
# Priority server settings ---------------------------------------------------- # Priority server settings ----------------------------------------------------
PRIORITY_SERVER = False PRIORITY_SERVER = False
...@@ -102,38 +297,75 @@ if PRIORITY_SERVER: ...@@ -102,38 +297,75 @@ if PRIORITY_SERVER:
# Database settings ----------------------------------------------------------- # Database settings -----------------------------------------------------------
# New postgresql database
# Configured for the CI pipeline:
# https://docs.gitlab.com/ee/ci/services/postgres.html
DATABASES = { DATABASES = {
'default' : { 'default' : {
'NAME': db_name, 'NAME': db_name,
'ENGINE': 'django.db.backends.mysql', 'ENGINE': 'django.db.backends.postgresql_psycopg2',
'USER': db_user, 'USER': db_user,
'PASSWORD': db_password, 'PASSWORD': db_password,
'HOST': os.environ.get('DJANGO_DB_HOST', ''), 'HOST': os.environ.get('DJANGO_DB_HOST', ''),
'PORT': os.environ.get('DJANGO_DB_PORT', ''), 'PORT': os.environ.get('DJANGO_DB_PORT', ''),
'OPTIONS': { 'CONN_MAX_AGE': 3600,
'init_command': 'SET storage_engine=MyISAM', 'TEST' : {
# NOTE: for mysql>=5.7 this will need to be changed to 'NAME': 'gracedb_test_db',
#'init_command': 'SET default_storage_engine=MyISAM',
}, },
} },
} }
# Main server "hostname" - a little hacky but OK # Main server "hostname" - a little hacky but OK
SERVER_HOSTNAME = SERVER_FQDN.split('.')[0] SERVER_HOSTNAME = SERVER_FQDN.split('.')[0]
# LVAlert Overseer settings - get from environment # igwn_alert Overseer settings - get from environment
LVALERT_OVERSEER_INSTANCES = [ LVALERT_OVERSEER_INSTANCES = []
{ LVALERT_OVERSEER_INSTANCES.append(
"lvalert_server": lvalert_server, {
"listen_port": int(lvalert_overseer_port), "lvalert_server": igwn_alert_server,
"username": lvalert_user, "listen_port": int(igwn_alert_overseer_port),
"password": lvalert_password, "igwn_alert_group": igwn_alert_group,
}, "username": igwn_alert_user,
] "password": igwn_alert_password,
}
)
# Pull in remaining (phone/email) alert variables from
# the environment. Default to false.
SEND_PHONE_ALERTS = parse_envvar_bool(get_from_env(
'SEND_PHONE_ALERTS',
default_value='False',
fail_if_not_found=False
))
SEND_EMAIL_ALERTS = parse_envvar_bool(get_from_env(
'SEND_EMAIL_ALERTS',
default_value='False',
fail_if_not_found=False
))
SEND_MATTERMOST_ALERTS = parse_envvar_bool(get_from_env(
'SEND_MATTERMOST_ALERTS',
default_value='False',
fail_if_not_found=False
))
INSTANCE_STUB = """
<li>Phone alerts (calls/SMS) are {0}</li>
<li>Email alerts are {1}</li>
<li><span class="text-monospace">igwn-alert</span> messages to <span class="text-monospace">{2}</span> are {3}</li>
"""
INSTANCE_LIST = INSTANCE_STUB.format(ENABLED[SEND_PHONE_ALERTS],
ENABLED[SEND_EMAIL_ALERTS],
LVALERT_OVERSEER_INSTANCES[0]['lvalert_server'],
ENABLED[SEND_XMPP_ALERTS])
# Use full client certificate to authenticate # Use full client certificate to authenticate
REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES'] = ( REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES'] = (
'api.backends.GraceDbAuthenticatedAuthentication', 'api.backends.GraceDbAuthenticatedAuthentication',
'api.backends.GraceDbSciTokenAuthentication',
'api.backends.GraceDbX509FullCertAuthentication', 'api.backends.GraceDbX509FullCertAuthentication',
'api.backends.GraceDbBasicAuthentication', 'api.backends.GraceDbBasicAuthentication',
) )
...@@ -160,3 +392,10 @@ EMBB_IGNORE_ADDRESSES = ['Mailer-Daemon@{fqdn}'.format(fqdn=SERVER_FQDN)] ...@@ -160,3 +392,10 @@ EMBB_IGNORE_ADDRESSES = ['Mailer-Daemon@{fqdn}'.format(fqdn=SERVER_FQDN)]
for key in LOGGING['loggers']: for key in LOGGING['loggers']:
LOGGING['loggers'][key]['handlers'] = ['console'] LOGGING['loggers'][key]['handlers'] = ['console']
LOGGING['loggers']['django.request']['handlers'].append('mail_admins') LOGGING['loggers']['django.request']['handlers'].append('mail_admins')
# Turn off debug/error emails when in maintenance mode.
if MAINTENANCE_MODE:
LOGGING['loggers']['django.request']['handlers'].remove('mail_admins')
# Set SciToken accepted audience to server FQDN
SCITOKEN_AUDIENCE = ["https://" + SERVER_FQDN, "https://" + LIGO_FQDN]
# Settings for a test/dev GraceDB instance running in a container # Settings for a test/dev GraceDB instance running in a container
from .base import * from .base import *
CONFIG_NAME = "TEST" TIER = "dev"
CONFIG_NAME = "DEV"
# Debug settings # Debug settings
DEBUG = True DEBUG = True
...@@ -28,6 +29,11 @@ INSTALLED_APPS += [ ...@@ -28,6 +29,11 @@ INSTALLED_APPS += [
# Add testserver to ALLOWED_HOSTS # Add testserver to ALLOWED_HOSTS
ALLOWED_HOSTS += ['testserver'] ALLOWED_HOSTS += ['testserver']
# Enforce that phone and email alerts are off XXX: Set by deployment variables!
#SEND_PHONE_ALERTS = False
#SEND_EMAIL_ALERTS = False
#SEND_MATTERMOST_ALERTS = True
# Settings for django-silk profiler # Settings for django-silk profiler
SILKY_AUTHENTICATION = True SILKY_AUTHENTICATION = True
SILKY_AUTHORISATION = True SILKY_AUTHORISATION = True
...@@ -55,10 +61,36 @@ if sentry_dsn is not None: ...@@ -55,10 +61,36 @@ if sentry_dsn is not None:
import sentry_sdk import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration from sentry_sdk.integrations.django import DjangoIntegration
sentry_sdk.init( sentry_sdk.init(
environment='test', environment='dev',
dsn=sentry_dsn, dsn=sentry_dsn,
integrations=[DjangoIntegration()] integrations=[DjangoIntegration()],
before_send=before_send,
) )
# Turn off default admin error emails # Turn off default admin error emails
LOGGING['loggers']['django.request']['handlers'] = [] LOGGING['loggers']['django.request']['handlers'] = []
# Home page stuff
INSTANCE_TITLE = 'GraceDB Development VM'
# Add sub-bullet with igwn-alert group:
group_sub_bullet = """<ul>
<li> Messages are sent to group: <span class="text-monospace"> {0} </span></li>
</ul>""".format(LVALERT_OVERSEER_INSTANCES[0]['igwn_alert_group'])
INSTANCE_LIST = INSTANCE_LIST + group_sub_bullet
INSTANCE_TITLE = 'GraceDB Development Server'
INSTANCE_INFO = """
<h5>Development Instance</h5>
<hr>
<p>
This GraceDB instance is designed for GraceDB maintainers to develop and
test in the AWS cloud architecture. There is <b>no guarantee</b> that the
behavior of this instance will mimic the production system at any time.
Events and associated data may change or be removed at any time.
</p>
<ul>
{}
<li>Only LIGO logins are provided (no login via InCommon or Google).</li>
</ul>
""".format(INSTANCE_LIST)
# Settings for a playground GraceDB instance (for user testing) running
# in a container on AWS. These settings inherent from base.py)
# and overrides or adds to them.
from .base import *
TIER = "playground"
CONFIG_NAME = "USER TESTING"
# Debug settings
DEBUG = False
# Override EMBB email address
# TP (8 Aug 2017): not sure why?
EMBB_MAIL_ADDRESS = 'gracedb@{fqdn}'.format(fqdn=SERVER_FQDN)
# Enforce that phone and email alerts are off XXX: Set by deployment variables!
#SEND_PHONE_ALERTS = False
#SEND_EMAIL_ALERTS = False
# Enable Mattermost alerts
SEND_MATTERMOST_ALERTS = True
# Add testserver to ALLOWED_HOSTS
ALLOWED_HOSTS += ['testserver']
# Set up Sentry for error logging
sentry_dsn = get_from_env('DJANGO_SENTRY_DSN', fail_if_not_found=False)
if sentry_dsn is not None:
USE_SENTRY = True
# Set up Sentry
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
sentry_sdk.init(
environment='playground',
dsn=sentry_dsn,
integrations=[DjangoIntegration()],
before_send=before_send,
)
# Turn off default admin error emails
LOGGING['loggers']['django.request']['handlers'] = []
# Home page stuff
INSTANCE_TITLE = 'GraceDB Playground'
# Add sub-bullet with igwn-alert group:
group_sub_bullet = """<ul>
<li> Messages are sent to group: <span class="text-monospace"> {0} </span></li>
</ul>""".format(LVALERT_OVERSEER_INSTANCES[0]['igwn_alert_group'])
INSTANCE_LIST = INSTANCE_LIST + group_sub_bullet
INSTANCE_INFO = """
<h5>Playground instance</h5>
<hr>
<p>
This GraceDB instance is designed for users to develop and test their own
applications. It mimics the production instance in all but the following ways:
</p>
<ul>
{}
<li>Only LIGO logins are provided (no login via InCommon or Google).</li>
<li>Events and associated data will <b>not</b> be preserved indefinitely.
A nightly cron job removes events older than 21 days.</li>
</ul>
""".format(INSTANCE_LIST)
# Safety check on debug mode for playground
if (DEBUG == True):
raise RuntimeError("Turn off debug mode for playground")
# Settings for a production GraceDB instance running in a container # Settings for a production GraceDB instance running in a container
from .base import * from .base import *
TIER = "production"
DEBUG = False DEBUG = False
# Turn on alerts # Turn on alerts: XXX: Set by deployment variables!
SEND_XMPP_ALERTS = True #SEND_PHONE_ALERTS = True
SEND_PHONE_ALERTS = True #SEND_EMAIL_ALERTS = True
SEND_EMAIL_ALERTS = True #SEND_MATTERMOST_ALERTS = True
# TP, March 2019: for now, it looks infeasible to use multiple databases # TP, March 2019: for now, it looks infeasible to use multiple databases
# since there are many operations which normal LVC users can do that # since there are many operations which normal LVC users can do that
...@@ -51,12 +53,38 @@ if sentry_dsn is not None: ...@@ -51,12 +53,38 @@ if sentry_dsn is not None:
sentry_sdk.init( sentry_sdk.init(
environment='production', environment='production',
dsn=sentry_dsn, dsn=sentry_dsn,
integrations=[DjangoIntegration()] integrations=[DjangoIntegration()],
before_send=before_send,
) )
# Turn off default admin error emails # Turn off default admin error emails
LOGGING['loggers']['django.request']['handlers'] = [] LOGGING['loggers']['django.request']['handlers'] = []
# Home page stuff
INSTANCE_TITLE = 'GraceDB'
# Add sub-bullet with igwn-alert group:
group_sub_bullet = """<ul>
<li> Messages are sent to group: <span class="text-monospace"> {0} </span></li>
</ul>""".format(LVALERT_OVERSEER_INSTANCES[0]['igwn_alert_group'])
INSTANCE_LIST = INSTANCE_LIST + group_sub_bullet
INSTANCE_INFO = """
<h5>GraceDB Notifications</h5>
<hr>
<p>
GraceDB notifies registered users of Gravitational-Wave candidate detections
in real-time during LIGO/Virgo/KAGRA observation periods. Current notifications
mechanisms are:
</p>
<ul>
{}
</ul>
""".format(INSTANCE_LIST)
# Safety check on debug mode for production # Safety check on debug mode for production
if (DEBUG == True): if (DEBUG == True):
raise RuntimeError("Turn off debug mode for production") raise RuntimeError("Turn off debug mode for production")
# Hardcode pipelines not approved for production:
# NOTE(review): extends the list defined in base settings so these pipelines
# are treated as unapproved on this tier — confirm against the approval
# check that consumes UNAPPROVED_PIPELINES.
UNAPPROVED_PIPELINES += ['aframe', 'GWAK']
# Settings for a test/dev GraceDB instance running in a container
from .base import *
# Deployment tier identifier for this settings module.
TIER = "test"
CONFIG_NAME = "TEST"
# Debug settings
DEBUG = True
# Override EMBB email address
# TP (8 Aug 2017): not sure why?
EMBB_MAIL_ADDRESS = 'gracedb@{fqdn}'.format(fqdn=SERVER_FQDN)
# Add middleware: django-debug-toolbar is active; silk/profiling/admins-only
# middlewares are kept here (commented out) for easy toggling while debugging.
debug_middleware = 'debug_toolbar.middleware.DebugToolbarMiddleware'
MIDDLEWARE += [
    debug_middleware,
    #'silk.middleware.SilkyMiddleware',
    #'core.middleware.profiling.ProfileMiddleware',
    #'core.middleware.admin.AdminsOnlyMiddleware',
]
# Add to installed apps
INSTALLED_APPS += [
    'debug_toolbar',
    #'silk'
]
# Add testserver to ALLOWED_HOSTS so Django's test client passes host checks.
ALLOWED_HOSTS += ['testserver']
# Settings for django-silk profiler
SILKY_AUTHENTICATION = True
SILKY_AUTHORISATION = True
# Only takes effect when 'silk' is uncommented in INSTALLED_APPS above.
if 'silk' in INSTALLED_APPS:
    # Needed to prevent RequestDataTooBig for files > 2.5 MB
    # when silk is being used. This setting is typically used to
    # prevent DOS attacks, so should not be changed in production.
    DATA_UPLOAD_MAX_MEMORY_SIZE = 20*(1024**2)
# Tuple of IPs which are marked as internal, useful for debugging.
# Tanner (5 Dec. 2017): DON'T CHANGE THIS! Django Debug Toolbar exposes
# some headers which we want to keep hidden. So to be safe, we only allow
# it to be used through this server. You need to configure a SOCKS proxy
# on your local machine to use DJDT (see admin docs).
INTERNAL_IPS = [
    INTERNAL_IP_ADDRESS,
]
# Enforce that phone and email alerts are off XXX: Set by deployment variables!
#SEND_PHONE_ALERTS = False
#SEND_EMAIL_ALERTS = False
#SEND_MATTERMOST_ALERTS = True
# Set up Sentry for error logging, but only when a DSN is supplied via the
# DJANGO_SENTRY_DSN environment variable.
sentry_dsn = get_from_env('DJANGO_SENTRY_DSN', fail_if_not_found=False)
if sentry_dsn is not None:
    USE_SENTRY = True
    # Set up Sentry
    import sentry_sdk
    from sentry_sdk.integrations.django import DjangoIntegration
    sentry_sdk.init(
        environment='test',
        dsn=sentry_dsn,
        integrations=[DjangoIntegration()],
        before_send=before_send,
    )
    # Turn off default admin error emails; Sentry reports errors instead.
    LOGGING['loggers']['django.request']['handlers'] = []
# Home page stuff: title and HTML fragments rendered on the index page.
INSTANCE_TITLE = 'GraceDB Testing Server'
# Add sub-bullet with igwn-alert group:
group_sub_bullet = """<ul>
<li> Messages are sent to group: <span class="text-monospace"> {0} </span></li>
</ul>""".format(LVALERT_OVERSEER_INSTANCES[0]['igwn_alert_group'])
INSTANCE_LIST = INSTANCE_LIST + group_sub_bullet
INSTANCE_INFO = """
<h5>Testing Instance</h5>
<hr>
<p>
This GraceDB instance is designed for Quality Assurance (QA) testing and
validation for GraceDB and electromagnetic follow-up (EMFollow) developers.
Software should meet QA milestones on the test instance before being moved
to Playground or Production. Note, on this GraceDB instance:
</p>
<ul>
{}
<li>Only LIGO logins are provided (no login via InCommon or Google).</li>
</ul>
""".format(INSTANCE_LIST)
...@@ -10,13 +10,13 @@ import socket ...@@ -10,13 +10,13 @@ import socket
DATABASES = { DATABASES = {
'default' : { 'default' : {
'NAME': 'gracedb', 'NAME': 'gracedb',
'ENGINE': 'django.db.backends.mysql', 'ENGINE': 'django.db.backends.postgresql_psycopg2',
'USER': 'gracedb', 'USER': 'gracedb',
'PASSWORD': DB_PASSWORD, 'PASSWORD': DB_PASSWORD,
'OPTIONS': { 'HOST':'127.0.0.1',
'init_command': 'SET storage_engine=MyISAM', 'PORT':'5432',
}, 'CONN_MAX_AGE': 3600,
} },
} }
# Set up allowed hosts # Set up allowed hosts
...@@ -38,3 +38,148 @@ EMBB_MAIL_ADDRESS = 'embb@{fqdn}.ligo.org'.format(fqdn=SERVER_FQDN) ...@@ -38,3 +38,148 @@ EMBB_MAIL_ADDRESS = 'embb@{fqdn}.ligo.org'.format(fqdn=SERVER_FQDN)
EMBB_SMTP_SERVER = 'localhost' EMBB_SMTP_SERVER = 'localhost'
EMBB_MAIL_ADMINS = [admin[1] for admin in ADMINS] EMBB_MAIL_ADMINS = [admin[1] for admin in ADMINS]
EMBB_IGNORE_ADDRESSES = ['Mailer-Daemon@{fqdn}'.format(fqdn=SERVER_FQDN)] EMBB_IGNORE_ADDRESSES = ['Mailer-Daemon@{fqdn}'.format(fqdn=SERVER_FQDN)]
# Load modified caching middleware:
# https://docs.djangoproject.com/en/2.2/ref/middleware/#middleware-ordering
# Ordering matters: UpdateCacheMiddleware must run first (outermost on the
# response) and FetchFromCacheMiddleware last so whole responses are cached.
# NOTE(review): both django.contrib.sessions and user_sessions session
# middlewares are listed — presumably intentional (user_sessions provides
# the session store); confirm against the SESSION_ENGINE setting.
MIDDLEWARE = [
    'django.middleware.cache.UpdateCacheMiddleware',
    'django.middleware.gzip.GZipMiddleware',
    'events.middleware.PerformanceMiddleware',
    'core.middleware.accept.AcceptMiddleware',
    'core.middleware.api.ClientVersionMiddleware',
    'core.middleware.api.CliExceptionMiddleware',
    'core.middleware.proxy.XForwardedForMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'user_sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'core.middleware.maintenance.MaintenanceModeMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'ligoauth.middleware.ShibbolethWebAuthMiddleware',
    'ligoauth.middleware.ControlRoomMiddleware',
    'django.middleware.cache.FetchFromCacheMiddleware',
]
# Set caches:
# Page-cache middleware caches responses for 5 seconds.
CACHE_MIDDLEWARE_SECONDS = 5
CACHES = {
    # Default cache: local memcached via pymemcache.
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.PyMemcacheCache',
        'LOCATION': 'localhost:11211',
        'TIMEOUT': 60,
        # NOTE(review): 'NULL' looks like a placeholder prefix — presumably
        # overridden per deployment; confirm.
        'KEY_PREFIX': 'NULL',
        'OPTIONS': {
            # Treat memcached errors as cache misses instead of raising.
            'ignore_exc': True,
        }
    },
    # For API throttles
    'throttles': {
        'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
        'LOCATION': 'api_throttle_cache', # Table name
    },
}
# FIXME: hardwire this for now in the VMs for testing
ENABLE_REDIS_QUEUE = True
if ENABLE_REDIS_QUEUE:
    # For async alert follow-up:
    CACHES.update({"async_followup": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": f"redis://{REDIS_QUEUE_ADDRESS}:{REDIS_QUEUE_PORT}/{REDIS_QUEUE_DATABASE}",
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient",
        }
    }})
    # Set queue backend for async django tasks:
    # example django-redis connection
    # django-q cluster reuses the 'async_followup' django-redis cache above.
    Q_CLUSTER = {
        'name': Q_CLUSTER_NAME,
        'label': Q_CLUSTER_LABEL,
        'retry': REDIS_QUEUE_RETRY,
        'timeout': REDIS_QUEUE_TIMEOUT,
        'workers': REDIS_QUEUE_WORKERS,
        'recycle': REDIS_QUEUE_RECYCLE,
        'django_redis': 'async_followup'
    }
# DB "cool-down" factor for when a db conflict is detected. This
# factor scales a random number of seconds between zero and one.
DB_SLEEP_FACTOR = get_from_env(
    'DJANGO_DB_SLEEP_FACTOR',
    default_value=1.0,
    fail_if_not_found=False
)
# Coerce the (possibly string-valued) environment setting to float,
# falling back to 1.0 on bad input. Catch only conversion errors: the
# previous bare `except` would also have swallowed KeyboardInterrupt and
# SystemExit raised during startup.
try:
    DB_SLEEP_FACTOR = float(DB_SLEEP_FACTOR)
except (TypeError, ValueError):
    DB_SLEEP_FACTOR = 1.0
# Show the link to the beta reports page.
BETA_REPORTS_LINK = True
## EGAD (External GraceDB Alert Dispatcher) configuration
# Each delivery channel is opted in via its own environment variable;
# all default to "false" when unset.
ENABLE_EGAD_EMAIL = parse_envvar_bool(
    get_from_env('ENABLE_EGAD_EMAIL',
        fail_if_not_found=False, default_value="false")
)
ENABLE_EGAD_KAFKA = parse_envvar_bool(
    get_from_env('ENABLE_EGAD_KAFKA',
        fail_if_not_found=False, default_value="false")
)
ENABLE_EGAD_MATTERMOST = parse_envvar_bool(
    get_from_env('ENABLE_EGAD_MATTERMOST',
        fail_if_not_found=False, default_value="false")
)
ENABLE_EGAD_PHONE = parse_envvar_bool(
    get_from_env('ENABLE_EGAD_PHONE',
        fail_if_not_found=False, default_value="false")
)
# Master switch: EGAD is enabled when any individual channel is enabled.
ENABLE_EGAD = (
    ENABLE_EGAD_EMAIL or ENABLE_EGAD_KAFKA
    or ENABLE_EGAD_MATTERMOST or ENABLE_EGAD_PHONE
)
# Pull in remaining (phone/email) alert variables from
# the environment. Default to false.
SEND_PHONE_ALERTS = parse_envvar_bool(get_from_env(
    'SEND_PHONE_ALERTS',
    default_value='False',
    fail_if_not_found=False
))
SEND_EMAIL_ALERTS = parse_envvar_bool(get_from_env(
    'SEND_EMAIL_ALERTS',
    default_value='False',
    fail_if_not_found=False
))
SEND_MATTERMOST_ALERTS = parse_envvar_bool(get_from_env(
    'SEND_MATTERMOST_ALERTS',
    default_value='False',
    fail_if_not_found=False
))
# HTML fragment for the home page listing which alert mechanisms are enabled.
# NOTE(review): the third bullet is labelled "igwn-alert" but is filled from
# LVALERT_OVERSEER_INSTANCES[0]['lvalert_server'] and gated on
# SEND_XMPP_ALERTS — confirm the naming is intentional.
INSTANCE_STUB = """
<li>Phone alerts (calls/SMS) are {0}</li>
<li>Email alerts are {1}</li>
<li><span class="text-monospace">igwn-alert</span> messages to <span class="text-monospace">{2}</span> are {3}</li>
"""
INSTANCE_LIST = INSTANCE_STUB.format(ENABLED[SEND_PHONE_ALERTS],
    ENABLED[SEND_EMAIL_ALERTS],
    LVALERT_OVERSEER_INSTANCES[0]['lvalert_server'],
    ENABLED[SEND_XMPP_ALERTS])
# When a second overseer instance is configured, add a bullet for it too.
if (len(LVALERT_OVERSEER_INSTANCES) == 2):
    IGWN_STUB = '<li><span class="text-monospace">igwn-alert</span> messages to <span class="text-monospace">{0}</span> are {1}</li>'
    IGWN_LIST = IGWN_STUB.format(LVALERT_OVERSEER_INSTANCES[1]['lvalert_server'],
        ENABLED[SEND_XMPP_ALERTS])
    INSTANCE_LIST = INSTANCE_LIST + IGWN_LIST
# Set SciToken accepted audience to server FQDN
SCITOKEN_AUDIENCE = ["https://" + SERVER_FQDN, "https://" + LIGO_FQDN]
...@@ -4,10 +4,13 @@ ...@@ -4,10 +4,13 @@
import socket import socket
from .base import * from .base import *
CONFIG_NAME = "TEST" TIER = "dev"
CONFIG_NAME = "DEV"
# Debug settings # Debug settings
DEBUG = True DEBUG = True
SEND_XMPP_ALERTS=True
SEND_MATTERMOST_ALERTS=True
# Override EMBB email address # Override EMBB email address
# TP (8 Aug 2017): not sure why? # TP (8 Aug 2017): not sure why?
...@@ -25,7 +28,6 @@ MIDDLEWARE += [ ...@@ -25,7 +28,6 @@ MIDDLEWARE += [
# Add to installed apps # Add to installed apps
INSTALLED_APPS += [ INSTALLED_APPS += [
'debug_toolbar', 'debug_toolbar',
#'silk'
] ]
# Add testserver to ALLOWED_HOSTS # Add testserver to ALLOWED_HOSTS
...@@ -48,3 +50,33 @@ if 'silk' in INSTALLED_APPS: ...@@ -48,3 +50,33 @@ if 'silk' in INSTALLED_APPS:
INTERNAL_IPS = [ INTERNAL_IPS = [
INTERNAL_IP_ADDRESS, INTERNAL_IP_ADDRESS,
] ]
# Title shown on the home page for this deployment.
INSTANCE_TITLE = 'GraceDB Development VM'
# Add sub-bullet with igwn-alert group:
if (len(LVALERT_OVERSEER_INSTANCES) == 2):
    igwn_alert_group = os.environ.get('IGWN_ALERT_GROUP', 'lvalert-dev')
    group_sub_bullet = """<ul>
<li> Messages are sent to group: <span class="text-monospace"> {0} </span></li>
</ul>""".format(igwn_alert_group)
    INSTANCE_LIST = INSTANCE_LIST + group_sub_bullet
INSTANCE_INFO = """
<h5>Development Instance</h5>
<hr>
<p>
This GraceDB instance is designed for GraceDB maintainers to develop and
test in the AWS cloud architecture. There is <b>no guarantee</b> that the
behavior of this instance will mimic the production system at any time.
Events and associated data may change or be removed at any time.
</p>
<ul>
{}
<li>Only LIGO logins are provided (no login via InCommon or Google).</li>
</ul>
""".format(INSTANCE_LIST)
# Turn off public page caching for development and testing:
PUBLIC_PAGE_CACHING = 0
# Hardcode pipelines not approved for production (for vm testing)
# UNAPPROVED_PIPELINES += ['aframe', 'GWAK']
...@@ -3,6 +3,7 @@ ...@@ -3,6 +3,7 @@
# from base.py settings) and overrides or adds to them. # from base.py settings) and overrides or adds to them.
from .base import * from .base import *
TIER = "playground"
CONFIG_NAME = "USER TESTING" CONFIG_NAME = "USER TESTING"
# Debug settings # Debug settings
...@@ -15,6 +16,9 @@ EMBB_MAIL_ADDRESS = 'gracedb@{fqdn}'.format(fqdn=SERVER_FQDN) ...@@ -15,6 +16,9 @@ EMBB_MAIL_ADDRESS = 'gracedb@{fqdn}'.format(fqdn=SERVER_FQDN)
# Turn on XMPP alerts # Turn on XMPP alerts
SEND_XMPP_ALERTS = True SEND_XMPP_ALERTS = True
# Turn on Mattermost alerts
SEND_MATTERMOST_ALERTS = True
# Enforce that phone and email alerts are off # Enforce that phone and email alerts are off
SEND_PHONE_ALERTS = False SEND_PHONE_ALERTS = False
SEND_EMAIL_ALERTS = False SEND_EMAIL_ALERTS = False
......
...@@ -3,6 +3,8 @@ ...@@ -3,6 +3,8 @@
# settings) and overrides or adds to them. # settings) and overrides or adds to them.
from .base import * from .base import *
TIER = "production"
DEBUG = False DEBUG = False
# LVAlert Overseer settings # LVAlert Overseer settings
...@@ -17,6 +19,7 @@ LVALERT_OVERSEER_INSTANCES = [ ...@@ -17,6 +19,7 @@ LVALERT_OVERSEER_INSTANCES = [
SEND_XMPP_ALERTS = True SEND_XMPP_ALERTS = True
SEND_PHONE_ALERTS = True SEND_PHONE_ALERTS = True
SEND_EMAIL_ALERTS = True SEND_EMAIL_ALERTS = True
SEND_MATTERMOST_ALERTS = True
# Safety check on debug mode for production # Safety check on debug mode for production
if (DEBUG == True): if (DEBUG == True):
......
from django.conf import settings from django.conf import settings
from django.conf.urls import url, include
from django.urls import re_path, include
from django.contrib import admin from django.contrib import admin
from django.contrib.auth.views import LogoutView from django.contrib.auth.views import LogoutView
...@@ -24,56 +25,54 @@ feeds = { ...@@ -24,56 +25,54 @@ feeds = {
urlpatterns = [ urlpatterns = [
url(r'^$', events.views.index, name="home"), re_path(r'^$', events.views.index, name="home"),
url(r'^navbar_only$', TemplateView.as_view( re_path(r'^navbar_only$', TemplateView.as_view(
template_name='navbar_only.html'), name="navbar-only"), template_name='navbar_only.html'), name="navbar-only"),
url(r'^SPInfo', TemplateView.as_view(template_name='gracedb/spinfo.html'), re_path(r'^SPInfo', TemplateView.as_view(template_name='gracedb/spinfo.html'),
name="spinfo"), name="spinfo"),
url(r'^SPPrivacy', TemplateView.as_view( re_path(r'^SPPrivacy', TemplateView.as_view(
template_name='gracedb/spprivacy.html'), name="spprivacy"), template_name='gracedb/spprivacy.html'), name="spprivacy"),
url(r'^DiscoveryService', TemplateView.as_view( re_path(r'^DiscoveryService', TemplateView.as_view(
template_name='discovery.html'), name="discovery"), template_name='discovery.html'), name="discovery"),
url(r'^events/', include('events.urls')), re_path(r'^events/', include('events.urls')),
url(r'^superevents/', include('superevents.urls')), re_path(r'^superevents/', include('superevents.urls')),
url(r'^alerts/', include('alerts.urls')), re_path(r'^alerts/', include('alerts.urls')),
url(r'^feeds/(?P<url>.*)/$', EventFeed()), re_path(r'^feeds/(?P<url>.*)/$', EventFeed()),
url(r'^feeds/$', feedview, name="feeds"), re_path(r'^feeds/$', feedview, name="feeds"),
url(r'^other/$', TemplateView.as_view(template_name='other.html'), re_path(r'^other/$', TemplateView.as_view(template_name='other.html'),
name='other'), name='other'),
url(r'^performance/$', events.views.performance, name="performance"), re_path(r'^performance/$', events.views.performance, name="performance"),
url(r'^reports/$', events.reports.histo, name="reports"), re_path(r'^reports/$', events.reports.reports_page_context, name="reports"),
url(r'^reports/cbc_report/(?P<format>(json|flex))?$', re_path(r'^latest/$', search.views.latest, name="latest"),
events.reports.cbc_report, name="cbc_report"),
url(r'^latest/$', search.views.latest, name="latest"),
#(r'^reports/(?P<path>.+)$', 'django.views.static.serve', #(r'^reports/(?P<path>.+)$', 'django.views.static.serve',
# {'document_root': settings.LATENCY_REPORT_DEST_DIR}), # {'document_root': settings.LATENCY_REPORT_DEST_DIR}),
url(r'^search/$', search.views.search, name="mainsearch"), re_path(r'^search/$', search.views.search, name="mainsearch"),
# Authentication # Authentication
url(r'^login/$', ShibLoginView.as_view(), name='login'), re_path(r'^login/$', ShibLoginView.as_view(), name='login'),
url(r'^post-login/$', ShibPostLoginView.as_view(), name='post-login'), re_path(r'^post-login/$', ShibPostLoginView.as_view(), name='post-login'),
url(r'^logout/$', LogoutView.as_view(), name='logout'), re_path(r'^logout/$', LogoutView.as_view(), name='logout'),
# Password management # Password management
url('^manage-password/$', manage_password, name='manage-password'), re_path('^manage-password/$', manage_password, name='manage-password'),
# API URLs # API URLs
url(r'^api/', include('api.urls')), re_path(r'^api/', include('api.urls')),
# Legacy API URLs - must be maintained! # Legacy API URLs - must be maintained!
url(r'^apibasic/', include('api.urls', namespace='legacy_apibasic')), re_path(r'^apibasic/', include('api.urls', namespace='legacy_apibasic')),
url(r'^apiweb/', include('api.urls', namespace='legacy_apiweb')), re_path(r'^apiweb/', include('api.urls', namespace='legacy_apiweb')),
# Heartbeat URL # Heartbeat URL
url(r'^heartbeat/$', core.views.heartbeat, name='heartbeat'), re_path(r'^heartbeat/$', core.views.heartbeat, name='heartbeat'),
# Uncomment the admin/doc line below and add 'django.contrib.admindocs' # Uncomment the admin/doc line below and add 'django.contrib.admindocs'
# to INSTALLED_APPS to enable admin documentation: # to INSTALLED_APPS to enable admin documentation:
# (r'^admin/doc/', include('django.contrib.admindocs.urls')), # (r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', admin.site.urls), re_path(r'^admin/', admin.site.urls),
# Sessions # Sessions
url(r'^', include('user_sessions.urls', 'user_sessions')), re_path(r'^', include('user_sessions.urls', 'user_sessions')),
] ]
...@@ -83,12 +82,12 @@ urlpatterns = [ ...@@ -83,12 +82,12 @@ urlpatterns = [
if ('silk' in settings.INSTALLED_APPS): if ('silk' in settings.INSTALLED_APPS):
# Add django-silk # Add django-silk
urlpatterns = [ urlpatterns = [
url(r'^silk/', include('silk.urls', namespace='silk')) re_path(r'^silk/', include('silk.urls', namespace='silk'))
] + urlpatterns ] + urlpatterns
# Add django-debug-toolbar # Add django-debug-toolbar
if settings.DEBUG and 'debug_toolbar' in settings.INSTALLED_APPS: if settings.DEBUG and 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar import debug_toolbar
urlpatterns = [ urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)), re_path(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns ] + urlpatterns
...@@ -23,6 +23,13 @@ ServerName ${DJANGO_PRIMARY_FQDN} ...@@ -23,6 +23,13 @@ ServerName ${DJANGO_PRIMARY_FQDN}
Require all granted Require all granted
</Directory> </Directory>
# Improve proxy behavior with gunicorn:
# https://serverfault.com/questions/206738/intermittent-error-when-using-mod-proxy-to-do-reverse-proxy-to-soap-service#comment1327184_209006
# https://github.com/benoitc/gunicorn/issues/207
SetEnv force-proxy-request-1.0 1
SetEnv proxy-nokeepalive 1
## Custom fragment ## Custom fragment
# gUnicorn edits # gUnicorn edits
Alias /shibboleth-ds/idpselect_config.js /etc/shibboleth-ds/idpselect_config.js Alias /shibboleth-ds/idpselect_config.js /etc/shibboleth-ds/idpselect_config.js
...@@ -40,7 +47,33 @@ ServerName ${DJANGO_PRIMARY_FQDN} ...@@ -40,7 +47,33 @@ ServerName ${DJANGO_PRIMARY_FQDN}
ProxyPass "/static" "!" ProxyPass "/static" "!"
ProxyPass "/documentation" "!" ProxyPass "/documentation" "!"
ProxyPass "/admin_docs" "!" ProxyPass "/admin_docs" "!"
ProxyPass "/" "http://localhost:8080/" ProxyPass "/" "http://localhost:8080/" timeout=120
ProxyPassReverse "/" "http://localhost:8080/"
# This section is for apache2 timeout and keepalive tuning parameters.
# https://ioflood.com/blog/2020/02/21/what-is-apache-keepalive-timeout-how-to-optimize-this-critical-setting/
# KeepAlive will... keep a connection alive for subsequent requests.
# Turn this on.
KeepAlive On
# The maximum number of requests served to a client before terminating the connection.
# This can be large, possibly safely unlimited. (0 = unlimited)
MaxKeepAliveRequests 0
# The number of seconds Apache will wait for a subsequent request before closing the
# connection. Once a request has been received, the timeout value specified by the
# Timeout directive applies. Setting KeepAliveTimeout to a high value may cause
# performance problems in heavily loaded servers. The higher the timeout, the more
# server processes will be kept occupied waiting on connections with idle clients
KeepAliveTimeout 5
# Amount of time the server will wait for certain events before failing a
# request. The TimeOut directive defines the length of time Apache will wait for
# I/O (e.g., when reading data from the client, when writing data to the client, etc.)
# Default: 300s. Try setting this lower, then do a test like a long query with the API
# and in the browser and see what happens.
Timeout 60
# Unset certain headers to help prevent spoofing # Unset certain headers to help prevent spoofing
RequestHeader unset REMOTE_USER RequestHeader unset REMOTE_USER
...@@ -58,6 +91,9 @@ ServerName ${DJANGO_PRIMARY_FQDN} ...@@ -58,6 +91,9 @@ ServerName ${DJANGO_PRIMARY_FQDN}
# Set X_FORWARDED_PROTO to https # Set X_FORWARDED_PROTO to https
RequestHeader set X_FORWARDED_PROTO "https" RequestHeader set X_FORWARDED_PROTO "https"
# Increase the max allowable header size:
LimitRequestFieldSize 16384
# Set up mod_xsendfile for serving static event files as directed by Django # Set up mod_xsendfile for serving static event files as directed by Django
XSendFile On XSendFile On
XSendFilePath /app/db_data/ XSendFilePath /app/db_data/
...@@ -82,7 +118,7 @@ ServerName ${DJANGO_PRIMARY_FQDN} ...@@ -82,7 +118,7 @@ ServerName ${DJANGO_PRIMARY_FQDN}
Require all granted Require all granted
</Directory> </Directory>
Alias /robots.txt /home/gracedb/gracedb_project/static_root/robots.txt Alias /robots.txt /app/gracedb_project/static_root/robots.txt
<Location /Shibboleth.sso> <Location /Shibboleth.sso>
SetHandler shib SetHandler shib
...@@ -119,7 +155,7 @@ ServerName ${DJANGO_PRIMARY_FQDN} ...@@ -119,7 +155,7 @@ ServerName ${DJANGO_PRIMARY_FQDN}
AuthType Shibboleth AuthType Shibboleth
ShibRequestSetting requireSession true ShibRequestSetting requireSession true
ShibUseHeaders On ShibUseHeaders On
Require shib-user tanner.prestegard@LIGO.ORG alexander.pace@LIGO.ORG patrick.brady@LIGO.ORG thomas.downes@LIGO.ORG Require shib-user duncan.meacher@ligo.org alexander.pace@ligo.org daniel.wysocki@ligo.org patrick.brady@ligo.org
</Location> </Location>
</VirtualHost> </VirtualHost>
#!/bin/sh #!/bin/sh
python3 /app/gracedb_project/manage.py update_user_accounts_from_ligo_ldap people python3 /app/gracedb_project/manage.py update_user_accounts_from_ligo_ldap kagra
python3 /app/gracedb_project/manage.py update_user_accounts_from_ligo_ldap ligo
python3 /app/gracedb_project/manage.py update_user_accounts_from_ligo_ldap robots
python3 /app/gracedb_project/manage.py update_catalog_managers_group
python3 /app/gracedb_project/manage.py remove_inactive_alerts python3 /app/gracedb_project/manage.py remove_inactive_alerts
python3 /app/gracedb_project/manage.py clearsessions python3 /app/gracedb_project/manage.py clearsessions 2>&1 | grep -v segment
python3 /app/gracedb_project/manage.py remove_old_mdc_public_perms
PGPASSWORD=$DJANGO_DB_PASSWORD psql -h $DJANGO_DB_HOST -U $DJANGO_DB_USER -c "VACUUM VERBOSE ANALYZE;" $DJANGO_DB_NAME
#!/bin/bash #!/bin/bash
export LVALERT_OVERSEER_RESOURCE=${LVALERT_USER}_overseer_$(python3 -c 'import uuid; print(uuid.uuid4().hex)') export LVALERT_OVERSEER_RESOURCE=${LVALERT_USER}_overseer_$(python3 -c 'import uuid; print(uuid.uuid4().hex)')
# Change the file permissions and ownership on /app/db_data:
chown gracedb:www-data /app/db_data
chmod 755 /app/db_data
## PGA: 2019-10-15: use certs from secrets for Shibboleth SP
# When a SAML cert/key pair is mounted as docker secrets, install it over
# the pair baked into the image and lock down the key for shibd.
SHIB_SP_CERT=/run/secrets/saml_certificate
SHIB_SP_KEY=/run/secrets/saml_private_key
if [[ -f $SHIB_SP_CERT && -f $SHIB_SP_KEY ]]
then
    echo "Using Shibboleth Cert from docker secrets over the image one"
    cp -f $SHIB_SP_CERT /etc/shibboleth/sp-cert.pem
    cp -f $SHIB_SP_KEY /etc/shibboleth/sp-key.pem
    chown _shibd:_shibd /etc/shibboleth/sp-{cert,key}.pem
    chmod 0600 /etc/shibboleth/sp-key.pem
fi
## PGA 2019-10-16: use secrets for sensitive environment variables
# Each name below may exist as a file under /run/secrets/<name>; when it
# does, export its contents as the upper-cased environment variable
# (e.g. /run/secrets/django_db_password -> DJANGO_DB_PASSWORD).
LIST="aws_ses_access_key_id
aws_ses_secret_access_key
django_db_password
django_secret_key
django_twilio_account_sid
django_twilio_auth_token
lvalert_password
igwn_alert_password
gracedb_ldap_keytab
egad_url
egad_api_key
django_sentry_dsn"
for SECRET in $LIST
do
    # Quote the tr character classes so the shell cannot glob-expand them
    # against files in the working directory (ShellCheck SC2060), and quote
    # variable expansions to survive unexpected whitespace (SC2086).
    VARNAME=$(tr '[:lower:]' '[:upper:]' <<< "$SECRET")
    [ -f "/run/secrets/$SECRET" ] && export "$VARNAME"="$(< /run/secrets/$SECRET)"
done
# Kerberos auth for LDAP access: decode the base64-encoded keytab from the
# environment, write it to ./keytab with mode 0600, and obtain a ticket for
# the gracedb LDAP service principal.
# NOTE(review): the original comment said "x509 cert", but this handles a
# Kerberos keytab.
echo "${GRACEDB_LDAP_KEYTAB}" | base64 -d | install -m 0600 /dev/stdin keytab
kinit ldap/gracedb.ligo.org@LIGO.ORG -k -t keytab
exec "$@" exec "$@"
# prefork MPM sizing for the GraceDB container.
# StartServers: number of server processes to start
# MinSpareServers: minimum number of server processes which are kept spare
# MaxSpareServers: maximum number of server processes which are kept spare
# MaxRequestWorkers: maximum number of server processes allowed to start
# MaxConnectionsPerChild: maximum number of requests a server process serves
# NOTE(review): ServerLimit matches MaxRequestWorkers (128); raise both
# together if more concurrency is needed. MaxConnectionsPerChild 0 means
# worker processes are never recycled.
<IfModule mpm_prefork_module>
	StartServers			 5
	MinSpareServers		  5
	MaxSpareServers		 10
	MaxRequestWorkers	  128
	ServerLimit		  128
	MaxConnectionsPerChild   0
</IfModule>
# vim: syntax=apache ts=4 sw=4 sts=4 sr noet