Skip to content

Commit

Permalink
Merge branch 'master' into signalr
Browse files Browse the repository at this point in the history
  • Loading branch information
imnasnainaec authored Nov 24, 2020
2 parents 61a6511 + 16f08d4 commit 4278b21
Show file tree
Hide file tree
Showing 16 changed files with 369 additions and 22 deletions.
2 changes: 1 addition & 1 deletion deploy/roles/aws_access/defaults/main.yml
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
---
aws_config_dir: "/home/{{ combine_user }}/.aws"
aws_upload_profile: upload
aws_backup_profile: upload
aws_download_profile: download
2 changes: 1 addition & 1 deletion deploy/roles/combine_backup/templates/combine-backup.j2
Original file line number Diff line number Diff line change
Expand Up @@ -38,5 +38,5 @@ done
# need to specify full path because $PATH does not contain
# /usr/local/bin when run as a cron job
if [ -e /usr/local/bin/aws ] ; then
/usr/local/bin/aws s3 cp ${BACKUP_FILE} ${AWS_FILE} --profile {{ aws_upload_profile }}
/usr/local/bin/aws s3 cp ${BACKUP_FILE} ${AWS_FILE} --profile {{ aws_backup_profile }}
fi
10 changes: 6 additions & 4 deletions docker_deploy/group_vars/qa/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,9 @@ cert_email: ""
aws_user: "{{ combine_user }}"
aws_group: "{{ combine_group }}"

# my_default_aws_profile: ecr_read_only
# my_aws_profiles:
# - ecr_read_write
# - s3_read_write
aws_backup_profile: s3_read_write
aws_ecr_profile: ecr_read_write

my_aws_profiles:
- "{{ aws_backup_profile }}"
- "{{ aws_ecr_profile }}"
11 changes: 9 additions & 2 deletions docker_deploy/group_vars/server/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,13 @@ aws_group: "{{ combine_group }}"
# - store backups
# ECR:
# - install container images
aws_backup_profile: s3_read_write
aws_ecr_profile: ecr_read_only

my_aws_profiles:
- s3_read_write
- ecr_read_only
- "{{ aws_backup_profile }}"
- "{{ aws_ecr_profile }}"

# Define backup times (UTC)
backup_hour: "3"
backup_minute: "15"
4 changes: 4 additions & 0 deletions docker_deploy/playbook_target_setup.yml
Original file line number Diff line number Diff line change
Expand Up @@ -46,3 +46,7 @@
name: aws_access
tags:
- aws

- name: setup container backups
import_role:
name: combine_backup
8 changes: 8 additions & 0 deletions docker_deploy/roles/combine_backup/defaults/main.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
---
# Default variables for the combine_backup role.

# Backup schedule (UTC).  Leave both undefined to disable the cron job;
# the "schedule regular backups" task only runs when both are defined.
# backup_hour: 7
# backup_minute: 15

# S3 bucket/prefix where backup tarballs are uploaded.
aws_s3_backup_loc: thecombine.app/backups

# Sub-directory (under the backup working directory) holding the files
# copied out of the backend container.
backend_files_subdir: ".CombineFiles"
# Sub-directory holding the mongodump output from the database container.
mongo_files_subdir: "dump"
39 changes: 39 additions & 0 deletions docker_deploy/roles/combine_backup/tasks/main.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
---

#######################################################
#
# Install and configure the backup script for the docker
# containers

- name: create folders for backups
  file:
    name: "{{ item }}"
    owner: "{{ combine_user }}"
    group: "{{ combine_group }}"
    # Quoted so YAML cannot reinterpret the octal literal as a decimal.
    mode: "0755"
    state: directory
  with_items:
    - "{{ combine_backup_dir }}"
    - "{{ combine_app_dir }}/bin"

- name: install backup/restore scripts
  template:
    src: "{{ item }}.j2"
    dest: "{{ combine_app_dir }}/bin/{{ item }}"
    owner: "{{ combine_user }}"
    group: "{{ combine_group }}"
    mode: "0755"
  with_items:
    - combine-backup
    - combine-restore

# Only scheduled when a backup time is configured (see the role defaults,
# where backup_hour/backup_minute are commented out by default).
- name: schedule regular backups
  cron:
    name: combine daily backup
    job: "{{ combine_app_dir }}/bin/combine-backup"
    user: "{{ combine_user }}"
    hour: "{{ backup_hour }}"
    minute: "{{ backup_minute }}"
  when:
    - backup_hour is defined
    - backup_minute is defined
117 changes: 117 additions & 0 deletions docker_deploy/roles/combine_backup/templates/combine-backup.j2
Original file line number Diff line number Diff line change
@@ -0,0 +1,117 @@
#!/bin/bash

######################################################
# Backup job for docker containers running TheCombine
# (Jinja2 template rendered by the combine_backup role)
######################################################

# Abort on the first failing command so a partial backup is never uploaded.
set -e

# Progress messages are suppressed unless -v/--verbose is given.
VERBOSE="0"

# Print a progress message, but only when -v/--verbose was given.
echo_verbose() {
  if [ "${VERBOSE}" == "1" ] ; then
    # Quote the arguments so whitespace and glob characters in the
    # message pass through unchanged (the original unquoted `echo $@`
    # word-split and glob-expanded them).
    echo "$@"
  fi
}

# Print command-line help to stdout (here-doc; $0 expands to the script path).
usage() {
cat <<USAGE
Usage: $0 [-h] [-v]
Creates a backup of the combine database and backend containers
Options:
-h: help - print this message
-v: verbose - print progress of backup
USAGE
}

# Option parsing: -h/--help prints usage, -v/--verbose enables progress
# messages; any other single-letter option or argument is rejected.
# No option takes a value, so a simple for-loop over "$@" suffices.
for opt in "$@" ; do
  case "${opt}" in
    -h|--help)
      usage
      exit 0
      ;;
    -v|--verbose)
      VERBOSE="1"
      ;;
    -?)
      # Glob pattern: '-' followed by exactly one character, i.e. any
      # unknown single-letter option.
      echo "Invalid option: ${opt}."
      usage
      exit 2
      ;;
    *)
      echo "Unrecognized argument: ${opt}"
      usage
      exit 1
      ;;
  esac
done

echo_verbose "1. Setup some useful environment variables"
# Timestamp embedded in the S3 object name so successive backups do not
# overwrite each other.
DATE_STR=`date +%Y-%m-%d-%H-%M-%S`
COMBINE_HOST="{{ combine_server_name | replace('.', '-') }}"
BACKUP_FILE="combine-backup.tar.gz"
AWS_FILE="s3://{{ aws_s3_backup_loc }}/${COMBINE_HOST}-${DATE_STR}.tar.gz"
# Rendered defaults; both directories may be overridden from the environment.
COMBINE_APP_DIR=${COMBINE_APP_DIR:="{{ combine_app_dir }}"}
BACKUP_DIR=${BACKUP_DIR:="{{ combine_backup_dir }}"}
BACKEND_FILES_SUBDIR="{{ backend_files_subdir }}"
DB_FILES_SUBDIR="{{ mongo_files_subdir }}"

cd ${COMBINE_APP_DIR}

echo_verbose "2. Prepare the backup directory"
if [ ! -e "${BACKUP_DIR}" ] ; then
mkdir -p ${BACKUP_DIR}
else
# Remove leftovers from a previous backup run so stale files cannot end
# up in the new tarball.
for item in ${BACKUP_DIR}/${DB_FILES_SUBDIR} ${BACKUP_DIR}/${BACKEND_FILES_SUBDIR} ${BACKUP_DIR}/${BACKUP_FILE}
do
if [ -e "${item}" ] ; then
rm -rf ${item}
fi
done
fi

echo_verbose "3. Stop the current containers"
docker-compose down

echo_verbose "4. Start up just the backend and the database"
# Log docker in to AWS ECR (password read from stdin) so images can be
# pulled, then start only the two containers the backup needs.
aws ecr get-login-password --profile {{ aws_ecr_profile }} | docker login --username AWS --password-stdin {{ aws_ecr }}
docker-compose up --detach database backend


echo_verbose "5. Dump the database"
# mongodump writes its output inside the database container; it is
# copied out to ${BACKUP_DIR} below.
docker-compose exec database mongodump --db CombineDatabase --gzip --quiet
# NOTE(review): extracting the container name with `docker ps | grep`
# is fragile if more than one name matches "database" — confirm.
DB_CONTAINER=`docker ps | grep database | sed "s/.* \([^ ][^ ]*\)$/\1/"`
docker cp ${DB_CONTAINER}:${DB_FILES_SUBDIR}/ ${BACKUP_DIR}

echo_verbose "6. Copy the backend files (commands are run relative the 'app' user's home directory)"
BE_CONTAINER=`docker ps | grep backend | sed "s/.* \([^ ][^ ]*\)$/\1/"`
docker cp ${BE_CONTAINER}:/home/app/${BACKEND_FILES_SUBDIR}/ ${BACKUP_DIR}

echo_verbose "7. create the tarball for the backup"
cd ${BACKUP_DIR}
tar --create --file=${BACKUP_FILE} --gzip --verbose ${BACKEND_FILES_SUBDIR} ${DB_FILES_SUBDIR}

echo_verbose "8. Remove old backup files"
# Running in ${BACKUP_DIR}
# Delete the intermediate directories and any older local tarballs,
# keeping only the tarball just created.
rm -rf ${BACKEND_FILES_SUBDIR} ${DB_FILES_SUBDIR}
ALL_BACKUPS=(`ls combine-backup*.tar.gz`)

for bu in "${ALL_BACKUPS[@]}"; do
if [ "$bu" != "$BACKUP_FILE" ] ; then
echo "Removing $bu"
rm $bu
fi
done

echo_verbose "9. push backup to AWS S3 storage"
# need to specify full path because $PATH does not contain
# /usr/local/bin when run as a cron job
/usr/local/bin/aws s3 cp ${BACKUP_FILE} ${AWS_FILE} --profile {{ aws_backup_profile }}

echo_verbose "10. Restart the containers"
cd ${COMBINE_APP_DIR}
docker-compose down
docker-compose up --detach
174 changes: 174 additions & 0 deletions docker_deploy/roles/combine_backup/templates/combine-restore.j2
Original file line number Diff line number Diff line change
@@ -0,0 +1,174 @@
#!/bin/bash

#########################################################
# Restore script for docker containers running TheCombine
# (Jinja2 template rendered by the combine_backup role)
#########################################################

# Abort on the first failing command.
set -e

# -v/--verbose enables progress messages.
VERBOSE="0"
# Name of the backup tarball in S3 to restore (positional argument);
# empty means "list the available backups and prompt the user".
BACKUP=""
# -c/--clean wipes the backend's existing files before restoring.
CLEAN="0"

# Print a progress message, but only when -v/--verbose was given.
echo_verbose() {
  if [ "${VERBOSE}" == "1" ] ; then
    # Quote the arguments so whitespace and glob characters in the
    # message pass through unchanged (the original unquoted `echo $@`
    # word-split and glob-expanded them).
    echo "$@"
  fi
}

# Print command-line help to stdout (here-doc; $0 expands to the script path).
usage() {
cat <<USAGE
Usage: $0 [-h] [-v] [backup_file]
Restores the combine database and backend containers from the specified
backup on AWS. If the backup is not specified, the currently available
backups are listed.
Options:
-c, --clean:
remove backend files in /home/app/.CombineFiles in the backend
container. Normally, the files are restored on top of the existing
backend files.
-h, --help:
print this message
-v: verbose
print progress of backup
USAGE
}

# Parse command-line options; any non-option argument is taken as the
# name of the backup file to restore.
while [[ $# -gt 0 ]] ; do
arg="$1"

case ${arg} in
-h|--help)
usage
exit 0
;;
-v|--verbose)
VERBOSE="1"
;;
-c|--clean)
CLEAN="1"
;;
-?)
# Glob pattern: '-' followed by exactly one character, i.e. any
# unknown single-letter option.
echo "Invalid option: ${arg}."
usage
exit 1
;;
*)
# Strip any leading path components; only the object name within
# the S3 backup location is kept.
BACKUP=`basename $arg`
;;
esac
shift
done

echo_verbose "1. Prepare for the restore"

COMBINE_HOST={{ ansible_hostname }}
RESTORE_FILE="combine-backup.tar.gz"
AWS_BACKUPS="s3://{{ aws_s3_backup_loc }}"
# Rendered defaults; both directories may be overridden from the environment.
COMBINE_APP_DIR=${COMBINE_APP_DIR:="{{ combine_app_dir }}"}
RESTORE_DIR=${RESTORE_DIR:="{{ combine_restore_dir }}"}
BACKEND_FILES_SUBDIR="{{ backend_files_subdir }}"
DB_FILES_SUBDIR="{{ mongo_files_subdir }}"

cd ${COMBINE_APP_DIR}

echo_verbose "2. Prepare the restore directory"
if [ ! -e "${RESTORE_DIR}" ] ; then
mkdir -p ${RESTORE_DIR}
else
# Remove leftovers from a previous restore so stale files are not mixed
# into this one.
for item in ${RESTORE_DIR}/${DB_FILES_SUBDIR} ${RESTORE_DIR}/${BACKEND_FILES_SUBDIR} ${RESTORE_DIR}/${RESTORE_FILE}
do
if [ -e "${item}" ] ; then
rm -rf ${item}
fi
done
fi

# Require aws-cli v2: parse the major version out of `aws --version` and
# bail out (distinct exit codes) if it is missing or the wrong version.
if [ ! -x /usr/local/bin/aws ] ; then
echo "aws-cli v2 is not installed."
exit 2
else
AWS_VER=`/usr/local/bin/aws --version`
aws_ver_pattern='aws-cli/([0-9][0-9]*).*'
if [[ $AWS_VER =~ $aws_ver_pattern ]] ; then
# First capture group is the major version number.
AWS_MAJ_VERSION=${BASH_REMATCH[1]}
if [ "${AWS_MAJ_VERSION}" != "2" ] ; then
echo "aws-cli is installed but is not version 2:"
echo "${AWS_VER}"
exit 3
fi
else
echo "Cannot determine the AWS version. :-("
exit 4
fi
fi

# No backup named on the command line: list what is available in S3 and
# let the user pick one interactively.
if [ -z "${BACKUP}" ] ; then
aws_backup_list=( $(/usr/local/bin/aws s3 ls ${AWS_BACKUPS} --recursive --profile {{ aws_backup_profile }} | sed "s/.* backups\///") )
# The Jinja2 escape below renders to the shell expansion for the length
# of aws_backup_list; the raw text would otherwise begin a Jinja2 comment.
aws_backups_available=${{ '{#' }}aws_backup_list[@]}
if [[ $aws_backups_available -eq 0 ]] ; then
echo "No backups available from ${AWS_BACKUPS}"
exit 0
fi
echo "Backup List:"
# Present a 1-based menu of the available backups.
for key in "${!aws_backup_list[@]}" ; do
index=$((key+1))
echo -e "\t$index: ${aws_backup_list[$key]}"
done
read -p "Enter the number of the backup you would like to restore (0 = None):" backup_num
if [ -z "$backup_num" ] || [ "$backup_num" == "0" ] ; then
echo "No backup selected. Exiting."
exit 0
fi
# Reject non-numeric or out-of-range selections.
num_re='^[0-9]+$'
if ! [[ $backup_num =~ $num_re ]] || [[ $backup_num -gt ${aws_backups_available} ]]; then
echo "Invalid selection"
exit 5
fi
key=$((backup_num-1))
BACKUP=${aws_backup_list[$key]}
fi
echo "BACKUP == '${BACKUP}'"

echo_verbose "3. Fetch the selected backup, ${BACKUP}"
AWS_FILE="${AWS_BACKUPS}/${BACKUP}"
/usr/local/bin/aws s3 cp ${AWS_FILE} ${RESTORE_DIR}/${RESTORE_FILE} --profile {{ aws_backup_profile }}

echo_verbose "4. Unpack the backup"
tar xzvf ${RESTORE_DIR}/${RESTORE_FILE} -C ${RESTORE_DIR}

# BUG FIX: this line previously read `echo verbose "5. ..."`, which
# printed the message unconditionally (with a stray "verbose" prefix)
# instead of calling the echo_verbose helper.
echo_verbose "5. Stop the current containers"
docker-compose down

echo_verbose "6. Start up just the backend and the database"
# Log docker in to AWS ECR (password read from stdin) so images can be pulled.
aws ecr get-login-password --profile {{ aws_ecr_profile }} | docker login --username AWS --password-stdin {{ aws_ecr }}
docker-compose up --detach database backend


echo_verbose "7. Restore the database"
# NOTE(review): extracting the container name with `docker ps | grep`
# is fragile if more than one name matches "database" — confirm.
DB_CONTAINER=`docker ps | grep database | sed "s/.* \([^ ][^ ]*\)$/\1/"`
docker cp ${RESTORE_DIR}/${DB_FILES_SUBDIR}/ ${DB_CONTAINER}:${DB_FILES_SUBDIR}
docker-compose exec database mongorestore --drop --gzip --quiet
# Remove the dump files from the container once the restore is done.
docker-compose exec database rm -rf ${DB_FILES_SUBDIR}

echo_verbose "8. Copy the backend files"
# if CLEAN is set, delete the existing files
if [ "$CLEAN" == "1" ] ; then
# we run the rm command inside a bash shell so that the shell will do wildcard
# expansion
docker-compose exec --user root --workdir /home/app/${BACKEND_FILES_SUBDIR} backend /bin/bash -c "rm -rf *"
fi
BE_CONTAINER=`docker ps | grep backend | sed "s/.* \([^ ][^ ]*\)$/\1/"`
docker cp ${RESTORE_DIR}/${BACKEND_FILES_SUBDIR}/ ${BE_CONTAINER}:/home/app
# change permissions for the copied files. Since the tarball is created outside
# of the container, the app user will not be the owner (the backend process is
# running as "app"). In addition, it is possible that the backup is from a
# different host with different UIDs.
docker-compose exec --user root backend find /home/app/${BACKEND_FILES_SUBDIR} -exec chown app:app {} \;

echo_verbose "9. Cleanup Restore files"
rm -rf ${RESTORE_DIR}

echo_verbose "10. Restart the containers"
docker-compose down
docker-compose up --detach
Loading

0 comments on commit 4278b21

Please sign in to comment.