diff --git a/.github/workflows/combine_deploy_image.yml b/.github/workflows/combine_deploy_image.yml new file mode 100644 index 0000000000..f6325b713d --- /dev/null +++ b/.github/workflows/combine_deploy_image.yml @@ -0,0 +1,34 @@ +name: "Update combine_deploy image" + +on: + push: + branches: [master] + paths: + - "deploy/**" + +jobs: + build: + runs-on: ubuntu-latest + steps: + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v1 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-region: us-east-1 + - name: Login to AWS ECR + uses: docker/login-action@v2 + with: + registry: public.ecr.aws + username: ${{ secrets.AWS_ACCESS_KEY_ID }} + password: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + - name: Build combine_deploy + uses: docker/build-push-action@v3 + with: + context: "{{defaultContext}}:deploy" + push: true + tags: public.ecr.aws/thecombine/combine_deploy:latest diff --git a/deploy/ansible/roles/docker_install/defaults/main.yml b/deploy/ansible/roles/docker_install/defaults/main.yml index 037a43f787..bdbc2bb4a9 100644 --- a/deploy/ansible/roles/docker_install/defaults/main.yml +++ b/deploy/ansible/roles/docker_install/defaults/main.yml @@ -2,6 +2,5 @@ install_credential_helper: false credential_helper_version: v0.6.3 -# Kubernetes/minikube is validated on Docker 19.03.x docker_packages: - containerd.io diff --git a/deploy/ansible/roles/ethernet_config/defaults/main.yaml b/deploy/ansible/roles/ethernet_config/defaults/main.yaml deleted file mode 100644 index 1a791c33b6..0000000000 --- a/deploy/ansible/roles/ethernet_config/defaults/main.yaml +++ /dev/null @@ -1,2 +0,0 @@ ---- -eth_optional: no diff --git a/deploy/ansible/roles/ethernet_config/handlers/main.yaml b/deploy/ansible/roles/ethernet_config/handlers/main.yaml deleted file mode 100644 index 6c5e357221..0000000000 --- a/deploy/ansible/roles/ethernet_config/handlers/main.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -- name: Apply netplan - command: /usr/sbin/netplan apply diff --git a/deploy/ansible/roles/ethernet_config/tasks/main.yml b/deploy/ansible/roles/ethernet_config/tasks/main.yml deleted file mode 100644 index 366bd2819e..0000000000 --- a/deploy/ansible/roles/ethernet_config/tasks/main.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -- name: List netplan configuration files - find: - paths: /etc/netplan - patterns: "*.yaml" - when: eth_optional - register: net_config - -- name: Set Ethernet I/F as optional - lineinfile: - path: "{{ item.path }}" - state: present - insertafter: "^ en[a-z]\\d" - line: " optional: true" - when: eth_optional - with_items: "{{ net_config.files }}" - notify: Apply netplan diff --git a/deploy/docker_home/restore_config.sh b/deploy/docker_home/restore_config.sh index e05e814a9b..d33beba068 100755 --- a/deploy/docker_home/restore_config.sh +++ b/deploy/docker_home/restore_config.sh @@ -1,6 +1,6 @@ # restore /etc/hosts entries for combine targets if [ -f "/config/hosts" ] ; then - cp /config/hosts /etc/hosts + cp /config/hosts /etc fi if [ -d "/config/.ssh" ] ; then @@ -10,3 +10,8 @@ fi if [ -d "/config/.kube" ] ; then cp -r /config/.kube ${HOME} fi + +if [ -f "/config/.env" ] ; then + cp /config/.env ${HOME} + . 
${HOME}/.env
+fi
diff --git a/deploy/docker_home/save_config.sh b/deploy/docker_home/save_config.sh
index 3fd39ddbdb..1092e30ad9 100755
--- a/deploy/docker_home/save_config.sh
+++ b/deploy/docker_home/save_config.sh
@@ -8,4 +8,7 @@ if [ -d "/config" ] ; then
   if [ -d "${HOME}/.kube" ] ; then
     cp -r ${HOME}/.kube /config
   fi
+  if [ -f "${HOME}/.env" ] ; then
+    cp ${HOME}/.env /config
+  fi
 fi
diff --git a/deploy/scripts/setup_target.py b/deploy/scripts/setup_target.py
index 4a8192278b..ebf4ebecf8 100755
--- a/deploy/scripts/setup_target.py
+++ b/deploy/scripts/setup_target.py
@@ -17,8 +17,12 @@ def parse_args() -> argparse.Namespace:
     parser.add_argument("ip", help="IPv4 address for the target device.")
     parser.add_argument("name", help="Name of the target device.")
     parser.add_argument(
-        "--user", default="sillsdev", help="Username for ssh connection to the target device."
+        "--target-user",
+        "-t",
+        default="sillsdev",
+        help="Username for ssh connection to the target device.",
     )
+    parser.add_argument("--local-user", "-l", help="Local user for creating ssh keys.")
     parser.add_argument("--hosts", default="/etc/hosts", help="File for host definition.")
     return parser.parse_args()
 
@@ -72,10 +76,34 @@ def main() -> None:
     args = parse_args()
     # Add the target IP and target name to /etc/hosts (or other hosts file)
     update_hosts_file(args.ip, args.name, Path(args.hosts).resolve())
+
+    """
+    Set up the ssh key and copy it to the target.
+
+    Usually this script needs to be run with `sudo` so that the /etc/hosts file can
+    be modified. This results in the key getting set up for the root user instead of
+    the user that invoked the script with `sudo`.
+    The --local-user/-l option is available to generate the key for a local user instead
+    of root. Some things to note are:
+    1. This script switches to the local user with `su` to run the two commands for setting
+       up the ssh key.
+    2. The --session-command option for su needs to be used instead of -c
+       (at least for ssh-copy-id).
+    3. The command needs to be quoted.
+    """
+    if args.local_user is None:
+        cmd_prefix = ""
+        cmd_suffix = ""
+    else:
+        cmd_prefix = f'su {args.local_user} --session-command "'
+        cmd_suffix = '"'
     # Generate ssh keys
-    os.system("ssh-keygen")
+    ssh_cmd = f"{cmd_prefix}ssh-keygen{cmd_suffix}"
+    os.system(ssh_cmd)
     # Copy ssh id to target
-    os.system(f"ssh-copy-id {args.user}@{args.name}")
+    ssh_cmd = f"{cmd_prefix}ssh-copy-id {args.target_user}@{args.name}{cmd_suffix}"
+    print(ssh_cmd)
+    os.system(ssh_cmd)
 
 
 if __name__ == "__main__":
diff --git a/dev-requirements.txt b/dev-requirements.txt
index 32abc48451..cabe2e6030 100644
--- a/dev-requirements.txt
+++ b/dev-requirements.txt
@@ -1,5 +1,5 @@
 #
-# This file is autogenerated by pip-compile with python 3.9
+# This file is autogenerated by pip-compile with python 3.10
 # To update, run:
 #
 #    pip-compile dev-requirements.in
@@ -63,9 +63,7 @@ humanfriendly==10.0
 idna==3.3
     # via requests
 importlib-metadata==4.12.0
-    # via
-    #   markdown
-    #   mkdocs
+    # via mkdocs
 isort==5.10.1
     # via -r dev-requirements.in
 jinja2==3.1.2
@@ -199,9 +197,7 @@ types-requests==2.28.3
 types-urllib3==1.26.16
     # via types-requests
 typing-extensions==4.3.0
-    # via
-    #   black
-    #   mypy
+    # via mypy
 urllib3==1.26.10
     # via
     #   kubernetes
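The pip-compile header above records that the pinned requirements are now generated under Python 3.10. As a minimal
sketch of regenerating the file, assuming `pip-tools` is installed in the project's active virtual environment, the
command comes straight from the file's own header:

```bash
pip install pip-tools            # provides the pip-compile command (assumed not yet installed)
pip-compile dev-requirements.in  # regenerates dev-requirements.txt from dev-requirements.in
```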
diff --git a/docs/deploy/README.md b/docs/deploy/README.md
index e75b6c0ddc..2f25c68f9e 100644
--- a/docs/deploy/README.md
+++ b/docs/deploy/README.md
@@ -2,159 +2,270 @@
 This document describes how to deploy _The Combine_ to a target Kubernetes cluster.
 
-## Assumptions
-
-_The Combine_ is designed to be installed on a server on the internet or an organization's intranet or on a standalone
-PC such as an Intel NUC. The instructions assume that:
-
-1. a server already has Kubernetes installed and that the basic infrastructure and namespaces are already configured;
-   and
-2. a standalone PC is running an up-to-date version of Ubuntu Server with an OpenSSH server running.
-
 ## Conventions
 
-- the term _NUC_ will be used to describe a target that is a standalone PC. It can be any 64-bit Intel Architecture
+- the _host_ machine is the machine that is used to perform the installation. It may be a Linux, Windows, or MacOS
   machine.
-- most of the commands described in this document are to be run from within the `git` repository for _The Combine_ that
-  has been cloned on the host machine. This directory is referred to as \<COMBINE\>.
-- the target machine where _The Combine_ is being installed will be referred to as _\<target\>_
-- the user on the target machine that will be used for installing docker, etc. will be referred to as _\<target_user\>_.
-  You must be able to log in to _\<target\>_ as _\<target_user\>_ and _\<target_user\>_ must have `sudo` privileges.
+- the _target_ machine is the machine where _The Combine_ is to be installed. It shall be referred to as _\<target\>_.
+- some of the commands described in this document are to be run from within the `git` repository for _The Combine_ that
+  has been cloned on the host machine. This directory shall be referred to as `<COMBINE>`.
 
 ## Contents
 
-1. [Step-by-step Instructions](#step-by-step-instructions)
-   1. [Prepare your host system](#prepare-your-host-system)
-      1. [Linux Host](#linux-host)
-   2. [Installing Kubernetes and Initializing Your Cluster](#installing-kubernetes-and-initializing-your-cluster)
-      1. [Minimum System Requirements](#minimum-system-requirements)
-      2. [Installing Kubernetes](#installing-kubernetes)
-   3. [Installing _The Combine_ Helm Charts](#installing-the-combine-helm-charts)
-      1. [Setup](#setup)
-      2. [Install _The Combine_ Cluster](#install-the-combine-cluster)
-   4. [Maintenance Scripts for Kubernetes](#maintenance-scripts-for-kubernetes)
-   5. [Creating Your Own Inventory File](#creating-your-own-inventory-file)
-2. [Automated Backups](#automated-backups)
-3. [Design](#design)
-4. [Install Ubuntu Server](#install-ubuntu-server)
-
-## Step-by-step Instructions
-
-### Prepare your host system
-
-_The Combine_ can be installed on a system that already has Kubernetes installed from any host system type. This is the
-normal case for the QA and Live servers that are managed by the Operations Team. To install _The Combine_ to an
-existing Kubernetes cluster, you will need the following tools:
-
-- Git
-- [kubectl](https://kubernetes.io/docs/tasks/tools/) for examining and modifying your Kubernetes cluster
-- [Helm](https://helm.sh/docs/intro/install/) for installing Helm Charts (Kubernetes Packages)
-- [Docker](https://docs.docker.com/get-docker/) or [Docker Desktop](../../README.md#docker-desktop-for-linux)
-- Python - See the project [README](../../README.md#python) for instructions on how to setup Python and the virtual
-  environment
-- clone the project repo:
+1. [System Design](#system-design)
+2. [Deployment Scenarios](#deployment-scenarios)
+   1. [Development Environment](#development-environment)
+   2. [QA/Production Server](#qaproduction-server)
+   3. [NUC](#nuc)
+3. [Install Ubuntu Server](#install-ubuntu-server)
+4. [Install Kubernetes Engine](#install-kubernetes-engine)
+5. [Setup Kubectl and Environment](#setup-kubectl-and-environment)
+   1. [Setup Kubectl](#setup-kubectl)
+   2. [Setup Environment](#setup-environment)
+6. [Install Helm Charts Required by _The Combine_](#install-helm-charts-required-by-the-combine)
+7. [Install _The Combine_](#install-the-combine)
+8. [Maintenance](#maintenance)
+   1. [Maintenance Scripts for Kubernetes](#maintenance-scripts-for-kubernetes)
+   2. [Checking Certificate Expiration](#checking-certificate-expiration)
+   3. [Creating your own Configurations](#creating-your-own-configurations)
+
+## System Design
+
+_The Combine_ is designed as a collection of Helm charts to be installed on a Kubernetes cluster. _The Combine's_
+Kubernetes resources are described in the design document at
+[./kubernetes_design/README.md](./kubernetes_design/README.md).
+
+## Deployment Scenarios
+
+The tools and methods for deploying _The Combine_ are a function of the type of system you wish to deploy, the
+_deployment scenario_, and the operating system of the host machine.
+
+### Development Environment
+
+The _Development Environment_ scenario is for software developers who need to test changes to the application before
+they are deployed. It allows the developer to deploy _The Combine_ to a local Kubernetes environment that is closer to
+the production environment. The tools and methods for deploying _The Combine_ in a development environment are
+described in the
+[Setup Local Kubernetes Cluster](https://github.com/sillsdev/TheCombine#setup-local-kubernetes-cluster) section of the
+project README.md file.
+
+### QA/Production Server
+
+For _The Combine_, the QA and Production servers are systems where the Kubernetes cluster is created and maintained by
+a separate organization. The characteristics of these systems are:
+
+- The Kubernetes cluster has been created as follows:
+
+  - [cert-manager](https://cert-manager.io/) is installed
+  - an NGINX ingress controller is installed
+  - the namespace `thecombine` is created
+  - the TLS certificate for the server is installed in the `thecombine` namespace as a `kubernetes.io/tls` secret with
+    the name `thecombine-app-tls`
+
+- The QA server has services to log in to a private AWS Elastic Container Registry to run private images for _The
+  Combine_. In contrast, the Production server only runs public images.
+- On the Production server there is an additional namespace, `combine-cert-proxy`.
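+
+If you want to verify these prerequisites before installing, the following is a quick, illustrative check to run from
+your host machine. It assumes that `kubectl` is already configured for the cluster, as described in
+[Setup Kubectl and Environment](#setup-kubectl-and-environment):
+
+```console
+kubectl get namespace thecombine
+kubectl get secret thecombine-app-tls -n thecombine
+```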
+
+#### Tools Required for a QA/Production Server Installation
+
+The host tools required to install _The Combine_ on a QA or Production server are described in
+[Install Kubernetes Tools](https://github.com/sillsdev/TheCombine#install-kubernetes-tools) in the project README.md
+file.
+
+#### Steps to Install on a QA/Production Server
+
+To install _The Combine_ on one of these systems, follow the steps in
+
+- [Setup Kubectl and Environment](#setup-kubectl-and-environment)
+- [Install _The Combine_](#install-the-combine)
+
+### NUC
+
+_The Combine_ is designed to be installed on an _Intel NUC_ or other mini-computer and to operate where no internet is
+available. The installation process assumes that a WiFi interface is available as well as a wired Ethernet interface.
+
+#### Tools Required to Install on a NUC
+
+There are two options for toolsets to install _The Combine_ on a NUC:
+
+##### Locally Installed Tools
+
+Locally installed tools can be used to install from a Linux, MacOS, or Windows Subsystem for Linux host machine. The
+required tools are:
+
+- _The Combine_ source tree
+
+  Clone the repo:
+
   ```bash
-  git clone https://github.com/sillsdev/TheCombine
+  git clone https://github.com/sillsdev/TheCombine.git
   ```
+
-#### Linux Host
+- [Ansible](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#latest-releases-via-apt-ubuntu)
+- Python: See the instructions for installing Python and dependent libraries in the project
+  [README.md](https://github.com/sillsdev/TheCombine#python)
+- [Docker Engine](https://docs.docker.com/engine/install/) or [Docker Desktop](https://docs.docker.com/get-docker/)
-
-Some extra tools are required to setup a machine that does not have an existing Kubernetes cluster. The methods
-described here must be performed on a Linux host.
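+
+Before proceeding, you can confirm that the locally installed tools are available on your `PATH`. This is an
+illustrative check only; the versions reported will vary:
+
+```console
+git --version
+ansible --version
+python3 --version
+docker --version
+```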
+
+##### Install From Docker Image
+
-The extra tools that are needed are:
-
+You can use a Docker image to install _The Combine_ from a host machine running Windows, Linux, or MacOS. The only
+tool that is needed is Docker. You can install either [Docker Engine](https://docs.docker.com/engine/install/) or
+[Docker Desktop](https://docs.docker.com/get-docker/).
+
-- [Ansible](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#latest-releases-via-apt-ubuntu)
-
+Once you have installed _Docker_, pull the `combine_deploy` image. Open a terminal window (PowerShell, Command Prompt,
+or Unix shell) and run:
+
-### Installing Kubernetes and Initializing Your Cluster
-
+```console
+docker pull public.ecr.aws/thecombine/combine_deploy:latest
+```
+
-This section describes how to install Kubernetes and start the Kubernetes cluster on the target system. If you are
-installing _The Combine_ on an existing cluster, skip this section and go to
-[Installing _The Combine_ Helm Charts](#installing-the-combine-helm-charts).
-
+The Docker image contains all the additional tools that are needed. It also has all of the installation scripts, so
+you do not need to clone _The Combine's_ GitHub repo. The disadvantage of using the Docker image is that any changes
+to _The Combine_ configuration files will not be preserved. This is not a concern for most users.
+
-#### Minimum System Requirements
+#### Steps to Install on a NUC
-
-The minimum target system requirements for installing _The Combine_ are:
+
+To install _The Combine_ on one of these systems, follow the steps in
+
-- Ubuntu 20.04 Server operating system (22.04 is recommended). See [Install Ubuntu Server](#install-ubuntu-server).
-- 4 GB RAM
-- 32 GB Storage
+- [Install Ubuntu Server](#install-ubuntu-server)
+- [Install Kubernetes Engine](#install-kubernetes-engine)
+- [Setup Kubectl and Environment](#setup-kubectl-and-environment)
+- [Install Helm Charts Required by _The Combine_](#install-helm-charts-required-by-the-combine)
+- [Install _The Combine_](#install-the-combine)
+
-#### Installing Kubernetes
+## Install Ubuntu Server
-
-This section covers how to install Kubernetes and prepare the cluster for installing _The Combine_. If you are
-installing/upgrading _The Combine_ on the QA server or the Production (or Live) server, skip to the next section. These
-systems are managed and prepared by the Operations Team.
+
+Note: In the instructions below, each step indicates whether the step is to be performed on the Host PC (_[Host]_) or
+the target PC (_[NUC]_).
+
-For the NUCs or other test systems that are managed by the development team, we will install [k3s](https://k3s.io/), a
-lightweight Kubernetes engine from Rancher. When that is installed, we will create the namespaces that are needed for
-_The Combine_.
-
-Note that these steps need to be done from a Linux host machine with Ansible installed.
-
+To install the OS on a new target machine, such as a new NUC, follow these steps:
+
-1. First, setup ssh access to the target if it has not been done already:
-
+1. _[Host]_ Download the ISO image for Ubuntu Server from Ubuntu (currently at <https://ubuntu.com/download/server>;
+   click on _Option 2 - Manual server installation_ and then _Download Ubuntu Server 22.04 LTS_)
+
-   1. If you do not have an ssh key pair, create one using:
-
+2. _[Host]_ Copy the .iso file to a bootable USB stick:
+
-      ```bash
-      ssh-keygen
-      ```
-
+   1. Ubuntu host: Use the _Startup Disk Creator_, or
+   2. Windows host: follow the
+      [tutorial](https://ubuntu.com/tutorials/tutorial-create-a-usb-stick-on-windows#1-overview) on ubuntu.com.
+
-   2. Copy your ssh id to the target system using:
-
+3. _[NUC]_ Connect the NUC to a wired Ethernet network connection, an HDMI display, and a USB keyboard.
+
-      ```bash
-      ssh-copy-id <target_user>@<target>
-      ```
-
+4. _[NUC]_ Boot the NUC from the bootable media and follow the installation instructions. In particular,
+
-2. To install Kubernetes and setup your configuration file for running `kubectl`, run this command from the `deploy`
-   folder in the project:
-
+   1. You will want the installer to format the entire disk. Using LVM is not recommended.
+
-   ```bash
-   ansible-playbook playbook_kube_install.yml --limit <target> -u <target_user> -K
+   2. Profile setup
+
+      The instructions assume the following profile entries during installation:
+
+      | Item             | Value                             |
+      | ---------------- | --------------------------------- |
+      | Your Name        | SIL Language Software Development |
+      | Your Server Name | nuc1, nuc2, or nuc3               |
+      | Pick a username  | sillsdev                          |
+
+      You may choose any name and username that you like. If you use a different server name than one of the three
+      listed, you will need to provide alternate configuration files. See the
+      [Creating your own Configurations](#creating-your-own-configurations) section. This is not recommended when
+      running the installation from a Docker image.
+
+   3. Make sure that you install the OpenSSH server when prompted:
+
+      ![alt text](images/ubuntu-software-selection.png "Ubuntu Server Software Selection")
+
+      In addition, you may have your SSH keys from _GitHub_ or _Launchpad_ preinstalled as authorized keys.
+
+   4. You do not need to install any additional snaps; the _Ansible_ playbooks will install any needed software.
+
+5. _[NUC]_ When installation is complete, log into the NUC using the username and password provided during installation
+   and update all packages:
-   ```
-
-   **Notes:**
-
-   - Do not add the `-K` option if you do not need to enter your password to run `sudo` commands _on the target
-     machine_.
-   - The _\<target\>_ must be listed in `<COMBINE>/deploy/hosts.yml`. If it is not, then you need to create your own
-     inventory file (see [below](#creating-your-own-inventory-file)).
-   - The _\<target\>_ can be a hostname or a group in the inventory file, e.g. `qa`.
-   - Each time you may be prompted for passwords:
-     - `BECOME password` - enter your `sudo` password for the _\<target_user\>_ on the _\<target\>_ machine.
+
+   ```console
+   sudo apt update && sudo apt upgrade -y
+   ```
-
-   When the playbook has finished the installation, it will have installed a `kubectl` configuration file on your host
-   machine in `${HOME}/.kube/<target>/config`.
+
+6. _[NUC]_ Reboot:
+
+   ```console
+   sudo reboot
+   ```
+
+7. _[NUC]_ Look up the IP address for the NUC:
+
-3. Setup the `kubectl` config file for the target for the steps that follow. There are several ways to do this:
-
+   From the NUC, run the command `ip address`. Record the current IP address for the Ethernet interface; the Ethernet
+   interface starts with `en`, followed by a letter and then a digit (`en[a-z][0-9]`).
+
-   1. If you have no other targets that you are working with, copy/move/link the configuration file to `~/.kube/config`
-   2. setup an environment variable to specify the `kubeconfig` file:
-
+8. _[Host]_ Set up your host's connection to the NUC:
+
-      ```bash
-      export KUBECONFIG=~/.kube/<target>/config
-      ```
-
+   - if using the Docker image, open a terminal window and run:
+
-      where `<target>` is the name of the target that was installed, e.g. `nuc1`
-
+     ```console
+     docker run -it -v nuc-config:/config public.ecr.aws/thecombine/combine_deploy:latest
+     setup_target.py <ip_addr> <name>
+     ```
+
-   3. Add `--kubeconfig=~/.kube/<target>/config` to each `helm` and `kubectl` command. The `setup_combine.py` command
-      accepts a `kubeconfig` option as well.
-
+     Where `<ip_addr>` is the IP address found in step 7 and `<name>` is the server name specified when Ubuntu was
+     installed.
+
-4. Install the charts needed for _The Combine_
-
+   - if using local tools, open a terminal window and run:
+
-   From the project directory with an activated _Python_ virtual environment, run:
-
+     ```console
+     cd <COMBINE>/deploy/scripts
+     sudo ./setup_target.py -l <user> <ip_addr> <name>
+     ```
-
-   ```bash
-   python deploy/scripts/setup_cluster.py --type nuc
-   ```
+
+     Where `<ip_addr>` is the IP address found in step 7, `<name>` is the server name specified when Ubuntu was
+     installed, and `<user>` is your current username.
+
+   The `setup_target.py` script will do the following:
+
+   - Add the NUC's IP address to your `/etc/hosts` file
+   - Generate an SSH key for you
+   - Copy your SSH public key to the NUC
+
+   Note that if an SSH key exists, you will have the option to overwrite it or skip the key generation. When your SSH
+   key is copied to the NUC, it will copy the default key, `${HOME}/.ssh/id_rsa.pub`.
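+
+   For illustration, `setup_target.py` simply pairs the NUC's IP address with its name in the hosts file. Assuming a
+   hypothetical address `192.168.1.50` and the server name `nuc1`, the added entry would look like:
+
+   ```console
+   192.168.1.50    nuc1
+   ```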
+
+## Install Kubernetes Engine
+
+This step does more than just install the Kubernetes engine. It performs the following tasks:
+
+- Updates and upgrades all the packages installed on the target;
+- Sets up the WiFi interface as a WiFi Access Point;
+- Configures the network interfaces;
+- Installs `containerd` for managing containers;
+- Installs the `k3s` Kubernetes engine; and
+- Sets up a local configuration file for `kubectl` to access the cluster.
+
-### Installing _The Combine_ Helm Charts
-
+To run this step:
+
-#### Setup
-
-If you do not have a `kubectl` configuration file for the _\<target\>_ system, you need to install it. For the NUCs, it
-is setup automatically by the Ansible playbook run in the previous section.
+- if using the Docker image, open a terminal window and run:
+
+  ```console
+  docker run -it -v nuc-config:/config public.ecr.aws/thecombine/combine_deploy:latest
+  cd ~/ansible
+  ansible-playbook -i hosts playbook_kube_install.yml --limit <target> -u <user> -K -e link_kubeconfig=true
+  ```
+
+- if using local tools, open a terminal window and run:
+
+  ```console
+  cd <COMBINE>/deploy/ansible
+  ansible-playbook -i hosts playbook_kube_install.yml --limit <target> -u <user> -K
+  ```
+
+  Where
+
+  - `<target>` is the server name specified when Ubuntu was installed, e.g. `nuc1`; and
+  - `<user>` is the user name specified when Ubuntu was installed, e.g. `sillsdev`.
+
+## Setup Kubectl and Environment
+
+### Setup Kubectl
+
+If you do not have a `kubectl` configuration file for the `<target>` system, you need to install it. For the NUCs, it
+is set up automatically by the Ansible playbook run in the previous section.
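+
+As a quick, optional check that `kubectl` can reach the NUC's cluster, you can point it at the generated configuration
+and list the cluster's nodes. This sketch assumes the playbook wrote the file to `${HOME}/.kube/nuc1/config`;
+substitute your own server name:
+
+```console
+export KUBECONFIG=${HOME}/.kube/nuc1/config
+kubectl get nodes
+```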
 For the Production or QA server,
@@ -162,187 +273,205 @@ created by the operations group.
 2. Copy your `kubectl` configuration to the clipboard and paste it into a file on your host machine, e.g.
    `${HOME}/.kube/prod/config` for the production server.
-3. Setup the following environment variables:
-
-   - AWS_ACCOUNT
-   - AWS_DEFAULT_REGION
-   - AWS_ACCESS_KEY_ID
-   - AWS_SECRET_ACCESS_KEY
-   - COMBINE_JWT_SECRET_KEY
-   - COMBINE_SMTP_USERNAME
-   - COMBINE_SMTP_PASSWORD
+
+### Setup Environment
-
-   These can be set in your `.profile` (Linux or Mac 10.14-), your `.zprofile` (Mac 10.15+), or the _System_ app
-   (Windows). If you are a member of the development team and need the environment variable values, send a request
-   explaining your need to [admin@thecombine.app](mailto:admin@thecombine.app).
+
+The setup scripts require the following environment variables to be set:
-
-4. Set the KUBECONFIG environment variable to the location of the `kubectl` configuration file. (This is not necessary
-   if the configuration file is at `${HOME}/.kube/config`.)
+
+- AWS_ACCOUNT
+- AWS_DEFAULT_REGION
+- AWS_ACCESS_KEY_ID
+- AWS_SECRET_ACCESS_KEY
+- COMBINE_JWT_SECRET_KEY
+- COMBINE_SMTP_USERNAME
+- COMBINE_SMTP_PASSWORD
+- COMBINE_ADMIN_USERNAME
+- COMBINE_ADMIN_PASSWORD
+- COMBINE_ADMIN_EMAIL
+
-#### Install _The Combine_ Cluster
-
+You may also set the KUBECONFIG environment variable to the location of the `kubectl` configuration file. This is not
+necessary if the configuration file is at `${HOME}/.kube/config`.
+
-To install/upgrade _The Combine_ change directory to the project root directory and run the following command within
-your Python virtual environment:
-
+If using local tools, these can be set in your `.profile` (Linux or Mac 10.14-), your `.zprofile` (Mac 10.15+), or the
+_System_ app (Windows).
+
-```bash
-python deploy/scripts/setup_combine.py
-```
-
+If using the Docker image:
+
-Notes:
-
+1. Start the `combine_deploy` image:
+
-- You will be prompted for the _target_ where _The Combine_ is to be installed as well as version to install. The
-  version is the Docker image tag in the AWS ECR image repository. The standard releases are tagged with the version
-  number, e.g. _0.7.15_.
+   ```console
+   docker run -it -v nuc-config:/config public.ecr.aws/thecombine/combine_deploy:latest
+   ```
+
-- The _target_ must be one listed in `<COMBINE>/deploy/scripts/setup_files/config.yaml`.
+2. In the docker image terminal window, run:
+
-- Run `python deploy/scripts/setup_combine.py --help` for additional options such as specifying a different
-  configuration file for additional targets.
+   ```console
+   nano ~/.env
+   ```
+
-### Maintenance Scripts for Kubernetes
-
+3. Enter the variable definitions using the form:
+
-There are several maintenance scripts that can be run in the kubernetes cluster:
-
-- `combine-backup-job.sh` - performs a backup of _The Combine_ database and backend files, pushes the backup to AWS S3
-  storage and then removes old backups keeping the latest 3 backups.
-- `combine_backup.py` - just performs the backup and pushes the result to AWS S3 storage.
-- `combine-clean-aws.py` - removes the oldest backups, keeping up to `max_backups`. The default for `max_backups` is 3.
-- `combine_restore.py` - restores _The Combine_ database and backend files from one of the backups in AWS S3 storage.
-
-The `combine-backup-job.sh` is currently being run daily on _The Combine_ as a Kubernetes CronJob.
+ ```config + export VARIABLE=VALUE + ``` -In addition to the daily backup, any of the scripts can be run on-demand using the `kubectl` command. Using the -`kubectl` command takes the form: +4. Enter `Ctrl-X` to exit and save the changes. +5. Apply the definitions to the current session by running: -```bash -kubectl [--kubeconfig=