diff --git a/.binder/README.md b/.binder/README.md
new file mode 100644
index 0000000000..5914e4391b
--- /dev/null
+++ b/.binder/README.md
@@ -0,0 +1,3 @@
+# What is this folder about?
+
+It contains the dependency information required by [a notebook](doc/ntbk/draw_function.ipynb) that [we reference](doc/source/cost.rst) in our documentation to run on mybinder.org. Thanks to this, all it takes is the click of [a link](http://mybinder.org/v2/gh/jupyterhub/zero-to-jupyterhub-k8s/master?filepath=doc/ntbk/draw_function.ipynb).
diff --git a/binder/environment.yml b/.binder/environment.yml
similarity index 63%
rename from binder/environment.yml
rename to .binder/environment.yml
index e00b478e8f..7838b30cb0
--- a/binder/environment.yml
+++ b/.binder/environment.yml
@@ -1,6 +1,3 @@
-# Dependencies for the guides cloud cost estimator notebook
-# http://mybinder.org/v2/gh/jupyterhub/zero-to-jupyterhub-k8s/master?filepath=doc/ntbk/draw_function.ipynb
-
 # Need to install bqplot with conda so it calls the javascript extension.
 name: bqplot
 channels:
diff --git a/.circleci/README.md b/.circleci/README.md
new file mode 100644
index 0000000000..882bb52d92
--- /dev/null
+++ b/.circleci/README.md
@@ -0,0 +1,8 @@
+# What is this folder about?
+
+We use CircleCI to build documentation previews for PRs, as configured through
+[.circleci/config.yml](config.yml). This allows us to easily preview documentation
+changes in a PR in their final form before the PR is merged.
+
+When a PR is merged, [readthedocs.yml](readthedocs.yml) will help ReadTheDocs
+build and publish it on https://z2jh.jupyter.org.
diff --git a/.circleci/config.yml b/.circleci/config.yml
index af5a7e5587..c79ebbced7 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -1,6 +1,3 @@
-# This CircleCI build lets us preview the documentation inside PRs before they
-# are merged! And when they are, the readthedocs.yml file will help ReadTheDocs
-# build and publish it on https://z2jh.jupyter.org.
 version: 2
 jobs:
   build_docs:
diff --git a/.gitignore b/.gitignore
index c3bbe92c58..509d2f3d9d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,8 +1,9 @@
 ### Zero to JupyterHub Kubernetes ###
 tools/templates/rendered-templates/
 bin/
-ci/.vagrant
+.vagrant/
 tools/github.sqlite
+ci/daemonset-calico-node.yaml

 .vscode
diff --git a/.travis.yml b/.travis.yml
index cea1c19bc1..150c552f9c 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -3,28 +3,81 @@ language: python
 python:
   - 3.6
 git:
+  ## depth: false overrides Travis' default behavior of using shallow clones
+  ## with depth 50, which can cause issues
+  ##
+  ## ref: https://github.com/jupyterhub/chartpress#shallow-clones
+  ##
   depth: false
 services:
   - docker
+## stages declares and orders stages
+##
+## ref: https://docs.travis-ci.com/user/build-stages/#build-stages-and-deployments
+##
+stages:
+  - name: lint and validate
+  - name: test
+  - name: publish
+    ## an if condition gives us control over whether the stage should run
+    ##
+    ## ref: https://docs.travis-ci.com/user/conditions-v1
+    ##
+    if: >
+      branch in (master) AND
+      type in (push)
 install:
-  - ./ci/docker-fixes.sh
   - pip3 install --no-cache-dir -r dev-requirements.txt
-  - . "ci/${RUNNER}-${SCENARIO}.env"
-  - ./ci/install-${RUNNER}.sh
-script:
-  - ./ci/travis-script.sh
+  - . 
ci/common ci +stage: test +script: + - setup_kubectl + - setup_kind + - setup_helm + - ./ci/travis-docker-fix + - ./ci/start-k8s + - ./ci/upgrade + - ./ci/test env: - # Different scenarios described in - # /ci/minikube-${SCENARIO}.env - matrix: - - SCENARIO=1.13-default RUNNER=minikube - - SCENARIO=1.12-netpol RUNNER=minikube - - SCENARIO=1.11-default RUNNER=minikube - - SCENARIO=1.15-default RUNNER=kind - - SCENARIO=1.14-default RUNNER=kind - - SCENARIO=1.13-default RUNNER=kind - - SCENARIO=1.12-default RUNNER=kind - global: - - secure: jpFpbMccpjGP+otWH2Z03VFdtR9AAu2vzrNxsoZ3IvJvrO4MfzYJ3uSCDQuB0NG9gBgaAscpTJtliPTEi7njXHLcsFeWXLUmeBEHLozYxfzDQzMvW3EYdNWcC7oVAAt3de0i0ojw9rGswiofhbu2dAe+Xd2bejv1+PVJcEC3SRPGy17kb6bme6gD3zty5ft4VpzP0nomUNqfZBRLUYxSZuKlHJaZ6Nuq434rKmXrcN6uy+eEWDorTbjyM22IIYgUmrhg++Qtu/MBR7/rriPhyRltCU14361bcxqyq2Hw+HNG8D3hsqo5TiEiYwxOQcXRgddL+Ci6/y0L1EvqOQc+1V8ycwNs2oNicwNgSn5A+9HpF495Kae039hGtj2Gpt4IbplSYwKFq/sFTq+CekxdD2YVQmGvsjep4bNVL66o2RSZVAW1Bg/G8/sSe3BwgD8IToy9+1NHPPuaVupeukRqNyUDcVvWH8hdb8AkXYY87+546etYDpn91GQnhTEberKbXX4UCmpKNXpXoprLE8nQLGb6TIoHPTyA+RRNQ4erDzMjqF43UVmhOZTtkGaRgIWK7vDAKpLUnuOguuhJUNpYpRggGQsMV8cZnaCumy5OFUf6i6rfN0Ru6a+/Bm7grJiAcnZlU7igaxgI38QaJgCKcqqzIImdcRYNQC74/Ok/1oM= - - secure: BK++GwKVPoS0iG8aB7wQ13daTgJR9MifHA+l9xr/tSZ3SUL6nc7kjxLbliRQJCqT9lcOODsd+v2u9PziEzBp0CCh67ftFxJw8riP2+FgdmHTK4yav9QpSwoBJHhV2SgBMGlXiqdUVC7wpgjzzK63V8abvzAhXkthWPl3kYpUI//xGYyuBNXVHEOImHB3F1M5bn90lflFtRfq2iH5FigGesMi2BFfTVeqvbzZVZrAs0E1/NRdO+/cRq0c9aRpNLkh254k1tcKbUvULQq1iLQuHN2Ramn3NgNnx93sbwp1e7ZjmETbjr9cwMIDg5mh25H0Rjf2Nn8cqHbBCWzoMkjZW097HRVDYht2kJZQIbQcaxX38DW6vykUwGWSBAWbtvCUwYwU57s/dIbSYUTQErkYYmhiq52cdOtnxZ2/ULoElCVyR8lTmQuANJrq9YFC9q1ly69YuMWWnFgwxWpK1JCgAJGELgj5EvcghEtNmkEFh5f6pmbKBE7PKQPTovzNKcdRauR/L+MsmhVYukCfNZq57LrruIQIX1GQNw9w3Ck8P4EPtNjdI4umCSy6nZSyTevWgVTmIP9EwXa5Cap32ZU+iDtw+wUBAr3sjROJOYGKlL/ktWsWbjog5hIG0rrb8PbgOfbLRZSEYGL9sYsyXXyW5oI37lB7AqG6D7vOA4TdmTQ= + ## NOTE: The environment variables will be expanded to multiple jobs. For + ## additional individual jobs, only the first entry is used. + ## + ## ref: https://docs.travis-ci.com/user/build-stages/#build-stages-and-build-matrix-expansion + ## + ## + ## KUBE_VERSION should match a released kindest/node image tag, but they are + ## currently not automatically published. 
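+  ## For example, KUBE_VERSION=1.15.3 below relies on a published
+  ## kindest/node:v1.15.3 image.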
+ ## + ## ref: https://hub.docker.com/r/kindest/node/tags + ## ref: https://github.com/kubernetes-sigs/kind/issues/197 + ## + # - KUBE_VERSION=1.16 + - KUBE_VERSION=1.15.3 + - KUBE_VERSION=1.14.6 + - KUBE_VERSION=1.13.10 + - KUBE_VERSION=1.12.10 + +jobs: + ## include additional individual jobs + ## + include: + - stage: lint and validate + script: + - setup_helm + - setup_kubeval + - python3 tools/templates/lint-and-validate.py --kubernetes-versions $LINT_KUBE_VERSIONS + env: [] + - stage: publish + script: + - setup_helm + - setup_git_crypt + - ./ci/travis-docker-fix + - ./ci/publish + env: + ## encrypted environment variables, used on push to master in the + ## publish script to in turn decrypt a SSH key + ## + ## ref: https://docs.travis-ci.com/user/environment-variables/#encrypting-environment-variables + - secure: jpFpbMccpjGP+otWH2Z03VFdtR9AAu2vzrNxsoZ3IvJvrO4MfzYJ3uSCDQuB0NG9gBgaAscpTJtliPTEi7njXHLcsFeWXLUmeBEHLozYxfzDQzMvW3EYdNWcC7oVAAt3de0i0ojw9rGswiofhbu2dAe+Xd2bejv1+PVJcEC3SRPGy17kb6bme6gD3zty5ft4VpzP0nomUNqfZBRLUYxSZuKlHJaZ6Nuq434rKmXrcN6uy+eEWDorTbjyM22IIYgUmrhg++Qtu/MBR7/rriPhyRltCU14361bcxqyq2Hw+HNG8D3hsqo5TiEiYwxOQcXRgddL+Ci6/y0L1EvqOQc+1V8ycwNs2oNicwNgSn5A+9HpF495Kae039hGtj2Gpt4IbplSYwKFq/sFTq+CekxdD2YVQmGvsjep4bNVL66o2RSZVAW1Bg/G8/sSe3BwgD8IToy9+1NHPPuaVupeukRqNyUDcVvWH8hdb8AkXYY87+546etYDpn91GQnhTEberKbXX4UCmpKNXpXoprLE8nQLGb6TIoHPTyA+RRNQ4erDzMjqF43UVmhOZTtkGaRgIWK7vDAKpLUnuOguuhJUNpYpRggGQsMV8cZnaCumy5OFUf6i6rfN0Ru6a+/Bm7grJiAcnZlU7igaxgI38QaJgCKcqqzIImdcRYNQC74/Ok/1oM= + - secure: BK++GwKVPoS0iG8aB7wQ13daTgJR9MifHA+l9xr/tSZ3SUL6nc7kjxLbliRQJCqT9lcOODsd+v2u9PziEzBp0CCh67ftFxJw8riP2+FgdmHTK4yav9QpSwoBJHhV2SgBMGlXiqdUVC7wpgjzzK63V8abvzAhXkthWPl3kYpUI//xGYyuBNXVHEOImHB3F1M5bn90lflFtRfq2iH5FigGesMi2BFfTVeqvbzZVZrAs0E1/NRdO+/cRq0c9aRpNLkh254k1tcKbUvULQq1iLQuHN2Ramn3NgNnx93sbwp1e7ZjmETbjr9cwMIDg5mh25H0Rjf2Nn8cqHbBCWzoMkjZW097HRVDYht2kJZQIbQcaxX38DW6vykUwGWSBAWbtvCUwYwU57s/dIbSYUTQErkYYmhiq52cdOtnxZ2/ULoElCVyR8lTmQuANJrq9YFC9q1ly69YuMWWnFgwxWpK1JCgAJGELgj5EvcghEtNmkEFh5f6pmbKBE7PKQPTovzNKcdRauR/L+MsmhVYukCfNZq57LrruIQIX1GQNw9w3Ck8P4EPtNjdI4umCSy6nZSyTevWgVTmIP9EwXa5Cap32ZU+iDtw+wUBAr3sjROJOYGKlL/ktWsWbjog5hIG0rrb8PbgOfbLRZSEYGL9sYsyXXyW5oI37lB7AqG6D7vOA4TdmTQ= diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ae3c82a20d..63fdbb9924 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,341 +1,140 @@ # Contributing -Welcome! As a [Jupyter](https://jupyter.org) project, we follow the [Jupyter contributor guide](https://jupyter.readthedocs.io/en/latest/contributor/content-contributor.html). +Welcome! As a [Jupyter](https://jupyter.org) project, we follow the [Jupyter +contributor +guide](https://jupyter.readthedocs.io/en/latest/contributor/content-contributor.html). -## Setting up minikube for local development +## Local development for a code contribution -We recommend using [minikube](https://github.com/kubernetes/minikube) for local -development. +### Prepare git -1. [Download & install minikube](https://github.com/kubernetes/minikube#installation). +1. Install [git](https://www.git-scm.com/). To verify it is installed, run this + from a terminal. - For MacOS: You may install minikube using Homebrew `brew cask install minikube` or - from a binary at https://github.com/kubernetes/minikube/releases. - If you need to install Docker Community Edition (CE) for Mac, please - follow the [Docker instructions](https://store.docker.com/editions/community/docker-ce-desktop-mac). - -2. [Download & install helm](https://github.com/helm/helm#install). 
-
-   You may install Helm using one of the following steps:
-
-   * With the following curl command:
-
-     ```
-     curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash
-     ```
-   * From one of the binaries at https://github.com/helm/helm/releases
-   * For MacOS, using Homebrew: `brew install kubernetes-helm`
-
-3. Start minikube.
-
-   For minikube version 0.26 and higher:
    ```bash
-   minikube start
+   git version
    ```

-   For older minikube versions:
-   ```bash
-   minikube start --extra-config=apiserver.Authorization.Mode=RBAC
-   ```
+1. Make a GitHub fork of [this
+   repository](https://github.com/jupyterhub/zero-to-jupyterhub-k8s) by creating
+   and then logging into your GitHub account and clicking the Fork button.

-   Note on troubleshooting: if you recently upgraded minikube and are now seeing
-   errors, you may need to clear out the `~/.minikube` and `~/.kube` directories
-   and reboot.
+1. Clone your fork to your local computer.

-4. Use the docker daemon inside minikube for building:
    ```bash
-   eval $(minikube docker-env)
-   ```
-
-5. Clone the zero-to-jupyterhub repo:
-   ```bash
-   git clone git@github.com:jupyterhub/zero-to-jupyterhub-k8s.git
+   git clone http://github.com/<your-github-username>/zero-to-jupyterhub-k8s.git
    cd zero-to-jupyterhub-k8s
-   ```
-6. Create a virtualenv & install the libraries required for builds to happen:
-   ```bash
-   python3 -m venv .
-   source bin/activate
-   python3 -m pip install -r dev-requirements.txt
+
+   # make it easy to reference the project's GitHub repository as "upstream"
+   git remote add upstream https://github.com/jupyterhub/zero-to-jupyterhub-k8s
+
+   # make it obvious what you reference by renaming a reference to your
+   # personal GitHub repository to "fork"
+   git remote rename origin fork
    ```

-7. Now run `chartpress` to build the requisite docker images inside minikube:
-   ```bash
-   chartpress
-   ```
+### Prepare Virtual Machine software

-   This will build the docker images inside minikube & modify
-   `jupyterhub/values.yaml` with the appropriate values to make the chart
-   installable!
+A `Vagrantfile` is a way to prepare a Virtual Machine (VM), and we [have
+one](Vagrantfile) to prepare a VM for local development! We can use it to get
+a VM up and running, enter it with SSH, develop and run tests, and later shut
+it down without influencing our system.

-8. Configure helm and minikube for RBAC:
-   ```bash
-   kubectl create clusterrolebinding add-on-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:default
-   kubectl --namespace kube-system create sa tiller
-   kubectl create clusterrolebinding tiller \
-      --clusterrole cluster-admin \
-      --serviceaccount=kube-system:tiller
-   helm init --service-account tiller
-   ```
+1. Install VirtualBox by [downloading and running an
+   installer](https://www.virtualbox.org/wiki/Downloads).

-9. Install / Upgrade JupyterHub Chart!
-   ```bash
-   helm upgrade --wait --install --namespace=hub hub jupyterhub/ -f minikube-config.yaml
-   ```
+1. Install Vagrant by [downloading and running an
+   installer](https://www.vagrantup.com/downloads.html).

-   You can easily change the options in `minikube-config.yaml` file to test what
-   you want, or create another `config.yaml` file & pass that as an additional
-   `-f config.yaml` file to the `helm upgrade` command.
+### Develop and run tests

-10. Retrieve the URL for your instance of JupyterHub:
+1. Start a prepared VM and SSH into it.

    ```bash
-   minikube service --namespace=hub proxy-public
-   ```
-
-   Navigate to the URL in your browser. You should now have JupyterHub running
-   on minikube.
-
-11. 
Make the changes you want.
+   ## if you have suspended a VM earlier, use "vagrant resume" instead
+   vagrant up

-    To view your changes on the running development instance of JupyterHub:
+   ## enter an SSH session with the VM
+   vagrant ssh
+   ```

-    - Re-run step 7 if you changed anything under the `images` directory
-    - Re-run step 9 if you changed things only under the `jupyterhub` directory.
+2. Develop and test within the VM
+
+   ```bash
+   ## run within the SSH session
+   cd zero-to-jupyterhub-k8s
+
+   ## initialize some environment variables etc. (notice the leading dot)
+   . ./dev init

-## Travis CI tests
+   ## start a k8s cluster
+   ./dev start-k8s

-Travis tests are automatically run on every pull request.
-Since the Travis environment is not accessible it can be difficult to debug CI failures.
-A [`Vagrantfile`](ci/Vagrantfile) which partially simulates the Travis environment is included, and may be useful when updating the CI deployments, though it is by no means an exact replica.
+   ## install/upgrade the helm chart
+   ./dev upgrade

-1. Start and login to the Vagrant box:
+   ## see the results
+   # visit http://localhost:8080

-   ```bash
-   cd ci
-   vagrant up
-   vagrant ssh
+   ## make a change
+   # ...
    ```
+   ## run tests
+   ./dev test
    ```

-2. Run the test script.
-   Optionally edit `SCENARIO` in [`./ci/vagrant-run.sh`](./ci/vagrant-run.sh)
-   if you want to test a different scenario
+3. Close the SSH session

    ```bash
-   cd /zero-to-jupyterhub-k8s
-   ./ci/vagrant-run.sh
+   ## exit the SSH session
+   exit
+   vagrant suspend
+   # vagrant halt
+   # vagrant destroy
    ```

+> **NOTE:** You can also use `vagrant destroy` to reset the VM state entirely,
+> but since the start-k8s script resets the k8s cluster when the same k8s
+> version is set as before, it should be fine to just `halt` and later `up`
+> again.

----
-
-## Best practices
+### Debugging issues

-We strive to follow the guidelines provided by [kubernetes/charts](https://github.com/kubernetes/charts/blob/master/REVIEW_GUIDELINES.md) and the [Helm Chart Best Practices Guide](https://github.com/kubernetes/helm/tree/master/docs/chart_best_practices) they refer to.
+Various things can go wrong while working with the local development
+environment. Here are some typical issues and what to do about them.

-## Releasing a new version of the helm chart
+#### Network issues

-The following steps can be followed to release a new version of the Helm Chart.
-Presently, we expect a release approximately every 5-7 weeks.
+Network and DNS issues are typically symptoms of unreliable internet (as
+experienced by the virtual machine). You can recognize such issues if you get
+errors like the ones below.

-### Create an issue for the new release
+```shell
+# while installing docker
+curl: (6) Could not resolve host: download.docker.com

-Use this issue to coordinate efforts and keep track of progress. You can
-copy / paste the raw Markdown from the following list, which will be covered
-in more detail below.
+# while running pip install
+Retrying (Retry(total=4, connect=None, read=None, redirect=None, status=None)) after connection broken by 'NewConnectionError(': Failed to establish a new connection: [Errno -3] Temporary failure in name resolution',)': /simple/chartpress/

+# while running apt-get install while building a docker image with chartpress
+E: Failed to fetch http://archive.ubuntu.com/ubuntu/pool/main/r/rtmpdump/librtmp1_2.4+20151223.gitfa8646d.1-1_amd64.deb  Could not connect to archive.ubuntu.com:80 (91.189.88.174). 
- connect (113: No route to host) Could not connect to archive.ubuntu.com:80 (91.189.88.31). - connect (113: No route to host) [IP: 91.189.88.174 80] +# [...] +subprocess.CalledProcessError: Command '['docker', 'build', '-t', 'jupyterhub/k8s-hub:0.9-217f798', 'images/hub', '--build-arg', 'JUPYTERHUB_VERSION=git+https://github.com/jupyterhub/jupyterhub@master']' returned non-zero exit status 100. +# while installing a dependency for our k8s cluster +Unable to connect to the server: dial tcp: lookup docs.projectcalico.org on 127.0.0.53:53: read udp 127.0.0.1:56409->127.0.0.53:53: i/o timeout ``` -Title: Release {{release-name}} -Content: - -This issue will be used to coordinate the next release of the helm -chart, {{release-name}}. Instructions for creating the release can be found in -[CONTRIBUTING.md](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/master/CONTRIBUTING.md#releasing-a-new-version-of-the-helm-chart). -Below is the checklist for this release. - -- [ ] Code, tests, and documentation to support a release are stable. -- [ ] Make a CHANGELOG -- [ ] Generate and add the list of contributors -- [ ] Build and push a new Docker image to DockerHub -- [ ] Commit version bump in `Chart.yaml` and `Values.yaml` -- [ ] Update references in documentation to the new version (note: documentation - should be stable and there should be no anticipated major changes to content). -- [ ] Confirm that a new deployment using the updated instructions works -- [ ] Create and push a new tag for this release -- [ ] Create and publish a new GitHub release -- [ ] Write / publish a blog post based largely off of the CHANGELOG -- [ ] Set ReadTheDocs to begin using `latest` by default -- [ ] Celebrate! -``` - -As there are often many documentation improvements following the release of -a new version, we set ReadTheDocs to serve `latest/` until the first docs are -written that are next-version-specific. As soon as documentation must be -written for the **next** version of the Helm Chart, you can use the following -checklist: - -``` -- [ ] Create a new tag for a documentation release (same release name with `-doc` at the end) -- [ ] Publish this tag -- [ ] Set ReadTheDocs to point to the **new tag** by default instead of `latest` -- [ ] Continue making next-version-specific changes to the documentation. -``` - -**Note**: Switching the documentation to `latest` after a new release is a stop-gap -measure to accomodate the fact that the documentation is still changing relatively -rapidly. Once the documentation as a whole stabilizes (after a few more release -cycles), we plan to begin switching straight from the last version to the new version -of documentation without going through latest. - -### Make a CHANGELOG - -This needs to be manually created, following the format of -current [CHANGELOG](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/master/CHANGELOG.md). The general structure should be: - -* A short description of the general theme / points of interest for - this release. -* Breaking changes + a link to the [upgrade instructions](https://zero-to-jupyterhub.readthedocs.io/en/v0.5-doc/upgrading.html) in the docs -* A list of features with brief descriptions under each. -* The contributor list mentioned in the section below. - -### Add list of contributors - -We try to recognize *all* sorts of contributors, rather -than just code committers. - -Use the script in `tools/contributors.py` to list all -contributions (anyone who made a commit or a comment) -since the latest release. 
For each -release, you'll need to find the versions of all repos -involved: - -* [z2jh](https://github.com/jupyterhub/zero-to-jupyterhub-k8s) -* [KubeSpawner](https://github.com/jupyterhub/kubespawner) -* [JupyterHub](https://github.com/jupyterhub/jupyterhub) -* [OAuthenticator](https://github.com/jupyterhub/oauthenticator) - -Edit `contributors.py` to have the appropriate dates -for each of these versions. Then, run the script and paste -the output into the changelog. For an -example, see [the v0.5 list of contributors](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/v0.5/CHANGELOG.md#contributors). +As you may notice, typical keywords associated with network errors are: -### Push built images to DockerHub + bump version - -The JupyterHub helm chart uses a Docker image that's registered -on DockerHub. When releasing a new version of the helm chart, -you also need to push a new version of this image. To do so, -you must have: +- "resolve host" +- "name resolution" +- "timeout" +- "no route to host" -1. Docker running locally -2. An account on DockerHub that you are logged into from - your local docker installation. -3. Push rights for images under `jupyterhub/` on - the DockerHub registry. -4. Push rights to the `jupyterhub/helm-chart` repository on GitHub. -5. A local SSH key that will let you push to the `helm-chart` repository - on GitHub. See [these instructions](https://help.github.com/articles/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent) for information on how to create this. - -**Note**: If you don't have a DockerHub account, or don't have push rights to -the DockerHub registry, open an issue and ping one of the core devs. - -If you have all of this, you can then: - -1. Check out latest master of [z2jh](https://github.com/jupyterhub/zero-to-jupyterhub-k8s) -2. Run `chartpress --tag --push --publish-chart`. - * For example, to relase `v0.5`, you would run - `chartpress --tag v0.5 --push --publish-chart`. - Note the `v` before version. -3. This will also modify the files `Chart.yaml` and `values.yaml`. - Commit these changes. -4. Look through the [z2jh documentation](https://zero-to-jupyterhub.readthedocs.io) and find any references to - the Helm Chart version (e.g., look for the flag `--version`, as well - as for all `helm upgrade` and `helm install` commands). - Update these references to point to the new version you are releasing. -5. Make a PR to the z2jh repository and notify the team to take a look. - -After this PR gets merged: - -1. Go to https://zero-to-jupyterhub.readthedocs.io/en/latest and - deploy a JupyterHub using the instructions (make sure that - you're reading from `/en/latest`). Make sure your latest - changes are present, and that the JupyterHub successfully deploys - and functions properly. - -Next, move on to making a GitHub release, described below. - -### Tagging and making a GitHub release - -Now that our Docker image is pushed and we have updated the documentation -for z2jh, it's time to make a new GitHub release. To do this, you must have: - -1. Push rights to the `jupyterhub/zero-to-jupyterhub-k8s` repo - -You will need to make a git tag, and then create a GitHub release. - -1. Make sure you're on branch `master` with your latest changes from - the section above pulled. -2. Make a git tag with: - ``` - git tag -a - ``` - - Where `` should be the new version that you're releasing. - Note the `v` before the version number. - - Git will ask you to include a message with the tag. 
- Paste the entire contents of the CHANGELOG for this particular release. - An easy way to do this is to paste the contents in a text file, and - then refer to that text file with the call to commit: - `git tag -a -F ` -3. Push the tags to the `jupyterhub/zero-to-jupyterhub-k8s` repo with - `git push --tags`. - Note that `` is whatever your local git uses to refer - to the `jupyerhub/` organization's repository (e.g., `official` - or `upstream`) -3. Make a **GitHub Release**: - * go to https://github.com/jupyterhub/zero-to-jupyterhub-k8s/releases and click 'Draft new release'. - * The title should be the new version, followed by the name of the cricketer for the release. Like so:`v0.5: "Hamid Hassan"`. - * The description should include the entire changelog entry for this release. - * Make sure the title/description/tag name look correct, and then click - on `Publish Release`. - -You've just made a GitHub release! - - -### RTD update - -Wait a few hours to let the release 'cool' and make sure that links, -webpages, etc have updated. Then, update our documentation settings on -readthedocs to show `latest` by default. This marks the official -'release' of the version! - -### Last step - release a blog post and tell the world! - -The final step is to release a blog post. This doesn't have to be -done by the person who performed all of the above actions. - -To release a blog post for the new version, start a draft on the Jupyter Medium -blog. Copy/paste the section of the CHANGELOG corresponding to the new -release, then make minor modifications to make it more blog-friendly. +## Helm chart practices -Don't forget to tell the JupyterHub community about the new release, and -to encourage people to talk about it on social media! - -That's it! Congratulations on making a new release of JupyterHub! - -### Extra step - release a documentation release - -It is common that documentation changes are made shortly after a new release. -To handle this, we often create a documentation release a few days after a -major release. - -To do this, confirm that all changes to the documentation -are merged into master, then create a new tag with the same release name and -`-doc` appended to the end. Create a GitHub release with the new tag and a -description that points to the original release description. Finally, set -our ReadTheDocs settings to point users to the new `-doc` tag by default instead -of `latest`. +We strive to follow the guidelines provided by +[kubernetes/charts](https://github.com/kubernetes/charts/blob/master/REVIEW_GUIDELINES.md) +and the [Helm chart best practices +guide](https://github.com/kubernetes/helm/tree/master/docs/chart_best_practices). diff --git a/RELEASE.md b/RELEASE.md new file mode 100644 index 0000000000..158f24f4b0 --- /dev/null +++ b/RELEASE.md @@ -0,0 +1,203 @@ +# Releasing a new version of the helm chart + +The following steps can be followed to release a new version of the Helm Chart. + +## Create an issue for the new release + +Use this issue to coordinate efforts and keep track of progress. You can +copy / paste the raw Markdown from the following list, which will be covered +in more detail below. + +``` +Title: Release {{release-name}} +Content: + +This issue will be used to coordinate the next release of the helm +chart, {{release-name}}. Instructions for creating the release can be found in +[CONTRIBUTING.md](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/master/CONTRIBUTING.md#releasing-a-new-version-of-the-helm-chart). 
+Below is the checklist for this release.
+
+- [ ] Code, tests, and documentation to support a release are stable.
+- [ ] Make a CHANGELOG
+- [ ] Generate and add the list of contributors
+- [ ] Build and push a new Docker image to DockerHub
+- [ ] Commit version bump in `Chart.yaml` and `Values.yaml`
+- [ ] Update references in documentation to the new version (note: documentation
+      should be stable and there should be no anticipated major changes to content).
+- [ ] Confirm that a new deployment using the updated instructions works
+- [ ] Create and push a new tag for this release
+- [ ] Create and publish a new GitHub release
+- [ ] Write / publish a blog post based largely off of the CHANGELOG
+- [ ] Set ReadTheDocs to begin using `latest` by default
+- [ ] Celebrate!
+```
+
+As there are often many documentation improvements following the release of
+a new version, we set ReadTheDocs to serve `latest/` until the first docs are
+written that are next-version-specific. As soon as documentation must be
+written for the **next** version of the Helm Chart, you can use the following
+checklist:
+
+```
+- [ ] Create a new tag for a documentation release (same release name with `-doc` at the end)
+- [ ] Publish this tag
+- [ ] Set ReadTheDocs to point to the **new tag** by default instead of `latest`
+- [ ] Continue making next-version-specific changes to the documentation.
+```
+
+**Note**: Switching the documentation to `latest` after a new release is a stop-gap
+measure to accommodate the fact that the documentation is still changing relatively
+rapidly. Once the documentation as a whole stabilizes (after a few more release
+cycles), we plan to begin switching straight from the last version to the new version
+of documentation without going through latest.
+
+## Make a CHANGELOG
+
+This needs to be manually created, following the format of
+the current [CHANGELOG](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/master/CHANGELOG.md). The general structure should be:
+
+* A short description of the general theme / points of interest for
+  this release.
+* Breaking changes + a link to the [upgrade instructions](https://zero-to-jupyterhub.readthedocs.io/en/v0.5-doc/upgrading.html) in the docs
+* A list of features with brief descriptions under each.
+* The contributor list mentioned in the section below.
+
+## Add list of contributors
+
+We try to recognize *all* sorts of contributors, rather
+than just code committers.
+
+Use the script in `tools/contributors.py` to list all
+contributions (anyone who made a commit or a comment)
+since the latest release. For each
+release, you'll need to find the versions of all repos
+involved:
+
+* [z2jh](https://github.com/jupyterhub/zero-to-jupyterhub-k8s)
+* [KubeSpawner](https://github.com/jupyterhub/kubespawner)
+* [JupyterHub](https://github.com/jupyterhub/jupyterhub)
+* [OAuthenticator](https://github.com/jupyterhub/oauthenticator)
+
+Edit `contributors.py` to have the appropriate dates
+for each of these versions. Then, run the script and paste
+the output into the changelog. For an
+example, see [the v0.5 list of contributors](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/v0.5/CHANGELOG.md#contributors).
+
+
+## Push built images to DockerHub + bump version
+
+The JupyterHub helm chart uses a Docker image that's registered
+on DockerHub. When releasing a new version of the helm chart,
+you also need to push a new version of this image. To do so,
+you must have:
+
+1. Docker running locally
+2. 
An account on DockerHub that you are logged into from
+   your local docker installation.
+3. Push rights for images under `jupyterhub/` on
+   the DockerHub registry.
+4. Push rights to the `jupyterhub/helm-chart` repository on GitHub.
+5. A local SSH key that will let you push to the `helm-chart` repository
+   on GitHub. See [these instructions](https://help.github.com/articles/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent) for information on how to create this.
+
+**Note**: If you don't have a DockerHub account, or don't have push rights to
+the DockerHub registry, open an issue and ping one of the core devs.
+
+If you have all of this, you can then:
+
+1. Check out latest master of [z2jh](https://github.com/jupyterhub/zero-to-jupyterhub-k8s)
+2. Run `chartpress --tag <version> --push --publish-chart`.
+   * For example, to release `v0.5`, you would run
+     `chartpress --tag v0.5 --push --publish-chart`.
+     Note the `v` before version.
+3. This will also modify the files `Chart.yaml` and `values.yaml`.
+   Commit these changes.
+4. Look through the [z2jh documentation](https://zero-to-jupyterhub.readthedocs.io) and find any references to
+   the Helm Chart version (e.g., look for the flag `--version`, as well
+   as for all `helm upgrade` and `helm install` commands).
+   Update these references to point to the new version you are releasing.
+5. Make a PR to the z2jh repository and notify the team to take a look.
+
+After this PR gets merged:
+
+1. Go to https://zero-to-jupyterhub.readthedocs.io/en/latest and
+   deploy a JupyterHub using the instructions (make sure that
+   you're reading from `/en/latest`). Make sure your latest
+   changes are present, and that the JupyterHub successfully deploys
+   and functions properly.
+
+Next, move on to making a GitHub release, described below.
+
+## Tagging and making a GitHub release
+
+Now that our Docker image is pushed and we have updated the documentation
+for z2jh, it's time to make a new GitHub release. To do this, you must have:
+
+1. Push rights to the `jupyterhub/zero-to-jupyterhub-k8s` repo
+
+You will need to make a git tag, and then create a GitHub release.
+
+1. Make sure you're on branch `master` with your latest changes from
+   the section above pulled.
+2. Make a git tag with:
+   ```
+   git tag -a <version>
+   ```
+
+   Where `<version>` should be the new version that you're releasing.
+   Note the `v` before the version number.
+
+   Git will ask you to include a message with the tag.
+   Paste the entire contents of the CHANGELOG for this particular release.
+   An easy way to do this is to paste the contents in a text file, and
+   then refer to that text file with the call to commit:
+   `git tag -a <version> -F <changelog-file>`
+3. Push the tags to the `jupyterhub/zero-to-jupyterhub-k8s` repo with
+   `git push <upstream> --tags`.
+   Note that `<upstream>` is whatever your local git uses to refer
+   to the `jupyterhub/` organization's repository (e.g., `official`
+   or `upstream`)
+4. Make a **GitHub Release**:
+   * go to https://github.com/jupyterhub/zero-to-jupyterhub-k8s/releases and click 'Draft new release'.
+   * The title should be the new version, followed by the name of the cricketer for the release. Like so: `v0.5: "Hamid Hassan"`.
+   * The description should include the entire changelog entry for this release.
+   * Make sure the title/description/tag name look correct, and then click
+     on `Publish Release`.
+
+You've just made a GitHub release!
+
+
+## ReadTheDocs update
+
+Wait a few hours to let the release 'cool' and make sure that links,
+webpages, etc. have updated. 
Then, update our documentation settings on +readthedocs to show `latest` by default. This marks the official +'release' of the version! + +## Last step - release a blog post and tell the world! + +The final step is to release a blog post. This doesn't have to be +done by the person who performed all of the above actions. + +To release a blog post for the new version, start a draft on the Jupyter Medium +blog. Copy/paste the section of the CHANGELOG corresponding to the new +release, then make minor modifications to make it more blog-friendly. + +Don't forget to tell the JupyterHub community about the new release, and +to encourage people to talk about it on social media! + +That's it! Congratulations on making a new release of JupyterHub! + +## Extra step - release a documentation release + +It is common that documentation changes are made shortly after a new release. +To handle this, we often create a documentation release a few days after a +major release. + +To do this, confirm that all changes to the documentation +are merged into master, then create a new tag with the same release name and +`-doc` appended to the end. Create a GitHub release with the new tag and a +description that points to the original release description. Finally, set +our ReadTheDocs settings to point users to the new `-doc` tag by default instead +of `latest`. + diff --git a/Vagrantfile b/Vagrantfile new file mode 100644 index 0000000000..cdb76ffe68 --- /dev/null +++ b/Vagrantfile @@ -0,0 +1,20 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +Vagrant.configure("2") do |config| + config.vm.box = "generic/ubuntu1804" + config.vm.provider "virtualbox" do |vb| + vb.memory = "3072" + vb.cpus = 2 + end + + config.vm.provider :libvirt do |lv| + lv.memory = 3072 + lv.cpus = 2 + end if Vagrant.has_plugin?('vagrant-libvirt') + + config.vm.network "forwarded_port", guest: 8080, host: 8080 + + config.vm.provision "shell", path: "vagrant-vm-setup.sh" + config.vm.synced_folder ".", "/home/vagrant/zero-to-jupyterhub-k8s" +end diff --git a/chartpress.yaml b/chartpress.yaml index 31b44966c1..5edf63cf18 100644 --- a/chartpress.yaml +++ b/chartpress.yaml @@ -9,7 +9,7 @@ charts: hub: valuesPath: hub.image buildArgs: - JUPYTERHUB_VERSION: 1.0.0 + JUPYTERHUB_VERSION: git+https://github.com/jupyterhub/jupyterhub@89b0c42 network-tools: valuesPath: singleuser.networkTools.image image-awaiter: diff --git a/ci/Vagrantfile b/ci/Vagrantfile deleted file mode 100644 index 5270af33fb..0000000000 --- a/ci/Vagrantfile +++ /dev/null @@ -1,28 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : - -# Simulate a travis Xenial environment for local travis configuration - -# Example: -# $ vagrant up -# $ vagrant ssh -# vagrant$ cd /zero-to-jupyterhub-k8s -# vagrant$ ./ci/vagrant-run.sh - -Vagrant.configure("2") do |config| - config.vm.box = "generic/ubuntu1604" - config.vm.provider "virtualbox" do |vb| - vb.memory = "3072" - vb.cpus = 2 - end - - config.vm.provider :libvirt do |lv| - lv.memory = 3072 - lv.cpus = 2 - end if Vagrant.has_plugin?('vagrant-libvirt') - - config.vm.network "forwarded_port", guest: 31212, host: 31212 - - config.vm.provision "shell", path: "xenial-setup.sh" - config.vm.synced_folder "../", "/zero-to-jupyterhub-k8s" -end diff --git a/ci/common b/ci/common new file mode 100755 index 0000000000..7763c6d6b0 --- /dev/null +++ b/ci/common @@ -0,0 +1,109 @@ +#!/bin/bash + +## common - source this file with ". ./ci/common" to set environment +## variables and make useful functions available. 
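+##
+## A usage sketch (assuming you source this file from the repository root):
+##
+##   . ./ci/common        # set version defaults, extend PATH with ./bin, and
+##                        # download pinned tools (kubectl, kind, helm, kubeval)
+##   . ./ci/common ci     # CI mode: only set environment variables; the tools
+##                        # are then set up explicitly by the Travis job scripts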
+
+mkdir -p bin
+export PATH="$PWD/bin:$PATH"
+
+## NOTE: export HUB_API_URL is required for it to be accessible from pytest
+##
+export HUB_API_URL=http://127.0.0.1:8080/hub/api
+
+## NOTE: We need to allow our CI system to override these environment variables
+##
+if [ -z ${KUBE_VERSION:-} ]; then
+  ## NOTE: KUBE_VERSION is limited by the available kindest/node images
+  ##
+  ## ref: https://hub.docker.com/r/kindest/node/tags
+  ## ref: https://github.com/kubernetes/kubernetes/releases
+  export KUBE_VERSION=1.13.10
+fi
+if [ -z ${KIND_VERSION:-} ]; then
+  ## ref: https://github.com/kubernetes-sigs/kind/releases
+  export KIND_VERSION=0.5.1
+fi
+if [ -z ${HELM_VERSION:-} ]; then
+  ## ref: https://github.com/helm/helm/releases
+  export HELM_VERSION=2.14.3
+fi
+if [ -z ${KUBEVAL_VERSION:-} ]; then
+  ## ref: https://github.com/instrumenta/kubeval/releases
+  export KUBEVAL_VERSION=0.14.0
+fi
+
+## Valid versions to list under LINT_KUBE_VERSIONS are those in the
+## kubernetes-json-schema repository, used by kubeval.
+##
+## ref: https://github.com/instrumenta/kubernetes-json-schema
+##
+if [ -z ${LINT_KUBE_VERSIONS:-} ]; then
+  export LINT_KUBE_VERSIONS=1.11.0,1.12.0,1.13.0,1.14.0,1.15.0
+fi
+
+## NOTE: The setup_... functions cache downloads but ensure the correct version
+##
+setup_kubectl () {
+  echo "setup kubectl ${KUBE_VERSION}"
+  if ! [ -f "bin/kubectl-${KUBE_VERSION}" ]; then
+    curl -Lo "bin/kubectl-${KUBE_VERSION}" "https://storage.googleapis.com/kubernetes-release/release/v${KUBE_VERSION}/bin/linux/amd64/kubectl"
+    chmod +x "bin/kubectl-${KUBE_VERSION}"
+  fi
+  cp "bin/kubectl-${KUBE_VERSION}" bin/kubectl
+}
+
+setup_kind () {
+  echo "setup kind ${KIND_VERSION}"
+  if ! [ -f "bin/kind-${KIND_VERSION}" ]; then
+    curl -Lo "bin/kind-${KIND_VERSION}" "https://github.com/kubernetes-sigs/kind/releases/download/v${KIND_VERSION}/kind-linux-amd64"
+    chmod +x "bin/kind-${KIND_VERSION}"
+  fi
+  cp "bin/kind-${KIND_VERSION}" bin/kind
+}
+
+setup_helm () {
+  echo "setup helm ${HELM_VERSION}"
+  if ! [ -f "bin/helm-${HELM_VERSION}" ]; then
+    curl -Lo "bin/helm-${HELM_VERSION}.tar.gz" "https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz"
+    tar -xf "bin/helm-${HELM_VERSION}.tar.gz" --directory bin --strip-components 1 linux-amd64/helm
+    rm "bin/helm-${HELM_VERSION}.tar.gz"
+    mv bin/helm "bin/helm-${HELM_VERSION}"
+  fi
+  cp bin/helm-${HELM_VERSION} bin/helm
+}
+
+setup_kubeval () {
+  echo "setup kubeval ${KUBEVAL_VERSION}"
+  if ! [ -f "bin/kubeval-${KUBEVAL_VERSION}" ]; then
+    curl -Lo "bin/kubeval-${KUBEVAL_VERSION}.tar.gz" "https://github.com/instrumenta/kubeval/releases/download/${KUBEVAL_VERSION}/kubeval-linux-amd64.tar.gz"
+    tar -xf "bin/kubeval-${KUBEVAL_VERSION}.tar.gz" --directory bin
+    rm "bin/kubeval-${KUBEVAL_VERSION}.tar.gz"
+    mv bin/kubeval "bin/kubeval-${KUBEVAL_VERSION}"
+  fi
+  cp bin/kubeval-${KUBEVAL_VERSION} bin/kubeval
+}
+
+setup_git_crypt () {
+  GIT_CRYPT_VERSION=0.5.0
+  GIT_CRYPT_VERSION_SHA=46c288cc849c23a28239de3386c6050e5c7d7acd50b1d0248d86e6efff09c61b
+  echo "setup git-crypt ${GIT_CRYPT_VERSION}"
+  if ! 
[ -f "bin/git-crypt-${GIT_CRYPT_VERSION}" ]; then + curl -Lo "bin/git-crypt-${GIT_CRYPT_VERSION}" https://github.com/minrk/git-crypt-bin/releases/download/${GIT_CRYPT_VERSION}/git-crypt + chmod +x "bin/git-crypt-${GIT_CRYPT_VERSION}" + echo "${GIT_CRYPT_VERSION_SHA} bin/git-crypt-${GIT_CRYPT_VERSION}" | shasum -a 256 -c - + fi + cp bin/git-crypt-${GIT_CRYPT_VERSION} bin/git-crypt +} + +if [ "$1" = "ci" ]; then + export KIND_CLUSTER=jh-ci-${KUBE_VERSION} + export KUBECONFIG=~/.kube/kind-config-${KIND_CLUSTER} +else + setup_kubectl + setup_kind + setup_helm + setup_kubeval + + export KIND_CLUSTER=dev + export KUBECONFIG=~/.kube/kind-config-${KIND_CLUSTER} +fi diff --git a/ci/docker-fixes.sh b/ci/docker-fixes.sh deleted file mode 100755 index 8523cb9625..0000000000 --- a/ci/docker-fixes.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash -set -ex - -# https://github.com/moby/moby/issues/39120 -sudo cat /etc/docker/daemon.json -echo '{"mtu": 1460}' | sudo dd of=/etc/docker/daemon.json -sudo systemctl restart docker -docker ps -a diff --git a/ci/install-kind.sh b/ci/install-kind.sh deleted file mode 100755 index 94c1ca3c99..0000000000 --- a/ci/install-kind.sh +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/bash -set -ex - -mkdir -p bin - -# nsenter is included on xenial - -# install socat (required by helm) -sudo apt-get update && sudo apt-get install -y socat - -# install kubectl, kind -# based on https://blog.travis-ci.com/2017-10-26-running-kubernetes-on-travis-ci-with-minikube -if ! [ -f "bin/kubectl" ]; then - echo "installing kubectl" - curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/v${KUBE_VERSION}/bin/linux/amd64/kubectl - chmod +x kubectl - mv kubectl bin/ -fi - -if ! [ -f "bin/kind" ]; then - echo "installing kind" - curl -Lo kind https://github.com/kubernetes-sigs/kind/releases/download/v${KIND_VERSION}/kind-linux-amd64 - chmod +x kind - mv kind bin/ -fi - -echo "installing kubeval" -if ! 
[ -f bin/kubeval-${KUBEVAL_VERSION} ]; then - curl -sSLo bin/kubeval-${KUBEVAL_VERSION}.tar.gz https://github.com/garethr/kubeval/releases/download/${KUBEVAL_VERSION}/kubeval-linux-amd64.tar.gz - tar --extract --file bin/kubeval-${KUBEVAL_VERSION}.tar.gz --directory bin - rm bin/kubeval-${KUBEVAL_VERSION}.tar.gz - mv bin/kubeval bin/kubeval-${KUBEVAL_VERSION} -fi -cp bin/kubeval-${KUBEVAL_VERSION} bin/kubeval - -echo "starting cluster with kind" -$PWD/bin/kind create cluster --image kindest/node:v${KUBE_VERSION} -export KUBECONFIG="$($PWD/bin/kind get kubeconfig-path --name=kind)" - -kubectl get nodes - -echo "installing helm" -curl -ssL https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz \ - | tar -xz -C bin --strip-components 1 linux-amd64/helm -chmod +x bin/helm - -kubectl --namespace kube-system create sa tiller -kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller -helm init --service-account tiller - - -echo "waiting for tiller" -kubectl --namespace=kube-system rollout status --watch deployment/tiller-deploy - -echo "installing git-crypt" -curl -L https://github.com/minrk/git-crypt-bin/releases/download/0.5.0/git-crypt > bin/git-crypt -echo "46c288cc849c23a28239de3386c6050e5c7d7acd50b1d0248d86e6efff09c61b bin/git-crypt" | shasum -a 256 -c - -chmod +x bin/git-crypt diff --git a/ci/install-minikube.sh b/ci/install-minikube.sh deleted file mode 100755 index 0fc3e9220b..0000000000 --- a/ci/install-minikube.sh +++ /dev/null @@ -1,97 +0,0 @@ -#!/bin/bash -set -ex - -mkdir -p bin - -# nsenter is included on xenial - -# install socat (required by helm) -sudo apt-get update && sudo apt-get install -y socat - -# install kubectl, minikube -# based on https://blog.travis-ci.com/2017-10-26-running-kubernetes-on-travis-ci-with-minikube -echo "installing kubectl" -curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/v${KUBE_VERSION}/bin/linux/amd64/kubectl -chmod +x kubectl -mv kubectl bin/ - -echo "installing minikube" -curl -Lo minikube https://storage.googleapis.com/minikube/releases/v${MINIKUBE_VERSION}/minikube-linux-amd64 -chmod +x minikube -mv minikube bin/ -# Reduce CI logs clutter -bin/minikube config set WantKubectlDownloadMsg false -bin/minikube config set WantReportErrorPrompt false - -# FIXME: Workaround missing crictl on K8s 1.11 only -# - Issue: https://github.com/jupyterhub/zero-to-jupyterhub-k8s/issues/1123 -# - CI fail: https://travis-ci.org/jupyterhub/zero-to-jupyterhub-k8s/jobs/485093909 -if [ ! -z "${CRICTL_VERSION}" ]; then - echo "installing crictl" - if ! [ -f bin/crictl-${CRICTL_VERSION} ]; then - curl -sSLo bin/crictl-${CRICTL_VERSION}.tar.gz https://github.com/kubernetes-sigs/cri-tools/releases/download/v${CRICTL_VERSION}/crictl-v${CRICTL_VERSION}-linux-amd64.tar.gz - tar --extract --file bin/crictl-${CRICTL_VERSION}.tar.gz --directory bin - rm bin/crictl-${CRICTL_VERSION}.tar.gz - mv bin/crictl bin/crictl-${CRICTL_VERSION} - fi - cp bin/crictl-${CRICTL_VERSION} bin/crictl - # minikube is run with sudo so the modified PATH is lost - sudo ln -s "${PWD}/bin/crictl-${CRICTL_VERSION}" /usr/bin/crictl -fi - - -echo "installing kubeval" -if ! 
[ -f bin/kubeval-${KUBEVAL_VERSION} ]; then - curl -sSLo bin/kubeval-${KUBEVAL_VERSION}.tar.gz https://github.com/garethr/kubeval/releases/download/${KUBEVAL_VERSION}/kubeval-linux-amd64.tar.gz - tar --extract --file bin/kubeval-${KUBEVAL_VERSION}.tar.gz --directory bin - rm bin/kubeval-${KUBEVAL_VERSION}.tar.gz - mv bin/kubeval bin/kubeval-${KUBEVAL_VERSION} -fi -cp bin/kubeval-${KUBEVAL_VERSION} bin/kubeval - -echo "starting minikube with RBAC" -sudo CHANGE_MINIKUBE_NONE_USER=true $PWD/bin/minikube start $MINIKUBE_ARGS -minikube update-context - -# If using CNI the node will not be NotReady until a CNI config exists -if [ "$INSTALL_CALICO" = "1" ]; then - echo "installing calico" - # https://github.com/projectcalico/calico/issues/1456#issuecomment-422957446 - kubectl apply -f https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/etcd.yaml - kubectl apply -f https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/rbac.yaml - curl -sf https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/calico.yaml -O - CALICO_ETCD_IP=$(kubectl get service --namespace=kube-system calico-etcd -o jsonpath='{.spec.clusterIP}') - sed -i -e "s/10\.96\.232\.136/$CALICO_ETCD_IP/" calico.yaml - kubectl apply -f calico.yaml - - echo "waiting for calico" - JSONPATH='{.status.numberReady}' - until [ "$(kubectl get daemonsets calico-node -n kube-system -o jsonpath="$JSONPATH")" = "1" ]; do - sleep 1 - done -fi - -echo "waiting for kubernetes" -JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}' -until kubectl get nodes -o jsonpath="$JSONPATH" 2>&1 | grep -q "Ready=True"; do - sleep 1 -done -kubectl get nodes - -echo "installing helm" -curl -ssL https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz \ - | tar -xz -C bin --strip-components 1 linux-amd64/helm -chmod +x bin/helm - -kubectl --namespace kube-system create sa tiller -kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller -helm init --service-account tiller - - -echo "waiting for tiller" -kubectl --namespace=kube-system rollout status --watch deployment/tiller-deploy - -echo "installing git-crypt" -curl -L https://github.com/minrk/git-crypt-bin/releases/download/0.5.0/git-crypt > bin/git-crypt -echo "46c288cc849c23a28239de3386c6050e5c7d7acd50b1d0248d86e6efff09c61b bin/git-crypt" | shasum -a 256 -c - -chmod +x bin/git-crypt diff --git a/ci/kind-1.12-default.env b/ci/kind-1.12-default.env deleted file mode 100644 index 41f9075004..0000000000 --- a/ci/kind-1.12-default.env +++ /dev/null @@ -1,11 +0,0 @@ -export KUBE_VERSION=1.12.10 -export KIND_VERSION=0.5.1 -export HELM_VERSION=2.12.3 -export KUBEVAL_VERSION=0.7.3 -export PATH="$PWD/bin:$PATH" - -export Z2JH_HELM_ARGS="-f minikube-config.yaml" -export DISABLE_TEST_NETPOL=1 -export INSTALL_CALICO=0 - -export RUN_PUBLISH_SCRIPT=0 diff --git a/ci/kind-1.13-default.env b/ci/kind-1.13-default.env deleted file mode 100644 index 506fa99ad7..0000000000 --- a/ci/kind-1.13-default.env +++ /dev/null @@ -1,11 +0,0 @@ -export KUBE_VERSION=1.13.10 -export KIND_VERSION=0.5.1 -export HELM_VERSION=2.12.3 -export KUBEVAL_VERSION=0.7.3 -export PATH="$PWD/bin:$PATH" - -export Z2JH_HELM_ARGS="-f minikube-config.yaml" -export DISABLE_TEST_NETPOL=1 -export INSTALL_CALICO=0 - -export RUN_PUBLISH_SCRIPT=0 diff --git a/ci/kind-1.14-default.env b/ci/kind-1.14-default.env deleted file mode 100644 index 
32ce01a12c..0000000000
--- a/ci/kind-1.14-default.env
+++ /dev/null
@@ -1,11 +0,0 @@
-export KUBE_VERSION=1.14.6
-export KIND_VERSION=0.5.1
-export HELM_VERSION=2.12.3
-export KUBEVAL_VERSION=0.7.3
-export PATH="$PWD/bin:$PATH"
-
-export Z2JH_HELM_ARGS="-f minikube-config.yaml"
-export DISABLE_TEST_NETPOL=1
-export INSTALL_CALICO=0
-
-export RUN_PUBLISH_SCRIPT=0
diff --git a/ci/kind-1.15-default.env b/ci/kind-1.15-default.env
deleted file mode 100644
index e9a031e390..0000000000
--- a/ci/kind-1.15-default.env
+++ /dev/null
@@ -1,11 +0,0 @@
-export KUBE_VERSION=1.15.3
-export KIND_VERSION=0.5.1
-export HELM_VERSION=2.12.3
-export KUBEVAL_VERSION=0.7.3
-export PATH="$PWD/bin:$PATH"
-
-export Z2JH_HELM_ARGS="-f minikube-config.yaml"
-export DISABLE_TEST_NETPOL=1
-export INSTALL_CALICO=0
-
-export RUN_PUBLISH_SCRIPT=0
diff --git a/ci/kind-config.yaml b/ci/kind-config.yaml
new file mode 100644
index 0000000000..e71c9ac52a
--- /dev/null
+++ b/ci/kind-config.yaml
@@ -0,0 +1,11 @@
+## kind create cluster --config kind-config.yaml
+##
+## ref: https://github.com/kubernetes-sigs/kind/blob/master/site/content/docs/user/kind-example-config.yaml
+## ref: https://godoc.org/sigs.k8s.io/kind/pkg/apis/config/v1alpha3#Cluster
+##
+kind: Cluster
+apiVersion: kind.sigs.k8s.io/v1alpha3
+networking:
+  disableDefaultCNI: true
+nodes:
+- role: control-plane
diff --git a/ci/kind-load-docker-images.py b/ci/kind-load-docker-images.py
new file mode 100755
index 0000000000..58996177bd
--- /dev/null
+++ b/ci/kind-load-docker-images.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python3
+"""
+Run `kind load docker-image <image>` on all the docker images within
+values.yaml that are available locally on the host, as first verified with
+`docker images --quiet <image>`. If we could capture this directly from
+chartpress build output it would be quicker. 
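+
+A hypothetical invocation, spelling out the script's default arguments:
+
+    ./ci/kind-load-docker-images.py --kind-cluster kind \
+        --values jupyterhub/values.yaml --chartpress chartpress.yaml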
+"""
+
+import sys
+import argparse
+import pipes
+import subprocess
+
+import yaml
+
+
+def check_output(cmd, **kwargs):
+    """Run a subcommand and exit if it fails"""
+    try:
+        return subprocess.check_output(cmd, **kwargs)
+    except subprocess.CalledProcessError as e:
+        print(
+            "`{}` exited with status {}".format(
+                " ".join(map(pipes.quote, cmd)), e.returncode
+            ),
+            file=sys.stderr,
+        )
+        sys.exit(e.returncode)
+
+
+def get_element_from_path(path, dictionary):
+    keys = path.split(".")
+    e = dictionary
+    for key in keys:
+        e = e[key]
+    return e
+
+
+def extract_images_from_values(chartpress_file, values_file):
+    """Returns a list of image:tag strings given a values.yaml file."""
+
+    with open(chartpress_file) as f:
+        chartpress = yaml.full_load(f)
+
+    with open(values_file) as f:
+        values = yaml.full_load(f)
+
+    image_paths = []
+    for chart in chartpress["charts"]:
+        for k, v in chart["images"].items():
+            image_paths.append(v["valuesPath"])
+
+    images = []
+    for image_path in image_paths:
+        image = get_element_from_path(image_path, values)
+        images.append(image["name"] + ":" + image["tag"])
+
+    return images
+
+
+def kind_load_docker_images(kind_cluster, images):
+    """Calls `kind load docker-image <image>` on provided images available locally."""

+    for image in images:
+        if not check_output(["docker", "images", "--quiet", image]):
+            continue
+
+        check_output(["kind", "load", "docker-image", "--name", kind_cluster, image])
+        print("### Loaded %s" % image)
+
+
+if __name__ == "__main__":
+    argparser = argparse.ArgumentParser()
+    argparser.add_argument(
+        "--kind-cluster",
+        default="kind",
+        help="Specify a kind cluster to load the docker images into.",
+    )
+    argparser.add_argument(
+        "--values",
+        default="jupyterhub/values.yaml",
+        help="Specify a values.yaml file to look in.",
+    )
+    argparser.add_argument(
+        "--chartpress",
+        default="chartpress.yaml",
+        help="Specify a chartpress.yaml with information about where to look for images.",
+    )
+    args = argparser.parse_args()
+
+    images = extract_images_from_values(
+        chartpress_file=args.chartpress, values_file=args.values
+    )
+    kind_load_docker_images(args.kind_cluster, images)
diff --git a/ci/minikube-1.11-default.env b/ci/minikube-1.11-default.env
deleted file mode 100644
index 69be176f95..0000000000
--- a/ci/minikube-1.11-default.env
+++ /dev/null
@@ -1,15 +0,0 @@
-export KUBE_VERSION=1.11.7
-export MINIKUBE_VERSION=0.33.1
-export HELM_VERSION=2.12.3
-export KUBEVAL_VERSION=0.7.3
-export PATH="$PWD/bin:$PATH"
-
-export MINIKUBE_ARGS="--vm-driver=none --kubernetes-version=v${KUBE_VERSION}"
-export Z2JH_HELM_ARGS="-f minikube-config.yaml"
-export DISABLE_TEST_NETPOL=1
-export INSTALL_CALICO=0
-
-export RUN_PUBLISH_SCRIPT=0
-
-# FIXME: Issue 1123
-export CRICTL_VERSION=1.11.1
diff --git a/ci/minikube-1.12-netpol.env b/ci/minikube-1.12-netpol.env
deleted file mode 100644
index 65863b74f2..0000000000
--- a/ci/minikube-1.12-netpol.env
+++ /dev/null
@@ -1,12 +0,0 @@
-export KUBE_VERSION=1.12.5
-export MINIKUBE_VERSION=0.33.1
-export HELM_VERSION=2.12.3
-export KUBEVAL_VERSION=0.7.3
-export PATH="$PWD/bin:$PATH"
-
-export MINIKUBE_ARGS="--vm-driver=none --kubernetes-version=v${KUBE_VERSION} --network-plugin cni --extra-config=kubelet.network-plugin=cni"
-export Z2JH_HELM_ARGS="-f minikube-config.yaml -f minikube-netpol.yaml"
-export DISABLE_TEST_NETPOL=0
-export INSTALL_CALICO=1
-
-export RUN_PUBLISH_SCRIPT=0
diff --git a/ci/minikube-1.13-default.env b/ci/minikube-1.13-default.env
deleted file mode 100644
index 84b51dab60..0000000000
--- 
a/ci/minikube-1.13-default.env +++ /dev/null @@ -1,12 +0,0 @@ -export KUBE_VERSION=1.13.2 -export MINIKUBE_VERSION=0.33.1 -export HELM_VERSION=2.12.3 -export KUBEVAL_VERSION=0.7.3 -export PATH="$PWD/bin:$PATH" - -export MINIKUBE_ARGS="--vm-driver=none --kubernetes-version=v${KUBE_VERSION}" -export Z2JH_HELM_ARGS="-f minikube-config.yaml" -export DISABLE_TEST_NETPOL=1 -export INSTALL_CALICO=0 - -export RUN_PUBLISH_SCRIPT=1 diff --git a/ci/publish-chart.sh b/ci/publish similarity index 91% rename from ci/publish-chart.sh rename to ci/publish index daa89417fb..01e5bbb0e9 100755 --- a/ci/publish-chart.sh +++ b/ci/publish @@ -4,7 +4,7 @@ set -eu # Decrypt a private SSH key having its public key registered on GitHub. It will # be used to establish an identity with rights to push to the repo hosting our # Helm charts: https://github.com/jupyterhub/helm-chart -openssl aes-256-cbc -K $encrypted_c6b45058ffe8_key -iv $encrypted_c6b45058ffe8_iv -in ci/id_rsa.enc -out ci/id_rsa -d +openssl aes-256-cbc -K $encrypted_c6b45058ffe8_key -iv $encrypted_c6b45058ffe8_iv -in ci/publish-id_rsa.enc -out ci/id_rsa -d chmod 0400 ci/id_rsa docker login -u "${DOCKER_USERNAME}" -p "${DOCKER_PASSWORD}" @@ -20,4 +20,4 @@ chartpress --commit-range "${TRAVIS_COMMIT_RANGE}" --push --publish-chart # Let us log the changes chartpress did, it should include replacements for # fields in values.yaml, such as what tag for various images we are using. -git diff +git --no-pager diff diff --git a/ci/id_rsa.enc b/ci/publish-id_rsa.enc similarity index 100% rename from ci/id_rsa.enc rename to ci/publish-id_rsa.enc diff --git a/ci/start-k8s b/ci/start-k8s new file mode 100755 index 0000000000..ce87244736 --- /dev/null +++ b/ci/start-k8s @@ -0,0 +1,100 @@ +#!/bin/bash +set -eu + +## NOTE: This script assumes we have installed kind, but the common script doesn't +## +if [ "${KIND_CLUSTER:-}" == "" ]; then + echo "Run \". ./dev init\" first!" + exit 1 +elif [ "${KIND_CLUSTER:-}" != "dev" ]; then + if [ "${KUBECONFIG:-}" != "$(kind get kubeconfig-path --name="jh-ci-${KUBE_VERSION:-}")" ]; then + echo "Assertion error: KUBECONFIG out of sync with KUBE_VERSION" + echo "KUBECONFIG=${KUBECONFIG:-}" + echo "KUBE_VERSION=${KUBE_VERSION:-}" + echo "Run \". ./ci/common\" to update your KUBECONFIG environment variable based on your KUBE_VERSION variable." + exit 1 + elif [ "${KIND_CLUSTER:-}" != "jh-ci-${KUBE_VERSION:-}" ]; then + echo "Assertion error: KIND_CLUSTER out of sync with KUBE_VERSION" + echo "KIND_CLUSTER=${KIND_CLUSTER:-}" + echo "KUBE_VERSION=${KUBE_VERSION:-}" + echo "Run \". ./ci/common\" to update your KIND_CLUSTER environment variable based on your KUBE_VERSION variable." + exit 1 + fi +fi + +# If the kind k8s cluster for this k8s version is already running, restart it +if kind get clusters | grep --word-regexp ${KIND_CLUSTER}; then + echo "deleting existing kind k8s cluster: ${KIND_CLUSTER}" + kind delete cluster --name=${KIND_CLUSTER} +fi + +echo "starting kind k8s cluster: ${KIND_CLUSTER}" +kind create cluster --name=${KIND_CLUSTER} --image="kindest/node:v${KUBE_VERSION}" --config ci/kind-config.yaml +kubectl config set-context --current --namespace jh-ci +kubectl get nodes + +# To test network policies, we need a custom CNI like Calico. We have disabled +# the default CNI through kind-config.yaml and will need to manually install a +# CNI for the nodes to become Ready. 
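+#
+# As a rough manual check (illustrative, not part of this script): until a CNI
+# is running, `kubectl get nodes` will report the node as NotReady.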
+echo "installing a custom CNI: Calico (async, in cluster)" +# Setup daemonset/calico-etcd, a prerequisite for calico-node +kubectl apply -f https://docs.projectcalico.org/v3.9/getting-started/kubernetes/installation/hosted/etcd.yaml +# NOTE: A toleration to schedule on a node that isn't ready is missing, but +# this pod will be part of making sure the node can become ready. +# +# toleration: +# - key: node.kubernetes.io/not-ready +# effect: NoSchedule +kubectl patch -n kube-system daemonset/calico-etcd --type='json' \ + -p='[{"op":"add", "path":"/spec/template/spec/tolerations/-", "value":{"key":"node.kubernetes.io/not-ready", "effect":"NoSchedule"}}]' + +# Setup daemonset/calico-node, that will allow nodes to enter a ready state +curl -sSo ci/daemonset-calico-node.yaml https://docs.projectcalico.org/v3.9/getting-started/kubernetes/installation/hosted/calico.yaml +# NOTE: Connection details to daemonset/calico-etcd is missing so we need to +# manually add them. +CALICO_ETCD_IP=$(kubectl get service -n kube-system calico-etcd -o jsonpath='{.spec.clusterIP}') +CALICO_ETCD_PORT=$(kubectl get service -n kube-system calico-etcd -o jsonpath='{.spec.ports[0].port}') +sed -i -e "s/:/$CALICO_ETCD_IP:$CALICO_ETCD_PORT/" ci/daemonset-calico-node.yaml +kubectl apply -f ci/daemonset-calico-node.yaml +# NOTE: daemonset/calico-node pods' main container fails to startup without +# an additional environment variable configured to disable a check +# that we fail. +# +# env: +# - name: FELIX_IGNORELOOSERPF +# value: "true" +kubectl patch -n kube-system daemonset/calico-node --type='json' \ + -p='[{"op":"add", "path":"/spec/template/spec/containers/0/env/-", "value":{"name":"FELIX_IGNORELOOSERPF", "value":"true"}}]' + +echo "waiting for kubernetes nodes (in cluster)" +# NOTE: kubectl wait has a bug relating to using the --all flag in 1.13 at least +# Due to this, we wait only for the kind-control-plane node, which +# currently is the only node we start with kind but could be configured in +# kind-config.yaml. +# +# ref: https://github.com/kubernetes/kubernetes/pull/71746 +kubectl wait node/${KIND_CLUSTER}-control-plane --for condition=ready --timeout 2m || { + r=$? + echo "kubernetes nodes never became ready" + kubectl describe nodes || true + kubectl describe -n kube-system daemonset/calico-etcd || true + kubectl logs -n kube-system daemonset/calico-etcd || true + kubectl describe -n kube-system daemonset/calico-node || true + kubectl logs -n kube-system daemonset/calico-node || true + exit $r +} + +echo "installing tiller (async, in cluster)" +kubectl create serviceaccount tiller -n kube-system +kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller +helm init --service-account tiller + +echo "waiting for tiller (in cluster)" +kubectl rollout status -n kube-system deployment/tiller-deploy --timeout 1m || { + r=$? 
+ echo "tiller never became ready" + kubectl describe nodes || true + kubectl describe -n kube-system deployment/tiller || true + kubectl logs -n kube-system deployment/tiller || true + exit $r +} diff --git a/ci/test b/ci/test new file mode 100755 index 0000000000..d7d3993d49 --- /dev/null +++ b/ci/test @@ -0,0 +1,30 @@ +#!/bin/bash +set -eu + +display_logs() { + echo "***** node *****" + kubectl describe node + echo "***** pods *****" + kubectl get pods + echo "***** events *****" + kubectl get events + echo "***** hub *****" + kubectl logs deploy/hub + echo "***** proxy *****" + kubectl logs deploy/proxy +} + +echo "running tests from outside the cluster:" +echo "- kubectl port-forward has enabled communication with services in the cluster" +## NOTE: -x / --exitfirst makes us avoid noise in the hub and proxy pod logs +## following a failure we are interested in debugging. +## +pytest ./tests -v --exitfirst || { + r=$? + echo "a test failed, here is relevant debugging information" + display_logs + exit $r +} + +## If tests succeeded show all pods to see if any were restarted +kubectl get pods diff --git a/ci/test.sh b/ci/test.sh deleted file mode 100755 index 7590a81396..0000000000 --- a/ci/test.sh +++ /dev/null @@ -1,80 +0,0 @@ -#!/bin/sh - -set -eux - - -TEST_NAMESPACE=jupyterhub-test - -if [ "$RUNNER" = "kind" ]; then - export KUBECONFIG="$($PWD/bin/kind get kubeconfig-path --name=kind)" -else - # Is there a standard interface name? - for iface in eth0 ens4 enp0s3; do - IP=$(/sbin/ifconfig $iface | grep 'inet addr' | cut -d: -f2 | awk '{print $1}'); - if [ -n "$IP" ]; then - echo "IP: $IP" - break - fi - done - if [ -z "$IP" ]; then - echo "Failed to get IP, current interfaces:" - /sbin/ifconfig -a - exit 2 - fi -fi - -helm install --wait --name jupyterhub-test --namespace $TEST_NAMESPACE ./jupyterhub/ $Z2JH_HELM_ARGS - -if [ "$RUNNER" = "kind" ]; then - kubectl port-forward -n $TEST_NAMESPACE svc/proxy-public 8080:80 & - TEST_URL=http://127.0.0.1:8080 - export HUB_API_URL=http://127.0.0.1:8080/hub/api -else - TEST_URL=http://$IP:31212 -fi - -echo "waiting for servers to become responsive" -until curl --fail -s $TEST_URL/hub/api; do - kubectl --namespace=$TEST_NAMESPACE describe pod - sleep 10 -done - -echo "getting jupyterhub version" -curl -s $TEST_URL/hub/api | grep version - -echo "running tests" - -display_logs() { - echo "***** minikube *****" - minikube logs - echo "***** node *****" - kubectl describe node - echo "***** pods *****" - kubectl --namespace $TEST_NAMESPACE get pods - echo "***** events *****" - kubectl --namespace $TEST_NAMESPACE get events - echo "***** hub *****" - kubectl --namespace $TEST_NAMESPACE logs deploy/hub - echo "***** proxy *****" - kubectl --namespace $TEST_NAMESPACE logs deploy/proxy -} - -# Run this first to ensure the hub can talk to the proxy -# (it will automatically retry) -pytest tests/test_hub_is_ready.py - -# Now sleep, and retry again, in case a race condition meant the two were -# momentarily able to communicate whilst already shutting down -sleep 1m -pytest tests/test_hub_is_ready.py - -# Hopefully this works now! If tests still failing output logs -pytest || { - r=$? 
- echo "tests failed" - display_logs - exit $r -} - -# If tests succeeded show all pods to see if any were restarted -kubectl --namespace $TEST_NAMESPACE get pods diff --git a/ci/travis-docker-fix b/ci/travis-docker-fix new file mode 100755 index 0000000000..e7f04525f1 --- /dev/null +++ b/ci/travis-docker-fix @@ -0,0 +1,13 @@ +#!/bin/bash +set -eu + +# This is a workaround to an issue caused by the existance of a docker registry +# mirror in our CI environment. Without this fix that removes the mirror, +# chartpress fails to realize the existance of already built images and rebuilds +# them. +# +# ref: https://github.com/moby/moby/issues/39120 +sudo cat /etc/docker/daemon.json +echo '{"mtu": 1460}' | sudo dd of=/etc/docker/daemon.json +sudo systemctl restart docker +docker ps -a diff --git a/ci/travis-script.sh b/ci/travis-script.sh deleted file mode 100755 index 408d0a06c5..0000000000 --- a/ci/travis-script.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -set -eux - -python3 tools/templates/lint-and-validate.py -# render & publish chart -if [[ - "$TRAVIS_BRANCH" == "master" && - "$TRAVIS_PULL_REQUEST" == "false" && - "$RUN_PUBLISH_SCRIPT" == "1" -]]; then - ./ci/publish-chart.sh -else - chartpress --commit-range ${TRAVIS_COMMIT_RANGE} -fi -git diff - -./ci/test.sh diff --git a/ci/upgrade b/ci/upgrade new file mode 100755 index 0000000000..30f2180ce7 --- /dev/null +++ b/ci/upgrade @@ -0,0 +1,35 @@ +#!/bin/bash +set -eu + +## set TRAVIS_COMMIT_RANGE if it is unset on a local CI run +## +## NOTE: Use an open ended range from the upstream or origin master branch to the +## current state including unstaged changes. +## +if [ -z ${TRAVIS_COMMIT_RANGE:-} ]; then + if git remote -v | grep --word-regex upstream; then + GIT_REMOTE=upstream/ + elif git remote -v | grep --word-regex origin; then + GIT_REMOTE=origin/ + fi + export TRAVIS_COMMIT_RANGE=${GIT_REMOTE:-}master.. +fi + +echo "build images and update the default values.yaml to reference them" +chartpress --commit-range ${TRAVIS_COMMIT_RANGE} +git --no-pager diff + +echo "load the images the kind cluster" +python3 ci/kind-load-docker-images.py --kind-cluster $KIND_CLUSTER --values ./jupyterhub/values.yaml + +echo "install our deployment" +helm upgrade --install jh-ci --wait --namespace jh-ci ./jupyterhub \ + --values dev-config.yaml \ + --values dev-config-netpol.yaml + +echo "waiting for hub and proxy to become responsive" +kubectl rollout status deployment/proxy --timeout 1m +kubectl rollout status deployment/hub --timeout 1m + +echo "couple a localhost port with svc/proxy-public to access JupyterHub API" +kubectl port-forward svc/proxy-public 8080:80 > /dev/null 2>&1 & diff --git a/ci/vagrant-run-kind.sh b/ci/vagrant-run-kind.sh deleted file mode 100755 index 06f56d19af..0000000000 --- a/ci/vagrant-run-kind.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/sh -# Run this inside vagrant to test the travis scripts - -set -eux -export SCENARIO=1.15-default -export RUNNER=kind -export TRAVIS_BRANCH=master -export TRAVIS_PULL_REQUEST=true -export TRAVIS_COMMIT_RANGE=`git rev-parse --short origin/master`..`git rev-parse --short HEAD` - -pip3 install --no-cache-dir -r dev-requirements.txt -. 
-./ci/install-kind.sh
-./ci/travis-script.sh
diff --git a/ci/vagrant-run.sh b/ci/vagrant-run.sh
deleted file mode 100755
index fed4d2f91a..0000000000
--- a/ci/vagrant-run.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/sh
-# Run this inside vagrant to test the travis scripts
-
-set -eux
-export SCENARIO=1.12-netpol
-export TRAVIS_BRANCH=master
-export TRAVIS_PULL_REQUEST=true
-export TRAVIS_COMMIT_RANGE=`git rev-parse --short origin/master`..`git rev-parse --short HEAD`
-
-pip3 install --no-cache-dir -r dev-requirements.txt
-. ./ci/minikube-${SCENARIO}.env
-./ci/install-minikube.sh
-./ci/travis-script.sh
diff --git a/ci/xenial-setup.sh b/ci/xenial-setup.sh
deleted file mode 100755
index a28ff44692..0000000000
--- a/ci/xenial-setup.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/sh
-
-set -eux
-
-apt-get -q update
-apt-get -q install -y python3-pip
-
-# https://docs.docker.com/engine/installation/linux/docker-ce/ubuntu/#install-docker-ce
-#apt-get -q install -y linux-image-extra-$(uname -r) linux-image-extra-virtual
-
-DOCKER_DEB=docker-ce_18.06.0~ce~3-0~ubuntu_amd64.deb
-curl -O https://download.docker.com/linux/ubuntu/dists/xenial/pool/stable/amd64/$DOCKER_DEB
-# dpkg won't install dependencies
-dpkg -i $DOCKER_DEB || apt-get install -f -y
-docker info
-usermod -G docker vagrant
-
-install -o vagrant -g vagrant -d /home/vagrant/bin
-
-# Workaround Minikube DNS problems
-# https://github.com/kubernetes/minikube/issues/2027#issuecomment-338221646
-cat << EOF > /etc/resolv.conf
-nameserver 8.8.4.4
-nameserver 8.8.8.8
-EOF
-sed -i -re "s/^(127.0.0.1\\s.+)/\\1 `hostname`/" /etc/hosts
-
-# chartpress requires Python 3.6+, Xenial has 3.5
-# http://ubuntuhandbook.org/index.php/2017/07/install-python-3-6-1-in-ubuntu-16-04-lts/
-add-apt-repository -y ppa:jonathonf/python-3.6
-apt-get update
-apt-get install -y python3.6
-update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.6 1
diff --git a/dev b/dev
new file mode 100755
index 0000000000..a6ac040bc0
--- /dev/null
+++ b/dev
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+## dev is a script to help us get started performing typical tasks during
+## local development without needing to learn everything at once
+##
+## - init
+## - start-k8s
+## - upgrade
+## - lint-and-validate
+## - test
+##
+
+## if the script is sourced
+if [ "${BASH_SOURCE[0]}" != "${0}" ]; then
+    if ! [ "$1" = "init" ]; then
+        echo "Only source the init command, run your command without a leading dot!"
+    else
+        if [ "$1" = "init" ]; then
+            . ./ci/common
+            pip3 install -r dev-requirements.txt
+        fi
+    fi
+## else, the script isn't sourced
+else
+    if [ "$1" = "init" ]; then
+        echo "The init command needs to be sourced, run it with \". ./dev init\""
+    else
+        if [ "$1" = "start-k8s" ]; then
+            ./ci/start-k8s
+        elif [ "$1" = "upgrade" ]; then
+            ./ci/upgrade
+        elif [ "$1" = "lint-and-validate" ]; then
+            python3 tools/templates/lint-and-validate.py --kubernetes-versions 1.13.0
+        elif [ "$1" = "test" ]; then
+            ./ci/upgrade && ./ci/test
+        fi
+    fi
+fi
diff --git a/minikube-netpol.yaml b/dev-config-netpol.yaml
similarity index 100%
rename from minikube-netpol.yaml
rename to dev-config-netpol.yaml
diff --git a/minikube-config.yaml b/dev-config.yaml
similarity index 100%
rename from minikube-config.yaml
rename to dev-config.yaml
diff --git a/dev-requirements.txt b/dev-requirements.txt
index c444f67c04..740c5fe01c 100644
--- a/dev-requirements.txt
+++ b/dev-requirements.txt
@@ -1,10 +1,22 @@
-# chartpress is important for local development, CI and CD
-# - builds images and can push them also (--push)
-# - updates image names and tags in values.yaml
-# - can publish the built Helm chart (--publish)
+## chartpress is important for local development, CI and CD
+## - builds images and can push them also (--push)
+## - updates image names and tags in values.yaml
+## - can publish the built Helm chart (--publish)
+##
+## chartpress is used by
+## - test
+## - publish
+##
+## ref: https://github.com/jupyterhub/chartpress
+##
 chartpress==0.3.1
-# yamllint and pytest are important for local development, CI
+## pytest runs our tests, which also require requests; pytest is invoked from
+## the ci/test script
+##
 pytest>=3.7.1
 requests
-yamllint>=1.1.1
+
+## yamllint is used by tools/templates/lint-and-validate.py
+##
+yamllint>=1.17.0
diff --git a/images/hub/Dockerfile b/images/hub/Dockerfile
index 2f8e483a99..58a545222f 100644
--- a/images/hub/Dockerfile
+++ b/images/hub/Dockerfile
@@ -1,5 +1,7 @@
 FROM ubuntu:18.04
 
+## NOTE: This is a default that can be overridden by chartpress using the
+##       chartpress.yaml configuration
 ARG JUPYTERHUB_VERSION=1.0.*
 
 RUN apt-get update && \
diff --git a/images/singleuser-sample/Dockerfile b/images/singleuser-sample/Dockerfile
index 6131c9b0e5..332cd40aa1 100644
--- a/images/singleuser-sample/Dockerfile
+++ b/images/singleuser-sample/Dockerfile
@@ -9,7 +9,7 @@ FROM jupyter/base-notebook:8ccdfc1da8d5
 
 # Example install of git and nbgitpuller.
 # NOTE: git is already available in the jupyter/minimal-notebook image.
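+# NOTE: --no-install-recommends (added below) keeps the image smaller by
+#       skipping packages that apt merely recommends rather than requires.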
 USER root
-RUN apt-get update && apt-get install --yes \
+RUN apt-get update && apt-get install --yes --no-install-recommends \
     git \
     && rm -rf /var/lib/apt/lists/*
 USER $NB_USER
diff --git a/jupyterhub/Chart.yaml b/jupyterhub/Chart.yaml
index 9eb00fa384..28e84f0488 100644
--- a/jupyterhub/Chart.yaml
+++ b/jupyterhub/Chart.yaml
@@ -1,6 +1,6 @@
 name: jupyterhub
-version: 0.9-dev
-appVersion: 1.0.0
+version: 0.9-9d92fb1
+appVersion: 1.0.1dev
 description: Multi-user Jupyter installation
 home: https://z2jh.jupyter.org
 sources:
diff --git a/jupyterhub/values.yaml b/jupyterhub/values.yaml
index 5effaf50ff..1e033f54fc 100644
--- a/jupyterhub/values.yaml
+++ b/jupyterhub/values.yaml
@@ -48,7 +48,7 @@ hub:
   extraVolumeMounts: []
   image:
     name: jupyterhub/k8s-hub
-    tag: generated-by-chartpress
+    tag: 'generated-by-chartpress'
   resources:
     requests:
       cpu: 200m
@@ -201,7 +201,7 @@ singleuser:
   networkTools:
     image:
      name: jupyterhub/k8s-network-tools
-      tag: generated-by-chartpress
+      tag: 'generated-by-chartpress'
   cloudMetadata:
     enabled: false
     ip: 169.254.169.254
@@ -243,7 +243,7 @@ singleuser:
   storageAccessModes: [ReadWriteOnce]
   image:
     name: jupyterhub/k8s-singleuser-sample
-    tag: generated-by-chartpress
+    tag: 'generated-by-chartpress'
    pullPolicy: IfNotPresent
  imagePullSecret:
    enabled: false
@@ -312,7 +312,7 @@ prePuller:
   enabled: true
   image:
     name: jupyterhub/k8s-image-awaiter
-    tag: generated-by-chartpress
+    tag: 'generated-by-chartpress'
   continuous:
     enabled: true
   extraImages: {}
diff --git a/tests/README.md b/tests/README.md
new file mode 100644
index 0000000000..ffc9e22431
--- /dev/null
+++ b/tests/README.md
@@ -0,0 +1,3 @@
+# What is this folder about?
+
+We have set up tests for [pytest](https://docs.pytest.org/en/latest/) that run in our CI/CD pipeline on Travis. These tests assume they can speak directly to a running hub within a Kubernetes cluster. In practice, they assume you have used the `dev` script to set everything up.
diff --git a/tests/conftest.py b/tests/conftest.py
index e74f169ed3..e7e9abb0b7 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,14 +1,18 @@
+## conftest.py has a special meaning to pytest
+## ref: https://docs.pytest.org/en/latest/writing_plugins.html#conftest-py-plugins
+##
 import os
 import requests
-import pytest
 import uuid
+
+import pytest
 import yaml
 
 
 @pytest.fixture(scope='module')
 def request_data():
     basedir = os.path.dirname(os.path.dirname(__file__))
-    with open(os.path.join(basedir, 'minikube-config.yaml')) as f:
+    with open(os.path.join(basedir, 'dev-config.yaml')) as f:
         y = yaml.safe_load(f)
     token = y['hub']['services']['test']['apiToken']
     return {
diff --git a/tests/test_hub_is_ready.py b/tests/test_hub_is_ready.py
deleted file mode 100644
index 0a2c5e7a8c..0000000000
--- a/tests/test_hub_is_ready.py
+++ /dev/null
@@ -1,16 +0,0 @@
-import requests
-import time
-
-
-def test_hub_can_talk_to_proxy(api_request, request_data):
-    endtime = time.time() + request_data['test_timeout']
-    while time.time() < endtime:
-        try:
-            r = api_request.get('/proxy')
-            if r.status_code == 200:
-                break
-            print(r.json())
-        except requests.RequestException as e:
-            print(e)
-        time.sleep(1)
-    assert r.status_code == 200, 'Failed to get /proxy'
diff --git a/tests/test_spawn.py b/tests/test_spawn.py
index cb78ec3aa7..f6f72ad18e 100644
--- a/tests/test_spawn.py
+++ b/tests/test_spawn.py
@@ -6,6 +6,12 @@ import requests
 import yaml
 
+## DEV NOTES:
+## A lot of logs are currently in the code for debugging purposes.
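+## The excerpts are kept in string literals placed right after the assertions
+## they relate to.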
+## +## ref: https://travis-ci.org/jupyterhub/zero-to-jupyterhub-k8s/jobs/589410196 +## + # Makes heavy use of JupyterHub's API: # http://petstore.swagger.io/?url=https://raw.githubusercontent.com/jupyterhub/jupyterhub/master/docs/rest-api.yml @@ -17,12 +23,17 @@ chart = yaml.safe_load(f) jupyterhub_version = chart['appVersion'] + def test_api(api_request): print("asking for the hub's version") r = api_request.get('') assert r.status_code == 200 assert r.json().get("version", "version-missing") == jupyterhub_version + """kubectl logs deploy/hub + [I 2019-09-25 12:03:12.051 JupyterHub log:174] 200 GET /hub/api (test@127.0.0.1) 9.57ms + """ + def test_api_info(api_request): print("asking for the hub information") @@ -31,6 +42,10 @@ def test_api_info(api_request): result = r.json() assert result['spawner']['class'] == 'kubespawner.spawner.KubeSpawner' + """kubectl logs deploy/hub + [I 2019-09-25 12:03:12.086 JupyterHub log:174] 200 GET /hub/api/info (test@127.0.0.1) 10.21ms + """ + def test_api_create_user(api_request, jupyter_user): print("creating the testuser") @@ -39,6 +54,17 @@ def test_api_create_user(api_request, jupyter_user): assert r.status_code == 200 assert r.json()['name'] == jupyter_user + """kubectl logs deploy/hub + [I 2019-09-25 12:03:12.126 JupyterHub log:174] 201 POST /hub/api/users/testuser-7c70eb90-035b-4d9f-92a5-482e441e307d (test@127.0.0.1) 20.74ms + [I 2019-09-25 12:03:12.153 JupyterHub log:174] 200 GET /hub/api/users/testuser-7c70eb90-035b-4d9f-92a5-482e441e307d (test@127.0.0.1) 11.91ms + [D 2019-09-25 12:03:12.180 JupyterHub user:240] Creating for testuser-7c70eb90-035b-4d9f-92a5-482e441e307d: + [I 2019-09-25 12:03:12.204 JupyterHub reflector:199] watching for pods with label selector='component=singleuser-server' in namespace jh-ci + [D 2019-09-25 12:03:12.205 JupyterHub reflector:202] Connecting pods watcher + [I 2019-09-25 12:03:12.229 JupyterHub reflector:199] watching for events with field selector='involvedObject.kind=Pod' in namespace jh-ci + [D 2019-09-25 12:03:12.229 JupyterHub reflector:202] Connecting events watcher + [I 2019-09-25 12:03:12.269 JupyterHub log:174] 204 DELETE /hub/api/users/testuser-7c70eb90-035b-4d9f-92a5-482e441e307d (test@127.0.0.1) 98.85ms + """ + def test_api_list_users(api_request, jupyter_user): print("asking for information") @@ -46,6 +72,31 @@ def test_api_list_users(api_request, jupyter_user): assert r.status_code == 200 assert any(u['name'] == jupyter_user for u in r.json()) + """kubectl logs deploy/hub + [I 2019-09-25 12:03:12.303 JupyterHub log:174] 201 POST /hub/api/users/testuser-0d2b0fc9-5ac4-4d8c-8d25-c4545665f81f (test@127.0.0.1) 15.53ms + [I 2019-09-25 12:03:12.331 JupyterHub log:174] 200 GET /hub/api/users (test@127.0.0.1) 10.83ms + [D 2019-09-25 12:03:12.358 JupyterHub user:240] Creating for testuser-0d2b0fc9-5ac4-4d8c-8d25-c4545665f81f: + [I 2019-09-25 12:03:12.365 JupyterHub log:174] 204 DELETE /hub/api/users/testuser-0d2b0fc9-5ac4-4d8c-8d25-c4545665f81f (test@127.0.0.1) 18.44ms + """ + + +def test_hub_can_talk_to_proxy(api_request, request_data): + endtime = time.time() + request_data['test_timeout'] + while time.time() < endtime: + try: + r = api_request.get('/proxy') + if r.status_code == 200: + break + print(r.json()) + except requests.RequestException as e: + print(e) + time.sleep(1) + assert r.status_code == 200, 'Failed to get /proxy' + + """ + [I 2019-09-25 12:03:12.395 JupyterHub log:174] 200 GET /hub/api/proxy (test@127.0.0.1) 13.48ms + """ + def test_api_request_user_spawn(api_request, jupyter_user, request_data): 
print("asking kubespawner to spawn testusers singleuser-server pod") @@ -60,9 +111,109 @@ def test_api_request_user_spawn(api_request, jupyter_user, request_data): finally: _delete_server(api_request, jupyter_user, request_data['test_timeout']) + """ + ## the jupyterhub_user fixture initialize + ## + [I 2019-09-25 12:03:12.427 JupyterHub log:174] 201 POST /hub/api/users/testuser-c947c528-06d0-4d0f-be5e-ceb6f1c32dfc (test@127.0.0.1) 16.25ms + + ## test_api_request_user_spawn posts to start a server + ## _wait_for_user_to_spawn starts waiting + ## + [D 2019-09-25 12:03:12.472 JupyterHub user:240] Creating for testuser-c947c528-06d0-4d0f-be5e-ceb6f1c32dfc: + [D 2019-09-25 12:03:12.475 JupyterHub base:780] Initiating spawn for testuser-c947c528-06d0-4d0f-be5e-ceb6f1c32dfc + [D 2019-09-25 12:03:12.475 JupyterHub base:787] 0/64 concurrent spawns + [D 2019-09-25 12:03:12.476 JupyterHub base:792] 0 active servers + [D 2019-09-25 12:03:12.491 JupyterHub user:542] Calling Spawner.start for testuser-c947c528-06d0-4d0f-be5e-ceb6f1c32dfc + [W 2019-09-25 12:03:12.494 JupyterHub base:900] User testuser-c947c528-06d0-4d0f-be5e-ceb6f1c32dfc is slow to start (timeout=0) + [I 2019-09-25 12:03:12.495 JupyterHub log:174] 202 POST /hub/api/users/testuser-c947c528-06d0-4d0f-be5e-ceb6f1c32dfc/server (test@127.0.0.1) 41.05ms + [I 2019-09-25 12:03:12.583 JupyterHub log:174] 200 GET /hub/api/users/testuser-c947c528-06d0-4d0f-be5e-ceb6f1c32dfc (test@127.0.0.1) 63.93ms + [I 2019-09-25 12:03:13.628 JupyterHub log:174] 200 GET /hub/api/users/testuser-c947c528-06d0-4d0f-be5e-ceb6f1c32dfc (test@127.0.0.1) 25.40ms + [I 2019-09-25 12:03:14.662 JupyterHub log:174] 200 GET /hub/api/users/testuser-c947c528-06d0-4d0f-be5e-ceb6f1c32dfc (test@127.0.0.1) 11.28ms + [I 2019-09-25 12:03:15.310 JupyterHub log:174] 200 GET /hub/health (@172.17.0.2) 1.51ms + [I 2019-09-25 12:03:15.714 JupyterHub log:174] 200 GET /hub/api/users/testuser-c947c528-06d0-4d0f-be5e-ceb6f1c32dfc (test@127.0.0.1) 18.27ms + [I 2019-09-25 12:03:16.162 JupyterHub log:174] 200 GET /hub/api (@172.17.0.2) 0.88ms + [D 2019-09-25 12:03:16.664 JupyterHub spawner:1740] pod jupyter-testuser-2dc947c528-2d06d0-2d4d0f-2dbe5e-2dceb6f1c32dfc events before launch: 2019-09-25 12:03:12+00:00 [Normal] Successfully assigned jh-ci/jupyter-testuser-2dc947c528-2d06d0-2d4d0f-2dbe5e-2dceb6f1c32dfc to kind-control-plane + 2019-09-25 12:03:13+00:00 [Normal] Container image "jupyterhub/k8s-network-tools:0.9-b51ffeb" already present on machine + 2019-09-25 12:03:13+00:00 [Normal] Created container + 2019-09-25 12:03:13+00:00 [Normal] Started container + 2019-09-25 12:03:15+00:00 [Normal] Container image "jupyterhub/k8s-singleuser-sample:0.9-b51ffeb" already present on machine + 2019-09-25 12:03:15+00:00 [Normal] Created container + 2019-09-25 12:03:15+00:00 [Normal] Started container + [D 2019-09-25 12:03:16.666 JupyterHub spawner:1084] Polling subprocess every 30s + [D 2019-09-25 12:03:16.672 JupyterHub utils:218] Server at http://192.168.82.9:8888/user/testuser-c947c528-06d0-4d0f-be5e-ceb6f1c32dfc/ responded with 302 + [W 2019-09-25 12:03:16.674 JupyterHub _version:56] jupyterhub version 1.0.0 != jupyterhub-singleuser version 0.9.2. This could cause failure to authenticate and result in redirect loops! 
+    [I 2019-09-25 12:03:16.674 JupyterHub base:810] User testuser-c947c528-06d0-4d0f-be5e-ceb6f1c32dfc took 4.199 seconds to start
+    [I 2019-09-25 12:03:16.675 JupyterHub proxy:261] Adding user testuser-c947c528-06d0-4d0f-be5e-ceb6f1c32dfc to proxy /user/testuser-c947c528-06d0-4d0f-be5e-ceb6f1c32dfc/ => http://192.168.82.9:8888
+    [D 2019-09-25 12:03:16.675 JupyterHub proxy:765] Proxy: Fetching POST http://10.99.19.102:8001/api/routes/user/testuser-c947c528-06d0-4d0f-be5e-ceb6f1c32dfc
+
+    ## _wait_for_user_to_spawn got a valid response and returns
+    ##
+    [I 2019-09-25 12:03:16.749 JupyterHub log:174] 200 GET /hub/api/users/testuser-c947c528-06d0-4d0f-be5e-ceb6f1c32dfc (test@127.0.0.1) 12.00ms
+
+    ## _delete_server asked to terminate the user server
+    ## kubespawner fails to delete the pod in the highlighted code:
+    ## ref: https://github.com/jupyterhub/jupyterhub/blob/e4d4e059bd6ecc749a6276a80eada8f0de8ad206/jupyterhub/proxy.py#L758-L774
+    ##
+    ## the test will fail here, but afterwards it also errors because it tries
+    ## to delete a jupyterhub user, but by now the hub pod has crashed and
+    ## cannot respond to that.
+    ##
+    [I 2019-09-25 12:03:16.796 JupyterHub proxy:281] Removing user testuser-c947c528-06d0-4d0f-be5e-ceb6f1c32dfc from proxy (/user/testuser-c947c528-06d0-4d0f-be5e-ceb6f1c32dfc/)
+    [D 2019-09-25 12:03:16.796 JupyterHub proxy:765] Proxy: Fetching DELETE http://10.99.19.102:8001/api/routes/user/testuser-c947c528-06d0-4d0f-be5e-ceb6f1c32dfc
+    [D 2019-09-25 12:03:16.799 JupyterHub user:724] Stopping testuser-c947c528-06d0-4d0f-be5e-ceb6f1c32dfc
+    [I 2019-09-25 12:03:16.800 JupyterHub spawner:1758] Deleting pod jupyter-testuser-2dc947c528-2d06d0-2d4d0f-2dbe5e-2dceb6f1c32dfc
+    [E 2019-09-25 12:03:21.132 JupyterHub app:2482]
+        Traceback (most recent call last):
+          File "/usr/local/lib/python3.6/dist-packages/jupyterhub/app.py", line 2480, in launch_instance_async
+            await self.start()
+          File "/usr/local/lib/python3.6/dist-packages/jupyterhub/app.py", line 2405, in start
+            await self.proxy.check_routes(self.users, self._service_map)
+          File "/usr/local/lib/python3.6/dist-packages/jupyterhub/proxy.py", line 62, in locked_method
+            return await method(*args, **kwargs)
+          File "/usr/local/lib/python3.6/dist-packages/jupyterhub/proxy.py", line 315, in check_routes
+            routes = await self.get_all_routes()
+          File "/usr/local/lib/python3.6/dist-packages/jupyterhub/proxy.py", line 804, in get_all_routes
+            resp = await self.api_request('', client=client)
+          File "/usr/local/lib/python3.6/dist-packages/jupyterhub/proxy.py", line 773, in api_request
+            result = await client.fetch(req)
+        tornado.curl_httpclient.CurlError: HTTP 599: Connection timed out after 20000 milliseconds
+
+    [D 2019-09-25 12:03:21.134 JupyterHub application:647] Exiting application: jupyterhub
+    ERROR:asyncio:Task exception was never retrieved
+    future: exception=SystemExit(1,)>
+    Traceback (most recent call last):
+      File "/usr/local/lib/python3.6/dist-packages/jupyterhub/app.py", line 2480, in launch_instance_async
+        await self.start()
+      File "/usr/local/lib/python3.6/dist-packages/jupyterhub/app.py", line 2405, in start
+        await self.proxy.check_routes(self.users, self._service_map)
+      File "/usr/local/lib/python3.6/dist-packages/jupyterhub/proxy.py", line 62, in locked_method
+        return await method(*args, **kwargs)
+      File "/usr/local/lib/python3.6/dist-packages/jupyterhub/proxy.py", line 315, in check_routes
+        routes = await self.get_all_routes()
+      File "/usr/local/lib/python3.6/dist-packages/jupyterhub/proxy.py", line 804, in get_all_routes
+
resp = await self.api_request('', client=client) + File "/usr/local/lib/python3.6/dist-packages/jupyterhub/proxy.py", line 773, in api_request + result = await client.fetch(req) + tornado.curl_httpclient.CurlError: HTTP 599: Connection timed out after 20000 milliseconds + During handling of the above exception, another exception occurred: + Traceback (most recent call last): + File "/usr/local/lib/python3.6/dist-packages/jupyterhub/app.py", line 2492, in launch_instance + loop.start() + File "/usr/local/lib/python3.6/dist-packages/tornado/platform/asyncio.py", line 148, in start + self.asyncio_loop.run_forever() + File "/usr/lib/python3.6/asyncio/base_events.py", line 438, in run_forever + self._run_once() + File "/usr/lib/python3.6/asyncio/base_events.py", line 1451, in _run_once + handle._run() + File "/usr/lib/python3.6/asyncio/events.py", line 145, in _run + self._callback(*self._args) + File "/usr/local/lib/python3.6/dist-packages/jupyterhub/app.py", line 2483, in launch_instance_async + self.exit(1) + File "/usr/local/lib/python3.6/dist-packages/traitlets/config/application.py", line 648, in exit + sys.exit(exit_status) + SystemExit: 1 + """ + -@pytest.mark.skipif(os.getenv('DISABLE_TEST_NETPOL') == '1', - reason="DISABLE_TEST_NETPOL set") def test_singleuser_netpol(api_request, jupyter_user, request_data): print("asking kubespawner to spawn a singleuser-server pod to test network policies") r = api_request.post('/users/' + jupyter_user + '/server') @@ -73,17 +224,17 @@ def test_singleuser_netpol(api_request, jupyter_user, request_data): print(server_model) pod_name = server_model['state']['pod_name'] - # Must match CIDR in minikube-netpol.yaml + # Must match CIDR in dev-config-netpol.yaml allowed_url = 'http://jupyter.org' blocked_url = 'http://mybinder.org' c = subprocess.run([ - 'kubectl', '--namespace=jupyterhub-test', 'exec', pod_name, '--', + 'kubectl', '--namespace=jh-ci', 'exec', pod_name, '--', 'wget', '-q', '-t1', '-T5', allowed_url]) assert c.returncode == 0, "Unable to get allowed domain" c = subprocess.run([ - 'kubectl', '--namespace=jupyterhub-test', 'exec', pod_name, '--', + 'kubectl', '--namespace=jh-ci', 'exec', pod_name, '--', 'wget', '-q', '-t1', '-T5', blocked_url]) assert c.returncode > 0, "Blocked domain was allowed" @@ -91,6 +242,8 @@ def test_singleuser_netpol(api_request, jupyter_user, request_data): _delete_server(api_request, jupyter_user, request_data['test_timeout']) + + def _wait_for_user_to_spawn(api_request, jupyter_user, timeout): endtime = time.time() + timeout while time.time() < endtime: @@ -114,6 +267,32 @@ def _wait_for_user_to_spawn(api_request, jupyter_user, timeout): def _delete_server(api_request, jupyter_user, timeout): + # FIXME: Can fail with a 503 response from proxy if the hub pod crashes + # attempting to respond to this request. 
The hub pod crashes because its own
+    # request to the proxy's REST API times out (HTTP 599), as the log
+    # excerpt below shows.
+
+    """
+    [I 2019-09-25 12:03:16.796 JupyterHub proxy:281] Removing user testuser-c947c528-06d0-4d0f-be5e-ceb6f1c32dfc from proxy (/user/testuser-c947c528-06d0-4d0f-be5e-ceb6f1c32dfc/)
+    [D 2019-09-25 12:03:16.796 JupyterHub proxy:765] Proxy: Fetching DELETE http://10.99.19.102:8001/api/routes/user/testuser-c947c528-06d0-4d0f-be5e-ceb6f1c32dfc
+    [D 2019-09-25 12:03:16.799 JupyterHub user:724] Stopping testuser-c947c528-06d0-4d0f-be5e-ceb6f1c32dfc
+    [I 2019-09-25 12:03:16.800 JupyterHub spawner:1758] Deleting pod jupyter-testuser-2dc947c528-2d06d0-2d4d0f-2dbe5e-2dceb6f1c32dfc
+    [E 2019-09-25 12:03:21.132 JupyterHub app:2482]
+        Traceback (most recent call last):
+          File "/usr/local/lib/python3.6/dist-packages/jupyterhub/app.py", line 2480, in launch_instance_async
+            await self.start()
+          File "/usr/local/lib/python3.6/dist-packages/jupyterhub/app.py", line 2405, in start
+            await self.proxy.check_routes(self.users, self._service_map)
+          File "/usr/local/lib/python3.6/dist-packages/jupyterhub/proxy.py", line 62, in locked_method
+            return await method(*args, **kwargs)
+          File "/usr/local/lib/python3.6/dist-packages/jupyterhub/proxy.py", line 315, in check_routes
+            routes = await self.get_all_routes()
+          File "/usr/local/lib/python3.6/dist-packages/jupyterhub/proxy.py", line 804, in get_all_routes
+            resp = await self.api_request('', client=client)
+          File "/usr/local/lib/python3.6/dist-packages/jupyterhub/proxy.py", line 773, in api_request
+            result = await client.fetch(req)
+        tornado.curl_httpclient.CurlError: HTTP 599: Connection timed out after 20000 milliseconds
+    """
+
     r = api_request.delete('/users/' + jupyter_user + '/server')
     assert r.status_code in (202, 204)
diff --git a/tools/templates/lint-and-validate.py b/tools/templates/lint-and-validate.py
index c7b209bcb2..bb65ef401f 100755
--- a/tools/templates/lint-and-validate.py
+++ b/tools/templates/lint-and-validate.py
@@ -8,15 +8,14 @@
 pip install yamllint
 
-- https://github.com/garethr/kubeval
+- https://github.com/instrumenta/kubeval
 
-LATEST=curl --silent "https://api.github.com/repos/garethr/kubeval/releases/latest" | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/'
-wget https://github.com/garethr/kubeval/releases/download/$LATEST/kubeval-linux-amd64.tar.gz
+LATEST=$(curl --silent "https://api.github.com/repos/instrumenta/kubeval/releases/latest" | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/')
+wget https://github.com/instrumenta/kubeval/releases/download/$LATEST/kubeval-linux-amd64.tar.gz
 tar xf kubeval-linux-amd64.tar.gz
 mv kubeval /usr/local/bin
 """
-
 import os
 import sys
 import argparse
@@ -40,7 +39,7 @@ def check_call(cmd, **kwargs):
     )
     sys.exit(e.returncode)
 
-def lint(yamllint_config, values, kubernetes_version, output_dir, debug):
+def lint(yamllint_config, values, kubernetes_versions, output_dir, debug):
     """Calls `helm lint`, `helm template`, `yamllint` and `kubeval`."""
 
     print("### Clearing output directory")
@@ -52,7 +51,7 @@
     ])
 
     print("### Linting started")
-    print("### 1/4 - helm lint")
+    print("### 1/4 - helm lint: lint helm templates")
     helm_lint_cmd = [
         'helm', 'lint', '../../jupyterhub',
         '--values', values,
@@ -61,7 +60,7 @@
         helm_lint_cmd.append('--debug')
     check_call(helm_lint_cmd)
 
-    print("### 2/4 - helm template")
+    print("### 2/4 - helm template: generate kubernetes resources")
     helm_template_cmd = [
         'helm', 'template',
         '../../jupyterhub',
         '--values', values,
@@ -71,31 +70,33 @@
         helm_template_cmd.append('--debug')
     check_call(helm_template_cmd)
 
-    print("### 3/4 - yamllint")
+    print("### 3/4 - yamllint: yaml lint generated kubernetes resources")
     check_call([
         'yamllint', '-c', yamllint_config, output_dir
     ])
 
-    print("### 4/4 - kubeval")
-    for filename in glob.iglob(output_dir + '/**/*.yaml', recursive=True):
-        check_call([
-            'kubeval', filename,
-            '--kubernetes-version', kubernetes_version,
-            '--strict',
-        ])
+    print("### 4/4 - kubeval: validate generated kubernetes resources")
+    for kubernetes_version in kubernetes_versions.split(","):
+        print("#### kubernetes_version ", kubernetes_version)
+        for filename in glob.iglob(output_dir + '/**/*.yaml', recursive=True):
+            check_call([
+                'kubeval', filename,
+                '--kubernetes-version', kubernetes_version,
+                '--strict',
+            ])
 
     print()
-    print("### Linting and validation of templates finished: All good!")
+    print("### Linting and validation of helm templates and generated kubernetes resources OK!")
 
 if __name__ == '__main__':
     argparser = argparse.ArgumentParser()
     argparser.add_argument('--debug', action='store_true', help='Run helm lint and helm template with the --debug flag')
     argparser.add_argument('--values', default='lint-and-validate-values.yaml', help='Specify Helm values in a YAML file (can specify multiple)')
-    argparser.add_argument('--kubernetes-version', default='1.11.0', help='Version of Kubernetes to validate against')
+    argparser.add_argument('--kubernetes-versions', default='1.15.0', help='Comma-separated list of Kubernetes versions to validate against')
     argparser.add_argument('--output-dir', default='rendered-templates', help='Output directory for the rendered templates. Warning: content in this will be wiped.')
     argparser.add_argument('--yamllint-config', default='yamllint-config.yaml', help='Specify a yamllint config')
     args = argparser.parse_args()
 
-    lint(args.yamllint_config, args.values, args.kubernetes_version, args.output_dir, args.debug)
+    lint(args.yamllint_config, args.values, args.kubernetes_versions, args.output_dir, args.debug)
diff --git a/vagrant-vm-setup.sh b/vagrant-vm-setup.sh
new file mode 100644
index 0000000000..e173a10b78
--- /dev/null
+++ b/vagrant-vm-setup.sh
@@ -0,0 +1,21 @@
+#!/bin/sh
+set -eu
+
+## Install pip
+##
+## NOTE: pip installs executable packages in ~/.local/bin
+##
+apt-get -q update
+apt-get -q install -y python3-pip
+echo 'PATH=$PATH:~/.local/bin' >> /home/vagrant/.bashrc
+
+## Install Docker CE
+##
+## ref: https://docs.docker.com/install/linux/docker-ce/ubuntu/#install-using-the-convenience-script
+##
+curl -sSL https://get.docker.com | sh
+usermod -aG docker vagrant
+
+## When we run ./ci/vagrant-run-ci.sh we get some environment variables set,
+## but these will be lost if the script quits due to an error.
+echo 'PATH=$PATH:~/zero-to-jupyterhub-k8s/bin' >> /home/vagrant/.bashrc
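+
+## NOTE: An assumed usage sketch, for illustration only (the actual
+##       Vagrantfile wiring may differ):
+##
+##         vagrant up    # provisions the VM using this script
+##         vagrant ssh
+##         cd zero-to-jupyterhub-k8s && ./ci/vagrant-run-ci.sh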