diff --git a/jupyterhub/templates/proxy/deployment.yaml b/jupyterhub/templates/proxy/deployment.yaml
index f72d403776..13543b4454 100644
--- a/jupyterhub/templates/proxy/deployment.yaml
+++ b/jupyterhub/templates/proxy/deployment.yaml
@@ -11,6 +11,8 @@ spec:
   selector:
     matchLabels:
       {{- include "jupyterhub.matchLabels" . | nindent 6 }}
+  strategy:
+    {{- .Values.proxy.deploymentStrategy | toYaml | trimSuffix "\n" | nindent 4 }}
   template:
     metadata:
       labels:
diff --git a/jupyterhub/values.yaml b/jupyterhub/values.yaml
index 94c56f9201..98f451d9a6 100644
--- a/jupyterhub/values.yaml
+++ b/jupyterhub/values.yaml
@@ -18,10 +18,13 @@ hub:
   consecutiveFailureLimit: 5
   activeServerLimit:
   deploymentStrategy:
-    # sqlite-pvc backed hub requires Recreate strategy to work
+    ## type: Recreate
+    ## - sqlite-pvc backed hubs require the Recreate deployment strategy, as
+    ##   typical PVC storage can only be bound to one pod at a time.
+    ## - JupyterHub isn't designed to be run in parallel. More work is needed
+    ##   in JupyterHub itself before a fully highly available (HA) deployment
+    ##   of JupyterHub on k8s becomes possible.
     type: Recreate
-    # This is required for upgrading to work
-    rollingUpdate:
   db:
     type: sqlite-pvc
   upgrade:
@@ -82,6 +85,21 @@ rbac:
 
 proxy:
   secretToken: ''
+  deploymentStrategy:
+    ## type: Recreate
+    ## - JupyterHub's interaction with the CHP proxy becomes a lot more robust
+    ##   with this configuration. To understand why, consider that JupyterHub
+    ##   during startup interacts heavily with the k8s service to reach a
+    ##   ready proxy pod. If the hub pod restarts during a helm upgrade while
+    ##   the proxy pod is going through a rolling upgrade, the hub pod could
+    ##   begin a sequence of interactions against the old proxy pod and finish
+    ##   that sequence against the new proxy pod. As CHP proxy pods carry
+    ##   individual state, this is very error prone. One observed outcome of
+    ##   not using the Recreate strategy is that user pods were deleted by the
+    ##   hub pod, which considered them unreachable: it had only configured
+    ##   routes on the old proxy pod, not the new one, before trying to reach
+    ##   them.
+    type: Recreate
   service:
     type: LoadBalancer
     labels: {}
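
Because the template pipes `proxy.deploymentStrategy` through `toYaml`, the whole mapping a user supplies is rendered into the Deployment's `strategy` field. A minimal sketch of overriding it, assuming a hypothetical user config file `config.yaml` and release name `jhub` (the fields under `deploymentStrategy` are the standard Kubernetes Deployment strategy fields, not chart-specific):

```yaml
# config.yaml -- hypothetical user override switching the proxy back to a
# rolling update instead of the chart's new Recreate default. Note the
# robustness caveats described in the values.yaml comments above.
proxy:
  deploymentStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
```

Applied with e.g. `helm upgrade jhub jupyterhub/jupyterhub --values config.yaml`; nested fields such as `rollingUpdate` pass through to the rendered Deployment unchanged.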