Commit

Env var up dirty runs, fix up destroy function
kegsay committed Oct 17, 2023
1 parent 01bba76 commit 5bf1834
Showing 6 changed files with 63 additions and 20 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/ci.yaml
@@ -43,13 +43,13 @@ jobs:
  - homeserver: Synapse
    tags: synapse_blacklist
    packages: ./tests/msc3874 ./tests/msc3902
-   env: "COMPLEMENT_SHARE_ENV_PREFIX=PASS_ PASS_SYNAPSE_COMPLEMENT_DATABASE=sqlite"
+   env: "COMPLEMENT_ENABLE_DIRTY_RUNS=1 COMPLEMENT_SHARE_ENV_PREFIX=PASS_ PASS_SYNAPSE_COMPLEMENT_DATABASE=sqlite"
    timeout: 20m

  - homeserver: Dendrite
    tags: dendrite_blacklist
    packages: ""
-   env: ""
+   env: "COMPLEMENT_ENABLE_DIRTY_RUNS=1"
    timeout: 10m

steps:
5 changes: 5 additions & 0 deletions ENVIRONMENT.md
@@ -21,6 +21,11 @@ If 1, prints out more verbose logging such as HTTP request/response bodies.
- Type: `bool`
- Default: 0

+#### `COMPLEMENT_ENABLE_DIRTY_RUNS`
+If 1, eligible tests will be provided with reusable deployments rather than a clean deployment. Eligible tests are tests run with `Deploy(t, numHomeservers)`. If enabled, `COMPLEMENT_ALWAYS_PRINT_SERVER_LOGS` and `COMPLEMENT_POST_TEST_SCRIPT` are ignored. Enabling dirty runs can greatly speed up tests, at the cost of clear server logs and the chance of tests polluting each other. Tests using `OldDeploy` and blueprints will still have a fresh image for each test. Fresh images can still be desirable: for example, user directory tests need a clean homeserver or search results can be polluted, and tests which can blacklist a server over federation need isolated deployments to stop failures impacting other tests. For these reasons, there will always be a way for a test to override this setting and get a dedicated deployment. Eventually, dirty runs will become the default running mode of Complement, with an environment variable to disable this behaviour added later, once this has stabilised.
+- Type: `bool`
+- Default: 0
+
#### `COMPLEMENT_HOSTNAME_RUNNING_COMPLEMENT`
The hostname of Complement from the perspective of a Homeserver running inside a container. This can be useful for container runtimes using another hostname to access the host from a container, like Podman that uses `host.containers.internal` instead.
- Type: `string`
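To make "eligible tests" concrete, here is a minimal sketch of a test that would qualify for a reused deployment under dirty runs. It assumes the package-level `complement.Deploy` wrapper around the `TestPackage.Deploy` changed below; the test name and body are illustrative:

```go
package tests

import (
	"testing"

	"github.com/matrix-org/complement"
)

// This test deploys via Deploy(t, numHomeservers), so with
// COMPLEMENT_ENABLE_DIRTY_RUNS=1 it may be handed an already-running
// deployment created by an earlier test.
func TestDirtyRunEligible(t *testing.T) {
	deployment := complement.Deploy(t, 1)
	// For dirty deployments, Destroy is a no-op apart from printing logs
	// on failure, so the containers stay up for subsequent tests.
	defer deployment.Destroy(t)
	// ... exercise the homeserver via deployment here ...
}
```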
18 changes: 18 additions & 0 deletions internal/config/config.go
@@ -86,6 +86,23 @@ type Complement struct {
    // like Podman that uses `host.containers.internal` instead.
    HostnameRunningComplement string

+   // Name: COMPLEMENT_ENABLE_DIRTY_RUNS
+   // Default: 0
+   // Description: If 1, eligible tests will be provided with reusable deployments rather than a clean deployment.
+   // Eligible tests are tests run with `Deploy(t, numHomeservers)`. If enabled, COMPLEMENT_ALWAYS_PRINT_SERVER_LOGS
+   // and COMPLEMENT_POST_TEST_SCRIPT are ignored.
+   //
+   // Enabling dirty runs can greatly speed up tests, at the cost of clear server logs and the chance of tests
+   // polluting each other. Tests using `OldDeploy` and blueprints will still have a fresh image for each test.
+   // Fresh images can still be desirable: for example, user directory tests need a clean homeserver or search
+   // results can be polluted, and tests which can blacklist a server over federation need isolated deployments
+   // to stop failures impacting other tests. For these reasons, there will always be a way for a test to
+   // override this setting and get a dedicated deployment.
+   //
+   // Eventually, dirty runs will become the default running mode of Complement, with an environment variable to
+   // disable this behaviour added later, once this has stabilised.
+   EnableDirtyRuns bool
+
    HSPortBindingIP string

    // Name: COMPLEMENT_POST_TEST_SCRIPT
@@ -106,6 +123,7 @@ func NewConfigFromEnvVars(pkgNamespace, baseImageURI string) *Complement {
    }
    cfg.DebugLoggingEnabled = os.Getenv("COMPLEMENT_DEBUG") == "1"
    cfg.AlwaysPrintServerLogs = os.Getenv("COMPLEMENT_ALWAYS_PRINT_SERVER_LOGS") == "1"
+   cfg.EnableDirtyRuns = os.Getenv("COMPLEMENT_ENABLE_DIRTY_RUNS") == "1"
    cfg.EnvVarsPropagatePrefix = os.Getenv("COMPLEMENT_SHARE_ENV_PREFIX")
    cfg.PostTestScript = os.Getenv("COMPLEMENT_POST_TEST_SCRIPT")
    cfg.SpawnHSTimeout = time.Duration(parseEnvWithDefault("COMPLEMENT_SPAWN_HS_TIMEOUT_SECS", 30)) * time.Second
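One detail worth noting from the parsing code above: boolean options are enabled only by the literal string "1", so values like "true" or "yes" leave dirty runs disabled. A tiny standalone sketch (not part of this commit) demonstrating that convention:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Mirrors cfg.EnableDirtyRuns = os.Getenv("COMPLEMENT_ENABLE_DIRTY_RUNS") == "1"
	for _, v := range []string{"", "0", "true", "1"} {
		os.Setenv("COMPLEMENT_ENABLE_DIRTY_RUNS", v)
		enabled := os.Getenv("COMPLEMENT_ENABLE_DIRTY_RUNS") == "1"
		fmt.Printf("COMPLEMENT_ENABLE_DIRTY_RUNS=%q -> %v\n", v, enabled)
	}
}
```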
6 changes: 6 additions & 0 deletions internal/docker/deployer.go
@@ -148,6 +148,12 @@ func (d *Deployer) Deploy(ctx context.Context, blueprintName string) (*Deployment, error) {
    return dep, lastErr
}

+func (d *Deployer) PrintLogs(dep *Deployment) {
+   for _, hsDep := range dep.HS {
+       printLogs(d.Docker, hsDep.ContainerID, hsDep.ContainerID)
+   }
+}
+
// Destroy a deployment. This will kill all running containers.
func (d *Deployer) Destroy(dep *Deployment, printServerLogs bool, testName string, failed bool) {
    for _, hsDep := range dep.HS {
10 changes: 9 additions & 1 deletion internal/docker/deployment.go
@@ -21,6 +21,8 @@ type Deployment struct {
    Deployer *Deployer
    // The name of the deployed blueprint
    BlueprintName string
+   // Set to true if this deployment is a dirty deployment and so should not be destroyed.
+   Dirty bool
    // A map of HS name to a HomeserverDeployment
    HS map[string]*HomeserverDeployment
    Config *config.Complement
@@ -56,7 +58,13 @@ func (hsDep *HomeserverDeployment) SetEndpoints(baseURL string, fedBaseURL string) {
// will print container logs before killing the container.
func (d *Deployment) Destroy(t *testing.T) {
    t.Helper()
-   // d.Deployer.Destroy(d, d.Deployer.config.AlwaysPrintServerLogs || t.Failed(), t.Name(), t.Failed())
+   if d.Dirty {
+       if t.Failed() {
+           d.Deployer.PrintLogs(d)
+       }
+       return
+   }
+   d.Deployer.Destroy(d, d.Deployer.config.AlwaysPrintServerLogs || t.Failed(), t.Name(), t.Failed())
}

func (d *Deployment) GetConfig() *config.Complement {
40 changes: 23 additions & 17 deletions test_package.go
@@ -50,6 +50,11 @@ type TestPackage struct {
    complementBuilder *docker.Builder
    // a counter to stop tests from allocating the same container name
    namespaceCounter uint64
+
+   // pointers to existing deployments, keyed by homeserver count, which are reused for
+   // Deploy(t, 1) style deployments when run in dirty mode.
+   existingDeployments map[int]*docker.Deployment
+   existingDeploymentsMu *sync.Mutex
}

// NewTestPackage creates a new test package which can be used to deploy containers for all tests
@@ -70,9 +75,11 @@ func NewTestPackage(pkgNamespace string) (*TestPackage, error) {
    logrus.SetLevel(logrus.ErrorLevel)

    return &TestPackage{
-       complementBuilder: builder,
-       namespaceCounter:  0,
-       Config:            cfg,
+       complementBuilder:     builder,
+       namespaceCounter:      0,
+       Config:                cfg,
+       existingDeployments:   make(map[int]*docker.Deployment),
+       existingDeploymentsMu: &sync.Mutex{},
    }, nil
}

@@ -104,20 +111,16 @@ func (tp *TestPackage) OldDeploy(t *testing.T, blueprint b.Blueprint) Deployment {
    return dep
}

-var (
-   existingDeployments = map[int]*docker.Deployment{}
-   mu sync.Mutex
-)
-
func (tp *TestPackage) Deploy(t *testing.T, numServers int) Deployment {
    t.Helper()
-   mu.Lock()
-   existingDep := existingDeployments[numServers]
-   if existingDep != nil {
-       mu.Unlock()
-       return existingDep
+   if tp.Config.EnableDirtyRuns {
+       tp.existingDeploymentsMu.Lock()
+       existingDep := tp.existingDeployments[numServers]
+       tp.existingDeploymentsMu.Unlock()
+       if existingDep != nil {
+           return existingDep
+       }
    }
-   mu.Unlock()
    blueprint := mapServersToBlueprint(numServers)
    timeStartBlueprint := time.Now()
    if err := tp.complementBuilder.ConstructBlueprintIfNotExist(blueprint); err != nil {
@@ -134,9 +137,12 @@ func (tp *TestPackage) Deploy(t *testing.T, numServers int) Deployment {
        t.Fatalf("Deploy: Deploy returned error %s", err)
    }
    t.Logf("Deploy times: %v blueprints, %v containers", timeStartDeploy.Sub(timeStartBlueprint), time.Since(timeStartDeploy))
-   mu.Lock()
-   existingDeployments[numServers] = dep
-   mu.Unlock()
+   if tp.Config.EnableDirtyRuns {
+       dep.Dirty = true // stop this deployment being destroyed.
+       tp.existingDeploymentsMu.Lock()
+       tp.existingDeployments[numServers] = dep
+       tp.existingDeploymentsMu.Unlock()
+   }
    return dep
}
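The reuse cache is keyed solely on the number of homeservers, so under dirty runs every `Deploy(t, n)` call with the same `n` shares one deployment. Below is a standalone sketch of the same get-or-create pattern; the `deployment` type and `getOrDeploy` helper are illustrative stand-ins, not Complement APIs:

```go
package main

import (
	"fmt"
	"sync"
)

// deployment stands in for *docker.Deployment in this sketch.
type deployment struct{ numServers int }

var (
	cache   = make(map[int]*deployment)
	cacheMu sync.Mutex
)

// getOrDeploy mirrors the dirty-run path of TestPackage.Deploy: return the
// cached deployment for this homeserver count, or create and cache a new one.
func getOrDeploy(numServers int) *deployment {
	cacheMu.Lock()
	existing := cache[numServers]
	cacheMu.Unlock()
	if existing != nil {
		return existing
	}
	dep := &deployment{numServers: numServers} // the expensive container spin-up, in reality
	cacheMu.Lock()
	cache[numServers] = dep
	cacheMu.Unlock()
	return dep
}

func main() {
	a := getOrDeploy(1)
	b := getOrDeploy(1)
	fmt.Println(a == b) // true: the second call reuses the first deployment
}
```

As in the commit, the mutex is released between the lookup and the insert, so two tests racing on a cold cache could each deploy, with the later one winning the cache slot; for tests that do not run in parallel this is harmless.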
