Add benchmarking code (GoogleContainerTools#448)
* adding benchmarking code

* enable writing to file

* fix build

* time more stuff

* adding benchmarking to integration tests

* compare docker and kaniko times in integration tests

* Switch to setting benchmark file with an env var

* close file at the right time

* fix integration test with environment variables

* fix integration tests

* Adding benchmarking documentation to DEVELOPMENT.md

* human readable benchmarking steps
sharifelgamal committed Nov 28, 2018
1 parent a49fd79 commit 7cde036
Showing 6 changed files with 85 additions and 11 deletions.
21 changes: 21 additions & 0 deletions DEVELOPMENT.md
@@ -97,6 +97,27 @@ Requirements:

These tests will be kicked off by [reviewers](#reviews) for submitted PRs.

### Benchmarking

The goal is for kaniko to build Dockerfiles at least as fast as Docker does. To that end, benchmarking is built in
to measure not only how long each full run takes, but also how long each individual step of the run takes. To turn
on benchmarking, set the `BENCHMARK_FILE` environment variable and kaniko will write the benchmark results for the
run to that file:
```shell
docker run -v $(pwd):/workspace -v ~/.config:/root/.config \
-e BENCHMARK_FILE=/workspace/benchmark_file \
gcr.io/kaniko-project/executor:latest \
--dockerfile=<path to Dockerfile> --context=/workspace \
--destination=gcr.io/my-repo/my-image
```
Additionally, the integration tests can output benchmarking information to a `benchmarks` directory under the
`integration` directory if the `BENCHMARK` environment variable is set to `true`:
```shell
BENCHMARK=true go test -v --bucket $GCS_BUCKET --repo $IMAGE_REPO
```
## Creating a PR
When you have changes you would like to propose to kaniko, you will need to:
1 change: 1 addition & 0 deletions cmd/executor/cmd/root.go
@@ -47,6 +47,7 @@ func init() {
addHiddenFlags(RootCmd)
}

// RootCmd is the kaniko command that is run
var RootCmd = &cobra.Command{
Use: "executor",
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
16 changes: 15 additions & 1 deletion integration/images.go
@@ -25,12 +25,15 @@ import (
"runtime"
"strconv"
"strings"

"github.com/GoogleContainerTools/kaniko/pkg/timing"
)

const (
// ExecutorImage is the name of the kaniko executor image
ExecutorImage = "executor-image"
// WarmerImage is the name of the kaniko cache warmer image
WarmerImage = "warmer-image"

dockerPrefix = "docker-"
kanikoPrefix = "kaniko-"
@@ -159,7 +162,9 @@ func (d *DockerFileBuilder) BuildImage(imageRepo, gcsBucket, dockerfilesPath, do
additionalFlags...)...,
)

timer := timing.Start(dockerfile + "_docker")
_, err := RunCommandWithoutTest(dockerCmd)
timing.DefaultRun.Stop(timer)
if err != nil {
return fmt.Errorf("Failed to build image %s with docker command \"%s\": %s", dockerImage, dockerCmd.Args, err)
}
@@ -182,21 +187,30 @@ }
}
}

benchmarkEnv := "BENCHMARK_FILE=false"
if os.Getenv("BENCHMARK") == "true" {
os.Mkdir("benchmarks", 0755)
benchmarkEnv = "BENCHMARK_FILE=/workspace/benchmarks/" + dockerfile
}

// build kaniko image
additionalFlags = append(buildArgs, additionalKanikoFlagsMap[dockerfile]...)
kanikoImage := GetKanikoImage(imageRepo, dockerfile)
kanikoCmd := exec.Command("docker",
append([]string{"run",
"-v", os.Getenv("HOME") + "/.config/gcloud:/root/.config/gcloud",
"-v", cwd + ":/workspace",
"-e", benchmarkEnv,
ExecutorImage,
"-f", path.Join(buildContextPath, dockerfilesPath, dockerfile),
"-d", kanikoImage, reproducibleFlag,
contextFlag, contextPath},
additionalFlags...)...,
)

timer = timing.Start(dockerfile + "_kaniko")
_, err = RunCommandWithoutTest(kanikoCmd)
timing.DefaultRun.Stop(timer)
if err != nil {
return fmt.Errorf("Failed to build image %s with kaniko command \"%s\": %s", dockerImage, kanikoCmd.Args, err)
}
11 changes: 11 additions & 0 deletions integration/integration_test.go
@@ -32,6 +32,7 @@ import (
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1/daemon"

"github.com/GoogleContainerTools/kaniko/pkg/timing"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/GoogleContainerTools/kaniko/testutil"
)
@@ -215,6 +216,16 @@ func TestRun(t *testing.T) {

})
}

if os.Getenv("BENCHMARK") == "true" {
f, err := os.Create("benchmark")
if err != nil {
t.Logf("Failed to create benchmark file")
} else {
defer f.Close()
f.WriteString(timing.Summary())
}
}
}

func TestLayers(t *testing.T) {
45 changes: 37 additions & 8 deletions pkg/executor/build.go
@@ -38,6 +38,7 @@ import (
"github.com/GoogleContainerTools/kaniko/pkg/constants"
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"github.com/GoogleContainerTools/kaniko/pkg/snapshot"
"github.com/GoogleContainerTools/kaniko/pkg/timing"
"github.com/GoogleContainerTools/kaniko/pkg/util"
)

@@ -56,14 +57,18 @@ type stageBuilder struct {

// newStageBuilder returns a new type stageBuilder which contains all the information required to build the stage
func newStageBuilder(opts *config.KanikoOptions, stage config.KanikoStage) (*stageBuilder, error) {
t := timing.Start("Retrieving Source Image")
sourceImage, err := util.RetrieveSourceImage(stage, opts)
if err != nil {
return nil, err
}
timing.DefaultRun.Stop(t)
t = timing.Start("Retrieving Config File")
imageConfig, err := util.RetrieveConfigFile(sourceImage)
if err != nil {
return nil, err
}
timing.DefaultRun.Stop(t)
if err := resolveOnBuild(&stage, &imageConfig.Config); err != nil {
return nil, err
}
@@ -179,17 +184,21 @@ func (s *stageBuilder) build() error {
}
}
if shouldUnpack {
t := timing.Start("FS Unpacking")
if _, err := util.GetFSFromImage(constants.RootDir, s.image); err != nil {
return err
}
timing.DefaultRun.Stop(t)
}
if err := util.DetectFilesystemWhitelist(constants.WhitelistPath); err != nil {
return err
}
// Take initial snapshot
t := timing.Start("Initial FS snapshot")
if err := s.snapshotter.Init(); err != nil {
return err
}
timing.DefaultRun.Stop(t)

cacheGroup := errgroup.Group{}
for index, command := range cmds {
@@ -199,7 +208,7 @@ func (s *stageBuilder) build() error {

// Add the next command to the cache key.
compositeKey.AddKey(command.String())

t := timing.Start("Command: " + command.String())
// If the command uses files from the context, add them.
files, err := command.FilesUsedFromContext(&s.cf.Config, args)
if err != nil {
@@ -239,6 +248,7 @@ func (s *stageBuilder) build() error {
if err := s.saveSnapshotToImage(command.String(), tarPath); err != nil {
return err
}
timing.DefaultRun.Stop(t)
}
if err := cacheGroup.Wait(); err != nil {
logrus.Warnf("error uploading layer to cache: %s", err)
@@ -247,15 +257,21 @@ }
}

func (s *stageBuilder) takeSnapshot(files []string) (string, error) {
var snapshot string
var err error
t := timing.Start("Snapshotting FS")
if files == nil || s.opts.SingleSnapshot {
snapshot, err = s.snapshotter.TakeSnapshotFS()
} else {
// Volumes are very weird. They get created in their command, but snapshotted in the next one.
// Add them to the list of files to snapshot.
for v := range s.cf.Config.Volumes {
files = append(files, v)
}
snapshot, err = s.snapshotter.TakeSnapshot(files)
}
timing.DefaultRun.Stop(t)
return snapshot, err
}

func (s *stageBuilder) shouldTakeSnapshot(index int, files []string) bool {
@@ -315,6 +331,7 @@ func (s *stageBuilder) saveSnapshotToImage(createdBy string, tarPath string) err

// DoBuild executes building the Dockerfile
func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
t := timing.Start("Total Build Time")
// Parse dockerfile and unpack base image to root
stages, err := dockerfile.Stages(opts)
if err != nil {
@@ -349,6 +366,17 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
return nil, err
}
}
timing.DefaultRun.Stop(t)
benchmarkFile := os.Getenv("BENCHMARK_FILE")
// false is a keyword for integration tests to turn off benchmarking
if benchmarkFile != "" && benchmarkFile != "false" {
f, err := os.Create(benchmarkFile)
if err != nil {
logrus.Warnf("Unable to create benchmarking file %s: %s", benchmarkFile, err)
} else {
defer f.Close()
f.WriteString(timing.Summary())
}
}
return sourceImage, nil
}
if stage.SaveStage {
@@ -364,6 +392,7 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
return nil, err
}
}

return nil, err
}

2 changes: 0 additions & 2 deletions pkg/timing/timing.go
@@ -18,7 +18,6 @@ package timing

import (
"bytes"
"fmt"
"sync"
"text/template"
"time"
@@ -42,7 +41,6 @@ func (tr *TimedRun) Stop(t *Timer) {
if _, ok := tr.categories[t.category]; !ok {
tr.categories[t.category] = 0
}
fmt.Println(stop)
tr.cl.Lock()
defer tr.cl.Unlock()
tr.categories[t.category] += stop.Sub(t.startTime)
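
For orientation, the instrumentation pattern this commit applies throughout `pkg/executor` and the integration tests boils down to the following standalone sketch. `timing.Start`, `timing.DefaultRun.Stop`, and `timing.Summary` are the calls visible in this diff; the category name, output path, and `main` wrapper are illustrative only.

```go
package main

import (
	"fmt"
	"os"

	"github.com/GoogleContainerTools/kaniko/pkg/timing"
)

func timedWork() {
	// Start returns a timer for the named category; DefaultRun.Stop records the
	// elapsed time and accumulates it under that category.
	t := timing.Start("Example Phase")
	// ... the work being measured ...
	timing.DefaultRun.Stop(t)
}

func main() {
	timedWork()

	// Summary renders the accumulated categories as a human-readable string;
	// in this commit, DoBuild writes it to the path named by BENCHMARK_FILE.
	f, err := os.Create("benchmark_file")
	if err != nil {
		fmt.Fprintf(os.Stderr, "unable to create benchmark file: %v\n", err)
		return
	}
	defer f.Close()
	f.WriteString(timing.Summary())
}
```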