diff --git a/go.mod b/go.mod index 5bea55445..b18dfbee7 100644 --- a/go.mod +++ b/go.mod @@ -18,6 +18,7 @@ require ( github.com/k14s/kbld v0.32.0 github.com/lithammer/dedent v1.1.0 github.com/logrusorgru/aurora v2.0.3+incompatible + github.com/novln/docker-parser v1.0.0 github.com/onsi/ginkgo/v2 v2.9.2 github.com/onsi/gomega v1.27.4 github.com/otiai10/copy v1.4.2 diff --git a/go.sum b/go.sum index bcf1a91bb..7978eee81 100644 --- a/go.sum +++ b/go.sum @@ -972,6 +972,8 @@ github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OS github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nightlyone/lockfile v1.0.0/go.mod h1:rywoIealpdNse2r832aiD9jRk8ErCatROs6LzC841CI= github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod h1:nt3d53pc1VYcphSCIaYAJtnPYnr3Zyn8fMq2wvPGPso= +github.com/novln/docker-parser v1.0.0 h1:PjEBd9QnKixcWczNGyEdfUrP6GR0YUilAqG7Wksg3uc= +github.com/novln/docker-parser v1.0.0/go.mod h1:oCeM32fsoUwkwByB5wVjsrsVQySzPWkl3JdlTn1txpE= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= diff --git a/pkg/airgapped/helper.go b/pkg/airgapped/helper.go new file mode 100644 index 000000000..6d50e9cf6 --- /dev/null +++ b/pkg/airgapped/helper.go @@ -0,0 +1,40 @@ +// Copyright 2023 VMware, Inc. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +package airgapped + +import ( + "fmt" + "strings" + + dockerparser "github.com/novln/docker-parser" + "github.com/pkg/errors" +) + +// GetPluginInventoryMetadataImage returns the plugin inventory metadata +// image based on plugin inventory image. +// E.g. 
if plugin inventory image is `fake.repo.com/plugin/plugin-inventory:latest` +// it returns metadata image as `fake.repo.com/plugin/plugin-inventory-metadata:latest` +func GetPluginInventoryMetadataImage(pluginInventoryImage string) (string, error) { + ref, err := dockerparser.Parse(pluginInventoryImage) + if err != nil { + return "", errors.Wrapf(err, "invalid image %q", pluginInventoryImage) + } + return fmt.Sprintf("%s-metadata:%s", ref.Repository(), ref.Tag()), nil +} + +// GetImageRelativePath returns relative path of the image based on `basePath` +// E.g. If image is `fake.repo.com/plugin/plugin-inventory:latest` with +// basePath as `fake.repo.com/plugin` it should return +// `plugin-inventory:latest` if withTag is true and +// `plugin-inventory` if withTag is false +func GetImageRelativePath(image, basePath string, withTag bool) string { + relativePath := strings.TrimPrefix(image, basePath) + if withTag { + return relativePath + } + if idx := strings.LastIndex(relativePath, ":"); idx != -1 { + return relativePath[:idx] + } + return relativePath +} diff --git a/pkg/airgapped/helper_test.go b/pkg/airgapped/helper_test.go new file mode 100644 index 000000000..26fb18ddf --- /dev/null +++ b/pkg/airgapped/helper_test.go @@ -0,0 +1,102 @@ +// Copyright 2023 VMware, Inc. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package airgapped + +import ( + "testing" + + "github.com/tj/assert" +) + +func Test_GetPluginInventoryMetadataImage(t *testing.T) { + assert := assert.New(t) + + tests := []struct { + pluginInventoryImage string + expectedMetadataImage string + errString string + }{ + { + pluginInventoryImage: "fake.repo.com/plugin/plugin-inventory:latest", + expectedMetadataImage: "fake.repo.com/plugin/plugin-inventory-metadata:latest", + errString: "", + }, + { + pluginInventoryImage: "fake.repo.com/plugin/airgapped:v1.0.0", + expectedMetadataImage: "fake.repo.com/plugin/airgapped-metadata:v1.0.0", + errString: "", + }, + { + pluginInventoryImage: "fake.repo.com/plugin/metadata", + expectedMetadataImage: "fake.repo.com/plugin/metadata-metadata:latest", + errString: "", + }, + { + pluginInventoryImage: "invalid-inventory-image$#", + expectedMetadataImage: "", + errString: "invalid image", + }, + } + + for _, test := range tests { + t.Run(test.pluginInventoryImage, func(t *testing.T) { + actualMetadataImage, err := GetPluginInventoryMetadataImage(test.pluginInventoryImage) + assert.Equal(actualMetadataImage, test.expectedMetadataImage) + if test.errString == "" { + assert.Nil(err) + } else { + assert.Contains(err.Error(), test.errString) + } + }) + } +} + +func Test_GetImageRelativePath(t *testing.T) { + assert := assert.New(t) + + tests := []struct { + image string + basePath string + withTag bool + expectedRelativePath string + }{ + { + image: "fake.repo.com/plugin/plugin-inventory:latest", + basePath: "fake.repo.com/plugin/", + withTag: true, + expectedRelativePath: "plugin-inventory:latest", + }, + { + image: "fake.repo.com/plugin/plugin-inventory:latest", + basePath: "fake.repo.com/plugin/", + withTag: false, + expectedRelativePath: "plugin-inventory", + }, + { + image: "fake.repo.com/plugin/airgapped:v1.0.0", + basePath: "fake.repo.com/", + withTag: true, + expectedRelativePath: "plugin/airgapped:v1.0.0", + }, + { + image: 
"fake.repo.com/plugin/metadata", + basePath: "fake.repo.com/", + withTag: false, + expectedRelativePath: "plugin/metadata", + }, + { + image: "fake.repo.com/plugin/metadata:latest", + basePath: "fake.repo.com/plugin/metadata-metadata", + withTag: true, + expectedRelativePath: "fake.repo.com/plugin/metadata:latest", + }, + } + + for _, test := range tests { + t.Run(test.image, func(t *testing.T) { + actualImage := GetImageRelativePath(test.image, test.basePath, test.withTag) + assert.Equal(actualImage, test.expectedRelativePath) + }) + } +} diff --git a/pkg/airgapped/plugin_bundle_download.go b/pkg/airgapped/plugin_bundle_download.go index e3636e970..a91d3f06e 100644 --- a/pkg/airgapped/plugin_bundle_download.go +++ b/pkg/airgapped/plugin_bundle_download.go @@ -8,7 +8,6 @@ import ( "os" "path" "path/filepath" - "strings" "github.com/pkg/errors" "github.com/verybluebot/tarinator-go" @@ -23,6 +22,7 @@ import ( type DownloadPluginBundleOptions struct { PluginInventoryImage string ToTar string + Groups []string ImageProcessor carvelhelpers.ImageOperationsImpl } @@ -30,7 +30,8 @@ type DownloadPluginBundleOptions struct { // DownloadPluginBundle download the plugin bundle based on provided plugin inventory image // and save it as tar file func (o *DownloadPluginBundleOptions) DownloadPluginBundle() error { - err := o.validateInput() + // Validate the input options + err := o.validateOptions() if err != nil { return err } @@ -40,60 +41,159 @@ func (o *DownloadPluginBundleOptions) DownloadPluginBundle() error { if err != nil { return errors.Wrap(err, "unable to create temp directory") } - tempDir := filepath.Join(tempBaseDir, PluginBundleDirName) - err = os.Mkdir(tempDir, os.ModePerm) + tempPluginBundleDir := filepath.Join(tempBaseDir, PluginBundleDirName) + err = os.Mkdir(tempPluginBundleDir, os.ModePerm) if err != nil { return errors.Wrap(err, "unable to create temp directory") } defer os.RemoveAll(tempBaseDir) - // Download the plugin inventory oci image to '/db/' - 
inventoryFile := filepath.Join(tempBaseDir, "db", plugininventory.SQliteDBFileName) - if err := o.ImageProcessor.DownloadImageAndSaveFilesToDir(o.PluginInventoryImage, filepath.Dir(inventoryFile)); err != nil { - return errors.Wrapf(err, "failed to download plugin inventory image '%s'", o.PluginInventoryImage) + // Get selected plugin groups and plugins objects based on the inputs + selectedPluginEntries, selectedPluginGroups, err := o.getSelectedPluginInfo() + if err != nil { + return errors.Wrap(err, "error while getting selected plugin and plugin group information") } - // Read plugin inventory database and set pluginEntries to point to plugins that needs to be downloaded - imagePrefix := path.Dir(o.PluginInventoryImage) - pi := plugininventory.NewSQLiteInventory(inventoryFile, imagePrefix) - pluginEntries, err := pi.GetAllPlugins() + // Save plugin images and get list of images that needs to be copied as part of the upload process + imagesToCopy, err := o.saveAndGetImagesToCopy(selectedPluginEntries, tempPluginBundleDir) if err != nil { - return errors.Wrap(err, "unable to get plugin details from the database") + return errors.Wrap(err, "error while downloading and saving plugin images") } - // Download all plugin inventory database and plugins as tar file - allImages, err := o.downloadAllPluginImages(pluginEntries, imagePrefix, tempDir) + // Save plugin inventory metadata file and create an entry object + inventoryMetadataImageInfo, err := o.savePluginInventoryMetadata(selectedPluginGroups, selectedPluginEntries, tempPluginBundleDir) if err != nil { - return errors.Wrap(err, "error while downloading plugin images") + return errors.Wrap(err, "error while saving plugin inventory metadata") } - // Save all downloaded images as part of manifest file - err = saveManifestFile(allImages, tempDir) + // Save plugin migration manifest file to the plugin bundle directory + err = savePluginMigrationManifestFile(imagesToCopy, inventoryMetadataImageInfo, 
tempPluginBundleDir) if err != nil { - return errors.Wrap(err, "error while saving plugin bundle manifest") + return errors.Wrap(err, "error while saving plugin migration manifest") } + // Save entire plugin bundle as a single tar file which can be used with upload-bundle log.Infof("saving plugin bundle at: %s", o.ToTar) - // Save entire plugin bundle as a single tar file - err = tarinator.Tarinate([]string{tempDir}, o.ToTar) + err = tarinator.Tarinate([]string{tempPluginBundleDir}, o.ToTar) if err != nil { - return errors.Wrap(err, "error while creating tar file") + return errors.Wrap(err, "error while creating archive file") } return nil } -func (o *DownloadPluginBundleOptions) downloadAllPluginImages(pluginEntries []*plugininventory.PluginInventoryEntry, imagePrefix, tempDir string) ([]*ImageInfo, error) { - allImages := []*ImageInfo{} +// getSelectedPluginInfo returns the list of PluginInventoryEntry and +// PluginGroupEntry based on the DownloadPluginBundleOptions that needs to be +// considered for downloading plugin bundle. 
+// Downloads the plugin inventory image and selects the plugins and plugin +// groups based on the DownloadPluginBundleOptions.Groups by querying the +// plugin inventory database +func (o *DownloadPluginBundleOptions) getSelectedPluginInfo() ([]*plugininventory.PluginInventoryEntry, []*plugininventory.PluginGroup, error) { + var err error + tempDBDir, err := os.MkdirTemp("", "") + if err != nil { + return nil, nil, errors.Wrap(err, "unable to create temp directory") + } + defer os.RemoveAll(tempDBDir) + + // Download the plugin inventory oci image to tempDBDir + inventoryFile := filepath.Join(tempDBDir, plugininventory.SQliteDBFileName) + if err := o.ImageProcessor.DownloadImageAndSaveFilesToDir(o.PluginInventoryImage, filepath.Dir(inventoryFile)); err != nil { + return nil, nil, errors.Wrapf(err, "failed to download plugin inventory image '%s'", o.PluginInventoryImage) + } + + // Read plugin inventory database and set pluginEntries to point to plugins that need to be downloaded + pi := plugininventory.NewSQLiteInventory(inventoryFile, path.Dir(o.PluginInventoryImage)) + + selectedPluginGroups := []*plugininventory.PluginGroup{} + selectedPluginEntries := []*plugininventory.PluginInventoryEntry{} + + // If groups were not provided as argument select all available plugin groups and all available plugins + if len(o.Groups) == 0 { + selectedPluginGroups, err = pi.GetPluginGroups(plugininventory.PluginGroupFilter{}) + if err != nil { + return nil, nil, errors.Wrap(err, "unable to read all plugin groups from database") + } + selectedPluginEntries, err = pi.GetAllPlugins() + if err != nil { + return nil, nil, errors.Wrap(err, "unable to read all plugins from database") + } + } else { + // If groups were provided as argument select only provided plugin groups and + // plugins available from the specified plugin groups + for _, groupName := range o.Groups { + pluginGroups, pluginEntries, err := o.getAllPluginGroupsAndPluginEntriesFromPluginGroupName(groupName, pi) + 
if err != nil { + return nil, nil, err + } + selectedPluginGroups = append(selectedPluginGroups, pluginGroups...) + selectedPluginEntries = append(selectedPluginEntries, pluginEntries...) + } + } + return selectedPluginEntries, selectedPluginGroups, nil +} + +func (o *DownloadPluginBundleOptions) getAllPluginGroupsAndPluginEntriesFromPluginGroupName(pgName string, pi plugininventory.PluginInventory) ([]*plugininventory.PluginGroup, []*plugininventory.PluginInventoryEntry, error) { + pgi := plugininventory.PluginGroupIdentifierFromID(pgName) + if pgi.Name == "" || pgi.Vendor == "" || pgi.Publisher == "" { + return nil, nil, errors.Errorf("incorrect plugin group %q specified", pgName) + } + pgFilter := plugininventory.PluginGroupFilter{ + IncludeHidden: true, + Vendor: pgi.Vendor, + Publisher: pgi.Publisher, + Name: pgi.Name, + } + pluginGroups, err := pi.GetPluginGroups(pgFilter) + if err != nil { + return nil, nil, errors.Wrap(err, "unable to get plugin groups") + } + + if len(pluginGroups) == 0 { + return nil, nil, errors.Errorf("incorrect plugin group %q specified", pgName) + } + + var allPluginEntries []*plugininventory.PluginInventoryEntry + for _, pg := range pluginGroups { + for _, p := range pg.Plugins { + pif := &plugininventory.PluginInventoryFilter{ + Name: p.Name, + Target: p.Target, + Version: p.Version, + } + pluginEntries, err := pi.GetPlugins(pif) + if err != nil { + return nil, nil, errors.Wrapf(err, "unable to get plugins in plugin group %v", plugininventory.PluginGroupToID(pg)) + } + allPluginEntries = append(allPluginEntries, pluginEntries...) 
+ } + } + return pluginGroups, allPluginEntries, nil +} + +// saveAndGetImagesToCopy saves the images after downloading them and +// returns the images to copy object +func (o *DownloadPluginBundleOptions) saveAndGetImagesToCopy(pluginEntries []*plugininventory.PluginInventoryEntry, downloadDir string) ([]*ImageCopyInfo, error) { + // Download all plugin inventory database and plugins as tar file + return o.downloadImagesAsTarFile(pluginEntries, downloadDir) +} + +// downloadImagesAsTarFile downloads plugin inventory image and all plugin images +// as tar file to the specified directory +func (o *DownloadPluginBundleOptions) downloadImagesAsTarFile(pluginEntries []*plugininventory.PluginInventoryEntry, downloadDir string) ([]*ImageCopyInfo, error) { + allImages := []*ImageCopyInfo{} // Download plugin inventory database as tar file - pluginInventoryFileNameTar := "plugin-inventory-image.tar" + pluginInventoryFileNameTar := "plugin-inventory-image.tar.gz" log.Infof("downloading image %q", o.PluginInventoryImage) - err := o.ImageProcessor.CopyImageToTar(o.PluginInventoryImage, filepath.Join(tempDir, pluginInventoryFileNameTar)) + err := o.ImageProcessor.CopyImageToTar(o.PluginInventoryImage, filepath.Join(downloadDir, pluginInventoryFileNameTar)) if err != nil { return nil, err } - allImages = append(allImages, &ImageInfo{FilePath: pluginInventoryFileNameTar, ImagePath: getImageRelativePath(o.PluginInventoryImage, imagePrefix)}) + allImages = append(allImages, &ImageCopyInfo{ + SourceTarFilePath: pluginInventoryFileNameTar, + RelativeImagePath: GetImageRelativePath(o.PluginInventoryImage, path.Dir(o.PluginInventoryImage), false), + }) // Process all plugin entries and download the oci image as tar file for _, pe := range pluginEntries { @@ -101,19 +201,24 @@ func (o *DownloadPluginBundleOptions) downloadAllPluginImages(pluginEntries []*p for _, a := range artifacts { log.Infof("---------------------------") log.Infof("downloading image %q", a.Image) - tarfileName := 
fmt.Sprintf("%s-%s-%s_%s-%s.tar", pe.Name, pe.Target, a.OS, a.Arch, version) - err = o.ImageProcessor.CopyImageToTar(a.Image, filepath.Join(tempDir, tarfileName)) + tarfileName := fmt.Sprintf("%s-%s-%s_%s-%s.tar.gz", pe.Name, pe.Target, a.OS, a.Arch, version) + err = o.ImageProcessor.CopyImageToTar(a.Image, filepath.Join(downloadDir, tarfileName)) if err != nil { return nil, err } - allImages = append(allImages, &ImageInfo{FilePath: tarfileName, ImagePath: getImageRelativePath(a.Image, imagePrefix)}) + allImages = append(allImages, &ImageCopyInfo{ + SourceTarFilePath: tarfileName, + RelativeImagePath: GetImageRelativePath(a.Image, path.Dir(o.PluginInventoryImage), false), + }) } } } return allImages, nil } -func (o *DownloadPluginBundleOptions) validateInput() error { +// validateOptions validates the provided options and returns an +// error if any option is invalid +func (o *DownloadPluginBundleOptions) validateOptions() error { _, err := os.Stat(filepath.Dir(o.ToTar)) if err != nil { return errors.Wrapf(err, "invalid path for %q", o.ToTar) @@ -121,24 +226,62 @@ func (o *DownloadPluginBundleOptions) validateInput() error { return nil } -func getImageRelativePath(image, imagePrefix string) string { - relativePathWithVersion := strings.TrimPrefix(image, imagePrefix) - if idx := strings.LastIndex(relativePathWithVersion, ":"); idx != -1 { - return relativePathWithVersion[:idx] - } - return relativePathWithVersion -} - -func saveManifestFile(allImages []*ImageInfo, dir string) error { +// savePluginMigrationManifestFile saves the plugin_migration_manifest.yaml file +// to the provided pluginBundleDir +func savePluginMigrationManifestFile(imagesToCopy []*ImageCopyInfo, inventoryMetadataImageInfo *ImagePublishInfo, pluginBundleDir string) error { // Save all downloaded images as part of manifest file - manifest := Manifest{Images: allImages} + manifest := PluginMigrationManifest{ + ImagesToCopy: imagesToCopy, + InventoryMetadataImage: inventoryMetadataImageInfo, + } 
bytes, err := yaml.Marshal(&manifest) if err != nil { return err } - err = os.WriteFile(filepath.Join(dir, PluginBundleManifestFile), bytes, 0644) + err = os.WriteFile(filepath.Join(pluginBundleDir, PluginMigrationManifestFile), bytes, 0644) if err != nil { return err } return nil } + +// savePluginInventoryMetadata saves the plugin inventory metadata database file +// and returns ImagePublishInfo object containing the details on where to publish +// the metadata database file as an oci image +func (o *DownloadPluginBundleOptions) savePluginInventoryMetadata(pgs []*plugininventory.PluginGroup, pes []*plugininventory.PluginInventoryEntry, pluginBundleDir string) (*ImagePublishInfo, error) { + inventoryMetadataDBFileName := plugininventory.SQliteInventoryMetadataDBFileName + inventoryMetadataDBFilePath := filepath.Join(pluginBundleDir, inventoryMetadataDBFileName) + inventoryMetadataDB := plugininventory.NewSQLiteInventoryMetadata(inventoryMetadataDBFilePath) + + err := inventoryMetadataDB.CreateInventoryMetadataDBSchema() + if err != nil { + return nil, err + } + + for _, pe := range pes { + for version := range pe.Artifacts { + err := inventoryMetadataDB.InsertPluginIdentifier(&plugininventory.PluginIdentifier{Name: pe.Name, Target: pe.Target, Version: version}) + if err != nil { + return nil, err + } + } + } + for _, pg := range pgs { + err := inventoryMetadataDB.InsertPluginGroupIdentifier(&plugininventory.PluginGroupIdentifier{Vendor: pg.Vendor, Publisher: pg.Publisher, Name: pg.Name}) + if err != nil { + return nil, err + } + } + + pluginInventoryMetadataImage, err := GetPluginInventoryMetadataImage(o.PluginInventoryImage) + if err != nil { + return nil, err + } + + imagePublishInfo := &ImagePublishInfo{ + SourceFilePath: inventoryMetadataDBFileName, + RelativeImagePathWithTag: GetImageRelativePath(pluginInventoryMetadataImage, path.Dir(o.PluginInventoryImage), true), + } + + return imagePublishInfo, nil +} diff --git a/pkg/airgapped/plugin_bundle_test.go 
b/pkg/airgapped/plugin_bundle_test.go index 3a15cf0fc..a0b8f52fb 100644 --- a/pkg/airgapped/plugin_bundle_test.go +++ b/pkg/airgapped/plugin_bundle_test.go @@ -39,14 +39,15 @@ var _ = Describe("Unit tests for download and upload bundle", func() { fakeImageOperations = &fakes.ImageOperationsImpl{} - // plugin entry to be added in the inventory database - pluginEntry := &plugininventory.PluginInventoryEntry{ - Name: "foo", - Target: "global", - Description: "Foo plugin", - Publisher: "fakepublisher", - Vendor: "fakevendor", - Hidden: false, + // plugin entry foo to be added in the inventory database + pluginEntryFoo := &plugininventory.PluginInventoryEntry{ + Name: "foo", + Target: "global", + Description: "Foo plugin", + Publisher: "fakepublisher", + Vendor: "fakevendor", + Hidden: false, + RecommendedVersion: "v0.0.2", Artifacts: map[string]distribution.ArtifactList{ "v0.0.2": []distribution.Artifact{ { @@ -65,15 +66,69 @@ var _ = Describe("Unit tests for download and upload bundle", func() { }, } + // plugin entry bar to be added in the inventory database + pluginEntryBar := &plugininventory.PluginInventoryEntry{ + Name: "bar", + Target: "kubernetes", + Description: "Bar plugin", + Publisher: "fakepublisher", + Vendor: "fakevendor", + Hidden: false, + RecommendedVersion: "v0.0.1", + Artifacts: map[string]distribution.ArtifactList{ + "v0.0.1": []distribution.Artifact{ + { + OS: "darwin", + Arch: "amd64", + Digest: "fake-digest-bar", + Image: "path/darwin/amd64/kubernetes/bar:v0.0.1", + }, + }, + }, + } + + pluginGroupEntry := &plugininventory.PluginGroup{ + Vendor: "fakevendor", + Publisher: "fakepublisher", + Name: "default:v1.0.0", + Hidden: false, + Plugins: []*plugininventory.PluginGroupPluginEntry{ + { + PluginIdentifier: plugininventory.PluginIdentifier{ + Name: "bar", + Target: "kubernetes", + Version: "v0.0.1", + }, + }, + }, + } + // Plugin bundle manifest file generated based on the above mentioned // plugin entry in the inventory database - 
pluginBundleManifestString := `images: - - filePath: plugin-inventory-image.tar - imagePath: /plugin-inventory - - filePath: foo-global-darwin_amd64-v0.0.2.tar - imagePath: /path/darwin/amd64/global/foo - - filePath: foo-global-linux_amd64-v0.0.2.tar - imagePath: /path/linux/amd64/global/foo + pluginBundleManifestCompleteRepositoryString := `inventoryMetadataImage: + sourceFilePath: plugin_inventory_metadata.db + relativeImagePathWithTag: /plugin-inventory-metadata:latest +imagesToCopy: + - sourceFilePath: plugin-inventory-image.tar.gz + relativeImagePath: /plugin-inventory + - sourceFilePath: bar-kubernetes-darwin_amd64-v0.0.1.tar.gz + relativeImagePath: /path/darwin/amd64/kubernetes/bar + - sourceFilePath: foo-global-darwin_amd64-v0.0.2.tar.gz + relativeImagePath: /path/darwin/amd64/global/foo + - sourceFilePath: foo-global-linux_amd64-v0.0.2.tar.gz + relativeImagePath: /path/linux/amd64/global/foo +` + + // Plugin bundle manifest file generated based on the above mentioned + // plugin entry in the inventory database with only single plugin group specified + pluginBundleManifestDefaultGroupOnlyString := `inventoryMetadataImage: + sourceFilePath: plugin_inventory_metadata.db + relativeImagePathWithTag: /plugin-inventory-metadata:latest +imagesToCopy: + - sourceFilePath: plugin-inventory-image.tar.gz + relativeImagePath: /plugin-inventory + - sourceFilePath: bar-kubernetes-darwin_amd64-v0.0.1.tar.gz + relativeImagePath: /path/darwin/amd64/kubernetes/bar ` // Configure the configuration before running the tests @@ -96,9 +151,9 @@ var _ = Describe("Unit tests for download and upload bundle", func() { defer os.RemoveAll(tempTestDir) }) - // downloadImageAndSaveFilesToDirStub fakes the image downloads and puts a database + // downloadInventoryImageAndSaveFilesToDirStub fakes the image downloads and puts a database // with the table schemas created to provided path - downloadImageAndSaveFilesToDirStub := func(image, path string) error { + 
downloadInventoryImageAndSaveFilesToDirStub := func(image, path string) error { dbFile := filepath.Join(path, plugininventory.SQliteDBFileName) err := utils.SaveFile(dbFile, []byte{}) Expect(err).ToNot(HaveOccurred()) @@ -107,11 +162,49 @@ var _ = Describe("Unit tests for download and upload bundle", func() { err = db.CreateSchema() Expect(err).ToNot(HaveOccurred()) - err = db.InsertPlugin(pluginEntry) + err = db.InsertPlugin(pluginEntryFoo) + Expect(err).ToNot(HaveOccurred()) + err = db.InsertPlugin(pluginEntryBar) + Expect(err).ToNot(HaveOccurred()) + err = db.InsertPluginGroup(pluginGroupEntry, true) Expect(err).ToNot(HaveOccurred()) return nil } + // downloadInventoryMetadataImageWithNoExistingPlugins fakes the image downloads and puts a database + // with the table schemas created to provided path + downloadInventoryMetadataImageWithNoExistingPlugins := func(image, path string) error { + dbFile := filepath.Join(path, plugininventory.SQliteInventoryMetadataDBFileName) + err := utils.SaveFile(dbFile, []byte{}) + Expect(err).ToNot(HaveOccurred()) + + db := plugininventory.NewSQLiteInventoryMetadata(dbFile) + err = db.CreateInventoryMetadataDBSchema() + Expect(err).ToNot(HaveOccurred()) + + return nil + } + + // downloadInventoryMetadataImageWithExistingPlugins fakes the image downloads and puts a database + // with the table schemas created to provided path + downloadInventoryMetadataImageWithExistingPlugins := func(image, path string) error { + dbFile := filepath.Join(path, plugininventory.SQliteInventoryMetadataDBFileName) + err := utils.SaveFile(dbFile, []byte{}) + Expect(err).ToNot(HaveOccurred()) + + db := plugininventory.NewSQLiteInventoryMetadata(dbFile) + err = db.CreateInventoryMetadataDBSchema() + Expect(err).ToNot(HaveOccurred()) + + err = db.InsertPluginGroupIdentifier(&plugininventory.PluginGroupIdentifier{Name: pluginGroupEntry.Name, Vendor: pluginGroupEntry.Vendor, Publisher: pluginGroupEntry.Publisher}) + Expect(err).ToNot(HaveOccurred()) + + err 
= db.InsertPluginIdentifier(&plugininventory.PluginIdentifier{Name: pluginEntryBar.Name, Target: pluginEntryBar.Target, Version: pluginEntryBar.RecommendedVersion}) + Expect(err).ToNot(HaveOccurred()) + + return nil + } + // copyImageToTarStub fakes the image downloads and creates a fake tar.gz file for images copyImageToTarStub := func(image, tarfile string) error { _, err := os.Create(tarfile) @@ -140,17 +233,17 @@ var _ = Describe("Unit tests for download and upload bundle", func() { }) var _ = It("when downloading plugin inventory image succeeds but copy image to tar fail with error, it should return an error", func() { - fakeImageOperations.DownloadImageAndSaveFilesToDirCalls(downloadImageAndSaveFilesToDirStub) + fakeImageOperations.DownloadImageAndSaveFilesToDirCalls(downloadInventoryImageAndSaveFilesToDirStub) fakeImageOperations.CopyImageToTarReturns(errors.New("fake error")) err := dpbo.DownloadPluginBundle() Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("error while downloading plugin images")) + Expect(err.Error()).To(ContainSubstring("error while downloading and saving plugin images")) Expect(err.Error()).To(ContainSubstring("fake error")) }) - var _ = It("when everything works as expected, it should download plugin bundle as tar file", func() { - fakeImageOperations.DownloadImageAndSaveFilesToDirCalls(downloadImageAndSaveFilesToDirStub) + var _ = It("when group is not specified and everything works as expected, it should download plugin bundle as tar file", func() { + fakeImageOperations.DownloadImageAndSaveFilesToDirCalls(downloadInventoryImageAndSaveFilesToDirStub) fakeImageOperations.CopyImageToTarCalls(copyImageToTarStub) err := dpbo.DownloadPluginBundle() @@ -164,25 +257,73 @@ var _ = Describe("Unit tests for download and upload bundle", func() { Expect(err).NotTo(HaveOccurred()) // Verify the plugin bundle manifest file is accurate - bytes, err := os.ReadFile(filepath.Join(tempDir, PluginBundleDirName, 
PluginBundleManifestFile)) + bytes, err := os.ReadFile(filepath.Join(tempDir, PluginBundleDirName, PluginMigrationManifestFile)) Expect(err).NotTo(HaveOccurred()) - Expect(bytes).To(Equal([]byte(pluginBundleManifestString))) - manifest := &Manifest{} + Expect(string(bytes)).To(Equal(pluginBundleManifestCompleteRepositoryString)) + manifest := &PluginMigrationManifest{} err = yaml.Unmarshal(bytes, &manifest) Expect(err).NotTo(HaveOccurred()) // Iterate through all the images in the manifest and verify the all image archive // files mentioned in the manifest exists in the bundle - for _, pi := range manifest.Images { - exists := utils.PathExists(filepath.Join(tempDir, PluginBundleDirName, pi.FilePath)) + for _, pi := range manifest.ImagesToCopy { + exists := utils.PathExists(filepath.Join(tempDir, PluginBundleDirName, pi.SourceTarFilePath)) + Expect(exists).To(BeTrue()) + } + }) + + var _ = It("when group specified does not exists, it should return an error", func() { + fakeImageOperations.DownloadImageAndSaveFilesToDirCalls(downloadInventoryImageAndSaveFilesToDirStub) + fakeImageOperations.CopyImageToTarCalls(copyImageToTarStub) + + dpbo.Groups = []string{"vmware-tanzu/does-not-exists"} + err := dpbo.DownloadPluginBundle() + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("error while getting selected plugin and plugin group information")) + Expect(err.Error()).To(ContainSubstring("incorrect plugin group \"vmware-tanzu/does-not-exists\" specified")) + + dpbo.Groups = []string{"does-not-exists"} + err = dpbo.DownloadPluginBundle() + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("error while getting selected plugin and plugin group information")) + Expect(err.Error()).To(ContainSubstring("incorrect plugin group \"does-not-exists\" specified")) + }) + + var _ = It("when group is specified and everything works as expected, it should download plugin bundle as tar file", func() { + 
fakeImageOperations.DownloadImageAndSaveFilesToDirCalls(downloadInventoryImageAndSaveFilesToDirStub) + fakeImageOperations.CopyImageToTarCalls(copyImageToTarStub) + + dpbo.Groups = []string{"fakevendor-fakepublisher/default:v1.0.0"} + err := dpbo.DownloadPluginBundle() + Expect(err).NotTo(HaveOccurred()) + + // Verify that tar file was generated correctly with untar + tempDir, err := os.MkdirTemp("", "") + Expect(tempDir).ToNot(BeEmpty()) + Expect(err).NotTo(HaveOccurred()) + err = tarinator.UnTarinate(tempDir, dpbo.ToTar) + Expect(err).NotTo(HaveOccurred()) + + // Verify the plugin bundle manifest file is accurate + bytes, err := os.ReadFile(filepath.Join(tempDir, PluginBundleDirName, PluginMigrationManifestFile)) + Expect(err).NotTo(HaveOccurred()) + Expect(string(bytes)).To(Equal(pluginBundleManifestDefaultGroupOnlyString)) + manifest := &PluginMigrationManifest{} + err = yaml.Unmarshal(bytes, &manifest) + Expect(err).NotTo(HaveOccurred()) + + // Iterate through all the images in the manifest and verify the all image archive + // files mentioned in the manifest exists in the bundle + for _, pi := range manifest.ImagesToCopy { + exists := utils.PathExists(filepath.Join(tempDir, PluginBundleDirName, pi.SourceTarFilePath)) Expect(exists).To(BeTrue()) } }) }) - var _ = Context("Tests for uploading plugin bundle", func() { + var _ = Context("Tests for uploading plugin bundle when downloading entire plugin repository with all plugin", func() { JustBeforeEach(func() { - fakeImageOperations.DownloadImageAndSaveFilesToDirCalls(downloadImageAndSaveFilesToDirStub) + fakeImageOperations.DownloadImageAndSaveFilesToDirCalls(downloadInventoryImageAndSaveFilesToDirStub) fakeImageOperations.CopyImageToTarCalls(copyImageToTarStub) err := dpbo.DownloadPluginBundle() @@ -194,7 +335,7 @@ var _ = Describe("Unit tests for download and upload bundle", func() { err := upbo.UploadPluginBundle() Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("unable to untar 
provided file")) + Expect(err.Error()).To(ContainSubstring("unable to extract provided file")) }) var _ = It("when incorrect tarfile is provided, it should return an error", func() { @@ -203,7 +344,7 @@ var _ = Describe("Unit tests for download and upload bundle", func() { err := upbo.UploadPluginBundle() Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("error while reading plugin bundle manifest")) + Expect(err.Error()).To(ContainSubstring("error while reading plugin migration manifest")) }) var _ = It("when uploading image fail with error, it should return an error", func() { @@ -215,7 +356,22 @@ var _ = Describe("Unit tests for download and upload bundle", func() { Expect(err.Error()).To(ContainSubstring("fake error")) }) - var _ = It("when uploading image succeeds, it should not return an error", func() { + var _ = It("when fetching the existing inventory metadata fails, it should not return an error", func() { + fakeImageOperations.DownloadImageAndSaveFilesToDirReturns(errors.New("fake-error")) + fakeImageOperations.CopyImageFromTarReturns(nil) + err := upbo.UploadPluginBundle() + Expect(err).NotTo(HaveOccurred()) + }) + + var _ = It("when uploading images succeeds and fetching the existing inventory metadata returns no existing plugins, it should not return an error", func() { + fakeImageOperations.DownloadImageAndSaveFilesToDirCalls(downloadInventoryMetadataImageWithNoExistingPlugins) + fakeImageOperations.CopyImageFromTarReturns(nil) + err := upbo.UploadPluginBundle() + Expect(err).NotTo(HaveOccurred()) + }) + + var _ = It("when uploading images succeeds and fetching the existing inventory metadata returns few existing plugins, merge should happen and it should not return an error", func() { + fakeImageOperations.DownloadImageAndSaveFilesToDirCalls(downloadInventoryMetadataImageWithExistingPlugins) fakeImageOperations.CopyImageFromTarReturns(nil) err := upbo.UploadPluginBundle() Expect(err).NotTo(HaveOccurred()) diff --git 
a/pkg/airgapped/plugin_bundle_upload.go b/pkg/airgapped/plugin_bundle_upload.go index 14a6176b6..dd58b2e3b 100644 --- a/pkg/airgapped/plugin_bundle_upload.go +++ b/pkg/airgapped/plugin_bundle_upload.go @@ -12,6 +12,7 @@ import ( "gopkg.in/yaml.v3" "github.com/vmware-tanzu/tanzu-cli/pkg/carvelhelpers" + "github.com/vmware-tanzu/tanzu-cli/pkg/plugininventory" "github.com/vmware-tanzu/tanzu-plugin-runtime/log" ) @@ -35,26 +36,25 @@ func (o *UploadPluginBundleOptions) UploadPluginBundle() error { // Untar the specified plugin bundle to the temp directory err = tarinator.UnTarinate(tempDir, o.Tar) if err != nil { - return errors.Wrap(err, "unable to untar provided file") + return errors.Wrap(err, "unable to extract provided file") } - // Read the plugin_bundle_manifest file + // Read the plugin migration manifest file pluginBundleDir := filepath.Join(tempDir, PluginBundleDirName) - bytes, err := os.ReadFile(filepath.Join(pluginBundleDir, PluginBundleManifestFile)) + bytes, err := os.ReadFile(filepath.Join(pluginBundleDir, PluginMigrationManifestFile)) if err != nil { - return errors.Wrap(err, "error while reading plugin bundle manifest") + return errors.Wrap(err, "error while reading plugin migration manifest") } - - manifest := &Manifest{} + manifest := &PluginMigrationManifest{} err = yaml.Unmarshal(bytes, &manifest) if err != nil { - return errors.Wrap(err, "error while parsing plugin bundle manifest") + return errors.Wrap(err, "error while parsing plugin migration manifest") } // Iterate through all the images and publish them to the remote repository - for _, pi := range manifest.Images { - imageTar := filepath.Join(pluginBundleDir, pi.FilePath) - repoImagePath := filepath.Join(o.DestinationRepo, pi.ImagePath) + for _, ic := range manifest.ImagesToCopy { + imageTar := filepath.Join(pluginBundleDir, ic.SourceTarFilePath) + repoImagePath := filepath.Join(o.DestinationRepo, ic.RelativeImagePath) log.Infof("---------------------------") log.Infof("uploading image %q", 
repoImagePath) err = o.ImageProcessor.CopyImageFromTar(imageTar, repoImagePath) @@ -63,7 +63,43 @@ func (o *UploadPluginBundleOptions) UploadPluginBundle() error { } } log.Infof("---------------------------") - log.Infof("successfully published all images") + log.Infof("---------------------------") + + // Publish plugin inventory metadata image after merging inventory metadata + log.Infof("publishing plugin inventory metadata image...") + bundledPluginInventoryMetadataDBFilePath := filepath.Join(pluginBundleDir, manifest.InventoryMetadataImage.SourceFilePath) + pluginInventoryMetadataImageWithTag := filepath.Join(o.DestinationRepo, manifest.InventoryMetadataImage.RelativeImagePathWithTag) + err = o.mergePluginInventoryMetadata(pluginInventoryMetadataImageWithTag, bundledPluginInventoryMetadataDBFilePath, filepath.Join(tempDir, "inventory-metadata")) + if err != nil { + return errors.Wrap(err, "error while merging the plugin inventory metadata database before uploading metadata image") + } + + log.Infof("uploading image %q", pluginInventoryMetadataImageWithTag) + err = o.ImageProcessor.PushImage(pluginInventoryMetadataImageWithTag, []string{bundledPluginInventoryMetadataDBFilePath}) + if err != nil { + return errors.Wrap(err, "error while uploading image") + } + + log.Infof("---------------------------") + log.Infof("successfully published all plugin images to %q", o.DestinationRepo) + + return nil +} +// mergePluginInventoryMetadata merges the downloaded plugin inventory metadata with +// existing plugin inventory metadata available on the remote repository +func (o *UploadPluginBundleOptions) mergePluginInventoryMetadata(pluginInventoryMetadataImageWithTag, bundledPluginInventoryMetadataDBFilePath, tempDir string) error { + tempPluginInventoryMetadataDir := filepath.Join(tempDir, "inventory-metadata") + err := o.ImageProcessor.DownloadImageAndSaveFilesToDir(pluginInventoryMetadataImageWithTag, tempPluginInventoryMetadataDir) + if err == nil { + 
downloadedPluginInventoryMetadataDBFilePath := filepath.Join(tempPluginInventoryMetadataDir, plugininventory.SQliteInventoryMetadataDBFileName) + pluginInventoryDB := plugininventory.NewSQLiteInventoryMetadata(bundledPluginInventoryMetadataDBFilePath) + err = pluginInventoryDB.MergeInventoryMetadataDatabase(downloadedPluginInventoryMetadataDBFilePath) + if err != nil { + return err + } + } else { + log.Infof("plugin inventory metadata image %q is not present. Skipping merging of the plugin inventory metadata", pluginInventoryMetadataImageWithTag) + } return nil } diff --git a/pkg/airgapped/types.go b/pkg/airgapped/types.go index 74214ee0d..de6ed700c 100644 --- a/pkg/airgapped/types.go +++ b/pkg/airgapped/types.go @@ -5,15 +5,22 @@ package airgapped const PluginBundleDirName = "plugin_bundle" -const PluginBundleManifestFile = "plugin_bundle_manifest.yaml" +const PluginMigrationManifestFile = "plugin_migration_manifest.yaml" -// Manifest defines struct for plugin bundle manifest -type Manifest struct { - Images []*ImageInfo `yaml:"images"` +// PluginMigrationManifest defines struct for plugin bundle manifest +type PluginMigrationManifest struct { + InventoryMetadataImage *ImagePublishInfo `yaml:"inventoryMetadataImage"` + ImagesToCopy []*ImageCopyInfo `yaml:"imagesToCopy"` } -// ImageInfo maps the relative image path and local relative file path -type ImageInfo struct { - FilePath string `yaml:"filePath"` - ImagePath string `yaml:"imagePath"` +// ImageCopyInfo maps the relative image path and local relative file path +type ImageCopyInfo struct { + SourceTarFilePath string `yaml:"sourceFilePath"` + RelativeImagePath string `yaml:"relativeImagePath"` +} + +// ImagePublishInfo maps the relative image path and local relative file path +type ImagePublishInfo struct { + SourceFilePath string `yaml:"sourceFilePath"` + RelativeImagePathWithTag string `yaml:"relativeImagePathWithTag"` } diff --git a/pkg/carvelhelpers/image_operations.go b/pkg/carvelhelpers/image_operations.go 
index 63eb18b58..a7235898a 100644 --- a/pkg/carvelhelpers/image_operations.go +++ b/pkg/carvelhelpers/image_operations.go @@ -74,3 +74,12 @@ func (i *ImageOperationOptions) GetImageDigest(imageWithTag string) (string, str return hashAlgorithm, hashHexVal, nil } + +// PushImage publishes the image to the specified location +func (i *ImageOperationOptions) PushImage(imageWithTag string, filePaths []string) error { + reg, err := newRegistry() + if err != nil { + return errors.Wrapf(err, "unable to initialize registry") + } + return reg.PushImage(imageWithTag, filePaths) +} diff --git a/pkg/carvelhelpers/interface.go b/pkg/carvelhelpers/interface.go index 935ba21ff..120aa9741 100644 --- a/pkg/carvelhelpers/interface.go +++ b/pkg/carvelhelpers/interface.go @@ -22,4 +22,7 @@ type ImageOperationsImpl interface { GetFilesMapFromImage(imageWithTag string) (map[string][]byte, error) // GetImageDigest gets digest of the image GetImageDigest(imageWithTag string) (string, string, error) + // PushImage publishes the image to the specified location + // This is equivalent to `imgpkg push -i -f ` + PushImage(imageWithTag string, filePaths []string) error } diff --git a/pkg/command/plugin_bundle.go b/pkg/command/plugin_bundle.go index 6a878a39a..b7cd3425a 100644 --- a/pkg/command/plugin_bundle.go +++ b/pkg/command/plugin_bundle.go @@ -13,11 +13,12 @@ import ( type downloadPluginBundleOptions struct { pluginDiscoveryOCIImage string tarFile string + groups []string } var dpbo downloadPluginBundleOptions -func newDownloadBundlePluginCmd() *cobra.Command { // nolint:dupl +func newDownloadBundlePluginCmd() *cobra.Command { var downloadBundleCmd = &cobra.Command{ Use: "download-bundle", Short: "Download plugin bundle to the local system", @@ -26,6 +27,7 @@ func newDownloadBundlePluginCmd() *cobra.Command { // nolint:dupl options := airgapped.DownloadPluginBundleOptions{ PluginInventoryImage: dpbo.pluginDiscoveryOCIImage, ToTar: dpbo.tarFile, + Groups: dpbo.groups, ImageProcessor: 
carvelhelpers.NewImageOperationsImpl(), } return options.DownloadPluginBundle() @@ -35,6 +37,7 @@ func newDownloadBundlePluginCmd() *cobra.Command { // nolint:dupl f := downloadBundleCmd.Flags() f.StringVarP(&dpbo.pluginDiscoveryOCIImage, "image", "", "", "URI of the plugin discovery image providing the plugins") f.StringVarP(&dpbo.tarFile, "to-tar", "", "", "local tar file path to store the plugin images") + f.StringArrayVarP(&dpbo.groups, "group", "", []string{}, "only download the plugins specified in the plugin group") _ = downloadBundleCmd.MarkFlagRequired("image") _ = downloadBundleCmd.MarkFlagRequired("to-tar") @@ -49,7 +52,7 @@ type uploadPluginBundleOptions struct { var upbo uploadPluginBundleOptions -func newUploadBundlePluginCmd() *cobra.Command { // nolint:dupl +func newUploadBundlePluginCmd() *cobra.Command { var uploadBundleCmd = &cobra.Command{ Use: "upload-bundle", Short: "Upload plugin bundle to a repository", diff --git a/pkg/discovery/oci_dbbacked.go b/pkg/discovery/oci_dbbacked.go index 567431a6e..106ae52b6 100644 --- a/pkg/discovery/oci_dbbacked.go +++ b/pkg/discovery/oci_dbbacked.go @@ -13,6 +13,7 @@ import ( "github.com/pkg/errors" + "github.com/vmware-tanzu/tanzu-cli/pkg/airgapped" "github.com/vmware-tanzu/tanzu-cli/pkg/carvelhelpers" "github.com/vmware-tanzu/tanzu-cli/pkg/common" "github.com/vmware-tanzu/tanzu-cli/pkg/constants" @@ -147,9 +148,11 @@ func (od *DBBackedOCIDiscovery) listGroupsFromInventory() ([]*plugininventory.Pl // fetchInventoryImage downloads the OCI image containing the information about the // inventory of this discovery and stores it in the cache directory. func (od *DBBackedOCIDiscovery) fetchInventoryImage() error { - newCacheHashFile := od.checkImageCache() - if newCacheHashFile == "" { - // The cache can be re-used. We are done. 
+ // check the cache to see if downloaded plugin inventory database is up-to-date or not + // by comparing the image digests + newCacheHashFileForInventoryImage, newCacheHashFileForMetadataImage := od.checkImageCache() + if newCacheHashFileForInventoryImage == "" && newCacheHashFileForMetadataImage == "" { + // The cache can be re-used. We are done. return nil } @@ -167,26 +170,74 @@ func (od *DBBackedOCIDiscovery) fetchInventoryImage() error { log.Fatal(nil, errMsg) } - if err := carvelhelpers.DownloadImageAndSaveFilesToDir(od.image, od.pluginDataDir); err != nil { - return errors.Wrapf(err, "failed to download OCI image from discovery '%s'", od.Name()) + // download plugin inventory image to get the 'plugin_inventory.db' + // also handle the air-gapped scenario where additional plugin inventory metadata image is present + err := od.downloadInventoryDatabase() + if err != nil { + return err } // Now that everything is ready, create the digest hash file - _, _ = os.Create(newCacheHashFile) + _, _ = os.Create(newCacheHashFileForInventoryImage) + // Also create digest hash file for inventory metadata image if not empty + if newCacheHashFileForMetadataImage != "" { + _, _ = os.Create(newCacheHashFileForMetadataImage) + } return nil } -// checkImageCache will get the image digest of this discovery -// and check if the cache already contains the up-to-date image. +// downloadInventoryDatabase downloads plugin inventory image to get the 'plugin_inventory.db' +// +// Additional check for airgapped environment as below: +// Also check if plugin inventory metadata image is present or not. 
if present, downloads the inventory +// metadata image to get the 'plugin_inventory_metadata.db' and update the 'plugin_inventory.db' +// based on the 'plugin_inventory_metadata.db' +func (od *DBBackedOCIDiscovery) downloadInventoryDatabase() error { + tempDir1, err := os.MkdirTemp("", "") + if err != nil { + return errors.Wrap(err, "unable to create temp directory") + } + tempDir2, err := os.MkdirTemp("", "") + if err != nil { + return errors.Wrap(err, "unable to create temp directory") + } + + // Download the plugin inventory image and save to tempDir1 + if err := carvelhelpers.DownloadImageAndSaveFilesToDir(od.image, tempDir1); err != nil { + return errors.Wrapf(err, "failed to download OCI image from discovery '%s'", od.Name()) + } + + inventoryDBFilePath := filepath.Join(tempDir1, plugininventory.SQliteDBFileName) + metadataDBFilePath := filepath.Join(tempDir2, plugininventory.SQliteInventoryMetadataDBFileName) + + // Download the plugin inventory metadata image if exists and save to tempDir2 + pluginInventoryMetadataImage, _ := airgapped.GetPluginInventoryMetadataImage(od.image) + if err := carvelhelpers.DownloadImageAndSaveFilesToDir(pluginInventoryMetadataImage, tempDir2); err == nil { + // Update the plugin inventory database (plugin_inventory.db) based on the plugin + // inventory metadata database (plugin_inventory_metadata.db) + err = plugininventory.NewSQLiteInventoryMetadata(metadataDBFilePath).UpdatePluginInventoryDatabase(inventoryDBFilePath) + if err != nil { + return errors.Wrap(err, "error while updating inventory database based on the inventory metadata database") + } + } + + // Copy the inventory database file from temp directory to pluginDataDir + return utils.CopyFile(inventoryDBFilePath, filepath.Join(od.pluginDataDir, plugininventory.SQliteDBFileName)) +} + +// checkImageCache will get the plugin inventory image digest as well as +// plugin inventory metadata image digest if exists for this discovery +// and check if the cache already 
contains the up-to-date database. +// Function returns two strings (hashFileForInventoryImage, HashFileForMetadataImage) // It returns an empty string if the cache can be used. Otherwise // it returns the name of the digest file that must be created once // the new DB image has been downloaded. -func (od *DBBackedOCIDiscovery) checkImageCache() string { +func (od *DBBackedOCIDiscovery) checkImageCache() (string, string) { // Get the latest digest of the discovery image. // If the cache already contains the image with this digest // we do not need to verify its signature nor to download it again. - _, hashHexVal, err := carvelhelpers.GetImageDigest(od.image) + _, hashHexValInventoryImage, err := carvelhelpers.GetImageDigest(od.image) if err != nil { // This will happen when the user has configured an invalid image discovery URI log.Warningf("Unable to resolve the plugin discovery image: %v", err) @@ -194,14 +245,27 @@ func (od *DBBackedOCIDiscovery) checkImageCache() string { log.Fatal(nil, fmt.Sprintf("Fatal: plugins discovery image resolution failed. Please check that the repository image URL %q is correct ", od.image)) } - // We store the digest hash of the cached DB as a file named "digest.. - // If this file exists, we are done. If not, we remove the current digest file - // as we are about to download a new DB and create a new digest file. - // First check any existing "digest.*" file; there should only be one, but - // to protect ourselves, we check first and if there are more then one due - // to some bug, we clean them up and invalidate the cache. 
- correctHashFile := filepath.Join(od.pluginDataDir, "digest."+hashHexVal) - matches, _ := filepath.Glob(filepath.Join(od.pluginDataDir, "digest.*")) + correctHashFileForInventoryImage := od.checkDigestFileExistance(hashHexValInventoryImage, "") + correctHashFileForMetadataImage := "" + + pluginInventoryMetadataImage, _ := airgapped.GetPluginInventoryMetadataImage(od.image) + _, hashHexValMetadataImage, err := carvelhelpers.GetImageDigest(pluginInventoryMetadataImage) + if err == nil { + correctHashFileForMetadataImage = od.checkDigestFileExistance(hashHexValMetadataImage, "metadata.") + } + return correctHashFileForInventoryImage, correctHashFileForMetadataImage +} + +// checkDigestFileExistance checks whether the digest file already exists in the cache +// We store the digest hash of the cached DB as a file named "digest.<hash>" +// If this file exists, we are done. If not, we remove the current digest file +// as we are about to download a new DB and create a new digest file. +// First check any existing "digest.*" file; there should only be one, but +// to protect ourselves, we check first and if there are more than one due +// to some bug, we clean them up and invalidate the cache. +func (od *DBBackedOCIDiscovery) checkDigestFileExistance(hashHexVal, digestPrefix string) string { + correctHashFile := filepath.Join(od.pluginDataDir, digestPrefix+"digest."+hashHexVal) + matches, _ := filepath.Glob(filepath.Join(od.pluginDataDir, digestPrefix+"digest.*")) + if len(matches) > 1 { + // Too many digest files. This is a bug! Cleanup the cache. + log.V(4).Warningf("Too many digest files in the cache!
Invalidating the cache.") diff --git a/pkg/fakes/imageoperationsimpl.go b/pkg/fakes/imageoperationsimpl.go index 1d9f46da1..bdeae47f5 100644 --- a/pkg/fakes/imageoperationsimpl.go +++ b/pkg/fakes/imageoperationsimpl.go @@ -72,6 +72,18 @@ type ImageOperationsImpl struct { result2 string result3 error } + PushImageStub func(string, []string) error + pushImageMutex sync.RWMutex + pushImageArgsForCall []struct { + arg1 string + arg2 []string + } + pushImageReturns struct { + result1 error + } + pushImageReturnsOnCall map[int]struct { + result1 error + } invocations map[string][][]interface{} invocationsMutex sync.RWMutex } @@ -393,6 +405,73 @@ func (fake *ImageOperationsImpl) GetImageDigestReturnsOnCall(i int, result1 stri }{result1, result2, result3} } +func (fake *ImageOperationsImpl) PushImage(arg1 string, arg2 []string) error { + var arg2Copy []string + if arg2 != nil { + arg2Copy = make([]string, len(arg2)) + copy(arg2Copy, arg2) + } + fake.pushImageMutex.Lock() + ret, specificReturn := fake.pushImageReturnsOnCall[len(fake.pushImageArgsForCall)] + fake.pushImageArgsForCall = append(fake.pushImageArgsForCall, struct { + arg1 string + arg2 []string + }{arg1, arg2Copy}) + stub := fake.PushImageStub + fakeReturns := fake.pushImageReturns + fake.recordInvocation("PushImage", []interface{}{arg1, arg2Copy}) + fake.pushImageMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ImageOperationsImpl) PushImageCallCount() int { + fake.pushImageMutex.RLock() + defer fake.pushImageMutex.RUnlock() + return len(fake.pushImageArgsForCall) +} + +func (fake *ImageOperationsImpl) PushImageCalls(stub func(string, []string) error) { + fake.pushImageMutex.Lock() + defer fake.pushImageMutex.Unlock() + fake.PushImageStub = stub +} + +func (fake *ImageOperationsImpl) PushImageArgsForCall(i int) (string, []string) { + fake.pushImageMutex.RLock() + defer fake.pushImageMutex.RUnlock() + 
argsForCall := fake.pushImageArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *ImageOperationsImpl) PushImageReturns(result1 error) { + fake.pushImageMutex.Lock() + defer fake.pushImageMutex.Unlock() + fake.PushImageStub = nil + fake.pushImageReturns = struct { + result1 error + }{result1} +} + +func (fake *ImageOperationsImpl) PushImageReturnsOnCall(i int, result1 error) { + fake.pushImageMutex.Lock() + defer fake.pushImageMutex.Unlock() + fake.PushImageStub = nil + if fake.pushImageReturnsOnCall == nil { + fake.pushImageReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.pushImageReturnsOnCall[i] = struct { + result1 error + }{result1} +} + func (fake *ImageOperationsImpl) Invocations() map[string][][]interface{} { fake.invocationsMutex.RLock() defer fake.invocationsMutex.RUnlock() @@ -406,6 +485,8 @@ func (fake *ImageOperationsImpl) Invocations() map[string][][]interface{} { defer fake.getFilesMapFromImageMutex.RUnlock() fake.getImageDigestMutex.RLock() defer fake.getImageDigestMutex.RUnlock() + fake.pushImageMutex.RLock() + defer fake.pushImageMutex.RUnlock() copiedInvocations := map[string][][]interface{}{} for key, value := range fake.invocations { copiedInvocations[key] = value diff --git a/pkg/fakes/registy.go b/pkg/fakes/registy.go index def9ada35..435d07549 100644 --- a/pkg/fakes/registy.go +++ b/pkg/fakes/registy.go @@ -111,6 +111,18 @@ type Registry struct { result1 []string result2 error } + PushImageStub func(string, []string) error + pushImageMutex sync.RWMutex + pushImageArgsForCall []struct { + arg1 string + arg2 []string + } + pushImageReturns struct { + result1 error + } + pushImageReturnsOnCall map[int]struct { + result1 error + } invocations map[string][][]interface{} invocationsMutex sync.RWMutex } @@ -623,6 +635,73 @@ func (fake *Registry) ListImageTagsReturnsOnCall(i int, result1 []string, result }{result1, result2} } +func (fake *Registry) PushImage(arg1 string, arg2 []string) error { + var 
arg2Copy []string + if arg2 != nil { + arg2Copy = make([]string, len(arg2)) + copy(arg2Copy, arg2) + } + fake.pushImageMutex.Lock() + ret, specificReturn := fake.pushImageReturnsOnCall[len(fake.pushImageArgsForCall)] + fake.pushImageArgsForCall = append(fake.pushImageArgsForCall, struct { + arg1 string + arg2 []string + }{arg1, arg2Copy}) + stub := fake.PushImageStub + fakeReturns := fake.pushImageReturns + fake.recordInvocation("PushImage", []interface{}{arg1, arg2Copy}) + fake.pushImageMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *Registry) PushImageCallCount() int { + fake.pushImageMutex.RLock() + defer fake.pushImageMutex.RUnlock() + return len(fake.pushImageArgsForCall) +} + +func (fake *Registry) PushImageCalls(stub func(string, []string) error) { + fake.pushImageMutex.Lock() + defer fake.pushImageMutex.Unlock() + fake.PushImageStub = stub +} + +func (fake *Registry) PushImageArgsForCall(i int) (string, []string) { + fake.pushImageMutex.RLock() + defer fake.pushImageMutex.RUnlock() + argsForCall := fake.pushImageArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *Registry) PushImageReturns(result1 error) { + fake.pushImageMutex.Lock() + defer fake.pushImageMutex.Unlock() + fake.PushImageStub = nil + fake.pushImageReturns = struct { + result1 error + }{result1} +} + +func (fake *Registry) PushImageReturnsOnCall(i int, result1 error) { + fake.pushImageMutex.Lock() + defer fake.pushImageMutex.Unlock() + fake.PushImageStub = nil + if fake.pushImageReturnsOnCall == nil { + fake.pushImageReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.pushImageReturnsOnCall[i] = struct { + result1 error + }{result1} +} + func (fake *Registry) Invocations() map[string][][]interface{} { fake.invocationsMutex.RLock() defer fake.invocationsMutex.RUnlock() @@ -642,6 +721,8 @@ func (fake *Registry) Invocations() 
map[string][][]interface{} { defer fake.getImageDigestMutex.RUnlock() fake.listImageTagsMutex.RLock() defer fake.listImageTagsMutex.RUnlock() + fake.pushImageMutex.RLock() + defer fake.pushImageMutex.RUnlock() copiedInvocations := map[string][][]interface{}{} for key, value := range fake.invocations { copiedInvocations[key] = value diff --git a/pkg/plugininventory/data/sqlite/plugin_inventory_metadata_tables.sql b/pkg/plugininventory/data/sqlite/plugin_inventory_metadata_tables.sql new file mode 100644 index 000000000..c9c413a1f --- /dev/null +++ b/pkg/plugininventory/data/sqlite/plugin_inventory_metadata_tables.sql @@ -0,0 +1,14 @@ +CREATE TABLE IF NOT EXISTS "AvailablePluginBinaries" ( + "PluginName" TEXT NOT NULL, + "Target" TEXT NOT NULL, + "Version" TEXT NOT NULL, + PRIMARY KEY("PluginName", "Target", "Version") +); + +CREATE TABLE IF NOT EXISTS "AvailablePluginGroups" ( + "Vendor" TEXT NOT NULL, + "Publisher" TEXT NOT NULL, + "GroupName" TEXT NOT NULL, + PRIMARY KEY("Vendor", "Publisher", "GroupName") +); + \ No newline at end of file diff --git a/pkg/plugininventory/plugin_inventory.go b/pkg/plugininventory/plugin_inventory.go index 545c4de31..9650e03a1 100644 --- a/pkg/plugininventory/plugin_inventory.go +++ b/pkg/plugininventory/plugin_inventory.go @@ -8,6 +8,9 @@ package plugininventory import ( + "fmt" + "strings" + "github.com/vmware-tanzu/tanzu-cli/pkg/distribution" configtypes "github.com/vmware-tanzu/tanzu-plugin-runtime/config/types" ) @@ -108,6 +111,16 @@ type PluginGroupPluginEntry struct { Mandatory bool } +// PluginGroupIdentifier uniquely identifies a plugin group +type PluginGroupIdentifier struct { + // Vendor of the group + Vendor string + // Publisher of the group + Publisher string + // Name of the group + Name string +} + // PluginGroup represents a list of plugins. 
// The user will specify a group using // "-/: @@ -125,6 +138,27 @@ type PluginGroup struct { Plugins []*PluginGroupPluginEntry } +func PluginGroupToID(pg *PluginGroup) string { + return fmt.Sprintf("%s-%s/%s", pg.Vendor, pg.Publisher, pg.Name) +} + +func PluginGroupIdentifierFromID(id string) *PluginGroupIdentifier { + pg := &PluginGroupIdentifier{} + arr := strings.Split(id, "/") + if len(arr) != 2 { + return pg + } + pg.Name = arr[1] + + arr1 := strings.Split(arr[0], "-") + if len(arr1) != 2 { + return pg + } + pg.Vendor = arr1[0] + pg.Publisher = arr1[1] + return pg +} + // PluginGroupFilter allows to specify different criteria for // looking up plugin group entries. type PluginGroupFilter struct { diff --git a/pkg/plugininventory/plugin_inventory_metadata.go b/pkg/plugininventory/plugin_inventory_metadata.go new file mode 100644 index 000000000..0fdba945e --- /dev/null +++ b/pkg/plugininventory/plugin_inventory_metadata.go @@ -0,0 +1,154 @@ +// Copyright 2023 VMware, Inc. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +package plugininventory + +import ( + "database/sql" + + // Import the sqlite3 driver + _ "modernc.org/sqlite" + + "github.com/pkg/errors" +) + +// PluginInventoryMetadata is the interface to interact with a plugin inventory +// metadata database and plugin inventory database. 
+// plugin and plugin group identifier +// It can be used to create database schema for metadata db, insert +// plugin and plugin group identifier and merging metadata database +// This interface also provides function to update plugin inventory database +// based on the plugin inventory metadata database +type PluginInventoryMetadata interface { + // CreateInventoryMetadataDBSchema creates table schemas for + // plugin inventory metadata database + // returns error if table creation fails for any reason + CreateInventoryMetadataDBSchema() error + + // InsertPluginIdentifier inserts the PluginIdentifier entry to the + // AvailablePluginBinaries table + InsertPluginIdentifier(*PluginIdentifier) error + + // InsertPluginGroupIdentifier inserts the PluginGroupIdentifier entry to the + // AvailablePluginGroups table + InsertPluginGroupIdentifier(*PluginGroupIdentifier) error + + // MergeInventoryMetadataDatabase merges two inventory metadata database by + // merging the content of AvailablePluginBinaries and AvailablePluginGroups tables + MergeInventoryMetadataDatabase(additionalMetadataDBFilePath string) error + + // UpdatePluginInventoryDatabase updates the plugin inventory database based + // on the plugin inventory metadata database by deleting entries that doesn't + // exists in plugin inventory metadata database + UpdatePluginInventoryDatabase(pluginInventoryDBFilePath string) error +} + +// SQLiteInventoryMetadata is an inventory metadata stored using SQLite +type SQLiteInventoryMetadata struct { + // inventoryMetadataDBFile represents the full path to the SQLite DB file + inventoryMetadataDBFile string +} + +const ( + // SQliteInventoryMetadataDBFileName is the name of the DB file that is stored in + // the OCI image describing the plugin inventory metadata. + SQliteInventoryMetadataDBFileName = "plugin_inventory_metadata.db" +) + +// NewSQLiteInventoryMetadata returns a new PluginInventoryMetadata connected to the data found at 'inventoryMetadataDBFile'. 
+func NewSQLiteInventoryMetadata(inventoryMetadataDBFile string) PluginInventoryMetadata { + return &SQLiteInventoryMetadata{ + inventoryMetadataDBFile: inventoryMetadataDBFile, + } +} + +// CreateInventoryMetadataDBSchema creates table schemas for +// plugin inventory metadata database +// returns error if table creation fails for any reason +func (b *SQLiteInventoryMetadata) CreateInventoryMetadataDBSchema() error { + db, err := sql.Open("sqlite", b.inventoryMetadataDBFile) + if err != nil { + return errors.Wrapf(err, "failed to open the DB at '%s'", b.inventoryMetadataDBFile) + } + defer db.Close() + + _, err = db.Exec(PluginInventoryMetadataCreateTablesSchema) + if err != nil { + return errors.Wrap(err, "error while creating tables to the database") + } + + return nil +} + +// InsertPluginIdentifier inserts the PluginIdentifier entry to the +// AvailablePluginBinaries table +func (b *SQLiteInventoryMetadata) InsertPluginIdentifier(pi *PluginIdentifier) error { + db, err := sql.Open("sqlite", b.inventoryMetadataDBFile) + if err != nil { + return errors.Wrapf(err, "failed to open the DB from '%s' file", b.inventoryMetadataDBFile) + } + defer db.Close() + + _, err = db.Exec("INSERT INTO AvailablePluginBinaries VALUES(?,?,?);", pi.Name, pi.Target, pi.Version) + if err != nil { + return errors.Wrapf(err, "unable to insert plugin identifier %v", pi) + } + return nil +} + +// InsertPluginGroupIdentifier inserts the PluginGroupIdentifier entry to the +// AvailablePluginGroups table +func (b *SQLiteInventoryMetadata) InsertPluginGroupIdentifier(pgi *PluginGroupIdentifier) error { + db, err := sql.Open("sqlite", b.inventoryMetadataDBFile) + if err != nil { + return errors.Wrapf(err, "failed to open the DB from '%s' file", b.inventoryMetadataDBFile) + } + defer db.Close() + + _, err = db.Exec("INSERT INTO AvailablePluginGroups VALUES(?,?,?);", pgi.Vendor, pgi.Publisher, pgi.Name) + if err != nil { + return errors.Wrapf(err, "unable to insert plugin group identifier %v", 
pgi) + } + return nil +} + +// MergeInventoryMetadataDatabase merges two inventory metadata database by +// merging the content of AvailablePluginBinaries and AvailablePluginGroups tables +func (b *SQLiteInventoryMetadata) MergeInventoryMetadataDatabase(additionalMetadataDBFilePath string) error { + db, err := sql.Open("sqlite", b.inventoryMetadataDBFile) + if err != nil { + return errors.Wrapf(err, "failed to open the DB from '%s' file", b.inventoryMetadataDBFile) + } + defer db.Close() + + mergeQuery := `attach ? as additionalMetadataDB; + INSERT OR REPLACE INTO AvailablePluginGroups SELECT Vendor,Publisher,GroupName FROM additionalMetadataDB.AvailablePluginGroups; + INSERT OR REPLACE INTO AvailablePluginBinaries SELECT PluginName,Target,Version FROM additionalMetadataDB.AvailablePluginBinaries;` + + _, err = db.Exec(mergeQuery, additionalMetadataDBFilePath) + if err != nil { + return errors.Wrapf(err, "unable to execute the query %v", mergeQuery) + } + return nil +} + +// UpdatePluginInventoryDatabase updates the plugin inventory database based +// on the plugin inventory metadata database by deleting entries that doesn't +// exists in plugin inventory metadata database +func (b *SQLiteInventoryMetadata) UpdatePluginInventoryDatabase(pluginInventoryDBFilePath string) error { + db, err := sql.Open("sqlite", b.inventoryMetadataDBFile) + if err != nil { + return errors.Wrapf(err, "failed to open the DB from '%s' file", b.inventoryMetadataDBFile) + } + defer db.Close() + + updateQuery := `attach ? 
// Copyright 2023 VMware, Inc. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package plugininventory

import (
	"os"
	"path/filepath"

	// Import the sqlite driver
	_ "modernc.org/sqlite"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"

	"github.com/vmware-tanzu/tanzu-cli/pkg/distribution"
	"github.com/vmware-tanzu/tanzu-plugin-runtime/config/types"
)

// Fixture plugin identifiers used to populate the metadata DB in the tests below.
var pluginIdentifier1 = PluginIdentifier{
	Name:    "plugin1",
	Target:  "global",
	Version: "v1.0.0",
}
var pluginIdentifier2 = PluginIdentifier{
	Name:    "plugin2",
	Target:  "kubernetes",
	Version: "v2.0.0",
}

// Fixture plugin-group identifiers; both share the same vendor/publisher so
// only the group name distinguishes them.
var pluginGroupIdentifier1 = PluginGroupIdentifier{
	Vendor:    "fakevendor",
	Publisher: "fakepublisher",
	Name:      "fake1:v1.0.0",
}
var pluginGroupIdentifier2 = PluginGroupIdentifier{
	Vendor:    "fakevendor",
	Publisher: "fakepublisher",
	Name:      "fake2:v2.0.0",
}

// Fixture inventory entries inserted into the plugin inventory DB; each has a
// single linux/amd64 artifact for one version.
var pluginEntry1 = PluginInventoryEntry{
	Name:               "plugin1",
	Target:             types.TargetGlobal,
	Description:        "plugin1 description",
	Publisher:          "fakepublisher",
	Vendor:             "fakevendor",
	RecommendedVersion: "v1.0.0",
	Hidden:             false,
	Artifacts: distribution.Artifacts{
		"v1.0.0": []distribution.Artifact{
			{
				OS:     "linux",
				Arch:   "amd64",
				Digest: "0000000000",
				Image:  "vmware/tkg/linux/amd64/global/plugin1:v1.0.0",
			},
		},
	},
}
var pluginEntry2 = PluginInventoryEntry{
	Name:               "plugin2",
	Target:             types.TargetK8s,
	Description:        "plugin2 description",
	Publisher:          "otherpublisher",
	Vendor:             "othervendor",
	RecommendedVersion: "v1.2.3",
	Hidden:             false,
	Artifacts: distribution.Artifacts{
		"v2.0.0": []distribution.Artifact{
			{
				OS:     "linux",
				Arch:   "amd64",
				Digest: "3333333333",
				Image:  "othervendor/otherpublisher/linux/amd64/k8s/plugin2:v2.0.0",
			},
		},
	},
}
var pluginEntry3 = PluginInventoryEntry{
	Name:               "plugin3",
	Target:             types.TargetTMC,
	Description:        "plugin3 description",
	Publisher:          "otherpublisher",
	Vendor:             "othervendor",
	RecommendedVersion: "v3.0.0",
	Hidden:             false,
	Artifacts: distribution.Artifacts{
		"v3.0.0": []distribution.Artifact{
			{
				OS:     "linux",
				Arch:   "amd64",
				Digest: "0000000000",
				Image:  "vmware/tmc/linux/amd64/tmc/plugin3:v3.0.0",
			},
		},
	},
}

// Fixture plugin groups; each contains exactly one of the plugins above.
var pluginGroupEntry1 = PluginGroup{
	Name:      "fake1:v1.0.0",
	Vendor:    "fakevendor",
	Publisher: "fakepublisher",
	Hidden:    false,
	Plugins: []*PluginGroupPluginEntry{
		{
			PluginIdentifier: PluginIdentifier{Name: "plugin1", Target: types.TargetGlobal, Version: "v1.0.0"},
			Mandatory:        false,
		},
	},
}
var pluginGroupEntry2 = PluginGroup{
	Name:      "fake2:v2.0.0",
	Vendor:    "fakevendor",
	Publisher: "fakepublisher",
	Hidden:    false,
	Plugins: []*PluginGroupPluginEntry{
		{
			PluginIdentifier: PluginIdentifier{Name: "plugin2", Target: types.TargetK8s, Version: "v2.0.0"},
			Mandatory:        false,
		},
	},
}

var _ = Describe("Unit tests for plugin inventory metadata", func() {
	var (
		err                                 error
		metadataInventory                   PluginInventoryMetadata
		additionalMetadataInventory         PluginInventoryMetadata
		pluginInventory                     PluginInventory
		pluginInventoryFilePath             string
		additionalMetadataInventoryFilePath string
		tmpDir                              string
	)

	// createInventoryMetadataDB creates a fresh metadata DB file in a new temp
	// directory, optionally initializing the schema. NOTE: it reassigns the
	// shared tmpDir, so AfterEach only removes the most recently created dir.
	createInventoryMetadataDB := func(createSchema bool) (PluginInventoryMetadata, string) {
		tmpDir, err = os.MkdirTemp(os.TempDir(), "")
		Expect(err).To(BeNil(), "unable to create temporary directory")
		// Create empty file for the DB
		dbFile, err := os.Create(filepath.Join(tmpDir, SQliteInventoryMetadataDBFileName))
		Expect(err).To(BeNil())
		mi := NewSQLiteInventoryMetadata(dbFile.Name())
		if createSchema {
			err = mi.CreateInventoryMetadataDBSchema()
			Expect(err).To(BeNil())
		}
		return mi, dbFile.Name()
	}

	// createInventoryDB does the same for the plugin inventory DB (the DB that
	// UpdatePluginInventoryDatabase prunes against the metadata DB).
	createInventoryDB := func(createSchema bool) (PluginInventory, string) {
		tmpDir, err = os.MkdirTemp(os.TempDir(), "")
		Expect(err).To(BeNil(), "unable to create temporary directory")

		// Create empty file for the DB
		dbFile, err := os.Create(filepath.Join(tmpDir, SQliteDBFileName))
		Expect(err).To(BeNil())

		inventory := NewSQLiteInventory(dbFile.Name(), tmpDir)
		if createSchema {
			err = inventory.CreateSchema()
			Expect(err).To(BeNil(), "failed to create DB table for testing")
		}
		return inventory, dbFile.Name()
	}

	Describe("Insert plugin identifier", func() {
		// No schema: the insert must fail with a missing-table error.
		Context("With an empty DB file", func() {
			BeforeEach(func() {
				metadataInventory, _ = createInventoryMetadataDB(false)
			})
			AfterEach(func() {
				os.RemoveAll(tmpDir)
			})
			It("should return an error", func() {
				err = metadataInventory.InsertPluginIdentifier(&pluginIdentifier1)
				Expect(err).To(HaveOccurred())
				Expect(err.Error()).To(ContainSubstring("unable to insert plugin identifier"))
				Expect(err.Error()).To(ContainSubstring("no such table: AvailablePluginBinaries"))
			})
		})

		// Schema present: inserting two distinct identifiers succeeds.
		Context("With an empty DB tables", func() {
			BeforeEach(func() {
				metadataInventory, _ = createInventoryMetadataDB(true)
			})
			AfterEach(func() {
				os.RemoveAll(tmpDir)
			})
			It("should not return an error", func() {
				err = metadataInventory.InsertPluginIdentifier(&pluginIdentifier1)
				Expect(err).NotTo(HaveOccurred())

				err = metadataInventory.InsertPluginIdentifier(&pluginIdentifier2)
				Expect(err).NotTo(HaveOccurred())
			})
		})

		// Duplicate insert: the primary-key constraint must reject it.
		Context("When same plugin indentifier entry already exists", func() {
			BeforeEach(func() {
				metadataInventory, _ = createInventoryMetadataDB(true)

				err = metadataInventory.InsertPluginIdentifier(&pluginIdentifier1)
				Expect(err).NotTo(HaveOccurred())

			})
			AfterEach(func() {
				os.RemoveAll(tmpDir)
			})
			It("should return an error", func() {
				err = metadataInventory.InsertPluginIdentifier(&pluginIdentifier1)
				Expect(err).To(HaveOccurred())
				Expect(err.Error()).To(ContainSubstring("unable to insert plugin identifier"))
				Expect(err.Error()).To(ContainSubstring("UNIQUE constraint failed"))
			})
		})
	})

	Describe("Insert plugin group identifier", func() {
		// No schema: the insert must fail with a missing-table error.
		Context("With an empty DB file", func() {
			BeforeEach(func() {
				tmpDir, err = os.MkdirTemp(os.TempDir(), "")
				Expect(err).To(BeNil(), "unable to create temporary directory")

				// Create empty file for the DB
				dbFile, err := os.Create(filepath.Join(tmpDir, SQliteInventoryMetadataDBFileName))
				Expect(err).To(BeNil())

				metadataInventory = NewSQLiteInventoryMetadata(dbFile.Name())
			})
			AfterEach(func() {
				os.RemoveAll(tmpDir)
			})
			It("should return an error", func() {
				err = metadataInventory.InsertPluginGroupIdentifier(&pluginGroupIdentifier1)
				Expect(err).To(HaveOccurred())
				Expect(err.Error()).To(ContainSubstring("unable to insert plugin group identifier"))
				Expect(err.Error()).To(ContainSubstring("no such table: AvailablePluginGroups"))
			})
		})

		// Schema present: inserting two distinct group identifiers succeeds.
		Context("With an empty DB tables", func() {
			BeforeEach(func() {
				metadataInventory, _ = createInventoryMetadataDB(true)
			})
			AfterEach(func() {
				os.RemoveAll(tmpDir)
			})
			It("should not return an error", func() {
				err = metadataInventory.InsertPluginGroupIdentifier(&pluginGroupIdentifier1)
				Expect(err).NotTo(HaveOccurred())

				err = metadataInventory.InsertPluginGroupIdentifier(&pluginGroupIdentifier2)
				Expect(err).NotTo(HaveOccurred())
			})
		})

		// Duplicate insert: the primary-key constraint must reject it.
		Context("When same plugin group indentifier entry already exists", func() {
			BeforeEach(func() {
				metadataInventory, _ = createInventoryMetadataDB(true)

				err = metadataInventory.InsertPluginGroupIdentifier(&pluginGroupIdentifier1)
				Expect(err).NotTo(HaveOccurred())

			})
			AfterEach(func() {
				os.RemoveAll(tmpDir)
			})
			It("should return an error", func() {
				err = metadataInventory.InsertPluginGroupIdentifier(&pluginGroupIdentifier1)
				Expect(err).To(HaveOccurred())
				Expect(err.Error()).To(ContainSubstring("unable to insert plugin group identifier"))
				Expect(err.Error()).To(ContainSubstring("UNIQUE constraint failed"))
			})
		})
	})

	Describe("Merge Inventory Metadata Database", func() {
		// Attaching a schemaless DB makes the INSERT..SELECT fail.
		Context("when one of the database does not have tables created", func() {
			BeforeEach(func() {
				metadataInventory, _ = createInventoryMetadataDB(true)
				_, additionalMetadataInventoryFilePath = createInventoryMetadataDB(false)
			})
			AfterEach(func() {
				os.RemoveAll(tmpDir)
			})
			It("should return an error", func() {
				err = metadataInventory.MergeInventoryMetadataDatabase(additionalMetadataInventoryFilePath)
				Expect(err).To(HaveOccurred())
				Expect(err.Error()).To(ContainSubstring("unable to execute the query"))
			})
		})

		// Merging two empty-but-valid DBs is a no-op and must succeed.
		Context("when both the databases have empty tables for plugins and plugin groups", func() {
			BeforeEach(func() {
				metadataInventory, _ = createInventoryMetadataDB(true)
				_, additionalMetadataInventoryFilePath = createInventoryMetadataDB(true)
			})
			AfterEach(func() {
				os.RemoveAll(tmpDir)
			})
			It("should not return an error", func() {
				err = metadataInventory.MergeInventoryMetadataDatabase(additionalMetadataInventoryFilePath)
				Expect(err).NotTo(HaveOccurred())
			})
		})

		// Disjoint content: plain union, no conflict handling exercised.
		Context("when both inventory metadata databases does not have any overlap of plugins and plugin groups", func() {
			BeforeEach(func() {
				metadataInventory, _ = createInventoryMetadataDB(true)
				additionalMetadataInventory, additionalMetadataInventoryFilePath = createInventoryMetadataDB(true)

				err = metadataInventory.InsertPluginIdentifier(&pluginIdentifier1)
				Expect(err).NotTo(HaveOccurred())
				err = metadataInventory.InsertPluginGroupIdentifier(&pluginGroupIdentifier1)
				Expect(err).NotTo(HaveOccurred())

				err = additionalMetadataInventory.InsertPluginIdentifier(&pluginIdentifier2)
				Expect(err).NotTo(HaveOccurred())
				err = additionalMetadataInventory.InsertPluginGroupIdentifier(&pluginGroupIdentifier2)
				Expect(err).NotTo(HaveOccurred())

			})
			AfterEach(func() {
				os.RemoveAll(tmpDir)
			})
			It("should not return an error", func() {
				err = metadataInventory.MergeInventoryMetadataDatabase(additionalMetadataInventoryFilePath)
				Expect(err).NotTo(HaveOccurred())
			})
		})

		// Overlapping rows: INSERT OR REPLACE must absorb duplicates silently.
		Context("when both inventory metadata databases does have some overlap of plugins and plugin groups", func() {
			BeforeEach(func() {
				metadataInventory, _ = createInventoryMetadataDB(true)
				additionalMetadataInventory, additionalMetadataInventoryFilePath = createInventoryMetadataDB(true)

				err = metadataInventory.InsertPluginIdentifier(&pluginIdentifier1)
				Expect(err).NotTo(HaveOccurred())
				err = metadataInventory.InsertPluginIdentifier(&pluginIdentifier2)
				Expect(err).NotTo(HaveOccurred())
				err = metadataInventory.InsertPluginGroupIdentifier(&pluginGroupIdentifier1)
				Expect(err).NotTo(HaveOccurred())
				err = metadataInventory.InsertPluginGroupIdentifier(&pluginGroupIdentifier2)
				Expect(err).NotTo(HaveOccurred())

				err = additionalMetadataInventory.InsertPluginIdentifier(&pluginIdentifier2)
				Expect(err).NotTo(HaveOccurred())
				err = additionalMetadataInventory.InsertPluginGroupIdentifier(&pluginGroupIdentifier2)
				Expect(err).NotTo(HaveOccurred())
			})
			AfterEach(func() {
				os.RemoveAll(tmpDir)
			})
			It("should not return an error", func() {
				err = metadataInventory.MergeInventoryMetadataDatabase(additionalMetadataInventoryFilePath)
				Expect(err).NotTo(HaveOccurred())
			})
		})
	})

	Describe("Update Plugin Inventory Database based on Metadata Database", func() {
		// Inventory DB without schema: the DELETE statements must fail.
		Context("when plugin inventory database provided is invalid and does not have tables created", func() {
			BeforeEach(func() {
				metadataInventory, _ = createInventoryMetadataDB(true)
				_, pluginInventoryFilePath = createInventoryDB(false)
			})
			AfterEach(func() {
				os.RemoveAll(tmpDir)
			})
			It("should return an error", func() {
				err = metadataInventory.UpdatePluginInventoryDatabase(pluginInventoryFilePath)
				Expect(err).To(HaveOccurred())
				Expect(err.Error()).To(ContainSubstring("error while updating plugin inventory database"))
			})
		})

		// Metadata DB without schema: the JOIN targets are missing, so it fails too.
		Context("when plugin inventory metadata database provided is invalid and does not have tables created", func() {
			BeforeEach(func() {
				metadataInventory, _ = createInventoryMetadataDB(false)
				_, pluginInventoryFilePath = createInventoryDB(true)
			})
			AfterEach(func() {
				os.RemoveAll(tmpDir)
			})
			It("should return an error", func() {
				err = metadataInventory.UpdatePluginInventoryDatabase(pluginInventoryFilePath)
				Expect(err).To(HaveOccurred())
				Expect(err.Error()).To(ContainSubstring("error while updating plugin inventory database"))
			})
		})

		// Both DBs valid but empty: nothing to prune, must succeed.
		Context("when both the databases have empty tables and no plugins or plugin groups", func() {
			BeforeEach(func() {
				metadataInventory, _ = createInventoryMetadataDB(true)
				_, pluginInventoryFilePath = createInventoryDB(true)
			})
			AfterEach(func() {
				os.RemoveAll(tmpDir)
			})
			It("should not return an error", func() {
				err = metadataInventory.UpdatePluginInventoryDatabase(pluginInventoryFilePath)
				Expect(err).NotTo(HaveOccurred())
			})
		})

		// Populated inventory DB pruned against metadata DBs of varying content.
		Context("when plugin inventory database and metadata database are valid", func() {
			BeforeEach(func() {
				metadataInventory, _ = createInventoryMetadataDB(true)
				pluginInventory, pluginInventoryFilePath = createInventoryDB(true)

				err = pluginInventory.InsertPlugin(&pluginEntry1)
				Expect(err).NotTo(HaveOccurred())
				err = pluginInventory.InsertPlugin(&pluginEntry2)
				Expect(err).NotTo(HaveOccurred())
				err = pluginInventory.InsertPlugin(&pluginEntry3)
				Expect(err).NotTo(HaveOccurred())
				err = pluginInventory.InsertPluginGroup(&pluginGroupEntry1, true)
				Expect(err).NotTo(HaveOccurred())
				err = pluginInventory.InsertPluginGroup(&pluginGroupEntry2, true)
				Expect(err).NotTo(HaveOccurred())
			})
			AfterEach(func() {
				os.RemoveAll(tmpDir)
			})
			It("when metadata database tables are empty, it should not return an error and remove all plugin and plugin groups from inventory database", func() {
				err = metadataInventory.UpdatePluginInventoryDatabase(pluginInventoryFilePath)
				Expect(err).NotTo(HaveOccurred())

				pluginEntries, err := pluginInventory.GetAllPlugins()
				Expect(err).NotTo(HaveOccurred())
				Expect(len(pluginEntries)).To(Equal(0))

				pluginGroupEntries, err := pluginInventory.GetPluginGroups(PluginGroupFilter{})
				Expect(err).NotTo(HaveOccurred())
				Expect(len(pluginGroupEntries)).To(Equal(0))
			})

			It("when metadata database tables has overlapping entries, it should not return an error and remove all plugin and plugin groups from inventory database that are not present in metadata database - 1", func() {
				err = metadataInventory.InsertPluginIdentifier(&pluginIdentifier1)
				Expect(err).NotTo(HaveOccurred())
				err = metadataInventory.InsertPluginGroupIdentifier(&pluginGroupIdentifier1)
				Expect(err).NotTo(HaveOccurred())

				err = metadataInventory.UpdatePluginInventoryDatabase(pluginInventoryFilePath)
				Expect(err).NotTo(HaveOccurred())

				pluginEntries, err := pluginInventory.GetAllPlugins()
				Expect(err).NotTo(HaveOccurred())
				Expect(len(pluginEntries)).To(Equal(1))

				pluginGroupEntries, err := pluginInventory.GetPluginGroups(PluginGroupFilter{})
				Expect(err).NotTo(HaveOccurred())
				Expect(len(pluginGroupEntries)).To(Equal(1))
			})

			It("when metadata database tables has overlapping entries, it should not return an error and remove all plugin and plugin groups from inventory database that are not present in metadata database - 2", func() {
				err = metadataInventory.InsertPluginIdentifier(&pluginIdentifier1)
				Expect(err).NotTo(HaveOccurred())
				err = metadataInventory.InsertPluginIdentifier(&pluginIdentifier2)
				Expect(err).NotTo(HaveOccurred())
				err = metadataInventory.InsertPluginGroupIdentifier(&pluginGroupIdentifier1)
				Expect(err).NotTo(HaveOccurred())
				err = metadataInventory.InsertPluginGroupIdentifier(&pluginGroupIdentifier2)
				Expect(err).NotTo(HaveOccurred())

				err = metadataInventory.UpdatePluginInventoryDatabase(pluginInventoryFilePath)
				Expect(err).NotTo(HaveOccurred())

				pluginEntries, err := pluginInventory.GetAllPlugins()
				Expect(err).NotTo(HaveOccurred())
				Expect(len(pluginEntries)).To(Equal(2))

				pluginGroupEntries, err := pluginInventory.GetPluginGroups(PluginGroupFilter{})
				Expect(err).NotTo(HaveOccurred())
				Expect(len(pluginGroupEntries)).To(Equal(2))
			})
		})
	})
})
index 5f91ee06a..26374e18f 100644 --- a/pkg/plugininventory/sqlite_inventory_schema.go +++ b/pkg/plugininventory/sqlite_inventory_schema.go @@ -13,4 +13,9 @@ var ( CreateTablesSchema = strings.TrimSpace(createTablesSchema) //go:embed data/sqlite/create_tables.sql createTablesSchema string + + // PluginInventoryMetadataCreateTablesSchema defines the database schema to create sqlite database for available plugins + PluginInventoryMetadataCreateTablesSchema = strings.TrimSpace(pluginInventoryMetadataCreateTablesSchema) + //go:embed data/sqlite/plugin_inventory_metadata_tables.sql + pluginInventoryMetadataCreateTablesSchema string ) diff --git a/pkg/pluginmanager/manager.go b/pkg/pluginmanager/manager.go index 292ab7278..8371c14b9 100644 --- a/pkg/pluginmanager/manager.go +++ b/pkg/pluginmanager/manager.go @@ -151,7 +151,7 @@ func discoverPluginGroup(pd []configtypes.PluginDiscovery, groupID string) (*plu var matchingGroup *plugininventory.PluginGroup for _, discAndGroups := range groupsByDiscovery { for _, group := range discAndGroups.Groups { - id := fmt.Sprintf("%s-%s/%s", group.Vendor, group.Publisher, group.Name) + id := plugininventory.PluginGroupToID(group) if id == groupID { // Found the group. 
if matchingGroup == nil { diff --git a/pkg/registry/client.go b/pkg/registry/client.go index 6e92defff..31e10cf33 100644 --- a/pkg/registry/client.go +++ b/pkg/registry/client.go @@ -179,6 +179,7 @@ func (r *registry) CopyImageToTar(sourceImageName, destTarFile string) error { copyOptions := cmd.NewCopyOptions(ui.NewWrappingConfUI(writerUI, nil)) copyOptions.Concurrency = 3 + copyOptions.SignatureFlags = cmd.SignatureFlags{CopyCosignSignatures: true} isBundle, _ := bundle.NewBundle(sourceImageName, r.registry).IsBundle() if isBundle { copyOptions.BundleFlags = cmd.BundleFlags{Bundle: sourceImageName} @@ -265,3 +266,26 @@ func (r *registry) GetImageDigest(imageWithTag string) (string, string, error) { } return hash.Algorithm, hash.Hex, nil } + +// PushImage publishes the image to the specified location +// This is equivalent to `imgpkg push -i -f ` +func (r *registry) PushImage(imageWithTag string, filePaths []string) error { + // Creating a dummy writer to capture the logs + // currently this logs are not displayed or used directly + var outputBuf, errorBuf bytes.Buffer + writerUI := ui.NewWriterUI(&outputBuf, &errorBuf, nil) + + pushOptions := cmd.NewPushOptions(writerUI) + pushOptions.ImageFlags = cmd.ImageFlags{Image: imageWithTag} + pushOptions.FileFlags = cmd.FileFlags{Files: filePaths} + if r.opts != nil { + pushOptions.RegistryFlags = cmd.RegistryFlags{ + CACertPaths: r.opts.CACertPaths, + VerifyCerts: r.opts.VerifyCerts, + Insecure: r.opts.Insecure, + Anon: r.opts.Anon, + } + } + + return pushOptions.Run() +} diff --git a/pkg/registry/interface.go b/pkg/registry/interface.go index 40325fcc2..1293c5825 100644 --- a/pkg/registry/interface.go +++ b/pkg/registry/interface.go @@ -27,4 +27,7 @@ type Registry interface { // CopyImageFromTar publishes the image to destination repository from specified tar file // This is equivalent to `imgpkg copy --tar --to-repo ` command CopyImageFromTar(sourceTarFile, destImageRepo string) error + // PushImage publishes the 
image to the specified location + // This is equivalent to `imgpkg push -i -f ` + PushImage(imageWithTag string, filePaths []string) error } diff --git a/pkg/utils/files.go b/pkg/utils/files.go index 6d8238f09..b472f415f 100644 --- a/pkg/utils/files.go +++ b/pkg/utils/files.go @@ -37,7 +37,13 @@ func CopyFile(sourceFile, destFile string) error { if err != nil { return err } - + dirName := filepath.Dir(destFile) + if _, serr := os.Stat(dirName); serr != nil { + merr := os.MkdirAll(dirName, os.ModePerm) + if merr != nil { + return merr + } + } err = os.WriteFile(destFile, input, constants.ConfigFilePermissions) return err }